X-Git-Url: http://pilppa.org/gitweb/?a=blobdiff_plain;f=include%2Flinux%2Fmm.h;h=498ff8778fb6d6c39ef8fb8472f32d03cc33952f;hb=d4f0f122ec407bd1b4d1c773d0c87edc6a92cd26;hp=a06a84d347fb5cbbc3b29ff3dfa727f429cc0f7b;hpb=aaadff81195056c7c14e0d834b3318c624c0fd78;p=linux-2.6-omap-h63xx.git

diff --git a/include/linux/mm.h b/include/linux/mm.h
index a06a84d347f..498ff8778fb 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -3,6 +3,7 @@
 
 #include <linux/sched.h>
 #include <linux/errno.h>
+#include <linux/capability.h>
 
 #ifdef __KERNEL__
 
@@ -13,6 +14,7 @@
 #include <linux/rbtree.h>
 #include <linux/prio_tree.h>
 #include <linux/fs.h>
+#include <linux/mutex.h>
 
 struct mempolicy;
 struct anon_vma;
@@ -223,24 +225,27 @@ struct page {
 					 * & limit reverse map searches.
 					 */
 	union {
-		unsigned long private;	/* Mapping-private opaque data:
-					 * usually used for buffer_heads
-					 * if PagePrivate set; used for
-					 * swp_entry_t if PageSwapCache
-					 * When page is free, this indicates
-					 * order in the buddy system.
-					 */
+	    struct {
+		unsigned long private;		/* Mapping-private opaque data:
+						 * usually used for buffer_heads
+						 * if PagePrivate set; used for
+						 * swp_entry_t if PageSwapCache.
+						 * When page is free, this
+						 * indicates order in the buddy
+						 * system.
+						 */
+		struct address_space *mapping;	/* If low bit clear, points to
+						 * inode address_space, or NULL.
+						 * If page mapped as anonymous
+						 * memory, low bit is set, and
+						 * it points to anon_vma object:
+						 * see PAGE_MAPPING_ANON below.
+						 */
+	    };
 #if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
-		spinlock_t ptl;
+	    spinlock_t ptl;
 #endif
-	} u;
-	struct address_space *mapping;	/* If low bit clear, points to
-					 * inode address_space, or NULL.
-					 * If page mapped as anonymous
-					 * memory, low bit is set, and
-					 * it points to anon_vma object:
-					 * see PAGE_MAPPING_ANON below.
-					 */
+	};
 	pgoff_t index;			/* Our offset within mapping. */
 	struct list_head lru;		/* Pageout list, eg. active_list
 					 * protected by zone->lru_lock !
@@ -261,8 +266,8 @@ struct page {
 #endif /* WANT_PAGE_VIRTUAL */
 };
 
-#define page_private(page)		((page)->u.private)
-#define set_page_private(page, v)	((page)->u.private = (v))
+#define page_private(page)		((page)->private)
+#define set_page_private(page, v)	((page)->private = (v))
 
 /*
  * FIXME: take this include out, include page-flags.h in
@@ -298,7 +303,7 @@ struct page {
  */
 #define put_page_testzero(p)				\
 	({						\
-		BUG_ON(page_count(p) == 0);		\
+		BUG_ON(atomic_read(&(p)->_count) == -1);\
 		atomic_add_negative(-1, &(p)->_count);	\
 	})
 
@@ -308,7 +313,7 @@ struct page {
  */
 #define get_page_testone(p)	atomic_inc_and_test(&(p)->_count)
 
-#define set_page_count(p,v)	atomic_set(&(p)->_count, v - 1)
+#define set_page_count(p,v)	atomic_set(&(p)->_count, (v) - 1)
 #define __put_page(p)		atomic_dec(&(p)->_count)
 
 extern void FASTCALL(__page_cache_release(struct page *));
@@ -507,7 +512,7 @@ static inline void set_page_links(struct page *page, unsigned long zone,
 extern struct page *mem_map;
 #endif
 
-static inline void *lowmem_page_address(struct page *page)
+static __always_inline void *lowmem_page_address(struct page *page)
 {
 	return __va(page_to_pfn(page) << PAGE_SHIFT);
 }
@@ -634,14 +639,38 @@ struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
 int shmem_lock(struct file *file, int lock, struct user_struct *user);
 #else
 #define shmem_nopage filemap_nopage
-#define shmem_lock(a, b, c) ({0;})	/* always in memory, no need to lock */
-#define shmem_set_policy(a, b)	(0)
-#define shmem_get_policy(a, b)	(NULL)
+
+static inline int shmem_lock(struct file *file, int lock,
+			     struct user_struct *user)
+{
+	return 0;
+}
+
+static inline int shmem_set_policy(struct vm_area_struct *vma,
+				   struct mempolicy *new)
+{
+	return 0;
+}
+
+static inline struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
+						 unsigned long addr)
+{
+	return NULL;
+}
 #endif
 
 struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags);
+extern int shmem_mmap(struct file *file, struct vm_area_struct *vma);
 int shmem_zero_setup(struct vm_area_struct *);
 
+#ifndef CONFIG_MMU
+extern unsigned long shmem_get_unmapped_area(struct file *file,
+					     unsigned long addr,
+					     unsigned long len,
+					     unsigned long pgoff,
+					     unsigned long flags);
+#endif
+
 static inline int can_do_mlock(void)
 {
 	if (capable(CAP_IPC_LOCK))
@@ -690,14 +719,31 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping,
 }
 
 extern int vmtruncate(struct inode * inode, loff_t offset);
+extern int vmtruncate_range(struct inode * inode, loff_t offset, loff_t end);
 extern int install_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, struct page *page, pgprot_t prot);
 extern int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long pgoff, pgprot_t prot);
-extern int __handle_mm_fault(struct mm_struct *mm,struct vm_area_struct *vma, unsigned long address, int write_access);
 
-static inline int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, int write_access)
+#ifdef CONFIG_MMU
+extern int __handle_mm_fault(struct mm_struct *mm,struct vm_area_struct *vma,
+			unsigned long address, int write_access);
+
+static inline int handle_mm_fault(struct mm_struct *mm,
+			struct vm_area_struct *vma, unsigned long address,
+			int write_access)
+{
+	return __handle_mm_fault(mm, vma, address, write_access) &
+				(~VM_FAULT_WRITE);
+}
+#else
+static inline int handle_mm_fault(struct mm_struct *mm,
+			struct vm_area_struct *vma, unsigned long address,
+			int write_access)
 {
-	return __handle_mm_fault(mm, vma, address, write_access) & (~VM_FAULT_WRITE);
+	/* should never happen if there's no MMU */
+	BUG();
+	return VM_FAULT_SIGBUS;
 }
+#endif
 
 extern int make_pages_present(unsigned long addr, unsigned long end);
 extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
@@ -774,7 +820,7 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
  * overflow into the next struct page (as it might with DEBUG_SPINLOCK).
  * When freeing, reset page->mapping so free_pages_check won't complain.
  */
-#define __pte_lockptr(page)	&((page)->u.ptl)
+#define __pte_lockptr(page)	&((page)->ptl)
 #define pte_lock_init(_page)	do {					\
 	spin_lock_init(__pte_lockptr(_page));				\
 } while (0)
@@ -896,6 +942,8 @@ extern unsigned long do_brk(unsigned long, unsigned long);
 /* filemap.c */
 extern unsigned long page_unuse(struct page *);
 extern void truncate_inode_pages(struct address_space *, loff_t);
+extern void truncate_inode_pages_range(struct address_space *,
+				       loff_t lstart, loff_t lend);
 
 /* generic vm_area_ops exported for stackable file systems */
 extern struct page *filemap_nopage(struct vm_area_struct *, unsigned long, int *);
@@ -978,6 +1026,9 @@ static inline void vm_stat_account(struct mm_struct *mm,
 static inline void
 kernel_map_pages(struct page *page, int numpages, int enable)
 {
+	if (!PageHighMem(page) && !enable)
+		mutex_debug_check_no_locks_freed(page_address(page),
+						numpages * PAGE_SIZE);
 }
 #endif
 
@@ -993,5 +1044,18 @@ int in_gate_area_no_task(unsigned long addr);
 /* /proc/<pid>/oom_adj set to -17 protects from the oom-killer */
 #define OOM_DISABLE -17
 
+int drop_caches_sysctl_handler(struct ctl_table *, int, struct file *,
+					void __user *, size_t *, loff_t *);
+int shrink_slab(unsigned long scanned, gfp_t gfp_mask,
+			unsigned long lru_pages);
+void drop_pagecache(void);
+void drop_slab(void);
+
+#ifndef CONFIG_MMU
+#define randomize_va_space 0
+#else
+extern int randomize_va_space;
+#endif
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MM_H */
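The hunk at @@ -223,24 +225,27 @@ is the heart of this patch: private and mapping move into an anonymous struct that shares storage with the split-ptlock spinlock, which is why page_private() and __pte_lockptr() lose the old ->u. indirection. Below is a minimal userspace sketch of that layout (build with -std=c11 for the anonymous struct/union); toy_page, toy_page_anon, and the plain int standing in for spinlock_t are invented for illustration and are not kernel code.

#include <stdio.h>
#include <stdint.h>

#define PAGE_MAPPING_ANON 1	/* low bit of ->mapping tags anon pages */

struct address_space;		/* opaque, as in the real header */

struct toy_page {
	union {
		struct {			/* what this patch makes anonymous */
			unsigned long private;	/* buffer_heads / swp_entry_t /
						 * buddy order when free */
			struct address_space *mapping;
		};
		int ptl;			/* stand-in for spinlock_t: reuses the
						 * same bytes on page-table pages */
	};
};

/* Mirror of the patched macros: direct member access, no ->u. step. */
#define page_private(page)		((page)->private)
#define set_page_private(page, v)	((page)->private = (v))

static int toy_page_anon(const struct toy_page *page)
{
	return ((uintptr_t)page->mapping & PAGE_MAPPING_ANON) != 0;
}

int main(void)
{
	struct toy_page page = { 0 };

	set_page_private(&page, 42);	/* e.g. a buddy order */
	/* fake anon_vma pointer with the low bit set, never dereferenced */
	page.mapping = (struct address_space *)((uintptr_t)0x1000 | PAGE_MAPPING_ANON);

	printf("private=%lu anon=%d\n", page_private(&page), toy_page_anon(&page));
	return 0;
}

The overlay is safe in the real header because a page being used as a page table is never simultaneously file- or anon-mapped, so ptl can reuse the bytes of private and mapping without conflict.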