X-Git-Url: http://pilppa.org/gitweb/gitweb.cgi?a=blobdiff_plain;f=mm%2Ffremap.c;h=14bd3bf7826edcecd2f61e6465b61078a84b2340;hb=b192541b39ed29ff82f9f2d5427f451e89617f1c;hp=5f50d736a03779dc12606abf99c46031ee45437a;hpb=d0217ac04ca6591841e5665f518e38064f4e65bd;p=linux-2.6-omap-h63xx.git

diff --git a/mm/fremap.c b/mm/fremap.c
index 5f50d736a03..14bd3bf7826 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -5,7 +5,7 @@
  *
  * started by Ingo Molnar, Copyright (C) 2002, 2003
  */
-
+#include <linux/backing-dev.h>
 #include <linux/mm.h>
 #include <linux/swap.h>
 #include <linux/file.h>
@@ -97,26 +97,28 @@ static int populate_range(struct mm_struct *mm, struct vm_area_struct *vma,
 
 }
 
-/***
- * sys_remap_file_pages - remap arbitrary pages of a shared backing store
- *                        file within an existing vma.
+/**
+ * sys_remap_file_pages - remap arbitrary pages of an existing VM_SHARED vma
  * @start: start of the remapped virtual memory range
  * @size: size of the remapped virtual memory range
- * @prot: new protection bits of the range
- * @pgoff: to be mapped page of the backing store file
+ * @prot: new protection bits of the range (see NOTE)
+ * @pgoff: to-be-mapped page of the backing store file
  * @flags: 0 or MAP_NONBLOCKED - the later will cause no IO.
  *
- * this syscall works purely via pagetables, so it's the most efficient
+ * sys_remap_file_pages remaps arbitrary pages of an existing VM_SHARED vma
+ * (shared backing store file).
+ *
+ * This syscall works purely via pagetables, so it's the most efficient
  * way to map the same (large) file into a given virtual window. Unlike
  * mmap()/mremap() it does not create any new vmas. The new mappings are
  * also safe across swapout.
  *
- * NOTE: the 'prot' parameter right now is ignored, and the vma's default
- * protection is used. Arbitrary protections might be implemented in the
- * future.
+ * NOTE: the 'prot' parameter right now is ignored (but must be zero),
+ * and the vma's default protection is used. Arbitrary protections
+ * might be implemented in the future.
  */
 asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size,
-	unsigned long __prot, unsigned long pgoff, unsigned long flags)
+	unsigned long prot, unsigned long pgoff, unsigned long flags)
 {
 	struct mm_struct *mm = current->mm;
 	struct address_space *mapping;
@@ -125,7 +127,7 @@ asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size,
 	int err = -EINVAL;
 	int has_write_lock = 0;
 
-	if (__prot)
+	if (prot)
 		return err;
 	/*
 	 * Sanitize the syscall parameters:
@@ -160,7 +162,7 @@ asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size,
 	if (vma->vm_private_data && !(vma->vm_flags & VM_NONLINEAR))
 		goto out;
 
-	if (!vma->vm_flags & VM_CAN_NONLINEAR)
+	if (!(vma->vm_flags & VM_CAN_NONLINEAR))
 		goto out;
 
 	if (end <= start || start < vma->vm_start || end > vma->vm_end)
@@ -181,6 +183,25 @@ asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size,
 		goto retry;
 	}
 	mapping = vma->vm_file->f_mapping;
+	/*
+	 * page_mkclean doesn't work on nonlinear vmas, so if
+	 * dirty pages need to be accounted, emulate with linear
+	 * vmas.
+	 */
+	if (mapping_cap_account_dirty(mapping)) {
+		unsigned long addr;
+
+		flags &= MAP_NONBLOCK;
+		addr = mmap_region(vma->vm_file, start, size,
+			flags, vma->vm_flags, pgoff, 1);
+		if (IS_ERR_VALUE(addr)) {
+			err = addr;
+		} else {
+			BUG_ON(addr != start);
+			err = 0;
+		}
+		goto out;
+	}
 	spin_lock(&mapping->i_mmap_lock);
 	flush_dcache_mmap_lock(mapping);
 	vma->vm_flags |= VM_NONLINEAR;
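
Note (not part of the patch): the syscall whose kernel-doc and nonlinear-vma
handling change above is exposed to userspace as remap_file_pages(2). The
program below is a minimal sketch of that interface, assuming a writable file
named "data.bin" of at least two pages exists and that glibc exposes
remap_file_pages() under _GNU_SOURCE; the file name is only an example. It
maps two file pages MAP_SHARED, then rewires the first window page to file
page 1, passing prot == 0 as required by the NOTE in the comment above.

/* Hypothetical userspace example; "data.bin" is a placeholder file. */
#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/mman.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);

	int fd = open("data.bin", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Linear MAP_SHARED mapping of the first two file pages. */
	char *win = mmap(NULL, 2 * page, PROT_READ | PROT_WRITE,
			 MAP_SHARED, fd, 0);
	if (win == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/*
	 * Make the mapping nonlinear: the first window page now shows
	 * file page 1. prot must be 0; pgoff is in units of pages.
	 * flags may be 0 or MAP_NONBLOCK (the latter avoids I/O).
	 */
	if (remap_file_pages(win, page, 0, 1, 0) != 0) {
		perror("remap_file_pages");
		return 1;
	}

	printf("first byte of remapped window: 0x%02x\n",
	       (unsigned char)win[0]);

	munmap(win, 2 * page);
	close(fd);
	return 0;
}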