index e780d19aa21447df1b48d92273276700cf2b56f4..d799d896d74aae5f19c604928eb6a52738676d66 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
 #include <asm/cacheflush.h>
 #include <asm/tlb.h>
 
+#ifndef arch_mmap_check
+#define arch_mmap_check(addr, len, flags)      (0)
+#endif
+
 static void unmap_region(struct mm_struct *mm,
                struct vm_area_struct *vma, struct vm_area_struct *prev,
                unsigned long start, unsigned long end);
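The new arch_mmap_check() hook, called from do_mmap_pgoff() and do_brk() below, lets an architecture veto a mapping before any real work is done; the #ifndef fallback above turns it into a no-op wherever the architecture does not define it. A minimal sketch of what a port might supply from its <asm/mman.h> (the reserved window and all names here are illustrative, not taken from any real architecture):

/* Hypothetical per-arch override: refuse mappings that would overlap an
 * architecture-reserved window. The bounds are invented for the example. */
#define MYARCH_RESERVED_START	0xe0000000UL
#define MYARCH_RESERVED_END	0xe1000000UL

static inline int myarch_mmap_check(unsigned long addr, unsigned long len,
				    unsigned long flags)
{
	if (addr < MYARCH_RESERVED_END && addr + len > MYARCH_RESERVED_START)
		return -EINVAL;
	return 0;	/* flags unused in this sketch */
}
#define arch_mmap_check(addr, len, flags) myarch_mmap_check(addr, len, flags)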
@@ -60,6 +64,13 @@ pgprot_t protection_map[16] = {
        __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
 };
 
+pgprot_t vm_get_page_prot(unsigned long vm_flags)
+{
+       return protection_map[vm_flags &
+                               (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
+}
+EXPORT_SYMBOL(vm_get_page_prot);
+
 int sysctl_overcommit_memory = OVERCOMMIT_GUESS;  /* heuristic overcommit */
 int sysctl_overcommit_ratio = 50;      /* default is 50% */
 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
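vm_get_page_prot() exports the protection_map[] lookup that mmap.c open-codes further down, so modules no longer need to index the array themselves. A rough sketch of a driver mmap handler built on it (the mydev naming is illustrative):

#include <linux/mm.h>
#include <linux/fs.h>

/* Illustrative character-device mmap: derive the PTE protections from
 * the VMA flags through the new accessor, then map the pages. */
static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}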
@@ -96,7 +107,7 @@ int __vm_enough_memory(long pages, int cap_sys_admin)
        if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
                unsigned long n;
 
-               free = get_page_cache_size();
+               free = global_page_state(NR_FILE_PAGES);
                free += nr_swap_pages;
 
                /*
@@ -121,14 +132,26 @@ int __vm_enough_memory(long pages, int cap_sys_admin)
                 * only call if we're about to fail.
                 */
                n = nr_free_pages();
+
+               /*
+                * Leave out the reserved pages; they cannot be used for anonymous pages.
+                */
+               if (n <= totalreserve_pages)
+                       goto error;
+               else
+                       n -= totalreserve_pages;
+
+               /*
+                * Leave the last 3% (n/32) for root
+                */
                if (!cap_sys_admin)
                        n -= n / 32;
                free += n;
 
                if (free > pages)
                        return 0;
-               vm_unacct_memory(pages);
-               return -ENOMEM;
+
+               goto error;
        }
 
        allowed = (totalram_pages - hugetlb_total_pages())
@@ -150,7 +173,7 @@ int __vm_enough_memory(long pages, int cap_sys_admin)
         */
        if (atomic_read(&vm_committed_space) < (long)allowed)
                return 0;
-
+error:
        vm_unacct_memory(pages);
 
        return -ENOMEM;
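Two things about this hunk are easy to miss. The failure paths now converge on the new error: label, so vm_unacct_memory() is undone exactly once, and the explicit n <= totalreserve_pages test is not mere style: both operands are unsigned long, so a bare subtraction with n below the reserve would wrap to a huge value and make the heuristic wrongly succeed. A stand-alone userspace illustration of the wrap:

#include <stdio.h>

int main(void)
{
	unsigned long n = 100, reserve = 150;

	/* Wraps instead of going negative: prints
	 * 18446744073709551566 on a 64-bit machine. */
	printf("%lu\n", n - reserve);
	return 0;
}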
@@ -220,6 +243,17 @@ asmlinkage unsigned long sys_brk(unsigned long brk)
 
        if (brk < mm->end_code)
                goto out;
+
+       /*
+        * Check against rlimit here. If this check were done later, after
+        * the comparison of oldbrk with newbrk, it could be escaped and the
+        * data segment allowed to grow beyond its limit in the case where
+        * the limit is not page aligned. -Ram Gupta
+        */
+       rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
+       if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim)
+               goto out;
+
        newbrk = PAGE_ALIGN(brk);
        oldbrk = PAGE_ALIGN(mm->brk);
        if (oldbrk == newbrk)
@@ -232,11 +266,6 @@ asmlinkage unsigned long sys_brk(unsigned long brk)
                goto out;
        }
 
-       /* Check against rlimit.. */
-       rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
-       if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim)
-               goto out;
-
        /* Check against existing mmap mappings. */
        if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
                goto out;
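A worked example of the escape the relocated check closes (numbers illustrative): with 4 KiB pages, RLIMIT_DATA = 6000 (not page aligned), mm->start_data = 0 and a current brk of 5000, a request for brk = 7000 gives PAGE_ALIGN(5000) == PAGE_ALIGN(7000) == 8192. The oldbrk == newbrk fast path would then commit the new break and return before the old, later rlimit test was ever reached, leaving the data segment at 7000 bytes against a 6000-byte limit. Testing the limit first closes that path.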
@@ -895,6 +924,10 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
        if (!len)
                return -EINVAL;
 
+       error = arch_mmap_check(addr, len, flags);
+       if (error)
+               return error;
+
        /* Careful about overflows.. */
        len = PAGE_ALIGN(len);
        if (!len || len > TASK_SIZE)
@@ -1047,7 +1080,8 @@ munmap_back:
        vma->vm_start = addr;
        vma->vm_end = addr + len;
        vma->vm_flags = vm_flags;
-       vma->vm_page_prot = protection_map[vm_flags & 0x0f];
+       vma->vm_page_prot = protection_map[vm_flags &
+                               (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
        vma->vm_pgoff = pgoff;
 
        if (file) {
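The named mask is bit-for-bit the old magic number: VM_READ (0x01) | VM_WRITE (0x02) | VM_EXEC (0x04) | VM_SHARED (0x08) equals 0x0f, so this hunk, like the matching one in do_brk() below, changes readability rather than behavior.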
@@ -1071,6 +1105,12 @@ munmap_back:
                        goto free_vma;
        }
 
+       /* Don't make the VMA automatically writable if it's shared but the
+        * backer wishes to know when pages are first written to. */
+       if (vma->vm_ops && vma->vm_ops->page_mkwrite)
+               vma->vm_page_prot =
+                       protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC)];
+
        /* We set VM_ACCOUNT in a shared mapping's vm_flags, to inform
         * shmem_zero_setup (perhaps called through /dev/zero's ->mmap)
         * that memory reservation must be checked; but that reservation
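With this hunk, a shared mapping whose backer implements page_mkwrite() starts life with write permission masked out of vm_page_prot, so the first store to each page faults and the callback runs before the pte is made writable. A sketch of such a backer, using the 2.6.18-era callback signature (the myfs naming is illustrative):

/* Illustrative vm_operations for a backer wanting first-write
 * notification; returning 0 lets the fault path proceed to make
 * the page writable. */
static int myfs_page_mkwrite(struct vm_area_struct *vma, struct page *page)
{
	/* e.g. reserve backing blocks or begin dirty accounting here */
	return 0;
}

static struct vm_operations_struct myfs_vm_ops = {
	.nopage		= filemap_nopage,
	.page_mkwrite	= myfs_page_mkwrite,
};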
@@ -1834,6 +1874,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
        unsigned long flags;
        struct rb_node ** rb_link, * rb_parent;
        pgoff_t pgoff = addr >> PAGE_SHIFT;
+       int error;
 
        len = PAGE_ALIGN(len);
        if (!len)
@@ -1842,6 +1883,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
        if ((addr + len) > TASK_SIZE || (addr + len) < addr)
                return -EINVAL;
 
+       flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
+
+       error = arch_mmap_check(addr, len, flags);
+       if (error)
+               return error;
+
        /*
         * mlock MCL_FUTURE?
         */
@@ -1882,8 +1929,6 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
        if (security_vm_enough_memory(len >> PAGE_SHIFT))
                return -ENOMEM;
 
-       flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
-
        /* Can we just expand an old private anonymous mapping? */
        if (vma_merge(mm, prev, addr, addr + len, flags,
                                        NULL, NULL, pgoff, NULL))
@@ -1903,7 +1948,8 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
        vma->vm_end = addr + len;
        vma->vm_pgoff = pgoff;
        vma->vm_flags = flags;
-       vma->vm_page_prot = protection_map[flags & 0x0f];
+       vma->vm_page_prot = protection_map[flags &
+                               (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
        vma_link(mm, vma, prev, rb_link, rb_parent);
 out:
        mm->total_vm += len >> PAGE_SHIFT;
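In do_brk() the flags computation moves above the new arch_mmap_check() call so the hook sees the final VM flags, and the original assignment further down is deleted rather than computed twice; the protection_map indexing change in the last hunk mirrors the one in do_mmap_pgoff() above.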