mm: introduce node_zonelist() for accessing the zonelist for a GFP mask
[linux-2.6-omap-h63xx.git] / mm / memory.c
index ce3c9e4492d803b011f50ea8641e477056116ddd..46958fb97c2d95d9d6efff5b8ff8ce1934d0eb01 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1057,8 +1057,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                if (pages)
                        foll_flags |= FOLL_GET;
                if (!write && !(vma->vm_flags & VM_LOCKED) &&
-                   (!vma->vm_ops || (!vma->vm_ops->nopage &&
-                                       !vma->vm_ops->fault)))
+                   (!vma->vm_ops || !vma->vm_ops->fault))
                        foll_flags |= FOLL_ANON;
 
                do {
@@ -1711,7 +1710,7 @@ unlock:
        }
        return ret;
 oom_free_new:
-       __free_page(new_page);
+       page_cache_release(new_page);
 oom:
        if (old_page)
                page_cache_release(old_page);
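For context (not part of this diff): in 2.6.2x kernels page_cache_release() is simply put_page(), so this change, and the identical oom_free_page change further down, makes the OOM error path drop the reference on the freshly allocated page rather than call __free_page() on it directly:

/* include/linux/pagemap.h (2.6.2x era) -- shown for context only */
#define page_cache_get(page)		get_page(page)
#define page_cache_release(page)	put_page(page)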
@@ -2093,12 +2092,9 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
        unlock_page(page);
 
        if (write_access) {
-               /* XXX: We could OR the do_wp_page code with this one? */
-               if (do_wp_page(mm, vma, address,
-                               page_table, pmd, ptl, pte) & VM_FAULT_OOM) {
-                       mem_cgroup_uncharge_page(page);
-                       ret = VM_FAULT_OOM;
-               }
+               ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte);
+               if (ret & VM_FAULT_ERROR)
+                       ret &= VM_FAULT_ERROR;
                goto out;
        }
 
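The masking above can look odd at first glance. A minimal standalone sketch (plain userspace C, using the VM_FAULT_* bit values as assumed from this era's include/linux/mm.h, not code from the patch) shows how ret &= VM_FAULT_ERROR strips status bits such as VM_FAULT_MAJOR once do_wp_page() reports an error, so the caller sees only the error code:

#include <stdio.h>

/* VM_FAULT_* bit values as in 2.6.2x include/linux/mm.h (assumed here) */
#define VM_FAULT_OOM	0x0001
#define VM_FAULT_SIGBUS	0x0002
#define VM_FAULT_MAJOR	0x0004
#define VM_FAULT_ERROR	(VM_FAULT_OOM | VM_FAULT_SIGBUS)

int main(void)
{
	unsigned int ret = VM_FAULT_MAJOR;	/* the swap-in was a major fault */

	ret |= VM_FAULT_OOM;			/* pretend do_wp_page() reported OOM */

	if (ret & VM_FAULT_ERROR)
		ret &= VM_FAULT_ERROR;		/* drop status bits, keep the error */

	printf("ret = %#x\n", ret);		/* prints ret = 0x1, i.e. VM_FAULT_OOM */
	return 0;
}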
@@ -2163,7 +2159,7 @@ release:
        page_cache_release(page);
        goto unlock;
 oom_free_page:
-       __free_page(page);
+       page_cache_release(page);
 oom:
        return VM_FAULT_OOM;
 }
@@ -2202,20 +2198,9 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 
        BUG_ON(vma->vm_flags & VM_PFNMAP);
 
-       if (likely(vma->vm_ops->fault)) {
-               ret = vma->vm_ops->fault(vma, &vmf);
-               if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
-                       return ret;
-       } else {
-               /* Legacy ->nopage path */
-               ret = 0;
-               vmf.page = vma->vm_ops->nopage(vma, address & PAGE_MASK, &ret);
-               /* no page was available -- either SIGBUS or OOM */
-               if (unlikely(vmf.page == NOPAGE_SIGBUS))
-                       return VM_FAULT_SIGBUS;
-               else if (unlikely(vmf.page == NOPAGE_OOM))
-                       return VM_FAULT_OOM;
-       }
+       ret = vma->vm_ops->fault(vma, &vmf);
+       if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
+               return ret;
 
        /*
         * For consistency in subsequent calls, make the faulted page always
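With the legacy path gone from __do_fault(), any mapping that still implemented vma->vm_ops->nopage has to provide ->fault instead. Below is a minimal sketch of what that conversion looks like on the driver side, written against the 2.6.2x-era ->fault signature; the names my_dev_vm_fault, my_dev_lookup_page and my_dev_vm_ops are hypothetical and the lookup helper is stubbed, so this is an illustration, not code from this patch:

#include <linux/mm.h>

/* hypothetical helper: map a page offset to a device page; stubbed here */
static struct page *my_dev_lookup_page(void *priv, pgoff_t pgoff)
{
	return NULL;
}

static int my_dev_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page;

	/* vmf->pgoff replaces the (address & PAGE_MASK) argument of ->nopage */
	page = my_dev_lookup_page(vma->vm_private_data, vmf->pgoff);
	if (!page)
		return VM_FAULT_SIGBUS;	/* instead of returning NOPAGE_SIGBUS */

	get_page(page);			/* __do_fault expects a referenced page */
	vmf->page = page;
	return 0;			/* status flags, not a struct page pointer */
}

static struct vm_operations_struct my_dev_vm_ops = {
	.fault	= my_dev_vm_fault,	/* .nopage is no longer consulted */
};

The driver's mmap handler keeps setting vma->vm_ops = &my_dev_vm_ops exactly as before; only the shape of the fault callback changes.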
@@ -2461,7 +2446,7 @@ static inline int handle_pte_fault(struct mm_struct *mm,
        if (!pte_present(entry)) {
                if (pte_none(entry)) {
                        if (vma->vm_ops) {
-                               if (vma->vm_ops->fault || vma->vm_ops->nopage)
+                               if (likely(vma->vm_ops->fault))
                                        return do_linear_fault(mm, vma, address,
                                                pte, pmd, write_access, entry);
                                if (unlikely(vma->vm_ops->nopfn))