irq: change references from NR_IRQS to nr_irqs
[linux-2.6-omap-h63xx.git] / mm / memory.c
index 1358012ffa73f000105e7336968fdb1b0d5ad813..e009ce8708597fe3c7542a92ec9201fe8d5f73b6 100644
@@ -2000,7 +2000,7 @@ gotten:
        cow_user_page(new_page, old_page, address, vma);
        __SetPageUptodate(new_page);
 
-       if (mem_cgroup_newpage_charge(new_page, mm, GFP_HIGHUSER_MOVABLE))
+       if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))
                goto oom_free_new;
 
        /*
@@ -2431,8 +2431,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
        lock_page(page);
        delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
 
-       if (mem_cgroup_try_charge_swapin(mm, page,
-                               GFP_HIGHUSER_MOVABLE, &ptr) == -ENOMEM) {
+       if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) {
                ret = VM_FAULT_OOM;
                unlock_page(page);
                goto out;
@@ -2458,22 +2457,23 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
         * while the page is counted on swap but not yet in mapcount i.e.
         * before page_add_anon_rmap() and swap_free(); try_to_free_swap()
         * must be called after the swap_free(), or it will never succeed.
-        * And mem_cgroup_commit_charge_swapin(), which uses the swp_entry
-        * in page->private, must be called before reuse_swap_page(),
-        * which may delete_from_swap_cache().
+        * Because delete_from_swap_cache() may be called by reuse_swap_page(),
+        * mem_cgroup_commit_charge_swapin() may not be able to find the
+        * swp_entry in page->private. In this case, a record in swap_cgroup is
+        * silently discarded at swap_free().
         */
 
-       mem_cgroup_commit_charge_swapin(page, ptr);
        inc_mm_counter(mm, anon_rss);
        pte = mk_pte(page, vma->vm_page_prot);
        if (write_access && reuse_swap_page(page)) {
                pte = maybe_mkwrite(pte_mkdirty(pte), vma);
                write_access = 0;
        }
-
        flush_icache_page(vma, page);
        set_pte_at(mm, address, page_table, pte);
        page_add_anon_rmap(page, vma, address);
+       /* It's better to call commit-charge after rmap is established */
+       mem_cgroup_commit_charge_swapin(page, ptr);
 
        swap_free(entry);
        if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
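
For context, a minimal sketch of the ordering this hunk establishes in
do_swap_page(): the charge is reserved before the pte is built, committed
only after the anon rmap exists, and the swap slot is freed last. The helper
below is hypothetical (locking, error unwinding and the mlock handling are
elided) and assumes the try/commit style memcg API declared in this series.

#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/rmap.h>
#include <linux/memcontrol.h>

static int swapin_charge_sketch(struct mm_struct *mm, struct vm_area_struct *vma,
				unsigned long address, pte_t *page_table,
				struct page *page, swp_entry_t entry,
				int write_access)
{
	struct mem_cgroup *ptr = NULL;
	pte_t pte;

	/* Reserve the charge first; GFP_KERNEL lets memcg reclaim if needed. */
	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr))
		return VM_FAULT_OOM;

	/* Decide on write reuse while the page is still counted on swap. */
	inc_mm_counter(mm, anon_rss);
	pte = mk_pte(page, vma->vm_page_prot);
	if (write_access && reuse_swap_page(page))
		pte = maybe_mkwrite(pte_mkdirty(pte), vma);

	/* Install the pte and the anon rmap ... */
	flush_icache_page(vma, page);
	set_pte_at(mm, address, page_table, pte);
	page_add_anon_rmap(page, vma, address);

	/* ... and only then commit the reserved charge to the page. */
	mem_cgroup_commit_charge_swapin(page, ptr);

	/* Release the swap slot after the reuse decision above. */
	swap_free(entry);
	return 0;
}
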
@@ -2524,7 +2524,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
                goto oom;
        __SetPageUptodate(page);
 
-       if (mem_cgroup_newpage_charge(page, mm, GFP_HIGHUSER_MOVABLE))
+       if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))
                goto oom_free_page;
 
        entry = mk_pte(page, vma->vm_page_prot);
@@ -2615,8 +2615,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                                ret = VM_FAULT_OOM;
                                goto out;
                        }
-                       if (mem_cgroup_newpage_charge(page,
-                                               mm, GFP_HIGHUSER_MOVABLE)) {
+                       if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) {
                                ret = VM_FAULT_OOM;
                                page_cache_release(page);
                                goto out;
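
The GFP_HIGHUSER_MOVABLE -> GFP_KERNEL hunks above (the CoW path,
do_anonymous_page() and __do_fault()) all follow the same call pattern,
sketched below with a hypothetical helper that is not part of the patch. It
assumes mem_cgroup_newpage_charge() as declared in this series: the page
itself is still allocated with a highmem/movable mask, and the mask passed
to the charge only tells memcg what it may do (wait, reclaim within the
group) if the group is over its limit.

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/memcontrol.h>

/* Hypothetical helper, for illustration only. */
static int charge_new_anon_page(struct mm_struct *mm, struct vm_area_struct *vma,
				unsigned long address, struct page **pagep)
{
	struct page *page;

	/* The page allocation keeps its movable/highmem placement. */
	page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
	if (!page)
		return -ENOMEM;
	__SetPageUptodate(page);

	/* Account the page to mm's cgroup; GFP_KERNEL allows it to sleep and reclaim. */
	if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) {
		page_cache_release(page);
		return -ENOMEM;
	}

	*pagep = page;
	return 0;
}
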