diff --git a/mm/migrate.c b/mm/migrate.c
index a73504ff5ab982007b2e0355e49f0dedcac65899..449d77d409f52622bbf825b7fd17d43df7ff5fbf 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -153,11 +153,6 @@ static void remove_migration_pte(struct vm_area_struct *vma,
                return;
        }
 
-       if (mem_cgroup_charge(new, mm, GFP_KERNEL)) {
-               pte_unmap(ptep);
-               return;
-       }
-
        ptl = pte_lockptr(mm, pmd);
        spin_lock(ptl);
        pte = *ptep;
@@ -169,6 +164,20 @@ static void remove_migration_pte(struct vm_area_struct *vma,
        if (!is_migration_entry(entry) || migration_entry_to_page(entry) != old)
                goto out;
 
+       /*
+        * Yes, ignore the return value from a GFP_ATOMIC mem_cgroup_charge.
+        * Failure is not an option here: we're now expected to remove every
+        * migration pte, and will cause crashes otherwise.  Normally this
+        * is not an issue: mem_cgroup_prepare_migration bumped up the old
+        * page_cgroup count for safety, that's now attached to the new page,
+        * so this charge should just be another incrementation of the count,
+        * to keep in balance with rmap.c's mem_cgroup_uncharging.  But if
+        * there's been a force_empty, those reference counts may no longer
+        * be reliable, and this charge can actually fail: oh well, we don't
+        * make the situation any worse by proceeding as if it had succeeded.
+        */
+       mem_cgroup_charge(new, mm, GFP_ATOMIC);
+
        get_page(new);
        pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
        if (is_write_migration_entry(entry))
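
The comment in the hunk above boils down to a "best effort, never unwind" choice: by the time remove_migration_pte() runs, backing out would leave a migration entry in place, so the GFP_ATOMIC charge is attempted and its result deliberately discarded. A minimal userspace sketch of that pattern follows; try_charge() and finish_migration_step() are hypothetical stand-ins for mem_cgroup_charge() and the tail of remove_migration_pte(), not kernel code.

/*
 * Toy illustration only -- not kernel code.  try_charge() and
 * finish_migration_step() are hypothetical stand-ins for
 * mem_cgroup_charge() and the tail of remove_migration_pte().
 */
#include <stdbool.h>
#include <stdio.h>

/* Pretend the charge can fail, e.g. after a force_empty. */
static bool try_charge(void)
{
	return false;
}

static void finish_migration_step(void)
{
	/*
	 * Ignore the result on purpose: bailing out here would leave a
	 * migration pte behind, which is worse than a slightly off charge.
	 */
	(void)try_charge();

	/* ...and carry on installing the new pte regardless. */
	puts("migration pte replaced");
}

int main(void)
{
	finish_migration_step();
	return 0;
}

The hunk above makes the same choice without even a (void) cast; the block comment is what records the intent.
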
@@ -374,7 +383,14 @@ static void migrate_page_copy(struct page *newpage, struct page *page)
 
        if (PageDirty(page)) {
                clear_page_dirty_for_io(page);
-               set_page_dirty(newpage);
+               /*
+                * Want to mark the page and the radix tree as dirty, and
+                * redo the accounting that clear_page_dirty_for_io undid,
+                * but we can't use set_page_dirty because that function
+                * is actually a signal that all of the page has become dirty.
+        * Whereas only part of our page may be dirty.
+                */
+               __set_page_dirty_nobuffers(newpage);
        }
 
 #ifdef CONFIG_SWAP
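
The comment in the hunk above describes a hand-off: clear_page_dirty_for_io() drops the old page's dirty state (and the accounting that goes with it), and __set_page_dirty_nobuffers() restores both on the new page, without the "all of the page has become dirty" signal that set_page_dirty() implies. A small userspace model of that balance, using hypothetical stand-ins for the kernel helpers, looks like this:

/*
 * Toy model only -- not kernel code.  nr_dirty, clear_dirty_for_io()
 * and mark_dirty_nobuffers() are hypothetical stand-ins for the
 * kernel's dirty accounting, clear_page_dirty_for_io() and
 * __set_page_dirty_nobuffers().
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_page {
	bool dirty;
};

static int nr_dirty;	/* system-wide dirty-page accounting */

static bool clear_dirty_for_io(struct toy_page *p)
{
	if (!p->dirty)
		return false;
	p->dirty = false;
	nr_dirty--;		/* the clear undoes the accounting */
	return true;
}

static void mark_dirty_nobuffers(struct toy_page *p)
{
	if (!p->dirty) {
		p->dirty = true;
		nr_dirty++;	/* redo the accounting on the new page */
	}
}

int main(void)
{
	struct toy_page oldpage = { .dirty = true };
	struct toy_page newpage = { .dirty = false };

	nr_dirty = 1;

	/* Copy done; hand the dirty state from the old page to the new one. */
	if (clear_dirty_for_io(&oldpage))
		mark_dirty_nobuffers(&newpage);

	printf("nr_dirty = %d, newpage.dirty = %d\n", nr_dirty, newpage.dirty);
	return 0;
}

After the hand-off the system-wide count is unchanged: one dirty page, now the new one, mirroring how the hunk above keeps dirty accounting balanced across the copy.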