diff --git a/mm/migrate.c b/mm/migrate.c
index 376cceba82f98e7110520acc885f6bce0687482b..153572fb60b8debb24359e195e7a53b50f8e1c37 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -285,7 +285,15 @@ void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
 
        page = migration_entry_to_page(entry);
 
-       get_page(page);
+       /*
+        * Once page migration has begun replacing the radix-tree slot,
+        * the page's refcount is frozen and page_count *must* read zero.
+        * We also must not call wait_on_page_locked() on a page we hold
+        * no reference to. So use get_page_unless_zero() here; if it
+        * fails, the page fault will simply be taken again.
+        */
+       if (!get_page_unless_zero(page))
+               goto out;
        pte_unmap_unlock(ptep, ptl);
        wait_on_page_locked(page);
        put_page(page);
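
For reference, get_page_unless_zero() is built on atomic_inc_not_zero(): it
takes a reference only when the count is not already zero, so a page whose
refcount has been frozen by the replacement path below cannot be resurrected
here. From include/linux/mm.h of this era, roughly:

        static inline int get_page_unless_zero(struct page *page)
        {
                VM_BUG_ON(PageTail(page));
                return atomic_inc_not_zero(&page->_count);
        }
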
@@ -305,6 +313,7 @@ out:
 static int migrate_page_move_mapping(struct address_space *mapping,
                struct page *newpage, struct page *page)
 {
+       int expected_count;
        void **pslot;
 
        if (!mapping) {
@@ -314,14 +323,20 @@ static int migrate_page_move_mapping(struct address_space *mapping,
                return 0;
        }
 
-       write_lock_irq(&mapping->tree_lock);
+       spin_lock_irq(&mapping->tree_lock);
 
        pslot = radix_tree_lookup_slot(&mapping->page_tree,
                                        page_index(page));
 
-       if (page_count(page) != 2 + !!PagePrivate(page) ||
+       expected_count = 2 + !!PagePrivate(page);
+       if (page_count(page) != expected_count ||
                        (struct page *)radix_tree_deref_slot(pslot) != page) {
-               write_unlock_irq(&mapping->tree_lock);
+               spin_unlock_irq(&mapping->tree_lock);
+               return -EAGAIN;
+       }
+
+       if (!page_freeze_refs(page, expected_count)) {
+               spin_unlock_irq(&mapping->tree_lock);
                return -EAGAIN;
        }
 
@@ -338,6 +353,7 @@ static int migrate_page_move_mapping(struct address_space *mapping,
 
        radix_tree_replace_slot(pslot, newpage);
 
+       page_unfreeze_refs(page, expected_count);
        /*
         * Drop cache reference from old page.
         * We know this isn't the last reference.
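
page_freeze_refs() and page_unfreeze_refs() come from the speculative
page-references series this patch builds on; in include/linux/pagemap.h they
look roughly like this:

        static inline int page_freeze_refs(struct page *page, int count)
        {
                /*
                 * Atomically replace an exact, expected refcount with zero;
                 * fails, leaving the count untouched, if another reference
                 * appeared in the meantime.
                 */
                return likely(atomic_cmpxchg(&page->_count, count, 0) == count);
        }

        static inline void page_unfreeze_refs(struct page *page, int count)
        {
                VM_BUG_ON(page_count(page) != 0);
                VM_BUG_ON(count == 0);
                atomic_set(&page->_count, count);
        }
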
@@ -357,7 +373,9 @@ static int migrate_page_move_mapping(struct address_space *mapping,
        __dec_zone_page_state(page, NR_FILE_PAGES);
        __inc_zone_page_state(newpage, NR_FILE_PAGES);
 
-       write_unlock_irq(&mapping->tree_lock);
+       spin_unlock_irq(&mapping->tree_lock);
+       if (!PageSwapCache(newpage))
+               mem_cgroup_uncharge_cache_page(page);
 
        return 0;
 }
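
Taken together, these hunks replace the old racy page_count() check with a
freeze/replace/unfreeze protocol under tree_lock (which the same series
converts from an rwlock to a spinlock, hence the spin_lock_irq() calls). A
minimal, self-contained userspace illustration of the same compare-and-swap
freeze, using C11 atomics and invented demo_* names:

        #include <stdatomic.h>
        #include <stdio.h>

        /* Stand-in for struct page's _count; demo only. */
        struct demo_page { atomic_int refs; };

        /* Succeed only if exactly 'expected' references are held right now. */
        static int demo_freeze_refs(struct demo_page *p, int expected)
        {
                return atomic_compare_exchange_strong(&p->refs, &expected, 0);
        }

        static void demo_unfreeze_refs(struct demo_page *p, int count)
        {
                atomic_store(&p->refs, count);
        }

        int main(void)
        {
                /* One reference from the page cache, one from the caller. */
                struct demo_page page = { .refs = 2 };

                if (demo_freeze_refs(&page, 2)) {
                        /* ...radix_tree_replace_slot() would run here... */
                        demo_unfreeze_refs(&page, 2);
                        puts("no concurrent reference: slot replaced");
                } else {
                        /* A racing get_page_unless_zero() got there first. */
                        puts("extra reference seen: retry with -EAGAIN");
                }
                return 0;
        }
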
@@ -611,7 +629,6 @@ static int move_to_new_page(struct page *newpage, struct page *page)
                rc = fallback_migrate_page(mapping, newpage, page);
 
        if (!rc) {
-               mem_cgroup_page_migration(page, newpage);
                remove_migration_ptes(page, newpage);
        } else
                newpage->mapping = NULL;
@@ -641,6 +658,14 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
                /* page was freed from under us. So we are done. */
                goto move_newpage;
 
+       charge = mem_cgroup_prepare_migration(page, newpage);
+       if (charge == -ENOMEM) {
+               rc = -ENOMEM;
+               goto move_newpage;
+       }
+       /* mem_cgroup_prepare_migration() returns only 0 or -ENOMEM */
+       BUG_ON(charge);
+
        rc = -EAGAIN;
        if (TestSetPageLocked(page)) {
                if (!force)
@@ -692,19 +717,14 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
                goto rcu_unlock;
        }
 
-       charge = mem_cgroup_prepare_migration(page);
        /* Establish migration ptes or remove ptes */
        try_to_unmap(page, 1);
 
        if (!page_mapped(page))
                rc = move_to_new_page(newpage, page);
 
-       if (rc) {
+       if (rc)
                remove_migration_ptes(page, page);
-               if (charge)
-                       mem_cgroup_end_migration(page);
-       } else if (charge)
-               mem_cgroup_end_migration(newpage);
 rcu_unlock:
        if (rcu_locked)
                rcu_read_unlock();
@@ -725,6 +745,8 @@ unlock:
        }
 
 move_newpage:
+       if (!charge)
+               mem_cgroup_end_migration(newpage);
        /*
         * Move the new page to the LRU. If migration was not successful
         * then this will free the page.
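
The memcg hunks restructure the charge handling: mem_cgroup_prepare_migration()
now runs once on entry (charging newpage up front), and
mem_cgroup_end_migration() runs once at the common exit, instead of being
threaded through the success and failure branches around move_to_new_page().
A self-contained sketch of the resulting lifecycle, with stub_* placeholders
standing in for the kernel functions:

        #include <errno.h>
        #include <stdio.h>

        /* Placeholders for the kernel's mem_cgroup_* calls; demo only. */
        static int stub_prepare_migration(void) { return 0; /* or -ENOMEM */ }
        static void stub_end_migration(void) { puts("end_migration(newpage)"); }

        static int unmap_and_move_sketch(void)
        {
                int rc, charge;

                charge = stub_prepare_migration();
                if (charge == -ENOMEM) {
                        rc = -ENOMEM;
                        goto move_newpage;      /* bail before touching the page */
                }

                rc = 0;         /* ...lock, unmap, move_to_new_page()... */

        move_newpage:
                /*
                 * Runs on every exit where the charge was prepared, whether
                 * the move succeeded or not; the old per-branch calls are gone.
                 */
                if (!charge)
                        stub_end_migration();
                return rc;
        }

        int main(void)
        {
                return unmap_and_move_sketch() ? 1 : 0;
        }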