[PATCH] Permit multiple inclusion of linux/pagevec.h
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 261a56ee11b690b8e182fbbe84a0b42c097c446f..bf903b2d198f0820a2d03041b06de25af7a4d1d7 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -268,9 +268,7 @@ static inline int is_page_cache_freeable(struct page *page)
 
 static int may_write_to_queue(struct backing_dev_info *bdi)
 {
-       if (current_is_kswapd())
-               return 1;
-       if (current_is_pdflush())       /* This is unlikely, but why not... */
+       if (current->flags & PF_SWAPWRITE)
                return 1;
        if (!bdi_write_congested(bdi))
                return 1;
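
The hunk above replaces two identity checks (kswapd, pdflush) with a single capability bit: any task that has PF_SWAPWRITE set in its flags may keep writing to a congested queue, and kswapd now sets that bit at startup (see the final hunk of this diff). A minimal userspace model of the new policy — the struct types are illustrative stand-ins and the flag value is arbitrary, not the kernel's definitions:

#include <stdio.h>

/* Illustrative stand-ins for the kernel's task flags and
 * backing_dev_info; the flag value here is arbitrary. */
#define PF_SWAPWRITE 0x1

struct task { unsigned long flags; };
struct backing_dev { int write_congested; };

/* Model of the rewritten policy: a PF_SWAPWRITE holder may always
 * write; everyone else only while the queue is uncongested. */
static int may_write_to_queue(const struct task *t,
                              const struct backing_dev *bdi)
{
        if (t->flags & PF_SWAPWRITE)
                return 1;
        if (!bdi->write_congested)
                return 1;
        return 0;
}

int main(void)
{
        struct backing_dev congested = { .write_congested = 1 };
        struct task kswapd = { .flags = PF_SWAPWRITE };
        struct task other  = { .flags = 0 };

        printf("%d %d\n",
               may_write_to_queue(&kswapd, &congested),  /* 1 */
               may_write_to_queue(&other, &congested));  /* 0 */
        return 0;
}
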
@@ -375,6 +373,43 @@ static pageout_t pageout(struct page *page, struct address_space *mapping)
        return PAGE_CLEAN;
 }
 
+static int remove_mapping(struct address_space *mapping, struct page *page)
+{
+       if (!mapping)
+               return 0;               /* truncate got there first */
+
+       write_lock_irq(&mapping->tree_lock);
+
+       /*
+        * The non-racy check for busy page.  It is critical to check
+        * PageDirty _after_ making sure that the page is freeable and
+        * not in use by anybody.       (pagecache + us == 2)
+        */
+       if (unlikely(page_count(page) != 2))
+               goto cannot_free;
+       smp_rmb();
+       if (unlikely(PageDirty(page)))
+               goto cannot_free;
+
+       if (PageSwapCache(page)) {
+               swp_entry_t swap = { .val = page_private(page) };
+               __delete_from_swap_cache(page);
+               write_unlock_irq(&mapping->tree_lock);
+               swap_free(swap);
+               __put_page(page);       /* The pagecache ref */
+               return 1;
+       }
+
+       __remove_from_page_cache(page);
+       write_unlock_irq(&mapping->tree_lock);
+       __put_page(page);
+       return 1;
+
+cannot_free:
+       write_unlock_irq(&mapping->tree_lock);
+       return 0;
+}
+
 /*
  * shrink_list adds the number of reclaimed pages to sc->nr_reclaimed
  */
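
The new remove_mapping() helper is shrink_list()'s page-freeing tail factored out (a later hunk deletes the open-coded copy) so the migration path's swap_page() can reuse it. Its core is an ordered pair of checks under tree_lock: first confirm that exactly two references remain (the page cache's and the caller's), then, after the smp_rmb(), confirm the page is still clean; a page that regained a user or was redirtied in between is left alone. A simplified userspace model of just that check order, with locking and barriers elided and illustrative types:

#include <stdio.h>

/* Simplified model of the "non-racy busy page" test: the page may be
 * detached only while exactly two references exist (page cache + us)
 * and the page has stayed clean. */
struct page_model { int count; int dirty; };

static int can_free(const struct page_model *p)
{
        if (p->count != 2)      /* someone else still uses the page */
                return 0;
        /* kernel: smp_rmb() here orders the two reads */
        if (p->dirty)           /* redirtied since it was cleaned */
                return 0;
        return 1;
}

int main(void)
{
        struct page_model clean = { .count = 2, .dirty = 0 };
        struct page_model busy  = { .count = 3, .dirty = 0 };
        struct page_model dirty = { .count = 2, .dirty = 1 };

        printf("%d %d %d\n", can_free(&clean),
               can_free(&busy), can_free(&dirty));  /* 1 0 0 */
        return 0;
}
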
@@ -423,7 +458,7 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc)
                 * Try to allocate it some swap space here.
                 */
                if (PageAnon(page) && !PageSwapCache(page)) {
-                       if (!add_to_swap(page))
+                       if (!add_to_swap(page, GFP_ATOMIC))
                                goto activate_locked;
                }
 #endif /* CONFIG_SWAP */
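
add_to_swap() now takes a gfp_mask from the caller, so each context can state how hard the swap-cache allocation may try: reclaim here keeps the old non-blocking GFP_ATOMIC behaviour, while migrate_pages() further down passes GFP_KERNEL, since it runs in process context where blocking is acceptable. A toy model of that design choice — the enum and helper names are invented for illustration, not kernel API:

#include <stdio.h>

/* Toy model (names invented): one helper, two allocation policies.
 * The non-blocking caller fails fast under memory pressure; the
 * blocking caller is allowed to wait and eventually succeeds. */
enum gfp_model { NOBLOCK, MAYBLOCK };

static int add_to_swap_model(enum gfp_model mask, int memory_tight)
{
        if (memory_tight && mask == NOBLOCK)
                return 0;       /* fail fast; caller keeps the page */
        return 1;               /* blocking caller can wait it out */
}

int main(void)
{
        printf("reclaim under pressure:   %d\n",
               add_to_swap_model(NOBLOCK, 1));   /* 0 */
        printf("migration under pressure: %d\n",
               add_to_swap_model(MAYBLOCK, 1));  /* 1 */
        return 0;
}
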
@@ -506,36 +541,8 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc)
                                goto free_it;
                }
 
-               if (!mapping)
-                       goto keep_locked;       /* truncate got there first */
-
-               write_lock_irq(&mapping->tree_lock);
-
-               /*
-                * The non-racy check for busy page.  It is critical to check
-                * PageDirty _after_ making sure that the page is freeable and
-                * not in use by anybody.       (pagecache + us == 2)
-                */
-               if (unlikely(page_count(page) != 2))
-                       goto cannot_free;
-               smp_rmb();
-               if (unlikely(PageDirty(page)))
-                       goto cannot_free;
-
-#ifdef CONFIG_SWAP
-               if (PageSwapCache(page)) {
-                       swp_entry_t swap = { .val = page_private(page) };
-                       __delete_from_swap_cache(page);
-                       write_unlock_irq(&mapping->tree_lock);
-                       swap_free(swap);
-                       __put_page(page);       /* The pagecache ref */
-                       goto free_it;
-               }
-#endif /* CONFIG_SWAP */
-
-               __remove_from_page_cache(page);
-               write_unlock_irq(&mapping->tree_lock);
-               __put_page(page);
+               if (!remove_mapping(mapping, page))
+                       goto keep_locked;
 
 free_it:
                unlock_page(page);
@@ -544,10 +551,6 @@ free_it:
                        __pagevec_release_nonlru(&freed_pvec);
                continue;
 
-cannot_free:
-               write_unlock_irq(&mapping->tree_lock);
-               goto keep_locked;
-
 activate_locked:
                SetPageActive(page);
                pgactivate++;
@@ -565,51 +568,196 @@ keep:
        return reclaimed;
 }
 
+#ifdef CONFIG_MIGRATION
+static inline void move_to_lru(struct page *page)
+{
+       list_del(&page->lru);
+       if (PageActive(page)) {
+               /*
+                * lru_cache_add_active checks that
+                * the PG_active bit is off.
+                */
+               ClearPageActive(page);
+               lru_cache_add_active(page);
+       } else {
+               lru_cache_add(page);
+       }
+       put_page(page);
+}
+
 /*
- * zone->lru_lock is heavily contended.  Some of the functions that
- * shrink the lists perform better by taking out a batch of pages
- * and working on them outside the LRU lock.
+ * Add isolated pages on the list back to the LRU
  *
- * For pagecache intensive workloads, this function is the hottest
- * spot in the kernel (apart from copy_*_user functions).
+ * returns the number of pages put back.
+ */
+int putback_lru_pages(struct list_head *l)
+{
+       struct page *page;
+       struct page *page2;
+       int count = 0;
+
+       list_for_each_entry_safe(page, page2, l, lru) {
+               move_to_lru(page);
+               count++;
+       }
+       return count;
+}
+
+/*
+ * swapout a single page
+ * page is locked upon entry, unlocked on exit
+ */
+static int swap_page(struct page *page)
+{
+       struct address_space *mapping = page_mapping(page);
+
+       if (page_mapped(page) && mapping)
+               if (try_to_unmap(page) != SWAP_SUCCESS)
+                       goto unlock_retry;
+
+       if (PageDirty(page)) {
+               /* Page is dirty, try to write it out here */
+               switch(pageout(page, mapping)) {
+               case PAGE_KEEP:
+               case PAGE_ACTIVATE:
+                       goto unlock_retry;
+
+               case PAGE_SUCCESS:
+                       goto retry;
+
+               case PAGE_CLEAN:
+                       ; /* try to free the page below */
+               }
+       }
+
+       if (PagePrivate(page)) {
+               if (!try_to_release_page(page, GFP_KERNEL) ||
+                   (!mapping && page_count(page) == 1))
+                       goto unlock_retry;
+       }
+
+       if (remove_mapping(mapping, page)) {
+               /* Success */
+               unlock_page(page);
+               return 0;
+       }
+
+unlock_retry:
+       unlock_page(page);
+
+retry:
+       return -EAGAIN;
+}
+/*
+ * migrate_pages
  *
- * Appropriate locks must be held before calling this function.
+ * Two lists are passed to this function. The first list
+ * contains the pages isolated from the LRU to be migrated.
+ * The second list contains new pages that the pages isolated
+ * can be moved to. If the second list is NULL then all
+ * pages are swapped out.
  *
- * @nr_to_scan:        The number of pages to look through on the list.
- * @src:       The LRU list to pull pages off.
- * @dst:       The temp list to put pages on to.
- * @scanned:   The number of pages that were scanned.
+ * The function returns after 10 attempts or if no pages
+ * are movable anymore because "to" has become empty
+ * or no retryable pages exist anymore.
  *
- * returns how many pages were moved onto *@dst.
+ * SIMPLIFIED VERSION: This implementation of migrate_pages
+ * is only swapping out pages and never touches the second
+ * list. The direct migration patchset
+ * extends this function to avoid the use of swap.
+ *
+ * Return: Number of pages not migrated when "to" ran empty.
  */
-static int isolate_lru_pages(int nr_to_scan, struct list_head *src,
-                            struct list_head *dst, int *scanned)
+int migrate_pages(struct list_head *from, struct list_head *to,
+                 struct list_head *moved, struct list_head *failed)
 {
-       int nr_taken = 0;
+       int retry;
+       int nr_failed = 0;
+       int pass = 0;
        struct page *page;
-       int scan = 0;
+       struct page *page2;
+       int swapwrite = current->flags & PF_SWAPWRITE;
+       int rc;
 
-       while (scan++ < nr_to_scan && !list_empty(src)) {
-               page = lru_to_page(src);
-               prefetchw_prev_lru_page(page, src, flags);
+       if (!swapwrite)
+               current->flags |= PF_SWAPWRITE;
 
-               switch (__isolate_lru_page(page)) {
-               case 1:
-                       /* Succeeded to isolate page */
-                       list_move(&page->lru, dst);
-                       nr_taken++;
-                       break;
-               case -ENOENT:
-                       /* Not possible to isolate */
-                       list_move(&page->lru, src);
-                       break;
-               default:
-                       BUG();
+redo:
+       retry = 0;
+
+       list_for_each_entry_safe(page, page2, from, lru) {
+               cond_resched();
+
+               rc = 0;
+               if (page_count(page) == 1)
+                       /* page was freed from under us. So we are done. */
+                       goto next;
+
+               /*
+                * Skip locked pages during the first two passes to give the
+                * functions holding the lock time to release the page. Later we
+                * use lock_page() to have a higher chance of acquiring the
+                * lock.
+                */
+               rc = -EAGAIN;
+               if (pass > 2)
+                       lock_page(page);
+               else
+                       if (TestSetPageLocked(page))
+                               goto next;
+
+               /*
+                * Only wait on writeback if we have already done a pass where
+                * we may have triggered writeouts for lots of pages.
+                */
+               if (pass > 0) {
+                       wait_on_page_writeback(page);
+               } else {
+                       if (PageWriteback(page))
+                               goto unlock_page;
+               }
+
+               /*
+                * Anonymous pages must have swap cache references otherwise
+                * the information contained in the page maps cannot be
+                * preserved.
+                */
+               if (PageAnon(page) && !PageSwapCache(page)) {
+                       if (!add_to_swap(page, GFP_KERNEL)) {
+                               rc = -ENOMEM;
+                               goto unlock_page;
+                       }
+               }
+
+               /*
+                * Page is properly locked and writeback is complete.
+                * Try to migrate the page.
+                */
+               rc = swap_page(page);
+               goto next;
+
+unlock_page:
+               unlock_page(page);
+
+next:
+               if (rc == -EAGAIN) {
+                       retry++;
+               } else if (rc) {
+                       /* Permanent failure */
+                       list_move(&page->lru, failed);
+                       nr_failed++;
+               } else {
+                       /* Success */
+                       list_move(&page->lru, moved);
                }
        }
+       if (retry && pass++ < 10)
+               goto redo;
 
-       *scanned = scan;
-       return nr_taken;
+       if (!swapwrite)
+               current->flags &= ~PF_SWAPWRITE;
+
+       return nr_failed + retry;
 }
 
 static void lru_add_drain_per_cpu(void *dummy)
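
Control-flow-wise, migrate_pages() above is a bounded multi-pass retry loop: each pass walks the remaining pages, and each attempt ends as success (moved to "moved"), permanent failure (moved to "failed"), or -EAGAIN (kept on "from" for the next pass), with at most ten passes and escalating aggressiveness (blocking lock_page(), waiting on writeback) in later ones. A self-contained model of just that bookkeeping, with the per-page work replaced by a stub:

#include <stdio.h>

/* Model of migrate_pages()' control flow: attempts that report AGAIN
 * stay queued and are retried for up to 10 passes; successes and hard
 * failures leave the queue immediately. The real per-page work
 * (locking, unmapping, writeout) is replaced by a stub. */
#define NPAGES 4
enum result { OK, FAIL, AGAIN };

/* Stub: page i succeeds once `pass` reaches i; page 3 never moves. */
static enum result try_one(int i, int pass)
{
        if (i == 3)
                return FAIL;
        return pass >= i ? OK : AGAIN;
}

int main(void)
{
        int pending[NPAGES] = { 1, 1, 1, 1 };   /* 1 = still queued */
        int nr_failed = 0, retry, pass = 0;

        do {
                retry = 0;
                for (int i = 0; i < NPAGES; i++) {
                        if (!pending[i])
                                continue;
                        switch (try_one(i, pass)) {
                        case AGAIN: retry++;                     break;
                        case FAIL:  nr_failed++; pending[i] = 0; break;
                        case OK:    pending[i] = 0;              break;
                        }
                }
        } while (retry && pass++ < 10);

        printf("failed=%d retry=%d\n", nr_failed, retry);  /* 1 0 */
        return 0;
}
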
@@ -653,6 +801,54 @@ redo:
        }
        return rc;
 }
+#endif
+
+/*
+ * zone->lru_lock is heavily contended.  Some of the functions that
+ * shrink the lists perform better by taking out a batch of pages
+ * and working on them outside the LRU lock.
+ *
+ * For pagecache intensive workloads, this function is the hottest
+ * spot in the kernel (apart from copy_*_user functions).
+ *
+ * Appropriate locks must be held before calling this function.
+ *
+ * @nr_to_scan:        The number of pages to look through on the list.
+ * @src:       The LRU list to pull pages off.
+ * @dst:       The temp list to put pages on to.
+ * @scanned:   The number of pages that were scanned.
+ *
+ * returns how many pages were moved onto *@dst.
+ */
+static int isolate_lru_pages(int nr_to_scan, struct list_head *src,
+                            struct list_head *dst, int *scanned)
+{
+       int nr_taken = 0;
+       struct page *page;
+       int scan = 0;
+
+       while (scan++ < nr_to_scan && !list_empty(src)) {
+               page = lru_to_page(src);
+               prefetchw_prev_lru_page(page, src, flags);
+
+               switch (__isolate_lru_page(page)) {
+               case 1:
+                       /* Succeeded to isolate page */
+                       list_move(&page->lru, dst);
+                       nr_taken++;
+                       break;
+               case -ENOENT:
+                       /* Not possible to isolate */
+                       list_move(&page->lru, src);
+                       break;
+               default:
+                       BUG();
+               }
+       }
+
+       *scanned = scan;
+       return nr_taken;
+}
 
 /*
  * shrink_cache() adds the number of pages reclaimed to sc->nr_reclaimed
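
isolate_lru_pages() is moved here verbatim, below the new #endif, so the migration code sits together above it. The technique it implements is batching: detach up to nr_to_scan pages from the source LRU onto a private list in one pass, so the expensive per-page work can then run without zone->lru_lock held. A userspace model of the batched take, using a simple singly linked list in place of the kernel's list_head:

#include <stdio.h>

/* Model of batched isolation: detach up to nr_to_scan nodes from the
 * source list onto a private list in one pass, so per-node work can
 * later run without whatever lock protects the source. */
struct node { int id; struct node *next; };

static int isolate(struct node **src, struct node **dst,
                   int nr_to_scan, int *scanned)
{
        int taken = 0, scan = 0;

        while (scan++ < nr_to_scan && *src) {
                struct node *n = *src;
                *src = n->next;         /* pop from the source list */
                n->next = *dst;         /* push onto the private list */
                *dst = n;
                taken++;
        }
        *scanned = scan;
        return taken;
}

int main(void)
{
        struct node n3 = { 3, NULL }, n2 = { 2, &n3 }, n1 = { 1, &n2 };
        struct node *src = &n1, *dst = NULL;
        int scanned, taken = isolate(&src, &dst, 2, &scanned);

        printf("taken=%d, top of private list=%d\n", taken, dst->id);
        return 0;   /* prints taken=2, top of private list=2 */
}
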
@@ -719,40 +915,6 @@ done:
        pagevec_release(&pvec);
 }
 
-static inline void move_to_lru(struct page *page)
-{
-       list_del(&page->lru);
-       if (PageActive(page)) {
-               /*
-                * lru_cache_add_active checks that
-                * the PG_active bit is off.
-                */
-               ClearPageActive(page);
-               lru_cache_add_active(page);
-       } else {
-               lru_cache_add(page);
-       }
-       put_page(page);
-}
-
-/*
- * Add isolated pages on the list back to the LRU
- *
- * returns the number of pages put back.
- */
-int putback_lru_pages(struct list_head *l)
-{
-       struct page *page;
-       struct page *page2;
-       int count = 0;
-
-       list_for_each_entry_safe(page, page2, l, lru) {
-               move_to_lru(page);
-               count++;
-       }
-       return count;
-}
-
 /*
  * This moves pages from the active list to the inactive list.
  *
@@ -1299,7 +1461,7 @@ static int kswapd(void *p)
         * us from recursively trying to free more memory as we're
         * trying to free the first piece of memory in the first place).
         */
-       tsk->flags |= PF_MEMALLOC|PF_KSWAPD;
+       tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
 
        order = 0;
        for ( ; ; ) {