pilppa.org Git - linux-2.6-omap-h63xx.git/blobdiff - mm/swap_state.c
Slab allocators: fail if ksize is called with a NULL parameter
[linux-2.6-omap-h63xx.git] / mm / swap_state.c
index db8a3d3e163651c7c3baa85cd1c82434db177ae8..b52635601dfe0054ea4a4eddbba6317b71e93272 100644 (file)
 #include <linux/buffer_head.h>
 #include <linux/backing-dev.h>
 #include <linux/pagevec.h>
+#include <linux/migrate.h>
 
 #include <asm/pgtable.h>
 
 /*
  * swapper_space is a fiction, retained to simplify the path through
- * vmscan's shrink_list, to make sync_page look nicer, and to allow
+ * vmscan's shrink_page_list, to make sync_page look nicer, and to allow
  * future use of radix_tree tags in the swap cache.
  */
-static struct address_space_operations swap_aops = {
+static const struct address_space_operations swap_aops = {
        .writepage      = swap_writepage,
        .sync_page      = block_sync_page,
        .set_page_dirty = __set_page_dirty_nobuffers,
@@ -37,7 +38,7 @@ static struct backing_dev_info swap_backing_dev_info = {
 
 struct address_space swapper_space = {
        .page_tree      = RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
-       .tree_lock      = RW_LOCK_UNLOCKED,
+       .tree_lock      = __RW_LOCK_UNLOCKED(swapper_space.tree_lock),
        .a_ops          = &swap_aops,
        .i_mmap_nonlinear = LIST_HEAD_INIT(swapper_space.i_mmap_nonlinear),
        .backing_dev_info = &swap_backing_dev_info,
@@ -73,6 +74,7 @@ static int __add_to_swap_cache(struct page *page, swp_entry_t entry,
 {
        int error;
 
+       BUG_ON(!PageLocked(page));
        BUG_ON(PageSwapCache(page));
        BUG_ON(PagePrivate(page));
        error = radix_tree_preload(gfp_mask);
@@ -82,11 +84,10 @@ static int __add_to_swap_cache(struct page *page, swp_entry_t entry,
                                                entry.val, page);
                if (!error) {
                        page_cache_get(page);
-                       SetPageLocked(page);
                        SetPageSwapCache(page);
                        set_page_private(page, entry.val);
                        total_swapcache_pages++;
-                       pagecache_acct(1);
+                       __inc_zone_page_state(page, NR_FILE_PAGES);
                }
                write_unlock_irq(&swapper_space.tree_lock);
                radix_tree_preload_end();
@@ -98,15 +99,18 @@ static int add_to_swap_cache(struct page *page, swp_entry_t entry)
 {
        int error;
 
+       BUG_ON(PageLocked(page));
        if (!swap_duplicate(entry)) {
                INC_CACHE_INFO(noent_race);
                return -ENOENT;
        }
+       SetPageLocked(page);
        error = __add_to_swap_cache(page, entry, GFP_KERNEL);
        /*
         * Anon pages are already on the LRU, we don't run lru_cache_add here.
         */
        if (error) {
+               ClearPageLocked(page);
                swap_free(entry);
                if (error == -EEXIST)
                        INC_CACHE_INFO(exist_race);
@@ -131,7 +135,7 @@ void __delete_from_swap_cache(struct page *page)
        set_page_private(page, 0);
        ClearPageSwapCache(page);
        total_swapcache_pages--;
-       pagecache_acct(-1);
+       __dec_zone_page_state(page, NR_FILE_PAGES);
        INC_CACHE_INFO(del_total);
 }
 
@@ -147,8 +151,7 @@ int add_to_swap(struct page * page, gfp_t gfp_mask)
        swp_entry_t entry;
        int err;
 
-       if (!PageLocked(page))
-               BUG();
+       BUG_ON(!PageLocked(page));
 
        for (;;) {
                entry = get_swap_page();
@@ -334,7 +337,8 @@ struct page *read_swap_cache_async(swp_entry_t entry,
                 * Get a new page to read into from swap.
                 */
                if (!new_page) {
-                       new_page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
+                       new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE,
+                                                               vma, addr);
                        if (!new_page)
                                break;          /* Out of memory */
                }