If we add a failing stub for add_to_swap(), then we can remove the #ifdef
CONFIG_SWAP from mm/vmscan.c.

This was intended as a source cleanup, but looking more closely, it turns
out that the !CONFIG_SWAP case was going to keep_locked for an anonymous
page, whereas now it goes to the more suitable activate_locked, just as
the CONFIG_SWAP case does when nr_swap_pages is 0.
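
For context, once the stub is in place the block in shrink_page_list()
that the removed #ifdef used to guard reads roughly as follows (a sketch
pieced together from the hunks below; surrounding checks are omitted):

		/*
		 * Anonymous process memory has backing store?
		 * Try to allocate it some swap space here.
		 */
		if (PageAnon(page) && !PageSwapCache(page)) {
			/* the !CONFIG_SWAP stub always returns 0 ... */
			if (!add_to_swap(page))
				goto activate_locked;	/* ... so activate, not keep */
			may_enter_fs = 1;
		}
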
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Cc: Lee Schermerhorn <lee.schermerhorn@hp.com>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Robin Holt <holt@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
        return NULL;
 }
 
+static inline int add_to_swap(struct page *page)
+{
+       return 0;
+}
+
 static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
                                                        gfp_t gfp_mask)
 {
 
                                        referenced && page_mapping_inuse(page))
                        goto activate_locked;
 
-#ifdef CONFIG_SWAP
                /*
                 * Anonymous process memory has backing store?
                 * Try to allocate it some swap space here.
                                goto activate_locked;
                        may_enter_fs = 1;
                }
-#endif /* CONFIG_SWAP */
 
                mapping = page_mapping(page);