gfp_t gfp_mask);
 extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
                                        gfp_t gfp_mask);
+extern void mem_cgroup_move_lists(struct page *page, enum lru_list lru);
 extern void mem_cgroup_uncharge_page(struct page *page);
 extern void mem_cgroup_uncharge_cache_page(struct page *page);
-extern void mem_cgroup_move_lists(struct page *page, bool active);
 extern int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask);
 
 extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
 
        enum lru_list l = LRU_BASE;
 
        list_del(&page->lru);
-       if (PageActive(page)) {
-               __ClearPageActive(page);
-               l += LRU_ACTIVE;
+       if (PageUnevictable(page)) {
+               __ClearPageUnevictable(page);
+               l = LRU_UNEVICTABLE;
+       } else {
+               if (PageActive(page)) {
+                       __ClearPageActive(page);
+                       l += LRU_ACTIVE;
+               }
+               l += page_is_file_cache(page);
        }
-       l += page_is_file_cache(page);
        __dec_zone_state(zone, NR_LRU_BASE + l);
 }
 
 {
        enum lru_list lru = LRU_BASE;
 
-       if (PageActive(page))
-               lru += LRU_ACTIVE;
-       lru += page_is_file_cache(page);
+       if (PageUnevictable(page))
+               lru = LRU_UNEVICTABLE;
+       else {
+               if (PageActive(page))
+                       lru += LRU_ACTIVE;
+               lru += page_is_file_cache(page);
+       }
 
        return lru;
 }
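
An aside on the index arithmetic in page_lru() (and the similar hunk above it): it relies on LRU_ACTIVE being 1 and LRU_FILE being 2, and on page_is_file_cache() returning LRU_FILE for file-backed pages (both defined elsewhere in this series), so that base + active + file selects the matching list and the unevictable case overrides both. Below is a minimal standalone sketch of that mapping; it is illustration only, not part of the patch, and uses plain flag arguments in place of real struct page state.

/* Illustrative sketch only; not part of the patch. */
#include <stdio.h>

enum lru_list {
        LRU_INACTIVE_ANON,      /* LRU_BASE */
        LRU_ACTIVE_ANON,        /* LRU_BASE + LRU_ACTIVE */
        LRU_INACTIVE_FILE,      /* LRU_BASE + LRU_FILE */
        LRU_ACTIVE_FILE,        /* LRU_BASE + LRU_FILE + LRU_ACTIVE */
        LRU_UNEVICTABLE,
        NR_LRU_LISTS
};

#define LRU_ACTIVE      1
#define LRU_FILE        2

/* Mirrors page_lru(): unevictable overrides; otherwise base + active + file. */
static enum lru_list sketch_page_lru(int unevictable, int active, int file)
{
        enum lru_list lru = LRU_INACTIVE_ANON;  /* LRU_BASE */

        if (unevictable)
                return LRU_UNEVICTABLE;
        if (active)
                lru += LRU_ACTIVE;
        if (file)
                lru += LRU_FILE;        /* page_is_file_cache() returns LRU_FILE */
        return lru;
}

int main(void)
{
        static const char * const name[NR_LRU_LISTS] = {
                "inactive_anon", "active_anon",
                "inactive_file", "active_file", "unevictable",
        };
        int i;

        for (i = 0; i < 8; i++) {
                int u = i >> 2, a = (i >> 1) & 1, f = i & 1;

                printf("unevictable=%d active=%d file=%d -> %s\n",
                       u, a, f, name[sketch_page_lru(u, a, f)]);
        }
        return 0;
}

Compiled with any C compiler, it prints one line per flag combination, e.g. "unevictable=0 active=1 file=1 -> active_file".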
 
        NR_ACTIVE_ANON,         /*  "     "     "   "       "         */
        NR_INACTIVE_FILE,       /*  "     "     "   "       "         */
        NR_ACTIVE_FILE,         /*  "     "     "   "       "         */
+#ifdef CONFIG_UNEVICTABLE_LRU
+       NR_UNEVICTABLE,         /*  "     "     "   "       "         */
+#else
+       NR_UNEVICTABLE = NR_ACTIVE_FILE, /* avoid compiler errors in dead code */
+#endif
        NR_ANON_PAGES,  /* Mapped anonymous pages */
        NR_FILE_MAPPED, /* pagecache pages mapped into pagetables.
                           only modified from process context */
        LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
        LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
        LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
-       NR_LRU_LISTS };
+#ifdef CONFIG_UNEVICTABLE_LRU
+       LRU_UNEVICTABLE,
+#else
+       LRU_UNEVICTABLE = LRU_ACTIVE_FILE, /* avoid compiler errors in dead code */
+#endif
+       NR_LRU_LISTS
+};
 
 #define for_each_lru(l) for (l = 0; l < NR_LRU_LISTS; l++)
 
+#define for_each_evictable_lru(l) for (l = 0; l <= LRU_ACTIVE_FILE; l++)
+
 static inline int is_file_lru(enum lru_list l)
 {
        return (l == LRU_INACTIVE_FILE || l == LRU_ACTIVE_FILE);
        return (l == LRU_ACTIVE_ANON || l == LRU_ACTIVE_FILE);
 }
 
+static inline int is_unevictable_lru(enum lru_list l)
+{
+#ifdef CONFIG_UNEVICTABLE_LRU
+       return (l == LRU_UNEVICTABLE);
+#else
+       return 0;
+#endif
+}
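
A note on the #else aliases above: NR_UNEVICTABLE = NR_ACTIVE_FILE and LRU_UNEVICTABLE = LRU_ACTIVE_FILE exist only so that code naming those constants still compiles when CONFIG_UNEVICTABLE_LRU is off, while is_unevictable_lru() collapsing to a constant 0 lets the compiler drop the corresponding branches as dead code. The toy sketch below shows the same pattern outside the kernel; FEATURE, LIST_SPECIAL and is_special() are invented names for illustration, not anything from the tree.

/* Illustrative sketch only; build with -DFEATURE=0 or -DFEATURE=1. */
#include <stdio.h>

#ifndef FEATURE
#define FEATURE 1
#endif

enum list_id {
        LIST_A,
        LIST_B,
#if FEATURE
        LIST_SPECIAL,
#else
        LIST_SPECIAL = LIST_B,  /* alias: dead references still compile */
#endif
        NR_LISTS                /* does not count the special list when off */
};

static inline int is_special(enum list_id l)
{
#if FEATURE
        return l == LIST_SPECIAL;
#else
        return 0;               /* constant 0: callers' branches become dead code */
#endif
}

int main(void)
{
        enum list_id l;

        for (l = 0; l < NR_LISTS; l++) {
                if (is_special(l))      /* eliminated entirely when FEATURE=0 */
                        printf("list %d is the special list\n", l);
                else
                        printf("list %d is a normal list\n", l);
        }
        return 0;
}

With FEATURE=0 the loop only visits the two normal lists, mirroring how NR_LRU_LISTS stops before the unevictable list when the config option is off.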
+
 struct per_cpu_pages {
        int count;              /* number of pages in the list */
        int high;               /* high watermark, emptying needed */
 
        PG_reclaim,             /* To be reclaimed asap */
        PG_buddy,               /* Page is free, on buddy lists */
        PG_swapbacked,          /* Page is backed by RAM/swap */
+#ifdef CONFIG_UNEVICTABLE_LRU
+       PG_unevictable,         /* Page is "unevictable"  */
+#endif
 #ifdef CONFIG_IA64_UNCACHED_ALLOCATOR
        PG_uncached,            /* Page has been mapped as uncached */
 #endif
 PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty)
 PAGEFLAG(LRU, lru) __CLEARPAGEFLAG(LRU, lru)
 PAGEFLAG(Active, active) __CLEARPAGEFLAG(Active, active)
+       TESTCLEARFLAG(Active, active)
 __PAGEFLAG(Slab, slab)
 PAGEFLAG(Checked, checked)             /* Used by some filesystems */
 PAGEFLAG(Pinned, pinned) TESTSCFLAG(Pinned, pinned)    /* Xen */
 PAGEFLAG_FALSE(SwapCache)
 #endif
 
+#ifdef CONFIG_UNEVICTABLE_LRU
+PAGEFLAG(Unevictable, unevictable) __CLEARPAGEFLAG(Unevictable, unevictable)
+       TESTCLEARFLAG(Unevictable, unevictable)
+#else
+PAGEFLAG_FALSE(Unevictable) TESTCLEARFLAG_FALSE(Unevictable)
+       SETPAGEFLAG_NOOP(Unevictable) CLEARPAGEFLAG_NOOP(Unevictable)
+       __CLEARPAGEFLAG_NOOP(Unevictable)
+#endif
+
 #ifdef CONFIG_IA64_UNCACHED_ALLOCATOR
 PAGEFLAG(Uncached, uncached)
 #else
 
 #endif /* !PAGEFLAGS_EXTENDED */
 
+#ifdef CONFIG_UNEVICTABLE_LRU
+#define __PG_UNEVICTABLE (1 << PG_unevictable)
+#else
+#define __PG_UNEVICTABLE 0
+#endif
+
 #define PAGE_FLAGS     (1 << PG_lru   | 1 << PG_private   | 1 << PG_locked | \
                         1 << PG_buddy | 1 << PG_writeback | \
-                        1 << PG_slab  | 1 << PG_swapcache | 1 << PG_active)
+                        1 << PG_slab  | 1 << PG_swapcache | 1 << PG_active | \
+                        __PG_UNEVICTABLE)
 
 /*
  * Flags checked in bad_page().  Pages on the free list should not have
 
        ____pagevec_lru_add(pvec, LRU_ACTIVE_FILE);
 }
 
-
 static inline void pagevec_lru_add_file(struct pagevec *pvec)
 {
        if (pagevec_count(pvec))
 
 extern void rotate_reclaimable_page(struct page *page);
 extern void swap_setup(void);
 
+extern void add_page_to_unevictable_list(struct page *page);
+
 /**
  * lru_cache_add: add a page to the page lists
  * @page: the page to add
 }
 #endif
 
+#ifdef CONFIG_UNEVICTABLE_LRU
+extern int page_evictable(struct page *page, struct vm_area_struct *vma);
+#else
+static inline int page_evictable(struct page *page,
+                                               struct vm_area_struct *vma)
+{
+       return 1;
+}
+#endif
+
 extern int kswapd_run(int nid);
 
 #ifdef CONFIG_MMU
 
        def_bool y
        depends on !ARCH_NO_VIRT_TO_BUS
 
+config UNEVICTABLE_LRU
+       bool "Add LRU list to track non-evictable pages"
+       default y
+       depends on MMU
+       help
+         Keeps unevictable pages off the active and inactive pageout
+         lists, so kswapd will not waste CPU time or have its balancing
+         algorithms thrown off by scanning these pages.  Selecting this
+         will use one page flag and increase the code size a little;
+         say Y unless you know what you are doing.
+
 config MMU_NOTIFIER
        bool
 
        atomic_dec(&page->_count);
 }
 
+/*
+ * in mm/vmscan.c:
+ */
 extern int isolate_lru_page(struct page *page);
+extern void putback_lru_page(struct page *page);
 
+/*
+ * in mm/page_alloc.c:
+ */
 extern void __free_pages_bootmem(struct page *page, unsigned int order);
 
 /*
        return page_private(page);
 }
 
+#ifdef CONFIG_UNEVICTABLE_LRU
+/*
+ * unevictable_migrate_page() is called only from migrate_page_copy() to
+ * migrate the unevictable flag to the new page.
+ * Note that the old page has been isolated from the LRU lists at this
+ * point, so we don't need to worry about LRU statistics.
+ */
+static inline void unevictable_migrate_page(struct page *new, struct page *old)
+{
+       if (TestClearPageUnevictable(old))
+               SetPageUnevictable(new);
+}
+#else
+static inline void unevictable_migrate_page(struct page *new, struct page *old)
+{
+}
+#endif
+
 /*
  * FLATMEM and DISCONTIGMEM configurations use alloc_bootmem_node,
  * so all functions starting at paging_init should be marked __init
 
        struct mem_cgroup *mem_cgroup;
        int flags;
 };
-#define PAGE_CGROUP_FLAG_CACHE (0x1)   /* charged as cache */
-#define PAGE_CGROUP_FLAG_ACTIVE (0x2)  /* page is active in this cgroup */
-#define PAGE_CGROUP_FLAG_FILE  (0x4)   /* page is file system backed */
+#define PAGE_CGROUP_FLAG_CACHE       (0x1)     /* charged as cache */
+#define PAGE_CGROUP_FLAG_ACTIVE      (0x2)     /* page is active in this cgroup */
+#define PAGE_CGROUP_FLAG_FILE        (0x4)     /* page is file system backed */
+#define PAGE_CGROUP_FLAG_UNEVICTABLE (0x8)     /* page is unevictable */
 
 static int page_cgroup_nid(struct page_cgroup *pc)
 {
 {
        int lru = LRU_BASE;
 
-       if (pc->flags & PAGE_CGROUP_FLAG_ACTIVE)
-               lru += LRU_ACTIVE;
-       if (pc->flags & PAGE_CGROUP_FLAG_FILE)
-               lru += LRU_FILE;
+       if (pc->flags & PAGE_CGROUP_FLAG_UNEVICTABLE)
+               lru = LRU_UNEVICTABLE;
+       else {
+               if (pc->flags & PAGE_CGROUP_FLAG_ACTIVE)
+                       lru += LRU_ACTIVE;
+               if (pc->flags & PAGE_CGROUP_FLAG_FILE)
+                       lru += LRU_FILE;
+       }
 
        MEM_CGROUP_ZSTAT(mz, lru) -= 1;
 
 {
        int lru = LRU_BASE;
 
-       if (pc->flags & PAGE_CGROUP_FLAG_ACTIVE)
-               lru += LRU_ACTIVE;
-       if (pc->flags & PAGE_CGROUP_FLAG_FILE)
-               lru += LRU_FILE;
+       if (pc->flags & PAGE_CGROUP_FLAG_UNEVICTABLE)
+               lru = LRU_UNEVICTABLE;
+       else {
+               if (pc->flags & PAGE_CGROUP_FLAG_ACTIVE)
+                       lru += LRU_ACTIVE;
+               if (pc->flags & PAGE_CGROUP_FLAG_FILE)
+                       lru += LRU_FILE;
+       }
 
        MEM_CGROUP_ZSTAT(mz, lru) += 1;
        list_add(&pc->lru, &mz->lists[lru]);
        mem_cgroup_charge_statistics(pc->mem_cgroup, pc->flags, true);
 }
 
-static void __mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
+static void __mem_cgroup_move_lists(struct page_cgroup *pc, enum lru_list lru)
 {
        struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);
-       int from = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
-       int file = pc->flags & PAGE_CGROUP_FLAG_FILE;
-       int lru = LRU_FILE * !!file + !!from;
+       int active    = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
+       int file      = pc->flags & PAGE_CGROUP_FLAG_FILE;
+       int unevictable = pc->flags & PAGE_CGROUP_FLAG_UNEVICTABLE;
+       enum lru_list from = unevictable ? LRU_UNEVICTABLE :
+                               (LRU_FILE * !!file + !!active);
 
-       MEM_CGROUP_ZSTAT(mz, lru) -= 1;
+       if (lru == from)
+               return;
 
-       if (active)
-               pc->flags |= PAGE_CGROUP_FLAG_ACTIVE;
-       else
+       MEM_CGROUP_ZSTAT(mz, from) -= 1;
+
+       if (is_unevictable_lru(lru)) {
                pc->flags &= ~PAGE_CGROUP_FLAG_ACTIVE;
+               pc->flags |= PAGE_CGROUP_FLAG_UNEVICTABLE;
+       } else {
+               if (is_active_lru(lru))
+                       pc->flags |= PAGE_CGROUP_FLAG_ACTIVE;
+               else
+                       pc->flags &= ~PAGE_CGROUP_FLAG_ACTIVE;
+               pc->flags &= ~PAGE_CGROUP_FLAG_UNEVICTABLE;
+       }
 
-       lru = LRU_FILE * !!file + !!active;
        MEM_CGROUP_ZSTAT(mz, lru) += 1;
        list_move(&pc->lru, &mz->lists[lru]);
 }
 /*
  * This routine assumes that the appropriate zone's lru lock is already held
  */
-void mem_cgroup_move_lists(struct page *page, bool active)
+void mem_cgroup_move_lists(struct page *page, enum lru_list lru)
 {
        struct page_cgroup *pc;
        struct mem_cgroup_per_zone *mz;
        if (pc) {
                mz = page_cgroup_zoneinfo(pc);
                spin_lock_irqsave(&mz->lru_lock, flags);
-               __mem_cgroup_move_lists(pc, active);
+               __mem_cgroup_move_lists(pc, lru);
                spin_unlock_irqrestore(&mz->lru_lock, flags);
        }
        unlock_page_cgroup(page);
                /*
                 * TODO: play better with lumpy reclaim, grabbing anything.
                 */
-               if (PageActive(page) && !active) {
-                       __mem_cgroup_move_lists(pc, true);
-                       continue;
-               }
-               if (!PageActive(page) && active) {
-                       __mem_cgroup_move_lists(pc, false);
+               if (PageUnevictable(page) ||
+                   (PageActive(page) && !active) ||
+                   (!PageActive(page) && active)) {
+                       __mem_cgroup_move_lists(pc, page_lru(page));
                        continue;
                }
 
 
        if (PageSwapCache(page))
                md->swapcache++;
 
-       if (PageActive(page))
+       if (PageActive(page) || PageUnevictable(page))
                md->active++;
 
        if (PageWriteback(page))
 
        return 0;
 }
 
-static inline void move_to_lru(struct page *page)
-{
-       lru_cache_add_lru(page, page_lru(page));
-       put_page(page);
-}
-
 /*
- * Add isolated pages on the list back to the LRU.
+ * Add isolated pages on the list back to the LRU under page lock
+ * to avoid leaking evictable pages back onto the unevictable list.
  *
  * returns the number of pages put back.
  */
 
        list_for_each_entry_safe(page, page2, l, lru) {
                list_del(&page->lru);
-               move_to_lru(page);
+               putback_lru_page(page);
                count++;
        }
        return count;
                SetPageReferenced(newpage);
        if (PageUptodate(page))
                SetPageUptodate(newpage);
-       if (PageActive(page))
+       if (TestClearPageActive(page)) {
+               VM_BUG_ON(PageUnevictable(page));
                SetPageActive(newpage);
+       } else
+               unevictable_migrate_page(newpage, page);
        if (PageChecked(page))
                SetPageChecked(newpage);
        if (PageMappedToDisk(page))
 #ifdef CONFIG_SWAP
        ClearPageSwapCache(page);
 #endif
-       ClearPageActive(page);
        ClearPagePrivate(page);
        set_page_private(page, 0);
        page->mapping = NULL;
  *
  * The new page will have replaced the old page if this function
  * is successful.
+ *
+ * Return value:
+ *   < 0 - error code
+ *  == 0 - success
  */
 static int move_to_new_page(struct page *newpage, struct page *page)
 {
        if (!newpage)
                return -ENOMEM;
 
-       if (page_count(page) == 1)
+       if (page_count(page) == 1) {
                /* page was freed from under us. So we are done. */
                goto move_newpage;
+       }
 
        charge = mem_cgroup_prepare_migration(page, newpage);
        if (charge == -ENOMEM) {
                rcu_read_unlock();
 
 unlock:
-
        unlock_page(page);
 
        if (rc != -EAGAIN) {
                 * restored.
                 */
                list_del(&page->lru);
-               move_to_lru(page);
+               putback_lru_page(page);
        }
 
 move_newpage:
        if (!charge)
                mem_cgroup_end_migration(newpage);
+
        /*
         * Move the new page to the LRU. If migration was not successful
         * then this will free the page.
         */
-       move_to_lru(newpage);
+       putback_lru_page(newpage);
+
        if (result) {
                if (rc)
                        *result = rc;
 
                        zone = pagezone;
                        spin_lock(&zone->lru_lock);
                }
-               if (PageLRU(page) && !PageActive(page)) {
+               if (PageLRU(page) && !PageActive(page) &&
+                   !PageUnevictable(page)) {
                        int lru = page_is_file_cache(page);
                        list_move_tail(&page->lru, &zone->lru[lru].list);
                        pgmoved++;
 void  rotate_reclaimable_page(struct page *page)
 {
        if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
-           PageLRU(page)) {
+           !PageUnevictable(page) && PageLRU(page)) {
                struct pagevec *pvec;
                unsigned long flags;
 
        struct zone *zone = page_zone(page);
 
        spin_lock_irq(&zone->lru_lock);
-       if (PageLRU(page) && !PageActive(page)) {
+       if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
                int file = page_is_file_cache(page);
                int lru = LRU_BASE + file;
                del_page_from_lru_list(zone, page, lru);
                lru += LRU_ACTIVE;
                add_page_to_lru_list(zone, page, lru);
                __count_vm_event(PGACTIVATE);
-               mem_cgroup_move_lists(page, true);
+               mem_cgroup_move_lists(page, lru);
 
                zone->recent_rotated[!!file]++;
                zone->recent_scanned[!!file]++;
  */
 void mark_page_accessed(struct page *page)
 {
-       if (!PageActive(page) && PageReferenced(page) && PageLRU(page)) {
+       if (!PageActive(page) && !PageUnevictable(page) &&
+                       PageReferenced(page) && PageLRU(page)) {
                activate_page(page);
                ClearPageReferenced(page);
        } else if (!PageReferenced(page)) {
 void lru_cache_add_lru(struct page *page, enum lru_list lru)
 {
        if (PageActive(page)) {
+               VM_BUG_ON(PageUnevictable(page));
                ClearPageActive(page);
+       } else if (PageUnevictable(page)) {
+               VM_BUG_ON(PageActive(page));
+               ClearPageUnevictable(page);
        }
 
-       VM_BUG_ON(PageLRU(page) || PageActive(page));
+       VM_BUG_ON(PageLRU(page) || PageActive(page) || PageUnevictable(page));
        __lru_cache_add(page, lru);
 }
 
+/**
+ * add_page_to_unevictable_list - add a page to the unevictable list
+ * @page:  the page to be added to the unevictable list
+ *
+ * Add page directly to its zone's unevictable list.  To avoid races with
+ * tasks that might be making the page evictable, through e.g. munlock,
+ * munmap or exit, while it's not on the LRU, we want to add the page
+ * while it's locked or otherwise "invisible" to other tasks.  This is
+ * difficult to do when using the pagevec cache, so bypass that.
+ */
+void add_page_to_unevictable_list(struct page *page)
+{
+       struct zone *zone = page_zone(page);
+
+       spin_lock_irq(&zone->lru_lock);
+       SetPageUnevictable(page);
+       SetPageLRU(page);
+       add_page_to_lru_list(zone, page, LRU_UNEVICTABLE);
+       spin_unlock_irq(&zone->lru_lock);
+}
+
 /*
  * Drain pages out of the cpu's pagevecs.
  * Either "cpu" is the current CPU, and preemption has already been
 
                if (PageLRU(page)) {
                        struct zone *pagezone = page_zone(page);
+
                        if (pagezone != zone) {
                                if (zone)
                                        spin_unlock_irqrestore(&zone->lru_lock,
 {
        int i;
        struct zone *zone = NULL;
+       VM_BUG_ON(is_unevictable_lru(lru));
 
        for (i = 0; i < pagevec_count(pvec); i++) {
                struct page *page = pvec->pages[i];
                        zone = pagezone;
                        spin_lock_irq(&zone->lru_lock);
                }
+               VM_BUG_ON(PageActive(page));
+               VM_BUG_ON(PageUnevictable(page));
                VM_BUG_ON(PageLRU(page));
                SetPageLRU(page);
                if (is_active_lru(lru))
 
        return 0;
 }
 
+/**
+ * putback_lru_page - put previously isolated page onto appropriate LRU list
+ * @page: page to be put back to the appropriate LRU list
+ *
+ * Add previously isolated @page to the appropriate LRU list.
+ * Page may still be unevictable for other reasons.
+ *
+ * lru_lock must not be held, interrupts must be enabled.
+ */
+#ifdef CONFIG_UNEVICTABLE_LRU
+void putback_lru_page(struct page *page)
+{
+       int lru;
+       int active = !!TestClearPageActive(page);
+
+       VM_BUG_ON(PageLRU(page));
+
+redo:
+       ClearPageUnevictable(page);
+
+       if (page_evictable(page, NULL)) {
+               /*
+                * For evictable pages, we can use the cache.
+                * In the event of a race, the worst case is that we end
+                * up with an unevictable page on the [in]active list;
+                * we know how to handle that.
+                */
+               lru = active + page_is_file_cache(page);
+               lru_cache_add_lru(page, lru);
+       } else {
+               /*
+                * Put unevictable pages directly on the zone's
+                * unevictable list.
+                */
+               lru = LRU_UNEVICTABLE;
+               add_page_to_unevictable_list(page);
+       }
+       mem_cgroup_move_lists(page, lru);
+
+       /*
+        * The page's status can change while we move it among the LRU lists.
+        * If an evictable page ends up on the unevictable list, it will never
+        * be freed.  To avoid that, check again after adding it to the list.
+        */
+       if (lru == LRU_UNEVICTABLE && page_evictable(page, NULL)) {
+               if (!isolate_lru_page(page)) {
+                       put_page(page);
+                       goto redo;
+               }
+               /*
+                * Someone else dropped this page from the LRU, so it will
+                * either be freed or put back on the LRU again.  There is
+                * nothing to do here.
+                */
+       }
+
+       put_page(page);         /* drop ref from isolate */
+}
+
+#else /* CONFIG_UNEVICTABLE_LRU */
+
+void putback_lru_page(struct page *page)
+{
+       int lru;
+       VM_BUG_ON(PageLRU(page));
+
+       lru = !!TestClearPageActive(page) + page_is_file_cache(page);
+       lru_cache_add_lru(page, lru);
+       mem_cgroup_move_lists(page, lru);
+       put_page(page);
+}
+#endif /* CONFIG_UNEVICTABLE_LRU */
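
A note on the redo loop in the CONFIG_UNEVICTABLE_LRU version above: the page_evictable() test and the list placement are not atomic, so after parking a page on the unevictable list the function re-tests evictability and, if the page became evictable in that window, re-isolates it and starts over. The toy userspace sketch below reproduces only that check-after-publish control flow, with the race injected deterministically; place(), make_evictable() and the evictable flag are invented stand-ins, not kernel API.

/* Illustrative sketch only; not part of the patch. */
#include <stdio.h>

enum where { NORMAL_LIST, UNEVICTABLE_LIST };

static int evictable;                   /* stands in for page_evictable() */

static enum where place(void (*race)(void))
{
        enum where w;

redo:
        if (evictable) {
                w = NORMAL_LIST;
        } else {
                w = UNEVICTABLE_LIST;   /* "publish" to the unevictable list */
                if (race) {
                        race();         /* simulate a racing munlock/munmap/exit */
                        race = NULL;    /* inject the race only once */
                }
                /* check-after-publish: did the page just become evictable? */
                if (evictable)
                        goto redo;      /* "re-isolate" and place it again */
        }
        return w;
}

static void make_evictable(void)
{
        evictable = 1;
}

int main(void)
{
        printf("no race:  %s list\n",
               place(NULL) == NORMAL_LIST ? "normal" : "unevictable");
        evictable = 0;
        printf("race hit: %s list\n",
               place(make_evictable) == NORMAL_LIST ? "normal" : "unevictable");
        return 0;
}

This models only the control flow; the real function additionally drops the reference taken by isolate_lru_page() and updates the memcg lists via mem_cgroup_move_lists(), as in the hunk above.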
+
 /*
  * shrink_page_list() returns the number of reclaimed pages
  */
 
                sc->nr_scanned++;
 
+               if (unlikely(!page_evictable(page, NULL))) {
+                       unlock_page(page);
+                       putback_lru_page(page);
+                       continue;
+               }
+
                if (!sc->may_swap && page_mapped(page))
                        goto keep_locked;
 
                 * possible for a page to have PageDirty set, but it is actually
                 * clean (all its buffers are clean).  This happens if the
                 * buffers were written out directly, with submit_bh(). ext3
-                * will do this, as well as the blockdev mapping. 
+                * will do this, as well as the blockdev mapping.
                 * try_to_release_page() will discover that cleanness and will
                 * drop the buffers and mark the page clean - it can be freed.
                 *
                /* Not a candidate for swapping, so reclaim swap space. */
                if (PageSwapCache(page) && vm_swap_full())
                        remove_exclusive_swap_page_ref(page);
+               VM_BUG_ON(PageActive(page));
                SetPageActive(page);
                pgactivate++;
 keep_locked:
        if (mode != ISOLATE_BOTH && (!page_is_file_cache(page) != !file))
                return ret;
 
+       /*
+        * When this function is being called for lumpy reclaim, we
+        * initially look at all LRU pages: active, inactive and
+        * unevictable; only give shrink_page_list() evictable pages.
+        */
+       if (PageUnevictable(page))
+               return ret;
+
        ret = -EBUSY;
        if (likely(get_page_unless_zero(page))) {
                /*
                                /* else it is being freed elsewhere */
                                list_move(&cursor_page->lru, src);
                        default:
-                               break;
+                               break;  /* not on LRU or wrong list */
                        }
                }
        }
  * Returns -EBUSY if the page was not on an LRU list.
  *
  * The returned page will have PageLRU() cleared.  If it was found on
- * the active list, it will have PageActive set.  That flag may need
- * to be cleared by the caller before letting the page go.
+ * the active list, it will have PageActive set.  If it was found on
+ * the unevictable list, it will have the PageUnevictable bit set. That flag
+ * may need to be cleared by the caller before letting the page go.
  *
  * The vmstat statistic corresponding to the list on which the page was
  * found will be decremented.
 
                spin_lock_irq(&zone->lru_lock);
                if (PageLRU(page) && get_page_unless_zero(page)) {
-                       int lru = LRU_BASE;
+                       int lru = page_lru(page);
                        ret = 0;
                        ClearPageLRU(page);
 
-                       lru += page_is_file_cache(page) + !!PageActive(page);
                        del_page_from_lru_list(zone, page, lru);
                }
                spin_unlock_irq(&zone->lru_lock);
                 * Put back any unfreeable pages.
                 */
                while (!list_empty(&page_list)) {
+                       int lru;
                        page = lru_to_page(&page_list);
                        VM_BUG_ON(PageLRU(page));
-                       SetPageLRU(page);
                        list_del(&page->lru);
-                       add_page_to_lru_list(zone, page, page_lru(page));
+                       if (unlikely(!page_evictable(page, NULL))) {
+                               spin_unlock_irq(&zone->lru_lock);
+                               putback_lru_page(page);
+                               spin_lock_irq(&zone->lru_lock);
+                               continue;
+                       }
+                       SetPageLRU(page);
+                       lru = page_lru(page);
+                       add_page_to_lru_list(zone, page, lru);
+                       mem_cgroup_move_lists(page, lru);
                        if (PageActive(page) && scan_global_lru(sc)) {
                                int file = !!page_is_file_cache(page);
                                zone->recent_rotated[file]++;
                page = lru_to_page(&l_hold);
                list_del(&page->lru);
 
+               if (unlikely(!page_evictable(page, NULL))) {
+                       putback_lru_page(page);
+                       continue;
+               }
+
                /* page_referenced clears PageReferenced */
                if (page_mapping_inuse(page) &&
                    page_referenced(page, 0, sc->mem_cgroup))
                ClearPageActive(page);
 
                list_move(&page->lru, &zone->lru[lru].list);
-               mem_cgroup_move_lists(page, false);
+               mem_cgroup_move_lists(page, lru);
                pgmoved++;
                if (!pagevec_add(&pvec, page)) {
                        __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
 
        get_scan_ratio(zone, sc, percent);
 
-       for_each_lru(l) {
+       for_each_evictable_lru(l) {
                if (scan_global_lru(sc)) {
                        int file = is_file_lru(l);
                        int scan;
 
        while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
                                        nr[LRU_INACTIVE_FILE]) {
-               for_each_lru(l) {
+               for_each_evictable_lru(l) {
                        if (nr[l]) {
                                nr_to_scan = min(nr[l],
                                        (unsigned long)sc->swap_cluster_max);
                if (zone_is_all_unreclaimable(zone) && prio != DEF_PRIORITY)
                        continue;
 
-               for_each_lru(l) {
-                       /* For pass = 0 we don't shrink the active list */
+               for_each_evictable_lru(l) {
+                       /* For pass = 0, we don't shrink the active list */
                        if (pass == 0 &&
                                (l == LRU_ACTIVE || l == LRU_ACTIVE_FILE))
                                continue;
        return ret;
 }
 #endif
+
+#ifdef CONFIG_UNEVICTABLE_LRU
+/**
+ * page_evictable - test whether a page is evictable
+ * @page: the page to test
+ * @vma: the VMA in which the page is or will be mapped, may be NULL
+ *
+ * Test whether the page is evictable, i.e. whether it should go on the
+ * active/inactive lists rather than on the unevictable list.
+ *
+ * Reasons page might not be evictable:
+ * TODO - later patches
+ */
+int page_evictable(struct page *page, struct vm_area_struct *vma)
+{
+       /* TODO:  test page [!]evictable conditions */
+
+       return 1;
+}
+#endif /* CONFIG_UNEVICTABLE_LRU */