#include <linux/backing-dev.h>
#include <linux/memcontrol.h>
+#include "internal.h"
+
/* How many pages do we try to swap or page in/out together? */
int page_cluster;
	spin_unlock_irq(&zone->lru_lock);
}
+/**
+ * lru_cache_add_active_or_unevictable
+ * @page: the page to be added to LRU
+ * @vma: vma in which page is mapped for determining reclaimability
+ *
+ * Place @page on the active or unevictable LRU list, depending on
+ * page_evictable(). Note that if the page is not evictable,
+ * it goes directly back onto its zone's unevictable list. It does
+ * NOT use a per-cpu pagevec.
+ */
+void lru_cache_add_active_or_unevictable(struct page *page,
+					 struct vm_area_struct *vma)
+{
+	if (page_evictable(page, vma))
+		lru_cache_add_lru(page, LRU_ACTIVE + page_is_file_cache(page));
+	else
+		add_page_to_unevictable_list(page);
+}
+
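/*
 * Illustrative sketch, not part of this patch: a fault path that has
 * just set up a new anonymous page would call the helper above rather
 * than going through the per-cpu pagevecs, so a page in an mlocked VMA
 * can never sit on a pagevec destined for an evictable list. The
 * function name below is hypothetical.
 */
static void fault_add_new_anon_page(struct page *page,
				    struct vm_area_struct *vma)
{
	SetPageSwapBacked(page);	/* new anonymous pages are swap backed */
	lru_cache_add_active_or_unevictable(page, vma);
}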
/*
 * Drain pages out of the cpu's pagevecs.
 * Either "cpu" is the current CPU, and preemption has already been
 * disabled; or "cpu" is being hot-unplugged, and is already dead.
 */
	put_cpu();
}
-#ifdef CONFIG_NUMA
static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
	lru_add_drain();
}

/*
 * Returns 0 for success
 */
int lru_add_drain_all(void)
{
	return schedule_on_each_cpu(lru_add_drain_per_cpu);
}

-#else
-
-/*
- * Returns 0 for success
- */
-int lru_add_drain_all(void)
-{
-	lru_add_drain();
-	return 0;
-}
-#endif
-
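/*
 * Illustrative sketch, not part of this patch: with the CONFIG_NUMA
 * special case gone, any caller that needs every CPU's pagevecs
 * flushed takes the same workqueue-based path on all configurations.
 * The caller below is hypothetical; schedule_on_each_cpu() can fail,
 * so the return value still needs checking.
 */
static int drain_before_scan(void)
{
	int ret;

	ret = lru_add_drain_all();	/* runs lru_add_drain() on each CPU */
	if (ret)
		return ret;
	/* ... proceed knowing no pages are hiding in per-cpu pagevecs ... */
	return 0;
}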
/*
 * Batched page_cache_release(). Decrement the reference count on all the
 * passed pages. If it fell to zero then remove the page from the LRU and
 * free it.
 */
	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct zone *pagezone = page_zone(page);
+		int file;

		if (pagezone != zone) {
			if (zone)
				spin_unlock_irq(&zone->lru_lock);
			zone = pagezone;
			spin_lock_irq(&zone->lru_lock);
		}
		VM_BUG_ON(PageActive(page));
		VM_BUG_ON(PageUnevictable(page));
		VM_BUG_ON(PageLRU(page));
		SetPageLRU(page);
-		if (is_active_lru(lru))
+		file = is_file_lru(lru);
+		zone->recent_scanned[file]++;
+		if (is_active_lru(lru)) {
			SetPageActive(page);
+			zone->recent_rotated[file]++;
+		}
		add_page_to_lru_list(zone, page, lru);
	}
	if (zone)
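/*
 * Illustrative sketch, not part of this patch: the counters bumped
 * above are what reclaim uses to balance pressure between the anon
 * and file LRU lists. Modeled loosely on get_scan_ratio() in
 * mm/vmscan.c; the helper name and scaling below are hypothetical.
 */
static unsigned long rotate_ratio(struct zone *zone, int file)
{
	/*
	 * Many rotations per scanned page means this list's pages are
	 * being actively reused, so reclaim should look at the other
	 * list instead. The +1 terms avoid dividing by zero.
	 */
	return (zone->recent_rotated[file] + 1) * 100 /
	       (zone->recent_scanned[file] + 1);
}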