* the page.
*/
if (!mapping || page_mapcount(page) + nr_refs != page_count(page))
- return 1;
+ return -EAGAIN;
/*
* Establish swap ptes for anonymous pages or destroy pte
* maps for files. If the page was not migrated then the
* PageSwapCache bit is still set and the operation may
* continue.
*/
- try_to_unmap(page, 1);
+ if (try_to_unmap(page, 1) == SWAP_FAIL)
+ /* A vma has VM_LOCKED set -> Permanent failure */
+ return -EPERM;
/*
* Give up if we were unable to remove all mappings.
*/
if (page_mapcount(page))
- return 1;
+ return -EAGAIN;
write_lock_irq(&mapping->tree_lock);
if (!page_mapping(page) || page_count(page) != nr_refs ||
*radix_pointer != page) {
write_unlock_irq(&mapping->tree_lock);
- return 1;
+ return -EAGAIN;
}
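
Returning distinct codes lets the caller separate a transient race from a
permanent condition. A minimal sketch of how a caller might branch on the
result; the switch itself is illustrative, not something this patch adds:

	rc = migrate_page_remove_references(newpage, page, 2);
	switch (rc) {
	case 0:		/* all references removed, migration may proceed */
		break;
	case -EAGAIN:	/* transient: extra references or ptes still live */
		break;	/* requeue the page and retry on a later pass */
	case -EPERM:	/* a vma has VM_LOCKED set; retrying cannot help */
		break;	/* drop the page from the migration list */
	}
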
/*
* Common logic to directly migrate a single page suitable for
* pages that do not use PagePrivate.
*
* Pages are locked upon entry and exit.
*/
int migrate_page(struct page *newpage, struct page *page)
{
+ int rc;
+
BUG_ON(PageWriteback(page)); /* Writeback must be complete */
- if (migrate_page_remove_references(newpage, page, 2))
- return -EAGAIN;
+ rc = migrate_page_remove_references(newpage, page, 2);
+
+ if (rc)
+ return rc;
migrate_page_copy(newpage, page);
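
Since migrate_page() now forwards rc instead of collapsing every failure
into -EAGAIN, a migration loop can apply per-page policy. A hedged sketch,
assuming a hypothetical page list, a "failed" list and a "retry" counter
that are not part of this patch:

	/* Hypothetical pass over a list of pages to be migrated. */
	list_for_each_entry_safe(page, page2, pagelist, lru) {
		rc = migrate_page(newpage, page); /* newpage allocated elsewhere */
		if (rc == -EPERM)
			list_move(&page->lru, &failed); /* permanent: never retry */
		else if (rc == -EAGAIN)
			retry++;			/* transient: try another pass */
	}
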
struct page *page;
struct pagevec pvec;
int reclaim_mapped = 0;
- long mapped_ratio;
- long distress;
- long swap_tendency;
+
+ if (unlikely(sc->may_swap)) {
+ long mapped_ratio;
+ long distress;
+ long swap_tendency;
+
+ /*
+ * `distress' is a measure of how much trouble we're having
+ * reclaiming pages. 0 -> no problems. 100 -> great trouble.
+ */
+ distress = 100 >> zone->prev_priority;
+
+ /*
+ * The point of this algorithm is to decide when to start
+ * reclaiming mapped memory instead of just pagecache. Work out
+ * how much memory is mapped.
+ */
+ mapped_ratio = (sc->nr_mapped * 100) / total_memory;
+
+ /*
+ * Now decide how much we really want to unmap some pages. The
+ * mapped ratio is downgraded - just because there's a lot of
+ * mapped memory doesn't necessarily mean that page reclaim
+ * isn't succeeding.
+ *
+ * The distress ratio is important - we don't want to start
+ * going oom.
+ *
+ * A 100% value of vm_swappiness overrides this algorithm
+ * altogether.
+ */
+ swap_tendency = mapped_ratio / 2 + distress + vm_swappiness;
+
+ /*
+ * Now use this metric to decide whether to start moving mapped
+ * memory onto the inactive list.
+ */
+ if (swap_tendency >= 100)
+ reclaim_mapped = 1;
+ }
lru_add_drain();
spin_lock_irq(&zone->lru_lock);
zone->nr_active -= pgmoved;
spin_unlock_irq(&zone->lru_lock);
- /*
- * `distress' is a measure of how much trouble we're having reclaiming
- * pages. 0 -> no problems. 100 -> great trouble.
- */
- distress = 100 >> zone->prev_priority;
-
- /*
- * The point of this algorithm is to decide when to start reclaiming
- * mapped memory instead of just pagecache. Work out how much memory
- * is mapped.
- */
- mapped_ratio = (sc->nr_mapped * 100) / total_memory;
-
- /*
- * Now decide how much we really want to unmap some pages. The mapped
- * ratio is downgraded - just because there's a lot of mapped memory
- * doesn't necessarily mean that page reclaim isn't succeeding.
- *
- * The distress ratio is important - we don't want to start going oom.
- *
- * A 100% value of vm_swappiness overrides this algorithm altogether.
- */
- swap_tendency = mapped_ratio / 2 + distress + vm_swappiness;
-
- /*
- * Now use this metric to decide whether to start moving mapped memory
- * onto the inactive list.
- */
- if (swap_tendency >= 100 && sc->may_swap)
- reclaim_mapped = 1;
-
while (!list_empty(&l_hold)) {
cond_resched();
page = lru_to_page(&l_hold);
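
To make the heuristic moved above concrete: at zone->prev_priority 6,
distress is 100 >> 6 = 1; with 30% of memory mapped and the default
vm_swappiness of 60, swap_tendency is 30/2 + 1 + 60 = 76, short of the
threshold, so only pagecache is reclaimed. At prev_priority 0, distress
alone is 100 and mapped pages are always eligible, and vm_swappiness = 100
reaches the threshold by itself. The same computation in isolation, as a
sketch (the helper name is an assumption, not from the patch):

	/* Hypothetical standalone form of the reclaim_mapped heuristic. */
	static int want_reclaim_mapped(int prev_priority, unsigned long nr_mapped,
					unsigned long total_memory, int swappiness)
	{
		long distress = 100 >> prev_priority;
		long mapped_ratio = (nr_mapped * 100) / total_memory;

		return mapped_ratio / 2 + distress + swappiness >= 100;
	}
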
sc.nr_reclaimed = 0;
sc.priority = priority;
sc.swap_cluster_max = nr_pages? nr_pages : SWAP_CLUSTER_MAX;
- atomic_inc(&zone->reclaim_in_progress);
shrink_zone(zone, &sc);
- atomic_dec(&zone->reclaim_in_progress);
reclaim_state->reclaimed_slab = 0;
nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
lru_pages);
if (!(gfp_mask & __GFP_WAIT) ||
zone->all_unreclaimable ||
- atomic_read(&zone->reclaim_in_progress) > 0)
+ atomic_read(&zone->reclaim_in_progress) > 0 ||
+ (p->flags & PF_MEMALLOC))
return 0;
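
The new PF_MEMALLOC test closes a recursion hole: zone_reclaim() itself
sets PF_MEMALLOC further down before calling shrink_zone(), so an
allocation made while we are already reclaiming must not reenter zone
reclaim. The scenario being guarded against, sketched as an assumption
rather than a trace from this patch:

	/*
	 * Assumed recursion scenario:
	 *
	 *   zone_reclaim()
	 *     p->flags |= PF_MEMALLOC;
	 *     shrink_zone()
	 *       ... allocation for swap or writeback ...
	 *         zone_reclaim()   <- PF_MEMALLOC already set,
	 *                             returns 0 instead of recursing
	 */
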
node_id = zone->zone_pgdat->node_id;
sc.swap_cluster_max = SWAP_CLUSTER_MAX;
cond_resched();
- p->flags |= PF_MEMALLOC;
+ /*
+ * We need to be able to allocate from the reserves for RECLAIM_SWAP
+ * and we also need to be able to write out pages for RECLAIM_WRITE
+ * and RECLAIM_SWAP.
+ */
+ p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
reclaim_state.reclaimed_slab = 0;
p->reclaim_state = &reclaim_state;
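
PF_SWAPWRITE is what allows this non-kswapd path to write to swap at all.
One plausible way the RECLAIM_WRITE/RECLAIM_SWAP bits named in the comment
feed the scan control, sketched under the assumption of a zone_reclaim_mode
bitmask that this hunk does not show:

	sc.may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE);
	sc.may_swap = !!(zone_reclaim_mode & RECLAIM_SWAP);
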
* a long time.
*/
shrink_slab(sc.nr_scanned, gfp_mask, order);
- sc.nr_reclaimed = 1; /* Avoid getting the off node timeout */
}
p->reclaim_state = NULL;
- current->flags &= ~PF_MEMALLOC;
+ current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
if (sc.nr_reclaimed == 0)
zone->last_unsuccessful_zone_reclaim = jiffies;
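
Timestamping the failure replaces the removed "sc.nr_reclaimed = 1" hack:
rather than faking progress to dodge the off-node timeout, an unsuccessful
run is recorded so the next attempt can be suppressed for a while. A sketch
of how the timestamp might be consumed on entry, with ZONE_RECLAIM_INTERVAL
as an assumed constant not defined in this hunk:

	if (time_before(jiffies, zone->last_unsuccessful_zone_reclaim +
				ZONE_RECLAIM_INTERVAL))
		return 0;	/* reclaim failed here recently, skip this zone */
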