X-Git-Url: http://pilppa.org/gitweb/gitweb.cgi?a=blobdiff_plain;f=mm%2Fvmscan.c;h=b0af7593d01e315a83c79ec6841c9a4a3b91c1e1;hb=7bb3b137abf2b7073e683c14cfe062d811d35247;hp=5e98b86feb74d816c316f383fde56d42f01ba1f0;hpb=a3351e525e4768c29aa5d22ef59b5b38e0361e53;p=linux-2.6-omap-h63xx.git

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 5e98b86feb7..b0af7593d01 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -443,6 +443,10 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc)
 		BUG_ON(PageActive(page));
 
 		sc->nr_scanned++;
+
+		if (!sc->may_swap && page_mapped(page))
+			goto keep_locked;
+
 		/* Double the slab pressure for mapped and swapcache pages */
 		if (page_mapped(page) || PageSwapCache(page))
 			sc->nr_scanned++;
@@ -614,6 +618,15 @@ int putback_lru_pages(struct list_head *l)
 	return count;
 }
 
+/*
+ * Non migratable page
+ */
+int fail_migrate_page(struct page *newpage, struct page *page)
+{
+	return -EIO;
+}
+EXPORT_SYMBOL(fail_migrate_page);
+
 /*
  * swapout a single page
  * page is locked upon entry, unlocked on exit
@@ -623,7 +636,7 @@ static int swap_page(struct page *page)
 	struct address_space *mapping = page_mapping(page);
 
 	if (page_mapped(page) && mapping)
-		if (try_to_unmap(page, 0) != SWAP_SUCCESS)
+		if (try_to_unmap(page, 1) != SWAP_SUCCESS)
 			goto unlock_retry;
 
 	if (PageDirty(page)) {
@@ -659,6 +672,7 @@ unlock_retry:
 retry:
 	return -EAGAIN;
 }
+EXPORT_SYMBOL(swap_page);
 
 /*
  * Page migration was first developed in the context of the memory hotplug
@@ -674,7 +688,7 @@ retry:
  * Remove references for a page and establish the new page with the correct
  * basic settings to be able to stop accesses to the page.
  */
-static int migrate_page_remove_references(struct page *newpage,
+int migrate_page_remove_references(struct page *newpage,
 				struct page *page, int nr_refs)
 {
 	struct address_space *mapping = page_mapping(page);
@@ -749,6 +763,7 @@ static int migrate_page_remove_references(struct page *newpage,
 
 	return 0;
 }
+EXPORT_SYMBOL(migrate_page_remove_references);
 
 /*
  * Copy the page to its new location
@@ -788,6 +803,7 @@ void migrate_page_copy(struct page *newpage, struct page *page)
 	if (PageWriteback(newpage))
 		end_page_writeback(newpage);
 }
+EXPORT_SYMBOL(migrate_page_copy);
 
 /*
  * Common logic to directly migrate a single page suitable for
@@ -815,6 +831,7 @@ int migrate_page(struct page *newpage, struct page *page)
 	remove_from_swap(newpage);
 	return 0;
 }
+EXPORT_SYMBOL(migrate_page);
 
 /*
  * migrate_pages
@@ -826,7 +843,7 @@ int migrate_page(struct page *newpage, struct page *page)
  * pages are swapped out.
 *
 * The function returns after 10 attempts or if no pages
- * are movable anymore because t has become empty
+ * are movable anymore because to has become empty
 * or no retryable pages exist anymore.
 *
 * Return: Number of pages not migrated when "to" ran empty.
@@ -914,8 +931,22 @@ redo:
 		if (!mapping)
 			goto unlock_both;
 
+		if (mapping->a_ops->migratepage) {
+			/*
+			 * Most pages have a mapping and most filesystems
+			 * should provide a migration function. Anonymous
+			 * pages are part of swap space which also has its
+			 * own migration function. This is the most common
+			 * path for page migration.
+			 */
+			rc = mapping->a_ops->migratepage(newpage, page);
+			goto unlock_both;
+		}
+
 		/*
-		 * Trigger writeout if page is dirty
+		 * Default handling if a filesystem does not provide
+		 * a migration function. We can only migrate clean
+		 * pages so try to write out any dirty pages first.
 		 */
 		if (PageDirty(page)) {
 			switch (pageout(page, mapping)) {
@@ -931,9 +962,10 @@ redo:
 				; /* try to migrate the page below */
 			}
 		}
+
 		/*
-		 * If we have no buffer or can release the buffer
-		 * then do a simple migration.
+		 * Buffers are managed in a filesystem specific way.
+		 * We must have no buffers or drop them.
 		 */
 		if (!page_has_buffers(page) ||
 				try_to_release_page(page, GFP_KERNEL)) {
@@ -948,6 +980,11 @@ redo:
 		 * swap them out.
 		 */
 		if (pass > 4) {
+			/*
+			 * Persistently unable to drop buffers..... As a
+			 * measure of last resort we fall back to
+			 * swap_page().
+			 */
 			unlock_page(newpage);
 			newpage = NULL;
 			rc = swap_page(page);
@@ -1158,9 +1195,47 @@ refill_inactive_zone(struct zone *zone, struct scan_control *sc)
 	struct page *page;
 	struct pagevec pvec;
 	int reclaim_mapped = 0;
-	long mapped_ratio;
-	long distress;
-	long swap_tendency;
+
+	if (unlikely(sc->may_swap)) {
+		long mapped_ratio;
+		long distress;
+		long swap_tendency;
+
+		/*
+		 * `distress' is a measure of how much trouble we're having
+		 * reclaiming pages. 0 -> no problems. 100 -> great trouble.
+		 */
+		distress = 100 >> zone->prev_priority;
+
+		/*
+		 * The point of this algorithm is to decide when to start
+		 * reclaiming mapped memory instead of just pagecache. Work out
+		 * how much memory
+		 * is mapped.
+		 */
+		mapped_ratio = (sc->nr_mapped * 100) / total_memory;
+
+		/*
+		 * Now decide how much we really want to unmap some pages. The
+		 * mapped ratio is downgraded - just because there's a lot of
+		 * mapped memory doesn't necessarily mean that page reclaim
+		 * isn't succeeding.
+		 *
+		 * The distress ratio is important - we don't want to start
+		 * going oom.
+		 *
+		 * A 100% value of vm_swappiness overrides this algorithm
+		 * altogether.
+		 */
+		swap_tendency = mapped_ratio / 2 + distress + vm_swappiness;
+
+		/*
+		 * Now use this metric to decide whether to start moving mapped
+		 * memory onto the inactive list.
+		 */
+		if (swap_tendency >= 100)
+			reclaim_mapped = 1;
+	}
 
 	lru_add_drain();
 	spin_lock_irq(&zone->lru_lock);
@@ -1170,37 +1245,6 @@ refill_inactive_zone(struct zone *zone, struct scan_control *sc)
 	zone->nr_active -= pgmoved;
 	spin_unlock_irq(&zone->lru_lock);
 
-	/*
-	 * `distress' is a measure of how much trouble we're having reclaiming
-	 * pages. 0 -> no problems. 100 -> great trouble.
-	 */
-	distress = 100 >> zone->prev_priority;
-
-	/*
-	 * The point of this algorithm is to decide when to start reclaiming
-	 * mapped memory instead of just pagecache. Work out how much memory
-	 * is mapped.
-	 */
-	mapped_ratio = (sc->nr_mapped * 100) / total_memory;
-
-	/*
-	 * Now decide how much we really want to unmap some pages. The mapped
-	 * ratio is downgraded - just because there's a lot of mapped memory
-	 * doesn't necessarily mean that page reclaim isn't succeeding.
-	 *
-	 * The distress ratio is important - we don't want to start going oom.
-	 *
-	 * A 100% value of vm_swappiness overrides this algorithm altogether.
-	 */
-	swap_tendency = mapped_ratio / 2 + distress + vm_swappiness;
-
-	/*
-	 * Now use this metric to decide whether to start moving mapped memory
-	 * onto the inactive list.
-	 */
-	if (swap_tendency >= 100)
-		reclaim_mapped = 1;
-
 	while (!list_empty(&l_hold)) {
 		cond_resched();
 		page = lru_to_page(&l_hold);
@@ -1577,9 +1621,7 @@ scan:
 			sc.nr_reclaimed = 0;
 			sc.priority = priority;
 			sc.swap_cluster_max = nr_pages? nr_pages : SWAP_CLUSTER_MAX;
-			atomic_inc(&zone->reclaim_in_progress);
 			shrink_zone(zone, &sc);
-			atomic_dec(&zone->reclaim_in_progress);
 			reclaim_state->reclaimed_slab = 0;
 			nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
 						lru_pages);
@@ -1866,7 +1908,12 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 	sc.swap_cluster_max = SWAP_CLUSTER_MAX;
 
 	cond_resched();
-	p->flags |= PF_MEMALLOC;
+	/*
+	 * We need to be able to allocate from the reserves for RECLAIM_SWAP
+	 * and we also need to be able to write out pages for RECLAIM_WRITE
+	 * and RECLAIM_SWAP.
+	 */
+	p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
 	reclaim_state.reclaimed_slab = 0;
 	p->reclaim_state = &reclaim_state;
 
@@ -1890,11 +1937,10 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 		 * a long time.
 		 */
 		shrink_slab(sc.nr_scanned, gfp_mask, order);
-		sc.nr_reclaimed = 1; /* Avoid getting the off node timeout */
 	}
 
 	p->reclaim_state = NULL;
-	current->flags &= ~PF_MEMALLOC;
+	current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
 
 	if (sc.nr_reclaimed == 0)
 		zone->last_unsuccessful_zone_reclaim = jiffies;
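
For reference, a minimal userspace sketch of the swap_tendency heuristic that the refill_inactive_zone() hunk above introduces. This is not kernel code: the function name should_reclaim_mapped() and the example inputs (nr_mapped, total_memory, prev_priority, vm_swappiness) are hypothetical stand-ins for the kernel state; only the arithmetic mirrors the patch.

#include <stdio.h>

/*
 * Illustrative sketch only: reproduces the distress / mapped_ratio /
 * swap_tendency calculation from the hunk above outside the kernel.
 */
static int should_reclaim_mapped(long nr_mapped, long total_memory,
				 int prev_priority, int vm_swappiness)
{
	/* 0 -> no reclaim trouble, 100 -> great trouble */
	long distress = 100 >> prev_priority;

	/* percentage of memory currently mapped into address spaces */
	long mapped_ratio = (nr_mapped * 100) / total_memory;

	/* vm_swappiness == 100 reaches the threshold on its own */
	long swap_tendency = mapped_ratio / 2 + distress + vm_swappiness;

	return swap_tendency >= 100;
}

int main(void)
{
	/* Example: 40% of memory mapped, no distress, default swappiness 60 */
	printf("reclaim_mapped = %d\n",
	       should_reclaim_mapped(400, 1000, 12, 60));
	return 0;
}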