diff --git a/mm/vmscan.c b/mm/vmscan.c
index e9813b06c7a..1c8e75a1cfc 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -284,12 +284,8 @@ static void handle_write_error(struct address_space *mapping,
 				struct page *page, int error)
 {
 	lock_page(page);
-	if (page_mapping(page) == mapping) {
-		if (error == -ENOSPC)
-			set_bit(AS_ENOSPC, &mapping->flags);
-		else
-			set_bit(AS_EIO, &mapping->flags);
-	}
+	if (page_mapping(page) == mapping)
+		mapping_set_error(mapping, error);
 	unlock_page(page);
 }
 
@@ -679,7 +675,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 
 		nr_taken = isolate_lru_pages(sc->swap_cluster_max,
 					     &zone->inactive_list,
					     &page_list, &nr_scan);
-		zone->nr_inactive -= nr_taken;
+		__mod_zone_page_state(zone, NR_INACTIVE, -nr_taken);
 		zone->pages_scanned += nr_scan;
 		spin_unlock_irq(&zone->lru_lock);
@@ -692,7 +688,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 			__count_vm_events(KSWAPD_STEAL, nr_freed);
 		} else
 			__count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scan);
-		__count_vm_events(PGACTIVATE, nr_freed);
+		__count_zone_vm_events(PGSTEAL, zone, nr_freed);
 
 		if (nr_taken == 0)
 			goto done;
@@ -740,7 +736,8 @@ static inline void note_zone_scanning_priority(struct zone *zone, int priority)
 
 static inline int zone_is_near_oom(struct zone *zone)
 {
-	return zone->pages_scanned >= (zone->nr_active + zone->nr_inactive)*3;
+	return zone->pages_scanned >= (zone_page_state(zone, NR_ACTIVE)
+				+ zone_page_state(zone, NR_INACTIVE))*3;
 }
 
 /*
@@ -825,7 +822,7 @@ force_reclaim_mapped:
 	pgmoved = isolate_lru_pages(nr_pages, &zone->active_list,
 			    &l_hold, &pgscanned);
 	zone->pages_scanned += pgscanned;
-	zone->nr_active -= pgmoved;
+	__mod_zone_page_state(zone, NR_ACTIVE, -pgmoved);
 	spin_unlock_irq(&zone->lru_lock);
 
 	while (!list_empty(&l_hold)) {
@@ -857,7 +854,7 @@ force_reclaim_mapped:
 		list_move(&page->lru, &zone->inactive_list);
 		pgmoved++;
 		if (!pagevec_add(&pvec, page)) {
-			zone->nr_inactive += pgmoved;
+			__mod_zone_page_state(zone, NR_INACTIVE, pgmoved);
 			spin_unlock_irq(&zone->lru_lock);
 			pgdeactivate += pgmoved;
 			pgmoved = 0;
@@ -867,7 +864,7 @@ force_reclaim_mapped:
 			spin_lock_irq(&zone->lru_lock);
 		}
 	}
-	zone->nr_inactive += pgmoved;
+	__mod_zone_page_state(zone, NR_INACTIVE, pgmoved);
 	pgdeactivate += pgmoved;
 	if (buffer_heads_over_limit) {
 		spin_unlock_irq(&zone->lru_lock);
@@ -885,14 +882,14 @@ force_reclaim_mapped:
 		list_move(&page->lru, &zone->active_list);
 		pgmoved++;
 		if (!pagevec_add(&pvec, page)) {
-			zone->nr_active += pgmoved;
+			__mod_zone_page_state(zone, NR_ACTIVE, pgmoved);
 			pgmoved = 0;
 			spin_unlock_irq(&zone->lru_lock);
 			__pagevec_release(&pvec);
 			spin_lock_irq(&zone->lru_lock);
 		}
 	}
-	zone->nr_active += pgmoved;
+	__mod_zone_page_state(zone, NR_ACTIVE, pgmoved);
 
 	__count_zone_vm_events(PGREFILL, zone, pgscanned);
 	__count_vm_events(PGDEACTIVATE, pgdeactivate);
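The conversions above are mechanical: the ad-hoc zone->nr_active and zone->nr_inactive fields move into the generic per-zone vmstat counters, written through __mod_zone_page_state() and read back through zone_page_state(). Below is a minimal userspace sketch of that counter scheme, assuming a plain array per zone; the two-zone setup, the _model-free helper bodies, and MAX_ZONES are illustrative stand-ins, and the kernel's real implementation adds per-CPU batching and atomics that this model omits.

#include <stdio.h>

enum zone_stat_item { NR_ACTIVE, NR_INACTIVE, NR_VM_ZONE_STAT_ITEMS };

/* Stand-in for struct zone: every LRU size is a slot in one counter
 * array instead of a bespoke field per statistic. */
struct zone {
	long vm_stat[NR_VM_ZONE_STAT_ITEMS];
};

#define MAX_ZONES 2
static struct zone zones[MAX_ZONES];

/* Mirrors __mod_zone_page_state(zone, item, delta). */
static void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
				long delta)
{
	zone->vm_stat[item] += delta;
}

/* Mirrors zone_page_state(zone, item). */
static long zone_page_state(struct zone *zone, enum zone_stat_item item)
{
	return zone->vm_stat[item];
}

/* Mirrors global_page_state(item): the system-wide value is just the
 * sum of the per-zone counters. */
static long global_page_state(enum zone_stat_item item)
{
	long total = 0;
	int i;

	for (i = 0; i < MAX_ZONES; i++)
		total += zones[i].vm_stat[item];
	return total;
}

int main(void)
{
	mod_zone_page_state(&zones[0], NR_ACTIVE, 128);
	mod_zone_page_state(&zones[1], NR_INACTIVE, 64);
	mod_zone_page_state(&zones[0], NR_ACTIVE, -28);	/* pages reclaimed */

	printf("lru pages: %ld\n",
	       global_page_state(NR_ACTIVE) + global_page_state(NR_INACTIVE));
	return 0;
}

The payoff shows up in the later hunks: once every LRU size lives in the common array, a system-wide total such as count_lru_pages() is a sum over global_page_state() items rather than a for_each_zone() walk.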
@@ -918,14 +915,16 @@ static unsigned long shrink_zone(int priority, struct zone *zone,
 	 * Add one to `nr_to_scan' just to make sure that the kernel will
 	 * slowly sift through the active list.
 	 */
-	zone->nr_scan_active += (zone->nr_active >> priority) + 1;
+	zone->nr_scan_active +=
+		(zone_page_state(zone, NR_ACTIVE) >> priority) + 1;
 	nr_active = zone->nr_scan_active;
 	if (nr_active >= sc->swap_cluster_max)
 		zone->nr_scan_active = 0;
 	else
 		nr_active = 0;
 
-	zone->nr_scan_inactive += (zone->nr_inactive >> priority) + 1;
+	zone->nr_scan_inactive +=
+		(zone_page_state(zone, NR_INACTIVE) >> priority) + 1;
 	nr_inactive = zone->nr_scan_inactive;
 	if (nr_inactive >= sc->swap_cluster_max)
 		zone->nr_scan_inactive = 0;
@@ -949,7 +948,7 @@ static unsigned long shrink_zone(int priority, struct zone *zone,
 		}
 	}
 
-	throttle_vm_writeout();
+	throttle_vm_writeout(sc->gfp_mask);
 
 	atomic_dec(&zone->reclaim_in_progress);
 	return nr_reclaimed;
@@ -1037,7 +1036,8 @@ unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
 		if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
 			continue;
 
-		lru_pages += zone->nr_active + zone->nr_inactive;
+		lru_pages += zone_page_state(zone, NR_ACTIVE)
+				+ zone_page_state(zone, NR_INACTIVE);
 	}
 
 	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
@@ -1182,7 +1182,8 @@ loop_again:
 		for (i = 0; i <= end_zone; i++) {
 			struct zone *zone = pgdat->node_zones + i;
 
-			lru_pages += zone->nr_active + zone->nr_inactive;
+			lru_pages += zone_page_state(zone, NR_ACTIVE)
+				+ zone_page_state(zone, NR_INACTIVE);
 		}
 
 		/*
@@ -1219,8 +1220,9 @@ loop_again:
 			if (zone->all_unreclaimable)
 				continue;
 			if (nr_slab == 0 && zone->pages_scanned >=
-				    (zone->nr_active + zone->nr_inactive) * 6)
-					zone->all_unreclaimable = 1;
+				(zone_page_state(zone, NR_ACTIVE)
+				+ zone_page_state(zone, NR_INACTIVE)) * 6)
+					zone->all_unreclaimable = 1;
 			/*
 			 * If we've done a decent amount of scanning and
 			 * the reclaim ratio is low, start doing writepage
@@ -1317,8 +1319,6 @@ static int kswapd(void *p)
 	for ( ; ; ) {
 		unsigned long new_order;
 
-		try_to_freeze();
-
 		prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
 		new_order = pgdat->kswapd_max_order;
 		pgdat->kswapd_max_order = 0;
@@ -1329,12 +1329,19 @@ static int kswapd(void *p)
 			 */
 			order = new_order;
 		} else {
-			schedule();
+			if (!freezing(current))
+				schedule();
+
 			order = pgdat->kswapd_max_order;
 		}
 		finish_wait(&pgdat->kswapd_wait, &wait);
 
-		balance_pgdat(pgdat, order);
+		if (!try_to_freeze()) {
+			/* We can speed up thawing tasks if we don't call
+			 * balance_pgdat after returning from the refrigerator
+			 */
+			balance_pgdat(pgdat, order);
+		}
 	}
 	return 0;
 }
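The two kswapd hunks above change how the thread cooperates with the suspend freezer: checking freezing(current) before schedule() keeps kswapd from sleeping through a freeze request, and skipping balance_pgdat() when try_to_freeze() reports that the task just thawed avoids a pointless reclaim pass during suspend. A single-threaded model of that control flow follows; the *_model helpers and the freeze_pending flag are invented stand-ins for the kernel's freezing()/try_to_freeze() API, not its actual implementation.

#include <stdbool.h>
#include <stdio.h>

static bool freeze_pending;	/* stands in for freezing(current) */

/* Stands in for try_to_freeze(): in the kernel this enters the
 * refrigerator and blocks until thawed; here it just consumes the
 * pending flag and reports whether the task "froze". */
static bool try_to_freeze_model(void)
{
	bool frozen = freeze_pending;

	freeze_pending = false;
	return frozen;
}

static void balance_pgdat_model(int order)
{
	printf("reclaiming at order %d\n", order);
}

static void kswapd_iteration(int order)
{
	/* Refuse to sleep while a freeze is pending, mirroring the
	 * "if (!freezing(current)) schedule();" hunk. */
	if (!freeze_pending) {
		/* schedule() would block here until the next wakeup */
	}

	/* If the wakeup came from the freezer, skip the reclaim pass;
	 * that is what the patch's comment about speeding up thawing
	 * refers to. */
	if (!try_to_freeze_model())
		balance_pgdat_model(order);
}

int main(void)
{
	kswapd_iteration(0);	/* normal wakeup: runs reclaim */
	freeze_pending = true;
	kswapd_iteration(0);	/* freezer wakeup: skips reclaim */
	return 0;
}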
@@ -1369,8 +1376,8 @@ void wakeup_kswapd(struct zone *zone, int order)
  *
  * For pass > 3 we also try to shrink the LRU lists that contain a few pages
  */
-static unsigned long shrink_all_zones(unsigned long nr_pages, int pass,
-				int prio, struct scan_control *sc)
+static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
+				int pass, struct scan_control *sc)
 {
 	struct zone *zone;
 	unsigned long nr_to_scan, ret = 0;
@@ -1385,18 +1392,22 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int pass,
 
 		/* For pass = 0 we don't shrink the active list */
 		if (pass > 0) {
-			zone->nr_scan_active += (zone->nr_active >> prio) + 1;
+			zone->nr_scan_active +=
+				(zone_page_state(zone, NR_ACTIVE) >> prio) + 1;
 			if (zone->nr_scan_active >= nr_pages || pass > 3) {
 				zone->nr_scan_active = 0;
-				nr_to_scan = min(nr_pages, zone->nr_active);
+				nr_to_scan = min(nr_pages,
+					zone_page_state(zone, NR_ACTIVE));
 				shrink_active_list(nr_to_scan, zone, sc, prio);
 			}
 		}
 
-		zone->nr_scan_inactive += (zone->nr_inactive >> prio) + 1;
+		zone->nr_scan_inactive +=
+			(zone_page_state(zone, NR_INACTIVE) >> prio) + 1;
 		if (zone->nr_scan_inactive >= nr_pages || pass > 3) {
 			zone->nr_scan_inactive = 0;
-			nr_to_scan = min(nr_pages, zone->nr_inactive);
+			nr_to_scan = min(nr_pages,
+				zone_page_state(zone, NR_INACTIVE));
 			ret += shrink_inactive_list(nr_to_scan, zone, sc);
 			if (ret >= nr_pages)
 				return ret;
@@ -1406,6 +1417,11 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int pass,
 	return ret;
 }
 
+static unsigned long count_lru_pages(void)
+{
+	return global_page_state(NR_ACTIVE) + global_page_state(NR_INACTIVE);
+}
+
 /*
  * Try to free `nr_pages' of memory, system-wide, and return the number of
  * freed pages.
@@ -1420,7 +1436,6 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 	unsigned long ret = 0;
 	int pass;
 	struct reclaim_state reclaim_state;
-	struct zone *zone;
 	struct scan_control sc = {
 		.gfp_mask = GFP_KERNEL,
 		.may_swap = 0,
@@ -1431,10 +1446,7 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 
 	current->reclaim_state = &reclaim_state;
 
-	lru_pages = 0;
-	for_each_zone(zone)
-		lru_pages += zone->nr_active + zone->nr_inactive;
-
+	lru_pages = count_lru_pages();
 	nr_slab = global_page_state(NR_SLAB_RECLAIMABLE);
 	/* If slab caches are huge, it's better to hit them first */
 	while (nr_slab >= lru_pages) {
@@ -1461,13 +1473,6 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 	for (pass = 0; pass < 5; pass++) {
 		int prio;
 
-		/* Needed for shrinking slab caches later on */
-		if (!lru_pages)
-			for_each_zone(zone) {
-				lru_pages += zone->nr_active;
-				lru_pages += zone->nr_inactive;
-			}
-
 		/* Force reclaiming mapped pages in the passes #3 and #4 */
 		if (pass > 2) {
 			sc.may_swap = 1;
@@ -1483,7 +1488,8 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 				goto out;
 
 			reclaim_state.reclaimed_slab = 0;
-			shrink_slab(sc.nr_scanned, sc.gfp_mask, lru_pages);
+			shrink_slab(sc.nr_scanned, sc.gfp_mask,
+					count_lru_pages());
 			ret += reclaim_state.reclaimed_slab;
 			if (ret >= nr_pages)
 				goto out;
@@ -1491,20 +1497,19 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 			if (sc.nr_scanned && prio < DEF_PRIORITY - 2)
 				congestion_wait(WRITE, HZ / 10);
 		}
-
-		lru_pages = 0;
 	}
 
 	/*
 	 * If ret = 0, we could not shrink LRUs, but there may be something
 	 * in slab caches
 	 */
-	if (!ret)
+	if (!ret) {
 		do {
 			reclaim_state.reclaimed_slab = 0;
-			shrink_slab(nr_pages, sc.gfp_mask, lru_pages);
+			shrink_slab(nr_pages, sc.gfp_mask, count_lru_pages());
 			ret += reclaim_state.reclaimed_slab;
 		} while (ret < nr_pages && reclaim_state.reclaimed_slab > 0);
+	}
 
 out:
 	current->reclaim_state = NULL;
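Looping back to the first hunk: mapping_set_error() absorbs the open-coded AS_ENOSPC/AS_EIO selection that handle_write_error() used to carry. A small runnable model of the factored-out logic follows; the mapping_model struct, the _model helper name, and the AS_* bit values are illustrative stand-ins modeled on the removed lines, not the kernel's definitions.

#include <errno.h>
#include <stdio.h>

/* Model of the AS_* bits that the real struct address_space keeps
 * in mapping->flags. */
enum { AS_EIO = 0, AS_ENOSPC = 1 };

struct mapping_model {
	unsigned long flags;
};

/* What mapping_set_error() factors out of handle_write_error():
 * -ENOSPC gets its own flag so callers can distinguish "disk full"
 * from generic I/O failure; every other error collapses to AS_EIO. */
static void mapping_set_error_model(struct mapping_model *m, int error)
{
	if (!error)
		return;
	if (error == -ENOSPC)
		m->flags |= 1UL << AS_ENOSPC;
	else
		m->flags |= 1UL << AS_EIO;
}

int main(void)
{
	struct mapping_model m = { 0 };

	mapping_set_error_model(&m, -ENOSPC);
	mapping_set_error_model(&m, -EIO);
	printf("flags: %#lx\n", m.flags);	/* both bits set */
	return 0;
}

Centralizing the mapping so that later fsync()/msync() reporting sees one consistent flag-setting path is the point of the helper; callers like this one only need the "same mapping, record the error" check.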