int order;
- /*
- * Pages that have (or should have) IO pending. If we run into
- * a lot of these, we're better off waiting a little for IO to
- * finish rather than scanning more pages in the VM.
- */
- int nr_io_pages;
-
/* Which cgroup do we reclaim from */
struct mem_cgroup *mem_cgroup;
*/
if (sync_writeback == PAGEOUT_IO_SYNC && may_enter_fs)
wait_on_page_writeback(page);
- else {
- sc->nr_io_pages++;
+ else
goto keep_locked;
- }
}
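
For context: PAGEOUT_IO_SYNC reaches shrink_page_list() from the lumpy-reclaim
retry in shrink_inactive_list(). Roughly, per the synchronous-lumpy code of
this era (simplified sketch, not the verbatim source):

	if (nr_freed < nr_taken && !current_is_kswapd() &&
				sc->order > PAGE_ALLOC_COSTLY_ORDER) {
		congestion_wait(WRITE, HZ/10);

		/* second, synchronous pass over the same page list */
		nr_freed += shrink_page_list(&page_list, sc,
						PAGEOUT_IO_SYNC);
	}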
referenced = page_referenced(page, 1, sc->mem_cgroup);
if (PageDirty(page)) {
if (sc->order <= PAGE_ALLOC_COSTLY_ORDER && referenced)
goto keep_locked;
- if (!may_enter_fs) {
- sc->nr_io_pages++;
+ if (!may_enter_fs)
goto keep_locked;
- }
if (!sc->may_writepage)
goto keep_locked;
case PAGE_ACTIVATE:
goto activate_locked;
case PAGE_SUCCESS:
- if (PageWriteback(page) || PageDirty(page)) {
- sc->nr_io_pages++;
+ if (PageWriteback(page) || PageDirty(page))
goto keep;
- }
/*
* A synchronous write - probably a ramdisk. Go
 * ahead and try to reclaim the page.
 */

/*
 * If a zone is deemed to be full of pinned pages then just give it a light
* scan then give up on it.
*/
-static unsigned long shrink_zones(int priority, struct zone **zones,
+static unsigned long shrink_zones(int priority, struct zonelist *zonelist,
struct scan_control *sc)
{
+ enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
unsigned long nr_reclaimed = 0;
- int i;
-
+ struct zoneref *z;
+ struct zone *zone;
sc->all_unreclaimable = 1;
- for (i = 0; zones[i] != NULL; i++) {
- struct zone *zone = zones[i];
-
+ for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
if (!populated_zone(zone))
continue;
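
The zoneref-based iterator replaces the old NULL-terminated zone array walk.
Conceptually it behaves like the sketch below (an approximation, not the
literal for_each_zone_zonelist() expansion; zonelist_zone() and
zonelist_zone_idx() are helpers from the same zonelist series):

	struct zoneref *z;
	struct zone *zone;

	/* visit zones in fallback order, skipping any above high_zoneidx */
	for (z = zonelist->_zonerefs; zonelist_zone(z); z++) {
		if (zonelist_zone_idx(z) > high_zoneidx)
			continue;
		zone = zonelist_zone(z);
		/* ... per-zone work ... */
	}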
/*
 * If the caller is !__GFP_FS then the probability of a failure is reasonably
 * high - the zone may be full of dirty or under-writeback pages, which this
 * caller can't do much about.  We kick pdflush and take explicit naps in the
 * hope that some of these pages can be written.  But if the allocating task
 * holds filesystem locks which prevent writeout this might not work, and the
 * allocation attempt will fail.
+ *
+ * returns: 0, if no pages reclaimed
+ * else, the number of pages reclaimed
*/
-static unsigned long do_try_to_free_pages(struct zone **zones, gfp_t gfp_mask,
- struct scan_control *sc)
+static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
+ struct scan_control *sc)
{
int priority;
int ret = 0;
unsigned long nr_reclaimed = 0;
struct reclaim_state *reclaim_state = current->reclaim_state;
unsigned long lru_pages = 0;
- int i;
+ struct zoneref *z;
+ struct zone *zone;
+ enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
if (scan_global_lru(sc))
count_vm_event(ALLOCSTALL);
	/*
	 * mem_cgroup will not do shrink_slab.
*/
if (scan_global_lru(sc)) {
- for (i = 0; zones[i] != NULL; i++) {
- struct zone *zone = zones[i];
+ for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
continue;
for (priority = DEF_PRIORITY; priority >= 0; priority--) {
sc->nr_scanned = 0;
- sc->nr_io_pages = 0;
if (!priority)
disable_swap_token();
- nr_reclaimed += shrink_zones(priority, zones, sc);
+ nr_reclaimed += shrink_zones(priority, zonelist, sc);
/*
* Don't shrink slabs when reclaiming memory from
* over limit cgroups
*/
if (scan_global_lru(sc)) {
- shrink_slab(sc->nr_scanned, gfp_mask, lru_pages);
+ shrink_slab(sc->nr_scanned, sc->gfp_mask, lru_pages);
if (reclaim_state) {
nr_reclaimed += reclaim_state->reclaimed_slab;
reclaim_state->reclaimed_slab = 0;
}
total_scanned += sc->nr_scanned;
if (nr_reclaimed >= sc->swap_cluster_max) {
- ret = 1;
+ ret = nr_reclaimed;
goto out;
}
}
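
For reference, scan_global_lru() is what gates the shrink_slab() call above:
it is true for global reclaim and false when reclaiming on behalf of a
cgroup. Its definition at the top of vmscan.c is essentially:

	#ifdef CONFIG_CGROUP_MEM_RES_CTLR
	#define scan_global_lru(sc)	(!(sc)->mem_cgroup)
	#else
	#define scan_global_lru(sc)	(1)
	#endif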
/* Take a nap, wait for some writeback to complete */
- if (sc->nr_scanned && priority < DEF_PRIORITY - 2 &&
- sc->nr_io_pages > sc->swap_cluster_max)
+ if (sc->nr_scanned && priority < DEF_PRIORITY - 2)
congestion_wait(WRITE, HZ/10);
}
/* top priority shrink_caches still had more to do? don't OOM, then */
if (!sc->all_unreclaimable && scan_global_lru(sc))
- ret = 1;
+ ret = nr_reclaimed;
out:
	/*
	 * Now that we've scanned all the zones at this priority level, note
	 * that level within the zone so that the next thread which performs
	 * scanning of this zone will immediately start out at this priority
	 * level.  This affects only the decision whether or not to bring
	 * mapped pages onto the inactive list.
	 */
	if (priority < 0)
priority = 0;
if (scan_global_lru(sc)) {
- for (i = 0; zones[i] != NULL; i++) {
- struct zone *zone = zones[i];
+ for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
continue;
return ret;
}
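
With the boolean return replaced by a page count, callers can now see how
much progress reclaim made. A hypothetical caller (nr_needed and the retry
policy below are illustrative, not part of this patch):

	unsigned long reclaimed;

	reclaimed = do_try_to_free_pages(zonelist, &sc);
	if (!reclaimed)
		return 0;		/* no progress: back off or OOM */
	if (reclaimed >= nr_needed)	/* nr_needed: hypothetical target */
		return reclaimed;	/* enough pages freed */
	/* partial progress: the caller may loop and try again */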
-unsigned long try_to_free_pages(struct zone **zones, int order, gfp_t gfp_mask)
+unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
+ gfp_t gfp_mask)
{
struct scan_control sc = {
.gfp_mask = gfp_mask,
.isolate_pages = isolate_pages_global,
};
- return do_try_to_free_pages(zones, gfp_mask, &sc);
+ return do_try_to_free_pages(zonelist, &sc);
}
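
Callers of try_to_free_pages() now hand in the zonelist they are allocating
from rather than a flattened zone array. The page allocator's direct-reclaim
path ends up doing roughly this (sketch; node_zonelist() is the lookup helper
introduced by the same series):

	struct zonelist *zonelist = node_zonelist(numa_node_id(), gfp_mask);
	unsigned long did_some_progress;

	did_some_progress = try_to_free_pages(zonelist, order, gfp_mask);
	if (!did_some_progress) {
		/* reclaim freed nothing: consider the OOM killer */
	}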
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
						gfp_t gfp_mask)
{
struct scan_control sc = {
- .gfp_mask = gfp_mask,
.may_writepage = !laptop_mode,
.may_swap = 1,
.swap_cluster_max = SWAP_CLUSTER_MAX,
.mem_cgroup = mem_cont,
.isolate_pages = mem_cgroup_isolate_pages,
};
- struct zone **zones;
- int target_zone = gfp_zone(GFP_HIGHUSER_MOVABLE);
+ struct zonelist *zonelist;
- zones = NODE_DATA(numa_node_id())->node_zonelists[target_zone].zones;
- if (do_try_to_free_pages(zones, sc.gfp_mask, &sc))
- return 1;
- return 0;
+ sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
+ (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
+ zonelist = NODE_DATA(numa_node_id())->node_zonelists;
+ return do_try_to_free_pages(zonelist, &sc);
}
#endif
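
The gfp_mask arithmetic above splits the caller's flags in two: the
reclaim-behaviour bits (may we sleep, do IO, touch the filesystem) are kept
from the caller, while the placement bits are forced to GFP_HIGHUSER_MOVABLE,
since memcg reclaim is not bound to the caller's zone preference. Spelled out
(GFP_RECLAIM_MASK covers bits such as __GFP_WAIT|__GFP_IO|__GFP_FS):

	gfp_t behaviour = gfp_mask & GFP_RECLAIM_MASK;	/* e.g. GFP_NOFS keeps __GFP_IO, drops __GFP_FS */
	gfp_t placement = GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK;	/* __GFP_HIGHMEM, __GFP_MOVABLE, ... */

	sc.gfp_mask = behaviour | placement;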
if (!priority)
disable_swap_token();
- sc.nr_io_pages = 0;
all_zones_ok = 1;
/*
* OK, kswapd is getting into trouble. Take a nap, then take
* another pass across the zones.
*/
- if (total_scanned && priority < DEF_PRIORITY - 2 &&
- sc.nr_io_pages > sc.swap_cluster_max)
+ if (total_scanned && priority < DEF_PRIORITY - 2)
congestion_wait(WRITE, HZ/10);
/*
struct reclaim_state reclaim_state = {
.reclaimed_slab = 0,
};
- cpumask_t cpumask;
+ node_to_cpumask_ptr(cpumask, pgdat->node_id);
- cpumask = node_to_cpumask(pgdat->node_id);
- if (!cpus_empty(cpumask))
- set_cpus_allowed(tsk, cpumask);
+ if (!cpus_empty(*cpumask))
+ set_cpus_allowed_ptr(tsk, cpumask);
current->reclaim_state = &reclaim_state;
/*
static int __devinit cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
- pg_data_t *pgdat;
- cpumask_t mask;
int nid;
if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
for_each_node_state(nid, N_HIGH_MEMORY) {
- pgdat = NODE_DATA(nid);
- mask = node_to_cpumask(pgdat->node_id);
- if (any_online_cpu(mask) != NR_CPUS)
+ pg_data_t *pgdat = NODE_DATA(nid);
+ node_to_cpumask_ptr(mask, pgdat->node_id);
+
+ if (any_online_cpu(*mask) < nr_cpu_ids)
/* One of our CPUs online: restore mask */
- set_cpus_allowed(pgdat->kswapd, mask);
+ set_cpus_allowed_ptr(pgdat->kswapd, mask);
}
}
return NOTIFY_OK;
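
The _ptr conversions in this file avoid copying a cpumask_t onto the kernel
stack: with NR_CPUS = 4096, a cpumask_t is 4096/8 = 512 bytes per copy. The
before/after pattern, sketched (node_to_cpumask_ptr() declares the named
pointer variable itself):

	/* old: a full struct copy on large-NR_CPUS configs */
	cpumask_t mask = node_to_cpumask(nid);
	set_cpus_allowed(task, mask);

	/* new: pass a pointer to the node's mask instead */
	node_to_cpumask_ptr(mask, nid);
	set_cpus_allowed_ptr(task, mask);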