/* Incremented by the number of inactive pages that were scanned */
        unsigned long nr_scanned;
 
+       /* Number of pages freed so far during a call to shrink_zones() */
+       unsigned long nr_reclaimed;
+
        /* This context's GFP mask */
        gfp_t gfp_mask;
 
 /*
  * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
  */
-static unsigned long shrink_zone(int priority, struct zone *zone,
+static void shrink_zone(int priority, struct zone *zone,
                                struct scan_control *sc)
 {
        unsigned long nr[NR_LRU_LISTS];
        unsigned long nr_to_scan;
-       unsigned long nr_reclaimed = 0;
        unsigned long percent[2];       /* anon @ 0; file @ 1 */
        enum lru_list l;
 
                                        (unsigned long)sc->swap_cluster_max);
                                nr[l] -= nr_to_scan;
 
-                               nr_reclaimed += shrink_list(l, nr_to_scan,
+                               sc->nr_reclaimed += shrink_list(l, nr_to_scan,
                                                        zone, sc, priority);
                        }
                }
+               /*
+                * On large memory systems, scan >> priority can become
+                * really large. This is fine for the starting priority;
+                * we want to put equal scanning pressure on each zone.
+                * However, if the VM has a harder time freeing pages,
+                * with multiple processes reclaiming pages, the total
+                * freeing target can get unreasonably large.
+                */
+               if (sc->nr_reclaimed > sc->swap_cluster_max &&
+                       priority < DEF_PRIORITY && !current_is_kswapd())
+                       break;
        }
 
        /*
                shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0);
 
        throttle_vm_writeout(sc->gfp_mask);
-       return nr_reclaimed;
 }
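The bailout added above is easiest to see with numbers. Below is a minimal sketch, not part of the patch, of how the scan >> priority target grows as the priority counter drops; the 16 GB LRU size is an assumption for illustration, and DEF_PRIORITY is 12 in this kernel.

#include <stdio.h>

int main(void)
{
	unsigned long lru_pages = 4UL << 20;	/* hypothetical LRU size: ~16 GB of 4 KB pages */

	/* DEF_PRIORITY is 12; each lower priority roughly doubles the scan target */
	for (int priority = 12; priority >= 0; priority--)
		printf("priority %2d: scan target ~%lu pages\n",
		       priority, lru_pages >> priority);
	return 0;
}

At priority 12 each reclaimer aims at roughly a thousand pages, but at the low priorities every process in direct reclaim is targeting millions, so once more than sc->swap_cluster_max pages have been freed it is cheaper to bail out; kswapd is excluded because it still has to balance the whole zone.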
 
 /*
  * b) The zones may be over pages_high but they must go *over* pages_high to
  *    satisfy the `incremental min' zone defense algorithm.
  *
- * Returns the number of reclaimed pages.
- *
  * If a zone is deemed to be full of pinned pages then just give it a light
  * scan then give up on it.
  */
-static unsigned long shrink_zones(int priority, struct zonelist *zonelist,
+static void shrink_zones(int priority, struct zonelist *zonelist,
                                        struct scan_control *sc)
 {
        enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
-       unsigned long nr_reclaimed = 0;
        struct zoneref *z;
        struct zone *zone;
 
                                                        priority);
                }
 
-               nr_reclaimed += shrink_zone(priority, zone, sc);
+               shrink_zone(priority, zone, sc);
        }
-
-       return nr_reclaimed;
 }
 
 /*
        int priority;
        unsigned long ret = 0;
        unsigned long total_scanned = 0;
-       unsigned long nr_reclaimed = 0;
        struct reclaim_state *reclaim_state = current->reclaim_state;
        unsigned long lru_pages = 0;
        struct zoneref *z;
                sc->nr_scanned = 0;
                if (!priority)
                        disable_swap_token();
-               nr_reclaimed += shrink_zones(priority, zonelist, sc);
+               shrink_zones(priority, zonelist, sc);
                /*
                 * Don't shrink slabs when reclaiming memory from
                 * over limit cgroups
                if (scan_global_lru(sc)) {
                        shrink_slab(sc->nr_scanned, sc->gfp_mask, lru_pages);
                        if (reclaim_state) {
-                               nr_reclaimed += reclaim_state->reclaimed_slab;
+                               sc->nr_reclaimed += reclaim_state->reclaimed_slab;
                                reclaim_state->reclaimed_slab = 0;
                        }
                }
                total_scanned += sc->nr_scanned;
-               if (nr_reclaimed >= sc->swap_cluster_max) {
-                       ret = nr_reclaimed;
+               if (sc->nr_reclaimed >= sc->swap_cluster_max) {
+                       ret = sc->nr_reclaimed;
                        goto out;
                }
 
        }
        /* top priority shrink_zones still had more to do? don't OOM, then */
        if (!sc->all_unreclaimable && scan_global_lru(sc))
-               ret = nr_reclaimed;
+               ret = sc->nr_reclaimed;
 out:
        /*
         * Now that we've scanned all the zones at this priority level, note
        int priority;
        int i;
        unsigned long total_scanned;
-       unsigned long nr_reclaimed;
        struct reclaim_state *reclaim_state = current->reclaim_state;
        struct scan_control sc = {
                .gfp_mask = GFP_KERNEL,
 
 loop_again:
        total_scanned = 0;
-       nr_reclaimed = 0;
+       sc.nr_reclaimed = 0;
        sc.may_writepage = !laptop_mode;
        count_vm_event(PAGEOUTRUN);
 
                         */
                        if (!zone_watermark_ok(zone, order, 8*zone->pages_high,
                                                end_zone, 0))
-                               nr_reclaimed += shrink_zone(priority, zone, &sc);
+                               shrink_zone(priority, zone, &sc);
                        reclaim_state->reclaimed_slab = 0;
                        nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
                                                lru_pages);
-                       nr_reclaimed += reclaim_state->reclaimed_slab;
+                       sc.nr_reclaimed += reclaim_state->reclaimed_slab;
                        total_scanned += sc.nr_scanned;
                        if (zone_is_all_unreclaimable(zone))
                                continue;
                         * even in laptop mode
                         */
                        if (total_scanned > SWAP_CLUSTER_MAX * 2 &&
-                           total_scanned > nr_reclaimed + nr_reclaimed / 2)
+                           total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2)
                                sc.may_writepage = 1;
                }
                if (all_zones_ok)
                 * matches the direct reclaim path behaviour in terms of impact
                 * on zone->*_priority.
                 */
-               if (nr_reclaimed >= SWAP_CLUSTER_MAX)
+               if (sc.nr_reclaimed >= SWAP_CLUSTER_MAX)
                        break;
        }
 out:
                goto loop_again;
        }
 
-       return nr_reclaimed;
+       return sc.nr_reclaimed;
 }
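The may_writepage switch a few lines up in balance_pgdat() keeps its old ratio, now fed from sc.nr_reclaimed: writeback is enabled once scanning has outrun reclaim by 50%. A small sketch of that test, illustrative only; should_start_writepage is a made-up helper and SWAP_CLUSTER_MAX is taken to be 32 here.

#include <stdbool.h>
#include <stdio.h>

static bool should_start_writepage(unsigned long total_scanned,
				   unsigned long nr_reclaimed)
{
	const unsigned long swap_cluster_max = 32;	/* assumed SWAP_CLUSTER_MAX */

	/* write pages out once scanning exceeds reclaim by 50% */
	return total_scanned > swap_cluster_max * 2 &&
	       total_scanned > nr_reclaimed + nr_reclaimed / 2;
}

int main(void)
{
	printf("%d\n", should_start_writepage(300, 210));	/* 0: reclaim keeping pace */
	printf("%d\n", should_start_writepage(300, 190));	/* 1: scanning 1.5x ahead, start writing */
	return 0;
}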
 
 /*
        struct task_struct *p = current;
        struct reclaim_state reclaim_state;
        int priority;
-       unsigned long nr_reclaimed = 0;
        struct scan_control sc = {
                .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
                .may_swap = !!(zone_reclaim_mode & RECLAIM_SWAP),
                priority = ZONE_RECLAIM_PRIORITY;
                do {
                        note_zone_scanning_priority(zone, priority);
-                       nr_reclaimed += shrink_zone(priority, zone, &sc);
+                       shrink_zone(priority, zone, &sc);
                        priority--;
-               } while (priority >= 0 && nr_reclaimed < nr_pages);
+               } while (priority >= 0 && sc.nr_reclaimed < nr_pages);
        }
 
        slab_reclaimable = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
                 * Update nr_reclaimed by the number of slab pages we
                 * reclaimed from this zone.
                 */
-               nr_reclaimed += slab_reclaimable -
+               sc.nr_reclaimed += slab_reclaimable -
                        zone_page_state(zone, NR_SLAB_RECLAIMABLE);
        }
 
        p->reclaim_state = NULL;
        current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
-       return nr_reclaimed >= nr_pages;
+       return sc.nr_reclaimed >= nr_pages;
 }
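shrink_slab() does not report how many pages it freed for this particular zone, so the hunk above credits slab reclaim by the drop in the zone's NR_SLAB_RECLAIMABLE counter. A minimal sketch of that bookkeeping, with made-up counter values:

#include <stdio.h>

int main(void)
{
	unsigned long slab_before = 5000;	/* NR_SLAB_RECLAIMABLE before shrink_slab() */
	unsigned long slab_after  = 4200;	/* ... and after it has run */
	unsigned long nr_reclaimed = 120;	/* pages already freed from the LRU lists */

	/* credit the slab difference to this reclaim pass */
	nr_reclaimed += slab_before - slab_after;
	printf("reported reclaimed: %lu pages\n", nr_reclaimed);	/* prints 920 */
	return 0;
}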
 
 int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)