diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 1637575d333..631002d085d 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -89,6 +89,12 @@ enum mem_cgroup_zstat_index {
 };
 
 struct mem_cgroup_per_zone {
+	/*
+	 * spin_lock to protect the per cgroup LRU
+	 */
+	spinlock_t		lru_lock;
+	struct list_head	active_list;
+	struct list_head	inactive_list;
 	unsigned long count[NR_MEM_CGROUP_ZSTAT];
 };
 /* Macro for accessing counter */
@@ -122,16 +128,10 @@ struct mem_cgroup {
 	/*
 	 * Per cgroup active and inactive list, similar to the
 	 * per zone LRU lists.
-	 * TODO: Consider making these lists per zone
 	 */
-	struct list_head active_list;
-	struct list_head inactive_list;
 	struct mem_cgroup_lru_info info;
-	/*
-	 * spin_lock to protect the per cgroup LRU
-	 */
-	spinlock_t lru_lock;
-	unsigned long control_type;	/* control RSS or RSS+Pagecache */
+
+	int	prev_priority;	/* for recording reclaim priority */
 	/*
 	 * statistics.
 	 */
@@ -365,10 +365,10 @@ static void __mem_cgroup_add_list(struct page_cgroup *pc)
 
 	if (!to) {
 		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) += 1;
-		list_add(&pc->lru, &pc->mem_cgroup->inactive_list);
+		list_add(&pc->lru, &mz->inactive_list);
 	} else {
 		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) += 1;
-		list_add(&pc->lru, &pc->mem_cgroup->active_list);
+		list_add(&pc->lru, &mz->active_list);
 	}
 	mem_cgroup_charge_statistics(pc->mem_cgroup, pc->flags, true);
 }
@@ -386,11 +386,11 @@ static void __mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
 	if (active) {
 		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) += 1;
 		pc->flags |= PAGE_CGROUP_FLAG_ACTIVE;
-		list_move(&pc->lru, &pc->mem_cgroup->active_list);
+		list_move(&pc->lru, &mz->active_list);
 	} else {
 		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) += 1;
 		pc->flags &= ~PAGE_CGROUP_FLAG_ACTIVE;
-		list_move(&pc->lru, &pc->mem_cgroup->inactive_list);
+		list_move(&pc->lru, &mz->inactive_list);
 	}
 }
 
@@ -399,7 +399,7 @@ int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
 	int ret;
 
 	task_lock(task);
-	ret = task->mm && mm_cgroup(task->mm) == mem;
+	ret = task->mm && vm_match_cgroup(task->mm, mem);
 	task_unlock(task);
 	return ret;
 }
@@ -409,15 +409,99 @@ int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
  */
 void mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
 {
-	struct mem_cgroup *mem;
+	struct mem_cgroup_per_zone *mz;
+	unsigned long flags;
+
 	if (!pc)
 		return;
 
-	mem = pc->mem_cgroup;
-
-	spin_lock(&mem->lru_lock);
+	mz = page_cgroup_zoneinfo(pc);
+	spin_lock_irqsave(&mz->lru_lock, flags);
 	__mem_cgroup_move_lists(pc, active);
-	spin_unlock(&mem->lru_lock);
+	spin_unlock_irqrestore(&mz->lru_lock, flags);
+}
+
+/*
+ * Calculate mapped_ratio under the memory controller. This will be used in
+ * vmscan.c for determining whether we have to reclaim mapped pages.
+ */
+int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
+{
+	long total, rss;
+
+	/*
+	 * usage is recorded in bytes. But, here, we assume the number of
+	 * physical pages can be represented by "long" on any arch.
+	 */
+	total = (long) (mem->res.usage >> PAGE_SHIFT) + 1L;
+	rss = (long)mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
+	return (int)((rss * 100L) / total);
+}
+/*
+ * This function is called from vmscan.c. In the page reclaiming loop, the
+ * balance between the active and inactive lists is calculated. For memory
+ * controller page reclaiming, we should use the mem_cgroup's imbalance
+ * rather than the zone's global LRU imbalance.
+ */
+long mem_cgroup_reclaim_imbalance(struct mem_cgroup *mem)
+{
+	unsigned long active, inactive;
+	/* active and inactive are the number of pages. 'long' is ok. */
+	active = mem_cgroup_get_all_zonestat(mem, MEM_CGROUP_ZSTAT_ACTIVE);
+	inactive = mem_cgroup_get_all_zonestat(mem, MEM_CGROUP_ZSTAT_INACTIVE);
+	return (long) (active / (inactive + 1));
+}
+
+/*
+ * prev_priority control. This will be used in the memory reclaim path.
+ */
+int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
+{
+	return mem->prev_priority;
+}
+
+void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, int priority)
+{
+	if (priority < mem->prev_priority)
+		mem->prev_priority = priority;
+}
+
+void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority)
+{
+	mem->prev_priority = priority;
+}
+
+/*
+ * Calculate # of pages to be scanned in this priority/zone.
+ * See also vmscan.c
+ *
+ * priority starts from "DEF_PRIORITY" and is decremented in each loop.
+ * (see include/linux/mmzone.h)
+ */
+
+long mem_cgroup_calc_reclaim_active(struct mem_cgroup *mem,
+				   struct zone *zone, int priority)
+{
+	long nr_active;
+	int nid = zone->zone_pgdat->node_id;
+	int zid = zone_idx(zone);
+	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);
+
+	nr_active = MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE);
+	return (nr_active >> priority);
+}
+
+long mem_cgroup_calc_reclaim_inactive(struct mem_cgroup *mem,
+					struct zone *zone, int priority)
+{
+	long nr_inactive;
+	int nid = zone->zone_pgdat->node_id;
+	int zid = zone_idx(zone);
+	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);
+
+	nr_inactive = MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE);
+
+	return (nr_inactive >> priority);
 }
 
 unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
@@ -433,19 +517,23 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
 	LIST_HEAD(pc_list);
 	struct list_head *src;
 	struct page_cgroup *pc, *tmp;
+	int nid = z->zone_pgdat->node_id;
+	int zid = zone_idx(z);
+	struct mem_cgroup_per_zone *mz;
 
+	mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
 	if (active)
-		src = &mem_cont->active_list;
+		src = &mz->active_list;
 	else
-		src = &mem_cont->inactive_list;
+		src = &mz->inactive_list;
 
-	spin_lock(&mem_cont->lru_lock);
+
+	spin_lock(&mz->lru_lock);
 	scan = 0;
 	list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
 		if (scan >= nr_to_scan)
 			break;
 		page = pc->page;
-		VM_BUG_ON(!pc);
 
 		if (unlikely(!PageLRU(page)))
 			continue;
@@ -459,13 +547,6 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
 			continue;
 		}
 
-		/*
-		 * Reclaim, per zone
-		 * TODO: make the active/inactive lists per zone
-		 */
-		if (page_zone(page) != z)
-			continue;
-
 		scan++;
 		list_move(&pc->lru, &pc_list);
 
@@ -476,7 +557,7 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
 	}
 
 	list_splice(&pc_list, src);
-	spin_unlock(&mem_cont->lru_lock);
+	spin_unlock(&mz->lru_lock);
 
 	*scanned = scan;
 	return nr_taken;
@@ -495,6 +576,7 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
 	struct page_cgroup *pc;
 	unsigned long flags;
 	unsigned long nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
+	struct mem_cgroup_per_zone *mz;
 
 	/*
 	 * Should page_cgroup's go to their own slab?
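/*
 * [Editor's note -- illustration, not part of the patch.]  The
 * mem_cgroup_calc_reclaim_*() helpers added above size each reclaim pass
 * as "list length >> priority".  A minimal, self-contained user-space
 * sketch of that arithmetic follows; DEF_PRIORITY mirrors
 * include/linux/mmzone.h of this era, and the list length is invented
 * purely for illustration.
 */
#include <stdio.h>

#define DEF_PRIORITY	12

int main(void)
{
	long nr_active = 1L << 20;	/* pretend one per-zone active list holds 1M pages */
	int priority;

	/*
	 * Reclaim starts at DEF_PRIORITY and decrements toward 0 as it gets
	 * more desperate; each step doubles the scan target, until at
	 * priority 0 the whole list is scanned.
	 */
	for (priority = DEF_PRIORITY; priority >= 0; priority--)
		printf("priority %2d -> scan %ld pages\n",
		       priority, nr_active >> priority);
	return 0;
}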
@@ -596,10 +678,11 @@ retry:
 		goto retry;
 	}
 
-	spin_lock_irqsave(&mem->lru_lock, flags);
+	mz = page_cgroup_zoneinfo(pc);
+	spin_lock_irqsave(&mz->lru_lock, flags);
 	/* Update statistics vector */
 	__mem_cgroup_add_list(pc);
-	spin_unlock_irqrestore(&mem->lru_lock, flags);
+	spin_unlock_irqrestore(&mz->lru_lock, flags);
 
 done:
 	return 0;
@@ -624,56 +707,59 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
 				gfp_t gfp_mask)
 {
 	int ret = 0;
-	struct mem_cgroup *mem;
 	if (!mm)
 		mm = &init_mm;
 
-	rcu_read_lock();
-	mem = rcu_dereference(mm->mem_cgroup);
-	css_get(&mem->css);
-	rcu_read_unlock();
-	if (mem->control_type == MEM_CGROUP_TYPE_ALL)
-		ret = mem_cgroup_charge_common(page, mm, gfp_mask,
+	ret = mem_cgroup_charge_common(page, mm, gfp_mask,
 				MEM_CGROUP_CHARGE_TYPE_CACHE);
-	css_put(&mem->css);
 	return ret;
 }
 
 /*
  * Uncharging is always a welcome operation, we never complain, simply
- * uncharge.
+ * uncharge. This routine should be called with lock_page_cgroup() held.
  */
 void mem_cgroup_uncharge(struct page_cgroup *pc)
 {
 	struct mem_cgroup *mem;
+	struct mem_cgroup_per_zone *mz;
 	struct page *page;
 	unsigned long flags;
 
 	/*
-	 * This can handle cases when a page is not charged at all and we
-	 * are switching between handling the control_type.
+	 * Check if our page_cgroup is valid
 	 */
 	if (!pc)
 		return;
 
 	if (atomic_dec_and_test(&pc->ref_cnt)) {
 		page = pc->page;
+		mz = page_cgroup_zoneinfo(pc);
 		/*
 		 * get page->cgroup and clear it under lock.
 		 * force_empty can drop page->cgroup without checking refcnt.
 		 */
+		unlock_page_cgroup(page);
 		if (clear_page_cgroup(page, pc) == pc) {
 			mem = pc->mem_cgroup;
 			css_put(&mem->css);
 			res_counter_uncharge(&mem->res, PAGE_SIZE);
-			spin_lock_irqsave(&mem->lru_lock, flags);
+			spin_lock_irqsave(&mz->lru_lock, flags);
 			__mem_cgroup_remove_list(pc);
-			spin_unlock_irqrestore(&mem->lru_lock, flags);
+			spin_unlock_irqrestore(&mz->lru_lock, flags);
 			kfree(pc);
 		}
+		lock_page_cgroup(page);
 	}
 }
 
+void mem_cgroup_uncharge_page(struct page *page)
+{
+	lock_page_cgroup(page);
+	mem_cgroup_uncharge(page_get_page_cgroup(page));
+	unlock_page_cgroup(page);
+}
+
 /*
  * Returns non-zero if a page (under migration) has valid page_cgroup member.
  * Refcnt of page_cgroup is incremented.
@@ -693,8 +779,12 @@ int mem_cgroup_prepare_migration(struct page *page)
 
 void mem_cgroup_end_migration(struct page *page)
 {
-	struct page_cgroup *pc = page_get_page_cgroup(page);
+	struct page_cgroup *pc;
+
+	lock_page_cgroup(page);
+	pc = page_get_page_cgroup(page);
 	mem_cgroup_uncharge(pc);
+	unlock_page_cgroup(page);
 }
 /*
  * We know both *page* and *newpage* are now not-on-LRU and Pg_locked.
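/*
 * [Editor's note -- illustration, not part of the patch.]  The hunks above
 * replace the single cgroup-wide lru_lock with one lock (and one
 * active/inactive list pair) per zone, so charge/uncharge activity on
 * different zones no longer contends on one lock.  A self-contained
 * user-space sketch of that structure; NZONES, struct zone_lru and
 * lru_add() are invented names (compile with -lpthread):
 */
#include <pthread.h>
#include <stdio.h>

#define NZONES 4

struct item {
	struct item *next;
};

struct zone_lru {
	pthread_mutex_t lock;		/* per zone, where one global lock used to be */
	struct item *inactive;		/* stand-in for the per-zone LRU lists */
};

static struct zone_lru lru[NZONES];

/* Push an item onto one zone's list; only that zone's lock is taken. */
static void lru_add(struct zone_lru *z, struct item *it)
{
	pthread_mutex_lock(&z->lock);
	it->next = z->inactive;
	z->inactive = it;
	pthread_mutex_unlock(&z->lock);
}

int main(void)
{
	struct item a, b;
	int i;

	for (i = 0; i < NZONES; i++)
		pthread_mutex_init(&lru[i].lock, NULL);

	lru_add(&lru[0], &a);		/* these two adds take different locks */
	lru_add(&lru[3], &b);
	printf("added under two independent zone locks\n");
	return 0;
}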
@@ -707,24 +797,29 @@ void mem_cgroup_page_migration(struct page *page, struct page *newpage)
 	struct page_cgroup *pc;
 	struct mem_cgroup *mem;
 	unsigned long flags;
+	struct mem_cgroup_per_zone *mz;
 retry:
 	pc = page_get_page_cgroup(page);
 	if (!pc)
 		return;
 	mem = pc->mem_cgroup;
+	mz = page_cgroup_zoneinfo(pc);
 	if (clear_page_cgroup(page, pc) != pc)
 		goto retry;
-
-	spin_lock_irqsave(&mem->lru_lock, flags);
+	spin_lock_irqsave(&mz->lru_lock, flags);
 	__mem_cgroup_remove_list(pc);
+	spin_unlock_irqrestore(&mz->lru_lock, flags);
+
 	pc->page = newpage;
 	lock_page_cgroup(newpage);
 	page_assign_page_cgroup(newpage, pc);
 	unlock_page_cgroup(newpage);
-	__mem_cgroup_add_list(pc);
 
-	spin_unlock_irqrestore(&mem->lru_lock, flags);
+	mz = page_cgroup_zoneinfo(pc);
+	spin_lock_irqsave(&mz->lru_lock, flags);
+	__mem_cgroup_add_list(pc);
+	spin_unlock_irqrestore(&mz->lru_lock, flags);
 	return;
 }
@@ -735,16 +830,26 @@ retry:
  */
 #define FORCE_UNCHARGE_BATCH	(128)
 static void
-mem_cgroup_force_empty_list(struct mem_cgroup *mem, struct list_head *list)
+mem_cgroup_force_empty_list(struct mem_cgroup *mem,
+			struct mem_cgroup_per_zone *mz,
+			int active)
 {
 	struct page_cgroup *pc;
 	struct page *page;
 	int count;
 	unsigned long flags;
+	struct list_head *list;
+
+	if (active)
+		list = &mz->active_list;
+	else
+		list = &mz->inactive_list;
 
+	if (list_empty(list))
+		return;
 retry:
 	count = FORCE_UNCHARGE_BATCH;
-	spin_lock_irqsave(&mem->lru_lock, flags);
+	spin_lock_irqsave(&mz->lru_lock, flags);
 
 	while (--count && !list_empty(list)) {
 		pc = list_entry(list->prev, struct page_cgroup, lru);
@@ -759,7 +864,7 @@ retry:
 		} else	/* being uncharged ? ...do relax */
 			break;
 	}
-	spin_unlock_irqrestore(&mem->lru_lock, flags);
+	spin_unlock_irqrestore(&mz->lru_lock, flags);
 	if (!list_empty(list)) {
 		cond_resched();
 		goto retry;
@@ -775,20 +880,25 @@ retry:
 int mem_cgroup_force_empty(struct mem_cgroup *mem)
 {
 	int ret = -EBUSY;
+	int node, zid;
 	css_get(&mem->css);
 	/*
 	 * page reclaim code (kswapd etc..) will move pages between
 	 * active_list <-> inactive_list while we don't take a lock.
 	 * So, we have to do loop here until all lists are empty.
 	 */
-	while (!(list_empty(&mem->active_list) &&
-		 list_empty(&mem->inactive_list))) {
+	while (mem->res.usage > 0) {
 		if (atomic_read(&mem->css.cgroup->count) > 0)
 			goto out;
-		/* drop all page_cgroup in active_list */
-		mem_cgroup_force_empty_list(mem, &mem->active_list);
-		/* drop all page_cgroup in inactive_list */
-		mem_cgroup_force_empty_list(mem, &mem->inactive_list);
+		for_each_node_state(node, N_POSSIBLE)
+			for (zid = 0; zid < MAX_NR_ZONES; zid++) {
+				struct mem_cgroup_per_zone *mz;
+				mz = mem_cgroup_zoneinfo(mem, node, zid);
+				/* drop all page_cgroup in active_list */
+				mem_cgroup_force_empty_list(mem, mz, 1);
+				/* drop all page_cgroup in inactive_list */
+				mem_cgroup_force_empty_list(mem, mz, 0);
+			}
 	}
 	ret = 0;
 out:
@@ -829,61 +939,6 @@ static ssize_t mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
 				mem_cgroup_write_strategy);
 }
 
-static ssize_t mem_control_type_write(struct cgroup *cont,
-					struct cftype *cft, struct file *file,
-					const char __user *userbuf,
-					size_t nbytes, loff_t *pos)
-{
-	int ret;
-	char *buf, *end;
-	unsigned long tmp;
-	struct mem_cgroup *mem;
-
-	mem = mem_cgroup_from_cont(cont);
-	buf = kmalloc(nbytes + 1, GFP_KERNEL);
-	ret = -ENOMEM;
-	if (buf == NULL)
-		goto out;
-
-	buf[nbytes] = 0;
-	ret = -EFAULT;
-	if (copy_from_user(buf, userbuf, nbytes))
-		goto out_free;
-
-	ret = -EINVAL;
-	tmp = simple_strtoul(buf, &end, 10);
-	if (*end != '\0')
-		goto out_free;
-
-	if (tmp <= MEM_CGROUP_TYPE_UNSPEC || tmp >= MEM_CGROUP_TYPE_MAX)
-		goto out_free;
-
-	mem->control_type = tmp;
-	ret = nbytes;
-out_free:
-	kfree(buf);
-out:
-	return ret;
-}
-
-static ssize_t mem_control_type_read(struct cgroup *cont,
-				struct cftype *cft,
-				struct file *file, char __user *userbuf,
-				size_t nbytes, loff_t *ppos)
-{
-	unsigned long val;
-	char buf[64], *s;
-	struct mem_cgroup *mem;
-
-	mem = mem_cgroup_from_cont(cont);
-	s = buf;
-	val = mem->control_type;
-	s += sprintf(s, "%lu\n", val);
-	return simple_read_from_buffer((void __user *)userbuf, nbytes,
-			ppos, buf, s - buf);
-}
-
-
 static ssize_t mem_force_empty_write(struct cgroup *cont,
 					struct cftype *cft, struct file *file,
 					const char __user *userbuf,
@@ -981,11 +1036,6 @@ static struct cftype mem_cgroup_files[] = {
 		.private = RES_FAILCNT,
 		.read = mem_cgroup_read,
 	},
-	{
-		.name = "control_type",
-		.write = mem_control_type_write,
-		.read = mem_control_type_read,
-	},
 	{
 		.name = "force_empty",
 		.write = mem_force_empty_write,
@@ -1000,15 +1050,41 @@ static struct cftype mem_cgroup_files[] = {
 
 static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
 {
 	struct mem_cgroup_per_node *pn;
-
-	pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, node);
+	struct mem_cgroup_per_zone *mz;
+	int zone;
+	/*
+	 * This routine is called for each possible node.
+	 * But it is a BUG to call kmalloc() on an offline node.
+	 *
+	 * TODO: this routine can waste a lot of memory for nodes which will
+	 *       never be onlined. It's better to use a memory hotplug callback
+	 *       function.
+ */ + if (node_state(node, N_HIGH_MEMORY)) + pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, node); + else + pn = kmalloc(sizeof(*pn), GFP_KERNEL); if (!pn) return 1; + mem->info.nodeinfo[node] = pn; memset(pn, 0, sizeof(*pn)); + + for (zone = 0; zone < MAX_NR_ZONES; zone++) { + mz = &pn->zoneinfo[zone]; + INIT_LIST_HEAD(&mz->active_list); + INIT_LIST_HEAD(&mz->inactive_list); + spin_lock_init(&mz->lru_lock); + } return 0; } +static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node) +{ + kfree(mem->info.nodeinfo[node]); +} + + static struct mem_cgroup init_mem_cgroup; static struct cgroup_subsys_state * @@ -1024,13 +1100,10 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont) mem = kzalloc(sizeof(struct mem_cgroup), GFP_KERNEL); if (mem == NULL) - return NULL; + return ERR_PTR(-ENOMEM); res_counter_init(&mem->res); - INIT_LIST_HEAD(&mem->active_list); - INIT_LIST_HEAD(&mem->inactive_list); - spin_lock_init(&mem->lru_lock); - mem->control_type = MEM_CGROUP_TYPE_ALL; + memset(&mem->info, 0, sizeof(mem->info)); for_each_node_state(node, N_POSSIBLE) @@ -1040,10 +1113,10 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont) return &mem->css; free_out: for_each_node_state(node, N_POSSIBLE) - kfree(mem->info.nodeinfo[node]); + free_mem_cgroup_per_zone_info(mem, node); if (cont->parent != NULL) kfree(mem); - return NULL; + return ERR_PTR(-ENOMEM); } static void mem_cgroup_pre_destroy(struct cgroup_subsys *ss, @@ -1060,7 +1133,7 @@ static void mem_cgroup_destroy(struct cgroup_subsys *ss, struct mem_cgroup *mem = mem_cgroup_from_cont(cont); for_each_node_state(node, N_POSSIBLE) - kfree(mem->info.nodeinfo[node]); + free_mem_cgroup_per_zone_info(mem, node); kfree(mem_cgroup_from_cont(cont)); }