diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index dcbe30aad1da8dc322eab57d4f43cf9c3c37187e..2e0bfc93484b327557e6b88f509476c4f837ac8f 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -353,7 +353,6 @@ int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
 void mem_cgroup_move_lists(struct page *page, bool active)
 {
        struct page_cgroup *pc;
-       struct mem_cgroup *mem;
        struct mem_cgroup_per_zone *mz;
        unsigned long flags;
 
@@ -367,35 +366,14 @@ void mem_cgroup_move_lists(struct page *page, bool active)
        if (!try_lock_page_cgroup(page))
                return;
 
-       /*
-        * Now page_cgroup is stable, but we cannot acquire mz->lru_lock
-        * while holding it, because mem_cgroup_force_empty_list does the
-        * reverse.  Get a hold on the mem_cgroup before unlocking, so that
-        * the zoneinfo remains stable, then take mz->lru_lock; then check
-        * that page still points to pc and pc (even if freed and reassigned
-        * to that same page meanwhile) still points to the same mem_cgroup.
-        * Then we know mz still points to the right spinlock, so it's safe
-        * to move_lists (page->page_cgroup might be reset while we do so, but
-        * that doesn't matter: pc->page is stable till we drop mz->lru_lock).
-        * We're being a little naughty not to try_lock_page_cgroup again
-        * inside there, but we are safe, aren't we?  Aren't we?  Whistle...
-        */
        pc = page_get_page_cgroup(page);
        if (pc) {
-               mem = pc->mem_cgroup;
                mz = page_cgroup_zoneinfo(pc);
-               css_get(&mem->css);
-
-               unlock_page_cgroup(page);
-
                spin_lock_irqsave(&mz->lru_lock, flags);
-               if (page_get_page_cgroup(page) == pc && pc->mem_cgroup == mem)
-                       __mem_cgroup_move_lists(pc, active);
+               __mem_cgroup_move_lists(pc, active);
                spin_unlock_irqrestore(&mz->lru_lock, flags);
-
-               css_put(&mem->css);
-       } else
-               unlock_page_cgroup(page);
+       }
+       unlock_page_cgroup(page);
 }
 
 /*
@@ -555,6 +533,9 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
        unsigned long nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
        struct mem_cgroup_per_zone *mz;
 
+       if (mem_cgroup_subsys.disabled)
+               return 0;
+
        /*
         * Should page_cgroup's go to their own slab?
         * One could optimize the performance of the charging routine
@@ -645,13 +626,13 @@ retry:
                goto retry;
        }
        page_assign_page_cgroup(page, pc);
-       unlock_page_cgroup(page);
 
        mz = page_cgroup_zoneinfo(pc);
        spin_lock_irqsave(&mz->lru_lock, flags);
        __mem_cgroup_add_list(pc);
        spin_unlock_irqrestore(&mz->lru_lock, flags);
 
+       unlock_page_cgroup(page);
 done:
        return 0;
 out:
@@ -687,6 +668,9 @@ void mem_cgroup_uncharge_page(struct page *page)
        struct mem_cgroup_per_zone *mz;
        unsigned long flags;
 
+       if (mem_cgroup_subsys.disabled)
+               return;
+
        /*
         * Check if our page_cgroup is valid
         */
@@ -699,14 +683,14 @@ void mem_cgroup_uncharge_page(struct page *page)
        VM_BUG_ON(pc->ref_cnt <= 0);
 
        if (--(pc->ref_cnt) == 0) {
-               page_assign_page_cgroup(page, NULL);
-               unlock_page_cgroup(page);
-
                mz = page_cgroup_zoneinfo(pc);
                spin_lock_irqsave(&mz->lru_lock, flags);
                __mem_cgroup_remove_list(pc);
                spin_unlock_irqrestore(&mz->lru_lock, flags);
 
+               page_assign_page_cgroup(page, NULL);
+               unlock_page_cgroup(page);
+
                mem = pc->mem_cgroup;
                res_counter_uncharge(&mem->res, PAGE_SIZE);
                css_put(&mem->css);
@@ -727,6 +711,9 @@ int mem_cgroup_prepare_migration(struct page *page)
 {
        struct page_cgroup *pc;
 
+       if (mem_cgroup_subsys.disabled)
+               return 0;
+
        lock_page_cgroup(page);
        pc = page_get_page_cgroup(page);
        if (pc)
@@ -758,23 +745,24 @@ void mem_cgroup_page_migration(struct page *page, struct page *newpage)
                return;
        }
 
-       page_assign_page_cgroup(page, NULL);
-       unlock_page_cgroup(page);
-
        mz = page_cgroup_zoneinfo(pc);
        spin_lock_irqsave(&mz->lru_lock, flags);
        __mem_cgroup_remove_list(pc);
        spin_unlock_irqrestore(&mz->lru_lock, flags);
 
+       page_assign_page_cgroup(page, NULL);
+       unlock_page_cgroup(page);
+
        pc->page = newpage;
        lock_page_cgroup(newpage);
        page_assign_page_cgroup(newpage, pc);
-       unlock_page_cgroup(newpage);
 
        mz = page_cgroup_zoneinfo(pc);
        spin_lock_irqsave(&mz->lru_lock, flags);
        __mem_cgroup_add_list(pc);
        spin_unlock_irqrestore(&mz->lru_lock, flags);
+
+       unlock_page_cgroup(newpage);
 }
 
 /*
@@ -789,7 +777,7 @@ static void mem_cgroup_force_empty_list(struct mem_cgroup *mem,
 {
        struct page_cgroup *pc;
        struct page *page;
-       int count;
+       int count = FORCE_UNCHARGE_BATCH;
        unsigned long flags;
        struct list_head *list;
 
@@ -798,35 +786,21 @@ static void mem_cgroup_force_empty_list(struct mem_cgroup *mem,
        else
                list = &mz->inactive_list;
 
-       if (list_empty(list))
-               return;
-retry:
-       count = FORCE_UNCHARGE_BATCH;
        spin_lock_irqsave(&mz->lru_lock, flags);
-
-       while (--count && !list_empty(list)) {
+       while (!list_empty(list)) {
                pc = list_entry(list->prev, struct page_cgroup, lru);
                page = pc->page;
-               lock_page_cgroup(page);
-               if (page_get_page_cgroup(page) == pc) {
-                       page_assign_page_cgroup(page, NULL);
-                       unlock_page_cgroup(page);
-                       __mem_cgroup_remove_list(pc);
-                       res_counter_uncharge(&mem->res, PAGE_SIZE);
-                       css_put(&mem->css);
-                       kfree(pc);
-               } else {
-                       /* racing uncharge: let page go then retry */
-                       unlock_page_cgroup(page);
-                       break;
+               get_page(page);
+               spin_unlock_irqrestore(&mz->lru_lock, flags);
+               mem_cgroup_uncharge_page(page);
+               put_page(page);
+               if (--count <= 0) {
+                       count = FORCE_UNCHARGE_BATCH;
+                       cond_resched();
                }
+               spin_lock_irqsave(&mz->lru_lock, flags);
        }
-
        spin_unlock_irqrestore(&mz->lru_lock, flags);
-       if (!list_empty(list)) {
-               cond_resched();
-               goto retry;
-       }
 }
 
 /*
@@ -838,6 +812,9 @@ static int mem_cgroup_force_empty(struct mem_cgroup *mem)
        int ret = -EBUSY;
        int node, zid;
 
+       if (mem_cgroup_subsys.disabled)
+               return 0;
+
        css_get(&mem->css);
        /*
         * page reclaim code (kswapd etc..) will move pages between
@@ -1001,7 +978,7 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
 {
        struct mem_cgroup_per_node *pn;
        struct mem_cgroup_per_zone *mz;
-       int zone;
+       int zone, tmp = node;
        /*
         * This routine is called against possible nodes.
         * But it's BUG to call kmalloc() against offline node.
@@ -1010,10 +987,9 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
         *       never be onlined. It's better to use memory hotplug callback
         *       function.
         */
-       if (node_state(node, N_HIGH_MEMORY))
-               pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, node);
-       else
-               pn = kmalloc(sizeof(*pn), GFP_KERNEL);
+       if (!node_state(node, N_NORMAL_MEMORY))
+               tmp = -1;
+       pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
        if (!pn)
                return 1;
 
@@ -1088,6 +1064,8 @@ static void mem_cgroup_destroy(struct cgroup_subsys *ss,
 static int mem_cgroup_populate(struct cgroup_subsys *ss,
                                struct cgroup *cont)
 {
+       if (mem_cgroup_subsys.disabled)
+               return 0;
        return cgroup_add_files(cont, ss, mem_cgroup_files,
                                        ARRAY_SIZE(mem_cgroup_files));
 }
@@ -1100,6 +1078,9 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss,
        struct mm_struct *mm;
        struct mem_cgroup *mem, *old_mem;
 
+       if (mem_cgroup_subsys.disabled)
+               return;
+
        mm = get_task_mm(p);
        if (mm == NULL)
                return;
@@ -1114,7 +1095,7 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss,
         * Only thread group leaders are allowed to migrate, the mm_struct is
         * in effect owned by the leader
         */
-       if (p->tgid != p->pid)
+       if (!thread_group_leader(p))
                goto out;
 
        css_get(&mem->css);