diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index dc3472f9f68c54f884ec4b62b39ed0f9a75591ce..33add96cd5fbf98c2f804fb2511aeec3f5a2594d 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -31,6 +31,7 @@
 #include <linux/spinlock.h>
 #include <linux/fs.h>
 #include <linux/seq_file.h>
+#include <linux/vmalloc.h>
 
 #include <asm/uaccess.h>
 
@@ -275,10 +276,10 @@ static void unlock_page_cgroup(struct page *page)
        bit_spin_unlock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
 }
 
-static void __mem_cgroup_remove_list(struct page_cgroup *pc)
+static void __mem_cgroup_remove_list(struct mem_cgroup_per_zone *mz,
+                       struct page_cgroup *pc)
 {
        int from = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
-       struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);
 
        if (from)
                MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) -= 1;
@@ -289,10 +290,10 @@ static void __mem_cgroup_remove_list(struct page_cgroup *pc)
        list_del_init(&pc->lru);
 }
 
-static void __mem_cgroup_add_list(struct page_cgroup *pc)
+static void __mem_cgroup_add_list(struct mem_cgroup_per_zone *mz,
+                               struct page_cgroup *pc)
 {
        int to = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
-       struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);
 
        if (!to) {
                MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) += 1;
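
The two hunks above change __mem_cgroup_remove_list() and __mem_cgroup_add_list() to take the per-zone state as an argument instead of re-deriving it with page_cgroup_zoneinfo(): every caller already looked the zone up in order to take mz->lru_lock, so the helpers can simply reuse that pointer. A minimal userspace sketch of the same "resolve once in the caller, pass it down" refactor; the names and the lookup below are illustrative, not the kernel's:

    #include <stdio.h>

    struct zone_info { int nr_active; };

    /* Illustrative stand-in for page_cgroup_zoneinfo(): some lookup the
     * caller has to perform anyway before it can lock the zone. */
    static struct zone_info *lookup_zone(int page_id, struct zone_info *zones)
    {
        return &zones[page_id % 2];
    }

    /* Before: the helper repeats the lookup its caller already did. */
    static void add_old(int page_id, struct zone_info *zones)
    {
        struct zone_info *z = lookup_zone(page_id, zones); /* redundant */
        z->nr_active++;
    }

    /* After: the caller resolves the zone once and passes it in. */
    static void add_new(struct zone_info *z)
    {
        z->nr_active++;
    }

    int main(void)
    {
        struct zone_info zones[2] = { { 0 }, { 0 } };
        int page_id = 1;

        add_old(page_id, zones);              /* two lookups in total */
        add_new(lookup_zone(page_id, zones)); /* one lookup, reused   */
        printf("zone 1 active pages: %d\n", zones[1].nr_active);
        return 0;
    }
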
@@ -591,7 +592,6 @@ retry:
                        mem_cgroup_out_of_memory(mem, gfp_mask);
                        goto out;
                }
-               congestion_wait(WRITE, HZ/10);
        }
 
        pc->ref_cnt = 1;
@@ -599,7 +599,7 @@ retry:
        pc->page = page;
        pc->flags = PAGE_CGROUP_FLAG_ACTIVE;
        if (ctype == MEM_CGROUP_CHARGE_TYPE_CACHE)
-               pc->flags |= PAGE_CGROUP_FLAG_CACHE;
+               pc->flags = PAGE_CGROUP_FLAG_CACHE;
 
        lock_page_cgroup(page);
        if (page_get_page_cgroup(page)) {
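
The hunk above also switches the cache case from |= to =, so a page charged as page cache now starts with only the CACHE flag rather than CACHE combined with the ACTIVE flag assigned on the previous line. A small standalone sketch of what the operator change means for the flag word; the flag values are made up purely for illustration:

    #include <stdio.h>

    #define FLAG_ACTIVE 0x1   /* illustrative values, not the kernel's */
    #define FLAG_CACHE  0x2

    int main(void)
    {
        unsigned long with_or = FLAG_ACTIVE;
        unsigned long with_assign = FLAG_ACTIVE;

        with_or |= FLAG_CACHE;      /* keeps ACTIVE as well: 0x3 */
        with_assign = FLAG_CACHE;   /* replaces ACTIVE:      0x2 */

        printf("|= gives %#lx, = gives %#lx\n", with_or, with_assign);
        return 0;
    }
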
@@ -618,7 +618,7 @@ retry:
 
        mz = page_cgroup_zoneinfo(pc);
        spin_lock_irqsave(&mz->lru_lock, flags);
-       __mem_cgroup_add_list(pc);
+       __mem_cgroup_add_list(mz, pc);
        spin_unlock_irqrestore(&mz->lru_lock, flags);
 
        unlock_page_cgroup(page);
@@ -674,7 +674,7 @@ void mem_cgroup_uncharge_page(struct page *page)
        if (--(pc->ref_cnt) == 0) {
                mz = page_cgroup_zoneinfo(pc);
                spin_lock_irqsave(&mz->lru_lock, flags);
-               __mem_cgroup_remove_list(pc);
+               __mem_cgroup_remove_list(mz, pc);
                spin_unlock_irqrestore(&mz->lru_lock, flags);
 
                page_assign_page_cgroup(page, NULL);
@@ -736,7 +736,7 @@ void mem_cgroup_page_migration(struct page *page, struct page *newpage)
 
        mz = page_cgroup_zoneinfo(pc);
        spin_lock_irqsave(&mz->lru_lock, flags);
-       __mem_cgroup_remove_list(pc);
+       __mem_cgroup_remove_list(mz, pc);
        spin_unlock_irqrestore(&mz->lru_lock, flags);
 
        page_assign_page_cgroup(page, NULL);
@@ -748,7 +748,7 @@ void mem_cgroup_page_migration(struct page *page, struct page *newpage)
 
        mz = page_cgroup_zoneinfo(pc);
        spin_lock_irqsave(&mz->lru_lock, flags);
-       __mem_cgroup_add_list(pc);
+       __mem_cgroup_add_list(mz, pc);
        spin_unlock_irqrestore(&mz->lru_lock, flags);
 
        unlock_page_cgroup(newpage);
@@ -857,12 +857,19 @@ static ssize_t mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
                                mem_cgroup_write_strategy);
 }
 
-static int mem_cgroup_max_reset(struct cgroup *cont, unsigned int event)
+static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
 {
        struct mem_cgroup *mem;
 
        mem = mem_cgroup_from_cont(cont);
-       res_counter_reset_max(&mem->res);
+       switch (event) {
+       case RES_MAX_USAGE:
+               res_counter_reset_max(&mem->res);
+               break;
+       case RES_FAILCNT:
+               res_counter_reset_failcnt(&mem->res);
+               break;
+       }
        return 0;
 }
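
mem_cgroup_max_reset() is generalized into mem_cgroup_reset(), which switches on the per-file private value so one trigger handler can clear either the max-usage watermark or the failure counter. A standalone sketch of the same dispatch-on-event pattern; the enum and counter structure below are invented stand-ins, not the kernel's res_counter API:

    #include <stdio.h>

    enum { RES_MAX_USAGE, RES_FAILCNT };   /* stand-in event values */

    struct counter {
        long max_usage;
        long failcnt;
    };

    /* One handler; the behaviour is selected by the event it is wired to. */
    static int counter_reset(struct counter *c, unsigned int event)
    {
        switch (event) {
        case RES_MAX_USAGE:
            c->max_usage = 0;
            break;
        case RES_FAILCNT:
            c->failcnt = 0;
            break;
        }
        return 0;
    }

    int main(void)
    {
        struct counter c = { .max_usage = 4096, .failcnt = 7 };

        counter_reset(&c, RES_FAILCNT);
        printf("max_usage=%ld failcnt=%ld\n", c.max_usage, c.failcnt);
        return 0;
    }
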
 
@@ -916,7 +923,7 @@ static struct cftype mem_cgroup_files[] = {
        {
                .name = "max_usage_in_bytes",
                .private = RES_MAX_USAGE,
-               .trigger = mem_cgroup_max_reset,
+               .trigger = mem_cgroup_reset,
                .read_u64 = mem_cgroup_read,
        },
        {
@@ -928,6 +935,7 @@ static struct cftype mem_cgroup_files[] = {
        {
                .name = "failcnt",
                .private = RES_FAILCNT,
+               .trigger = mem_cgroup_reset,
                .read_u64 = mem_cgroup_read,
        },
        {
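
With the trigger wired up for both files, userspace can clear either counter by writing to max_usage_in_bytes or failcnt; the memory controller documentation describes this as echoing 0 into the file. A small illustrative program; the mount point and group name are assumptions and depend on where the cgroup hierarchy is mounted on a given system:

    #include <stdio.h>

    int main(void)
    {
        /* Assumed path: adjust to wherever the memory controller is
         * mounted and to the cgroup whose counter should be cleared. */
        const char *path = "/sys/fs/cgroup/memory/mygroup/memory.failcnt";
        FILE *f = fopen(path, "w");

        if (!f) {
            perror("fopen");
            return 1;
        }
        fputs("0\n", f);   /* writing 0 is the documented way to reset */
        fclose(f);
        return 0;
    }
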
@@ -976,6 +984,29 @@ static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
        kfree(mem->info.nodeinfo[node]);
 }
 
+static struct mem_cgroup *mem_cgroup_alloc(void)
+{
+       struct mem_cgroup *mem;
+
+       if (sizeof(*mem) < PAGE_SIZE)
+               mem = kmalloc(sizeof(*mem), GFP_KERNEL);
+       else
+               mem = vmalloc(sizeof(*mem));
+
+       if (mem)
+               memset(mem, 0, sizeof(*mem));
+       return mem;
+}
+
+static void mem_cgroup_free(struct mem_cgroup *mem)
+{
+       if (sizeof(*mem) < PAGE_SIZE)
+               kfree(mem);
+       else
+               vfree(mem);
+}
+
+
 static struct cgroup_subsys_state *
 mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
 {
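
The new mem_cgroup_alloc()/mem_cgroup_free() helpers pick the allocator by size: kmalloc() while struct mem_cgroup still fits in a page, vmalloc() once it does not, presumably so a large control structure no longer depends on a physically contiguous allocation. The free side repeats the same size test so kfree() and vfree() stay paired with whichever allocator was used. A userspace sketch of that size-threshold dispatch; 4096 stands in for PAGE_SIZE and plain malloc()/free() stand in for both allocators, since only the selection and pairing logic is being illustrated:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define PAGE_SIZE 4096                  /* illustrative, per-arch in reality */

    struct big_state { char data[8192]; };  /* pretend struct mem_cgroup */

    static void *state_alloc(size_t size)
    {
        void *p;

        if (size < PAGE_SIZE)
            p = malloc(size);   /* stands in for kmalloc() */
        else
            p = malloc(size);   /* stands in for vmalloc() */
        if (p)
            memset(p, 0, size); /* mirrors the memset() in mem_cgroup_alloc() */
        return p;
    }

    static void state_free(void *p, size_t size)
    {
        /* The free path repeats the same test so each object goes back
         * to the allocator it came from. */
        if (size < PAGE_SIZE)
            free(p);            /* stands in for kfree() */
        else
            free(p);            /* stands in for vfree() */
    }

    int main(void)
    {
        struct big_state *s = state_alloc(sizeof(*s));

        if (!s)
            return 1;
        printf("allocated %zu bytes via the %s path\n", sizeof(*s),
               sizeof(*s) < PAGE_SIZE ? "kmalloc-like" : "vmalloc-like");
        state_free(s, sizeof(*s));
        return 0;
    }
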
@@ -986,16 +1017,13 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
                mem = &init_mem_cgroup;
                page_cgroup_cache = KMEM_CACHE(page_cgroup, SLAB_PANIC);
        } else {
-               mem = kzalloc(sizeof(struct mem_cgroup), GFP_KERNEL);
+               mem = mem_cgroup_alloc();
+               if (!mem)
+                       return ERR_PTR(-ENOMEM);
        }
 
-       if (mem == NULL)
-               return ERR_PTR(-ENOMEM);
-
        res_counter_init(&mem->res);
 
-       memset(&mem->info, 0, sizeof(mem->info));
-
        for_each_node_state(node, N_POSSIBLE)
                if (alloc_mem_cgroup_per_zone_info(mem, node))
                        goto free_out;
@@ -1005,7 +1033,7 @@ free_out:
        for_each_node_state(node, N_POSSIBLE)
                free_mem_cgroup_per_zone_info(mem, node);
        if (cont->parent != NULL)
-               kfree(mem);
+               mem_cgroup_free(mem);
        return ERR_PTR(-ENOMEM);
 }
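
On allocation failure mem_cgroup_create() now returns ERR_PTR(-ENOMEM) directly from the non-root branch, and the free_out error path uses mem_cgroup_free() so it matches the new allocator. For reference, a minimal userspace re-creation of the ERR_PTR/IS_ERR/PTR_ERR convention used here; the kernel's real definitions live in include/linux/err.h and the versions below are simplified stand-ins:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Simplified stand-ins for the kernel helpers: an error code is carried
     * through a pointer as a small negative value. */
    #define MAX_ERRNO 4095

    static void *ERR_PTR(long error)      { return (void *)error; }
    static long  PTR_ERR(const void *ptr) { return (long)ptr; }
    static int   IS_ERR(const void *ptr)
    {
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    static void *create_state(int simulate_failure)
    {
        void *mem = simulate_failure ? NULL : malloc(64);

        if (!mem)
            return ERR_PTR(-ENOMEM);   /* mirrors mem_cgroup_create() */
        return mem;
    }

    int main(void)
    {
        void *s = create_state(1);

        if (IS_ERR(s)) {
            printf("create failed: %ld\n", PTR_ERR(s));
            return 1;
        }
        free(s);
        return 0;
    }
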
 
@@ -1025,7 +1053,7 @@ static void mem_cgroup_destroy(struct cgroup_subsys *ss,
        for_each_node_state(node, N_POSSIBLE)
                free_mem_cgroup_per_zone_info(mem, node);
 
-       kfree(mem_cgroup_from_cont(cont));
+       mem_cgroup_free(mem_cgroup_from_cont(cont));
 }
 
 static int mem_cgroup_populate(struct cgroup_subsys *ss,