#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/bit_spinlock.h>
#include <linux/page_cgroup.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>
#include <linux/cgroup.h>
/* Reset one page_cgroup entry: no flags set, not charged to any cgroup. */
static void __meminit
__init_page_cgroup(struct page_cgroup *pc, unsigned long pfn)
{
	pc->flags = 0;
	pc->mem_cgroup = NULL;
	pc->page = pfn_to_page(pfn);
}
static unsigned long total_usage;
#if !defined(CONFIG_SPARSEMEM)
void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
	pgdat->node_page_cgroup = NULL;
}
struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned long offset;
	struct page_cgroup *base;

	base = NODE_DATA(page_to_nid(page))->node_page_cgroup;
	if (unlikely(!base))
		return NULL;

	offset = pfn - NODE_DATA(page_to_nid(page))->node_start_pfn;
	return base + offset;
}
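/*
 * Worked example of the lookup above (illustrative numbers): if this
 * node spans pfns 0x1000-0x4fff, node_start_pfn is 0x1000 and the
 * entry for the page at pfn 0x1234 sits at base + (0x1234 - 0x1000),
 * i.e. index 0x234 of the per-node array built by
 * alloc_node_page_cgroup() below.
 */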
/* Allocate and initialize one flat page_cgroup table covering a whole node. */
static int __init alloc_node_page_cgroup(int nid)
{
	struct page_cgroup *base, *pc;
	unsigned long table_size;
	unsigned long start_pfn, nr_pages, index;

	start_pfn = NODE_DATA(nid)->node_start_pfn;
	nr_pages = NODE_DATA(nid)->node_spanned_pages;

	if (!nr_pages)
		return 0;

	table_size = sizeof(struct page_cgroup) * nr_pages;

	base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
	if (!base)
		return -ENOMEM;
	for (index = 0; index < nr_pages; index++) {
		pc = base + index;
		__init_page_cgroup(pc, start_pfn + index);
	}
	NODE_DATA(nid)->node_page_cgroup = base;
	total_usage += table_size;
	return 0;
}
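/*
 * Back-of-the-envelope cost of the table above (illustrative; the
 * real sizeof(struct page_cgroup) depends on the architecture and
 * config): assuming 24 bytes per entry and 4KiB pages, a node with
 * 8GiB of memory spans 2M page frames, so its table needs
 * 2M * 24B = 48MiB, roughly 0.6% of the node's memory. This is the
 * total reported by the printk in page_cgroup_init() below.
 */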
void __init page_cgroup_init(void)
{
	int nid, fail;

	if (mem_cgroup_subsys.disabled)
		return;

	for_each_online_node(nid) {
		fail = alloc_node_page_cgroup(nid);
		if (fail)
			goto fail;
	}
	printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage);
	printk(KERN_INFO "please try cgroup_disable=memory option if you"
		" don't want memory cgroups\n");
	return;
fail:
	printk(KERN_CRIT "allocation of page_cgroup failed.\n");
	printk(KERN_CRIT "please try cgroup_disable=memory boot option\n");
	panic("Out of memory");
}
#else /* CONFIG_SPARSEMEM */
struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	struct mem_section *section = __pfn_to_section(pfn);

	return section->page_cgroup + pfn;
}
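/*
 * Note the biased-pointer trick behind the addition above:
 * init_section_page_cgroup() stores (base - pfn_of_first_entry) in
 * section->page_cgroup, so a lookup is a single add with no
 * per-section offset subtraction. E.g. if a section covers pfns
 * 0x8000-0x8fff and its table starts at base, section->page_cgroup
 * holds base - 0x8000 and the entry for pfn 0x8123 is
 * (base - 0x8000) + 0x8123, which is base + 0x123.
 */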
/* __alloc_bootmem...() is only safe to call before slab_is_available() */
int __init_refok init_section_page_cgroup(unsigned long pfn)
{
	struct mem_section *section;
	struct page_cgroup *base, *pc;
	unsigned long table_size;
	int nid, index;

	section = __pfn_to_section(pfn);

	if (!section->page_cgroup) {
		nid = page_to_nid(pfn_to_page(pfn));
		table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
		if (slab_is_available()) {
			base = kmalloc_node(table_size, GFP_KERNEL, nid);
			if (!base)
				base = vmalloc_node(table_size, nid);
		} else {
			base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
				table_size,
				PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
		}
	} else {
		/*
		 * We don't have to allocate page_cgroup again, but the
		 * address of the memmap may have changed, so we may have
		 * to initialize again.
		 */
		base = section->page_cgroup + pfn;
		table_size = 0;
		/* check whether the address of the memmap has changed */
		if (base->page == pfn_to_page(pfn))
			return 0;
	}

	if (!base) {
		printk(KERN_ERR "page cgroup allocation failure\n");
		return -ENOMEM;
	}

	for (index = 0; index < PAGES_PER_SECTION; index++) {
		pc = base + index;
		__init_page_cgroup(pc, pfn + index);
	}

	section = __pfn_to_section(pfn);
	section->page_cgroup = base - pfn;
	total_usage += table_size;
	return 0;
}
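/*
 * Re-initialization rationale: when a section is offlined and later
 * onlined again, its memmap may be reallocated, leaving the pc->page
 * pointers cached at first init stale. The base->page check above
 * detects this cheaply: if the first entry still points at the
 * current memmap, nothing changed and the walk is skipped; otherwise
 * every entry is rewritten (with table_size == 0 so total_usage is
 * not inflated by the second pass).
 */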
#ifdef CONFIG_MEMORY_HOTPLUG
void __free_page_cgroup(unsigned long pfn)
{
	struct mem_section *ms;
	struct page_cgroup *base;

	ms = __pfn_to_section(pfn);
	if (!ms || !ms->page_cgroup)
		return;
	base = ms->page_cgroup + pfn;
	if (is_vmalloc_addr(base)) {
		vfree(base);
		ms->page_cgroup = NULL;
	} else {
		struct page *page = virt_to_page(base);
		if (!PageReserved(page)) { /* Is bootmem ? */
			kfree(base);
			ms->page_cgroup = NULL;
		}
	}
}
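/*
 * Note on the PageReserved() test above: tables allocated at boot come
 * from the bootmem allocator, whose pages are marked reserved and were
 * never owned by slab, so passing them to kfree() would be invalid;
 * they are deliberately left in place. Only tables allocated after
 * boot (kmalloc or vmalloc, for hotplugged sections) are freed here.
 */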
int __meminit online_page_cgroup(unsigned long start_pfn,
			unsigned long nr_pages, int nid)
{
	unsigned long start, end, pfn;
	int fail = 0;

	start = start_pfn & ~(PAGES_PER_SECTION - 1);
	end = ALIGN(start_pfn + nr_pages, PAGES_PER_SECTION);

	for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) {
		if (!pfn_present(pfn))
			continue;
		fail = init_section_page_cgroup(pfn);
	}
	if (!fail)
		return 0;
	/* rollback: free everything allocated so far */
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_cgroup(pfn);
	return -ENOMEM;
}
int __meminit offline_page_cgroup(unsigned long start_pfn,
		unsigned long nr_pages, int nid)
{
	unsigned long start, end, pfn;

	start = start_pfn & ~(PAGES_PER_SECTION - 1);
	end = ALIGN(start_pfn + nr_pages, PAGES_PER_SECTION);

	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_cgroup(pfn);
	return 0;
}
static int __meminit page_cgroup_callback(struct notifier_block *self,
			unsigned long action, void *arg)
{
	struct memory_notify *mn = arg;
	int ret = 0;

	switch (action) {
	case MEM_GOING_ONLINE:
		ret = online_page_cgroup(mn->start_pfn,
				mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_OFFLINE:
		offline_page_cgroup(mn->start_pfn,
				mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_CANCEL_ONLINE:
	case MEM_GOING_OFFLINE:
		break;
	case MEM_ONLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}
	return notifier_from_errno(ret);
}

#endif
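/*
 * Hotplug flow in brief: page_cgroup_init() below registers the
 * callback above via hotplug_memory_notifier(). On MEM_GOING_ONLINE
 * the tables for all sections in the range are allocated before the
 * new pages become usable; an allocation failure is propagated through
 * notifier_from_errno() and cancels the online operation. MEM_OFFLINE
 * tears the tables down again via offline_page_cgroup().
 */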
void __init page_cgroup_init(void)
{
	unsigned long pfn;
	int fail = 0;

	if (mem_cgroup_subsys.disabled)
		return;

	for (pfn = 0; !fail && pfn < max_pfn; pfn += PAGES_PER_SECTION) {
		if (!pfn_present(pfn))
			continue;
		fail = init_section_page_cgroup(pfn);
	}
	if (fail) {
		printk(KERN_CRIT "try cgroup_disable=memory boot option\n");
		panic("Out of memory");
	}
	hotplug_memory_notifier(page_cgroup_callback, 0);
	printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage);
	printk(KERN_INFO "please try cgroup_disable=memory option if you don't"
		" want memory cgroups\n");
}
void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
	return;
}

#endif