/*
 * bootmem - A boot-time physical memory allocator and configurator
 *
 * Copyright (C) 1999 Ingo Molnar
 *                1999 Kanoj Sarcar, SGI
 *
 * Access to this subsystem has to be serialized externally (which is true
 * for the boot process anyway).
 */
#include <linux/init.h>
#include <linux/pfn.h>
#include <linux/bootmem.h>
#include <linux/module.h>

#include <asm/bug.h>
#include <asm/io.h>
#include <asm/processor.h>

#include "internal.h"
unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;

static LIST_HEAD(bdata_list);
#ifdef CONFIG_CRASH_DUMP
/*
 * If we have booted due to a crash, max_pfn will be a very low value. We need
 * to know the amount of memory that the previous kernel used.
 */
unsigned long saved_max_pfn;
#endif

bootmem_data_t bootmem_node_data[MAX_NUMNODES] __initdata;
/*
 * Given an initialised bdata, this returns the size of the boot bitmap
 * in bytes.
 */
static unsigned long __init get_mapsize(bootmem_data_t *bdata)
{
	unsigned long mapsize;
	unsigned long start = PFN_DOWN(bdata->node_boot_start);
	unsigned long end = bdata->node_low_pfn;

	mapsize = ((end - start) + 7) / 8;
	return ALIGN(mapsize, sizeof(long));
}
/* return the number of _pages_ that will be allocated for the boot bitmap */
unsigned long __init bootmem_bootmap_pages(unsigned long pages)
{
	unsigned long mapsize;

	mapsize = (pages+7)/8;
	mapsize = (mapsize + ~PAGE_MASK) & PAGE_MASK;
	mapsize >>= PAGE_SHIFT;

	return mapsize;
}
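/*
 * Worked example (illustrative, not part of the original file): a node
 * covering 128 MiB with 4 KiB pages has 32768 page frames, so the bitmap
 * needs 32768/8 = 4096 bytes, i.e. exactly one page:
 *
 *	unsigned long frames = (128UL << 20) >> PAGE_SHIFT;	// 32768
 *	unsigned long pages  = bootmem_bootmap_pages(frames);	// 1
 *
 * Architectures typically size the bitmap this way before carving it out
 * of the first usable region and handing that frame to init_bootmem*().
 */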
static void __init link_bootmem(bootmem_data_t *bdata)
{
	bootmem_data_t *ent;

	if (list_empty(&bdata_list)) {
		list_add(&bdata->list, &bdata_list);
		return;
	}
	/* otherwise insert in node_boot_start order */
	list_for_each_entry(ent, &bdata_list, list) {
		if (bdata->node_boot_start < ent->node_boot_start) {
			list_add_tail(&bdata->list, &ent->list);
			return;
		}
	}
	list_add_tail(&bdata->list, &bdata_list);
}
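/*
 * Illustrative note (an assumption about typical usage, not from the
 * original file): registration order does not matter because the list is
 * kept sorted by node_boot_start. Hypothetical direct calls such as
 *
 *	link_bootmem(&bootmem_node_data[1]);	// node_boot_start == 4 GiB
 *	link_bootmem(&bootmem_node_data[0]);	// node_boot_start == 0
 *
 * still leave bdata_list ordered {node 0, node 1}, so the UMA-style
 * walkers below always try low memory first.
 */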
/*
 * Called once to set up the allocator itself.
 */
static unsigned long __init init_bootmem_core(bootmem_data_t *bdata,
	unsigned long mapstart, unsigned long start, unsigned long end)
{
	unsigned long mapsize;

	mminit_validate_memmodel_limits(&start, &end);
	bdata->node_bootmem_map = phys_to_virt(PFN_PHYS(mapstart));
	bdata->node_boot_start = PFN_PHYS(start);
	bdata->node_low_pfn = end;
	link_bootmem(bdata);

	/*
	 * Initially all pages are reserved - setup_arch() has to
	 * register free RAM areas explicitly.
	 */
	mapsize = get_mapsize(bdata);
	memset(bdata->node_bootmem_map, 0xff, mapsize);

	return mapsize;
}
unsigned long __init init_bootmem_node(pg_data_t *pgdat, unsigned long freepfn,
				unsigned long startpfn, unsigned long endpfn)
{
	return init_bootmem_core(pgdat->bdata, freepfn, startpfn, endpfn);
}

unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
{
	max_low_pfn = pages;
	min_low_pfn = start;
	return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages);
}
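/*
 * Illustrative sketch (not from the original file) of how an
 * architecture's setup_arch() typically drives the UMA variant; the
 * bitmap_pfn/ram_start_pfn/ram_end_pfn values are hypothetical and would
 * come from the firmware memory map:
 *
 *	bootmap_size = init_bootmem(bitmap_pfn, max_low_pfn);
 *	free_bootmem(PFN_PHYS(ram_start_pfn),
 *		     PFN_PHYS(ram_end_pfn - ram_start_pfn));
 *	reserve_bootmem(PFN_PHYS(bitmap_pfn), bootmap_size, BOOTMEM_DEFAULT);
 *
 * Everything starts out reserved (see the memset above); free_bootmem()
 * opens up the usable RAM and reserve_bootmem() then re-protects the
 * bitmap itself before any boot-time allocations are made.
 */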
static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
{
	struct page *page;
	unsigned long pfn, idx, *map;
	unsigned long i, count;
	int gofast = 0;

	BUG_ON(!bdata->node_bootmem_map);

	count = 0;
	/* first extant page of the node */
	pfn = PFN_DOWN(bdata->node_boot_start);
	idx = bdata->node_low_pfn - pfn;
	map = bdata->node_bootmem_map;
	/*
	 * Check if we are aligned to BITS_PER_LONG pages. If so, we might
	 * be able to free page orders of that size at once.
	 */
	if (!(pfn & (BITS_PER_LONG-1)))
		gofast = 1;
	for (i = 0; i < idx; ) {
		unsigned long v = ~map[i / BITS_PER_LONG];

		if (gofast && v == ~0UL) {
			int order;

			page = pfn_to_page(pfn);
			count += BITS_PER_LONG;
			order = ffs(BITS_PER_LONG) - 1;
			__free_pages_bootmem(page, order);
			i += BITS_PER_LONG;
			page += BITS_PER_LONG;
		} else if (v) {
			unsigned long m;

			page = pfn_to_page(pfn);
			for (m = 1; m && i < idx; m <<= 1, page++, i++) {
				if (v & m) {
					count++;
					__free_pages_bootmem(page, 0);
				}
			}
		} else {
			i += BITS_PER_LONG;
		}
		pfn += BITS_PER_LONG;
	}

	/*
	 * Now free the allocator bitmap itself, it's not
	 * needed anymore:
	 */
	page = virt_to_page(bdata->node_bootmem_map);
	idx = (get_mapsize(bdata) + PAGE_SIZE-1) >> PAGE_SHIFT;
	for (i = 0; i < idx; i++, page++) {
		count++;
		__free_pages_bootmem(page, 0);
	}
	bdata->node_bootmem_map = NULL;

	return count;
}
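/*
 * Illustrative arithmetic (not part of the original file): with
 * BITS_PER_LONG == 64 and 4 KiB pages, one completely free bitmap word
 * lets the fast path above hand the buddy allocator a single high-order
 * block instead of 64 separate order-0 pages:
 *
 *	order = ffs(BITS_PER_LONG) - 1;		// ffs(64) - 1 == 6
 *	// one __free_pages_bootmem(page, 6) frees 64 pages == 256 KiB
 */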
unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
{
	register_page_bootmem_info_node(pgdat);
	return free_all_bootmem_core(pgdat->bdata);
}

unsigned long __init free_all_bootmem(void)
{
	return free_all_bootmem_core(NODE_DATA(0)->bdata);
}
static void __init free_bootmem_core(bootmem_data_t *bdata, unsigned long addr,
				     unsigned long size)
{
	unsigned long sidx, eidx;
	unsigned long i;

	BUG_ON(!size);

	/* out of range for this node, nothing to free here */
	if (addr + size < bdata->node_boot_start ||
		PFN_DOWN(addr) > bdata->node_low_pfn)
		return;

	if (addr >= bdata->node_boot_start && addr < bdata->last_success)
		bdata->last_success = addr;

	/*
	 * Convert to bitmap indices: round the start up and the end down,
	 * so partially freed pages stay reserved.
	 */
	if (PFN_UP(addr) > PFN_DOWN(bdata->node_boot_start))
		sidx = PFN_UP(addr) - PFN_DOWN(bdata->node_boot_start);
	else
		sidx = 0;

	eidx = PFN_DOWN(addr + size - bdata->node_boot_start);
	if (eidx > bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start))
		eidx = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);

	for (i = sidx; i < eidx; i++) {
		if (unlikely(!test_and_clear_bit(i, bdata->node_bootmem_map)))
			BUG();
	}
}
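/*
 * Worked example (illustrative, not from the original file), assuming
 * node_boot_start == 0 and 4 KiB pages: freeing the byte range
 * [0x1800, 0x3800) clears only the bit for pfn 2, because
 * PFN_UP(0x1800) == 2 and PFN_DOWN(0x3800) == 3; the partially covered
 * pages at either end of the range stay reserved.
 */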
void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
			      unsigned long size)
{
	free_bootmem_core(pgdat->bdata, physaddr, size);
}

void __init free_bootmem(unsigned long addr, unsigned long size)
{
	bootmem_data_t *bdata;

	list_for_each_entry(bdata, &bdata_list, list)
		free_bootmem_core(bdata, addr, size);
}
/*
 * Marks a particular physical memory range as unallocatable. Usable RAM
 * might be used for boot-time allocations - or it might get added
 * to the free page pool later on.
 */
static int __init can_reserve_bootmem_core(bootmem_data_t *bdata,
			unsigned long addr, unsigned long size, int flags)
{
	unsigned long sidx, eidx;
	unsigned long i;

	BUG_ON(!size);

	/* out of range for this node, nothing to check here */
	if (addr + size < bdata->node_boot_start ||
		PFN_DOWN(addr) > bdata->node_low_pfn)
		return 0;

	/*
	 * Convert to bitmap indices: round the start down and the end up,
	 * so partially covered pages are included.
	 */
	if (addr > bdata->node_boot_start)
		sidx = PFN_DOWN(addr - bdata->node_boot_start);
	else
		sidx = 0;

	eidx = PFN_UP(addr + size - bdata->node_boot_start);
	if (eidx > bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start))
		eidx = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);

	for (i = sidx; i < eidx; i++) {
		if (test_bit(i, bdata->node_bootmem_map)) {
			if (flags & BOOTMEM_EXCLUSIVE)
				return -EBUSY;
		}
	}

	return 0;
}
static void __init reserve_bootmem_core(bootmem_data_t *bdata,
			unsigned long addr, unsigned long size, int flags)
{
	unsigned long sidx, eidx;
	unsigned long i;

	BUG_ON(!size);

	/* out of range for this node, nothing to reserve here */
	if (addr + size < bdata->node_boot_start ||
		PFN_DOWN(addr) > bdata->node_low_pfn)
		return;

	/*
	 * Convert to bitmap indices: round the start down and the end up,
	 * so partially covered pages are reserved too.
	 */
	if (addr > bdata->node_boot_start)
		sidx = PFN_DOWN(addr - bdata->node_boot_start);
	else
		sidx = 0;

	eidx = PFN_UP(addr + size - bdata->node_boot_start);
	if (eidx > bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start))
		eidx = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);

	for (i = sidx; i < eidx; i++) {
		if (test_and_set_bit(i, bdata->node_bootmem_map)) {
#ifdef CONFIG_DEBUG_BOOTMEM
			printk("hm, page %08lx reserved twice.\n", i*PAGE_SIZE);
#endif
		}
	}
}
int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
				unsigned long size, int flags)
{
	int ret;

	ret = can_reserve_bootmem_core(pgdat->bdata, physaddr, size, flags);
	if (ret < 0)
		return ret;

	reserve_bootmem_core(pgdat->bdata, physaddr, size, flags);
	return 0;
}
#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
int __init reserve_bootmem(unsigned long addr, unsigned long size,
			   int flags)
{
	bootmem_data_t *bdata;
	int ret;

	list_for_each_entry(bdata, &bdata_list, list) {
		ret = can_reserve_bootmem_core(bdata, addr, size, flags);
		if (ret < 0)
			return ret;
	}
	list_for_each_entry(bdata, &bdata_list, list)
		reserve_bootmem_core(bdata, addr, size, flags);

	return 0;
}
#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */
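/*
 * Illustrative sketch (not from the original file): reserving a
 * bootloader-provided initrd, with hypothetical initrd_start/initrd_size
 * values, and failing gracefully if the range is already claimed:
 *
 *	if (reserve_bootmem(initrd_start, initrd_size, BOOTMEM_EXCLUSIVE))
 *		printk(KERN_ERR "initrd overlaps reserved memory\n");
 *
 * With BOOTMEM_DEFAULT the same call would silently tolerate a double
 * reservation instead of returning an error.
 */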
/*
 * We 'merge' subsequent allocations to save space. We might 'lose'
 * some fraction of a page if allocations cannot be satisfied due to
 * size constraints on boxes where there is physical RAM space
 * fragmentation - in these cases (mostly large memory boxes) this
 * is not a problem.
 *
 * On low memory boxes we get it right in 100% of the cases.
 *
 * alignment has to be a power of 2 value.
 *
 * NOTE: This function is _not_ reentrant.
 */
static void * __init
alloc_bootmem_core(struct bootmem_data *bdata, unsigned long size,
		unsigned long align, unsigned long goal, unsigned long limit)
{
	unsigned long areasize, preferred;
	unsigned long i, start = 0, incr, eidx, end_pfn;
	void *ret;
	unsigned long node_boot_start;
	void *node_bootmem_map;

	if (!size) {
		printk("alloc_bootmem_core(): zero-sized request\n");
		BUG();
	}
	BUG_ON(align & (align-1));

	/* on nodes without memory - bootmem_map is NULL */
	if (!bdata->node_bootmem_map)
		return NULL;

	/* bdata->node_boot_start is supposed to be (12+6)bits alignment on x86_64 ? */
	node_boot_start = bdata->node_boot_start;
	node_bootmem_map = bdata->node_bootmem_map;
	if (align) {
		node_boot_start = ALIGN(bdata->node_boot_start, align);
		if (node_boot_start > bdata->node_boot_start)
			node_bootmem_map = (unsigned long *)bdata->node_bootmem_map +
			    PFN_DOWN(node_boot_start - bdata->node_boot_start)/BITS_PER_LONG;
	}

	if (limit && node_boot_start >= limit)
		return NULL;

	end_pfn = bdata->node_low_pfn;
	limit = PFN_DOWN(limit);
	if (limit && end_pfn > limit)
		end_pfn = limit;

	eidx = end_pfn - PFN_DOWN(node_boot_start);

	/*
	 * We try to allocate bootmem pages above 'goal'
	 * first, then we try to allocate lower pages.
	 */
	preferred = 0;
	if (goal && PFN_DOWN(goal) < end_pfn) {
		if (goal > node_boot_start)
			preferred = goal - node_boot_start;

		if (bdata->last_success > node_boot_start &&
			bdata->last_success - node_boot_start >= preferred)
			if (!limit || (limit && limit > bdata->last_success))
				preferred = bdata->last_success - node_boot_start;
	}

	preferred = PFN_DOWN(ALIGN(preferred, align));
	areasize = (size + PAGE_SIZE-1) / PAGE_SIZE;
	incr = align >> PAGE_SHIFT ? : 1;

restart_scan:
	for (i = preferred; i < eidx;) {
		unsigned long j;

		i = find_next_zero_bit(node_bootmem_map, eidx, i);
		i = ALIGN(i, incr);
		if (i >= eidx)
			break;
		if (test_bit(i, node_bootmem_map)) {
			i += incr;
			continue;
		}
		for (j = i + 1; j < i + areasize; ++j) {
			if (j >= eidx)
				goto fail_block;
			if (test_bit(j, node_bootmem_map))
				goto fail_block;
		}
		start = i;
		goto found;
	fail_block:
		i = ALIGN(j, incr);
		if (i == j)
			i += incr;
	}

	if (preferred > 0) {
		preferred = 0;
		goto restart_scan;
	}
	return NULL;

found:
	bdata->last_success = PFN_PHYS(start) + node_boot_start;
	BUG_ON(start >= eidx);

	/*
	 * Is the next page of the previous allocation-end the start
	 * of this allocation's buffer? If yes then we can 'merge'
	 * the previous partial page with this allocation.
	 */
	if (align < PAGE_SIZE &&
	    bdata->last_offset && bdata->last_pos+1 == start) {
		unsigned long offset, remaining_size;
		offset = ALIGN(bdata->last_offset, align);
		BUG_ON(offset > PAGE_SIZE);
		remaining_size = PAGE_SIZE - offset;
		if (size < remaining_size) {
			areasize = 0;
			/* last_pos unchanged */
			bdata->last_offset = offset + size;
			ret = phys_to_virt(bdata->last_pos * PAGE_SIZE +
					   offset + node_boot_start);
		} else {
			remaining_size = size - remaining_size;
			areasize = (remaining_size + PAGE_SIZE-1) / PAGE_SIZE;
			ret = phys_to_virt(bdata->last_pos * PAGE_SIZE +
					   offset + node_boot_start);
			bdata->last_pos = start + areasize - 1;
			bdata->last_offset = remaining_size;
		}
		bdata->last_offset &= ~PAGE_MASK;
	} else {
		bdata->last_pos = start + areasize - 1;
		bdata->last_offset = size & ~PAGE_MASK;
		ret = phys_to_virt(start * PAGE_SIZE + node_boot_start);
	}

	/*
	 * Reserve the area now:
	 */
	for (i = start; i < start + areasize; i++)
		if (unlikely(test_and_set_bit(i, node_bootmem_map)))
			BUG();
	memset(ret, 0, size);
	return ret;
}
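/*
 * Illustrative example (not part of the original file) of the merge logic
 * above, assuming a single node "bdata" and SMP_CACHE_BYTES == 64:
 *
 *	p1 = alloc_bootmem_core(bdata, 100, SMP_CACHE_BYTES, 0, 0);
 *	p2 = alloc_bootmem_core(bdata, 100, SMP_CACHE_BYTES, 0, 0);
 *
 * The second request finds start == last_pos + 1, so it is placed at
 * offset ALIGN(100, 64) == 128 within the same page as p1, rather than
 * burning a fresh 4 KiB page for every small allocation.
 */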
void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
				      unsigned long goal)
{
	bootmem_data_t *bdata;
	void *ptr;

	list_for_each_entry(bdata, &bdata_list, list) {
		ptr = alloc_bootmem_core(bdata, size, align, goal, 0);
		if (ptr)
			return ptr;
	}
	return NULL;
}
void * __init __alloc_bootmem(unsigned long size, unsigned long align,
			      unsigned long goal)
{
	void *mem = __alloc_bootmem_nopanic(size, align, goal);

	if (mem)
		return mem;
	/*
	 * Whoops, we cannot satisfy the allocation request.
	 */
	printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
	panic("Out of memory");
	return NULL;
}
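/*
 * Usage note (illustrative, not from the original file): most callers go
 * through the wrappers in <linux/bootmem.h>, e.g. sizing a hypothetical
 * early hash table:
 *
 *	table = alloc_bootmem(nr_entries * sizeof(*table));
 *
 * which typically expands to __alloc_bootmem(size, SMP_CACHE_BYTES,
 * __pa(MAX_DMA_ADDRESS)) and therefore panics instead of returning NULL;
 * callers that can fall back use __alloc_bootmem_nopanic() directly.
 */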
void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
	void *ptr;

	ptr = alloc_bootmem_core(pgdat->bdata, size, align, goal, 0);
	if (ptr)
		return ptr;

	return __alloc_bootmem(size, align, goal);
}
#ifdef CONFIG_SPARSEMEM
void * __init alloc_bootmem_section(unsigned long size,
				    unsigned long section_nr)
{
	void *ptr;
	unsigned long limit, goal, start_nr, end_nr, pfn;
	struct pglist_data *pgdat;

	pfn = section_nr_to_pfn(section_nr);
	goal = PFN_PHYS(pfn);
	limit = PFN_PHYS(section_nr_to_pfn(section_nr + 1)) - 1;
	pgdat = NODE_DATA(early_pfn_to_nid(pfn));
	ptr = alloc_bootmem_core(pgdat->bdata, size, SMP_CACHE_BYTES, goal,
				 limit);
	if (!ptr)
		return NULL;

	start_nr = pfn_to_section_nr(PFN_DOWN(__pa(ptr)));
	end_nr = pfn_to_section_nr(PFN_DOWN(__pa(ptr) + size));
	if (start_nr != section_nr || end_nr != section_nr) {
		printk(KERN_WARNING "alloc_bootmem failed on section %ld.\n",
		       section_nr);
		free_bootmem_core(pgdat->bdata, __pa(ptr), size);
		ptr = NULL;
	}

	return ptr;
}
#endif
void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
	void *ptr;

	ptr = alloc_bootmem_core(pgdat->bdata, size, align, goal, 0);
	if (ptr)
		return ptr;

	return __alloc_bootmem_nopanic(size, align, goal);
}
#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT	0xffffffffUL
#endif
void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
				  unsigned long goal)
{
	bootmem_data_t *bdata;
	void *ptr;

	list_for_each_entry(bdata, &bdata_list, list) {
		ptr = alloc_bootmem_core(bdata, size, align, goal,
					 ARCH_LOW_ADDRESS_LIMIT);
		if (ptr)
			return ptr;
	}

	/*
	 * Whoops, we cannot satisfy the allocation request.
	 */
	printk(KERN_ALERT "low bootmem alloc of %lu bytes failed!\n", size);
	panic("Out of low memory");
	return NULL;
}
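/*
 * Illustrative sketch (not part of the original file): boot code that
 * needs memory below ARCH_LOW_ADDRESS_LIMIT, e.g. a hypothetical early
 * DMA bounce buffer, uses the _low variants:
 *
 *	buf = alloc_bootmem_low_pages(1 << 20);
 *
 * which funnels into __alloc_bootmem_low() above with the low-address
 * cap passed as the limit to alloc_bootmem_core().
 */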
void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
				       unsigned long align, unsigned long goal)
{
	return alloc_bootmem_core(pgdat->bdata, size, align, goal,
				  ARCH_LOW_ADDRESS_LIMIT);
}