/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/memory_hotplug.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>

#ifndef Dprintk
#define Dprintk(x...)
#endif

struct dma_mapping_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

static unsigned long dma_reserve __initdata;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * NOTE: pagetable_init allocates all the fixmap pagetables contiguously in
 * physical space, so we can cache the location of the first one and move
 * around without checking the pgd every time.
 */

void show_mem(void)
{
	long i, total = 0, reserved = 0;
	long shared = 0, cached = 0;
	pg_data_t *pgdat;
	struct page *page;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();
	printk(KERN_INFO "Free swap:       %6ldkB\n",
	       nr_swap_pages << (PAGE_SHIFT-10));

	for_each_online_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
			page = pfn_to_page(pgdat->node_start_pfn + i);
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
	}
	printk(KERN_INFO "%lu pages of RAM\n", total);
	printk(KERN_INFO "%lu reserved pages\n", reserved);
	printk(KERN_INFO "%lu pages shared\n", shared);
	printk(KERN_INFO "%lu pages swap cached\n", cached);
}

/* References to section boundaries */

int after_bootmem;

static __init void *spp_getpage(void)
{
	void *ptr;

	if (after_bootmem)
		ptr = (void *) get_zeroed_page(GFP_ATOMIC);
	else
		ptr = alloc_bootmem_pages(PAGE_SIZE);
	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK))
		panic("set_pte_phys: cannot allocate page data %s\n",
		      after_bootmem ? "after bootmem" : "");

	Dprintk("spp_getpage %p\n", ptr);
	return ptr;
}

static __init void set_pte_phys(unsigned long vaddr,
				unsigned long phys, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte, new_pte;

	Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);

	pgd = pgd_offset_k(vaddr);
	if (pgd_none(*pgd)) {
		printk("PGD FIXMAP MISSING, it should be setup in head.S!\n");
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		pmd = (pmd_t *) spp_getpage();
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
		if (pmd != pmd_offset(pud, 0)) {
			printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud, 0));
			return;
		}
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		pte = (pte_t *) spp_getpage();
		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
		if (pte != pte_offset_kernel(pmd, 0)) {
			printk("PAGETABLE BUG #02!\n");
			return;
		}
	}
	new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);

	pte = pte_offset_kernel(pmd, vaddr);
	if (!pte_none(*pte) &&
	    pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
		pte_ERROR(*pte);
	set_pte(pte, new_pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

/* NOTE: this is meant to be run only at boot */
void __init
__set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		printk("Invalid __set_fixmap\n");
		return;
	}
	set_pte_phys(address, phys, prot);
}
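
/*
 * Usage sketch (editor's illustration, not part of the original file): the
 * vsyscall setup code is one caller of __set_fixmap(); the index, symbol
 * and pgprot below are assumptions for illustration only:
 *
 *	__set_fixmap(VSYSCALL_FIRST_PAGE, __pa_symbol(&__vsyscall_0),
 *		     PAGE_KERNEL_VSYSCALL);
 */
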
unsigned long __initdata table_start, table_end;

extern pmd_t temp_boot_pmds[];

static struct temp_map {
	pmd_t *pmd;
	void  *address;
	int    allocated;
} temp_mappings[] __initdata = {
	{ &temp_boot_pmds[0], (void *)(40UL * 1024 * 1024) },
	{ &temp_boot_pmds[1], (void *)(42UL * 1024 * 1024) },
	{}
};

static __meminit void *alloc_low_page(int *index, unsigned long *phys)
{
	struct temp_map *ti;
	int i;
	unsigned long pfn = table_end++, paddr;
	void *adr;

	if (after_bootmem) {
		adr = (void *)get_zeroed_page(GFP_ATOMIC);
		*phys = __pa(adr);
		return adr;
	}

	if (pfn >= end_pfn)
		panic("alloc_low_page: ran out of memory");
	for (i = 0; temp_mappings[i].allocated; i++) {
		if (!temp_mappings[i].pmd)
			panic("alloc_low_page: ran out of temp mappings");
	}
	ti = &temp_mappings[i];
	paddr = (pfn << PAGE_SHIFT) & PMD_MASK;
	set_pmd(ti->pmd, __pmd(paddr | _KERNPG_TABLE | _PAGE_PSE));
	ti->allocated = 1;
	__flush_tlb();
	/* The 2MB mapping covers the page; index into it with the low bits. */
	adr = ti->address + ((pfn << PAGE_SHIFT) & ~PMD_MASK);
	memset(adr, 0, PAGE_SIZE);
	*index = i;
	*phys = pfn * PAGE_SIZE;
	return adr;
}

static __meminit void unmap_low_page(int i)
{
	struct temp_map *ti;

	if (after_bootmem)
		return;

	ti = &temp_mappings[i];
	set_pmd(ti->pmd, __pmd(0));
	ti->allocated = 0;
}

/* Must run before zap_low_mappings */
__init void *early_ioremap(unsigned long addr, unsigned long size)
{
	unsigned long map = round_down(addr, LARGE_PAGE_SIZE);

	/* actually usually some more */
	if (size >= LARGE_PAGE_SIZE) {
		printk("SMBIOS area too long %lu\n", size);
		return NULL;
	}
	set_pmd(temp_mappings[0].pmd, __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
	map += LARGE_PAGE_SIZE;
	set_pmd(temp_mappings[1].pmd, __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
	__flush_tlb();
	return temp_mappings[0].address + (addr & (LARGE_PAGE_SIZE-1));
}

/* To avoid virtual aliases later */
__init void early_iounmap(void *addr, unsigned long size)
{
	if ((void *)round_down((unsigned long)addr, LARGE_PAGE_SIZE) !=
	    temp_mappings[0].address)
		printk("early_iounmap: bad address %p\n", addr);
	set_pmd(temp_mappings[0].pmd, __pmd(0));
	set_pmd(temp_mappings[1].pmd, __pmd(0));
	__flush_tlb();
}
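
/*
 * Usage sketch (editor's illustration, not part of the original file): the
 * intended user is the early SMBIOS/DMI scan, which maps the BIOS area,
 * searches it for its anchor string, and tears the alias down again. The
 * constants below are assumptions:
 *
 *	char *p = early_ioremap(0xF0000, 0x10000);
 *	if (p) {
 *		... scan p for the "_SM_" anchor ...
 *		early_iounmap(p, 0x10000);
 *	}
 */
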
static void __meminit
phys_pmd_init(pmd_t *pmd, unsigned long address, unsigned long end)
{
	int i;

	for (i = 0; i < PTRS_PER_PMD; pmd++, i++, address += PMD_SIZE) {
		unsigned long entry;

		if (address >= end) {
			for (; i < PTRS_PER_PMD; i++, pmd++)
				set_pmd(pmd, __pmd(0));
			break;
		}
		entry = _PAGE_NX|_PAGE_PSE|_KERNPG_TABLE|_PAGE_GLOBAL|address;
		entry &= __supported_pte_mask;
		set_pmd(pmd, __pmd(entry));
	}
}

static void __meminit
phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
{
	pmd_t *pmd = pmd_offset(pud, (unsigned long)__va(address));

	if (pmd_none(*pmd)) {
		spin_lock(&init_mm.page_table_lock);
		phys_pmd_init(pmd, address, end);
		spin_unlock(&init_mm.page_table_lock);
		__flush_tlb_all();
	}
}

static void __meminit phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
{
	long i = pud_index(address);

	pud = pud + i;

	if (after_bootmem && pud_val(*pud)) {
		phys_pmd_update(pud, address, end);
		return;
	}

	for (; i < PTRS_PER_PUD; pud++, i++) {
		int map;
		unsigned long paddr, pmd_phys;
		pmd_t *pmd;

		paddr = (address & PGDIR_MASK) + i*PUD_SIZE;
		if (paddr >= end)
			break;

		if (!after_bootmem && !e820_any_mapped(paddr, paddr+PUD_SIZE, 0)) {
			set_pud(pud, __pud(0));
			continue;
		}

		pmd = alloc_low_page(&map, &pmd_phys);
		spin_lock(&init_mm.page_table_lock);
		set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
		phys_pmd_init(pmd, paddr, end);
		spin_unlock(&init_mm.page_table_lock);
		unmap_low_page(map);
	}
	__flush_tlb();
}

static void __init find_early_table_space(unsigned long end)
{
	unsigned long puds, pmds, tables, start;

	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
	pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
	tables = round_up(puds * sizeof(pud_t), PAGE_SIZE) +
		 round_up(pmds * sizeof(pmd_t), PAGE_SIZE);

	/* RED-PEN putting page tables only on node 0 could
	   cause a hotspot and fill up ZONE_DMA. The page tables
	   need roughly 4KB per GB (one page of PMD entries per GB). */
	start = 0x8000;
	table_start = find_e820_area(start, end, tables);
	if (table_start == -1UL)
		panic("Cannot find space for the kernel page tables");

	table_start >>= PAGE_SHIFT;
	table_end = table_start;

	early_printk("kernel direct mapping tables up to %lx @ %lx-%lx\n",
		end, table_start << PAGE_SHIFT, table_end << PAGE_SHIFT);
}
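
/*
 * Worked example (editor's illustration): with end = 4GB, puds = 4 and
 * pmds = 2048, so tables = round_up(4 * 8, 4096) + round_up(2048 * 8, 4096)
 * = 4KB + 16KB = 20KB, i.e. five 4K pages for the whole direct mapping.
 */
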
/* Setup the direct mapping of the physical memory at PAGE_OFFSET.
   This runs before bootmem is initialized and gets pages directly from the
   physical memory. To access them they are temporarily mapped. */
void __meminit init_memory_mapping(unsigned long start, unsigned long end)
{
	unsigned long next;

	Dprintk("init_memory_mapping\n");

	/*
	 * Find space for the kernel direct mapping tables.
	 * Later we should allocate these tables in the local node of the
	 * memory mapped. Unfortunately this is done currently before the
	 * nodes are discovered.
	 */
	if (!after_bootmem)
		find_early_table_space(end);

	start = (unsigned long)__va(start);
	end = (unsigned long)__va(end);

	for (; start < end; start = next) {
		int map;
		unsigned long pud_phys;
		pgd_t *pgd = pgd_offset_k(start);
		pud_t *pud;

		if (after_bootmem)
			pud = pud_offset(pgd, start & PGDIR_MASK);
		else
			pud = alloc_low_page(&map, &pud_phys);

		next = start + PGDIR_SIZE;
		if (next > end)
			next = end;
		phys_pud_init(pud, __pa(start), __pa(next));
		if (!after_bootmem)
			set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
		unmap_low_page(map);
	}

	if (!after_bootmem)
		asm volatile("movq %%cr4,%0" : "=r" (mmu_cr4_features));
	__flush_tlb_all();
}
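
/*
 * Caller sketch (editor's illustration, not part of the original file):
 * setup_arch() builds the whole direct mapping once, before bootmem exists;
 * the exact upper bound used here is an assumption:
 *
 *	init_memory_mapping(0, end_pfn_map << PAGE_SHIFT);
 */
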
void __cpuinit zap_low_mappings(int cpu)
{
	if (cpu == 0) {
		pgd_t *pgd = pgd_offset_k(0UL);
		pgd_clear(pgd);
	} else {
		/*
		 * For APs, zap the low identity mappings by changing the cr3
		 * to init_level4_pgt and doing a local flush tlb all.
		 */
		asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt)));
	}
	__flush_tlb_all();
}

/* Compute zone sizes for the DMA and DMA32 zones in a node. */
__init void
size_zones(unsigned long *z, unsigned long *h,
	   unsigned long start_pfn, unsigned long end_pfn)
{
	int i;
	unsigned long w;

	for (i = 0; i < MAX_NR_ZONES; i++)
		z[i] = 0;

	if (start_pfn < MAX_DMA_PFN)
		z[ZONE_DMA] = MAX_DMA_PFN - start_pfn;
	if (start_pfn < MAX_DMA32_PFN) {
		unsigned long dma32_pfn = MAX_DMA32_PFN;
		if (dma32_pfn > end_pfn)
			dma32_pfn = end_pfn;
		z[ZONE_DMA32] = dma32_pfn - start_pfn;
	}
	z[ZONE_NORMAL] = end_pfn - start_pfn;

	/* Remove lower zones from higher ones. */
	w = 0;
	for (i = 0; i < MAX_NR_ZONES; i++) {
		if (z[i])
			z[i] -= w;
		w += z[i];
	}

	/* Compute holes */
	w = start_pfn;
	for (i = 0; i < MAX_NR_ZONES; i++) {
		unsigned long s = w;
		w += z[i];
		h[i] = e820_hole_size(s, w);
	}

	/* Add the space needed for mem_map to the holes too. */
	for (i = 0; i < MAX_NR_ZONES; i++)
		h[i] += (z[i] * sizeof(struct page)) / PAGE_SIZE;

	/* The 16MB DMA zone has the kernel and other misc mappings.
	   Account them too. */
	if (h[ZONE_DMA]) {
		h[ZONE_DMA] += dma_reserve;
		if (h[ZONE_DMA] >= z[ZONE_DMA]) {
			printk(KERN_WARNING
				"Kernel too large and filling up ZONE_DMA?\n");
			h[ZONE_DMA] = z[ZONE_DMA];
		}
	}
}
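
/*
 * Worked example (editor's illustration): on a flat 4GB machine
 * (start_pfn = 0, end_pfn = 0x100000) the cumulative sizes come out as
 * z[ZONE_DMA] = 0x1000 (16MB), z[ZONE_DMA32] = 0x100000 and
 * z[ZONE_NORMAL] = 0x100000; after removing lower zones from higher ones,
 * z[ZONE_DMA] = 0x1000, z[ZONE_DMA32] = 0xff000 and z[ZONE_NORMAL] = 0.
 */
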
#ifndef CONFIG_NUMA
void __init paging_init(void)
{
	unsigned long zones[MAX_NR_ZONES], holes[MAX_NR_ZONES];

	memory_present(0, 0, end_pfn);
	sparse_init();
	size_zones(zones, holes, 0, end_pfn);
	free_area_init_node(0, NODE_DATA(0), zones,
			    __pa(PAGE_OFFSET) >> PAGE_SHIFT, holes);
}
#endif

/* Unmap a kernel mapping if it exists. This is useful to avoid prefetches
   from the CPU leading to inconsistent cache lines. address and size
   must be aligned to 2MB boundaries.
   Does nothing when the mapping doesn't exist. */
void __init clear_kernel_mapping(unsigned long address, unsigned long size)
{
	unsigned long end = address + size;

	BUG_ON(address & ~LARGE_PAGE_MASK);
	BUG_ON(size & ~LARGE_PAGE_MASK);

	for (; address < end; address += LARGE_PAGE_SIZE) {
		pgd_t *pgd = pgd_offset_k(address);
		pud_t *pud;
		pmd_t *pmd;

		if (pgd_none(*pgd))
			continue;
		pud = pud_offset(pgd, address);
		if (pud_none(*pud))
			continue;
		pmd = pmd_offset(pud, address);
		if (!pmd || pmd_none(*pmd))
			continue;
		if (0 == (pmd_val(*pmd) & _PAGE_PSE)) {
			/* Could handle this, but it should not happen currently. */
			printk(KERN_ERR
		"clear_kernel_mapping: mapping has been split. will leak memory\n");
			pmd_ERROR(*pmd);
		}
		set_pmd(pmd, __pmd(0));
	}
	__flush_tlb_all();
}
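
/*
 * Usage sketch (editor's illustration; the caller and variable names are
 * assumptions): the AGP/GART IOMMU code unmaps its aperture from the direct
 * mapping so the CPU cannot prefetch through stale cachelines:
 *
 *	clear_kernel_mapping((unsigned long)__va(iommu_bus_base), iommu_size);
 */
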
/*
 * Memory hotplug specific functions
 */
#if defined(CONFIG_ACPI_HOTPLUG_MEMORY) || defined(CONFIG_ACPI_HOTPLUG_MEMORY_MODULE)

void online_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	totalram_pages++;
	num_physpages++;
}

#ifndef CONFIG_MEMORY_HOTPLUG
/*
 * Memory Hotadd without sparsemem. The mem_maps have been allocated in
 * advance, just online the pages.
 */
int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
{
	int err = -EIO;
	unsigned long pfn;
	unsigned long total = 0, mem = 0;

	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
		if (pfn_valid(pfn)) {
			online_page(pfn_to_page(pfn));
			err = 0;
			mem++;
		}
		total++;
	}
	if (!err) {
		z->spanned_pages += total;
		z->present_pages += mem;
		z->zone_pgdat->node_spanned_pages += total;
		z->zone_pgdat->node_present_pages += mem;
	}
	return err;
}
#endif

/*
 * Memory is always added to the NORMAL zone. This means you will never get
 * additional DMA/DMA32 memory.
 */
int add_memory(u64 start, u64 size)
{
	struct pglist_data *pgdat = NODE_DATA(0);
	struct zone *zone = pgdat->node_zones + MAX_NR_ZONES-2;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	ret = __add_pages(zone, start_pfn, nr_pages);
	if (ret)
		goto error;

	init_memory_mapping(start, (start + size - 1));

	return ret;
error:
	printk("%s: Problem encountered in __add_pages!\n", __func__);
	return ret;
}
EXPORT_SYMBOL_GPL(add_memory);

int remove_memory(u64 start, u64 size)
{
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(remove_memory);

#endif
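
/*
 * Usage sketch (editor's illustration; the caller shown is an assumption):
 * the ACPI memory-hotplug driver hands newly discovered ranges to
 * add_memory(), e.g.:
 *
 *	result = add_memory(info->start_addr, info->length);
 */
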
static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
			 kcore_vsyscall;

void __init mem_init(void)
{
	long codesize, reservedpages, datasize, initsize;

	pci_iommu_alloc();

	/* How many end-of-memory variables you have, grandma! */
	max_low_pfn = end_pfn;
	max_pfn = end_pfn;
	num_physpages = end_pfn;
	high_memory = (void *) __va(end_pfn * PAGE_SIZE);

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);

	reservedpages = 0;

	/* this will put all low memory onto the freelists */
#ifdef CONFIG_NUMA
	totalram_pages = numa_free_all_bootmem();
#else
	totalram_pages = free_all_bootmem();
#endif
	reservedpages = end_pfn - totalram_pages - e820_hole_size(0, end_pfn);

	after_bootmem = 1;

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	/* Register memory areas for /proc/kcore */
	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END - VMALLOC_START);
	kclist_add(&kcore_kernel, &_stext, _end - _stext);
	kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
		   VSYSCALL_END - VSYSCALL_START);

	printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		end_pfn << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10);

#ifdef CONFIG_SMP
	/*
	 * Sync boot_level4_pgt mappings with the init_level4_pgt
	 * except for the low identity mappings which are already zapped
	 * in init_level4_pgt. This sync-up is essential for AP bringup.
	 */
	memcpy(boot_level4_pgt+1, init_level4_pgt+1, (PTRS_PER_PGD-1)*sizeof(pgd_t));
#endif
}

void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr;

	if (begin >= end)
		return;

	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		/* Poison with int3 (0xcc) so stale jumps into freed init
		   code trap immediately. */
		memset((void *)(addr & ~(PAGE_SIZE-1)), 0xcc, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
}

void free_initmem(void)
{
	memset(__initdata_begin, 0xba, __initdata_end - __initdata_begin);
	free_init_pages("unused kernel memory",
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
}
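
/*
 * Usage note (editor's illustration): the init thread in init/main.c calls
 * free_initmem() once boot is complete, just before the first userspace
 * process starts; free_initrd_mem() further below is invoked the same way
 * when the initrd is discarded.
 */
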
#ifdef CONFIG_DEBUG_RODATA

extern char __start_rodata, __end_rodata;

void mark_rodata_ro(void)
{
	unsigned long addr = (unsigned long)&__start_rodata;

	for (; addr < (unsigned long)&__end_rodata; addr += PAGE_SIZE)
		change_page_attr_addr(addr, 1, PAGE_KERNEL_RO);

	printk("Write protecting the kernel read-only data: %luk\n",
	       (&__end_rodata - &__start_rodata) >> 10);

	/*
	 * change_page_attr_addr() requires a global_flush_tlb() call after it.
	 * We do this after the printk so that if something went wrong in the
	 * change, the printk gets out at least to give a better debug hint
	 * of who is the culprit.
	 */
	global_flush_tlb();
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory", start, end);
}
#endif

void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
{
	/* Should check here against the e820 map to avoid double free */
#ifdef CONFIG_NUMA
	int nid = phys_to_nid(phys);
	reserve_bootmem_node(NODE_DATA(nid), phys, len);
#else
	reserve_bootmem(phys, len);
#endif
	if (phys+len <= MAX_DMA_PFN*PAGE_SIZE)
		dma_reserve += len / PAGE_SIZE;
}

int kern_addr_valid(unsigned long addr)
{
	unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/* Reject non-canonical addresses: the bits above the virtual
	   address width must be all zeroes or all ones. */
	if (above != 0 && above != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;
	if (pmd_large(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;
	return pfn_valid(pte_pfn(*pte));
}
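
/*
 * Usage sketch (editor's illustration, not part of the original file): a
 * /proc/kcore style reader validates a kernel virtual address before
 * touching it:
 *
 *	if (kern_addr_valid(addr))
 *		memcpy(buf, (void *)addr, len);
 */
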
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>

extern int exception_trace, page_fault_trace;

static ctl_table debug_table2[] = {
	{ 99, "exception-trace", &exception_trace, sizeof(int), 0644, NULL,
	  proc_dointvec },
	{ 0, }
};

static ctl_table debug_root_table2[] = {
	{ .ctl_name = CTL_DEBUG, .procname = "debug", .mode = 0555,
	  .child = debug_table2 },
	{ 0 },
};

static __init int x8664_sysctl_init(void)
{
	register_sysctl_table(debug_root_table2, 1);
	return 0;
}
__initcall(x8664_sysctl_init);
#endif

/* A pseudo VMA to allow ptrace access for the vsyscall page. This only
   covers the 64bit vsyscall page now. 32bit has a real VMA now and does
   not need special handling anymore. */

static struct vm_area_struct gate_vma = {
	.vm_start = VSYSCALL_START,
	.vm_end = VSYSCALL_END,
	.vm_page_prot = PAGE_READONLY
};

struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(tsk, TIF_IA32))
		return NULL;
#endif
	return &gate_vma;
}

int in_gate_area(struct task_struct *task, unsigned long addr)
{
	struct vm_area_struct *vma = get_gate_vma(task);

	if (!vma)
		return 0;
	return (addr >= vma->vm_start) && (addr < vma->vm_end);
}

/* Use this when you have no reliable task/vma, typically from interrupt
 * context. It is less reliable than using the task's vma and may give
 * false positives.
 */
int in_gate_area_no_task(unsigned long addr)
{
	return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
}