/*
 *  linux/arch/i386/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/memory_hotplug.h>
#include <linux/initrd.h>
#include <linux/cpumask.h>
#include <asm/asm.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/bugs.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/paravirt.h>
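/*
 * Virtual address space reserved for vmalloc/ioremap at the top of low
 * memory; 128 << 20 is 128 MB. The vmalloc= boot option can override
 * this by assigning __VMALLOC_RESERVE during early setup.
 */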
unsigned int __VMALLOC_RESERVE = 128 << 20;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
unsigned long highstart_pfn, highend_pfn;

static noinline int do_test_wp_bit(void);
/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the pgd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
	pud_t *pud;
	pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
	if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
		pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);

		paravirt_alloc_pd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
		set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
		pud = pud_offset(pgd, 0);
		BUG_ON(pmd_table != pmd_offset(pud, 0));
	}
#endif
	pud = pud_offset(pgd, 0);
	pmd_table = pmd_offset(pud, 0);

	return pmd_table;
}
/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry:
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
		pte_t *page_table = NULL;

#ifdef CONFIG_DEBUG_PAGEALLOC
		page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
#endif
		if (!page_table) {
			page_table =
				(pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
		}

		paravirt_alloc_pt(&init_mm, __pa(page_table) >> PAGE_SHIFT);
		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
		BUG_ON(page_table != pte_offset_kernel(pmd, 0));
	}

	return pte_offset_kernel(pmd, 0);
}
/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 *
 * NOTE: The pagetables are allocated contiguously in physical memory, so
 * we can cache the location of the first one and move around without
 * checking the pgd every time.
 */
static void __init
page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
	int pgd_idx, pmd_idx;
	unsigned long vaddr;
	pgd_t *pgd;
	pmd_t *pmd;

	vaddr = start;
	pgd_idx = pgd_index(vaddr);
	pmd_idx = pmd_index(vaddr);
	pgd = pgd_base + pgd_idx;

	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);
		pmd = pmd + pmd_index(vaddr);
		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
		     pmd++, pmd_idx++) {
			one_page_table_init(pmd);

			vaddr += PMD_SIZE;
		}
		pmd_idx = 0;
	}
}
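/*
 * Used below to decide which mappings get executable permission:
 * everything from PAGE_OFFSET up to __init_end is treated as kernel
 * text. Note that the bound deliberately includes the init sections,
 * which still have to execute before they are freed.
 */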
static inline int is_kernel_text(unsigned long addr)
{
	if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
		return 1;
	return 0;
}
/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET:
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
{
	int pgd_idx, pmd_idx, pte_ofs;
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;

	pgd_idx = pgd_index(PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	pfn = 0;

	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);
		if (pfn >= max_low_pfn)
			continue;
		for (pmd_idx = 0;
		     pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn;
		     pmd++, pmd_idx++) {
			unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;

			/*
			 * Map with big pages if possible, otherwise
			 * create normal page tables:
			 */
			if (cpu_has_pse) {
				unsigned int addr2;
				pgprot_t prot = PAGE_KERNEL_LARGE;

				addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
					PAGE_OFFSET + PAGE_SIZE-1;

				if (is_kernel_text(addr) ||
				    is_kernel_text(addr2))
					prot = PAGE_KERNEL_LARGE_EXEC;

				set_pmd(pmd, pfn_pmd(pfn, prot));
				pfn += PTRS_PER_PTE;
				continue;
			}
			pte = one_page_table_init(pmd);

			for (pte_ofs = 0;
			     pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn;
			     pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
				pgprot_t prot = PAGE_KERNEL;

				if (is_kernel_text(addr))
					prot = PAGE_KERNEL_EXEC;

				set_pte(pte, pfn_pte(pfn, prot));
			}
		}
	}
}
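/*
 * Pages in the pfn range 0x70000-0x7003F (physical 0x70000000-0x7003FFFF)
 * trigger Pentium Pro Erratum #50 on affected steppings (detected by
 * ppro_with_ram_bug(), called from mem_init() below); such pages must
 * stay reserved rather than be handed to the page allocator.
 */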
static inline int page_kills_ppro(unsigned long pagenr)
{
	if (pagenr >= 0x70000 && pagenr <= 0x7003F)
		return 1;
	return 0;
}
#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;

static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
			vaddr), vaddr), vaddr);
}
static void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/*
	 * Cache the first kmap pte:
	 */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

	kmap_prot = PAGE_KERNEL;
}
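/*
 * Set up the page table for the persistent-kmap (PKMAP) window and
 * cache its pte array in pkmap_page_table, through which kmap()
 * installs highmem mappings later on.
 */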
static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
	unsigned long vaddr;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	vaddr = PKMAP_BASE;
	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

	pgd = swapper_pg_dir + pgd_index(vaddr);
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);
	pkmap_page_table = pte;
}
static void __meminit free_new_highpage(struct page *page)
{
	init_page_count(page);
	__free_page(page);
	totalhigh_pages++;
}
void __init add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
{
	if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
		ClearPageReserved(page);
		free_new_highpage(page);
	} else
		SetPageReserved(page);
}
static int __meminit
add_one_highpage_hotplug(struct page *page, unsigned long pfn)
{
	free_new_highpage(page);
	totalram_pages++;
#ifdef CONFIG_FLATMEM
	max_mapnr = max(pfn, max_mapnr);
#endif
	num_physpages++;

	return 0;
}
/*
 * Not currently handling the NUMA case.
 * Assumes a single node and that all dynamically added memory
 * onlined here is in HIGHMEM.
 */
void __meminit online_page(struct page *page)
{
	ClearPageReserved(page);
	add_one_highpage_hotplug(page, page_to_pfn(page));
}
#ifndef CONFIG_NUMA
static void __init set_highmem_pages_init(int bad_ppro)
{
	int pfn;

	for (pfn = highstart_pfn; pfn < highend_pfn; pfn++) {
		/*
		 * Holes under sparsemem might not have a mem_map[]:
		 */
		if (pfn_valid(pfn))
			add_one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
	}
	totalram_pages += totalhigh_pages;
}
#endif /* !CONFIG_NUMA */
#else
# define kmap_init()				do { } while (0)
# define permanent_kmaps_init(pgd_base)		do { } while (0)
# define set_highmem_pages_init(bad_ppro)	do { } while (0)
#endif /* CONFIG_HIGHMEM */
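/*
 * Base protections for kernel mappings; pagetable_init() ORs in
 * _PAGE_GLOBAL below once PGE is known to be available.
 */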
pteval_t __PAGE_KERNEL = _PAGE_KERNEL;
EXPORT_SYMBOL(__PAGE_KERNEL);

pteval_t __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;
void __init native_pagetable_setup_start(pgd_t *base)
{
#ifdef CONFIG_X86_PAE
	int i;

	/*
	 * Init entries of the first-level page table to the
	 * zero page, if they haven't already been set up.
	 *
	 * In a normal native boot, we'll be running on a
	 * pagetable rooted in swapper_pg_dir, but not in PAE
	 * mode, so this will end up clobbering the mappings
	 * for the lower 24Mbytes of the address space,
	 * without affecting the kernel address space.
	 */
	for (i = 0; i < USER_PTRS_PER_PGD; i++)
		set_pgd(&base[i],
			__pgd(__pa(empty_zero_page) | _PAGE_PRESENT));

	/* Make sure kernel address space is empty so that a pagetable
	   will be allocated for it. */
	memset(&base[USER_PTRS_PER_PGD], 0,
	       KERNEL_PGD_PTRS * sizeof(pgd_t));
#else
	paravirt_alloc_pd(&init_mm, __pa(base) >> PAGE_SHIFT);
#endif
}
void __init native_pagetable_setup_done(pgd_t *base)
{
#ifdef CONFIG_X86_PAE
	/*
	 * Add low memory identity-mappings - SMP needs it when
	 * starting up on an AP from real-mode. In the non-PAE
	 * case we already have these mappings through head.S.
	 * All user-space mappings are explicitly cleared after
	 * SMP startup.
	 */
	set_pgd(&base[0], base[USER_PTRS_PER_PGD]);
#endif
}
/*
 * Build a proper pagetable for the kernel mappings. Up until this
 * point, we've been running on some set of pagetables constructed by
 * the boot process.
 *
 * If we're booting on native hardware, this will be a pagetable
 * constructed in arch/i386/kernel/head.S, and not running in PAE mode
 * (even if we'll end up running in PAE). The root of the pagetable
 * will be swapper_pg_dir.
 *
 * If we're booting paravirtualized under a hypervisor, then there are
 * more options: we may already be running PAE, and the pagetable may
 * or may not be based in swapper_pg_dir. In any case,
 * paravirt_pagetable_setup_start() will set up swapper_pg_dir
 * appropriately for the rest of the initialization to work.
 *
 * In general, pagetable_init() assumes that the pagetable may already
 * be partially populated, and so it avoids stomping on any existing
 * mappings.
 */
static void __init pagetable_init(void)
{
	pgd_t *pgd_base = swapper_pg_dir;
	unsigned long vaddr, end;

	paravirt_pagetable_setup_start(pgd_base);

	/* Enable PSE if available */
	if (cpu_has_pse)
		set_in_cr4(X86_CR4_PSE);

	/* Enable PGE if available */
	if (cpu_has_pge) {
		set_in_cr4(X86_CR4_PGE);
		__PAGE_KERNEL |= _PAGE_GLOBAL;
		__PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
	}

	kernel_physical_mapping_init(pgd_base);
	remap_numa_kva();

	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	early_ioremap_clear();
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, pgd_base);
	early_ioremap_reset();

	permanent_kmaps_init(pgd_base);

	paravirt_pagetable_setup_done(pgd_base);
}
#ifdef CONFIG_ACPI_SLEEP
/*
 * ACPI suspend needs this for resume, because things like the intel-agp
 * driver might have split up a kernel 4MB mapping.
 */
char swsusp_pg_dir[PAGE_SIZE]
	__attribute__ ((aligned(PAGE_SIZE)));

static inline void save_pg_dir(void)
{
	memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
}
#else /* !CONFIG_ACPI_SLEEP */
static inline void save_pg_dir(void)
{
}
#endif /* !CONFIG_ACPI_SLEEP */
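/*
 * In the PAE case below, each entry is pointed at the empty zero page
 * rather than simply cleared - the "1 +" sets _PAGE_PRESENT on the pgd
 * value, so the entry stays valid while mapping nothing useful.
 */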
void zap_low_mappings(void)
{
	int i;

	save_pg_dir();

	/*
	 * Zap initial low-memory mappings.
	 *
	 * Note that "pgd_clear()" doesn't do it for
	 * us, because pgd_clear() is a no-op on i386.
	 */
	for (i = 0; i < USER_PTRS_PER_PGD; i++) {
#ifdef CONFIG_X86_PAE
		set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
#else
		set_pgd(swapper_pg_dir+i, __pgd(0));
#endif
	}
	flush_tlb_all();
}
int nx_enabled;

pteval_t __supported_pte_mask __read_mostly = ~_PAGE_NX;
EXPORT_SYMBOL_GPL(__supported_pte_mask);
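/*
 * _PAGE_NX lives in the high half of a 64-bit PAE pte, so it starts
 * out masked off here and is only enabled by set_nx() below when both
 * the kernel (PAE) and the CPU support it.
 */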
#ifdef CONFIG_X86_PAE

static int disable_nx __initdata;

/*
 * noexec = on|off
 *
 * Control non executable mappings.
 *
 * on      Enable
 * off     Disable
 */
static int __init noexec_setup(char *str)
{
	if (!str || !strcmp(str, "on")) {
		if (cpu_has_nx) {
			__supported_pte_mask |= _PAGE_NX;
			disable_nx = 0;
		}
	} else {
		if (!strcmp(str, "off")) {
			disable_nx = 1;
			__supported_pte_mask &= ~_PAGE_NX;
		} else {
			return -EINVAL;
		}
	}

	return 0;
}
early_param("noexec", noexec_setup);
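/*
 * CPUID leaf 0x80000001, EDX bit 20 advertises NX. When it is present
 * (and not disabled via noexec=off), turn NX on in the EFER MSR and
 * allow _PAGE_NX in ptes.
 */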
static void __init set_nx(void)
{
	unsigned int v[4], l, h;

	if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
		cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);

		if ((v[3] & (1 << 20)) && !disable_nx) {
			rdmsr(MSR_EFER, l, h);
			l |= EFER_NX;
			wrmsr(MSR_EFER, l, h);
			nx_enabled = 1;
			__supported_pte_mask |= _PAGE_NX;
		}
	}
}
#endif
/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
#ifdef CONFIG_X86_PAE
	set_nx();
	if (nx_enabled)
		printk(KERN_INFO "NX (Execute Disable) protection: active\n");
#endif
	pagetable_init();

	load_cr3(swapper_pg_dir);

#ifdef CONFIG_X86_PAE
	/*
	 * We will bail out later - printk doesn't work right now so
	 * the user would just see a hanging kernel.
	 */
	if (cpu_has_pae)
		set_in_cr4(X86_CR4_PAE);
#endif
	__flush_tlb_all();

	kmap_init();
}
/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
 * and also on some strange 486's (NexGen etc.). All 586+'s are OK. This
 * used to involve black magic jumps to work around some nasty CPU bugs,
 * but fortunately the switch to using exceptions got rid of all that.
 */
static void __init test_wp_bit(void)
{
	printk(KERN_INFO
  "Checking if this processor honours the WP bit even in supervisor mode...");

	/* Any page-aligned address will do, the test is non-destructive */
	__set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
	boot_cpu_data.wp_works_ok = do_test_wp_bit();
	clear_fixmap(FIX_WP_TEST);

	if (!boot_cpu_data.wp_works_ok) {
		printk(KERN_CONT "No.\n");
#ifdef CONFIG_X86_WP_WORKS_OK
		panic(
  "This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
#endif
	} else {
		printk(KERN_CONT "Ok.\n");
	}
}
static struct kcore_list kcore_mem, kcore_vmalloc;

void __init mem_init(void)
{
	int codesize, reservedpages, datasize, initsize;
	int tmp, bad_ppro;

#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif
	bad_ppro = ppro_with_ram_bug();

#ifdef CONFIG_HIGHMEM
	/* check that fixmap and pkmap do not overlap */
	if (PKMAP_BASE + LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
		printk(KERN_ERR
			"fixmap and kmap areas overlap - this will crash\n");
		printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
				PKMAP_BASE, PKMAP_BASE + LAST_PKMAP*PAGE_SIZE,
				FIXADDR_START);
		BUG();
	}
#endif
	/* this will put all low memory onto the freelists */
	totalram_pages += free_all_bootmem();

	reservedpages = 0;
	for (tmp = 0; tmp < max_low_pfn; tmp++)
		/*
		 * Only count reserved RAM pages:
		 */
		if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
			reservedpages++;

	set_highmem_pages_init(bad_ppro);
	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END-VMALLOC_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
			"%dk reserved, %dk data, %dk init, %ldk highmem)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10,
		(unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
	       );
#if 1 /* double-sanity-check paranoia */
	printk(KERN_INFO "virtual kernel memory layout:\n"
		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
		FIXADDR_START, FIXADDR_TOP,
		(FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
		PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
		(LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

		VMALLOC_START, VMALLOC_END,
		(VMALLOC_END - VMALLOC_START) >> 20,

		(unsigned long)__va(0), (unsigned long)high_memory,
		((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

		(unsigned long)&__init_begin, (unsigned long)&__init_end,
		((unsigned long)&__init_end -
		 (unsigned long)&__init_begin) >> 10,

		(unsigned long)&_etext, (unsigned long)&_edata,
		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

		(unsigned long)&_text, (unsigned long)&_etext,
		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

#ifdef CONFIG_HIGHMEM
	BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
	BUG_ON(VMALLOC_END                       > PKMAP_BASE);
#endif
	BUG_ON(VMALLOC_START                     > VMALLOC_END);
	BUG_ON((unsigned long)high_memory        > VMALLOC_START);
#endif /* double-sanity-check paranoia */
#ifdef CONFIG_X86_PAE
	if (!cpu_has_pae)
		panic("cannot execute a PAE-enabled kernel on a PAE-less CPU!");
#endif
	if (boot_cpu_data.wp_works_ok < 0)
		test_wp_bit();

	/*
	 * Subtle. SMP is doing its boot stuff late (because it has to
	 * fork idle threads) - but it also needs low mappings for the
	 * protected-mode entry to work. We zap these entries only after
	 * the WP-bit has been tested.
	 */
#ifndef CONFIG_SMP
	zap_low_mappings();
#endif
}
#ifdef CONFIG_MEMORY_HOTPLUG
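/*
 * On i386, hot-added memory always lands in ZONE_HIGHMEM - lowmem is
 * fully mapped at boot and its zones are not grown afterwards.
 */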
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdata = NODE_DATA(nid);
	struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	return __add_pages(zone, start_pfn, nr_pages);
}
#endif
/*
 * This function cannot be __init, since exceptions don't work in that
 * section. Put this after the callers, so that it cannot be inlined.
 */
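/*
 * The inline asm below writes to the read-only FIX_WP_TEST page. If WP
 * is honoured the write faults, the exception fixup skips the "xorl",
 * and the flag keeps its initial value 1; if the write silently
 * succeeds, the "xorl" clears the flag to 0.
 */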
static noinline int do_test_wp_bit(void)
{
	char tmp_reg;
	int flag;

	__asm__ __volatile__(
		"	movb %0, %1	\n"
		"1:	movb %1, %0	\n"
		"	xorl %2, %2	\n"
		"2:			\n"
		_ASM_EXTABLE(1b,2b)
		:"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
		 "=q" (tmp_reg),
		 "=r" (flag)
		:"2" (1)
		:"memory");

	return flag;
}
#ifdef CONFIG_DEBUG_RODATA
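/*
 * Non-zero on purpose: rodata_test() first checks that the value
 * actually survived linking into the read-only section, then verifies
 * that writing to it faults.
 */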
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);
void mark_rodata_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long size = PFN_ALIGN(_etext) - start;

#ifndef CONFIG_KPROBES
#ifdef CONFIG_HOTPLUG_CPU
	/* It must still be possible to apply SMP alternatives. */
	if (num_possible_cpus() <= 1)
#endif
	{
		set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
		printk(KERN_INFO "Write protecting the kernel text: %luk\n",
			size >> 10);

#ifdef CONFIG_CPA_DEBUG
		printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
				start, start+size);
		set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);

		printk(KERN_INFO "Testing CPA: write protecting again\n");
		set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
#endif
	}
#endif
	start += size;
	size = (unsigned long)__end_rodata - start;
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
		size >> 10);
	rodata_test();

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, start + size);
	set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: write protecting again\n");
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
#endif
}
#endif
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
#ifdef CONFIG_DEBUG_PAGEALLOC
	/*
	 * If debugging page accesses then do not free this memory but
	 * mark them not present - any buggy init-section access will
	 * create a kernel page fault:
	 */
	printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
		begin, PAGE_ALIGN(end));
	set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
#else
	unsigned long addr;

	/*
	 * We just marked the kernel text read only above, now that
	 * we are going to free part of that, we need to make that
	 * writeable first.
	 */
	set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);

	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
#endif
}
void free_initmem(void)
{
	free_init_pages("unused kernel memory",
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
}
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory", start, end);
}
#endif