/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/lmb.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/iommu.h>
#include <asm/abs_addr.h>
#include <asm/vdso.h>

#include "mmu_decl.h"

#if PGTABLE_RANGE > USER_VSID_RANGE
#warning Limited user VSID range means pagetable space is wasted
#endif

#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
#warning TASK_SIZE is smaller than it needs to be.
#endif

phys_addr_t memstart_addr;
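
/*
 * Poison the freed range first so that stale references to __init
 * code/data show up as recognisable POISON_FREE_INITMEM patterns
 * rather than silently reading reused memory, then hand each page
 * back to the page allocator.
 */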
void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)__init_begin;
	for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
	printk ("Freeing unused kernel memory: %luk freed\n",
		((unsigned long)__init_end - (unsigned long)__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start < end)
		printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		init_page_count(virt_to_page(start));
		free_page(start);
		totalram_pages++;
	}
}
#endif

#ifdef CONFIG_PROC_KCORE
static struct kcore_list kcore_vmem;

static int __init setup_kcore(void)
{
	int i;

	for (i=0; i < lmb.memory.cnt; i++) {
		unsigned long base, size;
		struct kcore_list *kcore_mem;

		base = lmb.memory.region[i].base;
		size = lmb.memory.region[i].size;

		/* GFP_ATOMIC to avoid might_sleep warnings during boot */
		kcore_mem = kmalloc(sizeof(struct kcore_list), GFP_ATOMIC);
		if (!kcore_mem)
			panic("%s: kmalloc failed\n", __func__);

		kclist_add(kcore_mem, __va(base), size);
	}

	kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START);

	return 0;
}
module_init(setup_kcore);
#endif
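
/*
 * Constructor for the page table caches created below: the slab
 * allocator runs it when it sets up a new object, so every PGD/PMD
 * page handed out starts life fully zeroed, i.e. with no valid
 * entries.
 */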
static void zero_ctor(struct kmem_cache *cache, void *addr)
{
	memset(addr, 0, kmem_cache_size(cache));
}

static const unsigned int pgtable_cache_size[2] = {
	PGD_TABLE_SIZE, PMD_TABLE_SIZE
};
static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
#ifdef CONFIG_PPC_64K_PAGES
	"pgd_cache", "pmd_cache",
#else
	"pgd_cache", "pud_pmd_cache",
#endif /* CONFIG_PPC_64K_PAGES */
};

#ifdef CONFIG_HUGETLB_PAGE
/* Hugepages need one extra cache, initialized in hugetlbpage.c.  We
 * can't put it into the tables above, because HPAGE_SHIFT is not a
 * compile-time constant. */
struct kmem_cache *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)+1];
#else
struct kmem_cache *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)];
#endif

void pgtable_cache_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pgtable_cache_size); i++) {
		int size = pgtable_cache_size[i];
		const char *name = pgtable_cache_name[i];

		pr_debug("Allocating page table cache %s (#%d) "
			 "for size: %08x...\n", name, i, size);
		pgtable_cache[i] = kmem_cache_create(name,
						     size, size,
						     SLAB_PANIC,
						     zero_ctor);
	}
}
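
/*
 * Illustrative sketch, not code from this file: pgalloc consumers are
 * expected to draw from these caches roughly as follows, assuming the
 * PGD cache sits at index 0 as laid out in pgtable_cache_size above:
 *
 *	pgd_t *pgd = kmem_cache_alloc(pgtable_cache[0], GFP_KERNEL);
 *	...
 *	kmem_cache_free(pgtable_cache[0], pgd);
 *
 * Because of zero_ctor, the object arrives zero-filled, so the caller
 * gets an empty page table without a separate memset.
 */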

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Given an address within the vmemmap, determine the pfn of the page that
 * represents the start of the section it is within.  Note that we have to
 * do this by hand as the proffered address may not be correctly aligned.
 * Subtraction of non-aligned pointers produces undefined results.
 */
unsigned long __meminit vmemmap_section_start(unsigned long page)
{
	unsigned long offset = page - ((unsigned long)(vmemmap));

	/* Return the pfn of the start of the section. */
	return (offset / sizeof(struct page)) & PAGE_SECTION_MASK;
}
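
/*
 * Worked example with illustrative values (not taken from this file):
 * with a 64K base page size (PAGE_SHIFT == 16), SECTION_SIZE_BITS == 24
 * gives PAGES_PER_SECTION == 1 << (24 - 16) == 256, so PAGE_SECTION_MASK
 * is ~(256 - 1).  For an address 100 struct pages past vmemmap,
 * offset / sizeof(struct page) == 100, and 100 & ~255 == 0: the pfn of
 * the first page in the section.
 */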

/*
 * Check if this vmemmap page is already initialised.  If any section
 * which overlaps this vmemmap page is initialised then this page is
 * initialised already.
 */
int __meminit vmemmap_populated(unsigned long start, int page_size)
{
	unsigned long end = start + page_size;

	for (; start < end; start += (PAGES_PER_SECTION * sizeof(struct page)))
		if (pfn_valid(vmemmap_section_start(start)))
			return 1;

	return 0;
}
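
/*
 * Continuing the illustrative numbers above: one section's slice of the
 * vmemmap occupies PAGES_PER_SECTION * sizeof(struct page) bytes, e.g.
 * 256 * 64 == 16K when struct page is 64 bytes.  A single 64K linear
 * mapping page then covers four such slices, so the loop above probes
 * the first pfn of each overlapping section; any hit means the backing
 * block was already allocated by an earlier call.
 */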

int __meminit vmemmap_populate(struct page *start_page,
			       unsigned long nr_pages, int node)
{
	unsigned long mode_rw;
	unsigned long start = (unsigned long)start_page;
	unsigned long end = (unsigned long)(start_page + nr_pages);
	unsigned long page_size = 1 << mmu_psize_defs[mmu_linear_psize].shift;

	mode_rw = _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX;

	/* Align to the page size of the linear mapping. */
	start = _ALIGN_DOWN(start, page_size);

	for (; start < end; start += page_size) {
		int mapped;
		void *p;

		if (vmemmap_populated(start, page_size))
			continue;

		p = vmemmap_alloc_block(page_size, node);
		if (!p)
			return -ENOMEM;

		pr_debug("vmemmap %08lx allocated at %p, physical %08lx.\n",
			 start, p, __pa(p));

		mapped = htab_bolt_mapping(start, start + page_size,
					   __pa(p), mode_rw, mmu_linear_psize,
					   mmu_kernel_ssize);
		BUG_ON(mapped < 0);
	}

	return 0;
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */