#ifndef _I386_PGTABLE_H
#define _I386_PGTABLE_H
/*
 * The Linux memory management assumes a three-level page table setup. On
 * the i386, we use that, but "fold" the mid level into the top-level page
 * table, so that we physically have the same two-level page table as the
 * i386 mmu expects.
 *
 * This file contains the functions and defines necessary to modify and use
 * the i386 page table tree.
 */
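/*
 * Illustrative example (not part of the original header; assumes the
 * non-PAE 4kB-page layout): with the mid level folded away, a linear
 * address is split 10/10/12, e.g. 0xC0101234 decomposes as
 *
 *	pgd index = 0xC0101234 >> 22            = 768
 *	pte index = (0xC0101234 >> 12) & 1023   = 257
 *	offset    = 0xC0101234 & 0xfff          = 0x234
 *
 * The "middle" level contributes no extra bits because it is folded into
 * the pgd entry.
 */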
#ifndef __ASSEMBLY__
#include <asm/processor.h>
#include <asm/fixmap.h>
#include <linux/threads.h>
#include <asm/paravirt.h>

#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
struct vm_area_struct;

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
extern unsigned long empty_zero_page[1024];
extern pgd_t swapper_pg_dir[1024];
extern struct kmem_cache *pmd_cache;
extern spinlock_t pgd_lock;
extern struct page *pgd_list;
void check_pgt_cache(void);

void pmd_ctor(struct kmem_cache *, void *);
void pgtable_cache_init(void);
void paging_init(void);
/*
 * The Linux x86 paging architecture is 'compile-time dual-mode': it
 * implements both the traditional 2-level x86 page tables and the
 * newer 3-level PAE-mode page tables.
 */
#ifdef CONFIG_X86_PAE
# include <asm/pgtable-3level-defs.h>
# define PMD_SIZE (1UL << PMD_SHIFT)
# define PMD_MASK (~(PMD_SIZE-1))
#else
# include <asm/pgtable-2level-defs.h>
#endif
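/*
 * Rough summary of what the two *-defs.h headers provide (illustrative
 * only; see those headers for the authoritative values):
 *
 *			non-PAE (2-level)	PAE (3-level)
 *	PGDIR_SHIFT	22			30
 *	PTRS_PER_PGD	1024			4
 *	PMD_SHIFT	22 (folded)		21
 *	PTRS_PER_PMD	1  (folded)		512
 *	PTRS_PER_PTE	1024			512
 */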
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))

#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)

#define TWOLEVEL_PGDIR_SHIFT 22
#define BOOT_USER_PGD_PTRS (__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT)
#define BOOT_KERNEL_PGD_PTRS (1024-BOOT_USER_PGD_PTRS)
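/*
 * Worked example (not from this header; assumes the common default
 * PAGE_OFFSET of 0xC0000000 and the non-PAE layout):
 *
 *	USER_PGD_PTRS   = 0xC0000000 >> 22 = 768
 *	KERNEL_PGD_PTRS = 1024 - 768       = 256
 *
 * i.e. the bottom 3GB of each address space is covered by per-process
 * pgd entries and the top 1GB by entries shared with the kernel.
 */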
/* Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts. That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_OFFSET (8*1024*1024)
#define VMALLOC_START (((unsigned long) high_memory + \
			2*VMALLOC_OFFSET-1) & ~(VMALLOC_OFFSET-1))
#ifdef CONFIG_HIGHMEM
# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
#else
# define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
#endif
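/*
 * Worked example (illustrative; assumes PAGE_OFFSET 0xC0000000 and
 * high_memory == 0xC8000000, i.e. 128MB of lowmem):
 *
 *	VMALLOC_START = (0xC8000000 + 2*0x800000 - 1) & ~(0x800000 - 1)
 *		      = 0xC8FFFFFF & 0xFF800000
 *		      = 0xC8800000
 *
 * leaving an 8MB guard hole between the end of lowmem and the first
 * vmalloc mapping.
 */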
/*
 * Define this if things work differently on an i386 and an i486:
 * it will (on an i486) warn about kernel memory accesses that are
 * done without an 'access_ok(VERIFY_WRITE,..)'
 */
#undef TEST_ACCESS_OK

/* The boot page tables (all created as a single array) */
extern unsigned long pg0[];
#define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))

/* To avoid harmful races, pmd_none(x) should check only the lower half when PAE */
#define pmd_none(x) (!(unsigned long)pmd_val(x))
#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)

#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
#ifdef CONFIG_X86_PAE
# include <asm/pgtable-3level.h>
#else
# include <asm/pgtable-2level.h>
#endif
/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 *  dst and src can be on the same page, but the range must not overlap,
 *  and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
}
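/*
 * Illustrative use (a sketch, not a quote of the pgd allocation code):
 * when a new pgd page is set up, the kernel portion of the reference
 * page directory can be copied into it in one go, e.g.
 *
 *	clone_pgd_range(pgd + USER_PGD_PTRS,
 *			swapper_pg_dir + USER_PGD_PTRS,
 *			KERNEL_PGD_PTRS);
 */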
/*
 * Macro to mark a page protection value as "uncacheable".
 * On processors which do not support it, this is a no-op.
 */
#define pgprot_noncached(prot) ((boot_cpu_data.x86 > 3) \
	? (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) : (prot))
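/*
 * Typical (illustrative) use in a driver mmap() method that maps device
 * memory, before calling io_remap_pfn_range()/remap_pfn_range():
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 */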
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pgd_index_k(addr) pgd_index(addr)

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
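/*
 * Illustrative example (assumes the non-PAE layout, PGDIR_SHIFT == 22,
 * PTRS_PER_PGD == 1024; not part of the original header):
 *
 *	pgd_index(0xC0100000) = (0xC0100000 >> 22) & 1023 = 768
 *	pgd_offset_k(0xC0100000) == init_mm.pgd + 768
 */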
/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this macro returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
#define pmd_index(address) \
		(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this macro returns the index of the entry in the pte page which would
 * control the given virtual address
 */
#define pte_index(address) \
		(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address) \
	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
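/*
 * Illustrative sketch of a full kernel-pagetable walk using the macros
 * above (pud_offset()/pmd_offset() come from the generic pagetable-folding
 * headers; error checking omitted):
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 */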
#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))

#define pmd_page_vaddr(pmd) \
		((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
/*
 * Helper function that returns the kernel pagetable entry controlling
 * the virtual address 'address'. NULL means no pagetable entry present.
 * NOTE: the return type is pte_t but if the pmd is PSE then we return it
 * as a pte too.
 */
extern pte_t *lookup_address(unsigned long address);
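/*
 * Illustrative use (a sketch, not a quote of any caller):
 *
 *	pte_t *pte = lookup_address(vaddr);
 *	if (pte && pte_present(*pte))
 *		... the mapping for vaddr exists ...
 */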
/*
 * Make a given kernel text page executable/non-executable.
 * Returns the previous executability setting of that page (which
 * is used to restore the previous state). Used by the SMP bootup code.
 * NOTE: this is an __init function for security reasons.
 */
#ifdef CONFIG_X86_PAE
extern int set_kernel_exec(unsigned long vaddr, int enable);
#else
static inline int set_kernel_exec(unsigned long vaddr, int enable) { return 0; }
#endif
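/*
 * Illustrative save/restore pattern suggested by the comment above
 * (a sketch, not a quote of the SMP bootup code; 'vaddr' is any kernel
 * text address):
 *
 *	int was_exec = set_kernel_exec(vaddr, 1);
 *	... run code from that page ...
 *	set_kernel_exec(vaddr, was_exec);
 */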
#if defined(CONFIG_HIGHPTE)
#define pte_offset_map(dir, address) \
	((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE0) + pte_index(address))
#define pte_offset_map_nested(dir, address) \
	((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE1) + pte_index(address))
#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
#else
#define pte_offset_map(dir, address) \
	((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
#endif
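/*
 * Illustrative map/use/unmap pattern for the macros above (a sketch;
 * with CONFIG_HIGHPTE the pte page may live in highmem, so the pointer
 * is only valid between the map and unmap calls):
 *
 *	pte_t *ptep = pte_offset_map(pmd, addr);
 *	pte_t entry = *ptep;
 *	pte_unmap(ptep);
 */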
/* Clear a kernel PTE and flush it from the TLB */
#define kpte_clear_flush(ptep, vaddr)		\
do {						\
	pte_clear(&init_mm, vaddr, ptep);	\
	__flush_tlb_one(vaddr);			\
} while (0)
/*
 * The i386 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma,address,pte) do { } while (0)
void native_pagetable_setup_start(pgd_t *base);
void native_pagetable_setup_done(pgd_t *base);

#ifndef CONFIG_PARAVIRT
static inline void paravirt_pagetable_setup_start(pgd_t *base)
{
	native_pagetable_setup_start(base);
}

static inline void paravirt_pagetable_setup_done(pgd_t *base)
{
	native_pagetable_setup_done(base);
}
#endif /* !CONFIG_PARAVIRT */
#endif /* !__ASSEMBLY__ */
/*
 * kern_addr_valid() is (1) for FLATMEM and (0) for
 * SPARSEMEM and DISCONTIGMEM
 */
#ifdef CONFIG_FLATMEM
#define kern_addr_valid(addr) (1)
#else
#define kern_addr_valid(kaddr) (0)
#endif
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
		remap_pfn_range(vma, vaddr, pfn, size, prot)

#endif /* _I386_PGTABLE_H */