#ifndef _ASM_GENERIC_PGTABLE_H
#define _ASM_GENERIC_PGTABLE_H

#ifndef __ASSEMBLY__

#ifndef __HAVE_ARCH_PTEP_ESTABLISH
/*
 * Establish a new mapping:
 * - update the page tables
 * - inform the TLB about the new one
 *
 * We hold the mm semaphore for reading, and the pte lock.
 *
 * Note: the old pte is known to not be writable, so we don't need to
 * worry about dirty bits etc getting lost.
 */
#ifndef __HAVE_ARCH_SET_PTE_ATOMIC
#define ptep_establish(__vma, __address, __ptep, __entry)		\
do {									\
	set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry);	\
	flush_tlb_page(__vma, __address);				\
} while (0)
#else /* __HAVE_ARCH_SET_PTE_ATOMIC */
#define ptep_establish(__vma, __address, __ptep, __entry)		\
do {									\
	set_pte_atomic(__ptep, __entry);				\
	flush_tlb_page(__vma, __address);				\
} while (0)
#endif /* __HAVE_ARCH_SET_PTE_ATOMIC */
#endif
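
/*
 * Sketch of typical use (illustrative, with hypothetical caller-side
 * names): a COW fault handler that decides to reuse the existing page
 * replaces the read-only pte with a writable one, pte lock held:
 *
 *	entry = maybe_mkwrite(pte_mkyoung(pte_mkdirty(orig_pte)), vma);
 *	ptep_establish(vma, address, page_table, entry);
 *
 * The TLB flush in ptep_establish() drops the stale read-only entry.
 */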

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Largely the same as above, but only sets the access flags (dirty,
 * accessed, and writable). Furthermore, we know it always gets set
 * to a "more permissive" setting, which allows most architectures
 * to optimize this.
 */
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
do {									  \
	set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry);	  \
	flush_tlb_page(__vma, __address);				  \
} while (0)
#endif
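
/*
 * Sketch of typical use (hypothetical caller-side names): a write fault
 * on an already-present pte only needs the flags upgraded in place:
 *
 *	entry = pte_mkyoung(pte_mkdirty(pte_mkwrite(orig_pte)));
 *	ptep_set_access_flags(vma, address, page_table, entry, 1);
 *
 * The final argument hints that the pte was dirtied, which some
 * architectures use to avoid an unnecessary TLB flush.
 */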

#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young(__vma, __address, __ptep)		\
({									\
	pte_t __pte = *(__ptep);					\
	int r = 1;							\
	if (!pte_young(__pte))						\
		r = 0;							\
	else								\
		set_pte_at((__vma)->vm_mm, (__address),			\
			   (__ptep), pte_mkold(__pte));			\
	r;								\
})
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young(__vma, __address, __ptep)		\
({									\
	int __young;							\
	__young = ptep_test_and_clear_young(__vma, __address, __ptep);	\
	if (__young)							\
		flush_tlb_page(__vma, __address);			\
	__young;							\
})
#endif
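
/*
 * Sketch of typical use (hypothetical locals): page aging in the rmap
 * code tests and clears the accessed bit across each mapping of a page:
 *
 *	if (ptep_clear_flush_young(vma, address, pte))
 *		referenced++;
 *
 * The flush variant also drops the TLB entry when the bit was set, so
 * a subsequent access marks the pte young again.
 */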

#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
#define ptep_test_and_clear_dirty(__vma, __address, __ptep)		\
({									\
	pte_t __pte = *__ptep;						\
	int r = 1;							\
	if (!pte_dirty(__pte))						\
		r = 0;							\
	else								\
		set_pte_at((__vma)->vm_mm, (__address), (__ptep),	\
			   pte_mkclean(__pte));				\
	r;								\
})
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
#define ptep_clear_flush_dirty(__vma, __address, __ptep)		\
({									\
	int __dirty;							\
	__dirty = ptep_test_and_clear_dirty(__vma, __address, __ptep);	\
	if (__dirty)							\
		flush_tlb_page(__vma, __address);			\
	__dirty;							\
})
#endif
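
/*
 * Sketch of typical use (hypothetical locals): writeback code can
 * transfer the hardware dirty state to the struct page atomically:
 *
 *	if (ptep_clear_flush_dirty(vma, address, pte))
 *		set_page_dirty(page);
 *
 * Clearing the pte's dirty bit and flushing the TLB together ensures a
 * racing write re-dirties the pte instead of being lost.
 */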

#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define ptep_get_and_clear(__mm, __address, __ptep)			\
({									\
	pte_t __pte = *(__ptep);					\
	pte_clear((__mm), (__address), (__ptep));			\
	__pte;								\
})
#endif

#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
#define ptep_get_and_clear_full(__mm, __address, __ptep, __full)	\
({									\
	pte_t __pte;							\
	__pte = ptep_get_and_clear((__mm), (__address), (__ptep));	\
	__pte;								\
})
#endif
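
/*
 * "__full" is nonzero when the whole address space is going away (exit
 * or exec), allowing an architecture to skip per-pte synchronization;
 * the generic version simply ignores it. Sketch of typical use, in the
 * style of zap_pte_range() (hypothetical locals):
 *
 *	ptent = ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm);
 *	tlb_remove_tlb_entry(tlb, pte, addr);
 *
 * where the mmu_gather "tlb" batches the TLB invalidations.
 */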

/*
 * Some architectures may be able to avoid expensive synchronization
 * primitives when modifications are made to PTEs which are already
 * not present, or while the address space is being torn down.
 */
#ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
#define pte_clear_not_present_full(__mm, __address, __ptep, __full)	\
do {									\
	pte_clear((__mm), (__address), (__ptep));			\
} while (0)
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
#define ptep_clear_flush(__vma, __address, __ptep)			\
({									\
	pte_t __pte;							\
	__pte = ptep_get_and_clear((__vma)->vm_mm, __address, __ptep);	\
	flush_tlb_page(__vma, __address);				\
	__pte;								\
})
#endif
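
/*
 * Sketch of typical use (hypothetical locals): unmap paths such as
 * try_to_unmap_one() remove the pte and its TLB entry in one step,
 * then propagate the returned state:
 *
 *	pteval = ptep_clear_flush(vma, address, pte);
 *	if (pte_dirty(pteval))
 *		set_page_dirty(page);
 *
 * Returning the old pte lets the caller preserve dirty/accessed bits.
 */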

#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
struct mm_struct;
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
	pte_t old_pte = *ptep;
	set_pte_at(mm, address, ptep, pte_wrprotect(old_pte));
}
#endif
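
/*
 * Sketch of typical use (hypothetical locals): fork() write-protects
 * the parent's ptes for COW sharing, roughly as in copy_one_pte():
 *
 *	if (is_cow_mapping(vm_flags)) {
 *		ptep_set_wrprotect(src_mm, addr, src_pte);
 *		pte = pte_wrprotect(pte);
 *	}
 *
 * Both parent and child then fault on the first write and copy the page.
 */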

#ifndef __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	(pte_val(A) == pte_val(B))
#endif

#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY
#define page_test_and_clear_dirty(page) (0)
#define pte_maybe_dirty(pte)		pte_dirty(pte)
#else
#define pte_maybe_dirty(pte)		(1)
#endif

#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
#define page_test_and_clear_young(page) (0)
#endif

#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
#define pgd_offset_gate(mm, addr)	pgd_offset(mm, addr)
#endif

#ifndef __HAVE_ARCH_LAZY_MMU_PROT_UPDATE
#define lazy_mmu_prot_update(pte)	do { } while (0)
#endif

#ifndef __HAVE_ARCH_MOVE_PTE
#define move_pte(pte, prot, old_addr, new_addr)	(pte)
#endif

/*
 * A facility to provide lazy MMU batching. This allows PTE updates and
 * page invalidations to be delayed until a call to leave lazy MMU mode
 * is issued. Some architectures may benefit from doing this, and it is
 * beneficial for both shadow and direct mode hypervisors, which may batch
 * the PTE updates which happen during this window. Note that using this
 * interface requires that read hazards be removed from the code. A read
 * hazard can arise in the direct mode hypervisor case, since the actual
 * write to the page tables may not yet have taken place, so reads through
 * a raw PTE pointer after it has been modified are not guaranteed to be
 * up to date. This mode can only be entered and left under the protection of
 * the page table locks for all page tables which may be modified. In the UP
 * case, this is required so that preemption is disabled, and in the SMP case,
 * it must synchronize the delayed page table writes properly on other CPUs.
 */
#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
#define arch_enter_lazy_mmu_mode()	do {} while (0)
#define arch_leave_lazy_mmu_mode()	do {} while (0)
#endif
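
/*
 * Sketch of the intended usage pattern, page table lock held
 * (hypothetical loop bounds):
 *
 *	arch_enter_lazy_mmu_mode();
 *	for (; addr != end; pte++, addr += PAGE_SIZE)
 *		set_pte_at(mm, addr, pte, pfn_pte(pfn++, prot));
 *	arch_leave_lazy_mmu_mode();
 *
 * A hypervisor-aware architecture may queue up the set_pte_at() calls
 * and issue them as a single batch on leaving lazy mode; rereading *pte
 * inside such a loop would be exactly the read hazard described above.
 */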

/*
 * When walking page tables, get the address of the next boundary,
 * or the end address of the range if that comes earlier. Although no
 * vma end wraps to 0, the rounded-up __boundary may wrap to 0 throughout.
 */

#define pgd_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})

#ifndef pud_addr_end
#define pud_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})
#endif

#ifndef pmd_addr_end
#define pmd_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})
#endif
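
/*
 * Sketch of the canonical walk these helpers support ("walk_pte_range"
 * is a hypothetical callee):
 *
 *	pmd = pmd_offset(pud, addr);
 *	do {
 *		next = pmd_addr_end(addr, end);
 *		walk_pte_range(pmd, addr, next);
 *	} while (pmd++, addr = next, addr != end);
 *
 * The unsigned "- 1" comparison keeps the test correct even when the
 * rounded-up boundary wraps to 0 at the top of the address space.
 */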

/*
 * When walking page tables, we usually want to skip any p?d_none entries,
 * and any p?d_bad entries - reporting the error before resetting to none.
 * Do the tests inline, but report and clear the bad entry in mm/memory.c.
 */
void pgd_clear_bad(pgd_t *);
void pud_clear_bad(pud_t *);
void pmd_clear_bad(pmd_t *);

static inline int pgd_none_or_clear_bad(pgd_t *pgd)
{
	if (pgd_none(*pgd))
		return 1;
	if (unlikely(pgd_bad(*pgd))) {
		pgd_clear_bad(pgd);
		return 1;
	}
	return 0;
}

static inline int pud_none_or_clear_bad(pud_t *pud)
{
	if (pud_none(*pud))
		return 1;
	if (unlikely(pud_bad(*pud))) {
		pud_clear_bad(pud);
		return 1;
	}
	return 0;
}

static inline int pmd_none_or_clear_bad(pmd_t *pmd)
{
	if (pmd_none(*pmd))
		return 1;
	if (unlikely(pmd_bad(*pmd))) {
		pmd_clear_bad(pmd);
		return 1;
	}
	return 0;
}
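
/*
 * Sketch of how a walker uses these helpers ("walk_pte_range" is a
 * hypothetical callee):
 *
 *	do {
 *		next = pmd_addr_end(addr, end);
 *		if (pmd_none_or_clear_bad(pmd))
 *			continue;
 *		walk_pte_range(pmd, addr, next);
 *	} while (pmd++, addr = next, addr != end);
 *
 * A corrupt entry is reported once via pmd_clear_bad() and then treated
 * as empty, so the walk continues safely.
 */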

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_GENERIC_PGTABLE_H */