X-Git-Url: http://pilppa.org/gitweb/gitweb.cgi?a=blobdiff_plain;f=include%2Fasm-generic%2Fpgtable.h;h=44ef329531c3b45b5809f120a80a920f145892b4;hb=ce1d5b23a8d1e19866ab82bdec0dc41fde5273d8;hp=dc8f99ee305fc4edd43232abdc0e5564b2c15434;hpb=bc95f3669f5e6f63cf0b84fe4922c3c6dd4aa775;p=linux-2.6-omap-h63xx.git

diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index dc8f99ee305..44ef329531c 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -2,38 +2,27 @@
 #define _ASM_GENERIC_PGTABLE_H
 
 #ifndef __ASSEMBLY__
-
-#ifndef __HAVE_ARCH_PTEP_ESTABLISH
-/*
- * Establish a new mapping:
- *  - flush the old one
- *  - update the page tables
- *  - inform the TLB about the new one
- *
- * We hold the mm semaphore for reading, and the pte lock.
- *
- * Note: the old pte is known to not be writable, so we don't need to
- * worry about dirty bits etc getting lost.
- */
-#define ptep_establish(__vma, __address, __ptep, __entry)		\
-do {									\
-	set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry);	\
-	flush_tlb_page(__vma, __address);				\
-} while (0)
-#endif
+#ifdef CONFIG_MMU
 
 #ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
 /*
  * Largely same as above, but only sets the access flags (dirty,
  * accessed, and writable). Furthermore, we know it always gets set
  * to a "more permissive" setting, which allows most architectures
- * to optimize this.
+ * to optimize this. We return whether the PTE actually changed, which
+ * in turn instructs the caller to do things like update_mmu_cache.
+ * This used to be done in the caller, but sparc needs minor faults to
+ * force that call on sun4c, so we changed this macro slightly.
  */
 #define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
-do {									\
-	set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry);	\
-	flush_tlb_page(__vma, __address);				\
-} while (0)
+({									\
+	int __changed = !pte_same(*(__ptep), __entry);			\
+	if (__changed) {						\
+		set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
+		flush_tlb_page(__vma, __address);			\
+	}								\
+	__changed;							\
+})
 #endif
 
 #ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
@@ -61,31 +50,6 @@ do {									\
 })
 #endif
 
-#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
-#define ptep_test_and_clear_dirty(__vma, __address, __ptep)		\
-({									\
-	pte_t __pte = *__ptep;						\
-	int r = 1;							\
-	if (!pte_dirty(__pte))						\
-		r = 0;							\
-	else								\
-		set_pte_at((__vma)->vm_mm, (__address), (__ptep),	\
-			   pte_mkclean(__pte));				\
-	r;								\
-})
-#endif
-
-#ifndef __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
-#define ptep_clear_flush_dirty(__vma, __address, __ptep)		\
-({									\
-	int __dirty;							\
-	__dirty = ptep_test_and_clear_dirty(__vma, __address, __ptep);	\
-	if (__dirty)							\
-		flush_tlb_page(__vma, __address);			\
-	__dirty;							\
-})
-#endif
-
 #ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
 #define ptep_get_and_clear(__mm, __address, __ptep)			\
 ({									\
@@ -161,49 +125,10 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
 #define pgd_offset_gate(mm, addr)	pgd_offset(mm, addr)
 #endif
 
-#ifndef __HAVE_ARCH_LAZY_MMU_PROT_UPDATE
-#define lazy_mmu_prot_update(pte)	do { } while (0)
-#endif
-
 #ifndef __HAVE_ARCH_MOVE_PTE
 #define move_pte(pte, prot, old_addr, new_addr)	(pte)
 #endif
 
-/*
- * A facility to provide lazy MMU batching. This allows PTE updates and
- * page invalidations to be delayed until a call to leave lazy MMU mode
- * is issued. Some architectures may benefit from doing this, and it is
- * beneficial for both shadow and direct mode hypervisors, which may batch
- * the PTE updates which happen during this window. Note that using this
- * interface requires that read hazards be removed from the code. A read
- * hazard could result in the direct mode hypervisor case, since the actual
- * write to the page tables may not yet have taken place, so reads through
- * a raw PTE pointer after it has been modified are not guaranteed to be
- * up to date. This mode can only be entered and left under the protection of
- * the page table locks for all page tables which may be modified. In the UP
- * case, this is required so that preemption is disabled, and in the SMP case,
- * it must synchronize the delayed page table writes properly on other CPUs.
- */
-#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
-#define arch_enter_lazy_mmu_mode()	do {} while (0)
-#define arch_leave_lazy_mmu_mode()	do {} while (0)
-#define arch_flush_lazy_mmu_mode()	do {} while (0)
-#endif
-
-/*
- * A facility to provide batching of the reload of page tables with the
- * actual context switch code for paravirtualized guests. By convention,
- * only one of the lazy modes (CPU, MMU) should be active at any given
- * time, entry should never be nested, and entries and exits should always
- * be paired. This is for sanity of maintaining and reasoning about the
- * kernel code.
- */
-#ifndef __HAVE_ARCH_ENTER_LAZY_CPU_MODE
-#define arch_enter_lazy_cpu_mode()	do {} while (0)
-#define arch_leave_lazy_cpu_mode()	do {} while (0)
-#define arch_flush_lazy_cpu_mode()	do {} while (0)
-#endif
-
 /*
  * When walking page tables, get the address of the next boundary,
  * or the end address of the range if that comes earlier. Although no
@@ -270,6 +195,43 @@ static inline int pmd_none_or_clear_bad(pmd_t *pmd)
 	}
 	return 0;
 }
+#endif /* CONFIG_MMU */
+
+/*
+ * A facility to provide lazy MMU batching. This allows PTE updates and
+ * page invalidations to be delayed until a call to leave lazy MMU mode
+ * is issued. Some architectures may benefit from doing this, and it is
+ * beneficial for both shadow and direct mode hypervisors, which may batch
+ * the PTE updates which happen during this window. Note that using this
+ * interface requires that read hazards be removed from the code. A read
+ * hazard could result in the direct mode hypervisor case, since the actual
+ * write to the page tables may not yet have taken place, so reads through
+ * a raw PTE pointer after it has been modified are not guaranteed to be
+ * up to date. This mode can only be entered and left under the protection of
+ * the page table locks for all page tables which may be modified. In the UP
+ * case, this is required so that preemption is disabled, and in the SMP case,
+ * it must synchronize the delayed page table writes properly on other CPUs.
+ */
+#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
+#define arch_enter_lazy_mmu_mode()	do {} while (0)
+#define arch_leave_lazy_mmu_mode()	do {} while (0)
+#define arch_flush_lazy_mmu_mode()	do {} while (0)
+#endif
+
+/*
+ * A facility to provide batching of the reload of page tables with the
+ * actual context switch code for paravirtualized guests. By convention,
+ * only one of the lazy modes (CPU, MMU) should be active at any given
+ * time, entry should never be nested, and entries and exits should always
+ * be paired. This is for sanity of maintaining and reasoning about the
+ * kernel code.
+ */
+#ifndef __HAVE_ARCH_ENTER_LAZY_CPU_MODE
+#define arch_enter_lazy_cpu_mode()	do {} while (0)
+#define arch_leave_lazy_cpu_mode()	do {} while (0)
+#define arch_flush_lazy_cpu_mode()	do {} while (0)
+#endif
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* _ASM_GENERIC_PGTABLE_H */
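
The ptep_set_access_flags() hunk above turns the macro from a plain
statement into a ({ ... }) statement expression that evaluates to whether
the PTE actually changed, leaving the follow-up work to the caller. A
minimal sketch of a caller, assuming only the generic definition above;
fixup_access_flags() and its surroundings are illustrative, not the actual
mm/memory.c code:

	/* Apply new access flags and, only if the PTE was really
	 * rewritten (and the stale TLB entry flushed), tell the arch
	 * to refresh its MMU caches. */
	static inline void fixup_access_flags(struct vm_area_struct *vma,
					      unsigned long address,
					      pte_t *ptep, pte_t entry,
					      int dirty)
	{
		if (ptep_set_access_flags(vma, address, ptep, entry, dirty))
			/* The minor-fault call sparc's sun4c needs to
			 * see, per the comment in the patch. */
			update_mmu_cache(vma, address, entry);
	}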
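
The lazy MMU mode block that the patch moves below the CONFIG_MMU section
describes a batching discipline rather than a new API: enter and leave
bracket a run of PTE updates, with the page table lock held for the whole
window and no reads through a raw PTE pointer once it has been modified.
A sketch of that call pattern; clear_young_range() and its loop body are
illustrative only, not code from mm/:

	static void clear_young_range(struct mm_struct *mm, pte_t *ptep,
				      unsigned long addr, unsigned long end,
				      spinlock_t *ptl)
	{
		spin_lock(ptl);			/* lock held across the window */
		arch_enter_lazy_mmu_mode();	/* PTE writes may now be queued */
		for (; addr != end; addr += PAGE_SIZE, ptep++) {
			pte_t old = *ptep;	/* read once, before the write */
			set_pte_at(mm, addr, ptep, pte_mkold(old));
			/* Do not read *ptep again here: the write may
			 * still be pending, which is the read hazard the
			 * comment warns about. */
		}
		arch_leave_lazy_mmu_mode();	/* flush queued updates */
		spin_unlock(ptl);
	}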