X-Git-Url: http://pilppa.org/gitweb/gitweb.cgi?a=blobdiff_plain;f=include%2Fasm-powerpc%2Ftlbflush.h;h=86e6266a028bb9a69ad1217b4c5005ffa264156c;hb=6e66837b2669d6c07177ada4db9ec47ce9c31873;hp=ca3655672bbc890e9d268fa9418d0b9c880e23a8;hpb=2fc2991175bf77395e6b15fe6b2304d3bf72da40;p=linux-2.6-omap-h63xx.git

diff --git a/include/asm-powerpc/tlbflush.h b/include/asm-powerpc/tlbflush.h
index ca3655672bb..86e6266a028 100644
--- a/include/asm-powerpc/tlbflush.h
+++ b/include/asm-powerpc/tlbflush.h
@@ -17,11 +17,73 @@
  */
 #ifdef __KERNEL__
 
-#include <linux/config.h>
-
 struct mm_struct;
+struct vm_area_struct;
+
+#if defined(CONFIG_4xx) || defined(CONFIG_8xx) || defined(CONFIG_FSL_BOOKE)
+/*
+ * TLB flushing for software loaded TLB chips
+ *
+ * TODO: (CONFIG_FSL_BOOKE) determine if flush_tlb_range &
+ * flush_tlb_kernel_range are best implemented as tlbia vs
+ * specific tlbie's
+ */
+
+extern void _tlbie(unsigned long address);
+
+#if defined(CONFIG_40x) || defined(CONFIG_8xx)
+#define _tlbia()	asm volatile ("tlbia; sync" : : : "memory")
+#else /* CONFIG_44x || CONFIG_FSL_BOOKE */
+extern void _tlbia(void);
+#endif
+
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+	_tlbia();
+}
+
+static inline void flush_tlb_page(struct vm_area_struct *vma,
+				  unsigned long vmaddr)
+{
+	_tlbie(vmaddr);
+}
+
+static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
+					 unsigned long vmaddr)
+{
+	_tlbie(vmaddr);
+}
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+				   unsigned long start, unsigned long end)
+{
+	_tlbia();
+}
+
+static inline void flush_tlb_kernel_range(unsigned long start,
+					  unsigned long end)
+{
+	_tlbia();
+}
+
+#elif defined(CONFIG_PPC32)
+/*
+ * TLB flushing for "classic" hash-MMU 32-bit CPUs, 6xx, 7xx, 7xxx
+ */
+extern void _tlbie(unsigned long address);
+extern void _tlbia(void);
 
-#ifdef CONFIG_PPC64
+extern void flush_tlb_mm(struct mm_struct *mm);
+extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
+extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
+extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+			    unsigned long end);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+
+#else
+/*
+ * TLB flushing for 64-bit hash-MMU CPUs
+ */
 
 #include <linux/percpu.h>
 #include <asm/page.h>
@@ -29,116 +91,90 @@ struct mm_struct;
 #define PPC64_TLB_BATCH_NR 192
 
 struct ppc64_tlb_batch {
-	unsigned long index;
-	struct mm_struct *mm;
-	pte_t pte[PPC64_TLB_BATCH_NR];
-	unsigned long vaddr[PPC64_TLB_BATCH_NR];
-	unsigned int large;
+	int			active;
+	unsigned long		index;
+	struct mm_struct	*mm;
+	real_pte_t		pte[PPC64_TLB_BATCH_NR];
+	unsigned long		vaddr[PPC64_TLB_BATCH_NR];
+	unsigned int		psize;
 };
 DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
 
 extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
 
-static inline void flush_tlb_pending(void)
-{
-	struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
-
-	if (batch->index)
-		__flush_tlb_pending(batch);
-	put_cpu_var(ppc64_tlb_batch);
-}
+extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
+			    pte_t *ptep, unsigned long pte, int huge);
 
-extern void flush_hash_page(unsigned long va, pte_t pte, int local);
-void flush_hash_range(unsigned long number, int local);
+#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
 
-#else /* CONFIG_PPC64 */
+static inline void arch_enter_lazy_mmu_mode(void)
+{
+	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
 
-#include <linux/mm.h>
+	batch->active = 1;
+}
 
-extern void _tlbie(unsigned long address);
-extern void _tlbia(void);
+static inline void arch_leave_lazy_mmu_mode(void)
+{
+	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
 
-/*
- * TODO: (CONFIG_FSL_BOOKE) determine if flush_tlb_range &
- * flush_tlb_kernel_range are best implemented as tlbia vs
- * specific tlbie's
- */
+	if (batch->index)
+		__flush_tlb_pending(batch);
+	batch->active = 0;
+}
 
-#if (defined(CONFIG_4xx) && !defined(CONFIG_44x)) || defined(CONFIG_8xx)
-#define flush_tlb_pending()	asm volatile ("tlbia; sync" : : : "memory")
-#elif defined(CONFIG_4xx) || defined(CONFIG_FSL_BOOKE)
-#define flush_tlb_pending()	_tlbia()
-#endif
+#define arch_flush_lazy_mmu_mode()	do {} while (0)
 
-/*
- * This gets called at the end of handling a page fault, when
- * the kernel has put a new PTE into the page table for the process.
- * We use it to ensure coherency between the i-cache and d-cache
- * for the page which has just been mapped in.
- * On machines which use an MMU hash table, we use this to put a
- * corresponding HPTE into the hash table ahead of time, instead of
- * waiting for the inevitable extra hash-table miss exception.
- */
-extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
-#endif /* CONFIG_PPC64 */
+extern void flush_hash_page(unsigned long va, real_pte_t pte, int psize,
+			    int local);
+extern void flush_hash_range(unsigned long number, int local);
 
-#if defined(CONFIG_PPC64) || defined(CONFIG_4xx) || \
-	defined(CONFIG_FSL_BOOKE) || defined(CONFIG_8xx)
 static inline void flush_tlb_mm(struct mm_struct *mm)
 {
-	flush_tlb_pending();
 }
 
 static inline void flush_tlb_page(struct vm_area_struct *vma,
-				unsigned long vmaddr)
+				  unsigned long vmaddr)
 {
-#ifdef CONFIG_PPC64
-	flush_tlb_pending();
-#else
-	_tlbie(vmaddr);
-#endif
 }
 
 static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
 					 unsigned long vmaddr)
 {
-#ifndef CONFIG_PPC64
-	_tlbie(vmaddr);
-#endif
 }
 
 static inline void flush_tlb_range(struct vm_area_struct *vma,
-		unsigned long start, unsigned long end)
+				   unsigned long start, unsigned long end)
 {
-	flush_tlb_pending();
 }
 
 static inline void flush_tlb_kernel_range(unsigned long start,
-		unsigned long end)
+					  unsigned long end)
 {
-	flush_tlb_pending();
 }
 
-#else /* 6xx, 7xx, 7xxx cpus */
-
-extern void flush_tlb_mm(struct mm_struct *mm);
-extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
-extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
-extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
-			    unsigned long end);
-extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
-
 #endif
 
+/*
+ * This gets called at the end of handling a page fault, when
+ * the kernel has put a new PTE into the page table for the process.
+ * We use it to ensure coherency between the i-cache and d-cache
+ * for the page which has just been mapped in.
+ * On machines which use an MMU hash table, we use this to put a
+ * corresponding HPTE into the hash table ahead of time, instead of
+ * waiting for the inevitable extra hash-table miss exception.
+ */
+extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
+
 /*
  * This is called in munmap when we have freed up some page-table
  * pages.  We don't need to do anything here, there's nothing special
  * about our page-table pages.  -- paulus
  */
 static inline void flush_tlb_pgtables(struct mm_struct *mm,
-			unsigned long start, unsigned long end)
+				      unsigned long start, unsigned long end)
 {
 }
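
The interesting change on the 64-bit hash-MMU side is the move from the open-coded
flush_tlb_pending() to the generic lazy-MMU hooks: PTE updates made between
arch_enter_lazy_mmu_mode() and arch_leave_lazy_mmu_mode() are queued into the
per-CPU ppc64_tlb_batch (hpte_need_flush() does the queuing) and drained in one
pass by __flush_tlb_pending(), either when the batch fills up or when lazy mode
is left. The sketch below is a minimal user-space analogue of that batching
pattern, for illustration only; it is not kernel code, and the names tlb_batch,
enter_lazy, queue_flush, leave_lazy and flush_pending are invented here.

/* Illustrative stand-alone C program modelling the lazy-MMU batching above. */
#include <stdio.h>

#define BATCH_NR 192			/* mirrors PPC64_TLB_BATCH_NR */

struct tlb_batch {
	int active;
	unsigned long index;
	unsigned long vaddr[BATCH_NR];
};

static struct tlb_batch batch;		/* the kernel keeps one per CPU */

static void flush_pending(struct tlb_batch *b)
{
	/* Stands in for __flush_tlb_pending(): one pass invalidates every
	 * queued address, instead of one flush per PTE update. */
	printf("flushing %lu queued addresses\n", b->index);
	b->index = 0;
}

static void enter_lazy(void)
{
	batch.active = 1;		/* cf. arch_enter_lazy_mmu_mode() */
}

static void queue_flush(unsigned long vaddr)
{
	/* cf. hpte_need_flush(): flush at once when no batch is active,
	 * otherwise queue and drain early if the batch fills up. */
	if (!batch.active) {
		printf("flushing 0x%lx immediately\n", vaddr);
		return;
	}
	batch.vaddr[batch.index++] = vaddr;
	if (batch.index == BATCH_NR)
		flush_pending(&batch);
}

static void leave_lazy(void)
{
	/* cf. arch_leave_lazy_mmu_mode(): drain leftovers, then deactivate. */
	if (batch.index)
		flush_pending(&batch);
	batch.active = 0;
}

int main(void)
{
	enter_lazy();
	for (unsigned long va = 0x1000; va < 0x5000; va += 0x1000)
		queue_flush(va);	/* four PTE updates, one flush */
	leave_lazy();
	return 0;
}

The point of the pattern is amortization: each drain covers up to
PPC64_TLB_BATCH_NR (192) queued addresses, so the cost of the real invalidation
work (hash-table updates and tlbie broadcasts on actual hardware) is paid once
per batch rather than once per PTE update.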