X-Git-Url: http://pilppa.org/gitweb/gitweb.cgi?a=blobdiff_plain;f=include%2Fasm-s390%2Ftlbflush.h;h=9e57a93d7de153dfcfb094bd07d73cc29d31599f;hb=5f3b28781cbc030351e2fa0712602afbea592aae;hp=6de2632a3e4f5ac1fb6c63431c4de4bf51148a9c;hpb=4800be295c34268fd3211d49828bfaa6bf62867f;p=linux-2.6-omap-h63xx.git

diff --git a/include/asm-s390/tlbflush.h b/include/asm-s390/tlbflush.h
index 6de2632a3e4..9e57a93d7de 100644
--- a/include/asm-s390/tlbflush.h
+++ b/include/asm-s390/tlbflush.h
@@ -6,67 +6,20 @@
 #include <asm/pgalloc.h>
 
 /*
- * TLB flushing:
- *
- * - flush_tlb() flushes the current mm struct TLBs
- * - flush_tlb_all() flushes all processes TLBs
- * - flush_tlb_mm(mm) flushes the specified mm context TLB's
- * - flush_tlb_page(vma, vmaddr) flushes one page
- * - flush_tlb_range(vma, start, end) flushes a range of pages
- * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
- */
-
-/*
- * S/390 has three ways of flushing TLBs
- * 'ptlb' does a flush of the local processor
- * 'csp' flushes the TLBs on all PUs of a SMP
- * 'ipte' invalidates a pte in a page table and flushes that out of
- * the TLBs of all PUs of a SMP
+ * Flush all tlb entries on the local cpu.
  */
-
-#define local_flush_tlb() \
-do { asm volatile("ptlb": : :"memory"); } while (0)
-
-#ifndef CONFIG_SMP
-
-/*
- * We always need to flush, since s390 does not flush tlb
- * on each context switch
- */
-
-static inline void flush_tlb(void)
-{
-	local_flush_tlb();
-}
-static inline void flush_tlb_all(void)
-{
-	local_flush_tlb();
-}
-static inline void flush_tlb_mm(struct mm_struct *mm)
-{
-	local_flush_tlb();
-}
-static inline void flush_tlb_page(struct vm_area_struct *vma,
-				  unsigned long addr)
-{
-	local_flush_tlb();
-}
-static inline void flush_tlb_range(struct vm_area_struct *vma,
-				   unsigned long start, unsigned long end)
+static inline void __tlb_flush_local(void)
 {
-	local_flush_tlb();
+	asm volatile("ptlb" : : : "memory");
 }
 
-#define flush_tlb_kernel_range(start, end) \
-	local_flush_tlb();
-
-#else
-
-#include <asm/smp.h>
-
-extern void smp_ptlb_all(void);
+#ifdef CONFIG_SMP
+/*
+ * Flush all tlb entries on all cpus.
+ */
+void smp_ptlb_all(void);
 
-static inline void global_flush_tlb(void)
+static inline void __tlb_flush_global(void)
 {
 	register unsigned long reg2 asm("2");
 	register unsigned long reg3 asm("3");
@@ -88,67 +41,99 @@ static inline void global_flush_tlb(void)
 		: : "d" (reg2), "d" (reg3), "d" (reg4), "m" (dummy) : "cc" );
 }
 
-/*
- * We only have to do global flush of tlb if process run since last
- * flush on any other pu than current.
- * If we have threads (mm->count > 1) we always do a global flush,
- * since the process runs on more than one processor at the same time.
- */
-
-static inline void __flush_tlb_mm(struct mm_struct * mm)
+static inline void __tlb_flush_full(struct mm_struct *mm)
 {
 	cpumask_t local_cpumask;
 
-	if (unlikely(cpus_empty(mm->cpu_vm_mask)))
-		return;
-	if (MACHINE_HAS_IDTE) {
-		pgd_t *shadow_pgd = get_shadow_pgd(mm->pgd);
-
-		if (shadow_pgd) {
-			asm volatile(
-				"	.insn rrf,0xb98e0000,0,%0,%1,0"
-				: : "a" (2048),
-				"a" (__pa(shadow_pgd) & PAGE_MASK) : "cc" );
-		}
-		asm volatile(
-			"	.insn rrf,0xb98e0000,0,%0,%1,0"
-			: : "a" (2048), "a" (__pa(mm->pgd)&PAGE_MASK) : "cc");
-		return;
-	}
 	preempt_disable();
+	/*
+	 * If the process only ran on the local cpu, do a local flush.
+	 */
 	local_cpumask = cpumask_of_cpu(smp_processor_id());
 	if (cpus_equal(mm->cpu_vm_mask, local_cpumask))
-		local_flush_tlb();
+		__tlb_flush_local();
 	else
-		global_flush_tlb();
+		__tlb_flush_global();
 	preempt_enable();
 }
+#else
+#define __tlb_flush_full(mm)	__tlb_flush_local()
+#endif
 
-static inline void flush_tlb(void)
+/*
+ * Flush all tlb entries of a page table on all cpus.
+ */
+static inline void __tlb_flush_idte(unsigned long asce)
 {
-	__flush_tlb_mm(current->mm);
+	asm volatile(
+		"	.insn rrf,0xb98e0000,0,%0,%1,0"
+		: : "a" (2048), "a" (asce) : "cc" );
 }
-static inline void flush_tlb_all(void)
+
+static inline void __tlb_flush_mm(struct mm_struct * mm)
 {
-	global_flush_tlb();
+	if (unlikely(cpus_empty(mm->cpu_vm_mask)))
+		return;
+	/*
+	 * If the machine has IDTE we prefer to do a per mm flush
+	 * on all cpus instead of doing a local flush if the mm
+	 * only ran on the local cpu.
+	 */
+	if (MACHINE_HAS_IDTE) {
+		if (mm->context.noexec)
+			__tlb_flush_idte((unsigned long)
+					 get_shadow_table(mm->pgd) |
+					 mm->context.asce_bits);
+		__tlb_flush_idte((unsigned long) mm->pgd |
+				 mm->context.asce_bits);
+		return;
+	}
+	__tlb_flush_full(mm);
 }
-static inline void flush_tlb_mm(struct mm_struct *mm)
+
+static inline void __tlb_flush_mm_cond(struct mm_struct * mm)
 {
-	__flush_tlb_mm(mm);
+	if (atomic_read(&mm->mm_users) <= 1 && mm == current->active_mm)
+		__tlb_flush_mm(mm);
 }
-static inline void flush_tlb_page(struct vm_area_struct *vma,
-				  unsigned long addr)
+
+/*
+ * TLB flushing:
+ *  flush_tlb() - flushes the current mm struct TLBs
+ *  flush_tlb_all() - flushes all processes TLBs
+ *  flush_tlb_mm(mm) - flushes the specified mm context TLB's
+ *  flush_tlb_page(vma, vmaddr) - flushes one page
+ *  flush_tlb_range(vma, start, end) - flushes a range of pages
+ *  flush_tlb_kernel_range(start, end) - flushes a range of kernel pages
+ */
+
+/*
+ * flush_tlb_mm goes together with ptep_set_wrprotect for the
+ * copy_page_range operation and flush_tlb_range is related to
+ * ptep_get_and_clear for change_protection. ptep_set_wrprotect and
+ * ptep_get_and_clear do not flush the TLBs directly if the mm has
+ * only one user. At the end of the update the flush_tlb_mm and
+ * flush_tlb_range functions need to do the flush.
+ */
+#define flush_tlb()			do { } while (0)
+#define flush_tlb_all()			do { } while (0)
+#define flush_tlb_page(vma, addr)	do { } while (0)
+
+static inline void flush_tlb_mm(struct mm_struct *mm)
 {
-	__flush_tlb_mm(vma->vm_mm);
+	__tlb_flush_mm_cond(mm);
 }
+
 static inline void flush_tlb_range(struct vm_area_struct *vma,
 				   unsigned long start, unsigned long end)
 {
-	__flush_tlb_mm(vma->vm_mm);
+	__tlb_flush_mm_cond(vma->vm_mm);
 }
 
-#define flush_tlb_kernel_range(start, end) global_flush_tlb()
-
-#endif
+static inline void flush_tlb_kernel_range(unsigned long start,
+					  unsigned long end)
+{
+	__tlb_flush_mm(&init_mm);
+}
 
 #endif /* _S390_TLBFLUSH_H */
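Note (not part of the patch): the comment added above describes a deferred-flush contract between the pte helpers and flush_tlb_mm()/flush_tlb_range(). The sketch below only illustrates that pattern for a change_protection-style walk; lookup_pte() is a hypothetical stand-in for the real page-table walk, everything else uses standard kernel interfaces.

#include <linux/mm.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

/*
 * Illustrative sketch, not code from this patch: with a single user of
 * the mm, ptep_get_and_clear() is allowed to leave the TLB untouched,
 * so the caller performs one batched flush_tlb_range() after the loop.
 */
static void change_prot_sketch(struct vm_area_struct *vma, pgprot_t newprot,
			       unsigned long start, unsigned long end)
{
	unsigned long addr;
	pte_t *ptep;
	pte_t entry;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		ptep = lookup_pte(vma->vm_mm, addr);	/* hypothetical helper */
		if (!ptep)
			continue;
		/* no per-pte TLB flush here when the mm has only one user */
		entry = ptep_get_and_clear(vma->vm_mm, addr, ptep);
		set_pte_at(vma->vm_mm, addr, ptep, pte_modify(entry, newprot));
	}
	/* one deferred flush for the whole range, as the header comment requires */
	flush_tlb_range(vma, start, end);
}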