X-Git-Url: http://pilppa.org/gitweb/gitweb.cgi?a=blobdiff_plain;f=include%2Fasm-x86_64%2Ftlbflush.h;h=4a9c20ea9b105d9523bb712e0b9a30d93aa30047;hb=4e0c1159d83a658d1ffba5bc3442f4ec4cadb436;hp=061742382520ecfe4cf416a5be91cba6b8b51f18;hpb=8ed693d29b25bbd88471ec043cc01581419d0481;p=linux-2.6-omap-h63xx.git

diff --git a/include/asm-x86_64/tlbflush.h b/include/asm-x86_64/tlbflush.h
index 06174238252..4a9c20ea9b1 100644
--- a/include/asm-x86_64/tlbflush.h
+++ b/include/asm-x86_64/tlbflush.h
@@ -56,8 +56,9 @@ extern unsigned long pgkern_mask;
  * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
  * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
  *
- * ..but the x86_64 has somewhat limited tlb flushing capabilities,
- * and page-granular flushes are available only on i486 and up.
+ * x86-64 can only flush individual pages or full VMs. For a range flush
+ * we always do the full VM. Might be worth trying if for a small
+ * range a few INVLPGs in a row are a win.
  */
 
 #ifndef CONFIG_SMP
@@ -108,6 +109,10 @@ static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long st
 #define TLBSTATE_OK	1
 #define TLBSTATE_LAZY	2
 
+/* Roughly an IPI every 20MB with 4k pages for freeing page table
+   ranges. Cost is about 42k of memory for each CPU. */
+#define ARCH_FREE_PTE_NR 5350
+
 #endif
 
 #define flush_tlb_kernel_range(start, end) flush_tlb_all()
@@ -115,7 +120,9 @@ static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long st
 static inline void flush_tlb_pgtables(struct mm_struct *mm,
 				      unsigned long start, unsigned long end)
 {
-	/* x86_64 does not keep any page table caches in TLB */
+	/* x86_64 does not keep any page table caches in a software TLB.
+	   The CPUs do in their hardware TLBs, but they are handled
+	   by the normal TLB flushing algorithms. */
 }
 
 #endif /* _X8664_TLBFLUSH_H */
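
The new comment in the first hunk says that x86-64 offers only two flush granularities, a single page (INVLPG) or the whole address space (a CR3 reload), so flush_tlb_range() always flushes the full VM, and it speculates that a few INVLPGs in a row might be a win for a small range. A minimal sketch of that alternative is shown below; it is not part of this patch. The cutoff value and the names sketch_flush_tlb_range and __flush_one_page are hypothetical, while invlpg, __flush_tlb(), PAGE_SIZE and PAGE_SHIFT refer to facilities the kernel already provides.

/* Illustrative sketch only, not part of this patch: a hypothetical
   page-granular range flush that uses INVLPG for small ranges and
   falls back to the full flush (CR3 reload) otherwise. */
#define FLUSH_RANGE_INVLPG_MAX	8	/* hypothetical tuning cutoff, in pages */

static inline void __flush_one_page(unsigned long addr)
{
	/* INVLPG invalidates the TLB entry that maps 'addr'. */
	asm volatile("invlpg (%0)" : : "r" (addr) : "memory");
}

static inline void sketch_flush_tlb_range(unsigned long start, unsigned long end)
{
	unsigned long pages = (end - start) >> PAGE_SHIFT;

	if (pages <= FLUSH_RANGE_INVLPG_MAX) {
		unsigned long addr;

		/* Invalidate only the pages in [start, end). */
		for (addr = start; addr < end; addr += PAGE_SIZE)
			__flush_one_page(addr);
	} else {
		/* What the patched header actually does for any range:
		   flush the whole VM by reloading CR3. */
		__flush_tlb();
	}
}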
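The figures quoted in the ARCH_FREE_PTE_NR comment follow from the constant itself: with 4k pages, 5350 entries cover 5350 * 4096 = 21,913,600 bytes of freed page-table pages between IPIs (the "roughly 20MB"), and holding 5350 entries costs 5350 * 8 = 42,800 bytes per CPU (the "about 42k"), assuming each entry is a 64-bit struct page pointer in the per-CPU gather array; that storage layout is an assumption here, not something the patch states. A small stand-alone check of the arithmetic:

#include <stdio.h>

int main(void)
{
	const unsigned long nr = 5350;		/* ARCH_FREE_PTE_NR */
	const unsigned long page_size = 4096;	/* 4k pages */
	const unsigned long ptr_size = 8;	/* assumed: 64-bit pointer per entry */

	/* 5350 * 4096 = 21,913,600 bytes (about 20.9 MiB) freed per IPI */
	printf("memory freed per IPI: %lu bytes\n", nr * page_size);

	/* 5350 * 8 = 42,800 bytes of array space per CPU */
	printf("per-CPU array cost:   %lu bytes\n", nr * ptr_size);

	return 0;
}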