X-Git-Url: http://pilppa.org/gitweb/gitweb.cgi?a=blobdiff_plain;f=arch%2Fia64%2Fkernel%2Fsmp.c;h=b3a47f986e1e69e99e5cc980cdff3af3418a368a;hb=b2cd64153b94473f6bd82448a68b8e8c041676ea;hp=f4c7f7769cf7df617bdb7cecefd5be4802bb840e;hpb=fb7665544dd60e016494cd5531f5b65ddae22ddc;p=linux-2.6-omap-h63xx.git

diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index f4c7f7769cf..b3a47f986e1 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -49,6 +49,18 @@
 #include <asm/unistd.h>
 #include <asm/mca.h>
 
+/*
+ * Note: alignment of 4 entries/cacheline was empirically determined
+ * to be a good tradeoff between hot cachelines & spreading the array
+ * across too many cachelines.
+ */
+static struct local_tlb_flush_counts {
+	unsigned int count;
+} __attribute__((__aligned__(32))) local_tlb_flush_counts[NR_CPUS];
+
+static DEFINE_PER_CPU(unsigned int, shadow_flush_counts[NR_CPUS]) ____cacheline_aligned;
+
+
 /*
  * Structure and data for smp_call_function(). This is designed to minimise static memory
  * requirements. It also looks cleaner.
@@ -174,7 +186,7 @@ handle_IPI (int irq, void *dev_id)
 }
 
 /*
- * Called with preeemption disabled.
+ * Called with preemption disabled.
  */
 static inline void
 send_IPI_single (int dest_cpu, int op)
@@ -184,7 +196,7 @@ send_IPI_single (int dest_cpu, int op)
 }
 
 /*
- * Called with preeemption disabled.
+ * Called with preemption disabled.
  */
 static inline void
 send_IPI_allbutself (int op)
@@ -198,7 +210,7 @@ send_IPI_allbutself (int op)
 }
 
 /*
- * Called with preeemption disabled.
+ * Called with preemption disabled.
  */
 static inline void
 send_IPI_all (int op)
@@ -211,7 +223,7 @@ send_IPI_all (int op)
 }
 
 /*
- * Called with preeemption disabled.
+ * Called with preemption disabled.
  */
 static inline void
 send_IPI_self (int op)
@@ -221,13 +233,13 @@ send_IPI_self (int op)
 
 #ifdef CONFIG_KEXEC
 void
-kdump_smp_send_stop()
+kdump_smp_send_stop(void)
 {
 	send_IPI_allbutself(IPI_KDUMP_CPU_STOP);
 }
 
 void
-kdump_smp_send_init()
+kdump_smp_send_init(void)
 {
 	unsigned int cpu, self_cpu;
 	self_cpu = smp_processor_id();
@@ -240,7 +252,7 @@ kdump_smp_send_init()
 }
 #endif
 /*
- * Called with preeemption disabled.
+ * Called with preemption disabled.
  */
 void
 smp_send_reschedule (int cpu)
@@ -248,6 +260,62 @@ smp_send_reschedule (int cpu)
 	platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
 }
 
+/*
+ * Called with preemption disabled.
+ */
+static void
+smp_send_local_flush_tlb (int cpu)
+{
+	platform_send_ipi(cpu, IA64_IPI_LOCAL_TLB_FLUSH, IA64_IPI_DM_INT, 0);
+}
+
+void
+smp_local_flush_tlb(void)
+{
+	/*
+	 * Use atomic ops. Otherwise, the load/increment/store sequence from
+	 * a "++" operation can have the line stolen between the load & store.
+	 * The overhead of the atomic op is negligible in this case & offers
+	 * significant benefit for the brief periods where lots of cpus
+	 * are simultaneously flushing TLBs.
+	 */
+	ia64_fetchadd(1, &local_tlb_flush_counts[smp_processor_id()].count, acq);
+	local_flush_tlb_all();
+}
+
+#define FLUSH_DELAY	5 /* Usec backoff to eliminate excessive cacheline bouncing */
+
+void
+smp_flush_tlb_cpumask(cpumask_t xcpumask)
+{
+	unsigned int *counts = __ia64_per_cpu_var(shadow_flush_counts);
+	cpumask_t cpumask = xcpumask;
+	int mycpu, cpu, flush_mycpu = 0;
+
+	preempt_disable();
+	mycpu = smp_processor_id();
+
+	for_each_cpu_mask(cpu, cpumask)
+		counts[cpu] = local_tlb_flush_counts[cpu].count;
+
+	mb();
+	for_each_cpu_mask(cpu, cpumask) {
+		if (cpu == mycpu)
+			flush_mycpu = 1;
+		else
+			smp_send_local_flush_tlb(cpu);
+	}
+
+	if (flush_mycpu)
+		smp_local_flush_tlb();
+
+	for_each_cpu_mask(cpu, cpumask)
+		while (counts[cpu] == local_tlb_flush_counts[cpu].count)
+			udelay(FLUSH_DELAY);
+
+	preempt_enable();
+}
+
 void
 smp_flush_tlb_all (void)
 {
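
Note on the completion protocol added above: smp_flush_tlb_cpumask() snapshots each target CPU's local_tlb_flush_counts entry into the caller's shadow_flush_counts array, sends IA64_IPI_LOCAL_TLB_FLUSH, and then spins (with a udelay() backoff) until every counter moves past its snapshot. The counter increment in smp_local_flush_tlb() therefore doubles as the acknowledgment, so no reply IPI is needed. Below is a minimal userspace sketch of that counter-handshake pattern using C11 atomics and pthreads; it is an illustration only, not kernel code, and the names (worker, flush_requested, snap) and timing values are hypothetical.

/*
 * Standalone illustration of the counter handshake: the requester snapshots
 * each target's counter, pokes the target, then spins until the counter
 * advances past the snapshot.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

#define NWORKERS 4

static atomic_uint flush_count[NWORKERS];     /* analogous to local_tlb_flush_counts[] */
static atomic_int  flush_requested[NWORKERS]; /* stands in for the flush IPI */
static atomic_int  stop;

static void *worker(void *arg)
{
	int me = (int)(long)arg;

	while (!atomic_load(&stop)) {
		if (atomic_exchange(&flush_requested[me], 0)) {
			/* "flush": bump the counter so the requester sees completion */
			atomic_fetch_add(&flush_count[me], 1);
		}
		usleep(1000);
	}
	return NULL;
}

int main(void)
{
	pthread_t tid[NWORKERS];
	unsigned int snap[NWORKERS];   /* plays the role of shadow_flush_counts */
	int i;

	for (i = 0; i < NWORKERS; i++)
		pthread_create(&tid[i], NULL, worker, (void *)(long)i);

	/* snapshot counters, then request a flush from every worker */
	for (i = 0; i < NWORKERS; i++)
		snap[i] = atomic_load(&flush_count[i]);
	for (i = 0; i < NWORKERS; i++)
		atomic_store(&flush_requested[i], 1);

	/* a worker is done once its counter has moved past the snapshot */
	for (i = 0; i < NWORKERS; i++)
		while (atomic_load(&flush_count[i]) == snap[i])
			usleep(5);   /* cf. the FLUSH_DELAY backoff */

	printf("all workers flushed\n");

	atomic_store(&stop, 1);
	for (i = 0; i < NWORKERS; i++)
		pthread_join(tid[i], NULL);
	return 0;
}

Because the requester only ever compares the live counter against its private snapshot, the targets never write to shared acknowledgment state owned by the requester, which is the same property the patch relies on to keep cacheline bouncing low during bursts of concurrent flushes.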