X-Git-Url: http://pilppa.org/gitweb/?a=blobdiff_plain;f=arch%2Fsparc64%2Fkernel%2Fsmp.c;h=063668feab1e4e06aa7106834012a33e6a5120b4;hb=622824dbb536f7bdc241eefc3e1ae31c463b4eb8;hp=fa63c68a181941736a7809553d41538a1c5ab074;hpb=a8cac817764a494705aebd99fd51bdf6cdc28ec9;p=linux-2.6-omap-h63xx.git
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index fa63c68a181..063668feab1 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -756,6 +756,8 @@ dump_cpu_list_and_out:
 	printk("]\n");
 }
 
+static void (*xcall_deliver)(u64, u64, u64, cpumask_t);
+
 /* Send cross call to all processors mentioned in MASK
  * except self.
  */
@@ -767,12 +769,7 @@ static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 d
 	cpus_and(mask, mask, cpu_online_map);
 	cpu_clear(this_cpu, mask);
 
-	if (tlb_type == spitfire)
-		spitfire_xcall_deliver(data0, data1, data2, mask);
-	else if (tlb_type == cheetah || tlb_type == cheetah_plus)
-		cheetah_xcall_deliver(data0, data1, data2, mask);
-	else
-		hypervisor_xcall_deliver(data0, data1, data2, mask);
+	xcall_deliver(data0, data1, data2, mask);
 
 	/* NOTE: Caller runs local copy on master. */
 	put_cpu();
@@ -788,92 +785,36 @@ static void smp_start_sync_tick_client(int cpu)
 			      0, 0, 0, mask);
 }
 
-/* Send cross call to all processors except self. */
-#define smp_cross_call(func, ctx, data1, data2) \
-	smp_cross_call_masked(func, ctx, data1, data2, cpu_online_map)
-
-struct call_data_struct {
-	void (*func) (void *info);
-	void *info;
-	atomic_t finished;
-	int wait;
-};
-
-static struct call_data_struct *call_data;
-
 extern unsigned long xcall_call_function;
 
-/**
- * smp_call_function(): Run a function on all other CPUs.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @nonatomic: currently unused.
- * @wait: If true, wait (atomically) until function has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code. Does not return until
- * remote CPUs are nearly ready to execute <<func>> or are or have executed.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-static int smp_call_function_mask(void (*func)(void *info), void *info,
-				  int nonatomic, int wait, cpumask_t mask)
+void arch_send_call_function_ipi(cpumask_t mask)
 {
-	struct call_data_struct data;
-	int cpus;
-
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON(irqs_disabled());
-
-	data.func = func;
-	data.info = info;
-	atomic_set(&data.finished, 0);
-	data.wait = wait;
-
-	spin_lock(&call_lock);
-
-	cpu_clear(smp_processor_id(), mask);
-	cpus = cpus_weight(mask);
-	if (!cpus)
-		goto out_unlock;
-
-	call_data = &data;
-	mb();
 	smp_cross_call_masked(&xcall_call_function, 0, 0, 0, mask);
+}
 
-	/* Wait for response */
-	while (atomic_read(&data.finished) != cpus)
-		cpu_relax();
+extern unsigned long xcall_call_function_single;
 
-out_unlock:
-	spin_unlock(&call_lock);
+void arch_send_call_function_single_ipi(int cpu)
+{
+	cpumask_t mask = cpumask_of_cpu(cpu);
 
-	return 0;
+	smp_cross_call_masked(&xcall_call_function_single, 0, 0, 0, mask);
 }
 
-int smp_call_function(void (*func)(void *info), void *info,
-		      int nonatomic, int wait)
-{
-	return smp_call_function_mask(func, info, nonatomic, wait,
-				      cpu_online_map);
-}
+/* Send cross call to all processors except self. */
+#define smp_cross_call(func, ctx, data1, data2) \
+	smp_cross_call_masked(func, ctx, data1, data2, cpu_online_map)
 
 void smp_call_function_client(int irq, struct pt_regs *regs)
 {
-	void (*func) (void *info) = call_data->func;
-	void *info = call_data->info;
+	clear_softint(1 << irq);
+	generic_smp_call_function_interrupt();
+}
 
+void smp_call_function_single_client(int irq, struct pt_regs *regs)
+{
 	clear_softint(1 << irq);
-	if (call_data->wait) {
-		/* let initiator proceed only after completion */
-		func(info);
-		atomic_inc(&call_data->finished);
-	} else {
-		/* let initiator proceed after getting data */
-		atomic_inc(&call_data->finished);
-		func(info);
-	}
+	generic_smp_call_function_single_interrupt();
 }
 
 static void tsb_sync(void *info)
@@ -893,13 +834,12 @@ static void tsb_sync(void *info)
 
 void smp_tsb_sync(struct mm_struct *mm)
 {
-	smp_call_function_mask(tsb_sync, mm, 0, 1, mm->cpu_vm_mask);
+	smp_call_function_mask(mm->cpu_vm_mask, tsb_sync, mm, 1);
 }
 
 extern unsigned long xcall_flush_tlb_mm;
 extern unsigned long xcall_flush_tlb_pending;
 extern unsigned long xcall_flush_tlb_kernel_range;
-extern unsigned long xcall_report_regs;
 #ifdef CONFIG_MAGIC_SYSRQ
 extern unsigned long xcall_fetch_glob_regs;
 #endif
@@ -950,29 +890,24 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
 		__local_flush_dcache_page(page);
 	} else if (cpu_online(cpu)) {
 		void *pg_addr = page_address(page);
-		u64 data0;
+		u64 data0 = 0;
 
 		if (tlb_type == spitfire) {
-			data0 =
-				((u64)&xcall_flush_dcache_page_spitfire);
+			data0 = ((u64)&xcall_flush_dcache_page_spitfire);
 			if (page_mapping(page) != NULL)
 				data0 |= ((u64)1 << 32);
-			spitfire_xcall_deliver(data0,
-					       __pa(pg_addr),
-					       (u64) pg_addr,
-					       mask);
 		} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
 #ifdef DCACHE_ALIASING_POSSIBLE
-			data0 =
-				((u64)&xcall_flush_dcache_page_cheetah);
-			cheetah_xcall_deliver(data0,
-					      __pa(pg_addr),
-					      0, mask);
+			data0 = ((u64)&xcall_flush_dcache_page_cheetah);
 #endif
 		}
+		if (data0) {
+			xcall_deliver(data0, __pa(pg_addr),
+				      (u64) pg_addr, mask);
 #ifdef CONFIG_DEBUG_DCFLUSH
-		atomic_inc(&dcpage_flushes_xcall);
+			atomic_inc(&dcpage_flushes_xcall);
 #endif
+		}
 	}
 
 	put_cpu();
@@ -980,10 +915,10 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
 
 void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
 {
-	void *pg_addr = page_address(page);
 	cpumask_t mask = cpu_online_map;
-	u64 data0;
+	void *pg_addr;
 	int this_cpu;
+	u64 data0;
 
 	if (tlb_type == hypervisor)
 		return;
@@ -997,25 +932,24 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
 #endif
 	if (cpus_empty(mask))
 		goto flush_self;
+	data0 = 0;
+	pg_addr = page_address(page);
 	if (tlb_type == spitfire) {
 		data0 = ((u64)&xcall_flush_dcache_page_spitfire);
 		if (page_mapping(page) != NULL)
 			data0 |= ((u64)1 << 32);
-		spitfire_xcall_deliver(data0,
-				       __pa(pg_addr),
-				       (u64) pg_addr,
-				       mask);
 	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
 #ifdef DCACHE_ALIASING_POSSIBLE
 		data0 = ((u64)&xcall_flush_dcache_page_cheetah);
-		cheetah_xcall_deliver(data0,
-				      __pa(pg_addr),
-				      0, mask);
 #endif
 	}
+	if (data0) {
+		xcall_deliver(data0, __pa(pg_addr),
+			      (u64) pg_addr, mask);
 #ifdef CONFIG_DEBUG_DCFLUSH
-	atomic_inc(&dcpage_flushes_xcall);
+		atomic_inc(&dcpage_flushes_xcall);
 #endif
+	}
 
 flush_self:
 	__local_flush_dcache_page(page);
@@ -1078,11 +1012,6 @@ void kgdb_roundup_cpus(unsigned long flags)
 }
 #endif
 
-void smp_report_regs(void)
-{
-	smp_cross_call(&xcall_report_regs, 0, 0, 0);
-}
-
 #ifdef CONFIG_MAGIC_SYSRQ
 void smp_fetch_global_regs(void)
 {
@@ -1264,6 +1193,16 @@ void __devinit smp_prepare_boot_cpu(void)
 {
 }
 
+void __init smp_setup_processor_id(void)
+{
+	if (tlb_type == spitfire)
+		xcall_deliver = spitfire_xcall_deliver;
+	else if (tlb_type == cheetah || tlb_type == cheetah_plus)
+		xcall_deliver = cheetah_xcall_deliver;
+	else
+		xcall_deliver = hypervisor_xcall_deliver;
+}
+
 void __devinit smp_fill_in_sib_core_maps(void)
 {
 	unsigned int i;