*
* B stepping CPUs may hang. There are hardware work arounds
* for this. We warn about it in case your board doesn't have the work
- * arounds. Basically thats so I can tell anyone with a B stepping
+ * arounds. Basically that's so I can tell anyone with a B stepping
* CPU and SMP problems "tough".
*
* Specific items [From Pentium Processor Specification Update]
apic_write_around(APIC_ICR, cfg);
}
-void fastcall send_IPI_self(int vector)
+void send_IPI_self(int vector)
{
__send_IPI_shortcut(APIC_DEST_SELF, vector);
}
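On i386, fastcall expanded to the regparm(3) attribute, passing the first
three arguments in registers. Once the whole tree was built with -mregparm=3,
every function used that convention anyway, so the annotation became a no-op
and these hunks drop it. A sketch of the macros being retired, simplified
from this era's linkage.h (quoted from memory, not verbatim):

        /* i386: force up to three args into %eax, %edx, %ecx */
        #define FASTCALL(x)     x __attribute__((regparm(3)))
        #define fastcall        __attribute__((regparm(3)))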
*/
local_irq_save(flags);
- for (query_cpu = 0; query_cpu < NR_CPUS; ++query_cpu) {
+ for_each_possible_cpu(query_cpu) {
if (cpu_isset(query_cpu, mask)) {
__send_IPI_dest_field(cpu_to_logical_apicid(query_cpu),
vector);
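Replacing the 0..NR_CPUS-1 scan with for_each_possible_cpu() visits only CPU
numbers that can ever come online, which matters when NR_CPUS is much larger
than the actual machine. A sketch of what the iterator expanded to at the
time (reconstructed from this era's cpumask.h):

        #define for_each_possible_cpu(cpu) \
                for_each_cpu_mask((cpu), cpu_possible_map)

        /*
         * for_each_cpu_mask(cpu, mask) in turn expands to a for loop from
         * first_cpu(mask) via next_cpu(cpu, mask) until cpu >= NR_CPUS, so
         * possible-but-absent-from-the-IPI-mask CPUs are still visited;
         * hence the cpu_isset() check stays in the loop body above.
         */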
* We need to reload %cr3 since the page tables may be going
* away from under us..
*/
-void leave_mm(unsigned long cpu)
+void leave_mm(int cpu)
{
if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
BUG();
cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask);
load_cr3(swapper_pg_dir);
}
+EXPORT_SYMBOL_GPL(leave_mm);
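leave_mm() tears down this CPU's lazy reference to the mm: the BUG() fires if
the CPU is still actively using the mm (TLBSTATE_OK), the cpu_clear() stops
further flush IPIs for that mm, and load_cr3(swapper_pg_dir) parks the CPU on
the init page tables. The new GPL export makes that available outside this
file. For reference, a sketch of the per-CPU state it manipulates (this
era's tlbflush.h, cacheline padding omitted):

        #define TLBSTATE_OK     1       /* CPU actively uses ->active_mm */
        #define TLBSTATE_LAZY   2       /* CPU only holds a lazy reference */

        struct tlb_state {
                struct mm_struct *active_mm;  /* mm whose tables are loaded */
                int state;              /* TLBSTATE_OK or TLBSTATE_LAZY */
        };
        DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate);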
/*
*
* 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
* Stop ipi delivery for the old mm. This is not synchronized with
 * the other cpus, but smp_invalidate_interrupt ignores flush ipis
- * for the wrong mm, and in the worst case we perform a superflous
+ * for the wrong mm, and in the worst case we perform a superfluous
* tlb flush.
* 1a2) set cpu_tlbstate to TLBSTATE_OK
* Now the smp_invalidate_interrupt won't call leave_mm if cpu0
* 2) Leave the mm if we are in the lazy tlb mode.
*/
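The handler below is the receiver side of those rules. A sketch of its core
decision, reconstructed from this era's smp_32.c (the spelling of the
flush-everything sentinel varies across versions and is an assumption here):

        /* Receiver-side sketch of the rules in the comment above: */
        if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
                if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
                        if (flush_va == TLB_FLUSH_ALL)
                                local_flush_tlb();         /* whole TLB */
                        else
                                __flush_tlb_one(flush_va); /* one page */
                } else
                        leave_mm(cpu);  /* rule 2: drop the lazy mm */
        }
        ack_APIC_irq();
        cpu_clear(cpu, flush_cpumask);  /* tell the sender we're done */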
-fastcall void smp_invalidate_interrupt(struct pt_regs *regs)
+void smp_invalidate_interrupt(struct pt_regs *regs)
{
unsigned long cpu;
smp_mb__after_clear_bit();
out:
put_cpu_no_resched();
+ __get_cpu_var(irq_stat).irq_tlb_count++;
}
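The new irq_tlb_count bump (and the irq_resched_count / irq_call_count bumps
further down) feed per-CPU IPI statistics so /proc/interrupts can report
shootdown, reschedule and function-call traffic. A trimmed sketch of where
they live (i386 irq_cpustat_t, unrelated fields omitted):

        typedef struct {
                unsigned int __softirq_pending;
                unsigned int irq_resched_count; /* RES: reschedule IPIs */
                unsigned int irq_call_count;    /* CAL: function-call IPIs */
                unsigned int irq_tlb_count;     /* TLB: shootdown IPIs */
        } ____cacheline_aligned irq_cpustat_t;

        DECLARE_PER_CPU(irq_cpustat_t, irq_stat);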
void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
*/
cpu_clear(smp_processor_id(), cpu_online_map);
disable_local_APIC();
- if (cpu_data[smp_processor_id()].hlt_works_ok)
+ if (cpu_data(smp_processor_id()).hlt_works_ok)
for (;;) halt();
for (;;);
}
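cpu_data used to be a flat NR_CPUS-sized array of struct cpuinfo_x86; after
the per-CPU conversion it is an accessor macro, hence the bracket-to-paren
change in this hunk. A sketch of the post-conversion definition (from
memory):

        DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info);
        #define cpu_data(cpu)   per_cpu(cpu_info, cpu)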
* all the work is done automatically when
* we return from the interrupt.
*/
-fastcall void smp_reschedule_interrupt(struct pt_regs *regs)
+void smp_reschedule_interrupt(struct pt_regs *regs)
{
ack_APIC_irq();
+ __get_cpu_var(irq_stat).irq_resched_count++;
}
-fastcall void smp_call_function_interrupt(struct pt_regs *regs)
+void smp_call_function_interrupt(struct pt_regs *regs)
{
void (*func) (void *info) = call_data->func;
void *info = call_data->info;
*/
irq_enter();
(*func)(info);
+ __get_cpu_var(irq_stat).irq_call_count++;
irq_exit();
if (wait) {
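call_data points at a descriptor the IPI sender fills in before raising the
function-call vector; the started/finished counters implement the handshake
the wait flag relies on (each target increments started before calling func
and, when wait is set, finished after it returns, while the sender spins on
those counts). The structure as this file defines it in this era:

        struct call_data_struct {
                void (*func)(void *info);  /* function every target runs */
                void *info;                /* its argument */
                atomic_t started;          /* targets that entered func */
                atomic_t finished;         /* targets that returned */
                int wait;                  /* sender spins on finished */
        };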
{
int i;
- for (i = 0; i < NR_CPUS; i++) {
- if (x86_cpu_to_apicid[i] == apic_id)
+ for_each_possible_cpu(i) {
+ if (per_cpu(x86_cpu_to_apicid, i) == apic_id)
return i;
}
return -1;
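The cpu-to-APIC-id map moved from a flat array into per-CPU data, hence the
per_cpu() accessor in this hunk. A sketch of the two forms (the u16 width is
this era's choice and an assumption here; earlier kernels used u8):

        /* before: flat array indexed by cpu number */
        u16 x86_cpu_to_apicid[NR_CPUS] __read_mostly
                        = { [0 ... NR_CPUS-1] = BAD_APICID };

        /* after: per-CPU variable, read via per_cpu(x86_cpu_to_apicid, i) */
        DEFINE_PER_CPU(u16, x86_cpu_to_apicid) = BAD_APICID;
        EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid);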
.smp_send_reschedule = native_smp_send_reschedule,
.smp_call_function_mask = native_smp_call_function_mask,
};
+EXPORT_SYMBOL_GPL(smp_ops);
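smp_ops is the indirection layer that lets a paravirtualized port override
the native SMP primitives, and the GPL export allows such an implementation
to live in a module. For reference, a sketch of the ops table these two
entries belong to (this era's smp_32.h, from memory):

        struct smp_ops {
                void (*smp_prepare_boot_cpu)(void);
                void (*smp_prepare_cpus)(unsigned max_cpus);
                int (*cpu_up)(unsigned cpu);
                void (*smp_cpus_done)(unsigned max_cpus);

                void (*smp_send_stop)(void);
                void (*smp_send_reschedule)(int cpu);
                int (*smp_call_function_mask)(cpumask_t mask,
                                              void (*func)(void *info),
                                              void *info, int wait);
        };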