Merge git://git.infradead.org/~dedekind/ubi-2.6
[linux-2.6-omap-h63xx.git] / arch/x86/xen/smp.c
index 557b8e24706a94e3511b9a802f333dbb5a2f573f..aafc54437403f95e84d07f1cccad12ea873bd8a3 100644
@@ -146,9 +146,14 @@ void __init xen_smp_prepare_boot_cpu(void)
           old memory can be recycled */
        make_lowmem_page_readwrite(&per_cpu__gdt_page);
 
-       for (cpu = 0; cpu < NR_CPUS; cpu++) {
-               cpus_clear(cpu_sibling_map[cpu]);
-               cpus_clear(cpu_core_map[cpu]);
+       for_each_possible_cpu(cpu) {
+               cpus_clear(per_cpu(cpu_sibling_map, cpu));
+               /*
+                * cpu_core_map lives in a per cpu area that is cleared
+                * when the per cpu array is allocated.
+                *
+                * cpus_clear(per_cpu(cpu_core_map, cpu));
+                */
        }
 
        xen_setup_vcpu_info_placement();
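
Both prepare paths in this patch make the same change: iterate only over the CPUs that can ever come online instead of all NR_CPUS slots, and stop clearing cpu_core_map explicitly, since a freshly allocated per-cpu area already starts out zeroed. A minimal sketch of the pattern, assuming kernel context (demo_sibling_map is a hypothetical per-cpu mask used only for illustration, not part of this patch):

#include <linux/cpumask.h>
#include <linux/percpu.h>

/* Hypothetical per-cpu mask, standing in for cpu_sibling_map. */
static DEFINE_PER_CPU(cpumask_t, demo_sibling_map);

static void clear_sibling_maps(void)
{
	unsigned int cpu;

	/* Walk only the CPUs that can ever exist, not 0..NR_CPUS-1. */
	for_each_possible_cpu(cpu)
		cpus_clear(per_cpu(demo_sibling_map, cpu));
}
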
@@ -158,9 +163,14 @@ void __init xen_smp_prepare_cpus(unsigned int max_cpus)
 {
        unsigned cpu;
 
-       for (cpu = 0; cpu < NR_CPUS; cpu++) {
-               cpus_clear(cpu_sibling_map[cpu]);
-               cpus_clear(cpu_core_map[cpu]);
+       for_each_possible_cpu(cpu) {
+               cpus_clear(per_cpu(cpu_sibling_map, cpu));
+               /*
+                * cpu_core_map will be zeroed when the per
+                * cpu area is allocated.
+                *
+                * cpus_clear(per_cpu(cpu_core_map, cpu));
+                */
        }
 
        smp_store_cpu_info(0);
@@ -229,10 +239,10 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
        ctxt->gdt_ents      = ARRAY_SIZE(gdt->gdt);
 
        ctxt->user_regs.cs = __KERNEL_CS;
-       ctxt->user_regs.esp = idle->thread.esp0 - sizeof(struct pt_regs);
+       ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);
 
        ctxt->kernel_ss = __KERNEL_DS;
-       ctxt->kernel_sp = idle->thread.esp0;
+       ctxt->kernel_sp = idle->thread.sp0;
 
        ctxt->event_callback_cs     = __KERNEL_CS;
        ctxt->event_callback_eip    = (unsigned long)xen_hypervisor_callback;
@@ -346,6 +356,7 @@ static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
         */
        irq_enter();
        (*func)(info);
+       __get_cpu_var(irq_stat).irq_call_count++;
        irq_exit();
 
        if (wait) {
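
The added counter folds Xen's function-call IPI into the kernel's regular per-cpu interrupt statistics. A minimal sketch of the accounting pattern, assuming kernel context where irq_stat is the architecture's per-cpu irq_cpustat_t:

#include <asm/hardirq.h>	/* declares the per-cpu irq_stat */

static void account_call_function_ipi(void)
{
	/* Bump this CPU's count of handled function-call IPIs so the
	 * event is visible in the arch's interrupt statistics. */
	__get_cpu_var(irq_stat).irq_call_count++;
}
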
@@ -360,7 +371,8 @@ int xen_smp_call_function_mask(cpumask_t mask, void (*func)(void *),
                               void *info, int wait)
 {
        struct call_data_struct data;
-       int cpus;
+       int cpus, cpu;
+       bool yield;
 
        /* Holding any lock stops cpus from going down. */
        spin_lock(&call_lock);
@@ -389,9 +401,14 @@ int xen_smp_call_function_mask(cpumask_t mask, void (*func)(void *),
        /* Send a message to other CPUs and wait for them to respond */
        xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
 
-       /* Make sure other vcpus get a chance to run.
-          XXX too severe?  Maybe we should check the other CPU's states? */
-       HYPERVISOR_sched_op(SCHEDOP_yield, 0);
+       /* Make sure other vcpus get a chance to run if they need to. */
+       yield = false;
+       for_each_cpu_mask(cpu, mask)
+               if (xen_vcpu_stolen(cpu))
+                       yield = true;
+
+       if (yield)
+               HYPERVISOR_sched_op(SCHEDOP_yield, 0);
 
        /* Wait for response */
        while (atomic_read(&data.started) != cpus ||
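
Instead of always yielding after sending the IPI, the rewritten logic above only issues the SCHEDOP_yield hypercall when at least one target vcpu currently has time stolen from it (runnable but not actually running), so the sender no longer gives up its timeslice needlessly. A minimal sketch of that decision, assuming Xen kernel context and the xen_vcpu_stolen() helper used in the hunk:

#include <linux/cpumask.h>
#include <asm/xen/hypercall.h>		/* HYPERVISOR_sched_op() */
#include <xen/interface/sched.h>	/* SCHEDOP_yield */

/* xen_vcpu_stolen() is the helper referenced by the hunk above;
 * it reports whether that vcpu is waiting to be scheduled by Xen. */

static void yield_if_targets_preempted(cpumask_t mask)
{
	unsigned int cpu;
	bool yield = false;

	for_each_cpu_mask(cpu, mask)
		if (xen_vcpu_stolen(cpu))	/* target vcpu is preempted */
			yield = true;

	if (yield)
		HYPERVISOR_sched_op(SCHEDOP_yield, 0);
}
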