Merge branches 'x86/acpi', 'x86/asm', 'x86/cpudetect', 'x86/crashdump', 'x86/debug...
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index 9840b7ec749a00dfc7a906e8a9045b76d545650d..6f5a38c7f9001050ce4fdb5f7e0466fcbfe3e0bd 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -81,7 +81,7 @@ static void enable_local_vic_irq(unsigned int irq);
 static void disable_local_vic_irq(unsigned int irq);
 static void before_handle_vic_irq(unsigned int irq);
 static void after_handle_vic_irq(unsigned int irq);
-static void set_vic_irq_affinity(unsigned int irq, cpumask_t mask);
+static void set_vic_irq_affinity(unsigned int irq, const struct cpumask *mask);
 static void ack_vic_irq(unsigned int irq);
 static void vic_enable_cpi(void);
 static void do_boot_cpu(__u8 cpuid);
@@ -211,8 +211,6 @@ static __u32 cpu_booted_map;
 static cpumask_t smp_commenced_mask = CPU_MASK_NONE;
 
 /* This is for the new dynamic CPU boot code */
-cpumask_t cpu_callin_map = CPU_MASK_NONE;
-cpumask_t cpu_callout_map = CPU_MASK_NONE;
 
 /* The per processor IRQ masks (these are usually kept in sync) */
 static __u16 vic_irq_mask[NR_CPUS] __cacheline_aligned;
@@ -378,7 +376,7 @@ void __init find_smp_config(void)
        cpus_addr(phys_cpu_present_map)[0] |=
            voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK +
                                       3) << 24;
-       cpu_possible_map = phys_cpu_present_map;
+       init_cpu_possible(&phys_cpu_present_map);
        printk("VOYAGER SMP: phys_cpu_present_map = 0x%lx\n",
               cpus_addr(phys_cpu_present_map)[0]);
        /* Here we set up the VIC to enable SMP */
@@ -402,7 +400,7 @@ void __init find_smp_config(void)
             VOYAGER_SUS_IN_CONTROL_PORT);
 
        current_thread_info()->cpu = boot_cpu_id;
-       x86_write_percpu(cpu_number, boot_cpu_id);
+       percpu_write(cpu_number, boot_cpu_id);
 }
 
 /*
@@ -530,7 +528,6 @@ static void __init do_boot_cpu(__u8 cpu)
        /* init_tasks (in sched.c) is indexed logically */
        stack_start.sp = (void *)idle->thread.sp;
 
-       init_gdt(cpu);
        per_cpu(current_task, cpu) = idle;
        early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
        irq_ctx_init(cpu);
@@ -1599,16 +1596,16 @@ static void after_handle_vic_irq(unsigned int irq)
  * change the mask and then do an interrupt enable CPI to re-enable on
  * the selected processors */
 
-void set_vic_irq_affinity(unsigned int irq, cpumask_t mask)
+void set_vic_irq_affinity(unsigned int irq, const struct cpumask *mask)
 {
        /* Only extended processors handle interrupts */
        unsigned long real_mask;
        unsigned long irq_mask = 1 << irq;
        int cpu;
 
-       real_mask = cpus_addr(mask)[0] & voyager_extended_vic_processors;
+       real_mask = cpus_addr(*mask)[0] & voyager_extended_vic_processors;
 
-       if (cpus_addr(mask)[0] == 0)
+       if (cpus_addr(*mask)[0] == 0)
                /* can't have no CPUs to accept the interrupt -- extremely
                 * bad things will happen */
                return;
@@ -1747,13 +1744,13 @@ static void __init voyager_smp_prepare_cpus(unsigned int max_cpus)
 
 static void __cpuinit voyager_smp_prepare_boot_cpu(void)
 {
-       init_gdt(smp_processor_id());
-       switch_to_new_gdt();
+       int cpu = smp_processor_id();
+       switch_to_new_gdt(cpu);
 
-       cpu_set(smp_processor_id(), cpu_online_map);
-       cpu_set(smp_processor_id(), cpu_callout_map);
-       cpu_set(smp_processor_id(), cpu_possible_map);
-       cpu_set(smp_processor_id(), cpu_present_map);
+       cpu_online_map = cpumask_of_cpu(smp_processor_id());
+       cpu_callout_map = cpumask_of_cpu(smp_processor_id());
+       cpu_callin_map = CPU_MASK_NONE;
+       cpu_present_map = cpumask_of_cpu(smp_processor_id());
 }
 
 static int __cpuinit voyager_cpu_up(unsigned int cpu)
@@ -1780,12 +1777,11 @@ static void __init voyager_smp_cpus_done(unsigned int max_cpus)
 void __init smp_setup_processor_id(void)
 {
        current_thread_info()->cpu = hard_smp_processor_id();
-       x86_write_percpu(cpu_number, hard_smp_processor_id());
 }
 
-static void voyager_send_call_func(cpumask_t callmask)
+static void voyager_send_call_func(const struct cpumask *callmask)
 {
-       __u32 mask = cpus_addr(callmask)[0] & ~(1 << smp_processor_id());
+       __u32 mask = cpus_addr(*callmask)[0] & ~(1 << smp_processor_id());
        send_CPI(mask, VIC_CALL_FUNCTION_CPI);
 }
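
The recurring change in this diff is the cpumask API conversion: callbacks such as set_vic_irq_affinity() and voyager_send_call_func() now take a const struct cpumask * instead of a cpumask_t by value, so the (potentially NR_CPUS-bit wide) mask is no longer copied onto the stack at every call. Below is a minimal stand-alone C sketch of the same by-value vs. by-pointer pattern; struct cpu_mask and the helper names are illustrative stand-ins, not the kernel API.

#include <stdio.h>
#include <string.h>

#define NR_CPUS 1024    /* large enough that copying a whole mask is costly */

struct cpu_mask {       /* stand-in for the kernel's struct cpumask */
        unsigned long bits[NR_CPUS / (8 * sizeof(unsigned long))];
};

/* old style: the whole mask structure is copied at every call */
static unsigned long first_word_by_value(struct cpu_mask mask)
{
        return mask.bits[0];
}

/* new style: only a pointer is passed; const documents read-only use */
static unsigned long first_word_by_ref(const struct cpu_mask *mask)
{
        return mask->bits[0];
}

int main(void)
{
        struct cpu_mask m;

        memset(&m, 0, sizeof(m));
        m.bits[0] = 0x5;        /* CPUs 0 and 2 set */

        printf("by value: %#lx\n", first_word_by_value(m));
        printf("by ref:   %#lx\n", first_word_by_ref(&m));
        return 0;
}

In the kernel code above, the corresponding read of the first word of a mask is done with cpus_addr(*mask)[0], as seen in set_vic_irq_affinity() and voyager_send_call_func() after the conversion.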