x86: convert mm_context_t semaphore to a mutex
[linux-2.6-omap-h63xx.git] / arch / x86 / kernel / smpboot_32.c
index e4f61d1c6248d8116810e122651f3fb109b5b78e..be3faac047190e2647d95baae8d0322dc1a922c4 100644 (file)
@@ -70,12 +70,12 @@ EXPORT_SYMBOL(smp_num_siblings);
 int cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID};
 
 /* representing HT siblings of each logical CPU */
-cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
-EXPORT_SYMBOL(cpu_sibling_map);
+DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
+EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
 
 /* representing HT and core siblings of each logical CPU */
-cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
-EXPORT_SYMBOL(cpu_core_map);
+DEFINE_PER_CPU(cpumask_t, cpu_core_map);
+EXPORT_PER_CPU_SYMBOL(cpu_core_map);
 
 /* bitmap of online cpus */
 cpumask_t cpu_online_map __read_mostly;
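
The hunk above replaces the old NR_CPUS-sized arrays with true per-CPU variables, so every reader of these maps has to move from cpu_sibling_map[cpu] array indexing to the per_cpu() accessor, as the later hunks do. A minimal kernel-context sketch of the new access pattern (the helper name print_siblings() is illustrative and not part of this patch):

#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>

DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);

static void print_siblings(int cpu)
{
	int i;

	/* old form was: for_each_cpu_mask(i, cpu_sibling_map[cpu]) */
	for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu))
		printk(KERN_INFO "CPU%d: HT sibling CPU%d\n", cpu, i);
}

Because the maps are now exported with EXPORT_PER_CPU_SYMBOL, modules keep the same access path through per_cpu().
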
@@ -102,8 +102,8 @@ u8 apicid_2_node[MAX_APICID];
  * Trampoline 80x86 program as an array.
  */
 
-extern unsigned char trampoline_data [];
-extern unsigned char trampoline_end  [];
+extern const unsigned char trampoline_data [];
+extern const unsigned char trampoline_end  [];
 static unsigned char *trampoline_base;
 static int trampoline_exec;
 
@@ -118,7 +118,7 @@ DEFINE_PER_CPU(int, cpu_state) = { 0 };
  * has made sure it's suitably aligned.
  */
 
-static unsigned long __devinit setup_trampoline(void)
+static unsigned long __cpuinit setup_trampoline(void)
 {
        memcpy(trampoline_base, trampoline_data, trampoline_end - trampoline_data);
        return virt_to_phys(trampoline_base);
@@ -300,7 +300,7 @@ cpumask_t cpu_coregroup_map(int cpu)
         * And for power savings, we return cpu_core_map
         */
        if (sched_mc_power_savings || sched_smt_power_savings)
-               return cpu_core_map[cpu];
+               return per_cpu(cpu_core_map, cpu);
        else
                return c->llc_shared_map;
 }
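
This hunk only changes how cpu_coregroup_map() reads the core map internally; the function still returns a cpumask_t by value, so callers (for example, the scheduler's multi-core domain setup) are not affected by the per-CPU conversion. A hedged sketch of a hypothetical caller (report_coregroup() is not a real kernel function):

#include <linux/kernel.h>
#include <linux/cpumask.h>

extern cpumask_t cpu_coregroup_map(int cpu);

static void report_coregroup(int cpu)
{
	/* returned by value, so no per_cpu() needed on the caller's side */
	cpumask_t span = cpu_coregroup_map(cpu);

	printk(KERN_INFO "CPU%d core group spans %d CPUs\n",
	       cpu, cpus_weight(span));
}
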
@@ -319,22 +319,22 @@ void __cpuinit set_cpu_sibling_map(int cpu)
                for_each_cpu_mask(i, cpu_sibling_setup_map) {
                        if (c[cpu].phys_proc_id == c[i].phys_proc_id &&
                            c[cpu].cpu_core_id == c[i].cpu_core_id) {
-                               cpu_set(i, cpu_sibling_map[cpu]);
-                               cpu_set(cpu, cpu_sibling_map[i]);
-                               cpu_set(i, cpu_core_map[cpu]);
-                               cpu_set(cpu, cpu_core_map[i]);
+                               cpu_set(i, per_cpu(cpu_sibling_map, cpu));
+                               cpu_set(cpu, per_cpu(cpu_sibling_map, i));
+                               cpu_set(i, per_cpu(cpu_core_map, cpu));
+                               cpu_set(cpu, per_cpu(cpu_core_map, i));
                                cpu_set(i, c[cpu].llc_shared_map);
                                cpu_set(cpu, c[i].llc_shared_map);
                        }
                }
        } else {
-               cpu_set(cpu, cpu_sibling_map[cpu]);
+               cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
        }
 
        cpu_set(cpu, c[cpu].llc_shared_map);
 
        if (current_cpu_data.x86_max_cores == 1) {
-               cpu_core_map[cpu] = cpu_sibling_map[cpu];
+               per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
                c[cpu].booted_cores = 1;
                return;
        }
@@ -346,17 +346,17 @@ void __cpuinit set_cpu_sibling_map(int cpu)
                        cpu_set(cpu, c[i].llc_shared_map);
                }
                if (c[cpu].phys_proc_id == c[i].phys_proc_id) {
-                       cpu_set(i, cpu_core_map[cpu]);
-                       cpu_set(cpu, cpu_core_map[i]);
+                       cpu_set(i, per_cpu(cpu_core_map, cpu));
+                       cpu_set(cpu, per_cpu(cpu_core_map, i));
                        /*
                         *  Does this new cpu bringup a new core?
                         */
-                       if (cpus_weight(cpu_sibling_map[cpu]) == 1) {
+                       if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) {
                                /*
                                 * for each core in package, increment
                                 * the booted_cores for this new cpu
                                 */
-                               if (first_cpu(cpu_sibling_map[i]) == i)
+                               if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
                                        c[cpu].booted_cores++;
                                /*
                                 * increment the core count for all
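
The booted_cores accounting above can be restated in terms of the converted maps: within a package, a CPU brings up a new core exactly when it is the first (lowest-numbered) thread in its own sibling map. A kernel-context sketch that recomputes the same count from the per-CPU maps (count_booted_cores() is a hypothetical helper, not part of this patch):

#include <linux/cpumask.h>
#include <linux/percpu.h>

DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
DECLARE_PER_CPU(cpumask_t, cpu_core_map);

static int count_booted_cores(int cpu)
{
	int i, cores = 0;

	/* one "first thread" per core gives the number of booted cores */
	for_each_cpu_mask(i, per_cpu(cpu_core_map, cpu))
		if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
			cores++;
	return cores;
}
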
@@ -983,8 +983,8 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
                        printk(KERN_NOTICE "Local APIC not detected."
                                           " Using dummy APIC emulation.\n");
                map_cpu_to_logical_apicid();
-               cpu_set(0, cpu_sibling_map[0]);
-               cpu_set(0, cpu_core_map[0]);
+               cpu_set(0, per_cpu(cpu_sibling_map, 0));
+               cpu_set(0, per_cpu(cpu_core_map, 0));
                return;
        }
 
@@ -1008,8 +1008,8 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
                printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
                smpboot_clear_io_apic_irqs();
                phys_cpu_present_map = physid_mask_of_physid(0);
-               cpu_set(0, cpu_sibling_map[0]);
-               cpu_set(0, cpu_core_map[0]);
+               cpu_set(0, per_cpu(cpu_sibling_map, 0));
+               cpu_set(0, per_cpu(cpu_core_map, 0));
                return;
        }
 
@@ -1021,10 +1021,16 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
        if (!max_cpus) {
                smp_found_config = 0;
                printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n");
+
+               if (nmi_watchdog == NMI_LOCAL_APIC) {
+                       printk(KERN_INFO "activating minimal APIC for NMI watchdog use.\n");
+                       connect_bsp_APIC();
+                       setup_local_APIC();
+               }
                smpboot_clear_io_apic_irqs();
                phys_cpu_present_map = physid_mask_of_physid(0);
-               cpu_set(0, cpu_sibling_map[0]);
-               cpu_set(0, cpu_core_map[0]);
+               cpu_set(0, per_cpu(cpu_sibling_map, 0));
+               cpu_set(0, per_cpu(cpu_core_map, 0));
                return;
        }
 
@@ -1102,16 +1108,16 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
        Dprintk("Boot done.\n");
 
        /*
-        * construct cpu_sibling_map[], so that we can tell sibling CPUs
+        * construct cpu_sibling_map, so that we can tell sibling CPUs
         * efficiently.
         */
        for (cpu = 0; cpu < NR_CPUS; cpu++) {
-               cpus_clear(cpu_sibling_map[cpu]);
-               cpus_clear(cpu_core_map[cpu]);
+               cpus_clear(per_cpu(cpu_sibling_map, cpu));
+               cpus_clear(per_cpu(cpu_core_map, cpu));
        }
 
-       cpu_set(0, cpu_sibling_map[0]);
-       cpu_set(0, cpu_core_map[0]);
+       cpu_set(0, per_cpu(cpu_sibling_map, 0));
+       cpu_set(0, per_cpu(cpu_core_map, 0));
 
        smpboot_setup_io_apic();
 
@@ -1148,19 +1154,19 @@ void remove_siblinginfo(int cpu)
        int sibling;
        struct cpuinfo_x86 *c = cpu_data;
 
-       for_each_cpu_mask(sibling, cpu_core_map[cpu]) {
-               cpu_clear(cpu, cpu_core_map[sibling]);
-               /*
+       for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
+               cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
+               /*
                 * last thread sibling in this cpu core going down
                 */
-               if (cpus_weight(cpu_sibling_map[cpu]) == 1)
+               if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
                        c[sibling].booted_cores--;
        }
                        
-       for_each_cpu_mask(sibling, cpu_sibling_map[cpu])
-               cpu_clear(cpu, cpu_sibling_map[sibling]);
-       cpus_clear(cpu_sibling_map[cpu]);
-       cpus_clear(cpu_core_map[cpu]);
+       for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
+               cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
+       cpus_clear(per_cpu(cpu_sibling_map, cpu));
+       cpus_clear(per_cpu(cpu_core_map, cpu));
        c[cpu].phys_proc_id = 0;
        c[cpu].cpu_core_id = 0;
        cpu_clear(cpu, cpu_sibling_setup_map);
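
remove_siblinginfo() is the teardown counterpart of set_cpu_sibling_map(): on CPU offline it drops the dying CPU from every per-CPU sibling/core map and corrects booted_cores before the CPU leaves cpu_online_map. A rough kernel-context sketch of where it sits in the offline path (offline_topology() is an illustrative name; in this file the real caller is the __cpu_disable() path):

#include <linux/cpumask.h>

extern void remove_siblinginfo(int cpu);

static void offline_topology(int cpu)
{
	/* drop 'cpu' from the sibling/core maps and fix up booted_cores */
	remove_siblinginfo(cpu);

	/* only afterwards is the CPU cleared from the online map */
	cpu_clear(cpu, cpu_online_map);
}
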