x86: remove set_kernel_exec()
diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c
index e4f61d1c6248d8116810e122651f3fb109b5b78e..5787a0c3e2960919f6dc82f0b034a5ba1d19735c 100644
--- a/arch/x86/kernel/smpboot_32.c
+++ b/arch/x86/kernel/smpboot_32.c
 #include <asm/mtrr.h>
 
 /* Set if we find a B stepping CPU */
-static int __devinitdata smp_b_stepping;
+static int __cpuinitdata smp_b_stepping;
 
 /* Number of siblings per CPU package */
 int smp_num_siblings = 1;
 EXPORT_SYMBOL(smp_num_siblings);
 
 /* Last level cache ID of each logical CPU */
-int cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID};
+DEFINE_PER_CPU(u8, cpu_llc_id) = BAD_APICID;
 
 /* representing HT siblings of each logical CPU */
-cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
-EXPORT_SYMBOL(cpu_sibling_map);
+DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
+EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
 
 /* representing HT and core siblings of each logical CPU */
-cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
-EXPORT_SYMBOL(cpu_core_map);
+DEFINE_PER_CPU(cpumask_t, cpu_core_map);
+EXPORT_PER_CPU_SYMBOL(cpu_core_map);
 
 /* bitmap of online cpus */
 cpumask_t cpu_online_map __read_mostly;
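
The hunks above and below convert global [NR_CPUS]-sized arrays (cpu_llc_id, cpu_sibling_map, cpu_core_map, cpu_info, x86_cpu_to_apicid) into per-CPU variables, so each CPU's entry lives in that CPU's own data area rather than in one shared, cache-contended array. A minimal userspace sketch of the two access styles follows; per_cpu() is reduced here to plain array indexing, which is only an approximation of the real kernel macro:

    /*
     * Userspace sketch only.  The real per_cpu() resolves through a
     * per-CPU section offset, and a DEFINE_PER_CPU initializer is
     * replicated into each CPU's area at boot, not by the compiler.
     */
    #include <stdio.h>

    #define NR_CPUS 4
    #define BAD_APICID 0xFFu

    /* old style: one global array, indexed by CPU number */
    static unsigned char cpu_llc_id_old[NR_CPUS] =
            { [0 ... NR_CPUS-1] = BAD_APICID };

    /* new style: conceptually one variable instantiated per CPU */
    #define DEFINE_PER_CPU(type, name)  type per_cpu__##name[NR_CPUS]
    #define per_cpu(name, cpu)          (per_cpu__##name[(cpu)])

    static DEFINE_PER_CPU(unsigned char, cpu_llc_id);

    int main(void)
    {
            int cpu;

            for (cpu = 0; cpu < NR_CPUS; cpu++)    /* boot-time replication */
                    per_cpu(cpu_llc_id, cpu) = BAD_APICID;

            per_cpu(cpu_llc_id, 2) = 1;            /* CPU 2 sits behind LLC 1 */
            printf("old: %u  new: %u\n",
                   cpu_llc_id_old[2], per_cpu(cpu_llc_id, 2));
            return 0;
    }
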
@@ -83,18 +83,20 @@ EXPORT_SYMBOL(cpu_online_map);
 
 cpumask_t cpu_callin_map;
 cpumask_t cpu_callout_map;
-EXPORT_SYMBOL(cpu_callout_map);
 cpumask_t cpu_possible_map;
 EXPORT_SYMBOL(cpu_possible_map);
 static cpumask_t smp_commenced_mask;
 
 /* Per CPU bogomips and other parameters */
-struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
-EXPORT_SYMBOL(cpu_data);
+DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
+EXPORT_PER_CPU_SYMBOL(cpu_info);
 
-u8 x86_cpu_to_apicid[NR_CPUS] __read_mostly =
-                       { [0 ... NR_CPUS-1] = 0xff };
-EXPORT_SYMBOL(x86_cpu_to_apicid);
+/* which logical CPU number maps to which CPU (physical APIC ID) */
+u8 x86_cpu_to_apicid_init[NR_CPUS] __initdata =
+                       { [0 ... NR_CPUS-1] = BAD_APICID };
+void *x86_cpu_to_apicid_early_ptr;
+DEFINE_PER_CPU(u8, x86_cpu_to_apicid) = BAD_APICID;
+EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid);
 
 u8 apicid_2_node[MAX_APICID];
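
The per-CPU area does not exist when the earliest boot code discovers APIC IDs, hence the two-stage scheme above: values land first in the __initdata array through x86_cpu_to_apicid_early_ptr and are copied into the per-CPU variable once per-CPU memory is set up, after which the early pointer is cleared. A sketch of that handoff, assuming a copy routine named setup_per_cpu_maps() for illustration:

    /*
     * Sketch: writes go through a pointer that first targets a
     * temporary init-time array; setup_per_cpu_maps() is an
     * illustrative name, not necessarily the exact kernel routine.
     */
    #include <stdio.h>
    #include <stddef.h>

    #define NR_CPUS 4
    #define BAD_APICID 0xFFu

    static unsigned char x86_cpu_to_apicid_init[NR_CPUS] =
            { [0 ... NR_CPUS-1] = BAD_APICID };
    static void *x86_cpu_to_apicid_early_ptr = x86_cpu_to_apicid_init;

    /* stand-in for DEFINE_PER_CPU(u8, x86_cpu_to_apicid) */
    static unsigned char per_cpu_x86_cpu_to_apicid[NR_CPUS];

    static void setup_per_cpu_maps(void)
    {
            unsigned char *early = x86_cpu_to_apicid_early_ptr;
            int cpu;

            for (cpu = 0; cpu < NR_CPUS; cpu++)
                    per_cpu_x86_cpu_to_apicid[cpu] = early[cpu];
            /* from here on, only the per-CPU copy may be used */
            x86_cpu_to_apicid_early_ptr = NULL;
    }

    int main(void)
    {
            unsigned char *early = x86_cpu_to_apicid_early_ptr;

            early[0] = 0;   /* BSP APIC ID, recorded before per-CPU init */
            setup_per_cpu_maps();
            printf("cpu 0 -> apicid %u\n", per_cpu_x86_cpu_to_apicid[0]);
            return 0;
    }
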
 
@@ -102,10 +104,9 @@ u8 apicid_2_node[MAX_APICID];
  * Trampoline 80x86 program as an array.
  */
 
-extern unsigned char trampoline_data [];
-extern unsigned char trampoline_end  [];
+extern const unsigned char trampoline_data [];
+extern const unsigned char trampoline_end  [];
 static unsigned char *trampoline_base;
-static int trampoline_exec;
 
 static void map_cpu_to_logical_apicid(void);
 
@@ -118,7 +119,7 @@ DEFINE_PER_CPU(int, cpu_state) = { 0 };
  * has made sure it's suitably aligned.
  */
 
-static unsigned long __devinit setup_trampoline(void)
+static unsigned long __cpuinit setup_trampoline(void)
 {
        memcpy(trampoline_base, trampoline_data, trampoline_end - trampoline_data);
        return virt_to_phys(trampoline_base);
@@ -130,17 +131,13 @@ static unsigned long __devinit setup_trampoline(void)
  */
 void __init smp_alloc_memory(void)
 {
-       trampoline_base = (void *) alloc_bootmem_low_pages(PAGE_SIZE);
+       trampoline_base = alloc_bootmem_low_pages(PAGE_SIZE);
        /*
         * Has to be in very low memory so we can execute
         * real-mode AP code.
         */
        if (__pa(trampoline_base) >= 0x9F000)
                BUG();
-       /*
-        * Make the SMP trampoline executable:
-        */
-       trampoline_exec = set_kernel_exec((unsigned long)trampoline_base, 1);
 }
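
The 0x9F000 bound keeps the trampoline inside real-mode-addressable low memory, below the EBDA at the top of the first 640K, and with set_kernel_exec() removed the copy no longer toggles page executability. The other constraint is implicit: a STARTUP IPI names the AP entry point by physical 4K page number, so start_eip must be page-aligned and below 1MB. A sketch of that encoding, assuming standard local-APIC SIPI semantics (0x9000 is an example address, not the one the kernel picks):

    #include <stdio.h>
    #include <assert.h>

    int main(void)
    {
            unsigned long start_eip = 0x9000;       /* example only */

            assert((start_eip & 0xFFF) == 0);       /* page-aligned */
            assert(start_eip < 0x9F000);            /* below the EBDA */

            /* the vector byte of a SIPI is the target page number */
            printf("SIPI vector: 0x%02lx\n", start_eip >> 12);
            return 0;
    }
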
 
 /*
@@ -150,9 +147,10 @@ void __init smp_alloc_memory(void)
 
 void __cpuinit smp_store_cpu_info(int id)
 {
-       struct cpuinfo_x86 *c = cpu_data + id;
+       struct cpuinfo_x86 *c = &cpu_data(id);
 
        *c = boot_cpu_data;
+       c->cpu_index = id;
        if (id!=0)
                identify_secondary_cpu(c);
        /*
@@ -294,13 +292,13 @@ static int cpucount;
 /* maps the cpu to the sched domain representing multi-core */
 cpumask_t cpu_coregroup_map(int cpu)
 {
-       struct cpuinfo_x86 *c = cpu_data + cpu;
+       struct cpuinfo_x86 *c = &cpu_data(cpu);
        /*
         * For perf, we return last level cache shared map.
         * And for power savings, we return cpu_core_map
         */
        if (sched_mc_power_savings || sched_smt_power_savings)
-               return cpu_core_map[cpu];
+               return per_cpu(cpu_core_map, cpu);
        else
                return c->llc_shared_map;
 }
@@ -311,61 +309,61 @@ static cpumask_t cpu_sibling_setup_map;
 void __cpuinit set_cpu_sibling_map(int cpu)
 {
        int i;
-       struct cpuinfo_x86 *c = cpu_data;
+       struct cpuinfo_x86 *c = &cpu_data(cpu);
 
        cpu_set(cpu, cpu_sibling_setup_map);
 
        if (smp_num_siblings > 1) {
                for_each_cpu_mask(i, cpu_sibling_setup_map) {
-                       if (c[cpu].phys_proc_id == c[i].phys_proc_id &&
-                           c[cpu].cpu_core_id == c[i].cpu_core_id) {
-                               cpu_set(i, cpu_sibling_map[cpu]);
-                               cpu_set(cpu, cpu_sibling_map[i]);
-                               cpu_set(i, cpu_core_map[cpu]);
-                               cpu_set(cpu, cpu_core_map[i]);
-                               cpu_set(i, c[cpu].llc_shared_map);
-                               cpu_set(cpu, c[i].llc_shared_map);
+                       if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
+                           c->cpu_core_id == cpu_data(i).cpu_core_id) {
+                               cpu_set(i, per_cpu(cpu_sibling_map, cpu));
+                               cpu_set(cpu, per_cpu(cpu_sibling_map, i));
+                               cpu_set(i, per_cpu(cpu_core_map, cpu));
+                               cpu_set(cpu, per_cpu(cpu_core_map, i));
+                               cpu_set(i, c->llc_shared_map);
+                               cpu_set(cpu, cpu_data(i).llc_shared_map);
                        }
                }
        } else {
-               cpu_set(cpu, cpu_sibling_map[cpu]);
+               cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
        }
 
-       cpu_set(cpu, c[cpu].llc_shared_map);
+       cpu_set(cpu, c->llc_shared_map);
 
        if (current_cpu_data.x86_max_cores == 1) {
-               cpu_core_map[cpu] = cpu_sibling_map[cpu];
-               c[cpu].booted_cores = 1;
+               per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
+               c->booted_cores = 1;
                return;
        }
 
        for_each_cpu_mask(i, cpu_sibling_setup_map) {
-               if (cpu_llc_id[cpu] != BAD_APICID &&
-                   cpu_llc_id[cpu] == cpu_llc_id[i]) {
-                       cpu_set(i, c[cpu].llc_shared_map);
-                       cpu_set(cpu, c[i].llc_shared_map);
+               if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
+                   per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
+                       cpu_set(i, c->llc_shared_map);
+                       cpu_set(cpu, cpu_data(i).llc_shared_map);
                }
-               if (c[cpu].phys_proc_id == c[i].phys_proc_id) {
-                       cpu_set(i, cpu_core_map[cpu]);
-                       cpu_set(cpu, cpu_core_map[i]);
+               if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
+                       cpu_set(i, per_cpu(cpu_core_map, cpu));
+                       cpu_set(cpu, per_cpu(cpu_core_map, i));
                        /*
                         *  Does this new cpu bring up a new core?
                         */
-                       if (cpus_weight(cpu_sibling_map[cpu]) == 1) {
+                       if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) {
                                /*
                                 * for each core in package, increment
                                 * the booted_cores for this new cpu
                                 */
-                               if (first_cpu(cpu_sibling_map[i]) == i)
-                                       c[cpu].booted_cores++;
+                               if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
+                                       c->booted_cores++;
                                /*
                                 * increment the core count for all
                                 * the other cpus in this package
                                 */
                                if (i != cpu)
-                                       c[i].booted_cores++;
-                       } else if (i != cpu && !c[cpu].booted_cores)
-                               c[cpu].booted_cores = c[i].booted_cores;
+                                       cpu_data(i).booted_cores++;
+                       } else if (i != cpu && !c->booted_cores)
+                               c->booted_cores = cpu_data(i).booted_cores;
                }
        }
 }
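
set_cpu_sibling_map() pairs the incoming CPU against every CPU already registered in cpu_sibling_setup_map: matching (phys_proc_id, cpu_core_id) makes two CPUs thread siblings, matching phys_proc_id alone makes them core siblings, and matching cpu_llc_id puts them in each other's llc_shared_map. A simplified userspace sketch of the pairing rule, with plain bitmasks standing in for cpumask_t and the smp_num_siblings, llc_shared_map and booted_cores bookkeeping omitted:

    #include <stdio.h>

    #define NR_CPUS 4

    struct cpu_topo { int phys_proc_id, cpu_core_id; };

    static struct cpu_topo topo[NR_CPUS] = {
            { 0, 0 }, { 0, 0 },     /* two threads of package 0, core 0 */
            { 0, 1 }, { 1, 0 },     /* package 0 core 1; package 1 core 0 */
    };
    static unsigned long sibling_map[NR_CPUS], core_map[NR_CPUS];

    static void set_cpu_sibling_map(int cpu, unsigned long booted_mask)
    {
            int i;

            for (i = 0; i < NR_CPUS; i++) {
                    if (!(booted_mask & (1UL << i)))
                            continue;
                    if (topo[cpu].phys_proc_id != topo[i].phys_proc_id)
                            continue;
                    core_map[cpu] |= 1UL << i;      /* same package */
                    core_map[i]   |= 1UL << cpu;
                    if (topo[cpu].cpu_core_id == topo[i].cpu_core_id) {
                            sibling_map[cpu] |= 1UL << i;   /* same core */
                            sibling_map[i]   |= 1UL << cpu;
                    }
            }
    }

    int main(void)
    {
            unsigned long booted = 0;
            int cpu;

            for (cpu = 0; cpu < NR_CPUS; cpu++) {   /* boot order 0..3 */
                    booted |= 1UL << cpu;
                    set_cpu_sibling_map(cpu, booted);
            }
            for (cpu = 0; cpu < NR_CPUS; cpu++)
                    printf("cpu %d: siblings %#lx cores %#lx\n",
                           cpu, sibling_map[cpu], core_map[cpu]);
            return 0;
    }
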
@@ -396,7 +394,7 @@ static void __cpuinit start_secondary(void *unused)
        setup_secondary_clock();
        if (nmi_watchdog == NMI_IO_APIC) {
                disable_8259A_irq(0);
-               enable_NMI_through_LVT0(NULL);
+               enable_NMI_through_LVT0();
                enable_8259A_irq(0);
        }
        /*
@@ -412,7 +410,7 @@ static void __cpuinit start_secondary(void *unused)
        /*
         * We need to hold call_lock, so there is no inconsistency
         * between the time smp_call_function() determines number of
-        * IPI receipients, and the time when the determination is made
+        * IPI recipients, and the time when the determination is made
         * for which cpus receive the IPI. Holding this
         * lock helps us to not include this cpu in a currently in progress
         * smp_call_function().
@@ -439,38 +437,38 @@ void __devinit initialize_secondary(void)
 {
        /*
         * We don't actually need to load the full TSS,
-        * basically just the stack pointer and the eip.
+        * basically just the stack pointer and the ip.
         */
 
        asm volatile(
                "movl %0,%%esp\n\t"
                "jmp *%1"
                :
-               :"m" (current->thread.esp),"m" (current->thread.eip));
+               :"m" (current->thread.sp),"m" (current->thread.ip));
 }
 
 /* Static state in head.S used to set up a CPU */
 extern struct {
-       void * esp;
+       void * sp;
        unsigned short ss;
 } stack_start;
 
 #ifdef CONFIG_NUMA
 
 /* which logical CPUs are on which nodes */
-cpumask_t node_2_cpu_mask[MAX_NUMNODES] __read_mostly =
+cpumask_t node_to_cpumask_map[MAX_NUMNODES] __read_mostly =
                                { [0 ... MAX_NUMNODES-1] = CPU_MASK_NONE };
-EXPORT_SYMBOL(node_2_cpu_mask);
+EXPORT_SYMBOL(node_to_cpumask_map);
 /* which node each logical CPU is on */
-int cpu_2_node[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = 0 };
-EXPORT_SYMBOL(cpu_2_node);
+int cpu_to_node_map[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = 0 };
+EXPORT_SYMBOL(cpu_to_node_map);
 
 /* set up a mapping between cpu and node. */
 static inline void map_cpu_to_node(int cpu, int node)
 {
        printk("Mapping cpu %d to node %d\n", cpu, node);
-       cpu_set(cpu, node_2_cpu_mask[node]);
-       cpu_2_node[cpu] = node;
+       cpu_set(cpu, node_to_cpumask_map[node]);
+       cpu_to_node_map[cpu] = node;
 }
 
 /* undo a mapping between cpu and node. */
@@ -480,8 +478,8 @@ static inline void unmap_cpu_to_node(int cpu)
 
        printk("Unmapping cpu %d from all nodes\n", cpu);
        for (node = 0; node < MAX_NUMNODES; node ++)
-               cpu_clear(cpu, node_2_cpu_mask[node]);
-       cpu_2_node[cpu] = 0;
+               cpu_clear(cpu, node_to_cpumask_map[node]);
+       cpu_to_node_map[cpu] = 0;
 }
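
map_cpu_to_node() and unmap_cpu_to_node() keep a forward map (cpu_to_node_map) and a reverse map (node_to_cpumask_map) in lockstep so that either lookup direction is O(1). A userspace sketch of the same pairing, with an unsigned long standing in for cpumask_t:

    #include <stdio.h>

    #define NR_CPUS      8
    #define MAX_NUMNODES 2

    static unsigned long node_to_cpumask_map[MAX_NUMNODES];
    static int cpu_to_node_map[NR_CPUS];

    static void map_cpu_to_node(int cpu, int node)
    {
            printf("Mapping cpu %d to node %d\n", cpu, node);
            node_to_cpumask_map[node] |= 1UL << cpu;
            cpu_to_node_map[cpu] = node;
    }

    static void unmap_cpu_to_node(int cpu)
    {
            int node;

            printf("Unmapping cpu %d from all nodes\n", cpu);
            for (node = 0; node < MAX_NUMNODES; node++)
                    node_to_cpumask_map[node] &= ~(1UL << cpu);
            cpu_to_node_map[cpu] = 0;
    }

    int main(void)
    {
            map_cpu_to_node(1, 1);
            printf("node 1 mask %#lx, cpu 1 on node %d\n",
                   node_to_cpumask_map[1], cpu_to_node_map[1]);
            unmap_cpu_to_node(1);
            return 0;
    }
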
 #else /* !CONFIG_NUMA */
 
@@ -659,7 +657,7 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
         * target processor state.
         */
        startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
-                        (unsigned long) stack_start.esp);
+                        (unsigned long) stack_start.sp);
 
        /*
         * Run STARTUP IPI loop.
@@ -736,8 +734,8 @@ static inline int alloc_cpu_id(void)
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-static struct task_struct * __devinitdata cpu_idle_tasks[NR_CPUS];
-static inline struct task_struct * alloc_idle_task(int cpu)
+static struct task_struct * __cpuinitdata cpu_idle_tasks[NR_CPUS];
+static inline struct task_struct * __cpuinit alloc_idle_task(int cpu)
 {
        struct task_struct *idle;
 
@@ -745,7 +743,7 @@ static inline struct task_struct * alloc_idle_task(int cpu)
                /* initialize thread_struct.  we really want to avoid
                 * destroying the idle thread
                 */
-               idle->thread.esp = (unsigned long)task_pt_regs(idle);
+               idle->thread.sp = (unsigned long)task_pt_regs(idle);
                init_idle(idle, cpu);
                return idle;
        }
@@ -790,7 +788,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
        per_cpu(current_task, cpu) = idle;
        early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
 
-       idle->thread.eip = (unsigned long) start_secondary;
+       idle->thread.ip = (unsigned long) start_secondary;
        /* start_eip had better be page-aligned! */
        start_eip = setup_trampoline();
 
@@ -798,13 +796,13 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
        alternatives_smp_switch(1);
 
        /* So we see what's up   */
-       printk("Booting processor %d/%d eip %lx\n", cpu, apicid, start_eip);
+       printk("Booting processor %d/%d ip %lx\n", cpu, apicid, start_eip);
        /* Stack for startup_32 can be just as for start_secondary onwards */
-       stack_start.esp = (void *) idle->thread.esp;
+       stack_start.sp = (void *) idle->thread.sp;
 
        irq_ctx_init(cpu);
 
-       x86_cpu_to_apicid[cpu] = apicid;
+       per_cpu(x86_cpu_to_apicid, cpu) = apicid;
        /*
         * This grunge runs the startup process for
         * the targeted processor.
@@ -844,7 +842,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
                        /* number CPUs logically, starting from 1 (BSP is 0) */
                        Dprintk("OK.\n");
                        printk("CPU%d: ", cpu);
-                       print_cpu_info(&cpu_data[cpu]);
+                       print_cpu_info(&cpu_data(cpu));
                        Dprintk("CPU has booted.\n");
                } else {
                        boot_error= 1;
@@ -866,7 +864,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
                cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */
                cpucount--;
        } else {
-               x86_cpu_to_apicid[cpu] = apicid;
+               per_cpu(x86_cpu_to_apicid, cpu) = apicid;
                cpu_set(cpu, cpu_present_map);
        }
 
@@ -915,7 +913,7 @@ static int __cpuinit __smp_prepare_cpu(int cpu)
        struct warm_boot_cpu_info info;
        int     apicid, ret;
 
-       apicid = x86_cpu_to_apicid[cpu];
+       apicid = per_cpu(x86_cpu_to_apicid, cpu);
        if (apicid == BAD_APICID) {
                ret = -ENODEV;
                goto exit;
@@ -961,11 +959,11 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
         */
        smp_store_cpu_info(0); /* Final full version of the data */
        printk("CPU%d: ", 0);
-       print_cpu_info(&cpu_data[0]);
+       print_cpu_info(&cpu_data(0));
 
        boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
        boot_cpu_logical_apicid = logical_smp_processor_id();
-       x86_cpu_to_apicid[0] = boot_cpu_physical_apicid;
+       per_cpu(x86_cpu_to_apicid, 0) = boot_cpu_physical_apicid;
 
        current_thread_info()->cpu = 0;
 
@@ -983,8 +981,8 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
                        printk(KERN_NOTICE "Local APIC not detected."
                                           " Using dummy APIC emulation.\n");
                map_cpu_to_logical_apicid();
-               cpu_set(0, cpu_sibling_map[0]);
-               cpu_set(0, cpu_core_map[0]);
+               cpu_set(0, per_cpu(cpu_sibling_map, 0));
+               cpu_set(0, per_cpu(cpu_core_map, 0));
                return;
        }
 
@@ -1008,8 +1006,9 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
                printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
                smpboot_clear_io_apic_irqs();
                phys_cpu_present_map = physid_mask_of_physid(0);
-               cpu_set(0, cpu_sibling_map[0]);
-               cpu_set(0, cpu_core_map[0]);
+               map_cpu_to_logical_apicid();
+               cpu_set(0, per_cpu(cpu_sibling_map, 0));
+               cpu_set(0, per_cpu(cpu_core_map, 0));
                return;
        }
 
@@ -1021,10 +1020,17 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
        if (!max_cpus) {
                smp_found_config = 0;
                printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n");
+
+               if (nmi_watchdog == NMI_LOCAL_APIC) {
+                       printk(KERN_INFO "activating minimal APIC for NMI watchdog use.\n");
+                       connect_bsp_APIC();
+                       setup_local_APIC();
+               }
                smpboot_clear_io_apic_irqs();
                phys_cpu_present_map = physid_mask_of_physid(0);
-               cpu_set(0, cpu_sibling_map[0]);
-               cpu_set(0, cpu_core_map[0]);
+               map_cpu_to_logical_apicid();
+               cpu_set(0, per_cpu(cpu_sibling_map, 0));
+               cpu_set(0, per_cpu(cpu_core_map, 0));
                return;
        }
 
@@ -1074,9 +1080,9 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
         * Allow the user to impress friends.
         */
        Dprintk("Before bogomips.\n");
-       for (cpu = 0; cpu < NR_CPUS; cpu++)
+       for_each_possible_cpu(cpu)
                if (cpu_isset(cpu, cpu_callout_map))
-                       bogosum += cpu_data[cpu].loops_per_jiffy;
+                       bogosum += cpu_data(cpu).loops_per_jiffy;
        printk(KERN_INFO
                "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
                cpucount+1,
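
loops_per_jiffy is summed over the called-out CPUs, and one BogoMIP is 500000 calibration-loop iterations per second, so the total is conventionally printed as bogosum/(500000/HZ) whole units plus (bogosum/(5000/HZ))%100 hundredths; the remaining printk arguments fall outside this hunk, so take the exact expressions as an assumption. A worked sketch with made-up values:

    #include <stdio.h>

    #define HZ 250                  /* assumed tick rate */

    int main(void)
    {
            /* assumed per-CPU loops_per_jiffy for a 2-CPU example */
            unsigned long lpj[] = { 4000000, 4000000 };
            unsigned long bogosum = 0;
            int cpu;

            for (cpu = 0; cpu < 2; cpu++)
                    bogosum += lpj[cpu];

            /* 8000000 / (500000/250) = 4000 -> "4000.00 BogoMIPS" */
            printf("Total of %d processors activated "
                   "(%lu.%02lu BogoMIPS).\n",
                   2, bogosum / (500000 / HZ),
                   (bogosum / (5000 / HZ)) % 100);
            return 0;
    }
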
@@ -1102,16 +1108,16 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
        Dprintk("Boot done.\n");
 
        /*
-        * construct cpu_sibling_map[], so that we can tell sibling CPUs
+        * construct cpu_sibling_map, so that we can tell sibling CPUs
         * efficiently.
         */
-       for (cpu = 0; cpu < NR_CPUS; cpu++) {
-               cpus_clear(cpu_sibling_map[cpu]);
-               cpus_clear(cpu_core_map[cpu]);
+       for_each_possible_cpu(cpu) {
+               cpus_clear(per_cpu(cpu_sibling_map, cpu));
+               cpus_clear(per_cpu(cpu_core_map, cpu));
        }
 
-       cpu_set(0, cpu_sibling_map[0]);
-       cpu_set(0, cpu_core_map[0]);
+       cpu_set(0, per_cpu(cpu_sibling_map, 0));
+       cpu_set(0, per_cpu(cpu_core_map, 0));
 
        smpboot_setup_io_apic();
 
@@ -1146,23 +1152,23 @@ void __init native_smp_prepare_boot_cpu(void)
 void remove_siblinginfo(int cpu)
 {
        int sibling;
-       struct cpuinfo_x86 *c = cpu_data;
+       struct cpuinfo_x86 *c = &cpu_data(cpu);
 
-       for_each_cpu_mask(sibling, cpu_core_map[cpu]) {
-               cpu_clear(cpu, cpu_core_map[sibling]);
-               /*
+       for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
+               cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
+               /*
                 * last thread sibling in this cpu core going down
                 */
-               if (cpus_weight(cpu_sibling_map[cpu]) == 1)
-                       c[sibling].booted_cores--;
+               if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
+                       cpu_data(sibling).booted_cores--;
        }
                        
-       for_each_cpu_mask(sibling, cpu_sibling_map[cpu])
-               cpu_clear(cpu, cpu_sibling_map[sibling]);
-       cpus_clear(cpu_sibling_map[cpu]);
-       cpus_clear(cpu_core_map[cpu]);
-       c[cpu].phys_proc_id = 0;
-       c[cpu].cpu_core_id = 0;
+       for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
+               cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
+       cpus_clear(per_cpu(cpu_sibling_map, cpu));
+       cpus_clear(per_cpu(cpu_core_map, cpu));
+       c->phys_proc_id = 0;
+       c->cpu_core_id = 0;
        cpu_clear(cpu, cpu_sibling_setup_map);
 }
 
@@ -1279,12 +1285,6 @@ void __init native_smp_cpus_done(unsigned int max_cpus)
        setup_ioapic_dest();
 #endif
        zap_low_mappings();
-#ifndef CONFIG_HOTPLUG_CPU
-       /*
-        * Disable executability of the SMP trampoline:
-        */
-       set_kernel_exec((unsigned long)trampoline_base, trampoline_exec);
-#endif
 }
 
 void __init smp_intr_init(void)