dmi_check_system(sw_any_bug_dmi_table);
        if (bios_with_sw_any_bug && cpus_weight(policy->cpus) == 1) {
                policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
-               policy->cpus = cpu_core_map[cpu];
+               policy->cpus = per_cpu(cpu_core_map, cpu);
        }
 #endif
 
 
 static int cpu_family = CPU_OPTERON;
 
 #ifndef CONFIG_SMP
-static cpumask_t cpu_core_map[1];
+DEFINE_PER_CPU(cpumask_t, cpu_core_map);
 #endif
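
For reference, a minimal sketch (not part of this patch) of the per-cpu
variable pattern the hunks in this patch apply to cpu_core_map; the name
"my_mask" and the clear_my_mask() helper are hypothetical:

#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/percpu.h>

DECLARE_PER_CPU(cpumask_t, my_mask);    /* in a shared header            */

DEFINE_PER_CPU(cpumask_t, my_mask);     /* in exactly one .c file        */
EXPORT_PER_CPU_SYMBOL(my_mask);         /* only if modules need access   */

static void clear_my_mask(int cpu)
{
        /* array indexing my_mask[cpu] becomes per_cpu(my_mask, cpu) */
        cpus_clear(per_cpu(my_mask, cpu));
}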
 
 /* Return a frequency in MHz, given an input fid */
 
        dprintk("cfid 0x%x, cvid 0x%x\n", data->currfid, data->currvid);
        data->powernow_table = powernow_table;
-       if (first_cpu(cpu_core_map[data->cpu]) == data->cpu)
+       if (first_cpu(per_cpu(cpu_core_map, data->cpu)) == data->cpu)
                print_basics(data);
 
        for (j = 0; j < data->numps; j++)
 
        /* fill in data */
        data->numps = data->acpi_data.state_count;
-       if (first_cpu(cpu_core_map[data->cpu]) == data->cpu)
+       if (first_cpu(per_cpu(cpu_core_map, data->cpu)) == data->cpu)
                print_basics(data);
        powernow_k8_acpi_pst_values(data, 0);
 
        if (cpu_family == CPU_HW_PSTATE)
                pol->cpus = cpumask_of_cpu(pol->cpu);
        else
-               pol->cpus = cpu_core_map[pol->cpu];
+               pol->cpus = per_cpu(cpu_core_map, pol->cpu);
        data->available_cores = &(pol->cpus);
 
        /* Take a crude guess here.
        cpumask_t oldmask = current->cpus_allowed;
        unsigned int khz = 0;
 
-       data = powernow_data[first_cpu(cpu_core_map[cpu])];
+       data = powernow_data[first_cpu(per_cpu(cpu_core_map, cpu))];
 
        if (!data)
                return -EINVAL;
 
 #ifdef CONFIG_X86_HT
        if (c->x86_max_cores * smp_num_siblings > 1) {
                seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
-               seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[n]));
+               seq_printf(m, "siblings\t: %d\n",
+                               cpus_weight(per_cpu(cpu_core_map, n)));
                seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
                seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
        }
 
 
 #ifdef CONFIG_SMP
        if (cpu_data[cpu].cpu_core_id && shared_bank[bank]) {   /* symlink */
-               i = first_cpu(cpu_core_map[cpu]);
+               i = first_cpu(per_cpu(cpu_core_map, cpu));
 
                /* first core not up yet */
                if (cpu_data[i].cpu_core_id)
                if (err)
                        goto out;
 
-               b->cpus = cpu_core_map[cpu];
+               b->cpus = per_cpu(cpu_core_map, cpu);
                per_cpu(threshold_banks, cpu)[bank] = b;
                goto out;
        }
 #ifndef CONFIG_SMP
        b->cpus = CPU_MASK_ALL;
 #else
-       b->cpus = cpu_core_map[cpu];
+       b->cpus = per_cpu(cpu_core_map, cpu);
 #endif
        err = kobject_register(&b->kobj);
        if (err)
 
        if (smp_num_siblings * c->x86_max_cores > 1) {
                int cpu = c - cpu_data;
                seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
-               seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[cpu]));
+               seq_printf(m, "siblings\t: %d\n",
+                              cpus_weight(per_cpu(cpu_core_map, cpu)));
                seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
                seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
        }
 
 EXPORT_SYMBOL(cpu_sibling_map);
 
 /* representing HT and core siblings of each logical CPU */
-cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
-EXPORT_SYMBOL(cpu_core_map);
+DEFINE_PER_CPU(cpumask_t, cpu_core_map);
+EXPORT_PER_CPU_SYMBOL(cpu_core_map);
 
 /* bitmap of online cpus */
 cpumask_t cpu_online_map __read_mostly;
         * And for power savings, we return cpu_core_map
         */
        if (sched_mc_power_savings || sched_smt_power_savings)
-               return cpu_core_map[cpu];
+               return per_cpu(cpu_core_map, cpu);
        else
                return c->llc_shared_map;
 }
                            c[cpu].cpu_core_id == c[i].cpu_core_id) {
                                cpu_set(i, cpu_sibling_map[cpu]);
                                cpu_set(cpu, cpu_sibling_map[i]);
-                               cpu_set(i, cpu_core_map[cpu]);
-                               cpu_set(cpu, cpu_core_map[i]);
+                               cpu_set(i, per_cpu(cpu_core_map, cpu));
+                               cpu_set(cpu, per_cpu(cpu_core_map, i));
                                cpu_set(i, c[cpu].llc_shared_map);
                                cpu_set(cpu, c[i].llc_shared_map);
                        }
        cpu_set(cpu, c[cpu].llc_shared_map);
 
        if (current_cpu_data.x86_max_cores == 1) {
-               cpu_core_map[cpu] = cpu_sibling_map[cpu];
+               per_cpu(cpu_core_map, cpu) = cpu_sibling_map[cpu];
                c[cpu].booted_cores = 1;
                return;
        }
                        cpu_set(cpu, c[i].llc_shared_map);
                }
                if (c[cpu].phys_proc_id == c[i].phys_proc_id) {
-                       cpu_set(i, cpu_core_map[cpu]);
-                       cpu_set(cpu, cpu_core_map[i]);
+                       cpu_set(i, per_cpu(cpu_core_map, cpu));
+                       cpu_set(cpu, per_cpu(cpu_core_map, i));
                        /*
                         *  Does this new cpu bringup a new core?
                         */
                                           " Using dummy APIC emulation.\n");
                map_cpu_to_logical_apicid();
                cpu_set(0, cpu_sibling_map[0]);
-               cpu_set(0, cpu_core_map[0]);
+               cpu_set(0, per_cpu(cpu_core_map, 0));
                return;
        }
 
                smpboot_clear_io_apic_irqs();
                phys_cpu_present_map = physid_mask_of_physid(0);
                cpu_set(0, cpu_sibling_map[0]);
-               cpu_set(0, cpu_core_map[0]);
+               cpu_set(0, per_cpu(cpu_core_map, 0));
                return;
        }
 
                smpboot_clear_io_apic_irqs();
                phys_cpu_present_map = physid_mask_of_physid(0);
                cpu_set(0, cpu_sibling_map[0]);
-               cpu_set(0, cpu_core_map[0]);
+               cpu_set(0, per_cpu(cpu_core_map, 0));
                return;
        }
 
         */
        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                cpus_clear(cpu_sibling_map[cpu]);
-               cpus_clear(cpu_core_map[cpu]);
+               cpus_clear(per_cpu(cpu_core_map, cpu));
        }
 
        cpu_set(0, cpu_sibling_map[0]);
-       cpu_set(0, cpu_core_map[0]);
+       cpu_set(0, per_cpu(cpu_core_map, 0));
 
        smpboot_setup_io_apic();
 
        int sibling;
        struct cpuinfo_x86 *c = cpu_data;
 
-       for_each_cpu_mask(sibling, cpu_core_map[cpu]) {
-               cpu_clear(cpu, cpu_core_map[sibling]);
+       for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
+               cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
                /*
                 * last thread sibling in this cpu core going down
                 */
                if (cpus_weight(cpu_sibling_map[cpu]) == 1)
        for_each_cpu_mask(sibling, cpu_sibling_map[cpu])
                cpu_clear(cpu, cpu_sibling_map[sibling]);
        cpus_clear(cpu_sibling_map[cpu]);
-       cpus_clear(cpu_core_map[cpu]);
+       cpus_clear(per_cpu(cpu_core_map, cpu));
        c[cpu].phys_proc_id = 0;
        c[cpu].cpu_core_id = 0;
        cpu_clear(cpu, cpu_sibling_setup_map);
 
 EXPORT_SYMBOL(cpu_sibling_map);
 
 /* representing HT and core siblings of each logical CPU */
-cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
-EXPORT_SYMBOL(cpu_core_map);
+DEFINE_PER_CPU(cpumask_t, cpu_core_map);
+EXPORT_PER_CPU_SYMBOL(cpu_core_map);
 
 /*
  * Trampoline 80x86 program as an array.
         * And for power savings, we return cpu_core_map
         */
        if (sched_mc_power_savings || sched_smt_power_savings)
-               return cpu_core_map[cpu];
+               return per_cpu(cpu_core_map, cpu);
        else
                return c->llc_shared_map;
 }
                            c[cpu].cpu_core_id == c[i].cpu_core_id) {
                                cpu_set(i, cpu_sibling_map[cpu]);
                                cpu_set(cpu, cpu_sibling_map[i]);
-                               cpu_set(i, cpu_core_map[cpu]);
-                               cpu_set(cpu, cpu_core_map[i]);
+                               cpu_set(i, per_cpu(cpu_core_map, cpu));
+                               cpu_set(cpu, per_cpu(cpu_core_map, i));
                                cpu_set(i, c[cpu].llc_shared_map);
                                cpu_set(cpu, c[i].llc_shared_map);
                        }
        cpu_set(cpu, c[cpu].llc_shared_map);
 
        if (current_cpu_data.x86_max_cores == 1) {
-               cpu_core_map[cpu] = cpu_sibling_map[cpu];
+               per_cpu(cpu_core_map, cpu) = cpu_sibling_map[cpu];
                c[cpu].booted_cores = 1;
                return;
        }
                        cpu_set(cpu, c[i].llc_shared_map);
                }
                if (c[cpu].phys_proc_id == c[i].phys_proc_id) {
-                       cpu_set(i, cpu_core_map[cpu]);
-                       cpu_set(cpu, cpu_core_map[i]);
+                       cpu_set(i, per_cpu(cpu_core_map, cpu));
+                       cpu_set(cpu, per_cpu(cpu_core_map, i));
                        /*
                         *  Does this new cpu bringup a new core?
                         */
        else
                phys_cpu_present_map = physid_mask_of_physid(0);
        cpu_set(0, cpu_sibling_map[0]);
-       cpu_set(0, cpu_core_map[0]);
+       cpu_set(0, per_cpu(cpu_core_map, 0));
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
        int sibling;
        struct cpuinfo_x86 *c = cpu_data;
 
-       for_each_cpu_mask(sibling, cpu_core_map[cpu]) {
-               cpu_clear(cpu, cpu_core_map[sibling]);
+       for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
+               cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
                /*
                 * last thread sibling in this cpu core going down
                 */
        for_each_cpu_mask(sibling, cpu_sibling_map[cpu])
                cpu_clear(cpu, cpu_sibling_map[sibling]);
        cpus_clear(cpu_sibling_map[cpu]);
-       cpus_clear(cpu_core_map[cpu]);
+       cpus_clear(per_cpu(cpu_core_map, cpu));
        c[cpu].phys_proc_id = 0;
        c[cpu].cpu_core_id = 0;
        cpu_clear(cpu, cpu_sibling_setup_map);
 
 
        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                cpus_clear(cpu_sibling_map[cpu]);
-               cpus_clear(cpu_core_map[cpu]);
+               /*
+                * cpu_core_map lives in a per cpu area that is cleared
+                * when the per cpu array is allocated.
+                *
+                * cpus_clear(per_cpu(cpu_core_map, cpu));
+                */
        }
 
        xen_setup_vcpu_info_placement();
 
        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                cpus_clear(cpu_sibling_map[cpu]);
-               cpus_clear(cpu_core_map[cpu]);
+               /*
+                * cpu_core_map will be zeroed when the per
+                * cpu area is allocated.
+                *
+                * cpus_clear(per_cpu(cpu_core_map, cpu));
+                */
        }
 
        smp_store_cpu_info(0);
 
 extern int pic_mode;
 extern int smp_num_siblings;
 extern cpumask_t cpu_sibling_map[];
-extern cpumask_t cpu_core_map[];
+DECLARE_PER_CPU(cpumask_t, cpu_core_map);
 
 extern void (*mtrr_hook) (void);
 extern void zap_low_mappings (void);
 
 extern void smp_send_reschedule(int cpu);
 
 extern cpumask_t cpu_sibling_map[NR_CPUS];
-extern cpumask_t cpu_core_map[NR_CPUS];
+/*
+ * cpu_core_map lives in a per cpu area
+ *
+ * extern cpumask_t cpu_core_map[NR_CPUS];
+ */
+DECLARE_PER_CPU(cpumask_t, cpu_core_map);
 extern u8 cpu_llc_id[NR_CPUS];
 
 #define SMP_TRAMPOLINE_BASE 0x6000
 
 #ifdef CONFIG_X86_HT
 #define topology_physical_package_id(cpu)      (cpu_data[cpu].phys_proc_id)
 #define topology_core_id(cpu)                  (cpu_data[cpu].cpu_core_id)
-#define topology_core_siblings(cpu)            (cpu_core_map[cpu])
+#define topology_core_siblings(cpu)            (per_cpu(cpu_core_map, cpu))
 #define topology_thread_siblings(cpu)          (cpu_sibling_map[cpu])
 #endif
 
 
 #ifdef CONFIG_SMP
 #define topology_physical_package_id(cpu)      (cpu_data[cpu].phys_proc_id)
 #define topology_core_id(cpu)                  (cpu_data[cpu].cpu_core_id)
-#define topology_core_siblings(cpu)            (cpu_core_map[cpu])
+#define topology_core_siblings(cpu)            (per_cpu(cpu_core_map, cpu))
 #define topology_thread_siblings(cpu)          (cpu_sibling_map[cpu])
 #define mc_capable()                   (boot_cpu_data.x86_max_cores > 1)
 #define smt_capable()                  (smp_num_siblings > 1)
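
As a usage sketch (not from this patch), walking the core siblings of a
CPU through the converted per-cpu mask; show_core_siblings() is a
hypothetical helper:

#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/smp.h>

static void show_core_siblings(int cpu)
{
        int sibling;

        /* cpu_core_map[cpu] is now per_cpu(cpu_core_map, cpu) */
        for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu))
                printk(KERN_DEBUG "CPU%d shares a core with CPU%d\n",
                       cpu, sibling);
}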