#include <linux/init.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/bootmem.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/msr.h>
#include <asm/mmu_context.h>
#include <asm/mtrr.h>
#include <asm/mce.h>
#include <asm/pat.h>
#include <asm/asm.h>
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#include <mach_apic.h>
#endif

#include "cpu.h"
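
/*
 * Note: each GDT entry below is the (low word, high word) pair of a
 * segment descriptor; e.g. 0x0000ffff/0x00cf9a00 is the flat 4 GiB
 * 32-bit kernel code segment (limit 0xfffff with 4k granularity,
 * type 0x9a), 0x..92.. the matching data segment, and the user
 * segments differ only in DPL 3 (0xfa/0xf2).
 */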
DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = {
	[GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } },
	[GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } },
	[GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00cffa00 } } },
	[GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff200 } } },
	/*
	 * Segments used for calling PnP BIOS have byte granularity.
	 * The code and data segments have fixed 64k limits; the
	 * transfer segment sizes are set at run time.
	 */
	/* 32-bit code */
	[GDT_ENTRY_PNPBIOS_CS32] = { { { 0x0000ffff, 0x00409a00 } } },
	/* 16-bit code */
	[GDT_ENTRY_PNPBIOS_CS16] = { { { 0x0000ffff, 0x00009a00 } } },
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_DS] = { { { 0x0000ffff, 0x00009200 } } },
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_TS1] = { { { 0x00000000, 0x00009200 } } },
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_TS2] = { { { 0x00000000, 0x00009200 } } },
	/*
	 * The APM segments have byte granularity and their bases
	 * are set at run time.  All have 64k limits.
	 */
	/* 32-bit code */
	[GDT_ENTRY_APMBIOS_BASE] = { { { 0x0000ffff, 0x00409a00 } } },
	/* 16-bit code */
	[GDT_ENTRY_APMBIOS_BASE+1] = { { { 0x0000ffff, 0x00009a00 } } },
	/* data */
	[GDT_ENTRY_APMBIOS_BASE+2] = { { { 0x0000ffff, 0x00409200 } } },

	[GDT_ENTRY_ESPFIX_SS] = { { { 0x00000000, 0x00c09200 } } },
	[GDT_ENTRY_PERCPU] = { { { 0x00000000, 0x00000000 } } },
} };
EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
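
/*
 * Capability bits force-cleared on the command line (e.g. via the
 * "clearcpuid=", "nofxsr" or "nosep" options handled below);
 * identify_cpu() masks these out of every CPU's x86_capability words.
 */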
__u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;

/* Current gdt points %fs at the "master" per-cpu area: after this,
 * it's on the real one. */
void switch_to_new_gdt(void)
{
	struct desc_ptr gdt_descr;

	gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id());
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);
	asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory");
}

static int cachesize_override __cpuinitdata = -1;
static int disable_x86_serial_nr __cpuinitdata = 1;

static struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};

static void __cpuinit default_init(struct cpuinfo_x86 *c)
{
	/* Not much we can do here... */
	/* Check if at least it has cpuid */
	if (c->cpuid_level == -1) {
		/* No cpuid. It must be an ancient CPU */
		if (c->x86 == 4)
			strcpy(c->x86_model_id, "486");
		else if (c->x86 == 3)
			strcpy(c->x86_model_id, "386");
	}
}

static struct cpu_dev __cpuinitdata default_cpu = {
	.c_init = default_init,
	.c_vendor = "Unknown",
	.c_x86_vendor = X86_VENDOR_UNKNOWN,
};

static struct cpu_dev *this_cpu __cpuinitdata;

static int __init cachesize_setup(char *str)
{
	get_option(&str, &cachesize_override);
	return 1;
}
__setup("cachesize=", cachesize_setup);
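
/*
 * CPUID leaves 0x80000002..0x80000004 each return 16 bytes of the
 * 48-character processor brand string in EAX..EDX, so the three
 * calls below assemble the full name directly into x86_model_id.
 */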
int __cpuinit get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;
	char *p, *q;

	if (c->extended_cpuid_level < 0x80000004)
		return 0;

	v = (unsigned int *) c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;

	/* Intel chips right-justify this string for some dumb reason;
	   undo that brain damage */
	p = q = &c->x86_model_id[0];
	while (*p == ' ')
		p++;
	if (p != q) {
		while (*p)
			*q++ = *p++;
		while (q <= &c->x86_model_id[48])
			*q++ = '\0';	/* Zero-pad the rest */
	}

	return 1;
}

void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, ebx, ecx, edx, l2size;

	n = c->extended_cpuid_level;

	if (n >= 0x80000005) {
		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
				edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
		c->x86_cache_size = (ecx>>24) + (edx>>24);
	}

	if (n < 0x80000006)	/* Some chips just have a large L1. */
		return;

	ecx = cpuid_ecx(0x80000006);
	l2size = ecx >> 16;

	/* do processor-specific cache resizing */
	if (this_cpu->c_size_cache)
		l2size = this_cpu->c_size_cache(c, l2size);

	/* Allow user to override all this if necessary. */
	if (cachesize_override != -1)
		l2size = cachesize_override;

	if (l2size == 0)
		return;		/* Again, no L2 cache is possible */

	c->x86_cache_size = l2size;

	printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
	       l2size, ecx & 0xFF);
}

/*
 * Naming convention should be: <Name> [(<Codename>)]
 * This table is only used if init_<vendor>() below doesn't set the
 * model name; in particular, if CPUID levels 0x80000002..4 are
 * supported, this isn't used.
 */

/* Look up CPU names by table lookup. */
static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
{
	struct cpu_model_info *info;

	if (c->x86_model >= 16)
		return NULL;	/* Range check */

	if (!this_cpu)
		return NULL;

	info = this_cpu->c_models;

	while (info && info->family) {
		if (info->family == c->x86)
			return info->model_names[c->x86_model];
		info++;
	}
	return NULL;		/* Not found */
}

void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
	u32 eax, ebx, ecx, edx;
	int index_msb, core_bits;

	cpuid(1, &eax, &ebx, &ecx, &edx);

	if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
		return;

	smp_num_siblings = (ebx & 0xff0000) >> 16;

	if (smp_num_siblings == 1) {
		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
	} else if (smp_num_siblings > 1) {

		if (smp_num_siblings > NR_CPUS) {
			printk(KERN_WARNING "CPU: Unsupported number of siblings %d",
			       smp_num_siblings);
			smp_num_siblings = 1;
			return;
		}

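		/*
		 * The initial APIC ID is laid out as
		 * [package | core | thread] bit fields; get_count_order()
		 * yields each field's width and phys_pkg_id() shifts the
		 * APIC ID right to extract the enclosing field.
		 */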
		index_msb = get_count_order(smp_num_siblings);
		c->phys_proc_id = phys_pkg_id(c->initial_apicid, index_msb);

		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
		       c->phys_proc_id);

		smp_num_siblings = smp_num_siblings / c->x86_max_cores;

		index_msb = get_count_order(smp_num_siblings);

		core_bits = get_count_order(c->x86_max_cores);

		c->cpu_core_id = phys_pkg_id(c->initial_apicid, index_msb) &
					       ((1 << core_bits) - 1);

		if (c->x86_max_cores > 1)
			printk(KERN_INFO "CPU: Processor Core ID: %d\n",
			       c->cpu_core_id);
	}
}

static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
{
	char *v = c->x86_vendor_id;
	int i;
	static int printed;

	for (i = 0; i < X86_VENDOR_NUM; i++) {
		if (!cpu_devs[i])
			break;

		if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
		    (cpu_devs[i]->c_ident[1] &&
		     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
			this_cpu = cpu_devs[i];
			c->x86_vendor = this_cpu->c_x86_vendor;
			return;
		}
	}

	if (!printed) {
		printed++;
		printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n");
		printk(KERN_ERR "CPU: Your system may be unstable.\n");
	}

	c->x86_vendor = X86_VENDOR_UNKNOWN;
	this_cpu = &default_cpu;
}

static int __init x86_fxsr_setup(char *s)
{
	setup_clear_cpu_cap(X86_FEATURE_FXSR);
	setup_clear_cpu_cap(X86_FEATURE_XMM);
	return 1;
}
__setup("nofxsr", x86_fxsr_setup);

static int __init x86_sep_setup(char *s)
{
	setup_clear_cpu_cap(X86_FEATURE_SEP);
	return 1;
}
__setup("nosep", x86_sep_setup);

/* Standard macro to see if a specific flag is changeable */
static inline int flag_is_changeable_p(u32 flag)
{
	u32 f1, f2;

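	/*
	 * Push EFLAGS, flip the requested bit in the saved copy, load it
	 * back into EFLAGS, then read EFLAGS again: if the bit stuck, the
	 * flag is changeable.  This is the classic probe for AC (486+)
	 * and ID (CPUID present).
	 */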
309 : "=&r" (f1), "=&r" (f2)
312 return ((f1^f2) & flag) != 0;

/* Probe for the CPUID instruction */
static int __cpuinit have_cpuid_p(void)
{
	return flag_is_changeable_p(X86_EFLAGS_ID);
}

void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
{
	/* Get vendor name */
	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
	      (unsigned int *)&c->x86_vendor_id[0],
	      (unsigned int *)&c->x86_vendor_id[8],
	      (unsigned int *)&c->x86_vendor_id[4]);

	c->x86 = 4;
	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		u32 junk, tfms, cap0, misc;
		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
		c->x86 = (tfms >> 8) & 0xf;
		c->x86_model = (tfms >> 4) & 0xf;
		c->x86_mask = tfms & 0xf;
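		/*
		 * Family 0xf is an escape value: the extended family bits
		 * (EAX[27:20]) are added to it.  For family 6 and up, the
		 * extended model bits (EAX[19:16]) form the high nibble
		 * of the model.
		 */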
		if (c->x86 == 0xf)
			c->x86 += (tfms >> 20) & 0xff;
		if (c->x86 >= 0x6)
			c->x86_model += ((tfms >> 16) & 0xf) << 4;
		if (cap0 & (1<<19)) {
			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
			c->x86_cache_alignment = c->x86_clflush_size;
		}
	}
}

static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
{
	u32 tfms, xlvl;
	unsigned int ebx;

	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		u32 capability, excap;
		cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
		c->x86_capability[0] = capability;
		c->x86_capability[4] = excap;
	}

	/* AMD-defined flags: level 0x80000001 */
	xlvl = cpuid_eax(0x80000000);
	c->extended_cpuid_level = xlvl;
	if ((xlvl & 0xffff0000) == 0x80000000) {
		if (xlvl >= 0x80000001) {
			c->x86_capability[1] = cpuid_edx(0x80000001);
			c->x86_capability[6] = cpuid_ecx(0x80000001);
		}
	}
}

/*
 * Do minimum CPU detection early.
 * Fields really needed: vendor, cpuid_level, family, model, mask,
 * cache alignment.
 * The others are not touched to avoid unwanted side effects.
 *
 * WARNING: this function is only called on the BP.  Don't add code here
 * that is supposed to run on all CPUs.
 */
static void __init early_identify_cpu(struct cpuinfo_x86 *c)
{
	c->x86_cache_alignment = 32;
	c->x86_clflush_size = 32;

	if (!have_cpuid_p())
		return;

	c->extended_cpuid_level = 0;

	memset(&c->x86_capability, 0, sizeof c->x86_capability);

	cpu_detect(c);

	get_cpu_vendor(c);

	get_cpu_cap(c);

	if (this_cpu->c_early_init)
		this_cpu->c_early_init(c);

	validate_pat_support(c);
}

void __init early_cpu_init(void)
{
	struct cpu_dev **cdev;
	int count = 0;

	printk("KERNEL supported cpus:\n");
	for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
		struct cpu_dev *cpudev = *cdev;
		unsigned j;

		if (count >= X86_VENDOR_NUM)
			break;
		cpu_devs[count] = cpudev;
		count++;

		for (j = 0; j < 2; j++) {
			if (!cpudev->c_ident[j])
				continue;
			printk(" %s %s\n", cpudev->c_vendor,
			       cpudev->c_ident[j]);
		}
	}

	early_identify_cpu(&boot_cpu_data);
}

/*
 * The NOPL instruction is supposed to exist on all CPUs with
 * family >= 6; unfortunately, that's not true in practice because
 * of early VIA chips and (more importantly) broken virtualizers that
 * are not easy to detect.  Hence, probe for it based on first
 * principles.
 */
static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
{
	const u32 nopl_signature = 0x888c53b1; /* Random number */
	u32 has_nopl = nopl_signature;

	clear_cpu_cap(c, X86_FEATURE_NOPL);
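	/*
	 * Execute NOPL with a signature value in %eax; if the CPU takes
	 * an invalid-opcode fault, the exception-table fixup below zeroes
	 * %eax, so comparing against the signature afterwards tells us
	 * whether NOPL is really there.
	 */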
446 "1: .byte 0x0f,0x1f,0xc0\n" /* nopl %eax */
448 " .section .fixup,\"ax\"\n"
455 if (has_nopl == nopl_signature)
456 set_cpu_cap(c, X86_FEATURE_NOPL);

static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
{
	if (!have_cpuid_p())
		return;

	c->extended_cpuid_level = 0;

	cpu_detect(c);

	get_cpu_vendor(c);

	get_cpu_cap(c);

	if (c->cpuid_level >= 0x00000001) {
		c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
#ifdef CONFIG_X86_HT
		c->apicid = phys_pkg_id(c->initial_apicid, 0);
		c->phys_proc_id = c->initial_apicid;
#else
		c->apicid = c->initial_apicid;
#endif
	}

	if (c->extended_cpuid_level >= 0x80000004)
		get_model_name(c); /* Default name */

	init_scattered_cpuid_features(c);
	detect_nopl(c);
}

static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr) {
		/* Disable processor serial number */
		unsigned long lo, hi;
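		/* Setting bit 21 of MSR_IA32_BBL_CR_CTL disables the
		   processor serial number on PIII-class CPUs. */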
		rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
		lo |= 0x200000;
		wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
		printk(KERN_NOTICE "CPU serial number disabled.\n");
		clear_cpu_cap(c, X86_FEATURE_PN);

		/* Disabling the serial number may affect the cpuid level */
		c->cpuid_level = cpuid_eax(0);
	}
}

static int __init x86_serial_nr_setup(char *s)
{
	disable_x86_serial_nr = 0;
	return 1;
}
__setup("serialnumber", x86_serial_nr_setup);

/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
{
	int i;

	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = -1;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->cpuid_level = -1;	/* CPUID not detected */
	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0'; /* Unset */
	c->x86_model_id[0] = '\0';  /* Unset */
	c->x86_max_cores = 1;
	c->x86_clflush_size = 32;
	memset(&c->x86_capability, 0, sizeof c->x86_capability);

	if (!have_cpuid_p()) {
		/*
		 * First of all, decide if this is a 486 or higher.
		 * It's a 486 if we can modify the AC flag.
		 */
		if (flag_is_changeable_p(X86_EFLAGS_AC))
			c->x86 = 4;
		else
			c->x86 = 3;
	}

	generic_identify(c);

	if (this_cpu->c_identify)
		this_cpu->c_identify(c);

	/*
	 * Vendor-specific initialization.  In this section we
	 * canonicalize the feature flags, meaning that if a CPU has
	 * features which CPUID doesn't report, CPUID claims incorrect
	 * flags, or there are other bugs, we handle them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	if (this_cpu->c_init)
		this_cpu->c_init(c);

	/* Disable the PN if appropriate */
	squash_the_stupid_serial_number(c);
566 * The vendor-specific functions might have changed features. Now
567 * we do "generic changes."
570 /* If the model name is still unset, do table lookup. */
571 if (!c->x86_model_id[0]) {
573 p = table_lookup_model(c);
575 strcpy(c->x86_model_id, p);
578 sprintf(c->x86_model_id, "%02x/%02x",
579 c->x86, c->x86_model);

	/*
	 * On SMP, boot_cpu_data holds the common feature set between
	 * all CPUs; so make sure that we indicate which features are
	 * common between the CPUs.  The first time this routine gets
	 * executed, c == &boot_cpu_data.
	 */
	if (c != &boot_cpu_data) {
		/* AND the already accumulated flags with these */
		for (i = 0; i < NCAPINTS; i++)
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
	}

	/* Clear all flags overridden by options */
	for (i = 0; i < NCAPINTS; i++)
		c->x86_capability[i] &= ~cleared_cpu_caps[i];

	/* Init Machine Check Exception if available. */
	mcheck_init(c);

	select_idle_routine(c);
}

void __init identify_boot_cpu(void)
{
	identify_cpu(&boot_cpu_data);
	sysenter_setup();
	enable_sep_cpu();
}

void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
{
	BUG_ON(c == &boot_cpu_data);
	identify_cpu(c);
	enable_sep_cpu();
	mtrr_ap_init();
}

struct msr_range {
	unsigned min;
	unsigned max;
};

static struct msr_range msr_range_array[] __cpuinitdata = {
	{ 0x00000000, 0x00000418 },
	{ 0xc0000000, 0xc000040b },
	{ 0xc0010000, 0xc0010142 },
	{ 0xc0011000, 0xc001103b },
};
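
/*
 * Dump the architectural (0x0000xxxx) and AMD (0xc000xxxx/0xc001xxxx)
 * MSR ranges above; rdmsrl_amd_safe() returns non-zero instead of
 * faulting when an MSR is not implemented, so those indices are
 * simply skipped.
 */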
static void __cpuinit print_cpu_msr(void)
{
	unsigned index;
	u64 val;
	int i;
	unsigned index_min, index_max;

	for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) {
		index_min = msr_range_array[i].min;
		index_max = msr_range_array[i].max;
		for (index = index_min; index < index_max; index++) {
			if (rdmsrl_amd_safe(index, &val))
				continue;
			printk(KERN_INFO " MSR%08x: %016llx\n", index, val);
		}
	}
}

static int show_msr __cpuinitdata;
static __init int setup_show_msr(char *arg)
{
	int num = -1;

	get_option(&arg, &num);

	if (num >= 0)
		show_msr = num;
	return 1;
}
__setup("show_msr=", setup_show_msr);
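
/*
 * Usage: boot with "show_msr=<n>" to have print_cpu_info() below dump
 * the MSR ranges for the first <n> CPUs.
 */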

static __init int setup_noclflush(char *arg)
{
	setup_clear_cpu_cap(X86_FEATURE_CLFLSH);
	return 1;
}
__setup("noclflush", setup_noclflush);

void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
{
	char *vendor = NULL;

	if (c->x86_vendor < X86_VENDOR_NUM)
		vendor = this_cpu->c_vendor;
	else if (c->cpuid_level >= 0)
		vendor = c->x86_vendor_id;

	if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
		printk(KERN_CONT "%s ", vendor);

	if (c->x86_model_id[0])
		printk(KERN_CONT "%s", c->x86_model_id);
	else
		printk(KERN_CONT "%d86", c->x86);

	if (c->x86_mask || c->cpuid_level >= 0)
		printk(KERN_CONT " stepping %02x\n", c->x86_mask);
	else
		printk(KERN_CONT "\n");

#ifdef CONFIG_SMP
	if (c->cpu_index < show_msr)
		print_cpu_msr();
#else
	if (show_msr)
		print_cpu_msr();
#endif
}

static __init int setup_disablecpuid(char *arg)
{
	int bit;
	if (get_option(&arg, &bit) && bit < NCAPINTS*32)
		setup_clear_cpu_cap(bit);
	else
		return 0;
	return 1;
}
__setup("clearcpuid=", setup_disablecpuid);

cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;

/* Make sure %fs is initialized properly in idle threads */
struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
{
	memset(regs, 0, sizeof(struct pt_regs));
	regs->fs = __KERNEL_PERCPU;
	return regs;
}

/*
 * cpu_init() initializes state that is per-CPU. Some data is already
 * initialized (naturally) in the bootstrap process, such as the GDT
 * and IDT. We reload them nevertheless; this function acts as a
 * 'CPU state barrier', and nothing should get across.
 */
void __cpuinit cpu_init(void)
{
	int cpu = smp_processor_id();
	struct task_struct *curr = current;
	struct tss_struct *t = &per_cpu(init_tss, cpu);
	struct thread_struct *thread = &curr->thread;

	if (cpu_test_and_set(cpu, cpu_initialized)) {
		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
		for (;;) local_irq_enable();
	}

	printk(KERN_INFO "Initializing CPU#%d\n", cpu);

	if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
		clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

	load_idt(&idt_descr);
	switch_to_new_gdt();

	/*
	 * Set up and load the per-CPU TSS and LDT
	 */
	atomic_inc(&init_mm.mm_count);
	curr->active_mm = &init_mm;
	if (curr->mm)
		BUG();
	enter_lazy_tlb(&init_mm, curr);

	load_sp0(t, thread);
	set_tss_desc(cpu, t);
	load_TR_desc();
	load_LDT(&init_mm.context);

#ifdef CONFIG_DOUBLEFAULT
	/* Set up doublefault TSS pointer in the GDT */
	__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
#endif

	/* Clear %gs. */
	asm volatile ("mov %0, %%gs" : : "r" (0));

	/* Clear all 6 debug registers: */
	set_debugreg(0, 0);
	set_debugreg(0, 1);
	set_debugreg(0, 2);
	set_debugreg(0, 3);
	set_debugreg(0, 6);
	set_debugreg(0, 7);

	/*
	 * Force FPU initialization:
	 */
	current_thread_info()->status = 0;
	clear_used_math();
	mxcsr_feature_mask_init();
}

#ifdef CONFIG_HOTPLUG_CPU
void __cpuinit cpu_uninit(void)
{
	int cpu = raw_smp_processor_id();
	cpu_clear(cpu, cpu_initialized);

	/* lazy TLB state */
	per_cpu(cpu_tlbstate, cpu).state = 0;
	per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm;
}
#endif