/*
 * Copyright (C) 1995 Linus Torvalds
 *
 * This file handles the architecture-dependent parts of initialization
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/screen_info.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <asm/processor.h>
#include <linux/console.h>
#include <linux/seq_file.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/kallsyms.h>
#include <linux/edd.h>
#include <linux/mmzone.h>
#include <linux/kexec.h>
#include <linux/cpufreq.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>
#include <linux/ctype.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <video/edid.h>
#include <asm/mpspec.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/mach_apic.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
struct cpuinfo_x86 boot_cpu_data __read_mostly;
EXPORT_SYMBOL(boot_cpu_data);

unsigned long mmu_cr4_features;

/* Boot loader ID as an integer, for the benefit of proc_dointvec */
int bootloader_type;

unsigned long saved_video_mode;

int force_mwait __cpuinitdata;

char dmi_alloc_data[DMI_MAX_DATA];

struct screen_info screen_info;
EXPORT_SYMBOL(screen_info);
struct sys_desc_table_struct {
	unsigned short length;
	unsigned char table[0];
};

struct edid_info edid_info;
EXPORT_SYMBOL_GPL(edid_info);

extern int root_mountflags;

char __initdata command_line[COMMAND_LINE_SIZE];
struct resource standard_io_resources[] = {
	{ .name = "dma1", .start = 0x00, .end = 0x1f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic1", .start = 0x20, .end = 0x21,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer0", .start = 0x40, .end = 0x43,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer1", .start = 0x50, .end = 0x53,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "keyboard", .start = 0x60, .end = 0x6f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma page reg", .start = 0x80, .end = 0x8f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic2", .start = 0xa0, .end = 0xa1,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma2", .start = 0xc0, .end = 0xdf,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "fpu", .start = 0xf0, .end = 0xff,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO }
};

#define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)
static struct resource data_resource = {
	.name = "Kernel data",
	.flags = IORESOURCE_RAM,
};

static struct resource code_resource = {
	.name = "Kernel code",
	.flags = IORESOURCE_RAM,
};

static struct resource bss_resource = {
	.name = "Kernel bss",
	.flags = IORESOURCE_RAM,
};

static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c);
#ifdef CONFIG_PROC_VMCORE
/*
 * elfcorehdr= specifies the location of elf core header
 * stored by the crashed kernel. This option will be passed
 * by kexec loader to the capture kernel.
 */
static int __init setup_elfcorehdr(char *arg)
{
	char *end;

	if (!arg)
		return -EINVAL;
	elfcorehdr_addr = memparse(arg, &end);
	return end > arg ? 0 : -EINVAL;
}
early_param("elfcorehdr", setup_elfcorehdr);
#endif
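
/*
 * Illustration (not from the original source): memparse() accepts a
 * number with an optional K/M/G suffix, so a command line such as
 * "elfcorehdr=16M" stores 16777216 in elfcorehdr_addr.  The
 * "end > arg" test merely checks that at least one character was
 * actually consumed.
 */
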
static void __init
contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long bootmap_size, bootmap;

	bootmap_size = bootmem_bootmap_pages(end_pfn) << PAGE_SHIFT;
	bootmap = find_e820_area(0, end_pfn << PAGE_SHIFT, bootmap_size);
	if (bootmap == -1L)
		panic("Cannot find bootmem map of size %ld\n", bootmap_size);
	bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
	e820_register_active_regions(0, start_pfn, end_pfn);
	free_bootmem_with_active_regions(0, end_pfn);
	reserve_bootmem(bootmap, bootmap_size);
}
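
/*
 * Rough sizing note (illustrative): the bootmem allocator keeps one bit
 * per page frame below end_pfn.  With 4 KiB pages, 4 GiB of RAM is
 * 1,048,576 frames, so the bitmap is 131,072 bytes (32 pages);
 * bootmem_bootmap_pages() returns that page count and the shift by
 * PAGE_SHIFT turns it into a byte length for find_e820_area().
 */
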
#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
struct edd edd;
#ifdef CONFIG_EDD_MODULE
EXPORT_SYMBOL(edd);
#endif
/**
 * copy_edd() - Copy the BIOS EDD information
 *              from boot_params into a safe place.
 */
static inline void copy_edd(void)
{
	memcpy(edd.mbr_signature, boot_params.edd_mbr_sig_buffer,
	       sizeof(edd.mbr_signature));
	memcpy(edd.edd_info, boot_params.eddbuf, sizeof(edd.edd_info));
	edd.mbr_signature_nr = boot_params.edd_mbr_sig_buf_entries;
	edd.edd_info_nr = boot_params.eddbuf_entries;
}
#else
static inline void copy_edd(void)
{
}
#endif
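
/*
 * Background (summary, not from the original file): EDD is the BIOS
 * "Enhanced Disk Drive" data collected by the real-mode boot code.
 * Copying it out of boot_params here lets the edd driver expose it
 * later (e.g. under /sys/firmware/edd) and lets the stored MBR
 * signatures be matched against Linux block devices.
 */
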
#ifdef CONFIG_KEXEC
static void __init reserve_crashkernel(void)
{
	unsigned long long free_mem;
	unsigned long long crash_size, crash_base;
	int ret;

	free_mem = ((unsigned long long)max_low_pfn - min_low_pfn) << PAGE_SHIFT;

	ret = parse_crashkernel(boot_command_line, free_mem,
				&crash_size, &crash_base);
	if (ret == 0 && crash_size) {
		if (crash_base > 0) {
			printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
				"for crashkernel (System RAM: %ldMB)\n",
				(unsigned long)(crash_size >> 20),
				(unsigned long)(crash_base >> 20),
				(unsigned long)(free_mem >> 20));
			crashk_res.start = crash_base;
			crashk_res.end   = crash_base + crash_size - 1;
			reserve_bootmem(crash_base, crash_size);
		} else
			printk(KERN_INFO "crashkernel reservation failed - "
				"you have to specify a base address\n");
	}
}
#else
static inline void __init reserve_crashkernel(void)
{}
#endif
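
/*
 * Usage sketch (illustrative): booting with "crashkernel=128M@16M"
 * makes parse_crashkernel() return crash_size = 128 MiB and
 * crash_base = 16 MiB, so the range 16 MiB..144 MiB is reserved for the
 * kdump capture kernel.  Omitting the "@offset" part leaves crash_base
 * at 0 and this code refuses the reservation.
 */
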
#define EBDA_ADDR_POINTER 0x40E

unsigned __initdata ebda_addr;
unsigned __initdata ebda_size;

static void discover_ebda(void)
{
	/*
	 * there is a real-mode segmented pointer pointing to the
	 * 4K EBDA area at 0x40E
	 */
	ebda_addr = *(unsigned short *)__va(EBDA_ADDR_POINTER);
	ebda_addr <<= 4;

	ebda_size = *(unsigned short *)__va(ebda_addr);

	/* Round EBDA up to pages */
	if (ebda_size == 0)
		ebda_size = 1;
	ebda_size <<= 10;
	ebda_size = round_up(ebda_size + (ebda_addr & ~PAGE_MASK), PAGE_SIZE);
	if (ebda_size > 64*1024)
		ebda_size = 64*1024;
}
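
/*
 * Worked example (illustrative): if the BIOS stored the segment 0x9fc0
 * at 0x40E, the EBDA starts at 0x9fc00 (segment << 4), just below the
 * 640 KiB boundary.  The first bytes of the EBDA hold its length in
 * KiB, so a value of 1 becomes 1024 bytes, which is then rounded up to
 * a whole page and capped at 64 KiB before being reserved.
 */
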
void __init setup_arch(char **cmdline_p)
{
	unsigned i;

	printk(KERN_INFO "Command line: %s\n", boot_command_line);

	ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
	screen_info = boot_params.screen_info;
	edid_info = boot_params.edid_info;
	saved_video_mode = boot_params.hdr.vid_mode;
	bootloader_type = boot_params.hdr.type_of_loader;

#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = boot_params.hdr.ram_size & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((boot_params.hdr.ram_size & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((boot_params.hdr.ram_size & RAMDISK_LOAD_FLAG) != 0);
#endif
	setup_memory_region();
	copy_edd();

	if (!boot_params.hdr.root_flags)
		root_mountflags &= ~MS_RDONLY;
	init_mm.start_code = (unsigned long) &_text;
	init_mm.end_code = (unsigned long) &_etext;
	init_mm.end_data = (unsigned long) &_edata;
	init_mm.brk = (unsigned long) &_end;

	code_resource.start = virt_to_phys(&_text);
	code_resource.end = virt_to_phys(&_etext)-1;
	data_resource.start = virt_to_phys(&_etext);
	data_resource.end = virt_to_phys(&_edata)-1;
	bss_resource.start = virt_to_phys(&__bss_start);
	bss_resource.end = virt_to_phys(&__bss_stop)-1;

	early_identify_cpu(&boot_cpu_data);

	strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = command_line;

	finish_e820_parsing();
	e820_register_active_regions(0, 0, -1UL);
	/*
	 * partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	end_pfn = e820_end_of_ram();
	num_physpages = end_pfn;

	discover_ebda();

	init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));

	/* setup to use the static apicid table during kernel startup */
	x86_cpu_to_apicid_ptr = (void *)&x86_cpu_to_apicid_init;

	/*
	 * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
	 * Call this early for SRAT node setup.
	 */
	acpi_boot_table_init();

	/* How many end-of-memory variables you have, grandma! */
	max_low_pfn = end_pfn;
	high_memory = (void *)__va(end_pfn * PAGE_SIZE - 1) + 1;

	/* Remove active ranges so rediscovery with NUMA-awareness happens */
	remove_all_active_ranges();

#ifdef CONFIG_ACPI_NUMA
	/*
	 * Parse SRAT to discover nodes.
	 */
	acpi_numa_init();
#endif

#ifdef CONFIG_NUMA
	numa_initmem_init(0, end_pfn);
#else
	contig_initmem_init(0, end_pfn);
#endif

	/* Reserve direct mapping */
	reserve_bootmem_generic(table_start << PAGE_SHIFT,
				(table_end - table_start) << PAGE_SHIFT);

	/* Reserve kernel text, data and bss */
	reserve_bootmem_generic(__pa_symbol(&_text),
				__pa_symbol(&_end) - __pa_symbol(&_text));

	/*
	 * reserve physical page 0 - it's a special BIOS page on many boxes,
	 * enabling clean reboots, SMP operation, laptop functions.
	 */
	reserve_bootmem_generic(0, PAGE_SIZE);

	/* reserve ebda region */
	if (ebda_addr)
		reserve_bootmem_generic(ebda_addr, ebda_size);

	/* reserve nodemap region */
	if (nodemap_addr)
		reserve_bootmem_generic(nodemap_addr, nodemap_size);

	/* Reserve SMP trampoline */
	reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, 2*PAGE_SIZE);

#ifdef CONFIG_ACPI_SLEEP
	/*
	 * Reserve low memory region for sleep support.
	 */
	acpi_reserve_bootmem();
#endif
	/*
	 * Find and reserve possible boot-time SMP configuration:
	 */
	find_smp_config();

#ifdef CONFIG_BLK_DEV_INITRD
	if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
		unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
		unsigned long ramdisk_size = boot_params.hdr.ramdisk_size;
		unsigned long ramdisk_end = ramdisk_image + ramdisk_size;
		unsigned long end_of_mem = end_pfn << PAGE_SHIFT;

		if (ramdisk_end <= end_of_mem) {
			reserve_bootmem_generic(ramdisk_image, ramdisk_size);
			initrd_start = ramdisk_image + PAGE_OFFSET;
			initrd_end = initrd_start + ramdisk_size;
		} else {
			printk(KERN_ERR "initrd extends beyond end of memory "
			       "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
			       ramdisk_end, end_of_mem);
			initrd_start = 0;
		}
	}
#endif
	reserve_crashkernel();
	/*
	 * set this early, so we don't allocate cpu0
	 * if the MADT list doesn't list the BSP first.
	 * mpparse.c/MP_processor_info() allocates logical cpu numbers.
	 */
	cpu_set(0, cpu_present_map);

	/*
	 * Read APIC and some other early information from ACPI tables.
	 */
	acpi_boot_init();

	/*
	 * get boot-time SMP configuration:
	 */
	if (smp_found_config)
		get_smp_config();
	init_apic_mappings();
	ioapic_init_mappings();

	/*
	 * We trust e820 completely. No explicit ROM probing in memory.
	 */
	e820_reserve_resources(&code_resource, &data_resource, &bss_resource);
	e820_mark_nosave_regions();

	/* request I/O space for devices used on all i[345]86 PCs */
	for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
		request_resource(&ioport_resource, &standard_io_resources[i]);

#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
}
static int __cpuinit get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;

	if (c->extended_cpuid_level < 0x80000004)
		return 0;

	v = (unsigned int *) c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;
	return 1;
}
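
/*
 * Note (illustrative): CPUID leaves 0x80000002-0x80000004 each return
 * 16 bytes of the processor brand string in EAX:EBX:ECX:EDX, 48 bytes
 * in total, e.g. "AMD Opteron(tm) Processor 250"; byte 48 is forced to
 * NUL as a safety net in case the firmware-provided string uses all
 * 48 bytes.
 */
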
static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, eax, ebx, ecx, edx;

	n = c->extended_cpuid_level;

	if (n >= 0x80000005) {
		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
		       edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
		c->x86_cache_size = (ecx>>24)+(edx>>24);
		/* On K8 L1 TLB is inclusive, so don't count it */
		c->x86_tlbsize = 0;
	}

	if (n >= 0x80000006) {
		cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
		ecx = cpuid_ecx(0x80000006);
		c->x86_cache_size = ecx >> 16;
		c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);

		printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
		       c->x86_cache_size, ecx & 0xFF);
	}

	if (n >= 0x80000007)
		cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power);
	if (n >= 0x80000008) {
		cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
	}
}
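
/*
 * Worked example (illustrative): on a typical K8, CPUID 0x80000008
 * returns EAX = 0x3028, i.e. 0x28 = 40 physical address bits and
 * 0x30 = 48 virtual address bits; these later show up in /proc/cpuinfo
 * as "address sizes : 40 bits physical, 48 bits virtual".
 */
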
static int nearby_node(int apicid)
{
	int i;

	for (i = apicid - 1; i >= 0; i--) {
		int node = apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		int node = apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}
/*
 * On an AMD dual-core setup the lower bits of the APIC id distinguish the cores.
 * Assumes number of cores is a power of two.
 */
static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
{
	unsigned bits;
	int cpu = smp_processor_id();
	int node = 0;
	unsigned apicid = hard_smp_processor_id();
	unsigned ecx = cpuid_ecx(0x80000008);

	c->x86_max_cores = (ecx & 0xff) + 1;

	/* CPU telling us the core id bits shift? */
	bits = (ecx >> 12) & 0xF;
	if (bits == 0) {
		/* Otherwise recompute */
		while ((1 << bits) < c->x86_max_cores)
			bits++;
	}

	/* Low order bits define the core id (index of core in socket) */
	c->cpu_core_id = c->phys_proc_id & ((1 << bits)-1);
	/* Convert the APIC ID into the socket ID */
	c->phys_proc_id = phys_pkg_id(bits);

	node = c->phys_proc_id;
	if (apicid_to_node[apicid] != NUMA_NO_NODE)
		node = apicid_to_node[apicid];
	if (!node_online(node)) {
		/* Two possibilities here:
		   - The CPU is missing memory and no node was created.
		     In that case try picking one from a nearby CPU.
		   - The APIC IDs differ from the HyperTransport node IDs
		     which the K8 northbridge parsing fills in.
		     Assume they are all increased by a constant offset,
		     but in the same order as the HT nodeids.
		     If that doesn't result in a usable node fall back to the
		     path for the previous case. */
		int ht_nodeid = apicid - (cpu_data(0).phys_proc_id << bits);

		if (ht_nodeid >= 0 &&
		    apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);

	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
}
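
/*
 * Worked example (illustrative): a dual-core Opteron reports NC = 1 in
 * CPUID 0x80000008 ECX[7:0], so x86_max_cores = 2 and bits = 1.  A CPU
 * whose initial APIC ID is 3 (stashed in phys_proc_id by
 * early_identify_cpu) then gets cpu_core_id = 3 & 1 = 1 and
 * phys_proc_id = 3 >> 1 = 1, i.e. core 1 in socket 1.
 */
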
#define ENABLE_C1E_MASK			0x18000000
#define CPUID_PROCESSOR_SIGNATURE	1
#define CPUID_XFAM			0x0ff00000
#define CPUID_XFAM_K8			0x00000000
#define CPUID_XFAM_10H			0x00100000
#define CPUID_XFAM_11H			0x00200000
#define CPUID_XMOD			0x000f0000
#define CPUID_XMOD_REV_F		0x00040000
/* AMD systems with C1E don't have a working lAPIC timer. Check for that. */
static __cpuinit int amd_apic_timer_broken(void)
{
	u32 lo, hi;
	u32 eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);

	switch (eax & CPUID_XFAM) {
	case CPUID_XFAM_K8:
		if ((eax & CPUID_XMOD) < CPUID_XMOD_REV_F)
			break;
	case CPUID_XFAM_10H:
	case CPUID_XFAM_11H:
		rdmsr(MSR_K8_ENABLE_C1E, lo, hi);
		if (lo & ENABLE_C1E_MASK)
			return 1;
		break;
	default:
		/* err on the side of caution */
		return 1;
	}
	return 0;
}
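
/*
 * Decoding note (illustrative): CPUID leaf 1 EAX is the processor
 * signature - stepping[3:0], model[7:4], family[11:8], extended
 * model[19:16], extended family[27:20].  CPUID_XFAM (0x0ff00000) keeps
 * only the extended family, so e.g. 0x00040fb2 (a revision-F K8)
 * matches CPUID_XFAM_K8, and its extended model of 4 satisfies the
 * CPUID_XMOD_REV_F test.
 */
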
static void __cpuinit init_amd(struct cpuinfo_x86 *c)
{
	unsigned level;
	unsigned long value;

	/*
	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
	 * bit 6 of msr C001_0015
	 *
	 * Errata 63 for SH-B3 steppings
	 * Errata 122 for all steppings (F+ have it disabled by default)
	 */
	if (c->x86 == 15) {
		rdmsrl(MSR_K8_HWCR, value);
		value |= 1 << 6;
		wrmsrl(MSR_K8_HWCR, value);
	}

	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
	clear_bit(0*32+31, &c->x86_capability);

	/* On C+ stepping K8 rep microcode works well for copy/memset */
	level = cpuid_eax(1);
	if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58))
		set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
	if (c->x86 == 0x10 || c->x86 == 0x11)
		set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);

	/* Enable workaround for FXSAVE leak */
	if (c->x86 >= 6)
		set_bit(X86_FEATURE_FXSAVE_LEAK, &c->x86_capability);

	level = get_model_name(c);
	if (!level) {
		/* Should distinguish models here, but this is only
		   a fallback anyway. */
		strcpy(c->x86_model_id, "Hammer");
	}
	display_cacheinfo(c);

	/* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
	if (c->x86_power & (1<<8))
		set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);

	/* Multi core CPU? */
	if (c->extended_cpuid_level >= 0x80000008)
		amd_detect_cmp(c);

	if (c->extended_cpuid_level >= 0x80000006 &&
	    (cpuid_edx(0x80000006) & 0xf000))
		num_cache_leaves = 4;
	else
		num_cache_leaves = 3;

	if (c->x86 == 0xf || c->x86 == 0x10 || c->x86 == 0x11)
		set_bit(X86_FEATURE_K8, &c->x86_capability);

	/* RDTSC can be speculated around */
	clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);

	/* Family 10 doesn't support C states in MWAIT so don't use it */
	if (c->x86 == 0x10 && !force_mwait)
		clear_bit(X86_FEATURE_MWAIT, &c->x86_capability);

	if (amd_apic_timer_broken())
		disable_apic_timer = 1;
}
static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
	u32 eax, ebx, ecx, edx;
	int index_msb, core_bits;

	cpuid(1, &eax, &ebx, &ecx, &edx);

	if (!cpu_has(c, X86_FEATURE_HT))
		return;
	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
		goto out;

	smp_num_siblings = (ebx & 0xff0000) >> 16;

	if (smp_num_siblings == 1) {
		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
	} else if (smp_num_siblings > 1) {

		if (smp_num_siblings > NR_CPUS) {
			printk(KERN_WARNING "CPU: Unsupported number of siblings %d",
			       smp_num_siblings);
			smp_num_siblings = 1;
			return;
		}

		index_msb = get_count_order(smp_num_siblings);
		c->phys_proc_id = phys_pkg_id(index_msb);

		smp_num_siblings = smp_num_siblings / c->x86_max_cores;

		index_msb = get_count_order(smp_num_siblings);

		core_bits = get_count_order(c->x86_max_cores);

		c->cpu_core_id = phys_pkg_id(index_msb) &
					     ((1 << core_bits) - 1);
	}
out:
	if ((c->x86_max_cores * smp_num_siblings) > 1) {
		printk(KERN_INFO "CPU: Physical Processor ID: %d\n", c->phys_proc_id);
		printk(KERN_INFO "CPU: Processor Core ID: %d\n", c->cpu_core_id);
	}
}
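
/*
 * Worked example (illustrative): a package with 2 cores and 2 threads
 * per core reports 4 logical CPUs in CPUID.1 EBX[23:16].  index_msb is
 * then 2, so the package ID is the APIC ID shifted right by 2; after
 * dividing by x86_max_cores the per-core sibling count is 2, leaving
 * one thread bit at the bottom and one core bit above it for
 * cpu_core_id.
 */
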
/*
 * find out the number of processor cores on the die
 */
static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
{
	unsigned int eax, t;

	if (c->cpuid_level < 4)
		return 1;

	cpuid_count(4, 0, &eax, &t, &t, &t);

	if (eax & 0x1f)
		return ((eax >> 26) + 1);
	else
		return 1;
}
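
/*
 * Decoding note (illustrative): CPUID leaf 4 (with ECX = 0) describes
 * the first cache level; EAX[4:0] is the cache type (0 means no such
 * cache) and EAX[31:26] holds "cores per physical package - 1", so a
 * value of 1 in that field means a dual-core die.
 */
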
static void srat_detect_node(void)
{
	int node;
	int cpu = smp_processor_id();
	int apicid = hard_smp_processor_id();

	/* Don't do the funky fallback heuristics the AMD version employs
	   for now. */
	node = apicid_to_node[apicid];
	if (node == NUMA_NO_NODE)
		node = first_node(node_online_map);
	numa_set_node(cpu, node);

	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
}
static void __cpuinit init_intel(struct cpuinfo_x86 *c)
{
	unsigned n;

	init_intel_cacheinfo(c);
	if (c->cpuid_level > 9) {
		unsigned eax = cpuid_eax(10);
		/* Check for version and the number of counters */
		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
			set_bit(X86_FEATURE_ARCH_PERFMON, &c->x86_capability);
	}

	if (cpu_has_ds) {
		unsigned int l1, l2;

		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
		if (!(l1 & (1<<11)))
			set_bit(X86_FEATURE_BTS, c->x86_capability);
		if (!(l1 & (1<<12)))
			set_bit(X86_FEATURE_PEBS, c->x86_capability);
	}

	n = c->extended_cpuid_level;
	if (n >= 0x80000008) {
		unsigned eax = cpuid_eax(0x80000008);
		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
		/* CPUID workaround for Intel 0F34 CPU */
		if (c->x86_vendor == X86_VENDOR_INTEL &&
		    c->x86 == 0xF && c->x86_model == 0x3 &&
		    c->x86_mask == 0x4)
			c->x86_phys_bits = 36;
	}

	if (c->x86 == 15)
		c->x86_cache_alignment = c->x86_clflush_size * 2;
	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
	if (c->x86 == 6)
		set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
	if (c->x86 == 15)
		set_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
	else
		clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
	c->x86_max_cores = intel_num_cpu_cores(c);

	srat_detect_node();
}
static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
{
	char *v = c->x86_vendor_id;

	if (!strcmp(v, "AuthenticAMD"))
		c->x86_vendor = X86_VENDOR_AMD;
	else if (!strcmp(v, "GenuineIntel"))
		c->x86_vendor = X86_VENDOR_INTEL;
	else
		c->x86_vendor = X86_VENDOR_UNKNOWN;
}
struct cpu_model_info {
	char *model_names[16];
};
/* Do some early cpuid on the boot CPU to get some parameters that are
   needed before check_bugs. Everything advanced is in identify_cpu
   below. */
static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
{
	u32 tfms, misc;

	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = -1;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0'; /* Unset */
	c->x86_model_id[0] = '\0';  /* Unset */
	c->x86_clflush_size = 64;
	c->x86_cache_alignment = c->x86_clflush_size;
	c->x86_max_cores = 1;
	c->extended_cpuid_level = 0;
	memset(&c->x86_capability, 0, sizeof c->x86_capability);

	/* Get vendor name */
	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
	      (unsigned int *)&c->x86_vendor_id[0],
	      (unsigned int *)&c->x86_vendor_id[8],
	      (unsigned int *)&c->x86_vendor_id[4]);

	get_cpu_vendor(c);

	/* Initialize the standard set of capabilities */
	/* Note that the vendor-specific code below might override */

	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
		      &c->x86_capability[0]);
		c->x86 = (tfms >> 8) & 0xf;
		c->x86_model = (tfms >> 4) & 0xf;
		c->x86_mask = tfms & 0xf;
		if (c->x86 == 0xf)
			c->x86 += (tfms >> 20) & 0xff;
		if (c->x86 >= 0x6)
			c->x86_model += ((tfms >> 16) & 0xF) << 4;
		if (c->x86_capability[0] & (1<<19))
			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
	} else {
		/* Have CPUID level 0 only - unheard of */
		c->x86 = 4;
	}

	c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
}
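
/*
 * Worked example (illustrative): tfms = 0x0001067a decodes as base
 * family 6, base model 7, stepping 0xa; since the family is >= 6 the
 * extended model nibble (1) is folded in, giving model 0x17.  If the
 * CLFSH feature bit (bit 19) is set, misc[15:8] holds the CLFLUSH line
 * size in 8-byte units, e.g. 8 * 8 = 64 bytes.
 */
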
/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
{
	int i;
	u32 xlvl;

	early_identify_cpu(c);

	/* AMD-defined flags: level 0x80000001 */
	xlvl = cpuid_eax(0x80000000);
	c->extended_cpuid_level = xlvl;
	if ((xlvl & 0xffff0000) == 0x80000000) {
		if (xlvl >= 0x80000001) {
			c->x86_capability[1] = cpuid_edx(0x80000001);
			c->x86_capability[6] = cpuid_ecx(0x80000001);
		}
		if (xlvl >= 0x80000004)
			get_model_name(c); /* Default name */
	}

	/* Transmeta-defined flags: level 0x80860001 */
	xlvl = cpuid_eax(0x80860000);
	if ((xlvl & 0xffff0000) == 0x80860000) {
		/* Don't set x86_cpuid_level here for now to not confuse. */
		if (xlvl >= 0x80860001)
			c->x86_capability[2] = cpuid_edx(0x80860001);
	}

	init_scattered_cpuid_features(c);

	c->apicid = phys_pkg_id(0);

	/*
	 * Vendor-specific initialization. In this section we
	 * canonicalize the feature flags, meaning if there are
	 * features a certain CPU supports which CPUID doesn't
	 * tell us, CPUID claiming incorrect flags, or other bugs,
	 * we handle them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	switch (c->x86_vendor) {
	case X86_VENDOR_AMD:
		init_amd(c);
		break;

	case X86_VENDOR_INTEL:
		init_intel(c);
		break;

	case X86_VENDOR_UNKNOWN:
	default:
		display_cacheinfo(c);
		break;
	}

	select_idle_routine(c);
	detect_ht(c);

	/*
	 * On SMP, boot_cpu_data holds the common feature set between
	 * all CPUs; so make sure that we indicate which features are
	 * common between the CPUs. The first time this routine gets
	 * executed, c == &boot_cpu_data.
	 */
	if (c != &boot_cpu_data) {
		/* AND the already accumulated flags with these */
		for (i = 0; i < NCAPINTS; i++)
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
	}

#ifdef CONFIG_X86_MCE
	mcheck_init(c);
#endif
	if (c != &boot_cpu_data)
		mtrr_ap_init();

	numa_add_cpu(smp_processor_id());
}
void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
{
	if (c->x86_model_id[0])
		printk("%s", c->x86_model_id);

	if (c->x86_mask || c->cpuid_level >= 0)
		printk(" stepping %02x\n", c->x86_mask);
	else
		printk("\n");
}
/*
 * Get CPU information for use by the procfs.
 */
static int show_cpuinfo(struct seq_file *m, void *v)
{
	struct cpuinfo_x86 *c = v;
	int cpu = 0, i;

	/*
	 * These flag bits must match the definitions in <asm/cpufeature.h>.
	 * NULL means this bit is undefined or reserved; either way it doesn't
	 * have meaning as far as Linux is concerned. Note that it's important
	 * to realize there is a difference between this table and CPUID -- if
	 * applications want to get the raw CPUID data, they should access
	 * /dev/cpu/<cpu_nr>/cpuid instead.
	 */
	static const char *const x86_cap_flags[] = {
1017 "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
1018 "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
1019 "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
1020 "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", "pbe",
1023 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1024 NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
1025 NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
1026 NULL, "fxsr_opt", "pdpe1gb", "rdtscp", NULL, "lm",
1027 "3dnowext", "3dnow",
1029 /* Transmeta-defined */
1030 "recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
1031 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1032 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1033 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1035 /* Other (Linux-defined) */
1036 "cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr",
1037 NULL, NULL, NULL, NULL,
1038 "constant_tsc", "up", NULL, "arch_perfmon",
1039 "pebs", "bts", NULL, "sync_rdtsc",
1040 "rep_good", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1041 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1043 /* Intel-defined (#2) */
1044 "pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est",
1045 "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
1046 NULL, NULL, "dca", "sse4_1", "sse4_2", NULL, NULL, "popcnt",
1047 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1049 /* VIA/Cyrix/Centaur-defined */
1050 NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en",
1051 "ace2", "ace2_en", "phe", "phe_en", "pmm", "pmm_en", NULL, NULL,
1052 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1053 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1055 /* AMD-defined (#2) */
1056 "lahf_lm", "cmp_legacy", "svm", "extapic",
1057 "cr8_legacy", "abm", "sse4a", "misalignsse",
1058 "3dnowprefetch", "osvw", "ibs", "sse5",
1059 "skinit", "wdt", NULL, NULL,
1060 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1061 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1063 /* Auxiliary (Linux-defined) */
1064 "ida", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1065 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1066 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1067 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	static const char *const x86_power_flags[] = {
		"ts",	/* temperature sensor */
		"fid",	/* frequency id control */
		"vid",	/* voltage id control */
		"ttp",	/* thermal trip */
		"tm",
		"stc",
		"100mhzsteps",
		"hwpstate",
		"",	/* tsc invariant mapped to constant_tsc */
		/* nothing */
	};

#ifdef CONFIG_SMP
	cpu = c->cpu_index;
#endif

	seq_printf(m, "processor\t: %u\n"
		   "vendor_id\t: %s\n"
		   "cpu family\t: %d\n"
		   "model\t\t: %d\n"
		   "model name\t: %s\n",
		   (unsigned)cpu,
		   c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
		   c->x86,
		   (int)c->x86_model,
		   c->x86_model_id[0] ? c->x86_model_id : "unknown");
	if (c->x86_mask || c->cpuid_level >= 0)
		seq_printf(m, "stepping\t: %d\n", c->x86_mask);
	else
		seq_printf(m, "stepping\t: unknown\n");

	if (cpu_has(c, X86_FEATURE_TSC)) {
		unsigned int freq = cpufreq_quick_get((unsigned)cpu);

		if (!freq)
			freq = cpu_khz;
		seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
			   freq / 1000, (freq % 1000));
	}

	if (c->x86_cache_size >= 0)
		seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);

	if (smp_num_siblings * c->x86_max_cores > 1) {
		seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
		seq_printf(m, "siblings\t: %d\n",
			   cpus_weight(per_cpu(cpu_core_map, cpu)));
		seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
		seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
	}
1127 "fpu_exception\t: yes\n"
1128 "cpuid level\t: %d\n"
1135 for ( i = 0 ; i < 32*NCAPINTS ; i++ )
1136 if (cpu_has(c, i) && x86_cap_flags[i] != NULL)
1137 seq_printf(m, " %s", x86_cap_flags[i]);
	seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
		   c->loops_per_jiffy/(500000/HZ),
		   (c->loops_per_jiffy/(5000/HZ)) % 100);
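
	/*
	 * Sanity check of the formula (illustrative): loops_per_jiffy is
	 * calibrated so that loops_per_jiffy * HZ delay-loop iterations run
	 * per second, and each iteration counts as two "bogo instructions",
	 * so BogoMIPS = loops_per_jiffy * HZ / 500000.  With HZ = 250 and
	 * loops_per_jiffy = 8,000,000 this prints "bogomips : 4000.00".
	 */
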
	if (c->x86_tlbsize > 0)
		seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
	seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
	seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);

	seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
		   c->x86_phys_bits, c->x86_virt_bits);

	seq_printf(m, "power management:");
	for (i = 0; i < 32; i++) {
		if (c->x86_power & (1 << i)) {
			if (i < ARRAY_SIZE(x86_power_flags) &&
			    x86_power_flags[i])
				seq_printf(m, "%s%s",
					   x86_power_flags[i][0] ? " " : "",
					   x86_power_flags[i]);
			else
				seq_printf(m, " [%d]", i);
		}
	}

	seq_printf(m, "\n\n");

	return 0;
}
static void *c_start(struct seq_file *m, loff_t *pos)
{
	if (*pos == 0)	/* just in case, cpu 0 is not the first */
		*pos = first_cpu(cpu_online_map);
	if ((*pos) < NR_CPUS && cpu_online(*pos))
		return &cpu_data(*pos);
	return NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	*pos = next_cpu(*pos, cpu_online_map);
	return c_start(m, pos);
}

static void c_stop(struct seq_file *m, void *v)
{
}

struct seq_operations cpuinfo_op = {
	.start = c_start,
	.next = c_next,
	.stop = c_stop,
	.show = show_cpuinfo,
};