x86: build fix
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 9a57106c199579dc6f338f50ef9dc0d6d2a97af2..b9c9ea0217a9b1fd4581f27fed692aaa6fe167ee 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -20,6 +20,7 @@
 #include <asm/pat.h>
 #include <asm/asm.h>
 #include <asm/numa.h>
+#include <asm/smp.h>
 #ifdef CONFIG_X86_LOCAL_APIC
 #include <asm/mpspec.h>
 #include <asm/apic.h>
@@ -124,18 +125,25 @@ static inline int flag_is_changeable_p(u32 flag)
 {
        u32 f1, f2;
 
-       asm("pushfl\n\t"
-           "pushfl\n\t"
-           "popl %0\n\t"
-           "movl %0,%1\n\t"
-           "xorl %2,%0\n\t"
-           "pushl %0\n\t"
-           "popfl\n\t"
-           "pushfl\n\t"
-           "popl %0\n\t"
-           "popfl\n\t"
-           : "=&r" (f1), "=&r" (f2)
-           : "ir" (flag));
+       /*
+        * Cyrix and IDT CPUs allow CPUID to be disabled, so
+        * the code below may return different results when it
+        * is executed before and after CPUID is enabled.  Mark
+        * the asm "volatile" so gcc does not optimize away
+        * subsequent calls to this function.
+        */
+       asm volatile ("pushfl\n\t"
+                     "pushfl\n\t"
+                     "popl %0\n\t"
+                     "movl %0,%1\n\t"
+                     "xorl %2,%0\n\t"
+                     "pushl %0\n\t"
+                     "popfl\n\t"
+                     "pushfl\n\t"
+                     "popl %0\n\t"
+                     "popfl\n\t"
+                     : "=&r" (f1), "=&r" (f2)
+                     : "ir" (flag));
 
        return ((f1^f2) & flag) != 0;
 }
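
For reference, the EFLAGS-toggle trick used by flag_is_changeable_p() can be exercised from user space: flip a bit in EFLAGS and check whether the change sticks. The sketch below probes the ID bit (0x00200000), which is toggleable only when the CPU implements CPUID. It is a stand-alone demo under the assumption of a 32-bit build (gcc -m32); the names eflags_bit_is_changeable() and EFLAGS_ID are mine, not from this patch.

/* Demo only: same probe as flag_is_changeable_p(), built as a normal program. */
#include <stdio.h>

#define EFLAGS_ID 0x00200000u           /* assumed constant, mirrors X86_EFLAGS_ID */

static int eflags_bit_is_changeable(unsigned int flag)
{
        unsigned int f1, f2;

        /* Save EFLAGS, toggle the requested bit, reload, and read it back. */
        asm volatile("pushfl\n\t"
                     "pushfl\n\t"
                     "popl %0\n\t"
                     "movl %0,%1\n\t"
                     "xorl %2,%0\n\t"
                     "pushl %0\n\t"
                     "popfl\n\t"
                     "pushfl\n\t"
                     "popl %0\n\t"
                     "popfl\n\t"
                     : "=&r" (f1), "=&r" (f2)
                     : "ir" (flag));

        return ((f1 ^ f2) & flag) != 0;
}

int main(void)
{
        printf("CPUID %savailable\n",
               eflags_bit_is_changeable(EFLAGS_ID) ? "" : "not ");
        return 0;
}
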
@@ -406,7 +414,7 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
 
        if (!printed) {
                printed++;
-               printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n");
+               printk(KERN_ERR "CPU: vendor_id '%s' unknown, using generic init.\n", v);
                printk(KERN_ERR "CPU: Your system may be unstable.\n");
        }
 
@@ -465,14 +473,6 @@ static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
        }
 
 #ifdef CONFIG_X86_64
-       /* Transmeta-defined flags: level 0x80860001 */
-       xlvl = cpuid_eax(0x80860000);
-       if ((xlvl & 0xffff0000) == 0x80860000) {
-               /* Don't set x86_cpuid_level here for now to not confuse. */
-               if (xlvl >= 0x80860001)
-                       c->x86_capability[2] = cpuid_edx(0x80860001);
-       }
-
        if (c->extended_cpuid_level >= 0x80000008) {
                u32 eax = cpuid_eax(0x80000008);
 
@@ -485,6 +485,33 @@ static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
                c->x86_power = cpuid_edx(0x80000007);
 
 }
+
+static void __cpuinit identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_X86_32
+       int i;
+
+       /*
+        * First of all, decide if this is a 486 or higher
+        * It's a 486 if we can modify the AC flag
+        */
+       if (flag_is_changeable_p(X86_EFLAGS_AC))
+               c->x86 = 4;
+       else
+               c->x86 = 3;
+
+       for (i = 0; i < X86_VENDOR_NUM; i++)
+               if (cpu_devs[i] && cpu_devs[i]->c_identify) {
+                       c->x86_vendor_id[0] = 0;
+                       cpu_devs[i]->c_identify(c);
+                       if (c->x86_vendor_id[0]) {
+                               get_cpu_vendor(c);
+                               break;
+                       }
+               }
+#endif
+}
+
 /*
  * Do minimum CPU detection early.
  * Fields really needed: vendor, cpuid_level, family, model, mask,
@@ -503,13 +530,16 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 #endif
        c->x86_cache_alignment = c->x86_clflush_size;
 
-       if (!have_cpuid_p())
-               return;
-
        memset(&c->x86_capability, 0, sizeof c->x86_capability);
-
        c->extended_cpuid_level = 0;
 
+       if (!have_cpuid_p())
+               identify_cpu_without_cpuid(c);
+
+       /* Cyrix could have CPUID enabled via c_identify() */
+       if (!have_cpuid_p())
+               return;
+
        cpu_detect(c);
 
        get_cpu_vendor(c);
@@ -520,6 +550,10 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
                this_cpu->c_early_init(c);
 
        validate_pat_support(c);
+
+#ifdef CONFIG_SMP
+       c->cpu_index = boot_cpu_id;
+#endif
 }
 
 void __init early_cpu_init(void)
@@ -550,43 +584,27 @@ void __init early_cpu_init(void)
 
 /*
  * The NOPL instruction is supposed to exist on all CPUs with
- * family >= 6, unfortunately, that's not true in practice because
+ * family >= 6; unfortunately, that's not true in practice because
  * of early VIA chips and (more importantly) broken virtualizers that
- * are not easy to detect.  Hence, probe for it based on first
- * principles.
- *
- * Note: no 64-bit chip is known to lack these, but put the code here
- * for consistency with 32 bits, and to make it utterly trivial to
- * diagnose the problem should it ever surface.
+ * are not easy to detect.  In the latter case it doesn't even *fail*
+ * reliably, so probing for it doesn't even work.  Disable it completely
+ * unless we can find a reliable way to detect all the broken cases.
  */
 static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
 {
-       const u32 nopl_signature = 0x888c53b1; /* Random number */
-       u32 has_nopl = nopl_signature;
-
        clear_cpu_cap(c, X86_FEATURE_NOPL);
-       if (c->x86 >= 6) {
-               asm volatile("\n"
-                            "1:      .byte 0x0f,0x1f,0xc0\n" /* nopl %eax */
-                            "2:\n"
-                            "        .section .fixup,\"ax\"\n"
-                            "3:      xor %0,%0\n"
-                            "        jmp 2b\n"
-                            "        .previous\n"
-                            _ASM_EXTABLE(1b,3b)
-                            : "+a" (has_nopl));
-
-               if (has_nopl == nopl_signature)
-                       set_cpu_cap(c, X86_FEATURE_NOPL);
-       }
 }
 
 static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
 {
+       c->extended_cpuid_level = 0;
+
        if (!have_cpuid_p())
-               return;
+               identify_cpu_without_cpuid(c);
 
-       c->extended_cpuid_level = 0;
+       /* Cyrix could have CPUID enabled via c_identify() */
+       if (!have_cpuid_p())
+               return;
 
        cpu_detect(c);
 
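
The detect_nopl() hunk above stops probing for the 0f 1f c0 (nopl %eax) opcode via the kernel exception table. Purely for illustration, a comparable probe can be done from user space by trapping SIGILL; this is a hypothetical sketch (cpu_has_nopl() is my name), not code from this patch, and it assumes a POSIX signal environment.

/* Demo only: execute NOPL and treat a SIGILL as "not supported". */
#include <setjmp.h>
#include <signal.h>
#include <stdio.h>

static sigjmp_buf nopl_env;

static void sigill_handler(int sig)
{
        (void)sig;
        siglongjmp(nopl_env, 1);        /* NOPL faulted */
}

static int cpu_has_nopl(void)
{
        struct sigaction sa = { .sa_handler = sigill_handler };

        sigemptyset(&sa.sa_mask);
        sigaction(SIGILL, &sa, NULL);

        if (sigsetjmp(nopl_env, 1))
                return 0;               /* came back via the handler */

        asm volatile(".byte 0x0f,0x1f,0xc0");   /* nopl %eax */
        return 1;
}

int main(void)
{
        printf("NOPL %ssupported\n", cpu_has_nopl() ? "" : "not ");
        return 0;
}
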
@@ -639,17 +657,6 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
        c->x86_cache_alignment = c->x86_clflush_size;
        memset(&c->x86_capability, 0, sizeof c->x86_capability);
 
-       if (!have_cpuid_p()) {
-               /*
-                * First of all, decide if this is a 486 or higher
-                * It's a 486 if we can modify the AC flag
-                */
-               if (flag_is_changeable_p(X86_EFLAGS_AC))
-                       c->x86 = 4;
-               else
-                       c->x86 = 3;
-       }
-
        generic_identify(c);
 
        if (this_cpu->c_identify)
@@ -724,12 +731,24 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 #endif
 }
 
+#ifdef CONFIG_X86_64
+static void vgetcpu_set_mode(void)
+{
+       if (cpu_has(&boot_cpu_data, X86_FEATURE_RDTSCP))
+               vgetcpu_mode = VGETCPU_RDTSCP;
+       else
+               vgetcpu_mode = VGETCPU_LSL;
+}
+#endif
+
 void __init identify_boot_cpu(void)
 {
        identify_cpu(&boot_cpu_data);
 #ifdef CONFIG_X86_32
        sysenter_setup();
        enable_sep_cpu();
+#else
+       vgetcpu_set_mode();
 #endif
 }
 
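
vgetcpu_set_mode() above picks RDTSCP over LSL for the vgetcpu vsyscall when the CPU supports it. As a rough illustration of why RDTSCP is attractive: the kernel loads each CPU's TSC_AUX register with (node << 12) | cpu, so a single instruction returns both values along with the TSC. The snippet below assumes an x86-64 CPU with RDTSCP and that encoding; it is demo code, not part of this patch.

/* Demo only: read TSC_AUX via RDTSCP and decode it the way vgetcpu does. */
#include <stdio.h>

int main(void)
{
        unsigned int lo, hi, aux;
        unsigned long long tsc;

        /* RDTSCP: EDX:EAX = TSC, ECX = IA32_TSC_AUX (set per cpu by the kernel). */
        asm volatile("rdtscp" : "=a" (lo), "=d" (hi), "=c" (aux));
        tsc = ((unsigned long long)hi << 32) | lo;

        printf("tsc %llu, cpu %u, node %u\n", tsc, aux & 0xfff, aux >> 12);
        return 0;
}
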
@@ -802,7 +821,7 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
        else if (c->cpuid_level >= 0)
                vendor = c->x86_vendor_id;
 
-       if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
+       if (vendor && !strstr(c->x86_model_id, vendor))
                printk(KERN_CONT "%s ", vendor);
 
        if (c->x86_model_id[0])
@@ -1120,22 +1139,11 @@ void __cpuinit cpu_init(void)
        /*
         * Boot processor to setup the FP and extended state context info.
         */
-       if (!smp_processor_id())
+       if (smp_processor_id() == boot_cpu_id)
                init_thread_xstate();
 
        xsave_init();
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
-void __cpuinit cpu_uninit(void)
-{
-       int cpu = raw_smp_processor_id();
-       cpu_clear(cpu, cpu_initialized);
-
-       /* lazy TLB state */
-       per_cpu(cpu_tlbstate, cpu).state = 0;
-       per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm;
-}
-#endif
 
 #endif