Merge branch 'for-2.6.28' of git://git.kernel.dk/linux-2.6-block
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index de7439f82b9230d7a65fa8a12af027bc7a578660..6bff382094f58a2a40b0adebdf5fb264905614ba 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -295,13 +295,19 @@ static int setup_k7_watchdog(unsigned nmi_hz)
        /* setup the timer */
        wrmsr(evntsel_msr, evntsel, 0);
        write_watchdog_counter(perfctr_msr, "K7_PERFCTR0",nmi_hz);
-       apic_write(APIC_LVTPC, APIC_DM_NMI);
-       evntsel |= K7_EVNTSEL_ENABLE;
-       wrmsr(evntsel_msr, evntsel, 0);
 
+       /* initialize the wd struct before enabling */
        wd->perfctr_msr = perfctr_msr;
        wd->evntsel_msr = evntsel_msr;
        wd->cccr_msr = 0;  /* unused */
+
+       /* ok, everything is initialized, announce that we're set */
+       cpu_nmi_set_wd_enabled();
+
+       apic_write(APIC_LVTPC, APIC_DM_NMI);
+       evntsel |= K7_EVNTSEL_ENABLE;
+       wrmsr(evntsel_msr, evntsel, 0);
+
        return 1;
 }
 
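The hunk above (and the matching hunks below for the P6, P4, and architectural-perfmon setup paths) moves the APIC_LVTPC unmask and the counter-enable write to after the wd fields are filled in and cpu_nmi_set_wd_enabled() has been called, so the NMI path never sees a half-initialized watchdog. A minimal, hypothetical user-space sketch of that publish-before-enable ordering follows; the struct, the flag, and the MSR numbers are illustrative stand-ins, not the kernel's actual definitions.

#include <stdio.h>

/* Illustrative stand-in for the per-CPU watchdog bookkeeping. */
struct wd_data {
	unsigned int perfctr_msr;
	unsigned int evntsel_msr;
	unsigned int cccr_msr;
};

static struct wd_data wd;
static int wd_enabled;		/* models what cpu_nmi_set_wd_enabled() publishes */

/* The NMI path only trusts wd once the enabled flag has been set. */
static void nmi_path(void)
{
	if (!wd_enabled)
		return;		/* watchdog not announced yet: nothing to reload */
	printf("reload counter via MSR %#x\n", wd.perfctr_msr);
}

int main(void)
{
	/* 1. fill in everything the NMI path will read */
	wd.perfctr_msr = 0xc0010004;	/* illustrative value (K7 PERFCTR0) */
	wd.evntsel_msr = 0xc0010000;	/* illustrative value (K7 EVNTSEL0) */
	wd.cccr_msr = 0;		/* unused on K7 */

	/* 2. announce that the data above is valid */
	wd_enabled = 1;

	/* 3. only now would the real code unmask APIC_LVTPC and set the
	 *    enable bit in the event-select MSR */
	nmi_path();
	return 0;
}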
@@ -379,13 +385,19 @@ static int setup_p6_watchdog(unsigned nmi_hz)
        wrmsr(evntsel_msr, evntsel, 0);
        nmi_hz = adjust_for_32bit_ctr(nmi_hz);
        write_watchdog_counter32(perfctr_msr, "P6_PERFCTR0",nmi_hz);
-       apic_write(APIC_LVTPC, APIC_DM_NMI);
-       evntsel |= P6_EVNTSEL0_ENABLE;
-       wrmsr(evntsel_msr, evntsel, 0);
 
+       /* initialize the wd struct before enabling */
        wd->perfctr_msr = perfctr_msr;
        wd->evntsel_msr = evntsel_msr;
        wd->cccr_msr = 0;  /* unused */
+
+       /* ok, everything is initialized, announce that we're set */
+       cpu_nmi_set_wd_enabled();
+
+       apic_write(APIC_LVTPC, APIC_DM_NMI);
+       evntsel |= P6_EVNTSEL0_ENABLE;
+       wrmsr(evntsel_msr, evntsel, 0);
+
        return 1;
 }
 
@@ -432,6 +444,27 @@ static const struct wd_ops p6_wd_ops = {
 #define P4_CCCR_ENABLE         (1 << 12)
 #define P4_CCCR_OVF            (1 << 31)
 
+#define P4_CONTROLS 18
+static unsigned int p4_controls[18] = {
+       MSR_P4_BPU_CCCR0,
+       MSR_P4_BPU_CCCR1,
+       MSR_P4_BPU_CCCR2,
+       MSR_P4_BPU_CCCR3,
+       MSR_P4_MS_CCCR0,
+       MSR_P4_MS_CCCR1,
+       MSR_P4_MS_CCCR2,
+       MSR_P4_MS_CCCR3,
+       MSR_P4_FLAME_CCCR0,
+       MSR_P4_FLAME_CCCR1,
+       MSR_P4_FLAME_CCCR2,
+       MSR_P4_FLAME_CCCR3,
+       MSR_P4_IQ_CCCR0,
+       MSR_P4_IQ_CCCR1,
+       MSR_P4_IQ_CCCR2,
+       MSR_P4_IQ_CCCR3,
+       MSR_P4_IQ_CCCR4,
+       MSR_P4_IQ_CCCR5,
+};
 /*
  * Set up IQ_COUNTER0 to behave like a clock, by having IQ_CCCR0 filter
  * CRU_ESCR0 (with any non-null event selector) through a complemented
@@ -473,12 +506,38 @@ static int setup_p4_watchdog(unsigned nmi_hz)
                evntsel_msr = MSR_P4_CRU_ESCR0;
                cccr_msr = MSR_P4_IQ_CCCR0;
                cccr_val = P4_CCCR_OVF_PMI0 | P4_CCCR_ESCR_SELECT(4);
+
+               /*
+                * If we're on a kdump kernel or in a similar situation,
+                * other performance counter registers may still be set up
+                * to interrupt, and they'll keep interrupting forever
+                * because of the P4_CCCR_OVF quirk. So ACK all the
+                * pending interrupts and disable all those registers
+                * here, before reenabling the NMI delivery. Refer to
+                * p4_rearm() about the P4_CCCR_OVF quirk.
+                */
+               if (reset_devices) {
+                       unsigned int low, high;
+                       int i;
+
+                       for (i = 0; i < P4_CONTROLS; i++) {
+                               rdmsr(p4_controls[i], low, high);
+                               low &= ~(P4_CCCR_ENABLE | P4_CCCR_OVF);
+                               wrmsr(p4_controls[i], low, high);
+                       }
+               }
        } else {
                /* logical cpu 1 */
                perfctr_msr = MSR_P4_IQ_PERFCTR1;
                evntsel_msr = MSR_P4_CRU_ESCR0;
                cccr_msr = MSR_P4_IQ_CCCR1;
-               cccr_val = P4_CCCR_OVF_PMI1 | P4_CCCR_ESCR_SELECT(4);
+
+               /* Pentium 4 D processors don't support P4_CCCR_OVF_PMI1 */
+               if (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_mask == 4)
+                       cccr_val = P4_CCCR_OVF_PMI0;
+               else
+                       cccr_val = P4_CCCR_OVF_PMI1;
+               cccr_val |= P4_CCCR_ESCR_SELECT(4);
        }
 
        evntsel = P4_ESCR_EVENT_SELECT(0x3F)
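The reset_devices branch above walks the p4_controls[] table added earlier and clears the enable and overflow bits of each CCCR image before NMI delivery is re-enabled. A small stand-alone sketch of that mask arithmetic, reusing the P4_CCCR_ENABLE and P4_CCCR_OVF bit positions defined in this file (the sample register image itself is made up):

#include <stdio.h>

#define P4_CCCR_ENABLE	(1u << 12)
#define P4_CCCR_OVF	(1u << 31)

int main(void)
{
	/* made-up CCCR image with both the overflow and enable bits set */
	unsigned int low = P4_CCCR_OVF | P4_CCCR_ENABLE | 0x3;

	/* the same clearing the kdump path applies to each control register */
	low &= ~(P4_CCCR_ENABLE | P4_CCCR_OVF);
	printf("cleared CCCR image: %#x\n", low);	/* prints 0x3 */
	return 0;
}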
@@ -493,12 +552,17 @@ static int setup_p4_watchdog(unsigned nmi_hz)
        wrmsr(evntsel_msr, evntsel, 0);
        wrmsr(cccr_msr, cccr_val, 0);
        write_watchdog_counter(perfctr_msr, "P4_IQ_COUNTER0", nmi_hz);
-       apic_write(APIC_LVTPC, APIC_DM_NMI);
-       cccr_val |= P4_CCCR_ENABLE;
-       wrmsr(cccr_msr, cccr_val, 0);
+
        wd->perfctr_msr = perfctr_msr;
        wd->evntsel_msr = evntsel_msr;
        wd->cccr_msr = cccr_msr;
+
+       /* ok, everything is initialized, announce that we're set */
+       cpu_nmi_set_wd_enabled();
+
+       apic_write(APIC_LVTPC, APIC_DM_NMI);
+       cccr_val |= P4_CCCR_ENABLE;
+       wrmsr(cccr_msr, cccr_val, 0);
        return 1;
 }
 
@@ -614,13 +678,17 @@ static int setup_intel_arch_watchdog(unsigned nmi_hz)
        wrmsr(evntsel_msr, evntsel, 0);
        nmi_hz = adjust_for_32bit_ctr(nmi_hz);
        write_watchdog_counter32(perfctr_msr, "INTEL_ARCH_PERFCTR0", nmi_hz);
-       apic_write(APIC_LVTPC, APIC_DM_NMI);
-       evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
-       wrmsr(evntsel_msr, evntsel, 0);
 
        wd->perfctr_msr = perfctr_msr;
        wd->evntsel_msr = evntsel_msr;
        wd->cccr_msr = 0;  /* unused */
+
+       /* ok, everything is initialized, announce that we're set */
+       cpu_nmi_set_wd_enabled();
+
+       apic_write(APIC_LVTPC, APIC_DM_NMI);
+       evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+       wrmsr(evntsel_msr, evntsel, 0);
        intel_arch_wd_ops.checkbit = 1ULL << (eax.split.bit_width - 1);
        return 1;
 }
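For reference, the checkbit stored at the end of setup_intel_arch_watchdog() is just the most significant bit of an N-bit counter, with N taken from the CPUID-reported architectural-perfmon counter width. A hypothetical example of the computation (the 40-bit width is only an example value):

#include <stdio.h>

int main(void)
{
	/* example counter width; the real value comes from the
	 * architectural perfmon CPUID leaf (eax.split.bit_width) */
	unsigned int bit_width = 40;
	unsigned long long checkbit = 1ULL << (bit_width - 1);

	printf("checkbit for a %u-bit counter: %#llx\n", bit_width, checkbit);
	return 0;
}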