#include <linux/moduleparam.h>
#include <linux/sched.h> /* need_resched() */
#include <linux/latency.h>
+#include <linux/clockchips.h>
+
+/*
+ * Include the apic definitions for x86 to have the APIC timer related defines
+ * available also for UP (on SMP it gets magically included via linux/smp.h).
+ * asm/acpi.h is not an option, as it would require more include magic. Also
+ * creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
+ */
+#ifdef CONFIG_X86
+#include <asm/apic.h>
+#endif
+
#include <asm/io.h>
#include <asm/uaccess.h>
#define ACPI_PROCESSOR_COMPONENT 0x01000000
#define ACPI_PROCESSOR_CLASS "processor"
-#define ACPI_PROCESSOR_DRIVER_NAME "ACPI Processor Driver"
#define _COMPONENT ACPI_PROCESSOR_COMPONENT
-ACPI_MODULE_NAME("acpi_processor")
+ACPI_MODULE_NAME("processor_idle");
#define ACPI_PROCESSOR_FILE_POWER "power"
#define US_TO_PM_TIMER_TICKS(t) ((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
#define C2_OVERHEAD 4 /* 1us (3.579 ticks per us) */
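+
+/*
+ * Worked example (illustrative): PM_TIMER_FREQUENCY is 3579545 Hz, so the
+ * macro above converts at (3579545/1000)/1000 ~= 3.579 ticks per us, e.g.
+ * US_TO_PM_TIMER_TICKS(100) = (100 * 3579) / 1000 = 357 ticks. The
+ * C2_OVERHEAD of 4 ticks thus accounts for roughly 1.1 us of C2 entry/exit
+ * cost subtracted from the measured sleep time.
+ */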
static unsigned int bm_history __read_mostly =
(HZ >= 800 ? 0xFFFFFFFF : ((1U << (HZ / 25)) - 1));
module_param(bm_history, uint, 0644);
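+
+/*
+ * Worked example (illustrative, assuming the bus-master activity mask is
+ * shifted once per jiffy): for HZ=250 the default is
+ * (1U << (250/25)) - 1 = 0x3FF, i.e. bus-master activity within the last
+ * 10 jiffies (40 ms) is retained. The HZ >= 800 case uses the full 32-bit
+ * mask, since 1U << (HZ/25) would shift out of range there.
+ */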
+
+static unsigned use_ipi = 2;
+module_param(use_ipi, uint, 0644);
+MODULE_PARM_DESC(use_ipi, "Force IPI (instead of LAPIC timer) wakeups on "
+		 "machines whose local APIC timer stops in C2/C3. "
+		 "0=apic, 1=ipi, 2=auto");
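+
+/*
+ * Usage sketch (illustrative, assuming processor_idle is linked into the
+ * "processor" module): pass use_ipi=1 at load time, e.g.
+ *
+ *	modprobe processor use_ipi=1
+ *
+ * or change it at runtime via /sys/module/processor/parameters/use_ipi,
+ * which the 0644 permissions above expose read-write for root.
+ */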
+
/* --------------------------------------------------------------------------
Power Management
-------------------------------------------------------------------------- */
}
}
+#ifdef ARCH_APICTIMER_STOPS_ON_C3
+
+/*
+ * Some BIOS implementations switch to C3 in the published C2 state.
+ * This seems to be a common problem on AMD boxen and Intel Dothan/Banias
+ * Pentium M machines.
+ */
+static void acpi_timer_check_state(int state, struct acpi_processor *pr,
+ struct acpi_processor_cx *cx)
+{
+ struct acpi_processor_power *pwr = &pr->power;
+
+	/*
+	 * Check if one of the previous states already marked the lapic
+	 * unstable
+	 */
+ if (pwr->timer_broadcast_on_state < state)
+ return;
+
+	if (cx->type >= ACPI_STATE_C2) {
+		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+			pwr->timer_broadcast_on_state = state;
+		else if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+			 boot_cpu_data.x86 == 6 &&
+			 (boot_cpu_data.x86_model == 13 ||
+			  boot_cpu_data.x86_model == 9)) {
+			/* Pentium M Dothan (model 13) / Banias (model 9) */
+			pwr->timer_broadcast_on_state = state;
+		}
+	}
+}
+
+static void acpi_propagate_timer_broadcast(struct acpi_processor *pr)
+{
+#ifdef CONFIG_GENERIC_CLOCKEVENTS
+ unsigned long reason;
+
+ reason = pr->power.timer_broadcast_on_state < INT_MAX ?
+ CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;
+
+ clockevents_notify(reason, &pr->id);
+#else
+ cpumask_t mask = cpumask_of_cpu(pr->id);
+
+	if (use_ipi == 0)
+		on_each_cpu(switch_ipi_to_APIC_timer, &mask, 1, 1);
+	else if (use_ipi == 1)
+		on_each_cpu(switch_APIC_timer_to_ipi, &mask, 1, 1);
+	else {
+		if (pr->power.timer_broadcast_on_state < INT_MAX)
+			on_each_cpu(switch_APIC_timer_to_ipi, &mask, 1, 1);
+		else
+			on_each_cpu(switch_ipi_to_APIC_timer, &mask, 1, 1);
+	}
+#endif
+}
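+
+/*
+ * Note: in auto mode (use_ipi neither 0 nor 1) the decision comes from the
+ * C-state scan in acpi_processor_power_verify() below; a
+ * timer_broadcast_on_state below INT_MAX means at least one valid C-state
+ * stops the LAPIC timer, so broadcast wakeups are needed from that state
+ * onwards.
+ */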
+
+/* Power(C) State timer broadcast control */
+static void acpi_state_timer_broadcast(struct acpi_processor *pr,
+ struct acpi_processor_cx *cx,
+ int broadcast)
+{
+#ifdef CONFIG_GENERIC_CLOCKEVENTS
+ int state = cx - pr->power.states;
+
+ if (state >= pr->power.timer_broadcast_on_state) {
+ unsigned long reason;
+
+ reason = broadcast ? CLOCK_EVT_NOTIFY_BROADCAST_ENTER :
+ CLOCK_EVT_NOTIFY_BROADCAST_EXIT;
+ clockevents_notify(reason, &pr->id);
+ }
+#endif
+}
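+
+/*
+ * Callers bracket the actual C-state entry with this helper, as in the
+ * idle path below (sketch):
+ *
+ *	acpi_state_timer_broadcast(pr, cx, 1);
+ *	acpi_cstate_enter(cx);
+ *	acpi_state_timer_broadcast(pr, cx, 0);
+ *
+ * so broadcast wakeups cover exactly the interval in which the LAPIC
+ * timer may be stopped.
+ */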
+
+#else
+
+static void acpi_timer_check_state(int state, struct acpi_processor *pr,
+				   struct acpi_processor_cx *cx) { }
+static void acpi_propagate_timer_broadcast(struct acpi_processor *pr) { }
+static void acpi_state_timer_broadcast(struct acpi_processor *pr,
+				       struct acpi_processor_cx *cx,
+				       int broadcast) { }
+
+#endif
+
static void acpi_processor_idle(void)
{
struct acpi_processor *pr = NULL;
/* Get start time (ticks) */
t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
/* Invoke C2 */
+ acpi_state_timer_broadcast(pr, cx, 1);
acpi_cstate_enter(cx);
/* Get end time (ticks) */
t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
/* Compute time (ticks) that we were actually asleep */
sleep_ticks =
ticks_elapsed(t1, t2) - cx->latency_ticks - C2_OVERHEAD;
+ acpi_state_timer_broadcast(pr, cx, 0);
break;
case ACPI_STATE_C3:
/* Get start time (ticks) */
t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
/* Invoke C3 */
+ acpi_state_timer_broadcast(pr, cx, 1);
acpi_cstate_enter(cx);
/* Get end time (ticks) */
t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
/* Compute time (ticks) that we were actually asleep */
sleep_ticks =
ticks_elapsed(t1, t2) - cx->latency_ticks - C3_OVERHEAD;
+ acpi_state_timer_broadcast(pr, cx, 0);
break;
default:
unsigned int i;
unsigned int working = 0;
-#ifdef ARCH_APICTIMER_STOPS_ON_C3
- int timer_broadcast = 0;
- cpumask_t mask = cpumask_of_cpu(pr->id);
- on_each_cpu(switch_ipi_to_APIC_timer, &mask, 1, 1);
-#endif
+ pr->power.timer_broadcast_on_state = INT_MAX;
for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
struct acpi_processor_cx *cx = &pr->power.states[i];
case ACPI_STATE_C2:
acpi_processor_power_verify_c2(cx);
-#ifdef ARCH_APICTIMER_STOPS_ON_C3
- /* Some AMD systems fake C3 as C2, but still
- have timer troubles */
- if (cx->valid &&
- boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
- timer_broadcast++;
-#endif
+			/* Probe for broadcast only in auto mode
+			   (use_ipi neither 0 nor 1) */
+			if (cx->valid && use_ipi != 0 && use_ipi != 1)
+				acpi_timer_check_state(i, pr, cx);
break;
case ACPI_STATE_C3:
acpi_processor_power_verify_c3(pr, cx);
-#ifdef ARCH_APICTIMER_STOPS_ON_C3
- if (cx->valid)
- timer_broadcast++;
-#endif
+ if (cx->valid && use_ipi != 0 && use_ipi != 1)
+ acpi_timer_check_state(i, pr, cx);
break;
}
working++;
}
-#ifdef ARCH_APICTIMER_STOPS_ON_C3
- if (timer_broadcast)
- on_each_cpu(switch_APIC_timer_to_ipi, &mask, 1, 1);
-#endif
+ acpi_propagate_timer_broadcast(pr);
return (working);
}