diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index ee5759bef9454df682d7520032e8c33f5bfbc0b8..f18261368e76e87059606d8d8dfc4e753266b99a 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -63,6 +63,7 @@
 ACPI_MODULE_NAME("processor_idle");
 #define ACPI_PROCESSOR_FILE_POWER      "power"
 #define US_TO_PM_TIMER_TICKS(t)                ((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
+#define PM_TIMER_TICK_NS               (1000000000ULL/PM_TIMER_FREQUENCY)
 #define C2_OVERHEAD                    4       /* 1us (3.579 ticks per us) */
 #define C3_OVERHEAD                    4       /* 1us (3.579 ticks per us) */
 static void (*pm_idle_save) (void) __read_mostly;
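
The ACPI PM timer ticks at PM_TIMER_FREQUENCY = 3579545 Hz (3.579545 MHz,
the classic ACPI timer rate), so the new PM_TIMER_TICK_NS works out to
roughly 279 ns per tick. It is the inverse of US_TO_PM_TIMER_TICKS and
feeds the sched_clock_idle_wakeup_event() calls added further down, which
expect nanoseconds. A minimal standalone sketch of the conversions, with
the constants restated here for illustration rather than taken from
kernel headers:

	#include <stdio.h>

	#define PM_TIMER_FREQUENCY	3579545UL	/* 3.579545 MHz */
	#define US_TO_PM_TIMER_TICKS(t)	((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
	#define PM_TIMER_TICK_NS	(1000000000ULL/PM_TIMER_FREQUENCY)

	int main(void)
	{
		unsigned long long sleep_ticks = 3580;	/* ~1 ms of idle */

		/* ~1 ms of PM-timer ticks converted to nanoseconds */
		printf("%llu ticks -> %llu ns\n",
		       sleep_ticks, sleep_ticks * PM_TIMER_TICK_NS);
		/* and the reverse direction used by the overhead macros */
		printf("100 us -> %lu ticks\n", US_TO_PM_TIMER_TICKS(100UL));
		return 0;
	}
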
@@ -324,6 +325,23 @@ static void acpi_state_timer_broadcast(struct acpi_processor *pr,
 
 #endif
 
+/*
+ * Suspend / resume control
+ */
+static int acpi_idle_suspend;
+
+int acpi_processor_suspend(struct acpi_device * device, pm_message_t state)
+{
+       acpi_idle_suspend = 1;
+       return 0;
+}
+
+int acpi_processor_resume(struct acpi_device * device)
+{
+       acpi_idle_suspend = 0;
+       return 0;
+}
+
 static void acpi_processor_idle(void)
 {
        struct acpi_processor *pr = NULL;
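
The new handlers only latch a flag; acpi_processor_idle() below checks
acpi_idle_suspend and falls back to the default idle routine while a
suspend/resume transition is in flight, so deeper C-states are never
entered mid-transition. For the flag to do anything, the handlers must be
wired into the processor driver's ops. A sketch of that hookup, assuming
the acpi_driver layout of this kernel era (the surrounding fields are
illustrative; see drivers/acpi/processor_core.c for the real table):

	static struct acpi_driver acpi_processor_driver = {
		.name	= "processor",
		.class	= ACPI_PROCESSOR_CLASS,
		.ids	= ACPI_PROCESSOR_HID,
		.ops	= {
			.add	 = acpi_processor_add,
			.remove	 = acpi_processor_remove,
			.suspend = acpi_processor_suspend, /* sets acpi_idle_suspend */
			.resume	 = acpi_processor_resume,  /* clears it again */
		},
	};
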
@@ -332,16 +350,18 @@ static void acpi_processor_idle(void)
        int sleep_ticks = 0;
        u32 t1, t2 = 0;
 
-       pr = processors[smp_processor_id()];
-       if (!pr)
-               return;
-
        /*
         * Interrupts must be disabled during bus mastering calculations and
         * for C2/C3 transitions.
         */
        local_irq_disable();
 
+       pr = processors[smp_processor_id()];
+       if (!pr) {
+               local_irq_enable();
+               return;
+       }
+
        /*
         * Check whether we truly need to go idle, or should
         * reschedule:
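
Moving the processors[] lookup below local_irq_disable() ensures the
smp_processor_id() read and every later use of pr happen with the CPU
pinned, so the lookup cannot be separated from its use (e.g. across a CPU
hotplug event leaving a NULL slot); the price is that the !pr bail-out
now owns an interrupts-off section and must re-enable before returning.
The general shape of the pattern, restated outside the diff:

	local_irq_disable();
	pr = processors[smp_processor_id()];	/* CPU can no longer change */
	if (!pr) {
		local_irq_enable();		/* never return with IRQs off */
		return;
	}
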
@@ -352,7 +372,7 @@ static void acpi_processor_idle(void)
        }
 
        cx = pr->power.state;
-       if (!cx) {
+       if (!cx || acpi_idle_suspend) {
                if (pm_idle_save)
                        pm_idle_save();
                else
@@ -460,6 +480,9 @@ static void acpi_processor_idle(void)
                 * TBD: Can't get time duration while in C1, as resumes
                 *      go to an ISR rather than here.  Need to instrument
                 *      base interrupt handler.
+                *
+                * Note: the TSC better not stop in C1, sched_clock() will
+                *       skew otherwise.
                 */
                sleep_ticks = 0xFFFFFFFF;
                break;
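
The new comment records a real constraint rather than new behaviour: C1
wakeups land in the interrupt handler instead of here, so there is no
(t1, t2) pair to measure with and sleep_ticks stays pinned at 0xFFFFFFFF.
A comment-only sketch of why that matters for sched_clock():

	/* Illustrative only -- the flavour of x86 sched_clock():
	 *	u64 sched_clock(void) { return cyc2ns(rdtsc()); }
	 * If the TSC halted in C1, this clock would freeze across the
	 * idle period, and unlike the C2/C3 paths below there is no
	 * PM-timer measurement here to feed
	 * sched_clock_idle_wakeup_event() as a correction. */
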
@@ -467,28 +490,45 @@ static void acpi_processor_idle(void)
        case ACPI_STATE_C2:
                /* Get start time (ticks) */
                t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+               /* Tell the scheduler that we are going deep-idle: */
+               sched_clock_idle_sleep_event();
                /* Invoke C2 */
                acpi_state_timer_broadcast(pr, cx, 1);
                acpi_cstate_enter(cx);
                /* Get end time (ticks) */
                t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
 
-#ifdef CONFIG_GENERIC_TIME
+#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
                /* TSC halts in C2, so notify users */
                mark_tsc_unstable("possible TSC halt in C2");
 #endif
+               /* Compute time (ticks) that we were actually asleep */
+               sleep_ticks = ticks_elapsed(t1, t2);
+
+               /* Tell the scheduler how much we idled: */
+               sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
+
                /* Re-enable interrupts */
                local_irq_enable();
+               /* Do not account our idle-switching overhead: */
+               sleep_ticks -= cx->latency_ticks + C2_OVERHEAD;
+
                current_thread_info()->status |= TS_POLLING;
-               /* Compute time (ticks) that we were actually asleep */
-               sleep_ticks =
-                   ticks_elapsed(t1, t2) - cx->latency_ticks - C2_OVERHEAD;
                acpi_state_timer_broadcast(pr, cx, 0);
                break;
 
        case ACPI_STATE_C3:
-
-               if (pr->flags.bm_check) {
+               /*
+                * disable bus master
+                * bm_check implies we need ARB_DIS
+                * !bm_check implies we need cache flush
+                * bm_control implies whether we can do ARB_DIS
+                *
+                * That leaves a case where bm_check is set and bm_control is
+                * not set. In that case we cannot do much, we enter C3
+                * without doing anything.
+                */
+               if (pr->flags.bm_check && pr->flags.bm_control) {
                        if (atomic_inc_return(&c3_cpu_count) ==
                            num_online_cpus()) {
                                /*
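
Two details in the reworked C2 path are easy to miss. First, the
scheduler is told the full measured sleep time, and only afterwards is
sleep_ticks trimmed by cx->latency_ticks + C2_OVERHEAD: the clock
correction wants wall time, while the promotion/demotion logic further
down the function wants time genuinely spent asleep. Second,
ticks_elapsed() hides the PM timer wraparound; a sketch of its shape,
assuming the 24- vs 32-bit timer distinction the FADT advertises:

	static u32 ticks_elapsed(u32 t1, u32 t2)
	{
		if (t2 >= t1)
			return t2 - t1;
		else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
			/* 24-bit timer wrapped around */
			return ((0x00FFFFFF - t1) + t2) & 0x00FFFFFF;
		else
			/* 32-bit timer wrapped around */
			return (0xFFFFFFFF - t1) + t2;
	}
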
@@ -497,7 +537,7 @@ static void acpi_processor_idle(void)
                                 */
                                acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
                        }
-               } else {
+               } else if (!pr->flags.bm_check) {
                        /* SMP with no shared cache... Invalidate cache  */
                        ACPI_FLUSH_CPU_CACHE();
                }
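
Together with the C3 entry comment above, the flag combinations reduce to
a small decision table (restated here for review):

	bm_check  bm_control | action before entering C3
	--------  ---------- | -----------------------------------------
	   0          x      | flush CPU caches (no BM activity tracking)
	   1          1      | last CPU in disables arbitration (ARB_DIS)
	   1          0      | nothing we can do; enter C3 as-is
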
@@ -506,25 +546,32 @@ static void acpi_processor_idle(void)
                t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
                /* Invoke C3 */
                acpi_state_timer_broadcast(pr, cx, 1);
+               /* Tell the scheduler that we are going deep-idle: */
+               sched_clock_idle_sleep_event();
                acpi_cstate_enter(cx);
                /* Get end time (ticks) */
                t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
-               if (pr->flags.bm_check) {
+               if (pr->flags.bm_check && pr->flags.bm_control) {
                        /* Enable bus master arbitration */
                        atomic_dec(&c3_cpu_count);
                        acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
                }
 
-#ifdef CONFIG_GENERIC_TIME
+#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
                /* TSC halts in C3, so notify users */
                mark_tsc_unstable("TSC halts in C3");
 #endif
+               /* Compute time (ticks) that we were actually asleep */
+               sleep_ticks = ticks_elapsed(t1, t2);
+               /* Tell the scheduler how much we idled: */
+               sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
+
                /* Re-enable interrupts */
                local_irq_enable();
+               /* Do not account our idle-switching overhead: */
+               sleep_ticks -= cx->latency_ticks + C3_OVERHEAD;
+
                current_thread_info()->status |= TS_POLLING;
-               /* Compute time (ticks) that we were actually asleep */
-               sleep_ticks =
-                   ticks_elapsed(t1, t2) - cx->latency_ticks - C3_OVERHEAD;
                acpi_state_timer_broadcast(pr, cx, 0);
                break;
 
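
The c3_cpu_count handshake is worth spelling out: arbitration is only
disabled once the *last* CPU arrives in C3 (bus masters must keep
arbitrating while any CPU is still live), and each CPU re-enables it on
wakeup. Stripped of the surrounding timing code, the choreography in this
hunk looks like:

	if (pr->flags.bm_check && pr->flags.bm_control) {
		if (atomic_inc_return(&c3_cpu_count) == num_online_cpus())
			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
	}
	acpi_cstate_enter(cx);			/* sleep in C3 */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		atomic_dec(&c3_cpu_count);
		acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
	}
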
@@ -957,11 +1004,17 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
        }
 
        if (pr->flags.bm_check) {
-               /* bus mastering control is necessary */
                if (!pr->flags.bm_control) {
-                       ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-                                         "C3 support requires bus mastering control\n"));
-                       return;
+                       if (pr->flags.has_cst != 1) {
+                               /* bus mastering control is necessary */
+                               ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+                                       "C3 support requires BM control\n"));
+                               return;
+                       } else {
+                               /* Here we enter C3 without bus mastering */
+                               ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+                                       "C3 support without BM control\n"));
+                       }
                }
        } else {
                /*
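
The relaxed check mirrors the idle-path change above: has_cst is set when
the C-state table came from the _CST object, and a platform exporting C3
via _CST is taken at its word even without bus-master control, while the
legacy FADT-described C3 still hard-requires bm_control. A comment-only
summary of the new rule:

	/* Illustrative summary of the relaxed C3 verification:
	 *   bm_check && !bm_control &&  has_cst
	 *	-> C3 came from _CST: keep it, skip the ARB_DIS dance
	 *   bm_check && !bm_control && !has_cst
	 *	-> legacy FADT C3: unsafe without BM control, drop it
	 */
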