diff --git a/arch/ppc64/kernel/idle.c b/arch/ppc64/kernel/idle.c
index f24ce2b872004ae4feb1f419eddb3c0866c1c440..954395d426363ab54c21d4f484b25a91e805abf0 100644
--- a/arch/ppc64/kernel/idle.c
+++ b/arch/ppc64/kernel/idle.c
 #include <linux/kernel.h>
 #include <linux/smp.h>
 #include <linux/cpu.h>
-#include <linux/module.h>
 #include <linux/sysctl.h>
-#include <linux/smp.h>
 
 #include <asm/system.h>
 #include <asm/processor.h>
-#include <asm/mmu.h>
 #include <asm/cputable.h>
 #include <asm/time.h>
-#include <asm/iSeries/HvCall.h>
-#include <asm/iSeries/ItLpQueue.h>
-#include <asm/plpar_wrappers.h>
 #include <asm/systemcfg.h>
+#include <asm/machdep.h>
 
 extern void power4_idle(void);
 
-static int (*idle_loop)(void);
-
-#ifdef CONFIG_PPC_ISERIES
-static unsigned long maxYieldTime = 0;
-static unsigned long minYieldTime = 0xffffffffffffffffUL;
-
-static void yield_shared_processor(void)
-{
-       unsigned long tb;
-       unsigned long yieldTime;
-
-       HvCall_setEnabledInterrupts(HvCall_MaskIPI |
-                                   HvCall_MaskLpEvent |
-                                   HvCall_MaskLpProd |
-                                   HvCall_MaskTimeout);
-
-       tb = get_tb();
-       /* Compute future tb value when yield should expire */
-       HvCall_yieldProcessor(HvCall_YieldTimed, tb+tb_ticks_per_jiffy);
-
-       yieldTime = get_tb() - tb;
-       if (yieldTime > maxYieldTime)
-               maxYieldTime = yieldTime;
-
-       if (yieldTime < minYieldTime)
-               minYieldTime = yieldTime;
-       
-       /*
-        * The decrementer stops during the yield.  Force a fake decrementer
-        * here and let the timer_interrupt code sort out the actual time.
-        */
-       get_paca()->lppaca.int_dword.fields.decr_int = 1;
-       process_iSeries_events();
-}
-
-static int iSeries_idle(void)
-{
-       struct paca_struct *lpaca;
-       long oldval;
-
-       /* ensure iSeries run light will be out when idle */
-       ppc64_runlatch_off();
-
-       lpaca = get_paca();
-
-       while (1) {
-               if (lpaca->lppaca.shared_proc) {
-                       if (ItLpQueue_isLpIntPending(lpaca->lpqueue_ptr))
-                               process_iSeries_events();
-                       if (!need_resched())
-                               yield_shared_processor();
-               } else {
-                       oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
-
-                       if (!oldval) {
-                               set_thread_flag(TIF_POLLING_NRFLAG);
-
-                               while (!need_resched()) {
-                                       HMT_medium();
-                                       if (ItLpQueue_isLpIntPending(lpaca->lpqueue_ptr))
-                                               process_iSeries_events();
-                                       HMT_low();
-                               }
-
-                               HMT_medium();
-                               clear_thread_flag(TIF_POLLING_NRFLAG);
-                       } else {
-                               set_need_resched();
-                       }
-               }
-
-               ppc64_runlatch_on();
-               schedule();
-               ppc64_runlatch_off();
-       }
-
-       return 0;
-}
-
-#else
-
-static int default_idle(void)
+int default_idle(void)
 {
        long oldval;
        unsigned int cpu = smp_processor_id();
@@ -129,7 +43,8 @@ static int default_idle(void)
                        set_thread_flag(TIF_POLLING_NRFLAG);
 
                        while (!need_resched() && !cpu_is_offline(cpu)) {
-                               barrier();
+                               ppc64_runlatch_off();
+
                                /*
                                 * Go into low thread priority and possibly
                                 * low power mode.
@@ -144,6 +59,7 @@ static int default_idle(void)
                        set_need_resched();
                }
 
+               ppc64_runlatch_on();
                schedule();
                if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
                        cpu_die();
@@ -152,127 +68,19 @@ static int default_idle(void)
        return 0;
 }
 
-#ifdef CONFIG_PPC_PSERIES
-
-DECLARE_PER_CPU(unsigned long, smt_snooze_delay);
-
-int dedicated_idle(void)
-{
-       long oldval;
-       struct paca_struct *lpaca = get_paca(), *ppaca;
-       unsigned long start_snooze;
-       unsigned long *smt_snooze_delay = &__get_cpu_var(smt_snooze_delay);
-       unsigned int cpu = smp_processor_id();
-
-       ppaca = &paca[cpu ^ 1];
-
-       while (1) {
-               /*
-                * Indicate to the HV that we are idle. Now would be
-                * a good time to find other work to dispatch.
-                */
-               lpaca->lppaca.idle = 1;
-
-               oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
-               if (!oldval) {
-                       set_thread_flag(TIF_POLLING_NRFLAG);
-                       start_snooze = __get_tb() +
-                               *smt_snooze_delay * tb_ticks_per_usec;
-                       while (!need_resched() && !cpu_is_offline(cpu)) {
-                               /*
-                                * Go into low thread priority and possibly
-                                * low power mode.
-                                */
-                               HMT_low();
-                               HMT_very_low();
-
-                               if (*smt_snooze_delay == 0 ||
-                                   __get_tb() < start_snooze)
-                                       continue;
-
-                               HMT_medium();
-
-                               if (!(ppaca->lppaca.idle)) {
-                                       local_irq_disable();
-
-                                       /*
-                                        * We are about to sleep the thread
-                                        * and so wont be polling any
-                                        * more.
-                                        */
-                                       clear_thread_flag(TIF_POLLING_NRFLAG);
-
-                                       /*
-                                        * SMT dynamic mode. Cede will result
-                                        * in this thread going dormant, if the
-                                        * partner thread is still doing work.
-                                        * Thread wakes up if partner goes idle,
-                                        * an interrupt is presented, or a prod
-                                        * occurs.  Returning from the cede
-                                        * enables external interrupts.
-                                        */
-                                       if (!need_resched())
-                                               cede_processor();
-                                       else
-                                               local_irq_enable();
-                               } else {
-                                       /*
-                                        * Give the HV an opportunity at the
-                                        * processor, since we are not doing
-                                        * any work.
-                                        */
-                                       poll_pending();
-                               }
-                       }
-
-                       clear_thread_flag(TIF_POLLING_NRFLAG);
-               } else {
-                       set_need_resched();
-               }
-
-               HMT_medium();
-               lpaca->lppaca.idle = 0;
-               schedule();
-               if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
-                       cpu_die();
-       }
-       return 0;
-}
-
-static int shared_idle(void)
+int native_idle(void)
 {
-       struct paca_struct *lpaca = get_paca();
-       unsigned int cpu = smp_processor_id();
-
        while (1) {
-               /*
-                * Indicate to the HV that we are idle. Now would be
-                * a good time to find other work to dispatch.
-                */
-               lpaca->lppaca.idle = 1;
+               ppc64_runlatch_off();
 
-               while (!need_resched() && !cpu_is_offline(cpu)) {
-                       local_irq_disable();
+               if (!need_resched())
+                       power4_idle();
 
-                       /*
-                        * Yield the processor to the hypervisor.  We return if
-                        * an external interrupt occurs (which are driven prior
-                        * to returning here) or if a prod occurs from another 
-                        * processor. When returning here, external interrupts
-                        * are enabled.
-                        *
-                        * Check need_resched() again with interrupts disabled
-                        * to avoid a race.
-                        */
-                       if (!need_resched())
-                               cede_processor();
-                       else
-                               local_irq_enable();
+               if (need_resched()) {
+                       ppc64_runlatch_on();
+                       schedule();
                }
 
-               HMT_medium();
-               lpaca->lppaca.idle = 0;
-               schedule();
                if (cpu_is_offline(smp_processor_id()) &&
                    system_state == SYSTEM_RUNNING)
                        cpu_die();
@@ -281,29 +89,10 @@ static int shared_idle(void)
        return 0;
 }
 
-#endif /* CONFIG_PPC_PSERIES */
-
-static int native_idle(void)
-{
-       while(1) {
-               /* check CPU type here */
-               if (!need_resched())
-                       power4_idle();
-               if (need_resched())
-                       schedule();
-
-               if (cpu_is_offline(_smp_processor_id()) &&
-                   system_state == SYSTEM_RUNNING)
-                       cpu_die();
-       }
-       return 0;
-}
-
-#endif /* CONFIG_PPC_ISERIES */
-
 void cpu_idle(void)
 {
-       idle_loop();
+       BUG_ON(NULL == ppc_md.idle_loop);
+       ppc_md.idle_loop();
 }
 
 int powersave_nap;
@@ -337,42 +126,3 @@ register_powersave_nap_sysctl(void)
 }
 __initcall(register_powersave_nap_sysctl);
 #endif
-
-int idle_setup(void)
-{
-       /*
-        * Move that junk to each platform specific file, eventually define
-        * a pSeries_idle for shared processor stuff
-        */
-#ifdef CONFIG_PPC_ISERIES
-       idle_loop = iSeries_idle;
-       return 1;
-#else
-       idle_loop = default_idle;
-#endif
-#ifdef CONFIG_PPC_PSERIES
-       if (systemcfg->platform & PLATFORM_PSERIES) {
-               if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) {
-                       if (get_paca()->lppaca.shared_proc) {
-                               printk(KERN_INFO "Using shared processor idle loop\n");
-                               idle_loop = shared_idle;
-                       } else {
-                               printk(KERN_INFO "Using dedicated idle loop\n");
-                               idle_loop = dedicated_idle;
-                       }
-               } else {
-                       printk(KERN_INFO "Using default idle loop\n");
-                       idle_loop = default_idle;
-               }
-       }
-#endif /* CONFIG_PPC_PSERIES */
-#ifndef CONFIG_PPC_ISERIES
-       if (systemcfg->platform == PLATFORM_POWERMAC ||
-           systemcfg->platform == PLATFORM_MAPLE) {
-               printk(KERN_INFO "Using native/NAP idle loop\n");
-               idle_loop = native_idle;
-       }
-#endif /* CONFIG_PPC_ISERIES */
-
-       return 1;
-}
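
For context, this patch removes the file-local idle_loop pointer and the idle_setup() selector above, and instead has cpu_idle() call ppc_md.idle_loop (BUG-ing if it was never set). Each platform is therefore expected to pick its own idle loop during setup. The sketch below is purely illustrative and not part of this patch: example_setup_arch() is a made-up name, and the extern declarations stand in for whatever header (e.g. asm/machdep.h) actually exports default_idle()/native_idle() in this tree.

    #include <linux/init.h>
    #include <asm/machdep.h>
    #include <asm/systemcfg.h>

    /* Assumed to be provided by arch/ppc64/kernel/idle.c after this patch;
     * the real declarations may live in a shared header instead. */
    extern int default_idle(void);
    extern int native_idle(void);

    static void __init example_setup_arch(void)
    {
            /*
             * Choose an idle loop for this platform.  cpu_idle() now does
             * BUG_ON(NULL == ppc_md.idle_loop), so leaving it unset is fatal.
             */
            if (systemcfg->platform == PLATFORM_POWERMAC ||
                systemcfg->platform == PLATFORM_MAPLE)
                    ppc_md.idle_loop = native_idle;   /* NAP-capable loop */
            else
                    ppc_md.idle_loop = default_idle;  /* generic polling loop */
    }

This mirrors the logic of the deleted idle_setup(): the pSeries shared/dedicated variants would be wired up the same way from their own platform setup code rather than from this file.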