Merge branch 'x86/core' into x86/unify-pci

diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index ee4ab461c50d4522184a0e9b21ab20ce794db270..9a139f6c9df30fa2c304e96da89b99fd62b78a8d 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
 
 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
 
-static int hlt_counter;
-
-unsigned long boot_option_idle_override = 0;
-EXPORT_SYMBOL(boot_option_idle_override);
-
 DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
 EXPORT_PER_CPU_SYMBOL(current_task);
 
@@ -77,57 +72,24 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
        return ((unsigned long *)tsk->thread.sp)[3];
 }
 
-/*
- * Powermanagement idle function, if any..
- */
-void (*pm_idle)(void);
-EXPORT_SYMBOL(pm_idle);
+#ifdef CONFIG_HOTPLUG_CPU
+#include <asm/nmi.h>
 
-void disable_hlt(void)
+static void cpu_exit_clear(void)
 {
-       hlt_counter++;
-}
+       int cpu = raw_smp_processor_id();
 
-EXPORT_SYMBOL(disable_hlt);
+       idle_task_exit();
 
-void enable_hlt(void)
-{
-       hlt_counter--;
-}
+       cpu_uninit();
+       irq_ctx_exit(cpu);
 
-EXPORT_SYMBOL(enable_hlt);
-
-/*
- * We use this if we don't have any better
- * idle routine..
- */
-void default_idle(void)
-{
-       if (!hlt_counter && boot_cpu_data.hlt_works_ok) {
-               current_thread_info()->status &= ~TS_POLLING;
-               /*
-                * TS_POLLING-cleared state must be visible before we
-                * test NEED_RESCHED:
-                */
-               smp_mb();
+       cpu_clear(cpu, cpu_callout_map);
+       cpu_clear(cpu, cpu_callin_map);
 
-               if (!need_resched())
-                       safe_halt();    /* enables interrupts racelessly */
-               else
-                       local_irq_enable();
-               current_thread_info()->status |= TS_POLLING;
-       } else {
-               local_irq_enable();
-               /* loop is done by the caller */
-               cpu_relax();
-       }
+       numa_remove_cpu(cpu);
 }
-#ifdef CONFIG_APM_MODULE
-EXPORT_SYMBOL(default_idle);
-#endif
 
-#ifdef CONFIG_HOTPLUG_CPU
-#include <asm/nmi.h>
 /* We don't actually take CPU down, just spin without interrupts. */
 static inline void play_dead(void)
 {
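
The default_idle() removed above relies on a subtle ordering: the CPU clears TS_POLLING, issues a full barrier with smp_mb(), and only then re-checks need_resched() before safe_halt(), so that a waker which still sees TS_POLLING may safely skip the reschedule kick without the idle CPU sleeping through new work. Below is a minimal userspace sketch of that pattern using C11 atomics; the polling/need_work flags and the idle_once()/toy_wake() helpers are invented for illustration and are not kernel interfaces.

/*
 * Userspace analogue of the TS_POLLING / smp_mb() / need_resched()
 * ordering in the removed default_idle().  All names here (polling,
 * need_work, idle_once, toy_wake) are invented for this sketch.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool polling   = true;   /* plays the role of TS_POLLING       */
static atomic_bool need_work = false;  /* plays the role of TIF_NEED_RESCHED */

static void idle_once(void)
{
        /* Stop advertising that we poll for work... */
        atomic_store_explicit(&polling, false, memory_order_relaxed);

        /*
         * ...and make that store visible before the re-check, mirroring
         * the smp_mb() between clearing TS_POLLING and need_resched().
         */
        atomic_thread_fence(memory_order_seq_cst);

        if (!atomic_load_explicit(&need_work, memory_order_relaxed))
                puts("idler: no work pending, would safe_halt() here");
        else
                puts("idler: work slipped in, skip the halt");

        /* Back to polling mode, as default_idle() restores TS_POLLING. */
        atomic_store_explicit(&polling, true, memory_order_relaxed);
}

/* A waker queues work, then kicks only if the idler stopped polling. */
static void toy_wake(void)
{
        atomic_store_explicit(&need_work, true, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);
        if (!atomic_load_explicit(&polling, memory_order_relaxed))
                puts("waker: idler not polling, would send a reschedule kick");
        else
                puts("waker: idler still polling, no kick needed");
}

int main(void)
{
        idle_once();   /* nothing pending: would halt          */
        toy_wake();    /* queue work for the next pass         */
        idle_once();   /* sees the work and skips the halt     */
        return 0;
}

Without the fence, the idler's final check could be reordered before the flag clear, and both sides could conclude the other will act, which is exactly the lost-wakeup race the "TS_POLLING-cleared state must be visible" comment warns about.
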
@@ -328,6 +290,7 @@ void flush_thread(void)
        /*
         * Forget coprocessor state..
         */
+       tsk->fpu_counter = 0;
        clear_fpu(tsk);
        clear_used_math();
 }
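
The tsk->fpu_counter = 0 added to flush_thread() above resets the heuristic that the __switch_to() hunk below gates with tsk_used_math(next_p) && next_p->fpu_counter > 5: restore FPU state eagerly only for tasks that used it in recent timeslices, and never for a task with no math state at all (where math_state_restore() could sleep). A toy model of that decision is sketched here; the struct and helper names are made up for illustration and are not the kernel's definitions.

/*
 * Toy model of the lazy-FPU restore heuristic.  struct toy_task,
 * toy_restore_fpu() and toy_switch_in() are invented for this sketch.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_task {
        const char *name;
        bool        used_math;    /* has the task ever touched the FPU?       */
        int         fpu_counter;  /* consecutive timeslices that used the FPU */
};

/* Stand-in for math_state_restore(): just report the eager restore. */
static void toy_restore_fpu(const struct toy_task *t)
{
        printf("%s: restore FPU state eagerly\n", t->name);
}

/* Decision made for the incoming task at context-switch time. */
static void toy_switch_in(const struct toy_task *next)
{
        /*
         * Restore eagerly only if the task has FPU state at all and has
         * used it for more than 5 recent timeslices; otherwise leave the
         * restore to the first FPU trap, as the patched __switch_to() does.
         */
        if (next->used_math && next->fpu_counter > 5)
                toy_restore_fpu(next);
        else
                printf("%s: defer restore to the device-not-available trap\n",
                       next->name);
}

int main(void)
{
        struct toy_task heavy = { "fpu-heavy", true,  9 };
        struct toy_task fresh = { "post-exec", false, 0 };  /* after flush_thread() */

        toy_switch_in(&heavy);  /* eager restore                 */
        toy_switch_in(&fresh);  /* defer: counter reset by exec  */
        return 0;
}
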
@@ -644,8 +607,11 @@ struct task_struct * __switch_to(struct task_struct *prev_p, struct task_struct
        /* If the task has used fpu the last 5 timeslices, just do a full
         * restore of the math state immediately to avoid the trap; the
         * chances of needing FPU soon are obviously high now
+        *
+        * tsk_used_math() checks prevent calling math_state_restore(),
+        * which can sleep in the case of !tsk_used_math()
         */
-       if (next_p->fpu_counter > 5)
+       if (tsk_used_math(next_p) && next_p->fpu_counter > 5)
                math_state_restore();
 
        /*