signal/timer/event: signalfd wire up x86 arches
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 7845d480c29359a166314cad7d2c7781aa11a3e3..d76d9bc33b30c02d2aa02198270295e4cd57c681 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -21,7 +21,6 @@
 #include <linux/mm.h>
 #include <linux/elfcore.h>
 #include <linux/smp.h>
-#include <linux/smp_lock.h>
 #include <linux/stddef.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
@@ -38,6 +37,8 @@
 #include <linux/ptrace.h>
 #include <linux/random.h>
 #include <linux/personality.h>
+#include <linux/tick.h>
+#include <linux/percpu.h>
 
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
@@ -48,7 +49,6 @@
 #include <asm/i387.h>
 #include <asm/desc.h>
 #include <asm/vm86.h>
-#include <asm/idle.h>
 #ifdef CONFIG_MATH_EMULATION
 #include <asm/math_emu.h>
 #endif
@@ -57,7 +57,6 @@
 
 #include <asm/tlbflush.h>
 #include <asm/cpu.h>
-#include <asm/pda.h>
 
 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
 
@@ -66,6 +65,12 @@ static int hlt_counter;
 unsigned long boot_option_idle_override = 0;
 EXPORT_SYMBOL(boot_option_idle_override);
 
+DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
+EXPORT_PER_CPU_SYMBOL(current_task);
+
+DEFINE_PER_CPU(int, cpu_number);
+EXPORT_PER_CPU_SYMBOL(cpu_number);
+
 /*
  * Return saved PC of a blocked thread.
  */
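
Note: the two per-CPU variables added above replace the old i386 PDA. A minimal sketch of how such per-CPU slots are consumed, assuming the x86_read_percpu() counterpart to the x86_write_percpu() accessor that the last hunk of this diff uses; the helper names here are illustrative, not this patch's exact definitions:

/* Sketch only: consuming the per-CPU slots once the PDA is gone.
 * Assumes kernel context (<linux/percpu.h>, <linux/sched.h>). */
#define raw_smp_processor_id()	(x86_read_percpu(cpu_number))

static inline struct task_struct *get_current_sketch(void)
{
	return x86_read_percpu(current_task);	/* this CPU's task */
}

static inline void set_current_sketch(struct task_struct *next_p)
{
	x86_write_percpu(current_task, next_p);	/* context-switch side */
}
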
@@ -81,42 +86,6 @@ void (*pm_idle)(void);
 EXPORT_SYMBOL(pm_idle);
 static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
 
-static ATOMIC_NOTIFIER_HEAD(idle_notifier);
-
-void idle_notifier_register(struct notifier_block *n)
-{
-       atomic_notifier_chain_register(&idle_notifier, n);
-}
-
-void idle_notifier_unregister(struct notifier_block *n)
-{
-       atomic_notifier_chain_unregister(&idle_notifier, n);
-}
-
-static DEFINE_PER_CPU(volatile unsigned long, idle_state);
-
-void enter_idle(void)
-{
-       /* needs to be atomic w.r.t. interrupts, not against other CPUs */
-       __set_bit(0, &__get_cpu_var(idle_state));
-       atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
-}
-
-static void __exit_idle(void)
-{
-       /* needs to be atomic w.r.t. interrupts, not against other CPUs */
-       if (__test_and_clear_bit(0, &__get_cpu_var(idle_state)) == 0)
-               return;
-       atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
-}
-
-void exit_idle(void)
-{
-       if (current->pid)
-               return;
-       __exit_idle();
-}
-
 void disable_hlt(void)
 {
        hlt_counter++;
@@ -167,7 +136,6 @@ EXPORT_SYMBOL(default_idle);
  */
 static void poll_idle (void)
 {
-       local_irq_enable();
        cpu_relax();
 }
 
@@ -211,6 +179,7 @@ void cpu_idle(void)
 
        /* endless idle loop with no priority at all */
        while (1) {
+               tick_nohz_stop_sched_tick();
                while (!need_resched()) {
                        void (*idle)(void);
 
@@ -227,17 +196,9 @@ void cpu_idle(void)
                                play_dead();
 
                        __get_cpu_var(irq_stat).idle_timestamp = jiffies;
-
-                       /*
-                        * Idle routines should keep interrupts disabled
-                        * from here on, until they go to idle.
-                        * Otherwise, idle callbacks can misfire.
-                        */
-                       local_irq_disable();
-                       enter_idle();
                        idle();
-                       __exit_idle();
                }
+               tick_nohz_restart_sched_tick();
                preempt_enable_no_resched();
                schedule();
                preempt_disable();
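
Note: the net effect of this hunk is that the idle notifiers and the interrupts-disabled entry protocol are gone (idle routines are now entered with interrupts enabled, which is also why poll_idle() above lost its local_irq_enable()), and the NO_HZ hooks bracket the whole idle residency rather than a single halt. A condensed sketch of the resulting loop shape, assuming CONFIG_NO_HZ (without it the tick_nohz_* calls compile to no-ops):

/* Sketch of the dynticks idle loop after this change. */
void cpu_idle_sketch(void)
{
	while (1) {
		tick_nohz_stop_sched_tick();	/* park the periodic tick */
		while (!need_resched()) {
			void (*idle)(void) = pm_idle;

			if (!idle)
				idle = default_idle;
			idle();			/* hlt, mwait, or poll */
		}
		tick_nohz_restart_sched_tick();	/* tick back on before scheduling */
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
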
@@ -290,11 +251,7 @@ void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
                __monitor((void *)&current_thread_info()->flags, 0, 0);
                smp_mb();
                if (!need_resched())
-                       __sti_mwait(eax, ecx);
-               else
-                       local_irq_enable();
-       } else {
-               local_irq_enable();
+                       __mwait(eax, ecx);
        }
 }
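
Note: with the interrupts-disabled protocol gone, mwait_idle_with_hints() reduces to the plain monitor/mwait idiom. The same pattern in isolation, with comments on why the re-check is sufficient (a sketch of the idiom, not the whole function):

static void mwait_sketch(unsigned long eax, unsigned long ecx)
{
	/* Arm the monitor on this task's flags word first ... */
	__monitor((void *)&current_thread_info()->flags, 0, 0);
	smp_mb();	/* order the arming against the flag test below */
	/*
	 * ... then re-check: a TIF_NEED_RESCHED write that lands after
	 * __monitor() breaks the mwait out, and one that landed before
	 * it is caught here, so no wakeup is lost in the window.
	 */
	if (!need_resched())
		__mwait(eax, ecx);	/* idle until the monitored line is written */
}
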
 
@@ -320,25 +277,24 @@ void __devinit select_idle_routine(const struct cpuinfo_x86 *c)
        }
 }
 
-static int __init idle_setup (char *str)
+static int __init idle_setup(char *str)
 {
-       if (!strncmp(str, "poll", 4)) {
+       if (!strcmp(str, "poll")) {
                printk("using polling idle threads.\n");
                pm_idle = poll_idle;
 #ifdef CONFIG_X86_SMP
                if (smp_num_siblings > 1)
                        printk("WARNING: polling idle and HT enabled, performance may degrade.\n");
 #endif
-       } else if (!strncmp(str, "halt", 4)) {
-               printk("using halt in idle threads.\n");
-               pm_idle = default_idle;
-       }
+       } else if (!strcmp(str, "mwait"))
+               force_mwait = 1;
+       else
+               return -1;
 
        boot_option_idle_override = 1;
-       return 1;
+       return 0;
 }
-
-__setup("idle=", idle_setup);
+early_param("idle", idle_setup);
 
 void show_regs(struct pt_regs * regs)
 {
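
Note: two contracts change with the move from __setup("idle=", ...) to early_param("idle", ...): the handler now runs from parse_early_param(), early enough for setup code that selects the idle routine, and it returns 0 on success (non-zero reporting a bad value) instead of __setup()'s 1-for-consumed convention. The strncmp-to-strcmp tightening also makes the match exact, so e.g. "idle=pollfoo" no longer selects polling. A sketch of the pattern with a deliberately hypothetical option name:

/* Hypothetical "quux=" boot option, for illustration only. */
static int quux_enabled __initdata;

static int __init quux_setup(char *str)
{
	if (!str || strcmp(str, "on"))
		return -EINVAL;		/* missing or unrecognized value */
	quux_enabled = 1;
	return 0;			/* 0 == handled successfully */
}
early_param("quux", quux_setup);
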
@@ -391,7 +347,7 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 
        regs.xds = __USER_DS;
        regs.xes = __USER_DS;
-       regs.xfs = __KERNEL_PDA;
+       regs.xfs = __KERNEL_PERCPU;
        regs.orig_eax = -1;
        regs.eip = (unsigned long) kernel_thread_helper;
        regs.xcs = __KERNEL_CS | get_kernel_rpl();
@@ -424,7 +380,7 @@ void exit_thread(void)
                t->io_bitmap_max = 0;
                tss->io_bitmap_owner = NULL;
                tss->io_bitmap_max = 0;
-               tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
+               tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
                put_cpu();
        }
 }
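
Note: the tss->io_bitmap_base to tss->x86_tss.io_bitmap_base renames, here and in __switch_to_xtra() below, imply that struct tss_struct was split so the CPU-defined fields live in an embedded hardware-TSS structure with the software bookkeeping beside it. Roughly the following shape, reconstructed from the accesses in this diff (struct and field names other than x86_tss and the io_bitmap_* members are assumptions, not copied from the real header):

struct hw_tss_sketch {
	/* hardware-defined TSS layout; the part the CPU actually reads */
	unsigned short io_bitmap_base;	/* INVALID_IO_BITMAP_OFFSET disables it */
	/* ... esp0/ss0 and the remaining architectural fields ... */
};

struct tss_struct_sketch {
	struct hw_tss_sketch x86_tss;	/* what the TSS descriptor points at */
	/* software-only bookkeeping, invisible to the CPU: */
	unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
	struct thread_struct *io_bitmap_owner;
	unsigned int io_bitmap_max;
};
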
@@ -603,7 +559,7 @@ static noinline void __switch_to_xtra(struct task_struct *next_p,
                 * Disable the bitmap via an invalid offset. We still cache
                 * the previous bitmap owner and the IO bitmap contents:
                 */
-               tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
+               tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
                return;
        }
 
@@ -613,7 +569,7 @@ static noinline void __switch_to_xtra(struct task_struct *next_p,
                 * matches the next task, we don't have to do anything but
                 * to set a valid offset in the TSS:
                 */
-               tss->io_bitmap_base = IO_BITMAP_OFFSET;
+               tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
                return;
        }
        /*
@@ -625,7 +581,7 @@ static noinline void __switch_to_xtra(struct task_struct *next_p,
         * redundant copies when the currently switched task does not
         * perform any I/O during its timeslice.
         */
-       tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
+       tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
 }
 
 /*
@@ -760,7 +716,7 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
        if (prev->gs | next->gs)
                loadsegment(gs, next->gs);
 
-       write_pda(pcurrent, next_p);
+       x86_write_percpu(current_task, next_p);
 
        return prev_p;
 }