Merge branch 'linus' into x86/pebs
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index a4ad0d7ea621be0db78fc32d21c0a2a214a82d9f..91ffce47af8ec5401e8be094b39774f8dc843c57 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -56,15 +56,6 @@ asmlinkage extern void ret_from_fork(void);
 
 unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;
 
-unsigned long boot_option_idle_override = 0;
-EXPORT_SYMBOL(boot_option_idle_override);
-
-/*
- * Powermanagement idle function, if any..
- */
-void (*pm_idle)(void);
-EXPORT_SYMBOL(pm_idle);
-
 static ATOMIC_NOTIFIER_HEAD(idle_notifier);
 
 void idle_notifier_register(struct notifier_block *n)
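
This first hunk removes the 64-bit copies of boot_option_idle_override and the pm_idle hook. In this stretch of the tree the idle-routine plumbing was being unified, so these definitions presumably now live once in the shared arch/x86/kernel/process.c rather than per-bitness. The hook itself is just a function pointer that idle-selection code (ACPI, the idle= boot option, etc.) repoints; a minimal sketch of the pattern, not the exact contents of the shared file, with select_idle_routine_sketch() a hypothetical stand-in:

/*
 * Sketch of the pm_idle indirection.  After unification, setup code
 * guarantees pm_idle is non-NULL before the idle loop runs, which is
 * why the NULL check disappears from cpu_idle() below.
 */
void (*pm_idle)(void);

static void select_idle_routine_sketch(void)
{
        if (!pm_idle)
                pm_idle = default_idle;         /* HLT-based fallback */
}
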
@@ -94,25 +85,6 @@ void exit_idle(void)
        __exit_idle();
 }
 
-/*
- * We use this if we don't have any better
- * idle routine..
- */
-void default_idle(void)
-{
-       current_thread_info()->status &= ~TS_POLLING;
-       /*
-        * TS_POLLING-cleared state must be visible before we
-        * test NEED_RESCHED:
-        */
-       smp_mb();
-       if (!need_resched())
-               safe_halt();    /* enables interrupts racelessly */
-       else
-               local_irq_enable();
-       current_thread_info()->status |= TS_POLLING;
-}
-
 #ifdef CONFIG_HOTPLUG_CPU
 DECLARE_PER_CPU(int, cpu_state);
 
@@ -148,14 +120,11 @@ void cpu_idle(void)
        current_thread_info()->status |= TS_POLLING;
        /* endless idle loop with no priority at all */
        while (1) {
-               tick_nohz_stop_sched_tick();
+               tick_nohz_stop_sched_tick(1);
                while (!need_resched()) {
-                       void (*idle)(void);
 
                        rmb();
-                       idle = pm_idle;
-                       if (!idle)
-                               idle = default_idle;
+
                        if (cpu_is_offline(smp_processor_id()))
                                play_dead();
                        /*
@@ -165,7 +134,10 @@ void cpu_idle(void)
                         */
                        local_irq_disable();
                        enter_idle();
-                       idle();
+                       /* Don't trace irqs off for idle */
+                       stop_critical_timings();
+                       pm_idle();
+                       start_critical_timings();
                        /* In many cases the interrupt that ended idle
                           has already called exit_idle. But some idle
                           loops can be woken up without interrupt. */
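
Two things change inside cpu_idle(). First, tick_nohz_stop_sched_tick() grew an inidle argument around this time; passing 1 asserts that the caller really is the idle loop, so the tick may legitimately be stopped. Second, with pm_idle guaranteed non-NULL, the local fallback dance is gone, and the call is bracketed by stop/start_critical_timings() so the irqsoff latency tracer does not report time halted in idle as one giant interrupts-off section. The resulting inner loop, condensed from the two hunks above:

while (!need_resched()) {
        rmb();                          /* re-sample flags after wakeup */
        if (cpu_is_offline(smp_processor_id()))
                play_dead();
        local_irq_disable();
        enter_idle();
        stop_critical_timings();        /* don't trace irqs-off across HLT */
        pm_idle();                      /* non-NULL by construction now */
        start_critical_timings();
        /* __exit_idle() handling follows, as in the hunk */
}
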
@@ -374,10 +346,10 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
        p->thread.fs = me->thread.fs;
        p->thread.gs = me->thread.gs;
 
-       asm("mov %%gs,%0" : "=m" (p->thread.gsindex));
-       asm("mov %%fs,%0" : "=m" (p->thread.fsindex));
-       asm("mov %%es,%0" : "=m" (p->thread.es));
-       asm("mov %%ds,%0" : "=m" (p->thread.ds));
+       savesegment(gs, p->thread.gsindex);
+       savesegment(fs, p->thread.fsindex);
+       savesegment(es, p->thread.es);
+       savesegment(ds, p->thread.ds);
 
        if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
                p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
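
The four open-coded selector reads become savesegment() calls. The macro reads the selector into a register operand, lets the compiler pick the destination, and declares a "memory" clobber so it cannot be reordered across the surrounding context-switch code. Approximately, from include/asm-x86/system.h of this era (quoted from memory, so treat as a sketch):

/*
 * Save a segment register to a variable.  Approximate definition;
 * the authoritative one is in include/asm-x86/system.h.
 */
#define savesegment(seg, value) \
        asm("mov %%" #seg ",%0" : "=r" (value) : : "memory")
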
@@ -416,7 +388,9 @@ out:
 void
 start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
 {
-       asm volatile("movl %0, %%fs; movl %0, %%es; movl %0, %%ds" :: "r"(0));
+       loadsegment(fs, 0);
+       loadsegment(es, 0);
+       loadsegment(ds, 0);
        load_gs_index(0);
        regs->ip                = new_ip;
        regs->sp                = new_sp;
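
start_thread() likewise trades one multi-target asm for a loadsegment() per register. Unlike a bare mov into a segment register, loadsegment() carries an exception-table fixup: if the selector turns out to be invalid, the faulting mov is patched up to load 0 rather than oopsing. A rough rendering from memory (the real macro lives in include/asm-x86/system.h):

/* Rough shape of loadsegment(); exception-table details abbreviated. */
#define loadsegment(seg, value)                                 \
        asm volatile("\n1:\t movl %k0,%%" #seg "\n"             \
                     "2:\n"                                     \
                     ".section .fixup,\"ax\"\n"                 \
                     "3:\t movl %k1,%%" #seg "\n\t jmp 2b\n"    \
                     ".previous\n"                              \
                     _ASM_EXTABLE(1b, 3b)                       \
                     : : "r" (value), "r" (0))

%gs stays special: it is loaded through load_gs_index(), an entry_64.S helper that wraps the mov in swapgs pairs so the kernel's own GS base (the PDA) is not destroyed.
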
@@ -585,10 +559,11 @@ static inline void __switch_to_xtra(struct task_struct *prev_p,
 struct task_struct *
 __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 {
-       struct thread_struct *prev = &prev_p->thread,
-                                *next = &next_p->thread;
+       struct thread_struct *prev = &prev_p->thread;
+       struct thread_struct *next = &next_p->thread;
        int cpu = smp_processor_id();
        struct tss_struct *tss = &per_cpu(init_tss, cpu);
+       unsigned fsindex, gsindex;
 
        /* we're going to use this soon, after a few expensive things */
        if (next_p->fpu_counter>5)
@@ -603,52 +578,64 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
         * Switch DS and ES.
         * This won't pick up thread selector changes, but I guess that is ok.
         */
-       asm volatile("mov %%es,%0" : "=m" (prev->es));
+       savesegment(es, prev->es);
        if (unlikely(next->es | prev->es))
                loadsegment(es, next->es); 
-       
-       asm volatile ("mov %%ds,%0" : "=m" (prev->ds));
+
+       savesegment(ds, prev->ds);
        if (unlikely(next->ds | prev->ds))
                loadsegment(ds, next->ds);
 
+
+       /* We must save %fs and %gs before load_TLS() because
+        * %fs and %gs may be cleared by load_TLS().
+        *
+        * (e.g. xen_load_tls())
+        */
+       savesegment(fs, fsindex);
+       savesegment(gs, gsindex);
+
        load_TLS(next, cpu);
 
+       /*
+        * Leave lazy mode, flushing any hypercalls made here.
+        * This must be done before restoring TLS segments so
+        * the GDT and LDT are properly updated, and must be
+        * done before math_state_restore, so the TS bit is up
+        * to date.
+        */
+       arch_leave_lazy_cpu_mode();
+
        /* 
         * Switch FS and GS.
+        *
+        * Segment register != 0 always requires a reload.  Also
+        * reload when it has changed.  When prev process used 64bit
+        * base always reload to avoid an information leak.
         */
-       { 
-               unsigned fsindex;
-               asm volatile("movl %%fs,%0" : "=r" (fsindex)); 
-               /* segment register != 0 always requires a reload. 
-                  also reload when it has changed. 
-                  when prev process used 64bit base always reload
-                  to avoid an information leak. */
-               if (unlikely(fsindex | next->fsindex | prev->fs)) {
-                       loadsegment(fs, next->fsindex);
-                       /* check if the user used a selector != 0
-                        * if yes clear 64bit base, since overloaded base
-                         * is always mapped to the Null selector
-                         */
-                       if (fsindex)
+       if (unlikely(fsindex | next->fsindex | prev->fs)) {
+               loadsegment(fs, next->fsindex);
+               /* 
+                * Check if the user used a selector != 0; if yes
+                *  clear 64bit base, since overloaded base is always
+                *  mapped to the Null selector
+                */
+               if (fsindex)
                        prev->fs = 0;                           
-               }
-               /* when next process has a 64bit base use it */
-               if (next->fs) 
-                       wrmsrl(MSR_FS_BASE, next->fs); 
-               prev->fsindex = fsindex;
        }
-       { 
-               unsigned gsindex;
-               asm volatile("movl %%gs,%0" : "=r" (gsindex)); 
-               if (unlikely(gsindex | next->gsindex | prev->gs)) {
-                       load_gs_index(next->gsindex);
-                       if (gsindex)
+       /* when next process has a 64bit base use it */
+       if (next->fs)
+               wrmsrl(MSR_FS_BASE, next->fs);
+       prev->fsindex = fsindex;
+
+       if (unlikely(gsindex | next->gsindex | prev->gs)) {
+               load_gs_index(next->gsindex);
+               if (gsindex)
                        prev->gs = 0;                           
-               }
-               if (next->gs)
-                       wrmsrl(MSR_KERNEL_GS_BASE, next->gs); 
-               prev->gsindex = gsindex;
        }
+       if (next->gs)
+               wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
+       prev->gsindex = gsindex;
 
        /* Must be after DS reload */
        unlazy_fpu(prev_p);
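
The big __switch_to() hunk is mostly de-nesting, but it contains the one behavioral fix of this diff: the outgoing task's %fs/%gs selectors are now read before load_TLS(). Under a paravirt hypervisor (the comment names xen_load_tls()) rewriting the GDT's TLS slots can implicitly zero %fs/%gs, so reading them afterwards would record null selectors and carry the wrong state into prev. The reload policy itself is unchanged; condensed for the FS side (GS is symmetric, via load_gs_index() and MSR_KERNEL_GS_BASE):

savesegment(fs, fsindex);               /* BEFORE load_TLS(): Xen may clear it */
load_TLS(next, cpu);
arch_leave_lazy_cpu_mode();             /* flush hypercalls before reloads */

if (unlikely(fsindex | next->fsindex | prev->fs)) {
        loadsegment(fs, next->fsindex); /* any non-null state forces a reload */
        if (fsindex)
                prev->fs = 0;           /* live selector overrides 64-bit base */
}
if (next->fs)
        wrmsrl(MSR_FS_BASE, next->fs);  /* restore next's 64-bit base, if any */
prev->fsindex = fsindex;
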
@@ -661,7 +648,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
        write_pda(pcurrent, next_p); 
 
        write_pda(kernelstack,
-       (unsigned long)task_stack_page(next_p) + THREAD_SIZE - PDA_STACKOFFSET);
+                 (unsigned long)task_stack_page(next_p) +
+                 THREAD_SIZE - PDA_STACKOFFSET);
 #ifdef CONFIG_CC_STACKPROTECTOR
        write_pda(stack_canary, next_p->stack_canary);
        /*
@@ -820,7 +808,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
                        set_32bit_tls(task, FS_TLS, addr);
                        if (doit) {
                                load_TLS(&task->thread, cpu);
-                               asm volatile("movl %0,%%fs" :: "r"(FS_TLS_SEL));
+                               loadsegment(fs, FS_TLS_SEL);
                        }
                        task->thread.fsindex = FS_TLS_SEL;
                        task->thread.fs = 0;
@@ -830,7 +818,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
                        if (doit) {
                                /* set the selector to 0 to not confuse
                                   __switch_to */
-                               asm volatile("movl %0,%%fs" :: "r" (0));
+                               loadsegment(fs, 0);
                                ret = checking_wrmsrl(MSR_FS_BASE, addr);
                        }
                }
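
Both arms of ARCH_SET_FS get the same asm-to-helper conversion. The two arms exist because a base that fits in 32 bits can live in a GDT TLS slot (cheaper to switch), while a full 64-bit base must go through MSR_FS_BASE with the selector zeroed first, so that __switch_to()'s selector-nonzero rule above does not discard it. Condensed, with error handling omitted:

/* Condensed from do_arch_prctl(task, ARCH_SET_FS, addr). */
if (addr <= 0xffffffff) {
        set_32bit_tls(task, FS_TLS, addr);      /* base fits a GDT TLS slot */
        if (doit) {
                load_TLS(&task->thread, cpu);
                loadsegment(fs, FS_TLS_SEL);
        }
        task->thread.fsindex = FS_TLS_SEL;
        task->thread.fs = 0;
} else {
        task->thread.fsindex = 0;               /* null selector ... */
        task->thread.fs = addr;                 /* ... base via MSR instead */
        if (doit) {
                loadsegment(fs, 0);             /* keep __switch_to() honest */
                ret = checking_wrmsrl(MSR_FS_BASE, addr);
        }
}
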
@@ -853,7 +841,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
                if (task->thread.gsindex == GS_TLS_SEL)
                        base = read_32bit_tls(task, GS_TLS);
                else if (doit) {
-                       asm("movl %%gs,%0" : "=r" (gsindex));
+                       savesegment(gs, gsindex);
                        if (gsindex)
                                rdmsrl(MSR_KERNEL_GS_BASE, base);
                        else
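
The diff ends inside ARCH_GET_GS, which reverses the same split when reporting the base back to userspace: a live nonzero selector means the base is in MSR_KERNEL_GS_BASE, a null one means the saved thread.gs value is authoritative. For orientation, roughly how the case continues past the truncated else (reconstructed from the surrounding logic, so approximate):

/* Approximate continuation of the ARCH_GET_GS case shown above. */
else
        base = task->thread.gs;         /* null selector: saved base wins */
ret = put_user(base, (unsigned long __user *)addr);
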