x86: fix typo in step.c
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 2b9db93710607bee99b82baf86a933d2222762f4..be3c7a299f02541cb8f1e03797f737230dcca0ce 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -23,7 +23,6 @@
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/user.h>
-#include <linux/a.out.h>
 #include <linux/interrupt.h>
 #include <linux/utsname.h>
 #include <linux/delay.h>
@@ -198,6 +197,9 @@ void cpu_idle(void)
                        rmb();
                        idle = pm_idle;
 
+                       if (rcu_pending(cpu))
+                               rcu_check_callbacks(cpu, 0);
+
                        if (!idle)
                                idle = default_idle;
 
@@ -248,7 +250,7 @@ void cpu_idle_wait(void)
                 * because it has nothing to do.
                 * Give all the remaining CPUs a kick.
                 */
-               smp_call_function_mask(map, do_nothing, 0, 0);
+               smp_call_function_mask(map, do_nothing, NULL, 0);
        } while (!cpus_empty(map));
 
        set_cpus_allowed(current, tmp);
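
The third parameter of smp_call_function_mask() is the void *info payload handed to the callback, so NULL is the accurate spelling of "no payload" (and avoids sparse's plain-integer-as-NULL-pointer warning), where the literal 0 merely happened to convert. A minimal sketch of the calling pattern; the prototype and the kick_idle_cpus() wrapper are assumptions for illustration, not taken from this patch:

/*
 * Assumed 2.6.2x-era prototype:
 *   int smp_call_function_mask(cpumask_t mask, void (*func)(void *info),
 *                              void *info, int wait);
 */
static void do_nothing(void *unused)
{
	/* Runs on each CPU in the mask purely to kick it out of idle. */
}

static void kick_idle_cpus(cpumask_t map)
{
	/* The callback needs no payload, so pass NULL rather than 0. */
	smp_call_function_mask(map, do_nothing, NULL, 0);
}
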
@@ -282,19 +284,37 @@ static void mwait_idle(void)
        mwait_idle_with_hints(0, 0);
 }
 
+static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
+{
+       if (force_mwait)
+               return 1;
+       /* Any C1 states supported? */
+       return c->cpuid_level >= 5 && ((cpuid_edx(5) >> 4) & 0xf) > 0;
+}
+
 void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
 {
-       if (cpu_has(c, X86_FEATURE_MWAIT)) {
-               printk("monitor/mwait feature present.\n");
+       static int selected;
+
+       if (selected)
+               return;
+#ifdef CONFIG_X86_SMP
+       if (pm_idle == poll_idle && smp_num_siblings > 1) {
+               printk(KERN_WARNING "WARNING: polling idle and HT enabled,"
+                       " performance may degrade.\n");
+       }
+#endif
+       if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
                /*
                 * Skip, if setup has overridden idle.
                 * One CPU supports mwait => All CPUs support mwait
                 */
                if (!pm_idle) {
-                       printk("using mwait in idle threads.\n");
+                       printk(KERN_INFO "using mwait in idle threads.\n");
                        pm_idle = mwait_idle;
                }
        }
+       selected = 1;
 }
 
 static int __init idle_setup(char *str)
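
The new mwait_usable() helper gates MWAIT idle on CPUID leaf 5: EDX packs the number of MWAIT sub-C-states per C-state in 4-bit fields, with bits 7:4 describing C1, so (cpuid_edx(5) >> 4) & 0xf asks whether the CPU can actually enter C1 via MWAIT. The same probe can be reproduced from user space; the program below is a hypothetical standalone sketch built on GCC's <cpuid.h>, not code from this patch:

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Leaf 5 (MONITOR/MWAIT) must be reported by the CPU at all. */
	if (__get_cpuid_max(0, NULL) < 5) {
		puts("CPUID leaf 5 not available; MWAIT idle not usable");
		return 1;
	}

	__cpuid_count(5, 0, eax, ebx, ecx, edx);

	/* Bits 7:4 of EDX: number of C1 sub-C-states - the same field
	 * mwait_usable() tests in the kernel. */
	unsigned int c1 = (edx >> 4) & 0xf;
	printf("C1 MWAIT sub-states: %u -> mwait %s usable for idle\n",
	       c1, c1 ? "looks" : "does not look");
	return 0;
}

force_mwait (set by the "idle=mwait" boot option handled below) still short-circuits this check, mirroring what the hunk shows at the top of mwait_usable().
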
@@ -302,10 +322,6 @@ static int __init idle_setup(char *str)
        if (!strcmp(str, "poll")) {
                printk("using polling idle threads.\n");
                pm_idle = poll_idle;
-#ifdef CONFIG_X86_SMP
-               if (smp_num_siblings > 1)
-                       printk("WARNING: polling idle and HT enabled, performance may degrade.\n");
-#endif
        } else if (!strcmp(str, "mwait"))
                force_mwait = 1;
        else
@@ -379,7 +395,7 @@ void __show_registers(struct pt_regs *regs, int all)
 void show_regs(struct pt_regs *regs)
 {
        __show_registers(regs, 1);
-       show_trace(NULL, regs, &regs->sp);
+       show_trace(NULL, regs, &regs->sp, regs->bp);
 }
 
 /*
@@ -522,73 +538,8 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
        return err;
 }
 
-/*
- * fill in the user structure for a core dump..
- */
-void dump_thread(struct pt_regs * regs, struct user * dump)
-{
-       u16 gs;
-
-/* changed the size calculations - should hopefully work better. lbt */
-       dump->magic = CMAGIC;
-       dump->start_code = 0;
-       dump->start_stack = regs->sp & ~(PAGE_SIZE - 1);
-       dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
-       dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT;
-       dump->u_dsize -= dump->u_tsize;
-       dump->u_ssize = 0;
-       dump->u_debugreg[0] = current->thread.debugreg0;
-       dump->u_debugreg[1] = current->thread.debugreg1;
-       dump->u_debugreg[2] = current->thread.debugreg2;
-       dump->u_debugreg[3] = current->thread.debugreg3;
-       dump->u_debugreg[4] = 0;
-       dump->u_debugreg[5] = 0;
-       dump->u_debugreg[6] = current->thread.debugreg6;
-       dump->u_debugreg[7] = current->thread.debugreg7;
-
-       if (dump->start_stack < TASK_SIZE)
-               dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;
-
-       dump->regs.bx = regs->bx;
-       dump->regs.cx = regs->cx;
-       dump->regs.dx = regs->dx;
-       dump->regs.si = regs->si;
-       dump->regs.di = regs->di;
-       dump->regs.bp = regs->bp;
-       dump->regs.ax = regs->ax;
-       dump->regs.ds = (u16)regs->ds;
-       dump->regs.es = (u16)regs->es;
-       dump->regs.fs = (u16)regs->fs;
-       savesegment(gs,gs);
-       dump->regs.orig_ax = regs->orig_ax;
-       dump->regs.ip = regs->ip;
-       dump->regs.cs = (u16)regs->cs;
-       dump->regs.flags = regs->flags;
-       dump->regs.sp = regs->sp;
-       dump->regs.ss = (u16)regs->ss;
-
-       dump->u_fpvalid = dump_fpu (regs, &dump->i387);
-}
-EXPORT_SYMBOL(dump_thread);
-
-/*
- * Capture the user space registers if the task is not running (in user space)
- */
-int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
-{
-       struct pt_regs ptregs = *task_pt_regs(tsk);
-       ptregs.cs &= 0xffff;
-       ptregs.ds &= 0xffff;
-       ptregs.es &= 0xffff;
-       ptregs.ss &= 0xffff;
-
-       elf_core_copy_regs(regs, &ptregs);
-
-       return 1;
-}
-
 #ifdef CONFIG_SECCOMP
-void hard_disable_TSC(void)
+static void hard_disable_TSC(void)
 {
        write_cr4(read_cr4() | X86_CR4_TSD);
 }
@@ -603,7 +554,7 @@ void disable_TSC(void)
                hard_disable_TSC();
        preempt_enable();
 }
-void hard_enable_TSC(void)
+static void hard_enable_TSC(void)
 {
        write_cr4(read_cr4() & ~X86_CR4_TSD);
 }
@@ -652,11 +603,13 @@ __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
        }
 #endif
 
+#ifdef X86_BTS
        if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS))
                ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS);
 
        if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS))
                ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES);
+#endif
 
 
        if (!test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
@@ -716,7 +669,7 @@ __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
  * the task-switch, and shows up in ret_from_fork in entry.S,
  * for example.
  */
-struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+struct task_struct * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 {
        struct thread_struct *prev = &prev_p->thread,
                                 *next = &next_p->thread;