diff --git a/kernel/sched.c b/kernel/sched.c
index 66d957227de9c847953961224828eff723e71057..7ffaabd64f893863bd485bd32b8f77e672cec9be 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -178,13 +178,6 @@ static unsigned int task_timeslice(task_t *p)
 #define task_hot(p, now, sd) ((long long) ((now) - (p)->last_ran)      \
                                < (long long) (sd)->cache_hot_time)
 
-void __put_task_struct_cb(struct rcu_head *rhp)
-{
-       __put_task_struct(container_of(rhp, struct task_struct, rcu));
-}
-
-EXPORT_SYMBOL_GPL(__put_task_struct_cb);
-
 /*
  * These are the runqueue data structures:
  */
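
The removed __put_task_struct_cb() was the RCU callback that freed a task_struct once a grace period had elapsed; this patch moves it out of sched.c. The idiom worth noting is container_of(), which recovers the enclosing structure from a pointer to its embedded rcu_head. Below is a minimal userspace sketch of that pattern, with hypothetical struct and function names and no real RCU machinery:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Generic container_of, as in the kernel: map a pointer to an
 * embedded member back to the structure containing it. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct rcu_head { struct rcu_head *next; };     /* stand-in */

struct task { int pid; struct rcu_head rcu; };

/* The callback receives only the embedded rcu_head; recover the
 * enclosing task and free it. */
static void free_task_cb(struct rcu_head *rhp)
{
        struct task *t = container_of(rhp, struct task, rcu);
        printf("freeing pid %d\n", t->pid);
        free(t);
}

int main(void)
{
        struct task *t = malloc(sizeof(*t));
        t->pid = 42;
        free_task_cb(&t->rcu);  /* a real kernel would go via call_rcu() */
        return 0;
}
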
@@ -244,6 +237,7 @@ struct runqueue {
 
        task_t *migration_thread;
        struct list_head migration_queue;
+       int cpu;
 #endif
 
 #ifdef CONFIG_SCHEDSTATS
@@ -713,12 +707,6 @@ static int recalc_task_prio(task_t *p, unsigned long long now)
                                p->sleep_avg = JIFFIES_TO_NS(MAX_SLEEP_AVG -
                                                DEF_TIMESLICE);
                } else {
-                       /*
-                        * The lower the sleep avg a task has the more
-                        * rapidly it will rise with sleep time.
-                        */
-                       sleep_time *= (MAX_BONUS - CURRENT_BONUS(p)) ? : 1;
-
                        /*
                         * Tasks waking from uninterruptible sleep are
                         * limited in their sleep_avg rise as they
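
The deleted line scaled a waking task's credited sleep_time by (MAX_BONUS - CURRENT_BONUS(p)), using GCC's binary "?:" extension to fall back to 1 when the factor would be 0, so tasks with a low sleep average climbed toward interactive status faster. A hedged sketch of that arithmetic with illustrative constants (current_bonus() is a stand-in, not the kernel's CURRENT_BONUS macro):

#include <stdio.h>

#define MAX_BONUS 10

/* Stand-in for CURRENT_BONUS(p): bonus already earned, 0..MAX_BONUS,
 * proportional to the task's sleep average. */
static int current_bonus(int sleep_avg_pct)
{
        return MAX_BONUS * sleep_avg_pct / 100;
}

int main(void)
{
        for (int pct = 0; pct <= 100; pct += 50) {
                long sleep_time = 1000;
                /* GCC's "x ?: y": use x unless it is 0, then y. */
                sleep_time *= (MAX_BONUS - current_bonus(pct)) ?: 1;
                /* prints 10000, 5000, 1000: low sleep_avg rises fastest */
                printf("sleep_avg %3d%% -> credited %ld\n", pct, sleep_time);
        }
        return 0;
}
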
@@ -1667,6 +1655,9 @@ unsigned long nr_iowait(void)
 /*
  * double_rq_lock - safely lock two runqueues
  *
+ * We must take them in cpu order to match code in
+ * dependent_sleeper and wake_dependent_sleeper.
+ *
  * Note this does not disable interrupts like task_rq_lock,
  * you need to do so manually before calling.
  */
@@ -1678,7 +1669,7 @@ static void double_rq_lock(runqueue_t *rq1, runqueue_t *rq2)
                spin_lock(&rq1->lock);
                __acquire(rq2->lock);   /* Fake it out ;) */
        } else {
-               if (rq1 < rq2) {
+               if (rq1->cpu < rq2->cpu) {
                        spin_lock(&rq1->lock);
                        spin_lock(&rq2->lock);
                } else {
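
Before this patch, double_rq_lock() ordered the two locks by runqueue pointer, while dependent_sleeper() and wake_dependent_sleeper() order them by CPU number; on machines where the two orders disagree, the mismatch could produce an ABBA deadlock. Hence the new rq->cpu field (added in the struct runqueue hunk above and initialized in sched_init() below). A self-contained pthread sketch of the rule, with simplified types:

#include <pthread.h>

struct rq {
        int cpu;
        pthread_mutex_t lock;
};

/* Always take the lower-numbered CPU's lock first: if every path
 * agrees on this order, two CPUs locking each other's runqueues
 * can never deadlock (no ABBA inversion). */
static void double_rq_lock(struct rq *a, struct rq *b)
{
        if (a == b) {
                pthread_mutex_lock(&a->lock);
        } else if (a->cpu < b->cpu) {
                pthread_mutex_lock(&a->lock);
                pthread_mutex_lock(&b->lock);
        } else {
                pthread_mutex_lock(&b->lock);
                pthread_mutex_lock(&a->lock);
        }
}

int main(void)
{
        struct rq r0 = { 0, PTHREAD_MUTEX_INITIALIZER };
        struct rq r1 = { 1, PTHREAD_MUTEX_INITIALIZER };

        double_rq_lock(&r1, &r0);       /* acquires r0 then r1 */
        pthread_mutex_unlock(&r0.lock);
        pthread_mutex_unlock(&r1.lock);
        return 0;
}
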
@@ -1714,7 +1705,7 @@ static void double_lock_balance(runqueue_t *this_rq, runqueue_t *busiest)
        __acquires(this_rq->lock)
 {
        if (unlikely(!spin_trylock(&busiest->lock))) {
-               if (busiest < this_rq) {
+               if (busiest->cpu < this_rq->cpu) {
                        spin_unlock(&this_rq->lock);
                        spin_lock(&busiest->lock);
                        spin_lock(&this_rq->lock);
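
double_lock_balance() has the extra twist that this_rq->lock is already held: it optimistically trylocks busiest, and only when that fails and the cpu order would be violated does it drop and retake both locks in order. A sketch continuing the pthread example above; after the unlock/relock dance a real caller must revalidate anything it derived under this_rq->lock:

/* Called with this_rq->lock held; returns with both locks held. */
static void double_lock_balance(struct rq *this_rq, struct rq *busiest)
{
        if (pthread_mutex_trylock(&busiest->lock) != 0) {
                if (busiest->cpu < this_rq->cpu) {
                        /* Wrong order: back off, retake in cpu order. */
                        pthread_mutex_unlock(&this_rq->lock);
                        pthread_mutex_lock(&busiest->lock);
                        pthread_mutex_lock(&this_rq->lock);
                } else {
                        /* Already in the correct order: just wait. */
                        pthread_mutex_lock(&busiest->lock);
                }
        }
}
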
@@ -2882,7 +2873,7 @@ asmlinkage void __sched schedule(void)
         */
        if (likely(!current->exit_state)) {
                if (unlikely(in_atomic())) {
-                       printk(KERN_ERR "scheduling while atomic: "
+                       printk(KERN_ERR "BUG: scheduling while atomic: "
                                "%s/0x%08x/%d\n",
                                current->comm, preempt_count(), current->pid);
                        dump_stack();
@@ -4028,6 +4019,8 @@ static inline void __cond_resched(void)
         */
        if (unlikely(preempt_count()))
                return;
+       if (unlikely(system_state != SYSTEM_RUNNING))
+               return;
        do {
                add_preempt_count(PREEMPT_ACTIVE);
                schedule();
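
The added check makes cond_resched() a no-op until boot has reached SYSTEM_RUNNING, since voluntarily scheduling from early-init contexts is unsafe. The gate itself is trivial; here is a sketch of the early-return chain with a simplified system_state enum (the kernel's enum has more states):

#include <stdio.h>

enum system_states { SYSTEM_BOOTING, SYSTEM_RUNNING };
static enum system_states system_state = SYSTEM_BOOTING;

/* Mirror of the guard chain in __cond_resched (sketch): refuse
 * while in atomic context or before boot completes. */
static int resched_allowed(int preempt_count)
{
        if (preempt_count)
                return 0;       /* atomic context: must not sleep */
        if (system_state != SYSTEM_RUNNING)
                return 0;       /* too early in boot */
        return 1;
}

int main(void)
{
        printf("%d\n", resched_allowed(0));     /* 0: still booting */
        system_state = SYSTEM_RUNNING;
        printf("%d\n", resched_allowed(0));     /* 1: safe now */
        return 0;
}
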
@@ -4333,6 +4326,7 @@ void __devinit init_idle(task_t *idle, int cpu)
        runqueue_t *rq = cpu_rq(cpu);
        unsigned long flags;
 
+       idle->timestamp = sched_clock();
        idle->sleep_avg = 0;
        idle->array = NULL;
        idle->prio = MAX_PRIO;
@@ -5058,7 +5052,18 @@ static void init_sched_build_groups(struct sched_group groups[], cpumask_t span,
 #define MAX_DOMAIN_DISTANCE 32
 
 static unsigned long long migration_cost[MAX_DOMAIN_DISTANCE] =
-               { [ 0 ... MAX_DOMAIN_DISTANCE-1 ] = -1LL };
+               { [ 0 ... MAX_DOMAIN_DISTANCE-1 ] =
+/*
+ * Architectures may override the migration cost and thus avoid
+ * boot-time calibration. Unit is nanoseconds. Mostly useful for
+ * virtualized hardware:
+ */
+#ifdef CONFIG_DEFAULT_MIGRATION_COST
+                       CONFIG_DEFAULT_MIGRATION_COST
+#else
+                       -1LL
+#endif
+};
 
 /*
  * Allow override of migration cost - in units of microseconds.
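
Both the old and new initializers rely on GCC's range-designator extension, [first ... last] = value, which sets every element of migration_cost to the same default: either the Kconfig-provided cost in nanoseconds or -1LL, which the calibration code treats as "measure at boot". A standalone demonstration of the extension:

#include <stdio.h>

#define MAX_DOMAIN_DISTANCE 32

/* GCC range designator: all 32 slots get the same value.
 * -1 plays the role of "not calibrated yet". */
static long long migration_cost[MAX_DOMAIN_DISTANCE] =
                { [0 ... MAX_DOMAIN_DISTANCE - 1] = -1LL };

int main(void)
{
        printf("%lld %lld\n", migration_cost[0],
               migration_cost[MAX_DOMAIN_DISTANCE - 1]);       /* -1 -1 */
        return 0;
}
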
@@ -6028,6 +6033,7 @@ void __init sched_init(void)
                rq->push_cpu = 0;
                rq->migration_thread = NULL;
                INIT_LIST_HEAD(&rq->migration_queue);
+               rq->cpu = i;
 #endif
                atomic_set(&rq->nr_iowait, 0);
 
@@ -6068,7 +6074,7 @@ void __might_sleep(char *file, int line)
                if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
                        return;
                prev_jiffy = jiffies;
-               printk(KERN_ERR "Debug: sleeping function called from invalid"
+               printk(KERN_ERR "BUG: sleeping function called from invalid"
                                " context at %s:%d\n", file, line);
                printk("in_atomic():%d, irqs_disabled():%d\n",
                        in_atomic(), irqs_disabled());
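
Beyond the "BUG:" prefix (matching the scheduling-while-atomic message above, so both reports can be found with one grep pattern), this hunk's context shows the kernel's standard rate-limiting idiom: print at most once per HZ jiffies, using time_before() so counter wraparound is handled by signed arithmetic. A userspace sketch of the idiom; time_before here is a simplified equivalent of the include/linux/jiffies.h macro:

#include <stdio.h>

#define HZ 100
/* Wraparound-safe "a is earlier than b": unsigned subtraction
 * reinterpreted as signed, as the jiffies helpers do. */
#define time_before(a, b) ((long)((a) - (b)) < 0)

int main(void)
{
        unsigned long jiffies, prev_jiffy = 0;

        for (jiffies = 1; jiffies < 350; jiffies++) {
                if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
                        continue;       /* suppressed: within 1s window */
                prev_jiffy = jiffies;
                printf("report at jiffy %lu\n", jiffies); /* 1, 101, 201, 301 */
        }
        return 0;
}
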