Merge branch 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik...
diff --git a/kernel/sched.c b/kernel/sched.c
index 4d46e90f59c32fcbe88f320af11794c43af9e55a..7ffaabd64f893863bd485bd32b8f77e672cec9be 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -237,6 +237,7 @@ struct runqueue {
 
        task_t *migration_thread;
        struct list_head migration_queue;
+       int cpu;
 #endif
 
 #ifdef CONFIG_SCHEDSTATS
@@ -706,12 +707,6 @@ static int recalc_task_prio(task_t *p, unsigned long long now)
                                p->sleep_avg = JIFFIES_TO_NS(MAX_SLEEP_AVG -
                                                DEF_TIMESLICE);
                } else {
-                       /*
-                        * The lower the sleep avg a task has the more
-                        * rapidly it will rise with sleep time.
-                        */
-                       sleep_time *= (MAX_BONUS - CURRENT_BONUS(p)) ? : 1;
-
                        /*
                         * Tasks waking from uninterruptible sleep are
                         * limited in their sleep_avg rise as they
@@ -1660,6 +1655,9 @@ unsigned long nr_iowait(void)
 /*
  * double_rq_lock - safely lock two runqueues
  *
+ * We must take them in cpu order to match code in
+ * dependent_sleeper and wake_dependent_sleeper.
+ *
  * Note this does not disable interrupts like task_rq_lock,
  * you need to do so manually before calling.
  */
@@ -1671,7 +1669,7 @@ static void double_rq_lock(runqueue_t *rq1, runqueue_t *rq2)
                spin_lock(&rq1->lock);
                __acquire(rq2->lock);   /* Fake it out ;) */
        } else {
-               if (rq1 < rq2) {
+               if (rq1->cpu < rq2->cpu) {
                        spin_lock(&rq1->lock);
                        spin_lock(&rq2->lock);
                } else {
@@ -1707,7 +1705,7 @@ static void double_lock_balance(runqueue_t *this_rq, runqueue_t *busiest)
        __acquires(this_rq->lock)
 {
        if (unlikely(!spin_trylock(&busiest->lock))) {
-               if (busiest < this_rq) {
+               if (busiest->cpu < this_rq->cpu) {
                        spin_unlock(&this_rq->lock);
                        spin_lock(&busiest->lock);
                        spin_lock(&this_rq->lock);
@@ -2875,7 +2873,7 @@ asmlinkage void __sched schedule(void)
         */
        if (likely(!current->exit_state)) {
                if (unlikely(in_atomic())) {
-                       printk(KERN_ERR "scheduling while atomic: "
+                       printk(KERN_ERR "BUG: scheduling while atomic: "
                                "%s/0x%08x/%d\n",
                                current->comm, preempt_count(), current->pid);
                        dump_stack();
@@ -6035,6 +6033,7 @@ void __init sched_init(void)
                rq->push_cpu = 0;
                rq->migration_thread = NULL;
                INIT_LIST_HEAD(&rq->migration_queue);
+               rq->cpu = i;
 #endif
                atomic_set(&rq->nr_iowait, 0);
 
@@ -6075,7 +6074,7 @@ void __might_sleep(char *file, int line)
                if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
                        return;
                prev_jiffy = jiffies;
-               printk(KERN_ERR "Debug: sleeping function called from invalid"
+               printk(KERN_ERR "BUG: sleeping function called from invalid"
                                " context at %s:%d\n", file, line);
                printk("in_atomic():%d, irqs_disabled():%d\n",
                        in_atomic(), irqs_disabled());
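
Note on the locking hunks above: the patch replaces pointer-order lock acquisition (rq1 < rq2) with cpu-order acquisition (rq1->cpu < rq2->cpu), so that double_rq_lock() and double_lock_balance() take the two runqueue locks in the same order as the nested locking in dependent_sleeper() and wake_dependent_sleeper(), avoiding an ABBA deadlock. The following is a minimal standalone sketch of that ordering rule, not kernel code: pthread mutexes stand in for the kernel's spinlocks and struct rq is an illustrative stand-in for the real runqueue.

/*
 * Sketch of CPU-ordered double locking: when two runqueues must be held
 * at once, always take the lock of the lower-numbered CPU first, so any
 * two paths locking the same pair can never deadlock against each other.
 */
#include <pthread.h>

struct rq {
	pthread_mutex_t lock;
	int cpu;		/* mirrors the rq->cpu field added by the patch */
};

/* Lock both runqueues, lower CPU number first; a single rq is locked once. */
static void double_rq_lock(struct rq *rq1, struct rq *rq2)
{
	if (rq1 == rq2) {
		pthread_mutex_lock(&rq1->lock);
	} else if (rq1->cpu < rq2->cpu) {
		pthread_mutex_lock(&rq1->lock);
		pthread_mutex_lock(&rq2->lock);
	} else {
		pthread_mutex_lock(&rq2->lock);
		pthread_mutex_lock(&rq1->lock);
	}
}

static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
{
	pthread_mutex_unlock(&rq1->lock);
	if (rq1 != rq2)
		pthread_mutex_unlock(&rq2->lock);
}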