[PATCH] sched: fix the all pinned logic in load_balance_newidle()
diff --git a/kernel/sched.c b/kernel/sched.c
index cb31fb4a1379e23b0628795d5e1bfd64c6031bf0..a35a92ff38fdda8dc02049b90274e80e1b729f65 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -301,7 +301,7 @@ struct rq {
        struct lock_class_key rq_lock_key;
 };
 
-static DEFINE_PER_CPU(struct rq, runqueues) ____cacheline_aligned_in_smp;
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 static DEFINE_MUTEX(sched_hotcpu_mutex);
 
 static inline void check_preempt_curr(struct rq *rq, struct task_struct *p)
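The first hunk replaces the open-coded ____cacheline_aligned_in_smp annotation with the DEFINE_PER_CPU_SHARED_ALIGNED helper, which groups cacheline-aligned per-cpu variables into their own section so the alignment padding is not scattered through the generic per-cpu data. As a rough sketch of what the macro expands to, modeled on the asm-generic percpu header of this era (the section name, the per_cpu__ prefix, and the fixed 64-byte alignment are assumptions; the real values vary by architecture and kernel version):

/* Hedged sketch, not the real header: ____cacheline_aligned_in_smp is
 * modeled here as a fixed 64-byte alignment (really L1_CACHE_BYTES,
 * and a no-op on !SMP builds). */
#define ____cacheline_aligned_in_smp __attribute__((__aligned__(64)))

/* The helper places the per-cpu variable in a dedicated section so
 * that all shared-aligned per-cpu data is packed together. */
#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)			\
	__attribute__((__section__(".data.percpu.shared_aligned")))	\
	__typeof__(type) per_cpu__##name				\
	____cacheline_aligned_in_smp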
@@ -2235,7 +2235,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 
                        rq = cpu_rq(i);
 
-                       if (*sd_idle && !idle_cpu(i))
+                       if (*sd_idle && rq->nr_running)
                                *sd_idle = 0;
 
                        /* Bias balancing toward cpus of our domain */
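The !idle_cpu(i) test is replaced because idle_cpu() compares rq->curr against the per-cpu idle thread, and a newly idle cpu that is still inside schedule() has not switched to its idle thread yet, so it would wrongly clear sd_idle; rq->nr_running asks the question that actually matters here, namely whether the cpu has runnable tasks. A toy userspace model of the distinction (struct and field names are simplifications, not the kernel's):

#include <stdio.h>

/* Toy runqueue: just the fields the two predicates look at. */
struct task { int dummy; };
struct rq {
	struct task *curr;		/* task currently on the cpu */
	struct task *idle;		/* that cpu's idle thread */
	unsigned long nr_running;	/* runnable tasks on the queue */
};

/* Old predicate: "is the cpu running its idle thread right now?" */
static int idle_cpu(struct rq *rq)
{
	return rq->curr == rq->idle;
}

int main(void)
{
	struct task idle_thread, worker;
	/* A newly idle cpu: nothing runnable, but curr still points at
	 * the departing task because we are inside schedule(). */
	struct rq rq = { .curr = &worker, .idle = &idle_thread,
			 .nr_running = 0 };

	printf("old test !idle_cpu(): %d (wrongly clears sd_idle)\n",
	       !idle_cpu(&rq));
	printf("new test nr_running:  %lu (leaves sd_idle set)\n",
	       rq.nr_running);
	return 0;
}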
@@ -2257,9 +2257,11 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
                /*
                 * First idle cpu or the first cpu(busiest) in this sched group
                 * is eligible for doing load balancing at this and above
-                * domains.
+                * domains. In the newly idle case, we will allow all the cpus
+                * to do the newly idle load balance.
                 */
-               if (local_group && balance_cpu != this_cpu && balance) {
+               if (idle != CPU_NEWLY_IDLE && local_group &&
+                   balance_cpu != this_cpu && balance) {
                        *balance = 0;
                        goto ret;
                }
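With periodic balancing, only one cpu per group (the first idle cpu, or else the first cpu) runs load balancing for a domain, and every other cpu bails out at this check; the hunk above exempts CPU_NEWLY_IDLE, since newly idle balancing is triggered by the event of a cpu running out of work rather than by the tick. A condensed restatement of the gate as a standalone predicate (hypothetical helper, not in the kernel):

enum cpu_idle_type { CPU_IDLE, CPU_NOT_IDLE, CPU_NEWLY_IDLE };

/* Returns 1 when this cpu should skip balancing and defer to the
 * group's designated balance_cpu. Newly idle balancing is exempt:
 * any cpu that just ran out of tasks may pull for itself. */
static int defer_to_balance_cpu(enum cpu_idle_type idle, int local_group,
				int balance_cpu, int this_cpu)
{
	return idle != CPU_NEWLY_IDLE && local_group &&
	       balance_cpu != this_cpu;
}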
@@ -2677,6 +2679,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
        unsigned long imbalance;
        int nr_moved = 0;
        int sd_idle = 0;
+       int all_pinned = 0;
        cpumask_t cpus = CPU_MASK_ALL;
 
        /*
@@ -2715,10 +2718,11 @@ redo:
                double_lock_balance(this_rq, busiest);
                nr_moved = move_tasks(this_rq, this_cpu, busiest,
                                        minus_1_or_zero(busiest->nr_running),
-                                       imbalance, sd, CPU_NEWLY_IDLE, NULL);
+                                       imbalance, sd, CPU_NEWLY_IDLE,
+                                       &all_pinned);
                spin_unlock(&busiest->lock);
 
-               if (!nr_moved) {
+               if (unlikely(all_pinned)) {
                        cpu_clear(cpu_of(busiest), cpus);
                        if (!cpus_empty(cpus))
                                goto redo;
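The retry condition is the heart of the fix: move_tasks() can return zero either because every task on the busiest queue is pinned there by its cpus_allowed mask (in which case masking that cpu out and retrying against another queue can help) or because candidate tasks were skipped for other reasons such as being cache-hot (in which case a retry is pointless). Keying the redo loop on all_pinned instead of !nr_moved restricts the retry to the first case. A simplified sketch of how move_tasks() is assumed to derive the flag (field names and the bitmask representation are illustrative, not the kernel's):

struct task { unsigned long cpus_allowed; };	/* bit i set: may run on cpu i */

/* Sketch: pinned starts at 1 and is cleared the moment any candidate
 * task is allowed on this_cpu, even if that task is ultimately not
 * moved (e.g. it is cache-hot). So *all_pinned == 1 means retrying
 * against a different busiest cpu is the only option left. */
static int sketch_move_tasks(struct task *tasks, int n, int this_cpu,
			     int *all_pinned)
{
	int nr_moved = 0, pinned = 1;

	for (int i = 0; i < n; i++) {
		if (!(tasks[i].cpus_allowed & (1UL << this_cpu)))
			continue;	/* pinned away from this_cpu */
		pinned = 0;		/* movable in principle */
		/* ... load and cache-hotness checks may still reject it,
		 * leaving nr_moved at 0 without all tasks being pinned ... */
	}

	*all_pinned = pinned;
	return nr_moved;
}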