sched: do not do cond_resched() when CONFIG_PREEMPT
diff --git a/kernel/sched.c b/kernel/sched.c
index 7f827b70ae027ae2783d4169393e978c6b6e7596..6ee37602a6d832ff6c4c26179b0328b6f5ff625e 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -22,6 +22,8 @@
  *              by Peter Williams
  *  2007-05-06  Interactivity improvements to CFS by Mike Galbraith
  *  2007-07-01  Group scheduling enhancements by Srivatsa Vaddagiri
+ *  2007-11-29  RT balancing improvements by Steven Rostedt, Gregory Haskins,
+ *              Thomas Gleixner, Mike Kravetz
  */
 
 #include <linux/mm.h>
@@ -96,10 +98,9 @@ unsigned long long __attribute__((weak)) sched_clock(void)
 #define MAX_USER_PRIO          (USER_PRIO(MAX_PRIO))
 
 /*
- * Some helpers for converting nanosecond timing to jiffy resolution
+ * Helpers for converting nanosecond timing to jiffy resolution
  */
 #define NS_TO_JIFFIES(TIME)    ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
-#define JIFFIES_TO_NS(TIME)    ((TIME) * (NSEC_PER_SEC / HZ))
 
 #define NICE_0_LOAD            SCHED_LOAD_SCALE
 #define NICE_0_SHIFT           SCHED_LOAD_SHIFT
@@ -168,9 +169,43 @@ struct task_group {
        struct sched_entity **se;
        /* runqueue "owned" by this group on each cpu */
        struct cfs_rq **cfs_rq;
+
+       /*
+        * The shares assigned to a task group govern how much of the cpu
+        * bandwidth is allocated to the group. The more shares a group has,
+        * the more cpu bandwidth it is allocated.
+        *
+        * For example, let's say that there are three task groups, A, B and C,
+        * which have been assigned shares 1000, 2000 and 3000 respectively.
+        * Then the cpu bandwidth allocated by the scheduler to task groups
+        * A, B and C should be:
+        *
+        *      Bw(A) = 1000/(1000+2000+3000) * 100 = 16.66%
+        *      Bw(B) = 2000/(1000+2000+3000) * 100 = 33.33%
+        *      Bw(C) = 3000/(1000+2000+3000) * 100 = 50%
+        *
+        * The weight assigned to a task group's schedulable entities on every
+        * cpu (task_group.se[a_cpu]->load.weight) is derived from the task
+        * group's shares. For example, let's say that task group A has been
+        * assigned shares of 1000 and there are two CPUs in the system. Then,
+        *
+        *  tg_A->se[0]->load.weight = tg_A->se[1]->load.weight = 1000;
+        *
+        * Note: The schedulable entities of a task group need not have the
+        *       same weight on all CPUs. If the group has 2 of its tasks on
+        *       CPU0 and 1 task on CPU1, then a better distribution of
+        *       weight would be:
+        *
+        *      tg_A->se[0]->load.weight = 2/3 * 2000 = 1333
+        *      tg_A->se[1]->load.weight = 1/3 * 2000 =  667
+        *
+        * rebalance_shares() is responsible for distributing the shares of
+        * task groups like this among the group's schedulable entities across
+        * cpus.
+        */
        unsigned long shares;
-       /* spinlock to serialize modification to shares */
-       spinlock_t lock;
+
        struct rcu_head rcu;
 };
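To make the bandwidth arithmetic in the comment above concrete, here is a small self-contained sketch (not part of the patch) that computes the percentages for the A/B/C example; the share values are taken straight from the comment.

#include <stdio.h>

/* Shares from the example above: A=1000, B=2000, C=3000 */
static const unsigned long shares[] = { 1000, 2000, 3000 };
static const char *names[] = { "A", "B", "C" };

int main(void)
{
	unsigned long total = 0;
	int i;

	for (i = 0; i < 3; i++)
		total += shares[i];

	/* Bw(group) = shares(group) / sum-of-all-shares * 100 */
	for (i = 0; i < 3; i++)
		printf("Bw(%s) = %.2f%%\n", names[i],
		       100.0 * shares[i] / total);

	return 0;	/* prints 16.67%, 33.33% and 50.00% */
}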
 
@@ -182,20 +217,38 @@ static DEFINE_PER_CPU(struct cfs_rq, init_cfs_rq) ____cacheline_aligned_in_smp;
 static struct sched_entity *init_sched_entity_p[NR_CPUS];
 static struct cfs_rq *init_cfs_rq_p[NR_CPUS];
 
+/* task_group_mutex serializes add/remove of task groups and also changes to
+ * a task group's cpu shares.
+ */
+static DEFINE_MUTEX(task_group_mutex);
+
+/* doms_cur_mutex serializes access to doms_cur[] array */
+static DEFINE_MUTEX(doms_cur_mutex);
+
+#ifdef CONFIG_SMP
+/* kernel thread that runs rebalance_shares() periodically */
+static struct task_struct *lb_monitor_task;
+static int load_balance_monitor(void *unused);
+#endif
+
+static void set_se_shares(struct sched_entity *se, unsigned long shares);
+
 /* Default task group.
  *     Every task in the system belongs to this group at bootup.
  */
 struct task_group init_task_group = {
-       .se     = init_sched_entity_p,
+       .se     = init_sched_entity_p,
        .cfs_rq = init_cfs_rq_p,
 };
 
 #ifdef CONFIG_FAIR_USER_SCHED
-# define INIT_TASK_GROUP_LOAD  2*NICE_0_LOAD
+# define INIT_TASK_GROUP_LOAD  (2*NICE_0_LOAD)
 #else
 # define INIT_TASK_GROUP_LOAD  NICE_0_LOAD
 #endif
 
+#define MIN_GROUP_SHARES       2
+
 static int init_task_group_load = INIT_TASK_GROUP_LOAD;
 
 /* return group to which a task belongs */
@@ -221,9 +274,33 @@ static inline void set_task_cfs_rq(struct task_struct *p, unsigned int cpu)
        p->se.parent = task_group(p)->se[cpu];
 }
 
+static inline void lock_task_group_list(void)
+{
+       mutex_lock(&task_group_mutex);
+}
+
+static inline void unlock_task_group_list(void)
+{
+       mutex_unlock(&task_group_mutex);
+}
+
+static inline void lock_doms_cur(void)
+{
+       mutex_lock(&doms_cur_mutex);
+}
+
+static inline void unlock_doms_cur(void)
+{
+       mutex_unlock(&doms_cur_mutex);
+}
+
 #else
 
 static inline void set_task_cfs_rq(struct task_struct *p, unsigned int cpu) { }
+static inline void lock_task_group_list(void) { }
+static inline void unlock_task_group_list(void) { }
+static inline void lock_doms_cur(void) { }
+static inline void unlock_doms_cur(void) { }
 
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
@@ -266,8 +343,44 @@ struct rt_rq {
        struct rt_prio_array active;
        int rt_load_balance_idx;
        struct list_head *rt_load_balance_head, *rt_load_balance_curr;
+       unsigned long rt_nr_running;
+       unsigned long rt_nr_migratory;
+       /* highest queued rt task prio */
+       int highest_prio;
+       int overloaded;
+};
+
+#ifdef CONFIG_SMP
+
+/*
+ * We add the notion of a root-domain which will be used to define per-domain
+ * variables. Each exclusive cpuset essentially defines an island domain by
+ * fully partitioning the member cpus from any other cpuset. Whenever a new
+ * exclusive cpuset is created, we also create and attach a new root-domain
+ * object.
+ *
+ */
+struct root_domain {
+       atomic_t refcount;
+       cpumask_t span;
+       cpumask_t online;
+
+       /*
+        * The "RT overload" flag: it gets set if a CPU has more than
+        * one runnable RT task.
+        */
+       cpumask_t rto_mask;
+       atomic_t rto_count;
 };
 
+/*
+ * By default the system creates a single root-domain with all cpus as
+ * members (mimicking the global state we have today).
+ */
+static struct root_domain def_root_domain;
+
+#endif
+
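A sketch of how the rto_mask/rto_count pair in struct root_domain can be kept in sync when a runqueue gains or loses a second runnable RT task. The helper names below are hypothetical; the real accounting is expected to live in the RT scheduling class:

/* Hypothetical helpers: mark/unmark a CPU as RT-overloaded in its root domain */
static inline void example_rt_set_overload(struct rq *rq)
{
	cpu_set(rq->cpu, rq->rd->rto_mask);
	/*
	 * Publish the mask before the count, so a reader that sees
	 * rto_count != 0 also sees the updated rto_mask.
	 */
	wmb();
	atomic_inc(&rq->rd->rto_count);
}

static inline void example_rt_clear_overload(struct rq *rq)
{
	atomic_dec(&rq->rd->rto_count);
	cpu_clear(rq->cpu, rq->rd->rto_mask);
}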
 /*
  * This is the main, per-CPU runqueue data structure.
  *
@@ -325,6 +438,7 @@ struct rq {
        atomic_t nr_iowait;
 
 #ifdef CONFIG_SMP
+       struct root_domain *rd;
        struct sched_domain *sd;
 
        /* For active balancing */
@@ -363,7 +477,6 @@ struct rq {
 };
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
-static DEFINE_MUTEX(sched_hotcpu_mutex);
 
 static inline void check_preempt_curr(struct rq *rq, struct task_struct *p)
 {
@@ -871,6 +984,23 @@ static void cpuacct_charge(struct task_struct *tsk, u64 cputime);
 static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
 #endif
 
+static inline void inc_cpu_load(struct rq *rq, unsigned long load)
+{
+       update_load_add(&rq->load, load);
+}
+
+static inline void dec_cpu_load(struct rq *rq, unsigned long load)
+{
+       update_load_sub(&rq->load, load);
+}
+
+#ifdef CONFIG_SMP
+static unsigned long source_load(int cpu, int type);
+static unsigned long target_load(int cpu, int type);
+static unsigned long cpu_avg_load_per_task(int cpu);
+static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
+#endif /* CONFIG_SMP */
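With inc_load()/dec_load() removed from inc_nr_running()/dec_nr_running() in the hunk below, the per-rq load is now adjusted with the inc_cpu_load()/dec_cpu_load() helpers above at the points where individual entities are queued. A hedged sketch of that pattern, assuming the class-specific enqueue/dequeue code (e.g. in sched_fair.c) takes over this accounting:

/* Illustrative pattern only: account an entity's weight as it is (de)queued */
static void example_account_entity(struct rq *rq, struct sched_entity *se,
				   int queued)
{
	if (queued)
		inc_cpu_load(rq, se->load.weight);
	else
		dec_cpu_load(rq, se->load.weight);
}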
+
 #include "sched_stats.h"
 #include "sched_idletask.c"
 #include "sched_fair.c"
@@ -881,26 +1011,14 @@ static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
 
 #define sched_class_highest (&rt_sched_class)
 
-static inline void inc_load(struct rq *rq, const struct task_struct *p)
-{
-       update_load_add(&rq->load, p->se.load.weight);
-}
-
-static inline void dec_load(struct rq *rq, const struct task_struct *p)
-{
-       update_load_sub(&rq->load, p->se.load.weight);
-}
-
 static void inc_nr_running(struct task_struct *p, struct rq *rq)
 {
        rq->nr_running++;
-       inc_load(rq, p);
 }
 
 static void dec_nr_running(struct task_struct *p, struct rq *rq)
 {
        rq->nr_running--;
-       dec_load(rq, p);
 }
 
 static void set_load_weight(struct task_struct *p)
@@ -1036,12 +1154,24 @@ static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
 #endif
 }
 
+static inline void check_class_changed(struct rq *rq, struct task_struct *p,
+                                      const struct sched_class *prev_class,
+                                      int oldprio, int running)
+{
+       if (prev_class != p->sched_class) {
+               if (prev_class->switched_from)
+                       prev_class->switched_from(rq, p, running);
+               p->sched_class->switched_to(rq, p, running);
+       } else
+               p->sched_class->prio_changed(rq, p, oldprio, running);
+}
+
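check_class_changed() relies on three per-class callbacks. Their signatures, inferred from the call sites above, look roughly like this (a sketch of the relevant members only, not the full struct sched_class):

/* Sketch: callback signatures as inferred from check_class_changed() */
struct example_class_change_hooks {
	void (*switched_from)(struct rq *this_rq, struct task_struct *task,
			      int running);
	void (*switched_to)(struct rq *this_rq, struct task_struct *task,
			    int running);
	void (*prio_changed)(struct rq *this_rq, struct task_struct *task,
			     int oldprio, int running);
};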
 #ifdef CONFIG_SMP
 
 /*
  * Is this task likely cache-hot:
  */
-static inline int
+static int
 task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
 {
        s64 delta;
@@ -1266,7 +1396,7 @@ static unsigned long target_load(int cpu, int type)
 /*
  * Return the average load per task on the cpu's run queue
  */
-static inline unsigned long cpu_avg_load_per_task(int cpu)
+static unsigned long cpu_avg_load_per_task(int cpu)
 {
        struct rq *rq = cpu_rq(cpu);
        unsigned long total = weighted_cpuload(cpu);
@@ -1423,58 +1553,6 @@ static int sched_balance_self(int cpu, int flag)
 
 #endif /* CONFIG_SMP */
 
-/*
- * wake_idle() will wake a task on an idle cpu if task->cpu is
- * not idle and an idle cpu is available.  The span of cpus to
- * search starts with cpus closest then further out as needed,
- * so we always favor a closer, idle cpu.
- *
- * Returns the CPU we should wake onto.
- */
-#if defined(ARCH_HAS_SCHED_WAKE_IDLE)
-static int wake_idle(int cpu, struct task_struct *p)
-{
-       cpumask_t tmp;
-       struct sched_domain *sd;
-       int i;
-
-       /*
-        * If it is idle, then it is the best cpu to run this task.
-        *
-        * This cpu is also the best, if it has more than one task already.
-        * Siblings must be also busy(in most cases) as they didn't already
-        * pickup the extra load from this cpu and hence we need not check
-        * sibling runqueue info. This will avoid the checks and cache miss
-        * penalities associated with that.
-        */
-       if (idle_cpu(cpu) || cpu_rq(cpu)->nr_running > 1)
-               return cpu;
-
-       for_each_domain(cpu, sd) {
-               if (sd->flags & SD_WAKE_IDLE) {
-                       cpus_and(tmp, sd->span, p->cpus_allowed);
-                       for_each_cpu_mask(i, tmp) {
-                               if (idle_cpu(i)) {
-                                       if (i != task_cpu(p)) {
-                                               schedstat_inc(p,
-                                                       se.nr_wakeups_idle);
-                                       }
-                                       return i;
-                               }
-                       }
-               } else {
-                       break;
-               }
-       }
-       return cpu;
-}
-#else
-static inline int wake_idle(int cpu, struct task_struct *p)
-{
-       return cpu;
-}
-#endif
-
 /***
  * try_to_wake_up - wake up a thread
  * @p: the to-be-woken-up thread
@@ -1495,11 +1573,6 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
        unsigned long flags;
        long old_state;
        struct rq *rq;
-#ifdef CONFIG_SMP
-       struct sched_domain *sd, *this_sd = NULL;
-       unsigned long load, this_load;
-       int new_cpu;
-#endif
 
        rq = task_rq_lock(p, &flags);
        old_state = p->state;
@@ -1517,92 +1590,9 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
        if (unlikely(task_running(rq, p)))
                goto out_activate;
 
-       new_cpu = cpu;
-
-       schedstat_inc(rq, ttwu_count);
-       if (cpu == this_cpu) {
-               schedstat_inc(rq, ttwu_local);
-               goto out_set_cpu;
-       }
-
-       for_each_domain(this_cpu, sd) {
-               if (cpu_isset(cpu, sd->span)) {
-                       schedstat_inc(sd, ttwu_wake_remote);
-                       this_sd = sd;
-                       break;
-               }
-       }
-
-       if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed)))
-               goto out_set_cpu;
-
-       /*
-        * Check for affine wakeup and passive balancing possibilities.
-        */
-       if (this_sd) {
-               int idx = this_sd->wake_idx;
-               unsigned int imbalance;
-
-               imbalance = 100 + (this_sd->imbalance_pct - 100) / 2;
-
-               load = source_load(cpu, idx);
-               this_load = target_load(this_cpu, idx);
-
-               new_cpu = this_cpu; /* Wake to this CPU if we can */
-
-               if (this_sd->flags & SD_WAKE_AFFINE) {
-                       unsigned long tl = this_load;
-                       unsigned long tl_per_task;
-
-                       /*
-                        * Attract cache-cold tasks on sync wakeups:
-                        */
-                       if (sync && !task_hot(p, rq->clock, this_sd))
-                               goto out_set_cpu;
-
-                       schedstat_inc(p, se.nr_wakeups_affine_attempts);
-                       tl_per_task = cpu_avg_load_per_task(this_cpu);
-
-                       /*
-                        * If sync wakeup then subtract the (maximum possible)
-                        * effect of the currently running task from the load
-                        * of the current CPU:
-                        */
-                       if (sync)
-                               tl -= current->se.load.weight;
-
-                       if ((tl <= load &&
-                               tl + target_load(cpu, idx) <= tl_per_task) ||
-                              100*(tl + p->se.load.weight) <= imbalance*load) {
-                               /*
-                                * This domain has SD_WAKE_AFFINE and
-                                * p is cache cold in this domain, and
-                                * there is no bad imbalance.
-                                */
-                               schedstat_inc(this_sd, ttwu_move_affine);
-                               schedstat_inc(p, se.nr_wakeups_affine);
-                               goto out_set_cpu;
-                       }
-               }
-
-               /*
-                * Start passive balancing when half the imbalance_pct
-                * limit is reached.
-                */
-               if (this_sd->flags & SD_WAKE_BALANCE) {
-                       if (imbalance*this_load <= 100*load) {
-                               schedstat_inc(this_sd, ttwu_move_balance);
-                               schedstat_inc(p, se.nr_wakeups_passive);
-                               goto out_set_cpu;
-                       }
-               }
-       }
-
-       new_cpu = cpu; /* Could not wake to this_cpu. Wake to cpu instead */
-out_set_cpu:
-       new_cpu = wake_idle(new_cpu, p);
-       if (new_cpu != cpu) {
-               set_task_cpu(p, new_cpu);
+       cpu = p->sched_class->select_task_rq(p, sync);
+       if (cpu != orig_cpu) {
+               set_task_cpu(p, cpu);
                task_rq_unlock(rq, &flags);
                /* might preempt at this point */
                rq = task_rq_lock(p, &flags);
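try_to_wake_up() now delegates wakeup CPU selection to the task's scheduling class through ->select_task_rq(p, sync). A minimal illustrative implementation of such a hook, which simply keeps the task where it last ran (real classes implement smarter placement in their own source files):

/* Minimal illustrative ->select_task_rq() hook: stay on the current CPU */
static int example_select_task_rq(struct task_struct *p, int sync)
{
	return task_cpu(p);
}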
@@ -1616,6 +1606,21 @@ out_set_cpu:
                cpu = task_cpu(p);
        }
 
+#ifdef CONFIG_SCHEDSTATS
+       schedstat_inc(rq, ttwu_count);
+       if (cpu == this_cpu)
+               schedstat_inc(rq, ttwu_local);
+       else {
+               struct sched_domain *sd;
+               for_each_domain(this_cpu, sd) {
+                       if (cpu_isset(cpu, sd->span)) {
+                               schedstat_inc(sd, ttwu_wake_remote);
+                               break;
+                       }
+               }
+       }
+#endif
+
 out_activate:
 #endif /* CONFIG_SMP */
        schedstat_inc(p, se.nr_wakeups);
@@ -1634,6 +1639,10 @@ out_activate:
 
 out_running:
        p->state = TASK_RUNNING;
+#ifdef CONFIG_SMP
+       if (p->sched_class->task_wake_up)
+               p->sched_class->task_wake_up(rq, p);
+#endif
 out:
        task_rq_unlock(rq, &flags);
 
@@ -1676,7 +1685,7 @@ static void __sched_fork(struct task_struct *p)
        p->se.wait_max                  = 0;
 #endif
 
-       INIT_LIST_HEAD(&p->run_list);
+       INIT_LIST_HEAD(&p->rt.run_list);
        p->se.on_rq = 0;
 
 #ifdef CONFIG_PREEMPT_NOTIFIERS
@@ -1756,6 +1765,10 @@ void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
                inc_nr_running(p, rq);
        }
        check_preempt_curr(rq, p);
+#ifdef CONFIG_SMP
+       if (p->sched_class->task_wake_up)
+               p->sched_class->task_wake_up(rq, p);
+#endif
        task_rq_unlock(rq, &flags);
 }
 
@@ -1876,6 +1889,11 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
        prev_state = prev->state;
        finish_arch_switch(prev);
        finish_lock_switch(rq, prev);
+#ifdef CONFIG_SMP
+       if (current->sched_class->post_schedule)
+               current->sched_class->post_schedule(rq);
+#endif
+
        fire_sched_in_preempt_notifiers(current);
        if (mm)
                mmdrop(mm);
@@ -2109,11 +2127,13 @@ static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
 /*
  * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
  */
-static void double_lock_balance(struct rq *this_rq, struct rq *busiest)
+static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
        __releases(this_rq->lock)
        __acquires(busiest->lock)
        __acquires(this_rq->lock)
 {
+       int ret = 0;
+
        if (unlikely(!irqs_disabled())) {
                /* printk() doesn't work good under rq->lock */
                spin_unlock(&this_rq->lock);
@@ -2124,9 +2144,11 @@ static void double_lock_balance(struct rq *this_rq, struct rq *busiest)
                        spin_unlock(&this_rq->lock);
                        spin_lock(&busiest->lock);
                        spin_lock(&this_rq->lock);
+                       ret = 1;
                } else
                        spin_lock(&busiest->lock);
        }
+       return ret;
 }
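double_lock_balance() now reports, via its return value, whether this_rq->lock had to be dropped and re-acquired to honour the lock ordering. A hedged sketch of how a caller can use that information (illustrative function, not from the patch):

/* Illustrative caller: revalidate cached state if this_rq->lock was dropped */
static void example_balance(struct rq *this_rq, struct rq *busiest,
			    struct task_struct *p)
{
	if (double_lock_balance(this_rq, busiest)) {
		/*
		 * this_rq->lock was released and re-taken: p may have moved,
		 * been dequeued or changed class, so re-check before using it.
		 */
		if (task_rq(p) != busiest)
			goto unlock;
	}
	/* ... migrate with both locks held ... */
unlock:
	spin_unlock(&busiest->lock);
}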
 
 /*
@@ -3639,6 +3661,11 @@ need_resched_nonpreemptible:
                switch_count = &prev->nvcsw;
        }
 
+#ifdef CONFIG_SMP
+       if (prev->sched_class->pre_schedule)
+               prev->sched_class->pre_schedule(rq, prev);
+#endif
+
        if (unlikely(!rq->nr_running))
                idle_balance(cpu, rq);
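The ->pre_schedule() call added above joins the ->post_schedule() and ->task_wake_up() calls introduced in earlier hunks, giving each scheduling class three balancing hook points around a context switch or wakeup. A sketch of no-op implementations, with signatures inferred from the call sites; the comments describe how the RT class is assumed to use them:

/* Illustrative no-op hooks; signatures inferred from the call sites above */
static void example_pre_schedule(struct rq *rq, struct task_struct *prev)
{
	/* e.g. pull runnable RT tasks to this rq before picking the next task */
}

static void example_post_schedule(struct rq *rq)
{
	/* e.g. push surplus RT tasks to other CPUs after the switch completes */
}

static void example_task_wake_up(struct rq *rq, struct task_struct *p)
{
	/* e.g. push p to another CPU if it cannot preempt rq->curr */
}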
 
@@ -4004,6 +4031,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
        unsigned long flags;
        int oldprio, on_rq, running;
        struct rq *rq;
+       const struct sched_class *prev_class = p->sched_class;
 
        BUG_ON(prio < 0 || prio > MAX_PRIO);
 
@@ -4029,18 +4057,10 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
        if (on_rq) {
                if (running)
                        p->sched_class->set_curr_task(rq);
+
                enqueue_task(rq, p, 0);
-               /*
-                * Reschedule if we are currently running on this runqueue and
-                * our priority decreased, or if we are not currently running on
-                * this runqueue and our priority is higher than the current's
-                */
-               if (running) {
-                       if (p->prio > oldprio)
-                               resched_task(rq->curr);
-               } else {
-                       check_preempt_curr(rq, p);
-               }
+
+               check_class_changed(rq, p, prev_class, oldprio, running);
        }
        task_rq_unlock(rq, &flags);
 }
@@ -4072,10 +4092,8 @@ void set_user_nice(struct task_struct *p, long nice)
                goto out_unlock;
        }
        on_rq = p->se.on_rq;
-       if (on_rq) {
+       if (on_rq)
                dequeue_task(rq, p, 0);
-               dec_load(rq, p);
-       }
 
        p->static_prio = NICE_TO_PRIO(nice);
        set_load_weight(p);
@@ -4085,7 +4103,6 @@ void set_user_nice(struct task_struct *p, long nice)
 
        if (on_rq) {
                enqueue_task(rq, p, 0);
-               inc_load(rq, p);
                /*
                 * If the task increased its priority or is running and
                 * lowered its priority, then reschedule its CPU:
@@ -4243,6 +4260,7 @@ int sched_setscheduler(struct task_struct *p, int policy,
 {
        int retval, oldprio, oldpolicy = -1, on_rq, running;
        unsigned long flags;
+       const struct sched_class *prev_class = p->sched_class;
        struct rq *rq;
 
        /* may grab non-irq protected spin_locks */
@@ -4336,18 +4354,10 @@ recheck:
        if (on_rq) {
                if (running)
                        p->sched_class->set_curr_task(rq);
+
                activate_task(rq, p, 0);
-               /*
-                * Reschedule if we are currently running on this runqueue and
-                * our priority decreased, or if we are not currently running on
-                * this runqueue and our priority is higher than the current's
-                */
-               if (running) {
-                       if (p->prio > oldprio)
-                               resched_task(rq->curr);
-               } else {
-                       check_preempt_curr(rq, p);
-               }
+
+               check_class_changed(rq, p, prev_class, oldprio, running);
        }
        __task_rq_unlock(rq);
        spin_unlock_irqrestore(&p->pi_lock, flags);
@@ -4475,13 +4485,13 @@ long sched_setaffinity(pid_t pid, cpumask_t new_mask)
        struct task_struct *p;
        int retval;
 
-       mutex_lock(&sched_hotcpu_mutex);
+       get_online_cpus();
        read_lock(&tasklist_lock);
 
        p = find_process_by_pid(pid);
        if (!p) {
                read_unlock(&tasklist_lock);
-               mutex_unlock(&sched_hotcpu_mutex);
+               put_online_cpus();
                return -ESRCH;
        }
 
@@ -4521,7 +4531,7 @@ long sched_setaffinity(pid_t pid, cpumask_t new_mask)
        }
 out_unlock:
        put_task_struct(p);
-       mutex_unlock(&sched_hotcpu_mutex);
+       put_online_cpus();
        return retval;
 }
 
@@ -4578,7 +4588,7 @@ long sched_getaffinity(pid_t pid, cpumask_t *mask)
        struct task_struct *p;
        int retval;
 
-       mutex_lock(&sched_hotcpu_mutex);
+       get_online_cpus();
        read_lock(&tasklist_lock);
 
        retval = -ESRCH;
@@ -4594,7 +4604,7 @@ long sched_getaffinity(pid_t pid, cpumask_t *mask)
 
 out_unlock:
        read_unlock(&tasklist_lock);
-       mutex_unlock(&sched_hotcpu_mutex);
+       put_online_cpus();
 
        return retval;
 }
@@ -4668,7 +4678,8 @@ static void __cond_resched(void)
        } while (need_resched());
 }
 
-int __sched cond_resched(void)
+#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PREEMPT_VOLUNTARY)
+int __sched _cond_resched(void)
 {
        if (need_resched() && !(preempt_count() & PREEMPT_ACTIVE) &&
                                        system_state == SYSTEM_RUNNING) {
@@ -4677,7 +4688,8 @@ int __sched cond_resched(void)
        }
        return 0;
 }
-EXPORT_SYMBOL(cond_resched);
+EXPORT_SYMBOL(_cond_resched);
+#endif
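This is the change the commit subject refers to: the out-of-line helper is only built when the kernel is not fully preemptible, and cond_resched() is expected to compile away otherwise. A sketch of what the matching header-side definition could look like (an assumption about include/linux/sched.h, which is not part of this diff):

/* Assumed header-side counterpart (illustrative only) */
#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PREEMPT_VOLUNTARY)
extern int _cond_resched(void);
static inline int cond_resched(void)
{
	return _cond_resched();
}
#else
/* Fully preemptible kernel: rescheduling points are implicit, so do nothing */
static inline int cond_resched(void)
{
	return 0;
}
#endif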
 
 /*
  * cond_resched_lock() - if a reschedule is pending, drop the given lock,
@@ -4875,7 +4887,7 @@ out_unlock:
 
 static const char stat_nam[] = "RSDTtZX";
 
-static void show_task(struct task_struct *p)
+void sched_show_task(struct task_struct *p)
 {
        unsigned long free = 0;
        unsigned state;
@@ -4928,7 +4940,7 @@ void show_state_filter(unsigned long state_filter)
                 */
                touch_nmi_watchdog();
                if (!state_filter || (p->state & state_filter))
-                       show_task(p);
+                       sched_show_task(p);
        } while_each_thread(g, p);
 
        touch_all_softlockup_watchdogs();
@@ -5062,7 +5074,13 @@ int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
                goto out;
        }
 
-       p->cpus_allowed = new_mask;
+       if (p->sched_class->set_cpus_allowed)
+               p->sched_class->set_cpus_allowed(p, &new_mask);
+       else {
+               p->cpus_allowed = new_mask;
+               p->nr_cpus_allowed = cpus_weight(new_mask);
+       }
+
        /* Can the task run on the task's current CPU? If so, we're done */
        if (cpu_isset(task_cpu(p), new_mask))
                goto out;
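set_cpus_allowed() now defers to a class-specific hook when one exists, and otherwise updates p->cpus_allowed plus the new p->nr_cpus_allowed count itself. A hypothetical hook, sketching what a class that tracks migratable tasks might do (the function name is illustrative, not taken from the patch):

/*
 * Hypothetical ->set_cpus_allowed() hook for a class that wants to know how
 * many of its queued tasks may run on more than one CPU.
 */
static void example_set_cpus_allowed(struct task_struct *p,
				     cpumask_t *new_mask)
{
	/* keep the generic bookkeeping consistent with the fallback path */
	p->cpus_allowed = *new_mask;
	p->nr_cpus_allowed = cpus_weight(*new_mask);

	/*
	 * A real implementation would also update per-rq counters such as
	 * rt_rq->rt_nr_migratory when the task is currently queued.
	 */
}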
@@ -5554,9 +5572,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
        struct rq *rq;
 
        switch (action) {
-       case CPU_LOCK_ACQUIRE:
-               mutex_lock(&sched_hotcpu_mutex);
-               break;
 
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
@@ -5575,6 +5590,15 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
        case CPU_ONLINE_FROZEN:
                /* Strictly unnecessary, as first user will wake it. */
                wake_up_process(cpu_rq(cpu)->migration_thread);
+
+               /* Update our root-domain */
+               rq = cpu_rq(cpu);
+               spin_lock_irqsave(&rq->lock, flags);
+               if (rq->rd) {
+                       BUG_ON(!cpu_isset(cpu, rq->rd->span));
+                       cpu_set(cpu, rq->rd->online);
+               }
+               spin_unlock_irqrestore(&rq->lock, flags);
                break;
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -5625,10 +5649,18 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
                }
                spin_unlock_irq(&rq->lock);
                break;
-#endif
-       case CPU_LOCK_RELEASE:
-               mutex_unlock(&sched_hotcpu_mutex);
+
+       case CPU_DOWN_PREPARE:
+               /* Update our root-domain */
+               rq = cpu_rq(cpu);
+               spin_lock_irqsave(&rq->lock, flags);
+               if (rq->rd) {
+                       BUG_ON(!cpu_isset(cpu, rq->rd->span));
+                       cpu_clear(cpu, rq->rd->online);
+               }
+               spin_unlock_irqrestore(&rq->lock, flags);
                break;
+#endif
        }
        return NOTIFY_OK;
 }
@@ -5816,11 +5848,76 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
        return 1;
 }
 
+static void rq_attach_root(struct rq *rq, struct root_domain *rd)
+{
+       unsigned long flags;
+       const struct sched_class *class;
+
+       spin_lock_irqsave(&rq->lock, flags);
+
+       if (rq->rd) {
+               struct root_domain *old_rd = rq->rd;
+
+               for (class = sched_class_highest; class; class = class->next) {
+                       if (class->leave_domain)
+                               class->leave_domain(rq);
+               }
+
+               cpu_clear(rq->cpu, old_rd->span);
+               cpu_clear(rq->cpu, old_rd->online);
+
+               if (atomic_dec_and_test(&old_rd->refcount))
+                       kfree(old_rd);
+       }
+
+       atomic_inc(&rd->refcount);
+       rq->rd = rd;
+
+       cpu_set(rq->cpu, rd->span);
+       if (cpu_isset(rq->cpu, cpu_online_map))
+               cpu_set(rq->cpu, rd->online);
+
+       for (class = sched_class_highest; class; class = class->next) {
+               if (class->join_domain)
+                       class->join_domain(rq);
+       }
+
+       spin_unlock_irqrestore(&rq->lock, flags);
+}
+
+static void init_rootdomain(struct root_domain *rd)
+{
+       memset(rd, 0, sizeof(*rd));
+
+       cpus_clear(rd->span);
+       cpus_clear(rd->online);
+}
+
+static void init_defrootdomain(void)
+{
+       init_rootdomain(&def_root_domain);
+       atomic_set(&def_root_domain.refcount, 1);
+}
+
+static struct root_domain *alloc_rootdomain(void)
+{
+       struct root_domain *rd;
+
+       rd = kmalloc(sizeof(*rd), GFP_KERNEL);
+       if (!rd)
+               return NULL;
+
+       init_rootdomain(rd);
+
+       return rd;
+}
+
 /*
- * Attach the domain 'sd' to 'cpu' as its base domain.  Callers must
+ * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
  * hold the hotplug lock.
  */
-static void cpu_attach_domain(struct sched_domain *sd, int cpu)
+static void
+cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
 {
        struct rq *rq = cpu_rq(cpu);
        struct sched_domain *tmp;
@@ -5845,6 +5942,7 @@ static void cpu_attach_domain(struct sched_domain *sd, int cpu)
 
        sched_domain_debug(sd, cpu);
 
+       rq_attach_root(rq, rd);
        rcu_assign_pointer(rq->sd, sd);
 }
 
@@ -6213,6 +6311,7 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
 static int build_sched_domains(const cpumask_t *cpu_map)
 {
        int i;
+       struct root_domain *rd;
 #ifdef CONFIG_NUMA
        struct sched_group **sched_group_nodes = NULL;
        int sd_allnodes = 0;
@@ -6229,6 +6328,12 @@ static int build_sched_domains(const cpumask_t *cpu_map)
        sched_group_nodes_bycpu[first_cpu(*cpu_map)] = sched_group_nodes;
 #endif
 
+       rd = alloc_rootdomain();
+       if (!rd) {
+               printk(KERN_WARNING "Cannot alloc root domain\n");
+               return -ENOMEM;
+       }
+
        /*
         * Set up domains for cpus specified by the cpu_map.
         */
@@ -6445,7 +6550,7 @@ static int build_sched_domains(const cpumask_t *cpu_map)
 #else
                sd = &per_cpu(phys_domains, i);
 #endif
-               cpu_attach_domain(sd, i);
+               cpu_attach_domain(sd, rd, i);
        }
 
        return 0;
@@ -6503,7 +6608,7 @@ static void detach_destroy_domains(const cpumask_t *cpu_map)
        unregister_sched_domain_sysctl();
 
        for_each_cpu_mask(i, *cpu_map)
-               cpu_attach_domain(NULL, i);
+               cpu_attach_domain(NULL, &def_root_domain, i);
        synchronize_sched();
        arch_destroy_sched_domains(cpu_map);
 }
@@ -6533,6 +6638,8 @@ void partition_sched_domains(int ndoms_new, cpumask_t *doms_new)
 {
        int i, j;
 
+       lock_doms_cur();
+
        /* always unregister in case we don't destroy any domains */
        unregister_sched_domain_sysctl();
 
@@ -6573,6 +6680,8 @@ match2:
        ndoms_cur = ndoms_new;
 
        register_sched_domain_sysctl();
+
+       unlock_doms_cur();
 }
 
 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
@@ -6580,10 +6689,10 @@ static int arch_reinit_sched_domains(void)
 {
        int err;
 
-       mutex_lock(&sched_hotcpu_mutex);
+       get_online_cpus();
        detach_destroy_domains(&cpu_online_map);
        err = arch_init_sched_domains(&cpu_online_map);
-       mutex_unlock(&sched_hotcpu_mutex);
+       put_online_cpus();
 
        return err;
 }
@@ -6694,12 +6803,12 @@ void __init sched_init_smp(void)
 {
        cpumask_t non_isolated_cpus;
 
-       mutex_lock(&sched_hotcpu_mutex);
+       get_online_cpus();
        arch_init_sched_domains(&cpu_online_map);
        cpus_andnot(non_isolated_cpus, cpu_possible_map, cpu_isolated_map);
        if (cpus_empty(non_isolated_cpus))
                cpu_set(smp_processor_id(), non_isolated_cpus);
-       mutex_unlock(&sched_hotcpu_mutex);
+       put_online_cpus();
        /* XXX: Theoretical race here - CPU may be hotplugged now */
        hotcpu_notifier(update_sched_domains, 0);
 
@@ -6707,6 +6816,21 @@ void __init sched_init_smp(void)
        if (set_cpus_allowed(current, non_isolated_cpus) < 0)
                BUG();
        sched_init_granularity();
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+       if (nr_cpu_ids == 1)
+               return;
+
+       lb_monitor_task = kthread_create(load_balance_monitor, NULL,
+                                        "group_balance");
+       if (!IS_ERR(lb_monitor_task)) {
+               lb_monitor_task->flags |= PF_NOFREEZE;
+               wake_up_process(lb_monitor_task);
+       } else {
+               printk(KERN_ERR "Could not create load balance monitor thread"
+                       " (error = %ld)\n", PTR_ERR(lb_monitor_task));
+       }
+#endif
 }
 #else
 void __init sched_init_smp(void)
@@ -6736,6 +6860,10 @@ void __init sched_init(void)
        int highest_cpu = 0;
        int i, j;
 
+#ifdef CONFIG_SMP
+       init_defrootdomain();
+#endif
+
        for_each_possible_cpu(i) {
                struct rt_prio_array *array;
                struct rq *rq;
@@ -6768,19 +6896,22 @@ void __init sched_init(void)
                        se->parent = NULL;
                }
                init_task_group.shares = init_task_group_load;
-               spin_lock_init(&init_task_group.lock);
 #endif
 
                for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
                        rq->cpu_load[j] = 0;
 #ifdef CONFIG_SMP
                rq->sd = NULL;
+               rq->rd = NULL;
                rq->active_balance = 0;
                rq->next_balance = jiffies;
                rq->push_cpu = 0;
                rq->cpu = i;
                rq->migration_thread = NULL;
                INIT_LIST_HEAD(&rq->migration_queue);
+               rq->rt.highest_prio = MAX_RT_PRIO;
+               rq->rt.overloaded = 0;
+               rq_attach_root(rq, &def_root_domain);
 #endif
                atomic_set(&rq->nr_iowait, 0);
 
@@ -6960,6 +7091,157 @@ void set_curr_task(int cpu, struct task_struct *p)
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
+#ifdef CONFIG_SMP
+/*
+ * Distribute the shares of all task groups among their schedulable entities,
+ * to reflect the load distribution across cpus.
+ */
+static int rebalance_shares(struct sched_domain *sd, int this_cpu)
+{
+       struct cfs_rq *cfs_rq;
+       struct rq *rq = cpu_rq(this_cpu);
+       cpumask_t sdspan = sd->span;
+       int balanced = 1;
+
+       /* Walk through all the task groups we have */
+       for_each_leaf_cfs_rq(rq, cfs_rq) {
+               int i;
+               unsigned long total_load = 0, total_shares;
+               struct task_group *tg = cfs_rq->tg;
+
+               /* Gather total task load of this group across cpus */
+               for_each_cpu_mask(i, sdspan)
+                       total_load += tg->cfs_rq[i]->load.weight;
+
+               /* Nothing to do if this group has no load */
+               if (!total_load)
+                       continue;
+
+               /*
+                * tg->shares represents the number of cpu shares the task
+                * group is eligible to hold on a single cpu. On N cpus, it
+                * is eligible to hold (N * tg->shares) cpu shares in total.
+                */
+               total_shares = tg->shares * cpus_weight(sdspan);
+
+               /*
+                * redistribute total_shares across cpus as per the task load
+                * distribution.
+                */
+               for_each_cpu_mask(i, sdspan) {
+                       unsigned long local_load, local_shares;
+
+                       local_load = tg->cfs_rq[i]->load.weight;
+                       local_shares = (local_load * total_shares) / total_load;
+                       if (!local_shares)
+                               local_shares = MIN_GROUP_SHARES;
+                       if (local_shares == tg->se[i]->load.weight)
+                               continue;
+
+                       spin_lock_irq(&cpu_rq(i)->lock);
+                       set_se_shares(tg->se[i], local_shares);
+                       spin_unlock_irq(&cpu_rq(i)->lock);
+                       balanced = 0;
+               }
+       }
+
+       return balanced;
+}
+
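To put numbers on the local_shares computation above (load values chosen purely for illustration): a group with tg->shares = 1000 in a two-cpu domain whose per-cpu loads are 600 and 300 ends up with roughly a 2:1 split of its doubled share pool, essentially the 1333/667 split used in the task_group comment (integer division gives 666 here).

#include <stdio.h>

/* Worked example of the local_shares computation in rebalance_shares() */
int main(void)
{
	unsigned long tg_shares = 1000;		/* tg->shares */
	unsigned long ncpus = 2;		/* cpus_weight(sdspan) */
	unsigned long load[2] = { 600, 300 };	/* tg->cfs_rq[i]->load.weight */
	unsigned long total_load = load[0] + load[1];
	unsigned long total_shares = tg_shares * ncpus;
	int i;

	for (i = 0; i < 2; i++)
		printf("cpu%d: local_shares = %lu\n", i,
		       (load[i] * total_shares) / total_load);

	return 0;	/* prints 1333 and 666 (0 would be clamped to MIN_GROUP_SHARES) */
}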
+/*
+ * How frequently should we rebalance_shares() across cpus?
+ *
+ * The more frequently we rebalance shares, the more accurate the fairness of
+ * cpu bandwidth distribution between task groups is. However, a higher
+ * frequency also implies increased scheduling overhead.
+ *
+ * sysctl_sched_min_bal_int_shares represents the minimum interval between
+ * consecutive calls to rebalance_shares() in the same sched domain.
+ *
+ * sysctl_sched_max_bal_int_shares represents the maximum interval between
+ * consecutive calls to rebalance_shares() in the same sched domain.
+ *
+ * These settings allow for an appropriate tradeoff between the accuracy of
+ * fairness and the associated overhead.
+ */
+
+/* default: 8ms, units: milliseconds */
+const_debug unsigned int sysctl_sched_min_bal_int_shares = 8;
+
+/* default: 128ms, units: milliseconds */
+const_debug unsigned int sysctl_sched_max_bal_int_shares = 128;
+
+/* kernel thread that runs rebalance_shares() periodically */
+static int load_balance_monitor(void *unused)
+{
+       unsigned int timeout = sysctl_sched_min_bal_int_shares;
+       struct sched_param schedparm;
+       int ret;
+
+       /*
+        * We don't want this thread's execution to be limited by the shares
+        * assigned to the default group (init_task_group), so make it run
+        * as a SCHED_RR task at the lowest RT priority.
+        */
+       schedparm.sched_priority = 1;
+       ret = sched_setscheduler(current, SCHED_RR, &schedparm);
+       if (ret)
+               printk(KERN_ERR "Couldn't set SCHED_RR policy for load balance"
+                               " monitor thread (error = %d) \n", ret);
+
+       while (!kthread_should_stop()) {
+               int i, cpu, balanced = 1;
+
+               /* Prevent cpus going down or coming up */
+               get_online_cpus();
+               /* lockout changes to doms_cur[] array */
+               lock_doms_cur();
+               /*
+                * Enter a rcu read-side critical section to safely walk rq->sd
+                * chain on various cpus and to walk task group list
+                * (rq->leaf_cfs_rq_list) in rebalance_shares().
+                */
+               rcu_read_lock();
+
+               for (i = 0; i < ndoms_cur; i++) {
+                       cpumask_t cpumap = doms_cur[i];
+                       struct sched_domain *sd = NULL, *sd_prev = NULL;
+
+                       cpu = first_cpu(cpumap);
+
+                       /* Find the highest domain at which to balance shares */
+                       for_each_domain(cpu, sd) {
+                               if (!(sd->flags & SD_LOAD_BALANCE))
+                                       continue;
+                               sd_prev = sd;
+                       }
+
+                       sd = sd_prev;
+                       /* sd == NULL? No load balancing required in this domain */
+                       if (!sd)
+                               continue;
+
+                       balanced &= rebalance_shares(sd, cpu);
+               }
+
+               rcu_read_unlock();
+
+               unlock_doms_cur();
+               put_online_cpus();
+
+               if (!balanced)
+                       timeout = sysctl_sched_min_bal_int_shares;
+               else if (timeout < sysctl_sched_max_bal_int_shares)
+                       timeout *= 2;
+
+               msleep_interruptible(timeout);
+       }
+
+       return 0;
+}
+#endif /* CONFIG_SMP */
+
 /* allocate runqueue etc for a new task group */
 struct task_group *sched_create_group(void)
 {
@@ -7008,14 +7290,15 @@ struct task_group *sched_create_group(void)
                se->parent = NULL;
        }
 
+       tg->shares = NICE_0_LOAD;
+
+       lock_task_group_list();
        for_each_possible_cpu(i) {
                rq = cpu_rq(i);
                cfs_rq = tg->cfs_rq[i];
                list_add_rcu(&cfs_rq->leaf_cfs_rq_list, &rq->leaf_cfs_rq_list);
        }
-
-       tg->shares = NICE_0_LOAD;
-       spin_lock_init(&tg->lock);
+       unlock_task_group_list();
 
        return tg;
 
@@ -7061,10 +7344,12 @@ void sched_destroy_group(struct task_group *tg)
        struct cfs_rq *cfs_rq = NULL;
        int i;
 
+       lock_task_group_list();
        for_each_possible_cpu(i) {
                cfs_rq = tg->cfs_rq[i];
                list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
        }
+       unlock_task_group_list();
 
        BUG_ON(!cfs_rq);
 
@@ -7113,49 +7398,79 @@ done:
        task_rq_unlock(rq, &flags);
 }
 
+/* Caller must hold the corresponding rq->lock */
 static void set_se_shares(struct sched_entity *se, unsigned long shares)
 {
        struct cfs_rq *cfs_rq = se->cfs_rq;
        struct rq *rq = cfs_rq->rq;
        int on_rq;
 
-       spin_lock_irq(&rq->lock);
+       if (!shares)
+               shares = MIN_GROUP_SHARES;
 
        on_rq = se->on_rq;
-       if (on_rq)
+       if (on_rq) {
                dequeue_entity(cfs_rq, se, 0);
+               dec_cpu_load(rq, se->load.weight);
+       }
 
        se->load.weight = shares;
        se->load.inv_weight = div64_64((1ULL<<32), shares);
 
-       if (on_rq)
+       if (on_rq) {
                enqueue_entity(cfs_rq, se, 0);
-
-       spin_unlock_irq(&rq->lock);
+               inc_cpu_load(rq, se->load.weight);
+       }
 }
 
 int sched_group_set_shares(struct task_group *tg, unsigned long shares)
 {
        int i;
+       struct cfs_rq *cfs_rq;
+       struct rq *rq;
+
+       lock_task_group_list();
+       if (tg->shares == shares)
+               goto done;
+
+       if (shares < MIN_GROUP_SHARES)
+               shares = MIN_GROUP_SHARES;
 
        /*
-        * A weight of 0 or 1 can cause arithmetics problems.
-        * (The default weight is 1024 - so there's no practical
-        *  limitation from this.)
+        * Prevent any load balance activity (rebalance_shares,
+        * load_balance_fair) from referring to this group first,
+        * by taking it off the rq->leaf_cfs_rq_list on each cpu.
         */
-       if (shares < 2)
-               shares = 2;
+       for_each_possible_cpu(i) {
+               cfs_rq = tg->cfs_rq[i];
+               list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
+       }
 
-       spin_lock(&tg->lock);
-       if (tg->shares == shares)
-               goto done;
+       /* wait for any ongoing reference to this group to finish */
+       synchronize_sched();
 
+       /*
+        * Now we are free to modify the group's share on each cpu
+        * w/o tripping rebalance_share or load_balance_fair.
+        */
        tg->shares = shares;
-       for_each_possible_cpu(i)
+       for_each_possible_cpu(i) {
+               spin_lock_irq(&cpu_rq(i)->lock);
                set_se_shares(tg->se[i], shares);
+               spin_unlock_irq(&cpu_rq(i)->lock);
+       }
 
+       /*
+        * Enable load balance activity on this group, by inserting it back on
+        * each cpu's rq->leaf_cfs_rq_list.
+        */
+       for_each_possible_cpu(i) {
+               rq = cpu_rq(i);
+               cfs_rq = tg->cfs_rq[i];
+               list_add_rcu(&cfs_rq->leaf_cfs_rq_list, &rq->leaf_cfs_rq_list);
+       }
 done:
-       spin_unlock(&tg->lock);
+       unlock_task_group_list();
        return 0;
 }
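Finally, a hedged usage sketch of the group-shares interface as it stands after this patch (kernel context assumed, error handling trimmed):

/* Illustrative only: create a group and give it twice the default weight */
static struct task_group *example_make_busy_group(void)
{
	struct task_group *tg = sched_create_group();

	if (IS_ERR(tg))
		return tg;

	/* values below MIN_GROUP_SHARES are clamped by sched_group_set_shares() */
	sched_group_set_shares(tg, 2 * NICE_0_LOAD);
	return tg;
}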