sched: group scheduler, fix fairness of cpu bandwidth allocation for task groups
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 2f16e15c022ca169013c6c62b9f946a2c0111a57..5c208e090ae427de8c9f5dd4240e2c29c344b12f 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -351,6 +351,12 @@ static void update_curr(struct cfs_rq *cfs_rq)
 
        __update_curr(cfs_rq, curr, delta_exec);
        curr->exec_start = now;
+
+       if (entity_is_task(curr)) {
+               struct task_struct *curtask = task_of(curr);
+
+               cpuacct_charge(curtask, delta_exec);
+       }
 }
 
 static inline void
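
When the cpuacct controller is compiled in, cpuacct_charge() credits the
delta_exec just computed above to the running task's CPU-accounting control
group, so per-group usage statistics stay in step with CFS's own clock. A
minimal sketch of that accounting pattern, using a simplified hypothetical
structure (the kernel's real implementation lives in kernel/sched.c):

	/*
	 * Simplified sketch, not the kernel's actual cpuacct: each group
	 * keeps one runtime counter per cpu, and charging a task adds its
	 * latest delta_exec (in nanoseconds) to its group's counter.
	 */
	struct cpuacct_sketch {
		u64 cpuusage[NR_CPUS];
	};

	static void cpuacct_charge_sketch(struct cpuacct_sketch *ca,
					  int cpu, u64 delta_exec)
	{
		ca->cpuusage[cpu] += delta_exec;	/* locking omitted */
	}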
@@ -505,8 +511,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 
        if (!initial) {
                /* sleeps up to a single latency don't count. */
-               if (sched_feat(NEW_FAIR_SLEEPERS) && entity_is_task(se) &&
-                               task_of(se)->policy != SCHED_BATCH)
+               if (sched_feat(NEW_FAIR_SLEEPERS) && entity_is_task(se))
                        vruntime -= sysctl_sched_latency;
 
                /* ensure we never gain time by being placed backwards. */
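
As a worked example of the placement above (hypothetical values): with
sysctl_sched_latency set to 20 ms and cfs_rq->min_vruntime at V, a task
waking from sleep is placed at roughly V - 20 ms of vruntime, i.e. it
receives at most one latency period of credit; the "never gain time" clamp
above then takes the maximum with se->vruntime, so an entity that ran
recently cannot be moved backwards. The change itself simply stops exempting
SCHED_BATCH tasks from this sleeper credit.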
@@ -685,7 +690,7 @@ static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
 
 /* Iterate through all leaf cfs_rq's on a runqueue */
 #define for_each_leaf_cfs_rq(rq, cfs_rq) \
-       list_for_each_entry(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
+       list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
 
 /* Do the two (enqueued) entities belong to the same group ? */
 static inline int
@@ -702,6 +707,8 @@ static inline struct sched_entity *parent_entity(struct sched_entity *se)
        return se->parent;
 }
 
+#define GROUP_IMBALANCE_PCT    20
+
 #else  /* CONFIG_FAIR_GROUP_SCHED */
 
 #define for_each_sched_entity(se) \
@@ -755,15 +762,26 @@ static inline struct sched_entity *parent_entity(struct sched_entity *se)
 static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
 {
        struct cfs_rq *cfs_rq;
-       struct sched_entity *se = &p->se;
+       struct sched_entity *se = &p->se,
+                           *topse = NULL;      /* Highest schedulable entity */
+       int incload = 1;
 
        for_each_sched_entity(se) {
-               if (se->on_rq)
+               topse = se;
+               if (se->on_rq) {
+                       incload = 0;
                        break;
+               }
                cfs_rq = cfs_rq_of(se);
                enqueue_entity(cfs_rq, se, wakeup);
                wakeup = 1;
        }
+       /* Increment cpu load if we just enqueued the first task of a group on
+        * 'rq->cpu'. 'topse' represents the group to which task 'p' belongs
+        * at the highest grouping level.
+        */
+       if (incload)
+               inc_cpu_load(rq, topse->load.weight);
 }
 
 /*
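
To trace the new load accounting with hypothetical numbers: suppose a group
entity of weight 2048 has no runnable tasks on this cpu and one of its tasks
wakes up. No level of the hierarchy is found on_rq, so the loop enqueues
every level, incload stays 1, topse ends up as the top-level group entity,
and inc_cpu_load() adds its weight of 2048 to the runqueue load. A second
wakeup in the same group finds the group entity already on_rq, incload is
cleared, and the group's weight is not double-counted. Without
CONFIG_FAIR_GROUP_SCHED, topse is the task itself and the old per-task
accounting is unchanged.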
@@ -774,16 +792,28 @@ static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
 static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
 {
        struct cfs_rq *cfs_rq;
-       struct sched_entity *se = &p->se;
+       struct sched_entity *se = &p->se,
+                           *topse = NULL;      /* Highest schedulable entity */
+       int decload = 1;
 
        for_each_sched_entity(se) {
+               topse = se;
                cfs_rq = cfs_rq_of(se);
                dequeue_entity(cfs_rq, se, sleep);
                /* Don't dequeue parent if it has other entities besides us */
-               if (cfs_rq->load.weight)
+               if (cfs_rq->load.weight) {
+                       if (parent_entity(se))
+                               decload = 0;
                        break;
+               }
                sleep = 1;
        }
+       /* Decrement cpu load if we just dequeued the last task of a group on
+        * 'rq->cpu'. 'topse' represents the group to which task 'p' belongs
+        * at the highest grouping level.
+        */
+       if (decload)
+               dec_cpu_load(rq, topse->load.weight);
 }
 
 /*
@@ -793,8 +823,9 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
  */
 static void yield_task_fair(struct rq *rq)
 {
-       struct cfs_rq *cfs_rq = task_cfs_rq(rq->curr);
-       struct sched_entity *rightmost, *se = &rq->curr->se;
+       struct task_struct *curr = rq->curr;
+       struct cfs_rq *cfs_rq = task_cfs_rq(curr);
+       struct sched_entity *rightmost, *se = &curr->se;
 
        /*
         * Are we the only task in the tree?
@@ -802,7 +833,7 @@ static void yield_task_fair(struct rq *rq)
        if (unlikely(cfs_rq->nr_running == 1))
                return;
 
-       if (likely(!sysctl_sched_compat_yield)) {
+       if (likely(!sysctl_sched_compat_yield) && curr->policy != SCHED_BATCH) {
                __update_rq_clock(rq);
                /*
                 * Update run-time statistics of the 'current'.
@@ -938,25 +969,6 @@ static struct task_struct *load_balance_next_fair(void *arg)
        return __load_balance_iterator(cfs_rq, cfs_rq->rb_load_balance_curr);
 }
 
-#ifdef CONFIG_FAIR_GROUP_SCHED
-static int cfs_rq_best_prio(struct cfs_rq *cfs_rq)
-{
-       struct sched_entity *curr;
-       struct task_struct *p;
-
-       if (!cfs_rq->nr_running)
-               return MAX_PRIO;
-
-       curr = cfs_rq->curr;
-       if (!curr)
-               curr = __pick_next_entity(cfs_rq);
-
-       p = task_of(curr);
-
-       return p->prio;
-}
-#endif
-
 static unsigned long
 load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
                  unsigned long max_load_move,
@@ -966,28 +978,45 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
        struct cfs_rq *busy_cfs_rq;
        long rem_load_move = max_load_move;
        struct rq_iterator cfs_rq_iterator;
+       unsigned long load_moved;
 
        cfs_rq_iterator.start = load_balance_start_fair;
        cfs_rq_iterator.next = load_balance_next_fair;
 
        for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
 #ifdef CONFIG_FAIR_GROUP_SCHED
-               struct cfs_rq *this_cfs_rq;
-               long imbalance;
-               unsigned long maxload;
+               struct cfs_rq *this_cfs_rq = busy_cfs_rq->tg->cfs_rq[this_cpu];
+               unsigned long maxload, task_load, group_weight;
+               unsigned long thisload, per_task_load;
+               struct sched_entity *se = busy_cfs_rq->tg->se[busiest->cpu];
 
-               this_cfs_rq = cpu_cfs_rq(busy_cfs_rq, this_cpu);
+               task_load = busy_cfs_rq->load.weight;
+               group_weight = se->load.weight;
 
-               imbalance = busy_cfs_rq->load.weight - this_cfs_rq->load.weight;
-               /* Don't pull if this_cfs_rq has more load than busy_cfs_rq */
-               if (imbalance <= 0)
+               /*
+                * The group entity's weight 'group_weight' represents tasks
+                * of total weight 'task_load'. To move 'rem_load_move' worth
+                * of group weight, we need to move at most a task load of:
+                *
+                *      maxload = (rem_load_move / group_weight) * task_load;
+                */
+               maxload = (rem_load_move * task_load) / group_weight;
+
+               if (!maxload || !task_load)
                        continue;
 
-               /* Don't pull more than imbalance/2 */
-               imbalance /= 2;
-               maxload = min(rem_load_move, imbalance);
+               per_task_load = task_load / busy_cfs_rq->nr_running;
+               /*
+                * balance_tasks() will try to forcibly move at least one task
+                * if possible (because of SCHED_LOAD_SCALE_FUZZ). Avoid that if
+                * maxload is less than GROUP_IMBALANCE_PCT% of per_task_load.
+                */
+               if (100 * maxload < GROUP_IMBALANCE_PCT * per_task_load)
+                       continue;
 
-               *this_best_prio = cfs_rq_best_prio(this_cfs_rq);
+               /* Disable priority-based load balance */
+               *this_best_prio = 0;
+               thisload = this_cfs_rq->load.weight;
 #else
 # define maxload rem_load_move
 #endif
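
The scaling above is easiest to sanity-check with concrete numbers. Below is
a small stand-alone user-space sketch of the same arithmetic; the weights are
hypothetical and only the expressions mirror the kernel code:

	#include <stdio.h>

	#define GROUP_IMBALANCE_PCT	20

	int main(void)
	{
		/*
		 * Hypothetical example: a group entity of weight 1024 backed
		 * by four tasks of weight 1024 each, with 512 units of group
		 * weight still to be moved.
		 */
		unsigned long rem_load_move = 512;	/* group weight left to move */
		unsigned long group_weight = 1024;	/* se->load.weight */
		unsigned long task_load = 4096;		/* busy_cfs_rq->load.weight */
		unsigned long nr_running = 4;

		unsigned long maxload = rem_load_move * task_load / group_weight;
		unsigned long per_task_load = task_load / nr_running;

		/* moving 2048 of task load moves 512 of group weight */
		printf("maxload = %lu, per_task_load = %lu\n",
		       maxload, per_task_load);

		if (100 * maxload < GROUP_IMBALANCE_PCT * per_task_load)
			printf("too small a move, skip this group\n");
		else
			printf("pull up to %lu of task load\n", maxload);
		return 0;
	}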
@@ -996,11 +1025,33 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
                 * load_balance_[start|next]_fair iterators
                 */
                cfs_rq_iterator.arg = busy_cfs_rq;
-               rem_load_move -= balance_tasks(this_rq, this_cpu, busiest,
+               load_moved = balance_tasks(this_rq, this_cpu, busiest,
                                               maxload, sd, idle, all_pinned,
                                               this_best_prio,
                                               &cfs_rq_iterator);
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
+               /*
+                * load_moved holds the task load that was moved. The
+                * effective (group) weight moved would be:
+                *      load_moved_eff = (load_moved / task_load) * group_weight;
+                */
+               load_moved = (group_weight * load_moved) / task_load;
+
+               /* Adjust shares on both cpus to reflect load_moved */
+               group_weight -= load_moved;
+               set_se_shares(se, group_weight);
+
+               se = busy_cfs_rq->tg->se[this_cpu];
+               if (!thisload)
+                       group_weight = load_moved;
+               else
+                       group_weight = se->load.weight + load_moved;
+               set_se_shares(se, group_weight);
+#endif
+
+               rem_load_move -= load_moved;
+
                if (rem_load_move <= 0)
                        break;
        }
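
Continuing the hypothetical numbers from the sketch above: if balance_tasks()
actually moves one task of weight 1024, the group weight moved is
(1024 / 4096) * 1024 = 256. The group entity on the busiest cpu is reshared
from 1024 down to 768, the entity on this cpu gains 256 (or starts at 256
when thisload was zero), and rem_load_move shrinks by 256 rather than by the
raw task load, so the group's total share across cpus stays constant.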
@@ -1126,7 +1177,9 @@ static void print_cfs_stats(struct seq_file *m, int cpu)
 #ifdef CONFIG_FAIR_GROUP_SCHED
        print_cfs_rq(m, cpu, &cpu_rq(cpu)->cfs);
 #endif
+       lock_task_group_list();
        for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
                print_cfs_rq(m, cpu, cfs_rq);
+       unlock_task_group_list();
 }
 #endif