sched: trivial sched_features cleanup
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index c2730a5a4f056c04526cb56008b772fe26712072..8ae3416e0bb419ecc9fcb6ad5bf3cb699e1cf504 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -12,6 +12,9 @@ static inline int rt_overloaded(struct rq *rq)
 
 static inline void rt_set_overload(struct rq *rq)
 {
+       if (!rq->online)
+               return;
+
        cpu_set(rq->cpu, rq->rd->rto_mask);
        /*
         * Make sure the mask is visible before we set
@@ -26,6 +29,9 @@ static inline void rt_set_overload(struct rq *rq)
 
 static inline void rt_clear_overload(struct rq *rq)
 {
+       if (!rq->online)
+               return;
+
        /* the order here really doesn't matter */
        atomic_dec(&rq->rd->rto_count);
        cpu_clear(rq->cpu, rq->rd->rto_mask);
@@ -280,6 +286,9 @@ static int balance_runtime(struct rt_rq *rt_rq)
                        continue;
 
                spin_lock(&iter->rt_runtime_lock);
+               if (iter->rt_runtime == RUNTIME_INF)
+                       goto next;
+
                diff = iter->rt_runtime - iter->rt_time;
                if (diff > 0) {
                        do_div(diff, weight);
@@ -293,12 +302,105 @@ static int balance_runtime(struct rt_rq *rt_rq)
                                break;
                        }
                }
+next:
                spin_unlock(&iter->rt_runtime_lock);
        }
        spin_unlock(&rt_b->rt_runtime_lock);
 
        return more;
 }
+
+static void __disable_runtime(struct rq *rq)
+{
+       struct root_domain *rd = rq->rd;
+       struct rt_rq *rt_rq;
+
+       if (unlikely(!scheduler_running))
+               return;
+
+       for_each_leaf_rt_rq(rt_rq, rq) {
+               struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
+               s64 want;
+               int i;
+
+               spin_lock(&rt_b->rt_runtime_lock);
+               spin_lock(&rt_rq->rt_runtime_lock);
+               if (rt_rq->rt_runtime == RUNTIME_INF ||
+                               rt_rq->rt_runtime == rt_b->rt_runtime)
+                       goto balanced;
+               spin_unlock(&rt_rq->rt_runtime_lock);
+
+               want = rt_b->rt_runtime - rt_rq->rt_runtime;
+
+               for_each_cpu_mask(i, rd->span) {
+                       struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
+                       s64 diff;
+
+                       if (iter == rt_rq)
+                               continue;
+
+                       spin_lock(&iter->rt_runtime_lock);
+                       if (want > 0) {
+                               diff = min_t(s64, iter->rt_runtime, want);
+                               iter->rt_runtime -= diff;
+                               want -= diff;
+                       } else {
+                               iter->rt_runtime -= want;
+                               want -= want;
+                       }
+                       spin_unlock(&iter->rt_runtime_lock);
+
+                       if (!want)
+                               break;
+               }
+
+               spin_lock(&rt_rq->rt_runtime_lock);
+               BUG_ON(want);
+balanced:
+               rt_rq->rt_runtime = RUNTIME_INF;
+               spin_unlock(&rt_rq->rt_runtime_lock);
+               spin_unlock(&rt_b->rt_runtime_lock);
+       }
+}
+
+static void disable_runtime(struct rq *rq)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&rq->lock, flags);
+       __disable_runtime(rq);
+       spin_unlock_irqrestore(&rq->lock, flags);
+}
+
+static void __enable_runtime(struct rq *rq)
+{
+       struct root_domain *rd = rq->rd;
+       struct rt_rq *rt_rq;
+
+       if (unlikely(!scheduler_running))
+               return;
+
+       for_each_leaf_rt_rq(rt_rq, rq) {
+               struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
+
+               spin_lock(&rt_b->rt_runtime_lock);
+               spin_lock(&rt_rq->rt_runtime_lock);
+               rt_rq->rt_runtime = rt_b->rt_runtime;
+               rt_rq->rt_time = 0;
+               spin_unlock(&rt_rq->rt_runtime_lock);
+               spin_unlock(&rt_b->rt_runtime_lock);
+       }
+}
+
+static void enable_runtime(struct rq *rq)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&rq->lock, flags);
+       __enable_runtime(rq);
+       spin_unlock_irqrestore(&rq->lock, flags);
+}
+
 #endif
 
 static inline int rt_se_prio(struct sched_rt_entity *rt_se)
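
The __disable_runtime()/__enable_runtime() pair added above settles the runtime a runqueue has traded with its siblings before the CPU leaves the root domain. Stripped of the locking and of the rt_rq/rt_bandwidth structures, the redistribution amounts to the little userspace simulation below; the array layout, the disable_runtime_sim() name and the sample numbers are assumptions made for illustration, not kernel API.

/*
 * Illustrative sketch of the __disable_runtime() settlement: a CPU going
 * offline reclaims the runtime it lent out (or hands back what it borrowed)
 * and then marks itself RUNTIME_INF so throttling ignores it.
 */
#include <stdio.h>
#include <stdint.h>

#define NCPUS           4
#define RUNTIME_INF     ((int64_t)-1)

static int64_t base_runtime = 950000;   /* per-CPU base budget (rt_b->rt_runtime) */
static int64_t runtime[NCPUS];          /* current budget (rt_rq->rt_runtime)     */

static void disable_runtime_sim(int cpu)
{
        int64_t want;
        int i;

        /* already unconstrained, or never traded runtime: nothing to settle */
        if (runtime[cpu] == RUNTIME_INF || runtime[cpu] == base_runtime)
                goto balanced;

        /* positive: we lent runtime out; negative: we borrowed some */
        want = base_runtime - runtime[cpu];

        for (i = 0; i < NCPUS && want; i++) {
                int64_t diff;

                if (i == cpu)
                        continue;

                if (want > 0) {
                        diff = runtime[i] < want ? runtime[i] : want;
                        runtime[i] -= diff;     /* take our runtime back */
                        want -= diff;
                } else {
                        runtime[i] -= want;     /* hand the surplus over */
                        want = 0;
                }
        }
balanced:
        /* an offline CPU no longer takes part in RT throttling */
        runtime[cpu] = RUNTIME_INF;
}

int main(void)
{
        int i;

        runtime[0] = 750000;            /* cpu0 lent 200000 to cpu1 earlier */
        runtime[1] = 1150000;
        runtime[2] = base_runtime;
        runtime[3] = base_runtime;

        disable_runtime_sim(0);

        for (i = 0; i < NCPUS; i++)
                printf("cpu%d: %lld\n", i, (long long)runtime[i]);
        return 0;
}

__enable_runtime() is the simpler inverse: when the CPU comes back online, every leaf rt_rq is reset to its base budget and its accumulated rt_time is cleared.
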
@@ -328,14 +430,13 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
 
 #ifdef CONFIG_SMP
        if (rt_rq->rt_time > runtime) {
-               int more;
-
                spin_unlock(&rt_rq->rt_runtime_lock);
-               more = balance_runtime(rt_rq);
+               balance_runtime(rt_rq);
                spin_lock(&rt_rq->rt_runtime_lock);
 
-               if (more)
-                       runtime = sched_rt_runtime(rt_rq);
+               runtime = sched_rt_runtime(rt_rq);
+               if (runtime == RUNTIME_INF)
+                       return 0;
        }
 #endif
 
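
The hunk above reworks sched_rt_runtime_exceeded() to re-read the runtime after balancing instead of trusting balance_runtime()'s return value, and to treat RUNTIME_INF as "never throttle". A compact sketch of that decision, with the borrowing step stubbed out (runtime_exceeded_sim() and try_borrow() are invented helpers, not kernel calls):

/*
 * Sketch of the reworked throttle test: RUNTIME_INF short-circuits
 * throttling even when it only appears after borrowing from peers.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RUNTIME_INF     ((uint64_t)~0ULL)

/* pretend a neighbouring CPU lends us 100us of budget */
static uint64_t try_borrow(uint64_t runtime)
{
        return runtime + 100000;
}

static bool runtime_exceeded_sim(uint64_t rt_time, uint64_t runtime)
{
        if (runtime == RUNTIME_INF)
                return false;                   /* throttling disabled */

        if (rt_time > runtime) {
                runtime = try_borrow(runtime);  /* balance_runtime() step */
                if (runtime == RUNTIME_INF)
                        return false;           /* became unconstrained  */
        }

        return rt_time > runtime;               /* still over budget?    */
}

int main(void)
{
        printf("%d\n", runtime_exceeded_sim(950000, 900000));  /* 0: borrow saved us */
        printf("%d\n", runtime_exceeded_sim(1200000, 900000)); /* 1: throttle        */
        return 0;
}
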
@@ -391,12 +492,21 @@ void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
        WARN_ON(!rt_prio(rt_se_prio(rt_se)));
        rt_rq->rt_nr_running++;
 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
-       if (rt_se_prio(rt_se) < rt_rq->highest_prio)
+       if (rt_se_prio(rt_se) < rt_rq->highest_prio) {
+               struct rq *rq = rq_of_rt_rq(rt_rq);
+
                rt_rq->highest_prio = rt_se_prio(rt_se);
+#ifdef CONFIG_SMP
+               if (rq->online)
+                       cpupri_set(&rq->rd->cpupri, rq->cpu,
+                                  rt_se_prio(rt_se));
+#endif
+       }
 #endif
 #ifdef CONFIG_SMP
        if (rt_se->nr_cpus_allowed > 1) {
                struct rq *rq = rq_of_rt_rq(rt_rq);
+
                rq->rt.rt_nr_migratory++;
        }
 
@@ -416,6 +526,10 @@ void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 static inline
 void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 {
+#ifdef CONFIG_SMP
+       int highest_prio = rt_rq->highest_prio;
+#endif
+
        WARN_ON(!rt_prio(rt_se_prio(rt_se)));
        WARN_ON(!rt_rq->rt_nr_running);
        rt_rq->rt_nr_running--;
@@ -439,6 +553,14 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
                rq->rt.rt_nr_migratory--;
        }
 
+       if (rt_rq->highest_prio != highest_prio) {
+               struct rq *rq = rq_of_rt_rq(rt_rq);
+
+               if (rq->online)
+                       cpupri_set(&rq->rd->cpupri, rq->cpu,
+                                  rt_rq->highest_prio);
+       }
+
        update_rt_migration(rq_of_rt_rq(rt_rq));
 #endif /* CONFIG_SMP */
 #ifdef CONFIG_RT_GROUP_SCHED
@@ -458,7 +580,13 @@ static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
        if (group_rq && rt_rq_throttled(group_rq))
                return;
 
-       list_add_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
+       if (rt_se->nr_cpus_allowed == 1)
+               list_add_tail(&rt_se->run_list,
+                             array->xqueue + rt_se_prio(rt_se));
+       else
+               list_add_tail(&rt_se->run_list,
+                             array->squeue + rt_se_prio(rt_se));
+
        __set_bit(rt_se_prio(rt_se), array->bitmap);
 
        inc_rt_tasks(rt_se, rt_rq);
@@ -470,7 +598,8 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
        struct rt_prio_array *array = &rt_rq->active;
 
        list_del_init(&rt_se->run_list);
-       if (list_empty(array->queue + rt_se_prio(rt_se)))
+       if (list_empty(array->squeue + rt_se_prio(rt_se))
+           && list_empty(array->xqueue + rt_se_prio(rt_se)))
                __clear_bit(rt_se_prio(rt_se), array->bitmap);
 
        dec_rt_tasks(rt_se, rt_rq);
@@ -513,8 +642,6 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
         */
        for_each_sched_rt_entity(rt_se)
                enqueue_rt_entity(rt_se);
-
-       inc_cpu_load(rq, p->se.load.weight);
 }
 
 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
@@ -534,20 +661,24 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
                if (rt_rq && rt_rq->rt_nr_running)
                        enqueue_rt_entity(rt_se);
        }
-
-       dec_cpu_load(rq, p->se.load.weight);
 }
 
 /*
  * Put task to the end of the run list without the overhead of dequeue
  * followed by enqueue.
+ *
+ * Note: We always enqueue the task to the shared-queue, regardless of its
+ * previous position w.r.t. exclusive vs shared.  This is so that exclusive RR
+ * tasks fairly round-robin with all tasks on the runqueue, not just other
+ * exclusive tasks.
  */
 static
 void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
 {
        struct rt_prio_array *array = &rt_rq->active;
 
-       list_move_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
+       list_del_init(&rt_se->run_list);
+       list_add_tail(&rt_se->run_list, array->squeue + rt_se_prio(rt_se));
 }
 
 static void requeue_task_rt(struct rq *rq, struct task_struct *p)
@@ -605,13 +736,46 @@ static int select_task_rq_rt(struct task_struct *p, int sync)
 }
 #endif /* CONFIG_SMP */
 
+static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
+                                                  struct rt_rq *rt_rq);
+
 /*
  * Preempt the current task with a newly woken task if needed:
  */
 static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
 {
-       if (p->prio < rq->curr->prio)
+       if (p->prio < rq->curr->prio) {
                resched_task(rq->curr);
+               return;
+       }
+
+#ifdef CONFIG_SMP
+       /*
+        * If:
+        *
+        * - the newly woken task is of equal priority to the current task
+        * - the newly woken task is non-migratable while current is migratable
+        * - current will be preempted on the next reschedule
+        *
+        * we should check to see if current can readily move to a different
+        * cpu.  If so, we will reschedule to allow the push logic to try
+        * to move current somewhere else, making room for our non-migratable
+        * task.
+        */
+       if ((p->prio == rq->curr->prio)
+          && p->rt.nr_cpus_allowed == 1
+          && rq->curr->rt.nr_cpus_allowed != 1
+          && pick_next_rt_entity(rq, &rq->rt) != &rq->curr->rt) {
+               cpumask_t mask;
+
+               if (cpupri_find(&rq->rd->cpupri, rq->curr, &mask))
+                       /*
+                        * There appears to be other cpus that can accept
+                        * current, so let's reschedule to try and push it away
+                        */
+                       resched_task(rq->curr);
+       }
+#endif
 }
 
 static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
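
The equal-priority branch added to check_preempt_curr_rt() is really a five-part predicate: same priority, waker pinned to this CPU, current free to move, current about to lose the next pick, and some other CPU able to take current. Pulled out of the kernel context it might read like the sketch below; struct fake_task and the helpers are invented for illustration, with can_migrate_elsewhere() standing in for cpupri_find().

/* Sketch of the "make room for a pinned waker" reschedule decision. */
#include <stdbool.h>
#include <stdio.h>

struct fake_task {
        int prio;               /* lower value means higher priority */
        int nr_cpus_allowed;
};

/* is there some other CPU that could take 'curr'? (cpupri_find() stand-in) */
static bool can_migrate_elsewhere(const struct fake_task *curr)
{
        return curr->nr_cpus_allowed > 1;
}

static bool should_resched_for(const struct fake_task *woken,
                               const struct fake_task *curr,
                               bool curr_loses_next_pick)
{
        if (woken->prio < curr->prio)
                return true;                    /* ordinary preemption */

        return woken->prio == curr->prio &&
               woken->nr_cpus_allowed == 1 &&   /* woken task is pinned here  */
               curr->nr_cpus_allowed != 1 &&    /* current could be pushed    */
               curr_loses_next_pick &&          /* current won't be re-picked */
               can_migrate_elsewhere(curr);     /* and has somewhere to go    */
}

int main(void)
{
        struct fake_task pinned  = { .prio = 50, .nr_cpus_allowed = 1 };
        struct fake_task movable = { .prio = 50, .nr_cpus_allowed = 4 };

        /* equal priority, waker pinned, current movable: reschedule and push */
        printf("resched: %d\n", should_resched_for(&pinned, &movable, true));
        return 0;
}
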
@@ -625,8 +789,15 @@ static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
        idx = sched_find_first_bit(array->bitmap);
        BUG_ON(idx >= MAX_RT_PRIO);
 
-       queue = array->queue + idx;
-       next = list_entry(queue->next, struct sched_rt_entity, run_list);
+       queue = array->xqueue + idx;
+       if (!list_empty(queue))
+               next = list_entry(queue->next, struct sched_rt_entity,
+                                 run_list);
+       else {
+               queue = array->squeue + idx;
+               next = list_entry(queue->next, struct sched_rt_entity,
+                                 run_list);
+       }
 
        return next;
 }
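
Together with the enqueue change earlier in the patch, pick_next_rt_entity() now expresses the policy that, at equal priority, a task pinned to this CPU (xqueue) is chosen ahead of one that could run elsewhere (squeue), leaving the push logic to move the migratable task away. A minimal userspace sketch of that pick order; struct prio_array_sim and its string arrays are stand-ins for the kernel's bitmap plus list_head queues, not the real data structures.

/* Sketch of the exclusive-before-shared pick order. */
#include <stdio.h>
#include <string.h>

#define MAX_RT_PRIO     100
#define QLEN            8

struct prio_array_sim {
        const char *xqueue[MAX_RT_PRIO][QLEN];  /* tasks pinned to this CPU */
        const char *squeue[MAX_RT_PRIO][QLEN];  /* tasks free to migrate    */
};

static const char *pick_next(const struct prio_array_sim *a)
{
        int prio;

        for (prio = 0; prio < MAX_RT_PRIO; prio++) {
                /* at a given priority, a pinned task beats a migratable one */
                if (a->xqueue[prio][0])
                        return a->xqueue[prio][0];
                if (a->squeue[prio][0])
                        return a->squeue[prio][0];
        }
        return NULL;
}

int main(void)
{
        struct prio_array_sim a;

        memset(&a, 0, sizeof(a));
        a.squeue[50][0] = "migratable";
        a.xqueue[50][0] = "pinned";

        printf("next: %s\n", pick_next(&a));    /* prints "pinned" */
        return 0;
}
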
@@ -696,7 +867,7 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
                        continue;
                if (next && next->prio < idx)
                        continue;
-               list_for_each_entry(rt_se, array->queue + idx, run_list) {
+               list_for_each_entry(rt_se, array->squeue + idx, run_list) {
                        struct task_struct *p = rt_task_of(rt_se);
                        if (pick_rt_task(rq, p, cpu)) {
                                next = p;
@@ -714,73 +885,6 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
 
 static DEFINE_PER_CPU(cpumask_t, local_cpu_mask);
 
-static int find_lowest_cpus(struct task_struct *task, cpumask_t *lowest_mask)
-{
-       int       lowest_prio = -1;
-       int       lowest_cpu  = -1;
-       int       count       = 0;
-       int       cpu;
-
-       cpus_and(*lowest_mask, task_rq(task)->rd->online, task->cpus_allowed);
-
-       /*
-        * Scan each rq for the lowest prio.
-        */
-       for_each_cpu_mask(cpu, *lowest_mask) {
-               struct rq *rq = cpu_rq(cpu);
-
-               /* We look for lowest RT prio or non-rt CPU */
-               if (rq->rt.highest_prio >= MAX_RT_PRIO) {
-                       /*
-                        * if we already found a low RT queue
-                        * and now we found this non-rt queue
-                        * clear the mask and set our bit.
-                        * Otherwise just return the queue as is
-                        * and the count==1 will cause the algorithm
-                        * to use the first bit found.
-                        */
-                       if (lowest_cpu != -1) {
-                               cpus_clear(*lowest_mask);
-                               cpu_set(rq->cpu, *lowest_mask);
-                       }
-                       return 1;
-               }
-
-               /* no locking for now */
-               if ((rq->rt.highest_prio > task->prio)
-                   && (rq->rt.highest_prio >= lowest_prio)) {
-                       if (rq->rt.highest_prio > lowest_prio) {
-                               /* new low - clear old data */
-                               lowest_prio = rq->rt.highest_prio;
-                               lowest_cpu = cpu;
-                               count = 0;
-                       }
-                       count++;
-               } else
-                       cpu_clear(cpu, *lowest_mask);
-       }
-
-       /*
-        * Clear out all the set bits that represent
-        * runqueues that were of higher prio than
-        * the lowest_prio.
-        */
-       if (lowest_cpu > 0) {
-               /*
-                * Perhaps we could add another cpumask op to
-                * zero out bits. Like cpu_zero_bits(cpumask, nrbits);
-                * Then that could be optimized to use memset and such.
-                */
-               for_each_cpu_mask(cpu, *lowest_mask) {
-                       if (cpu >= lowest_cpu)
-                               break;
-                       cpu_clear(cpu, *lowest_mask);
-               }
-       }
-
-       return count;
-}
-
 static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
 {
        int first;
@@ -802,17 +906,12 @@ static int find_lowest_rq(struct task_struct *task)
        cpumask_t *lowest_mask = &__get_cpu_var(local_cpu_mask);
        int this_cpu = smp_processor_id();
        int cpu      = task_cpu(task);
-       int count    = find_lowest_cpus(task, lowest_mask);
 
-       if (!count)
-               return -1; /* No targets found */
+       if (task->rt.nr_cpus_allowed == 1)
+               return -1; /* No other targets possible */
 
-       /*
-        * There is no sense in performing an optimal search if only one
-        * target is found.
-        */
-       if (count == 1)
-               return first_cpu(*lowest_mask);
+       if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
+               return -1; /* No targets found */
 
        /*
         * At this point we have built a mask of cpus representing the
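
With find_lowest_cpus() gone, find_lowest_rq() no longer walks every runqueue; cpupri_find() answers "which CPUs currently run at a lower priority" from a small per-priority table that cpupri_set() keeps current (which is why inc_rt_tasks(), dec_rt_tasks() and the rq_online/rq_offline handlers in this patch call it). The sketch below shows the rough shape of that lookup in plain userspace C; the _sim names, the 64-CPU mask and the "bigger level means higher priority" numbering are assumptions for the example, not the actual cpupri interface.

/*
 * Rough sketch of the cpupri idea: one CPU mask per priority level, so a
 * candidate search costs O(priority levels) instead of a scan over every
 * online runqueue.
 */
#include <stdio.h>
#include <stdint.h>

#define NR_LEVELS       102     /* idle, normal, then the RT priorities */

static uint64_t pri_to_cpus[NR_LEVELS]; /* which CPUs run at each level */
static int cpu_to_pri[64];              /* current level of each CPU    */

static void cpupri_set_sim(int cpu, int newpri)
{
        pri_to_cpus[cpu_to_pri[cpu]] &= ~(1ULL << cpu);
        pri_to_cpus[newpri] |= 1ULL << cpu;
        cpu_to_pri[cpu] = newpri;
}

/* lowest-priority CPUs the task may run on, or 0 if nothing qualifies */
static uint64_t cpupri_find_sim(int task_pri, uint64_t cpus_allowed)
{
        int level;

        for (level = 0; level < task_pri; level++) {
                uint64_t mask = pri_to_cpus[level] & cpus_allowed;

                if (mask)
                        return mask;
        }
        return 0;
}

int main(void)
{
        int cpu;

        for (cpu = 0; cpu < 4; cpu++)
                cpupri_set_sim(cpu, 1);         /* all CPUs running non-RT  */
        cpupri_set_sim(0, 60);                  /* cpu0 picks up an RT task */

        /* a level-70 task should land on one of the non-RT CPUs (mask 0xe) */
        printf("candidates: 0x%llx\n",
               (unsigned long long)cpupri_find_sim(70, 0xfULL));
        return 0;
}
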
@@ -1098,11 +1197,14 @@ static void post_schedule_rt(struct rq *rq)
        }
 }
 
-
+/*
+ * If we are not running and we are not going to reschedule soon, we should
+ * try to push tasks away now
+ */
 static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
 {
        if (!task_running(rq, p) &&
-           (p->prio >= rq->rt.highest_prio) &&
+           !test_tsk_need_resched(rq->curr) &&
            rq->rt.overloaded)
                push_rt_tasks(rq);
 }
@@ -1147,6 +1249,14 @@ static void set_cpus_allowed_rt(struct task_struct *p,
                }
 
                update_rt_migration(rq);
+
+               if (unlikely(weight == 1 || p->rt.nr_cpus_allowed == 1))
+                       /*
+                        * If either the new or old weight is a "1", we need
+                        * to requeue to properly move between shared and
+                        * exclusive queues.
+                        */
+                       requeue_task_rt(rq, p);
        }
 
        p->cpus_allowed    = *new_mask;
@@ -1154,17 +1264,25 @@ static void set_cpus_allowed_rt(struct task_struct *p,
 }
 
 /* Assumes rq->lock is held */
-static void join_domain_rt(struct rq *rq)
+static void rq_online_rt(struct rq *rq)
 {
        if (rq->rt.overloaded)
                rt_set_overload(rq);
+
+       __enable_runtime(rq);
+
+       cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio);
 }
 
 /* Assumes rq->lock is held */
-static void leave_domain_rt(struct rq *rq)
+static void rq_offline_rt(struct rq *rq)
 {
        if (rq->rt.overloaded)
                rt_clear_overload(rq);
+
+       __disable_runtime(rq);
+
+       cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
 }
 
 /*
@@ -1309,7 +1427,7 @@ static void set_curr_task_rt(struct rq *rq)
        p->se.exec_start = rq->clock;
 }
 
-const struct sched_class rt_sched_class = {
+static const struct sched_class rt_sched_class = {
        .next                   = &fair_sched_class,
        .enqueue_task           = enqueue_task_rt,
        .dequeue_task           = dequeue_task_rt,
@@ -1327,8 +1445,8 @@ const struct sched_class rt_sched_class = {
        .load_balance           = load_balance_rt,
        .move_one_task          = move_one_task_rt,
        .set_cpus_allowed       = set_cpus_allowed_rt,
-       .join_domain            = join_domain_rt,
-       .leave_domain           = leave_domain_rt,
+       .rq_online              = rq_online_rt,
+       .rq_offline             = rq_offline_rt,
        .pre_schedule           = pre_schedule_rt,
        .post_schedule          = post_schedule_rt,
        .task_wake_up           = task_wake_up_rt,