diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 1178257613adef2642bf21a7f59d3366db81df06..0a6d2e516420516cb1d0c35a5345db1f356f71ab 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -55,14 +55,14 @@ static inline int on_rt_rq(struct sched_rt_entity *rt_se)
        return !list_empty(&rt_se->run_list);
 }
 
-#ifdef CONFIG_FAIR_GROUP_SCHED
+#ifdef CONFIG_RT_GROUP_SCHED
 
-static inline unsigned int sched_rt_ratio(struct rt_rq *rt_rq)
+static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
 {
        if (!rt_rq->tg)
-               return SCHED_RT_FRAC;
+               return RUNTIME_INF;
 
-       return rt_rq->tg->rt_ratio;
+       return rt_rq->tg->rt_runtime;
 }
 
 #define for_each_leaf_rt_rq(rt_rq, rq) \
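The hunk above trades the fixed-point rt_ratio for an absolute rt_runtime budget in nanoseconds, with RUNTIME_INF meaning "never throttled". As a rough standalone model of the new units (helper and macro names invented for illustration, not from the patch):

#include <stdint.h>

/* An rt_rq may now consume rt_runtime nanoseconds of RT execution per
 * sched_rt_period. With the mainline defaults of that era,
 * sched_rt_period_us = 1000000 and sched_rt_runtime_us = 950000, RT
 * tasks get at most 950ms out of every 1s; a sysctl value of -1
 * disables throttling entirely.
 */
#define MODEL_RUNTIME_INF (~0ULL)		/* stands in for RUNTIME_INF */

static inline uint64_t model_rt_budget_ns(long runtime_us)
{
	if (runtime_us == -1)
		return MODEL_RUNTIME_INF;
	return (uint64_t)runtime_us * 1000;	/* NSEC_PER_USEC */
}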
@@ -89,17 +89,20 @@ static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
 static void enqueue_rt_entity(struct sched_rt_entity *rt_se);
 static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
 
-static void sched_rt_ratio_enqueue(struct rt_rq *rt_rq)
+static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 {
        struct sched_rt_entity *rt_se = rt_rq->rt_se;
 
        if (rt_se && !on_rt_rq(rt_se) && rt_rq->rt_nr_running) {
+               struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
+
                enqueue_rt_entity(rt_se);
-               resched_task(rq_of_rt_rq(rt_rq)->curr);
+               if (rt_rq->highest_prio < curr->prio)
+                       resched_task(curr);
        }
 }
 
-static void sched_rt_ratio_dequeue(struct rt_rq *rt_rq)
+static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 {
        struct sched_rt_entity *rt_se = rt_rq->rt_se;
 
@@ -107,11 +110,31 @@ static void sched_rt_ratio_dequeue(struct rt_rq *rt_rq)
                dequeue_rt_entity(rt_se);
 }
 
+static inline int rt_rq_throttled(struct rt_rq *rt_rq)
+{
+       return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
+}
+
+static int rt_se_boosted(struct sched_rt_entity *rt_se)
+{
+       struct rt_rq *rt_rq = group_rt_rq(rt_se);
+       struct task_struct *p;
+
+       if (rt_rq)
+               return !!rt_rq->rt_nr_boosted;
+
+       p = rt_task_of(rt_se);
+       return p->prio != p->normal_prio;
+}
+
 #else
 
-static inline unsigned int sched_rt_ratio(struct rt_rq *rt_rq)
+static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
 {
-       return sysctl_sched_rt_ratio;
+       if (sysctl_sched_rt_runtime == -1)
+               return RUNTIME_INF;
+
+       return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
 }
 
 #define for_each_leaf_rt_rq(rt_rq, rq) \
@@ -138,19 +161,23 @@ static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
        return NULL;
 }
 
-static inline void sched_rt_ratio_enqueue(struct rt_rq *rt_rq)
+static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 {
 }
 
-static inline void sched_rt_ratio_dequeue(struct rt_rq *rt_rq)
+static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 {
 }
 
+static inline int rt_rq_throttled(struct rt_rq *rt_rq)
+{
+       return rt_rq->rt_throttled;
+}
 #endif
 
 static inline int rt_se_prio(struct sched_rt_entity *rt_se)
 {
-#ifdef CONFIG_FAIR_GROUP_SCHED
+#ifdef CONFIG_RT_GROUP_SCHED
        struct rt_rq *rt_rq = group_rt_rq(rt_se);
 
        if (rt_rq)
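The rt_nr_boosted count added above ties throttling to priority inheritance: a group that has exhausted its budget must keep running as long as it contains a PI-boosted task (one whose p->prio differs from p->normal_prio), otherwise a throttled lock holder could stall a higher-priority waiter for the rest of the period. A sketch of that scenario, with invented task names:

/* Invented example: T_low, an RT task in a throttled group, holds a
 * rtmutex that T_high (outside the group) blocks on. PI boosts T_low,
 * so p->prio != p->normal_prio, rt_se_boosted() returns 1 and the
 * group's rt_nr_boosted becomes nonzero. Consequently:
 *
 *	rt_rq->rt_throttled == 1, rt_rq->rt_nr_boosted == 1
 *	=> rt_rq_throttled(rt_rq) == 0
 *
 * so the group stays runnable just long enough for T_low to drop the
 * lock and unboost, at which point the throttle takes effect again.
 */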
@@ -160,52 +187,51 @@ static inline int rt_se_prio(struct sched_rt_entity *rt_se)
        return rt_task_of(rt_se)->prio;
 }
 
-static int sched_rt_ratio_exceeded(struct rt_rq *rt_rq)
+static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
 {
-       unsigned int rt_ratio = sched_rt_ratio(rt_rq);
-       u64 period, ratio;
+       u64 runtime = sched_rt_runtime(rt_rq);
 
-       if (rt_ratio == SCHED_RT_FRAC)
+       if (runtime == RUNTIME_INF)
                return 0;
 
        if (rt_rq->rt_throttled)
-               return 1;
+               return rt_rq_throttled(rt_rq);
 
-       period = (u64)sysctl_sched_rt_period * NSEC_PER_MSEC;
-       ratio = (period * rt_ratio) >> SCHED_RT_FRAC_SHIFT;
+       if (rt_rq->rt_time > runtime) {
+               struct rq *rq = rq_of_rt_rq(rt_rq);
 
-       if (rt_rq->rt_time > ratio) {
+               rq->rt_throttled = 1;
                rt_rq->rt_throttled = 1;
-               sched_rt_ratio_dequeue(rt_rq);
-               return 1;
+
+               if (rt_rq_throttled(rt_rq)) {
+                       sched_rt_rq_dequeue(rt_rq);
+                       return 1;
+               }
        }
 
        return 0;
 }
 
-static void __update_sched_rt_period(struct rt_rq *rt_rq, u64 period)
-{
-       unsigned long rt_ratio = sched_rt_ratio(rt_rq);
-       u64 ratio = (period * rt_ratio) >> SCHED_RT_FRAC_SHIFT;
-
-       rt_rq->rt_time -= min(rt_rq->rt_time, ratio);
-       if (rt_rq->rt_throttled) {
-               rt_rq->rt_throttled = 0;
-               sched_rt_ratio_enqueue(rt_rq);
-       }
-}
-
 static void update_sched_rt_period(struct rq *rq)
 {
        struct rt_rq *rt_rq;
        u64 period;
 
        while (rq->clock > rq->rt_period_expire) {
-               period = (u64)sysctl_sched_rt_period * NSEC_PER_MSEC;
+               period = (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
                rq->rt_period_expire += period;
 
-               for_each_leaf_rt_rq(rt_rq, rq)
-                       __update_sched_rt_period(rt_rq, period);
+               for_each_leaf_rt_rq(rt_rq, rq) {
+                       u64 runtime = sched_rt_runtime(rt_rq);
+
+                       rt_rq->rt_time -= min(rt_rq->rt_time, runtime);
+                       if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
+                               rt_rq->rt_throttled = 0;
+                               sched_rt_rq_enqueue(rt_rq);
+                       }
+               }
+
+               rq->rt_throttled = 0;
        }
 }
 
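With the __update_sched_rt_period() helper gone, the per-period refresh happens inline: at every period boundary each leaf rt_rq is refunded up to one period's runtime and unthrottled once its consumed time drops below the budget. A compilable userspace model of that loop (single runqueue, names and numbers invented):

#include <stdio.h>

struct model_rq { unsigned long long rt_time; int throttled; };

/* One period boundary: refund up to 'runtime' ns of consumed budget,
 * unthrottling once the remaining debt is below the budget again,
 * mirroring the rt_time/rt_throttled handling above.
 */
static void model_period_tick(struct model_rq *rq, unsigned long long runtime)
{
	rq->rt_time -= rq->rt_time < runtime ? rq->rt_time : runtime;
	if (rq->throttled && rq->rt_time < runtime)
		rq->throttled = 0;
}

int main(void)
{
	struct model_rq rq = { .rt_time = 2100, .throttled = 1 };

	while (rq.throttled) {	/* budget: 1000ns per period */
		model_period_tick(&rq, 1000);
		printf("rt_time=%llu throttled=%d\n", rq.rt_time, rq.throttled);
	}
	return 0;
}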
@@ -234,12 +260,7 @@ static void update_curr_rt(struct rq *rq)
        cpuacct_charge(curr, delta_exec);
 
        rt_rq->rt_time += delta_exec;
-       /*
-        * might make it a tad more accurate:
-        *
-        * update_sched_rt_period(rq);
-        */
-       if (sched_rt_ratio_exceeded(rt_rq))
+       if (sched_rt_runtime_exceeded(rt_rq))
                resched_task(curr);
 }
 
@@ -248,7 +269,7 @@ void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 {
        WARN_ON(!rt_prio(rt_se_prio(rt_se)));
        rt_rq->rt_nr_running++;
-#if defined CONFIG_SMP || defined CONFIG_FAIR_GROUP_SCHED
+#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
        if (rt_se_prio(rt_se) < rt_rq->highest_prio)
                rt_rq->highest_prio = rt_se_prio(rt_se);
 #endif
@@ -260,6 +281,10 @@ void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 
        update_rt_migration(rq_of_rt_rq(rt_rq));
 #endif
+#ifdef CONFIG_RT_GROUP_SCHED
+       if (rt_se_boosted(rt_se))
+               rt_rq->rt_nr_boosted++;
+#endif
 }
 
 static inline
@@ -268,7 +293,7 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
        WARN_ON(!rt_prio(rt_se_prio(rt_se)));
        WARN_ON(!rt_rq->rt_nr_running);
        rt_rq->rt_nr_running--;
-#if defined CONFIG_SMP || defined CONFIG_FAIR_GROUP_SCHED
+#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
        if (rt_rq->rt_nr_running) {
                struct rt_prio_array *array;
 
@@ -290,6 +315,12 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 
        update_rt_migration(rq_of_rt_rq(rt_rq));
 #endif /* CONFIG_SMP */
+#ifdef CONFIG_RT_GROUP_SCHED
+       if (rt_se_boosted(rt_se))
+               rt_rq->rt_nr_boosted--;
+
+       WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
+#endif
 }
 
 static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
@@ -298,7 +329,7 @@ static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
        struct rt_prio_array *array = &rt_rq->active;
        struct rt_rq *group_rq = group_rt_rq(rt_se);
 
-       if (group_rq && group_rq->rt_throttled)
+       if (group_rq && rt_rq_throttled(group_rq))
                return;
 
        list_add_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
@@ -362,8 +393,6 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
         */
        for_each_sched_rt_entity(rt_se)
                enqueue_rt_entity(rt_se);
-
-       inc_cpu_load(rq, p->se.load.weight);
 }
 
 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
@@ -383,8 +412,6 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
                if (rt_rq && rt_rq->rt_nr_running)
                        enqueue_rt_entity(rt_se);
        }
-
-       dec_cpu_load(rq, p->se.load.weight);
 }
 
 /*
@@ -471,15 +498,12 @@ static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
        struct list_head *queue;
        int idx;
 
-       if (sched_rt_ratio_exceeded(rt_rq))
-               goto out;
-
        idx = sched_find_first_bit(array->bitmap);
        BUG_ON(idx >= MAX_RT_PRIO);
 
        queue = array->queue + idx;
        next = list_entry(queue->next, struct sched_rt_entity, run_list);
- out:
+
        return next;
 }
 
@@ -489,19 +513,17 @@ static struct task_struct *pick_next_task_rt(struct rq *rq)
        struct task_struct *p;
        struct rt_rq *rt_rq;
 
- retry:
        rt_rq = &rq->rt;
 
        if (unlikely(!rt_rq->rt_nr_running))
                return NULL;
 
-       if (sched_rt_ratio_exceeded(rt_rq))
+       if (rt_rq_throttled(rt_rq))
                return NULL;
 
        do {
                rt_se = pick_next_rt_entity(rq, rt_rq);
-               if (unlikely(!rt_se))
-                       goto retry;
+               BUG_ON(!rt_se);
                rt_rq = group_rt_rq(rt_se);
        } while (rt_rq);
 
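Because a group is now dequeued from its parent the moment it throttles, and pick_next_task_rt() bails out early on a throttled root, a non-empty rt_rq can no longer yield an empty pick, so the old retry loop collapses into a BUG_ON(). Informally, the invariant being asserted (my phrasing, not from the patch):

/* Informal invariant behind the BUG_ON():
 *
 *	rt_rq->rt_nr_running > 0 && !rt_rq_throttled(rt_rq)
 *		=> pick_next_rt_entity(rq, rt_rq) != NULL
 *
 * A throttled (and non-boosted) child group is removed from its
 * parent's prio array by sched_rt_rq_dequeue(), so
 * sched_find_first_bit() cannot land on it while it is throttled.
 */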
@@ -877,10 +899,8 @@ static int pull_rt_task(struct rq *this_rq)
                /*
                 * Are there still pullable RT tasks?
                 */
-               if (src_rq->rt.rt_nr_running <= 1) {
-                       spin_unlock(&src_rq->lock);
-                       continue;
-               }
+               if (src_rq->rt.rt_nr_running <= 1)
+                       goto skip;
 
                p = pick_next_highest_task_rt(src_rq, this_cpu);
 
@@ -904,7 +924,7 @@ static int pull_rt_task(struct rq *this_rq)
                         */
                        if (p->prio < src_rq->curr->prio ||
                            (next && next->prio < src_rq->curr->prio))
-                               goto out;
+                               goto skip;
 
                        ret = 1;
 
@@ -924,7 +944,7 @@ static int pull_rt_task(struct rq *this_rq)
                        next = p;
 
                }
- out:
+ skip:
                spin_unlock(&src_rq->lock);
        }
 
@@ -1087,9 +1107,11 @@ static void prio_changed_rt(struct rq *rq, struct task_struct *p,
                        pull_rt_task(rq);
                /*
                 * If there's a higher priority task waiting to run
-                * then reschedule.
+                * then reschedule. Note, the above pull_rt_task
+                * can release the rq lock and p could migrate.
+                * Only reschedule if p is still on the same runqueue.
                 */
-               if (p->prio > rq->rt.highest_prio)
+               if (p->prio > rq->rt.highest_prio && rq->curr == p)
                        resched_task(p);
 #else
                /* For UP simply resched on drop of prio */
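The rq->curr == p test added above closes the window the extended comment describes: pull_rt_task() may drop rq->lock inside double_lock_balance(), and p can be migrated away meanwhile. A timeline of the interleaving being guarded against (CPU numbering invented for illustration):

/*
 *	CPU0: prio_changed_rt(rq, p)	CPU1: balancing
 *	  pull_rt_task(rq)
 *	    double_lock_balance()
 *	      rq->lock dropped...	  locks rq, migrates p away
 *	  resched_task(p)		  <- without the rq->curr == p
 *					     check this would poke a task
 *					     this runqueue no longer owns
 */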
@@ -1122,13 +1144,7 @@ static void watchdog(struct rq *rq, struct task_struct *p)
 
                p->rt.timeout++;
                next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
-               if (next > p->rt.timeout) {
-                       u64 next_time = p->se.sum_exec_runtime;
-
-                       next_time += next * (NSEC_PER_SEC/HZ);
-                       if (p->it_sched_expires > next_time)
-                               p->it_sched_expires = next_time;
-               } else
+               if (p->rt.timeout > next)
                        p->it_sched_expires = p->se.sum_exec_runtime;
        }
 }
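The watchdog now simply compares the accumulated tick count against the RLIMIT_RTTIME limit instead of maintaining a projected expiry time. A worked example of the conversion (limit value and HZ invented):

/* Assume HZ = 1000, so USEC_PER_SEC/HZ = 1000us per tick, and a soft
 * RLIMIT_RTTIME of 500000us:
 *
 *	next = DIV_ROUND_UP(500000, 1000) = 500 ticks
 *
 * Once p->rt.timeout exceeds 500 ticks of RT runtime, it_sched_expires
 * is pulled back to sum_exec_runtime so the posix CPU-timer path
 * delivers the signal on the next tick.
 */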