/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */
#ifdef CONFIG_SMP
static cpumask_t rt_overload_mask;
static atomic_t rto_count;

static inline int rt_overloaded(void)
{
	return atomic_read(&rto_count);
}

static inline cpumask_t *rt_overload(void)
{
	return &rt_overload_mask;
}

static inline void rt_set_overload(struct rq *rq)
{
	cpu_set(rq->cpu, rt_overload_mask);
	/*
	 * Make sure the mask is visible before we set
	 * the overload count. That is checked to determine
	 * if we should look at the mask. It would be a shame
	 * if we looked at the mask, but the mask was not
	 * updated yet.
	 */
	wmb();
	atomic_inc(&rto_count);
}

static inline void rt_clear_overload(struct rq *rq)
{
	/* the order here really doesn't matter */
	atomic_dec(&rto_count);
	cpu_clear(rq->cpu, rt_overload_mask);
}

static void update_rt_migration(struct rq *rq)
{
	if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1))
		rt_set_overload(rq);
	else
		rt_clear_overload(rq);
}
#endif /* CONFIG_SMP */
/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	u64 delta_exec;

	if (!task_has_rt_policy(curr))
		return;

	delta_exec = rq->clock - curr->se.exec_start;
	if (unlikely((s64)delta_exec < 0))
		delta_exec = 0;

	schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	curr->se.exec_start = rq->clock;
	cpuacct_charge(curr, delta_exec);
}
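/*
 * Bookkeeping done on every RT enqueue/dequeue: track the number of
 * queued RT tasks, and on SMP also the highest queued RT priority and
 * how many of those tasks may migrate, so the overload state can be
 * kept up to date via update_rt_migration().
 */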
static inline void inc_rt_tasks(struct task_struct *p, struct rq *rq)
{
	rq->rt.rt_nr_running++;
#ifdef CONFIG_SMP
	if (p->prio < rq->rt.highest_prio)
		rq->rt.highest_prio = p->prio;
	if (p->nr_cpus_allowed > 1)
		rq->rt.rt_nr_migratory++;

	update_rt_migration(rq);
#endif /* CONFIG_SMP */
}
static inline void dec_rt_tasks(struct task_struct *p, struct rq *rq)
{
	WARN_ON(!rq->rt.rt_nr_running);
	rq->rt.rt_nr_running--;
#ifdef CONFIG_SMP
	if (rq->rt.rt_nr_running) {
		struct rt_prio_array *array;

		WARN_ON(p->prio < rq->rt.highest_prio);
		if (p->prio == rq->rt.highest_prio) {
			array = &rq->rt.active;
			rq->rt.highest_prio =
				sched_find_first_bit(array->bitmap);
		} /* otherwise leave rq->highest_prio alone */
	} else
		rq->rt.highest_prio = MAX_RT_PRIO;
	if (p->nr_cpus_allowed > 1)
		rq->rt.rt_nr_migratory--;

	update_rt_migration(rq);
#endif /* CONFIG_SMP */
}
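/*
 * Queue a task at the tail of its priority list and set the matching
 * bit in the priority bitmap.
 */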
static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
{
	struct rt_prio_array *array = &rq->rt.active;

	list_add_tail(&p->run_list, array->queue + p->prio);
	__set_bit(p->prio, array->bitmap);
	inc_cpu_load(rq, p->se.load.weight);

	inc_rt_tasks(p, rq);
}
/*
 * Adding/removing a task to/from a priority array:
 */
static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
{
	struct rt_prio_array *array = &rq->rt.active;

	update_curr_rt(rq);
	list_del(&p->run_list);
	if (list_empty(array->queue + p->prio))
		__clear_bit(p->prio, array->bitmap);
	dec_cpu_load(rq, p->se.load.weight);
	dec_rt_tasks(p, rq);
}
/*
 * Put task to the end of the run list without the overhead of dequeue
 * followed by enqueue.
 */
static void requeue_task_rt(struct rq *rq, struct task_struct *p)
{
	struct rt_prio_array *array = &rq->rt.active;
	list_move_tail(&p->run_list, array->queue + p->prio);
}

static void
yield_task_rt(struct rq *rq)
{
	requeue_task_rt(rq, rq->curr);
}
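/*
 * SMP wakeup placement: if the woken task would not preempt its current
 * runqueue anyway, select_task_rq_rt() below tries to find a runqueue
 * running at a lower RT priority instead.
 */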
#ifdef CONFIG_SMP
static int find_lowest_rq(struct task_struct *task);

static int select_task_rq_rt(struct task_struct *p, int sync)
{
	struct rq *rq = task_rq(p);

	/*
	 * If the task will not preempt the RQ, try to find a better RQ
	 * before we even activate the task
	 */
	if ((p->prio >= rq->rt.highest_prio)
	    && (p->nr_cpus_allowed > 1)) {
		int cpu = find_lowest_rq(p);
		return (cpu == -1) ? task_cpu(p) : cpu;
	}

	/*
	 * Otherwise, just let it ride on the affined RQ and the
	 * post-schedule router will push the preempted task away
	 */
	return task_cpu(p);
}
#endif /* CONFIG_SMP */
/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
{
	if (p->prio < rq->curr->prio)
		resched_task(rq->curr);
}
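/*
 * Pick the highest-priority queued RT task: the first set bit in the
 * priority bitmap indexes the list whose head is chosen.
 */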
static struct task_struct *pick_next_task_rt(struct rq *rq)
{
	struct rt_prio_array *array = &rq->rt.active;
	struct task_struct *next;
	struct list_head *queue;
	int idx;

	idx = sched_find_first_bit(array->bitmap);
	if (idx >= MAX_RT_PRIO)
		return NULL;

	queue = array->queue + idx;
	next = list_entry(queue->next, struct task_struct, run_list);
	next->se.exec_start = rq->clock;

	return next;
}

static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
	update_curr_rt(rq);
	p->se.exec_start = 0;
}
#ifdef CONFIG_SMP

/* Only try algorithms three times */
#define RT_MAX_TRIES 3

static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
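/*
 * A task can be pushed or pulled only if it is not currently running,
 * is allowed on the destination cpu (when one is given), and is not
 * pinned to a single cpu.
 */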
static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
	if (!task_running(rq, p) &&
	    (cpu < 0 || cpu_isset(cpu, p->cpus_allowed)) &&
	    (p->nr_cpus_allowed > 1))
		return 1;
	return 0;
}
/* Return the second highest RT task, NULL otherwise */
static struct task_struct *pick_next_highest_task_rt(struct rq *rq,
						      int cpu)
{
	struct rt_prio_array *array = &rq->rt.active;
	struct task_struct *next;
	struct list_head *queue;
	int idx;

	assert_spin_locked(&rq->lock);
	if (likely(rq->rt.rt_nr_running < 2))
		return NULL;

	idx = sched_find_first_bit(array->bitmap);
	if (unlikely(idx >= MAX_RT_PRIO)) {
		WARN_ON(1); /* rt_nr_running is bad */
		return NULL;
	}

	queue = array->queue + idx;
	BUG_ON(list_empty(queue));
	next = list_entry(queue->next, struct task_struct, run_list);
	if (unlikely(pick_rt_task(rq, next, cpu)))
		goto out;

	if (queue->next->next != queue) {
		next = list_entry(queue->next->next, struct task_struct, run_list);
		if (pick_rt_task(rq, next, cpu))
			goto out;
	}

 retry:
	/* slower, but more flexible */
	idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
	if (unlikely(idx >= MAX_RT_PRIO))
		return NULL;

	queue = array->queue + idx;
	BUG_ON(list_empty(queue));
	list_for_each_entry(next, queue, run_list) {
		if (pick_rt_task(rq, next, cpu))
			goto out;
	}
	goto retry;

 out:
	return next;
}
static DEFINE_PER_CPU(cpumask_t, local_cpu_mask);
static DEFINE_PER_CPU(cpumask_t, valid_cpu_mask);
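/*
 * Build the mask of cpus whose runqueues run at the lowest RT priority
 * that is still below the task's priority; a cpu with no queued RT
 * tasks at all is taken immediately as the best possible target.
 */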
static int find_lowest_cpus(struct task_struct *task, cpumask_t *lowest_mask)
{
	int cpu;
	cpumask_t *valid_mask = &__get_cpu_var(valid_cpu_mask);
	int lowest_prio = -1;
	int ret = 0;

	cpus_clear(*lowest_mask);
	cpus_and(*valid_mask, cpu_online_map, task->cpus_allowed);

	/*
	 * Scan each rq for the lowest prio.
	 */
	for_each_cpu_mask(cpu, *valid_mask) {
		struct rq *rq = cpu_rq(cpu);

		/* We look for lowest RT prio or non-rt CPU */
		if (rq->rt.highest_prio >= MAX_RT_PRIO) {
			cpus_clear(*lowest_mask);
			cpu_set(rq->cpu, *lowest_mask);
			return 1;
		}

		/* no locking for now */
		if ((rq->rt.highest_prio > task->prio)
		    && (rq->rt.highest_prio >= lowest_prio)) {
			if (rq->rt.highest_prio > lowest_prio) {
				/* new low - clear old data */
				lowest_prio = rq->rt.highest_prio;
				cpus_clear(*lowest_mask);
			}
			cpu_set(rq->cpu, *lowest_mask);
			ret = 1;
		}
	}

	return ret;
}
static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
{
	int first;

	/* "this_cpu" is cheaper to preempt than a remote processor */
	if ((this_cpu != -1) && cpu_isset(this_cpu, *mask))
		return this_cpu;

	first = first_cpu(*mask);
	if (first != NR_CPUS)
		return first;
	return -1;
}
static int find_lowest_rq(struct task_struct *task)
{
	struct sched_domain *sd;
	cpumask_t *lowest_mask = &__get_cpu_var(local_cpu_mask);
	int this_cpu = smp_processor_id();
	int cpu = task_cpu(task);

	if (!find_lowest_cpus(task, lowest_mask))
		return -1;

	/*
	 * At this point we have built a mask of cpus representing the
	 * lowest priority tasks in the system. Now we want to elect
	 * the best one based on our affinity and topology.
	 *
	 * We prioritize the last cpu that the task executed on since
	 * it is most likely cache-hot in that location.
	 */
	if (cpu_isset(cpu, *lowest_mask))
		return cpu;

	/*
	 * Otherwise, we consult the sched_domains span maps to figure
	 * out which cpu is logically closest to our hot cache data.
	 */
	if (this_cpu == cpu)
		this_cpu = -1; /* Skip this_cpu opt if the same */

	for_each_domain(cpu, sd) {
		if (sd->flags & SD_WAKE_AFFINE) {
			cpumask_t domain_mask;
			int best_cpu;

			cpus_and(domain_mask, sd->span, *lowest_mask);
			best_cpu = pick_optimal_cpu(this_cpu,
						    &domain_mask);
			if (best_cpu != -1)
				return best_cpu;
		}
	}

	/*
	 * And finally, if there were no matches within the domains
	 * just give the caller *something* to work with from the compatible
	 * locations.
	 */
	return pick_optimal_cpu(this_cpu, lowest_mask);
}
/* Will lock the rq it finds */
static struct rq *find_lock_lowest_rq(struct task_struct *task,
				      struct rq *rq)
{
	struct rq *lowest_rq = NULL;
	int tries;
	int cpu;

	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
		cpu = find_lowest_rq(task);
		if ((cpu == -1) || (cpu == rq->cpu))
			break;

		lowest_rq = cpu_rq(cpu);

		/* if the prio of this runqueue changed, try again */
		if (double_lock_balance(rq, lowest_rq)) {
			/*
			 * We had to unlock the run queue. In
			 * the meantime, task could have
			 * migrated already or had its affinity changed.
			 * Also make sure that it wasn't scheduled on its rq.
			 */
			if (unlikely(task_rq(task) != rq ||
				     !cpu_isset(lowest_rq->cpu, task->cpus_allowed) ||
				     task_running(rq, task) ||
				     !task->se.on_rq)) {
				spin_unlock(&lowest_rq->lock);
				lowest_rq = NULL;
				break;
			}
		}

		/* If this rq is still suitable use it. */
		if (lowest_rq->rt.highest_prio > task->prio)
			break;

		/* try again */
		spin_unlock(&lowest_rq->lock);
		lowest_rq = NULL;
	}

	return lowest_rq;
}
/*
 * If the current CPU has more than one RT task, see if the non
 * running task can migrate over to a CPU that is running a task
 * of lesser priority.
 */
static int push_rt_task(struct rq *rq)
{
	struct task_struct *next_task;
	struct rq *lowest_rq;
	int ret = 0;
	int paranoid = RT_MAX_TRIES;

	assert_spin_locked(&rq->lock);

	next_task = pick_next_highest_task_rt(rq, -1);
	if (!next_task)
		return 0;

 retry:
	if (unlikely(next_task == rq->curr)) {
		WARN_ON(1);
		return 0;
	}

	/*
	 * It's possible that next_task slipped in with a
	 * higher priority than current. If that's the case
	 * just reschedule current.
	 */
	if (unlikely(next_task->prio < rq->curr->prio)) {
		resched_task(rq->curr);
		return 0;
	}

	/* We might release rq lock */
	get_task_struct(next_task);

	/* find_lock_lowest_rq locks the rq if found */
	lowest_rq = find_lock_lowest_rq(next_task, rq);
	if (!lowest_rq) {
		struct task_struct *task;
		/*
		 * find_lock_lowest_rq releases rq->lock
		 * so it is possible that next_task has changed.
		 * If it has, then try again.
		 */
		task = pick_next_highest_task_rt(rq, -1);
		if (unlikely(task != next_task) && task && paranoid--) {
			put_task_struct(next_task);
			next_task = task;
			goto retry;
		}
		goto out;
	}

	assert_spin_locked(&lowest_rq->lock);

	deactivate_task(rq, next_task, 0);
	set_task_cpu(next_task, lowest_rq->cpu);
	activate_task(lowest_rq, next_task, 0);
	resched_task(lowest_rq->curr);
	spin_unlock(&lowest_rq->lock);

	ret = 1;
 out:
	put_task_struct(next_task);
	return ret;
}
/*
 * TODO: Currently we just use the second highest prio task on
 *       the queue, and stop when it can't migrate (or there's
 *       no more RT tasks). There may be a case where a lower
 *       priority RT task has a different affinity than the
 *       higher RT task. In this case the lower RT task could
 *       possibly be able to migrate whereas the higher priority
 *       RT task could not. We currently ignore this issue.
 *       Enhancements are welcome!
 */
static void push_rt_tasks(struct rq *rq)
{
	/* push_rt_task will return true if it moved an RT */
	while (push_rt_task(rq))
		;
}
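/*
 * Pull side of the RT balancer: when this runqueue is about to schedule
 * a lower-priority task, scan the overloaded runqueues and pull over any
 * queued RT task that would preempt what we are about to run.
 */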
static int pull_rt_task(struct rq *this_rq)
{
	struct task_struct *next;
	struct task_struct *p;
	struct rq *src_rq;
	cpumask_t *rto_cpumask;
	int this_cpu = this_rq->cpu;
	int cpu;
	int ret = 0;

	assert_spin_locked(&this_rq->lock);

	/*
	 * If cpusets are used, and we have overlapping
	 * run queue cpusets, then this algorithm may not catch all.
	 * This is just the price you pay on trying to keep
	 * dirtying caches down on large SMP machines.
	 */
	if (likely(!rt_overloaded()))
		return 0;

	next = pick_next_task_rt(this_rq);
	rto_cpumask = rt_overload();

	for_each_cpu_mask(cpu, *rto_cpumask) {
		if (this_cpu == cpu)
			continue;

		src_rq = cpu_rq(cpu);
		if (unlikely(src_rq->rt.rt_nr_running <= 1)) {
			/*
			 * It is possible that overlapping cpusets
			 * will miss clearing a non overloaded runqueue.
			 */
			if (double_lock_balance(this_rq, src_rq)) {
				/* unlocked our runqueue lock */
				struct task_struct *old_next = next;
				next = pick_next_task_rt(this_rq);
				if (next != old_next)
					ret = 1;
			}
			if (likely(src_rq->rt.rt_nr_running <= 1))
				/*
				 * Small chance that this_rq->curr changed
				 * but it's really harmless here.
				 */
				rt_clear_overload(this_rq);
			else
				/*
				 * Heh, the src_rq is now overloaded, since
				 * we already have the src_rq lock, go straight
				 * to pulling tasks from it.
				 */
				goto try_pulling;
			spin_unlock(&src_rq->lock);
			continue;
		}

		/*
		 * We can potentially drop this_rq's lock in
		 * double_lock_balance, and another CPU could
		 * steal our next task - hence we must cause
		 * the caller to recalculate the next task
		 * in that case:
		 */
		if (double_lock_balance(this_rq, src_rq)) {
			struct task_struct *old_next = next;
			next = pick_next_task_rt(this_rq);
			if (next != old_next)
				ret = 1;
		}

		/*
		 * Are there still pullable RT tasks?
		 */
		if (src_rq->rt.rt_nr_running <= 1) {
			spin_unlock(&src_rq->lock);
			continue;
		}

 try_pulling:
		p = pick_next_highest_task_rt(src_rq, this_cpu);

		/*
		 * Do we have an RT task that preempts
		 * the to-be-scheduled task?
		 */
		if (p && (!next || (p->prio < next->prio))) {
			WARN_ON(p == src_rq->curr);
			WARN_ON(!p->se.on_rq);

			/*
			 * There's a chance that p is higher in priority
			 * than what's currently running on its cpu.
			 * This is just that p is waking up and hasn't
			 * had a chance to schedule. We only pull
			 * p if it is lower in priority than the
			 * current task on the run queue or
			 * this_rq next task is lower in prio than
			 * the current task on that rq.
			 */
			if (p->prio < src_rq->curr->prio ||
			    (next && next->prio < src_rq->curr->prio))
				goto out;

			ret = 1;

			deactivate_task(src_rq, p, 0);
			set_task_cpu(p, this_cpu);
			activate_task(this_rq, p, 0);
			/*
			 * We continue with the search, just in
			 * case there's an even higher prio task
			 * in another runqueue. (low likelihood
			 * but possible)
			 */

			/*
			 * Update next so that we won't pick a task
			 * on another cpu with a priority lower (or equal)
			 * than the one we just picked.
			 */
			next = p;
		}
 out:
		spin_unlock(&src_rq->lock);
	}

	return ret;
}
static void schedule_balance_rt(struct rq *rq,
				struct task_struct *prev)
{
	/* Try to pull RT tasks here if we lower this rq's prio */
	if (unlikely(rt_task(prev)) &&
	    rq->rt.highest_prio > prev->prio)
		pull_rt_task(rq);
}
static void schedule_tail_balance_rt(struct rq *rq)
{
	/*
	 * If we have more than one rt_task queued, then
	 * see if we can push the other rt_tasks off to other CPUS.
	 * Note we may release the rq lock, and since
	 * the lock was owned by prev, we need to release it
	 * first via finish_lock_switch and then reacquire it here.
	 */
	if (unlikely(rq->rt.rt_nr_running > 1)) {
		spin_lock_irq(&rq->lock);
		push_rt_tasks(rq);
		spin_unlock_irq(&rq->lock);
	}
}
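/*
 * A newly woken RT task that does not preempt the current task may still
 * be pushable to another cpu, so try pushing on wakeup as well.
 */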
static void wakeup_balance_rt(struct rq *rq, struct task_struct *p)
{
	if (unlikely(rt_task(p)) &&
	    !task_running(rq, p) &&
	    (p->prio >= rq->curr->prio))
		push_rt_tasks(rq);
}
static unsigned long
load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
		unsigned long max_load_move,
		struct sched_domain *sd, enum cpu_idle_type idle,
		int *all_pinned, int *this_best_prio)
{
	/* don't touch RT tasks */
	return 0;
}

static int
move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
		 struct sched_domain *sd, enum cpu_idle_type idle)
{
	/* don't touch RT tasks */
	return 0;
}
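/*
 * An affinity change can turn a migratable RT task into a pinned one
 * (or vice versa), so rt_nr_migratory and the overload state have to be
 * updated here as well.
 */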
static void set_cpus_allowed_rt(struct task_struct *p, cpumask_t *new_mask)
{
	int weight = cpus_weight(*new_mask);

	BUG_ON(!rt_task(p));

	/*
	 * Update the migration status of the RQ if we have an RT task
	 * which is running AND changing its weight value.
	 */
	if (p->se.on_rq && (weight != p->nr_cpus_allowed)) {
		struct rq *rq = task_rq(p);

		if ((p->nr_cpus_allowed <= 1) && (weight > 1))
			rq->rt.rt_nr_migratory++;
		else if ((p->nr_cpus_allowed > 1) && (weight <= 1)) {
			BUG_ON(!rq->rt.rt_nr_migratory);
			rq->rt.rt_nr_migratory--;
		}

		update_rt_migration(rq);
	}

	p->cpus_allowed = *new_mask;
	p->nr_cpus_allowed = weight;
}
#else /* CONFIG_SMP */
# define schedule_tail_balance_rt(rq)	do { } while (0)
# define schedule_balance_rt(rq, prev)	do { } while (0)
# define wakeup_balance_rt(rq, p)	do { } while (0)
#endif /* CONFIG_SMP */
static void task_tick_rt(struct rq *rq, struct task_struct *p)
{
	update_curr_rt(rq);

	/*
	 * RR tasks need a special form of timeslice management.
	 * FIFO tasks have no timeslices.
	 */
	if (p->policy != SCHED_RR)
		return;
	if (--p->time_slice)
		return;

	p->time_slice = DEF_TIMESLICE;

	/*
	 * Requeue to the end of queue if we are not the only element
	 * on the queue:
	 */
	if (p->run_list.prev != p->run_list.next) {
		requeue_task_rt(rq, p);
		set_tsk_need_resched(p);
	}
}
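/* (Re)start runtime accounting for the task that is now current. */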
static void set_curr_task_rt(struct rq *rq)
{
	struct task_struct *p = rq->curr;

	p->se.exec_start = rq->clock;
}
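/*
 * The RT scheduling class, linked ahead of the fair class so that
 * runnable SCHED_FIFO/SCHED_RR tasks are always considered first.
 */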
const struct sched_class rt_sched_class = {
	.next			= &fair_sched_class,
	.enqueue_task		= enqueue_task_rt,
	.dequeue_task		= dequeue_task_rt,
	.yield_task		= yield_task_rt,
#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_rt,
#endif /* CONFIG_SMP */

	.check_preempt_curr	= check_preempt_curr_rt,

	.pick_next_task		= pick_next_task_rt,
	.put_prev_task		= put_prev_task_rt,

#ifdef CONFIG_SMP
	.load_balance		= load_balance_rt,
	.move_one_task		= move_one_task_rt,
	.set_cpus_allowed	= set_cpus_allowed_rt,
#endif /* CONFIG_SMP */

	.set_curr_task		= set_curr_task_rt,
	.task_tick		= task_tick_rt,
};