/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

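/*
 * Everything below indexes the per-runqueue rt_prio_array (rq->rt.active).
 * For reference, a sketch of its definition from kernel/sched.c (the extra
 * bitmap bit acts as a delimiter for the priority search):
 *
 *        struct rt_prio_array {
 *                DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1);
 *                struct list_head queue[MAX_RT_PRIO];
 *        };
 *
 * One FIFO list per RT priority level, plus a bitmap of non-empty levels.
 */
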
/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static inline void update_curr_rt(struct rq *rq)
{
        struct task_struct *curr = rq->curr;
        u64 delta_exec;

        if (!task_has_rt_policy(curr))
                return;

        delta_exec = rq->clock - curr->se.exec_start;
        if (unlikely((s64)delta_exec < 0))
                delta_exec = 0;

        schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));

        curr->se.sum_exec_runtime += delta_exec;
        curr->se.exec_start = rq->clock;
}

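/*
 * Note: rq->clock and se.exec_start are nanosecond timestamps; the core
 * scheduler is expected to have refreshed rq->clock before calling into
 * the class hooks, so delta_exec above is the RT time consumed since the
 * task last started running (clamped to 0 on clock anomalies).
 */
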
static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
{
        struct rt_prio_array *array = &rq->rt.active;

        list_add_tail(&p->run_list, array->queue + p->prio);
        __set_bit(p->prio, array->bitmap);
}

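/*
 * Enqueue is O(1): the task is appended to the FIFO list for its priority
 * and the matching bitmap bit is set, so the level becomes (or stays)
 * visible to the sched_find_first_bit() search in pick_next_task_rt().
 */
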
/*
 * Adding/removing a task to/from a priority array:
 */
static void
dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep, u64 now)
{
        struct rt_prio_array *array = &rq->rt.active;

        update_curr_rt(rq);

        list_del(&p->run_list);
        if (list_empty(array->queue + p->prio))
                __clear_bit(p->prio, array->bitmap);
}

/*
 * Put task to the end of the run list without the overhead of dequeue
 * followed by enqueue.
 */
static void requeue_task_rt(struct rq *rq, struct task_struct *p)
{
        struct rt_prio_array *array = &rq->rt.active;

        list_move_tail(&p->run_list, array->queue + p->prio);
}

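/*
 * list_move_tail() leaves the bitmap untouched: the queue for p->prio
 * stays non-empty throughout, which is what makes this cheaper than a
 * full dequeue followed by an enqueue.
 */
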
static void
yield_task_rt(struct rq *rq, struct task_struct *p)
{
        requeue_task_rt(rq, p);
}

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
{
        if (p->prio < rq->curr->prio)
                resched_task(rq->curr);
}

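/*
 * Lower p->prio means higher real-time priority (RT priorities occupy
 * 0..MAX_RT_PRIO-1), so only a strictly higher-priority wakeup marks the
 * currently running task for rescheduling.
 */
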
static struct task_struct *pick_next_task_rt(struct rq *rq, u64 now)
{
        struct rt_prio_array *array = &rq->rt.active;
        struct task_struct *next;
        struct list_head *queue;
        int idx;

        idx = sched_find_first_bit(array->bitmap);
        if (idx >= MAX_RT_PRIO)
                return NULL;

        queue = array->queue + idx;
        next = list_entry(queue->next, struct task_struct, run_list);

        next->se.exec_start = rq->clock;

        return next;
}

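/*
 * Example: with runnable RT tasks queued at priorities 3 and 40,
 * sched_find_first_bit() returns 3 and the head of array->queue[3] runs
 * next; an index of MAX_RT_PRIO (the always-set delimiter bit) means no
 * RT task is runnable and the core scheduler falls through to the next
 * scheduling class.
 */
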
static void put_prev_task_rt(struct rq *rq, struct task_struct *p, u64 now)
{
        update_curr_rt(rq);
        p->se.exec_start = 0;
}

/*
 * Load-balancing iterator. Note: while the runqueue stays locked
 * during the whole iteration, the current task might be
 * dequeued so the iterator has to be dequeue-safe. Here we
 * achieve that by always pre-iterating before returning
 * the current task:
 */
static struct task_struct *load_balance_start_rt(void *arg)
{
        struct rq *rq = arg;
        struct rt_prio_array *array = &rq->rt.active;
        struct list_head *head, *curr;
        struct task_struct *p;
        int idx;

        idx = sched_find_first_bit(array->bitmap);
        if (idx >= MAX_RT_PRIO)
                return NULL;

        head = array->queue + idx;
        curr = head->prev;

        p = list_entry(curr, struct task_struct, run_list);

        curr = curr->prev;

        rq->rt.rt_load_balance_idx = idx;
        rq->rt.rt_load_balance_head = head;
        rq->rt.rt_load_balance_curr = curr;

        return p;
}

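/*
 * Dequeue-safety, concretely: the cursor is advanced to the previous
 * list node *before* the task at the old position is handed back, so
 * even if that task is migrated away and list_del()'ed by the balancer,
 * the saved rt_load_balance_curr still points at a node that remains on
 * the queue.
 */
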
static struct task_struct *load_balance_next_rt(void *arg)
{
        struct rq *rq = arg;
        struct rt_prio_array *array = &rq->rt.active;
        struct list_head *head, *curr;
        struct task_struct *p;
        int idx;

        idx = rq->rt.rt_load_balance_idx;
        head = rq->rt.rt_load_balance_head;
        curr = rq->rt.rt_load_balance_curr;

        /*
         * If we arrived back to the head again then
         * iterate to the next queue (if any):
         */
        if (unlikely(head == curr)) {
                int next_idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);

                if (next_idx >= MAX_RT_PRIO)
                        return NULL;

                idx = next_idx;
                head = array->queue + idx;
                curr = head->prev;

                rq->rt.rt_load_balance_idx = idx;
                rq->rt.rt_load_balance_head = head;
        }

        p = list_entry(curr, struct task_struct, run_list);

        curr = curr->prev;

        rq->rt.rt_load_balance_curr = curr;

        return p;
}

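/*
 * Reaching the queue head means the current priority level has been
 * exhausted; find_next_bit() then moves on to the next non-empty
 * (numerically higher, i.e. lower-priority) level, or ends the iteration
 * once no set bit remains below MAX_RT_PRIO.
 */
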
static unsigned long
load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
                        unsigned long max_nr_move, unsigned long max_load_move,
                        struct sched_domain *sd, enum cpu_idle_type idle,
                        int *all_pinned, int *this_best_prio)
{
        int nr_moved;
        struct rq_iterator rt_rq_iterator;
        unsigned long load_moved;

        rt_rq_iterator.start = load_balance_start_rt;
        rt_rq_iterator.next = load_balance_next_rt;
        /* pass 'busiest' rq argument into
         * load_balance_[start|next]_rt iterators
         */
        rt_rq_iterator.arg = busiest;

        nr_moved = balance_tasks(this_rq, this_cpu, busiest, max_nr_move,
                        max_load_move, sd, idle, all_pinned, &load_moved,
                        this_best_prio, &rt_rq_iterator);

        return load_moved;
}

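/*
 * Roughly how the pieces fit together (a sketch, not the literal
 * balance_tasks() control flow):
 *
 *        p = iterator->start(iterator->arg);
 *        while (p && still_room_to_move) {
 *                if (can_migrate_task(p, ...))
 *                        pull p over to this_rq;
 *                p = iterator->next(iterator->arg);
 *        }
 *
 * The amount of load actually moved is reported back through &load_moved
 * and returned to the generic balancer.
 */
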
static void task_tick_rt(struct rq *rq, struct task_struct *p)
{
        /*
         * RR tasks need a special form of timeslice management.
         * FIFO tasks have no timeslices.
         */
        if (p->policy != SCHED_RR)
                return;

        if (--p->time_slice)
                return;

        p->time_slice = static_prio_timeslice(p->static_prio);
        set_tsk_need_resched(p);

        /* put it at the end of the queue: */
        requeue_task_rt(rq, p);
}

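/*
 * In other words: SCHED_FIFO tasks are never preempted from the tick,
 * while a SCHED_RR task whose slice has expired gets a fresh slice
 * (derived from its static priority), is marked for rescheduling and is
 * rotated to the tail of its priority queue so that peers at the same
 * priority get their turn.
 */
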
static struct sched_class rt_sched_class __read_mostly = {
        .enqueue_task           = enqueue_task_rt,
        .dequeue_task           = dequeue_task_rt,
        .yield_task             = yield_task_rt,

        .check_preempt_curr     = check_preempt_curr_rt,

        .pick_next_task         = pick_next_task_rt,
        .put_prev_task          = put_prev_task_rt,

        .load_balance           = load_balance_rt,

        .task_tick              = task_tick_rt,
};
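
/*
 * The core scheduler consults the scheduling classes in priority order,
 * with this class ahead of the fair (CFS) class, so a runnable
 * SCHED_FIFO/SCHED_RR task is always picked before any SCHED_NORMAL task.
 */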