/*
 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
 *
 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Interactivity improvements by Mike Galbraith
 * (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 * Various enhancements by Dmitry Adamushko.
 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 * Group scheduling enhancements by Srivatsa Vaddagiri
 * Copyright IBM Corporation, 2007
 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 * Scaled math optimizations by Thomas Gleixner
 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */
/*
 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
 */
#ifdef CONFIG_SCHED_DEBUG
# define const_debug __read_mostly
#else
# define const_debug static const
#endif
/*
 * Targeted preemption latency for CPU-bound tasks:
 * (default: 20ms, units: nanoseconds)
 *
 * NOTE: this latency value is not the same as the concept of
 * 'timeslice length' - timeslices in CFS are of variable length.
 * (to see the precise effective timeslice length of your workload,
 *  run vmstat and monitor the context-switches field)
 *
 * On SMP systems the value of this is multiplied by the log2 of the
 * number of CPUs. (i.e. factor 2x on 2-way systems, 3x on 4-way
 * systems, 4x on 8-way systems, 5x on 16-way systems, etc.)
 */
const_debug unsigned int sysctl_sched_latency = 20000000ULL;
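/*
 * Illustrative arithmetic (not part of the original file): with the
 * 20ms default and the log2 CPU scaling described above (a factor of
 * 1 + log2(ncpus)), the effective latency target comes out at roughly
 * 20ms * 2 = 40ms on a 2-way box and 20ms * 4 = 80ms on an 8-way box.
 */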
/*
 * After fork, child runs first. (default) If set to 0 then
 * parent will (try to) run first.
 */
const_debug unsigned int sysctl_sched_child_runs_first = 1;
/*
 * Minimal preemption granularity for CPU-bound tasks:
 * (default: 2 msec, units: nanoseconds)
 */
unsigned int sysctl_sched_min_granularity __read_mostly = 2000000ULL;
/*
 * sys_sched_yield() compat mode
 *
 * This option switches the aggressive yield implementation of the
 * old scheduler back on.
 */
unsigned int __read_mostly sysctl_sched_compat_yield;
/*
 * SCHED_BATCH wake-up granularity.
 * (default: 25 msec, units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
const_debug unsigned int sysctl_sched_batch_wakeup_granularity = 25000000UL;
/*
 * SCHED_OTHER wake-up granularity.
 * (default: 1 msec, units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
const_debug unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
unsigned int sysctl_sched_runtime_limit __read_mostly;
/*
 * Debugging: various feature bits
 */
enum {
        SCHED_FEAT_FAIR_SLEEPERS        = 1,
        SCHED_FEAT_SLEEPER_AVG          = 2,
        SCHED_FEAT_SLEEPER_LOAD_AVG     = 4,
        SCHED_FEAT_PRECISE_CPU_LOAD     = 8,
        SCHED_FEAT_START_DEBIT          = 16,
        SCHED_FEAT_SKIP_INITIAL         = 32,
};

const_debug unsigned int sysctl_sched_features =
                SCHED_FEAT_FAIR_SLEEPERS        *1 |
                SCHED_FEAT_SLEEPER_AVG          *0 |
                SCHED_FEAT_SLEEPER_LOAD_AVG     *1 |
                SCHED_FEAT_PRECISE_CPU_LOAD     *1 |
                SCHED_FEAT_START_DEBIT          *1 |
                SCHED_FEAT_SKIP_INITIAL         *0;
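/*
 * Illustrative arithmetic (not part of the original file): with the
 * *1/*0 multipliers above, the default feature mask evaluates to
 * 1 + 4 + 8 + 16 = 29, i.e. everything enabled except SLEEPER_AVG
 * and SKIP_INITIAL.
 */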
extern struct sched_class fair_sched_class;
/**************************************************************
 * CFS operations on generic schedulable entities:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED

/* cpu runqueue to which this cfs_rq is attached */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
        return cfs_rq->rq;
}

/* currently running entity (if any) on this cfs_rq */
static inline struct sched_entity *cfs_rq_curr(struct cfs_rq *cfs_rq)
{
        return cfs_rq->curr;
}

/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se)      (!se->my_q)

static inline void
set_cfs_rq_curr(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        cfs_rq->curr = se;
}

#else   /* CONFIG_FAIR_GROUP_SCHED */

static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
        return container_of(cfs_rq, struct rq, cfs);
}

static inline struct sched_entity *cfs_rq_curr(struct cfs_rq *cfs_rq)
{
        struct rq *rq = rq_of(cfs_rq);

        if (unlikely(rq->curr->sched_class != &fair_sched_class))
                return NULL;

        return &rq->curr->se;
}

#define entity_is_task(se)      1

static inline void
set_cfs_rq_curr(struct cfs_rq *cfs_rq, struct sched_entity *se) { }

#endif  /* CONFIG_FAIR_GROUP_SCHED */

static inline struct task_struct *task_of(struct sched_entity *se)
{
        return container_of(se, struct task_struct, se);
}
/**************************************************************
 * Scheduling class tree data structure manipulation methods:
 */

/*
 * Enqueue an entity into the rb-tree:
 */
static void
__enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
        struct rb_node *parent = NULL;
        struct sched_entity *entry;
        s64 key = se->fair_key;
        int leftmost = 1;

        /*
         * Find the right place in the rbtree:
         */
        while (*link) {
                parent = *link;
                entry = rb_entry(parent, struct sched_entity, run_node);
                /*
                 * We don't care about collisions. Nodes with
                 * the same key stay together.
                 */
                if (key - entry->fair_key < 0) {
                        link = &parent->rb_left;
                } else {
                        link = &parent->rb_right;
                        leftmost = 0;
                }
        }

        /*
         * Maintain a cache of leftmost tree entries (it is frequently
         * used):
         */
        if (leftmost)
                cfs_rq->rb_leftmost = &se->run_node;

        rb_link_node(&se->run_node, parent, link);
        rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
        update_load_add(&cfs_rq->load, se->load.weight);
        cfs_rq->nr_running++;
        se->on_rq = 1;

        schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
}
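/*
 * Illustrative example (not part of the original file): with three
 * queued entities whose fair_key values are 100, 250 and 250, the
 * key-100 entity ends up as rb_leftmost and is picked next, while the
 * two key-250 entities simply stay adjacent in the right subtree.
 */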
static void
__dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        if (cfs_rq->rb_leftmost == &se->run_node)
                cfs_rq->rb_leftmost = rb_next(&se->run_node);
        rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
        update_load_sub(&cfs_rq->load, se->load.weight);
        cfs_rq->nr_running--;
        se->on_rq = 0;

        schedstat_add(cfs_rq, wait_runtime, -se->wait_runtime);
}
static inline struct rb_node *first_fair(struct cfs_rq *cfs_rq)
{
        return cfs_rq->rb_leftmost;
}

static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
{
        return rb_entry(first_fair(cfs_rq), struct sched_entity, run_node);
}
/**************************************************************
 * Scheduling class statistics methods:
 */

/*
 * Calculate the preemption granularity needed to schedule every
 * runnable task once per sysctl_sched_latency amount of time.
 * (down to a sensible low limit on granularity)
 *
 * For example, if there are 2 tasks running and latency is 10 msecs,
 * we switch tasks every 5 msecs. If we have 3 tasks running, we have
 * to switch tasks every 3.33 msecs to get a 10 msecs observed latency
 * for each task. We do finer and finer scheduling until we
 * reach the minimum granularity value.
 *
 * To achieve this we use the following dynamic-granularity rule:
 *
 *    gran = lat/nr - lat/nr/nr
 *
 * This comes out of the following equations:
 *
 *    kA1 + gran = kB1
 *    kB2 + gran = kA2
 *    kA2 = kA1
 *    kB2 = kB1 - d + d/nr
 *    lat = d * nr
 *
 * Where 'k' is key, 'A' is task A (waiting), 'B' is task B (running),
 * '1' is start of time, '2' is end of time, 'd' is delay between
 * 1 and 2 (during which task B was running), 'nr' is number of tasks
 * running, 'lat' is the period of each task. ('lat' is the
 * sched_latency that we aim for.)
 */
static unsigned int
sched_granularity(struct cfs_rq *cfs_rq)
{
        unsigned int gran = sysctl_sched_latency;
        unsigned int nr = cfs_rq->nr_running;

        if (nr > 1) {
                gran = gran/nr - gran/nr/nr;
                gran = max(gran, sysctl_sched_min_granularity);
        }

        return gran;
}
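/*
 * Illustrative arithmetic (not part of the original file): with
 * lat = 10ms and nr = 3, gran = 10/3 - 10/9 ~= 3.33ms - 1.11ms
 * ~= 2.22ms, which is then clamped to at least
 * sysctl_sched_min_granularity (2ms by default).
 */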
/*
 * We rescale the rescheduling granularity of tasks according to their
 * nice level, but only linearly, not exponentially:
 */
static long
niced_granularity(struct sched_entity *curr, unsigned long granularity)
{
        u64 tmp;

        if (likely(curr->load.weight == NICE_0_LOAD))
                return granularity;
        /*
         * Positive nice levels get the same granularity as nice-0:
         */
        if (likely(curr->load.weight < NICE_0_LOAD)) {
                tmp = curr->load.weight * (u64)granularity;
                return (long) (tmp >> NICE_0_SHIFT);
        }
        /*
         * Negative nice level tasks get linearly finer
         * granularity:
         */
        tmp = curr->load.inv_weight * (u64)granularity;

        /*
         * It will always fit into 'long':
         */
        return (long) (tmp >> (WMULT_SHIFT-NICE_0_SHIFT));
}
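/*
 * Illustrative arithmetic (not part of the original file), assuming
 * NICE_0_LOAD = 1024 and the standard nice-to-weight table: a nice -5
 * task has load.weight ~= 3121, so its effective granularity comes out
 * at roughly gran * 1024/3121, about a third of the nice-0 value -
 * heavier tasks are rescheduled on a proportionally finer grid.
 */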
static void
limit_wait_runtime(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        long limit = sysctl_sched_runtime_limit;

        /*
         * Niced tasks have the same history dynamic range as
         * non-niced tasks:
         */
        if (unlikely(se->wait_runtime > limit)) {
                se->wait_runtime = limit;
                schedstat_inc(se, wait_runtime_overruns);
                schedstat_inc(cfs_rq, wait_runtime_overruns);
        }
        if (unlikely(se->wait_runtime < -limit)) {
                se->wait_runtime = -limit;
                schedstat_inc(se, wait_runtime_underruns);
                schedstat_inc(cfs_rq, wait_runtime_underruns);
        }
}
static inline void
__add_wait_runtime(struct cfs_rq *cfs_rq, struct sched_entity *se, long delta)
{
        se->wait_runtime += delta;
        schedstat_add(se, sum_wait_runtime, delta);
        limit_wait_runtime(cfs_rq, se);
}

static void
add_wait_runtime(struct cfs_rq *cfs_rq, struct sched_entity *se, long delta)
{
        schedstat_add(cfs_rq, wait_runtime, -se->wait_runtime);
        __add_wait_runtime(cfs_rq, se, delta);
        schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
}
/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static inline void
__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
              unsigned long delta_exec)
{
        unsigned long delta, delta_fair, delta_mine;
        struct load_weight *lw = &cfs_rq->load;
        unsigned long load = lw->weight;

        schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max));

        curr->sum_exec_runtime += delta_exec;
        cfs_rq->exec_clock += delta_exec;

        if (unlikely(!load))
                return;

        delta_fair = calc_delta_fair(delta_exec, lw);
        delta_mine = calc_delta_mine(delta_exec, curr->load.weight, lw);

        if (cfs_rq->sleeper_bonus > sysctl_sched_min_granularity) {
                delta = min((u64)delta_mine, cfs_rq->sleeper_bonus);
                delta = min(delta, (unsigned long)(
                        (long)sysctl_sched_runtime_limit - curr->wait_runtime));
                cfs_rq->sleeper_bonus -= delta;
                delta_mine -= delta;
        }

        cfs_rq->fair_clock += delta_fair;
        /*
         * We executed delta_exec amount of time on the CPU,
         * but we were only entitled to delta_mine amount of
         * time during that period (if nr_running == 1 then
         * the two values are equal)
         * [Note: delta_mine - delta_exec is negative]:
         */
        add_wait_runtime(cfs_rq, curr, delta_mine - delta_exec);
}
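/*
 * Illustrative arithmetic (not part of the original file): with two
 * runnable nice-0 tasks, a task running for delta_exec = 4ms is only
 * entitled to delta_mine = 2ms of that window, so its wait_runtime
 * drops by 2ms, while the fair clock advances and credits the waiting
 * task with the same entitlement.
 */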
static void update_curr(struct cfs_rq *cfs_rq)
{
        struct sched_entity *curr = cfs_rq_curr(cfs_rq);
        u64 now = rq_of(cfs_rq)->clock;
        unsigned long delta_exec;

        if (unlikely(!curr))
                return;

        /*
         * Get the amount of time the current task was running
         * since the last time we changed load (this cannot
         * overflow on 32 bits):
         */
        delta_exec = (unsigned long)(now - curr->exec_start);

        __update_curr(cfs_rq, curr, delta_exec);
        curr->exec_start = now;
}
static inline void
update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        se->wait_start_fair = cfs_rq->fair_clock;
        schedstat_set(se->wait_start, rq_of(cfs_rq)->clock);
}
/*
 * We calculate fair deltas here, so protect against the random effects
 * of a multiplication overflow by capping it to the runtime limit:
 */
#if BITS_PER_LONG == 32
static inline unsigned long
calc_weighted(unsigned long delta, unsigned long weight, int shift)
{
        u64 tmp = (u64)delta * weight >> shift;

        if (unlikely(tmp > sysctl_sched_runtime_limit*2))
                return sysctl_sched_runtime_limit*2;

        return tmp;
}
#else
static inline unsigned long
calc_weighted(unsigned long delta, unsigned long weight, int shift)
{
        return delta * weight >> shift;
}
#endif
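/*
 * Illustrative arithmetic (not part of the original file), assuming
 * the NICE_0_SHIFT (10) used by the callers: on 32-bit, a delta of
 * 100ms (1e8 ns) at weight 88761 (nice -20) gives 1e8 * 88761 >> 10
 * ~= 8.7e9, which no longer fits an unsigned long - hence the cap at
 * 2*sysctl_sched_runtime_limit in the 32-bit variant.
 */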
/*
 * Task is being enqueued - update stats:
 */
static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        s64 key;

        /*
         * Are we enqueueing a waiting task? (for current tasks
         * a dequeue/enqueue event is a NOP)
         */
        if (se != cfs_rq_curr(cfs_rq))
                update_stats_wait_start(cfs_rq, se);
        /*
         * Update the key:
         */
        key = cfs_rq->fair_clock;

        /*
         * Optimize the common nice 0 case:
         */
        if (likely(se->load.weight == NICE_0_LOAD)) {
                key -= se->wait_runtime;
        } else {
                u64 tmp;

                if (se->wait_runtime < 0) {
                        tmp = -se->wait_runtime;
                        key += (tmp * se->load.inv_weight) >>
                                        (WMULT_SHIFT - NICE_0_SHIFT);
                } else {
                        tmp = se->wait_runtime;
                        key -= (tmp * se->load.inv_weight) >>
                                        (WMULT_SHIFT - NICE_0_SHIFT);
                }
        }

        se->fair_key = key;
}
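/*
 * Illustrative arithmetic (not part of the original file): for a
 * nice-0 task enqueued at fair_clock = 1000 with wait_runtime = +200,
 * fair_key = 1000 - 200 = 800 - accumulated wait credit moves the
 * entity further left in the tree, so it is picked sooner.
 */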
/*
 * Note: must be called with a freshly updated rq->fair_clock.
 */
static inline void
__update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se,
                        unsigned long delta_fair)
{
        schedstat_set(se->wait_max, max(se->wait_max,
                        rq_of(cfs_rq)->clock - se->wait_start));

        if (unlikely(se->load.weight != NICE_0_LOAD))
                delta_fair = calc_weighted(delta_fair, se->load.weight,
                                                        NICE_0_SHIFT);

        add_wait_runtime(cfs_rq, se, delta_fair);
}

static void
update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        unsigned long delta_fair;

        if (unlikely(!se->wait_start_fair))
                return;

        delta_fair = (unsigned long)min((u64)(2*sysctl_sched_runtime_limit),
                        (u64)(cfs_rq->fair_clock - se->wait_start_fair));

        __update_stats_wait_end(cfs_rq, se, delta_fair);

        se->wait_start_fair = 0;
        schedstat_set(se->wait_start, 0);
}
static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        update_curr(cfs_rq);
        /*
         * Mark the end of the wait period if dequeueing a
         * waiting task:
         */
        if (se != cfs_rq_curr(cfs_rq))
                update_stats_wait_end(cfs_rq, se);
}

/*
 * We are picking a new current task - update its stats:
 */
static inline void
update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        /*
         * We are starting a new run period:
         */
        se->exec_start = rq_of(cfs_rq)->clock;
}

/*
 * We are descheduling a task - update its stats:
 */
static inline void
update_stats_curr_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        se->exec_start = 0;
}
/**************************************************
 * Scheduling class queueing methods:
 */

static void __enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se,
                              unsigned long delta_fair)
{
        unsigned long load = cfs_rq->load.weight;
        long prev_runtime;

        /*
         * Do not boost sleepers if there's too much bonus 'in flight'
         * already:
         */
        if (unlikely(cfs_rq->sleeper_bonus > sysctl_sched_runtime_limit))
                return;

        if (sysctl_sched_features & SCHED_FEAT_SLEEPER_LOAD_AVG)
                load = rq_of(cfs_rq)->cpu_load[2];

        /*
         * Fix up delta_fair with the effect of us running
         * during the whole sleep period:
         */
        if (sysctl_sched_features & SCHED_FEAT_SLEEPER_AVG)
                delta_fair = div64_likely32((u64)delta_fair * load,
                                                load + se->load.weight);

        if (unlikely(se->load.weight != NICE_0_LOAD))
                delta_fair = calc_weighted(delta_fair, se->load.weight,
                                                        NICE_0_SHIFT);

        prev_runtime = se->wait_runtime;
        __add_wait_runtime(cfs_rq, se, delta_fair);
        delta_fair = se->wait_runtime - prev_runtime;

        /*
         * Track the amount of bonus we've given to sleepers:
         */
        cfs_rq->sleeper_bonus += delta_fair;
}
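/*
 * Illustrative arithmetic (not part of the original file): with
 * SLEEPER_AVG in effect, a nice-0 sleeper (weight 1024) waking where
 * the relevant load is 2048 has its 30ms sleep credit scaled by
 * 2048/(2048+1024), i.e. to about 20ms, before being added to
 * wait_runtime and accounted in sleeper_bonus.
 */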
static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        struct task_struct *tsk = task_of(se);
        unsigned long delta_fair;

        if ((entity_is_task(se) && tsk->policy == SCHED_BATCH) ||
                         !(sysctl_sched_features & SCHED_FEAT_FAIR_SLEEPERS))
                return;

        delta_fair = (unsigned long)min((u64)(2*sysctl_sched_runtime_limit),
                (u64)(cfs_rq->fair_clock - se->sleep_start_fair));

        __enqueue_sleeper(cfs_rq, se, delta_fair);

        se->sleep_start_fair = 0;

#ifdef CONFIG_SCHEDSTATS
        if (se->sleep_start) {
                u64 delta = rq_of(cfs_rq)->clock - se->sleep_start;

                if ((s64)delta < 0)
                        delta = 0;

                if (unlikely(delta > se->sleep_max))
                        se->sleep_max = delta;

                se->sleep_start = 0;
                se->sum_sleep_runtime += delta;
        }
        if (se->block_start) {
                u64 delta = rq_of(cfs_rq)->clock - se->block_start;

                if ((s64)delta < 0)
                        delta = 0;

                if (unlikely(delta > se->block_max))
                        se->block_max = delta;

                se->block_start = 0;
                se->sum_sleep_runtime += delta;

                /*
                 * Blocking time is in units of nanosecs, so shift by 20 to
                 * get a milliseconds-range estimation of the amount of
                 * time that the task spent sleeping:
                 */
                if (unlikely(prof_on == SLEEP_PROFILING)) {
                        profile_hits(SLEEP_PROFILING, (void *)get_wchan(tsk),
                                     delta >> 20);
                }
        }
#endif
}
static void
enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
{
        /*
         * Update the fair clock.
         */
        update_curr(cfs_rq);

        if (wakeup)
                enqueue_sleeper(cfs_rq, se);

        update_stats_enqueue(cfs_rq, se);
        __enqueue_entity(cfs_rq, se);
}
static void
dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
{
        update_stats_dequeue(cfs_rq, se);
        if (sleep) {
                se->sleep_start_fair = cfs_rq->fair_clock;
#ifdef CONFIG_SCHEDSTATS
                if (entity_is_task(se)) {
                        struct task_struct *tsk = task_of(se);

                        if (tsk->state & TASK_INTERRUPTIBLE)
                                se->sleep_start = rq_of(cfs_rq)->clock;
                        if (tsk->state & TASK_UNINTERRUPTIBLE)
                                se->block_start = rq_of(cfs_rq)->clock;
                }
#endif
        }
        __dequeue_entity(cfs_rq, se);
}
/*
 * Preempt the current task with a newly woken task if needed:
 */
static void
__check_preempt_curr_fair(struct cfs_rq *cfs_rq, struct sched_entity *se,
                          struct sched_entity *curr, unsigned long granularity)
{
        s64 __delta = curr->fair_key - se->fair_key;
        unsigned long ideal_runtime, delta_exec;

        /*
         * ideal_runtime is compared against sum_exec_runtime, which is
         * walltime, hence do not scale.
         */
        ideal_runtime = max(sysctl_sched_latency / cfs_rq->nr_running,
                        (unsigned long)sysctl_sched_min_granularity);

        /*
         * If we executed more than what the latency constraint suggests,
         * reduce the rescheduling granularity. This way the total latency
         * of how much a task is not scheduled converges to
         * sysctl_sched_latency:
         */
        delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
        if (delta_exec > ideal_runtime)
                granularity = 0;

        /*
         * Take scheduling granularity into account - do not
         * preempt the current task unless the best task has
         * a larger than sched_granularity fairness advantage:
         *
         * scale granularity as key space is in fair_clock.
         */
        if (__delta > niced_granularity(curr, granularity))
                resched_task(rq_of(cfs_rq)->curr);
}
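/*
 * Illustrative arithmetic (not part of the original file): with
 * latency = 20ms and 4 runnable tasks, ideal_runtime = max(5ms, 2ms)
 * = 5ms. A current task that has already run 6ms since being picked
 * gets its granularity forced to 0, so any entity with a smaller
 * fair_key immediately triggers a reschedule.
 */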
static inline void
set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        /*
         * Any task has to be enqueued before it gets to execute on
         * a CPU. So account for the time it spent waiting on the
         * runqueue. (note, here we rely on pick_next_task() having
         * done a put_prev_task_fair() shortly before this, which
         * updated rq->fair_clock - used by update_stats_wait_end())
         */
        update_stats_wait_end(cfs_rq, se);
        update_stats_curr_start(cfs_rq, se);
        set_cfs_rq_curr(cfs_rq, se);
#ifdef CONFIG_SCHEDSTATS
        /*
         * Track our maximum slice length, if the CPU's load is at
         * least twice that of our own weight (i.e. don't track it
         * when there are only lesser-weight tasks around):
         */
        if (rq_of(cfs_rq)->ls.load.weight >= 2*se->load.weight) {
                se->slice_max = max(se->slice_max,
                        se->sum_exec_runtime - se->prev_sum_exec_runtime);
        }
#endif
        se->prev_sum_exec_runtime = se->sum_exec_runtime;
}
static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
{
        struct sched_entity *se = __pick_next_entity(cfs_rq);

        set_next_entity(cfs_rq, se);

        return se;
}

static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
{
        /*
         * If still on the runqueue then deactivate_task()
         * was not called and update_curr() has to be done:
         */
        if (prev->on_rq)
                update_curr(cfs_rq);

        update_stats_curr_end(cfs_rq, prev);

        if (prev->on_rq)
                update_stats_wait_start(cfs_rq, prev);
        set_cfs_rq_curr(cfs_rq, NULL);
}
static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
        struct sched_entity *next;

        /*
         * Dequeue and enqueue the task to update its
         * position within the tree:
         */
        dequeue_entity(cfs_rq, curr, 0);
        enqueue_entity(cfs_rq, curr, 0);

        /*
         * Reschedule if another task tops the current one.
         */
        next = __pick_next_entity(cfs_rq);
        if (next == curr)
                return;

        __check_preempt_curr_fair(cfs_rq, next, curr,
                        sched_granularity(cfs_rq));
}
/**************************************************
 * CFS operations on tasks:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED

/* Walk up scheduling entities hierarchy */
#define for_each_sched_entity(se) \
                for (; se; se = se->parent)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
        return p->se.cfs_rq;
}

/* runqueue on which this entity is (to be) queued */
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
        return se->cfs_rq;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
        return grp->my_q;
}

/* Given a group's cfs_rq on one cpu, return its corresponding cfs_rq on
 * another cpu ('this_cpu')
 */
static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
{
        /* A later patch will take group into account */
        return &cpu_rq(this_cpu)->cfs;
}

/* Iterate through all leaf cfs_rq's on a runqueue */
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
        list_for_each_entry(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)

/* Do the two (enqueued) tasks belong to the same group? */
static inline int is_same_group(struct task_struct *curr, struct task_struct *p)
{
        if (curr->se.cfs_rq == p->se.cfs_rq)
                return 1;

        return 0;
}
#else   /* CONFIG_FAIR_GROUP_SCHED */

#define for_each_sched_entity(se) \
                for (; se; se = NULL)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
        return &task_rq(p)->cfs;
}

static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
        struct task_struct *p = task_of(se);
        struct rq *rq = task_rq(p);

        return &rq->cfs;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
        return NULL;
}

static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
{
        return &cpu_rq(this_cpu)->cfs;
}

#define for_each_leaf_cfs_rq(rq, cfs_rq) \
                for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)

static inline int is_same_group(struct task_struct *curr, struct task_struct *p)
{
        return 1;
}

#endif  /* CONFIG_FAIR_GROUP_SCHED */
/*
 * The enqueue_task method is called before nr_running is
 * increased. Here we update the fair scheduling stats and
 * then put the task into the rbtree:
 */
static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
{
        struct cfs_rq *cfs_rq;
        struct sched_entity *se = &p->se;

        for_each_sched_entity(se) {
                if (se->on_rq)
                        break;
                cfs_rq = cfs_rq_of(se);
                enqueue_entity(cfs_rq, se, wakeup);
        }
}
/*
 * The dequeue_task method is called before nr_running is
 * decreased. We remove the task from the rbtree and
 * update the fair scheduling stats:
 */
static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
{
        struct cfs_rq *cfs_rq;
        struct sched_entity *se = &p->se;

        for_each_sched_entity(se) {
                cfs_rq = cfs_rq_of(se);
                dequeue_entity(cfs_rq, se, sleep);
                /* Don't dequeue parent if it has other entities besides us */
                if (cfs_rq->load.weight)
                        break;
        }
}
/*
 * sched_yield() support is very simple - we dequeue and enqueue.
 *
 * If compat_yield is turned on then we requeue to the end of the tree.
 */
static void yield_task_fair(struct rq *rq, struct task_struct *p)
{
        struct cfs_rq *cfs_rq = task_cfs_rq(p);
        struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
        struct sched_entity *rightmost, *se = &p->se;
        struct rb_node *parent;

        /*
         * Are we the only task in the tree?
         */
        if (unlikely(cfs_rq->nr_running == 1))
                return;

        if (likely(!sysctl_sched_compat_yield)) {
                __update_rq_clock(rq);
                /*
                 * Dequeue and enqueue the task to update its
                 * position within the tree:
                 */
                dequeue_entity(cfs_rq, &p->se, 0);
                enqueue_entity(cfs_rq, &p->se, 0);

                return;
        }
        /*
         * Find the rightmost entry in the rbtree:
         */
        do {
                parent = *link;
                link = &parent->rb_right;
        } while (*link);

        rightmost = rb_entry(parent, struct sched_entity, run_node);
        /*
         * Already in the rightmost position?
         */
        if (unlikely(rightmost == se))
                return;

        /*
         * Minimally necessary key value to be last in the tree:
         */
        se->fair_key = rightmost->fair_key + 1;

        if (cfs_rq->rb_leftmost == &se->run_node)
                cfs_rq->rb_leftmost = rb_next(&se->run_node);
        /*
         * Relink the task to the rightmost position:
         */
        rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
        rb_link_node(&se->run_node, parent, link);
        rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
}
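/*
 * Usage note (not part of the original file): the aggressive requeue
 * path above is only taken when the administrator enables it, e.g.
 *
 *      echo 1 > /proc/sys/kernel/sched_compat_yield
 *
 * With the default of 0, sched_yield() merely dequeues and re-enqueues
 * the task at its current fair_key position.
 */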
/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_fair(struct rq *rq, struct task_struct *p)
{
        struct task_struct *curr = rq->curr;
        struct cfs_rq *cfs_rq = task_cfs_rq(curr);
        unsigned long gran;

        if (unlikely(rt_prio(p->prio))) {
                update_rq_clock(rq);
                update_curr(cfs_rq);
                resched_task(curr);
                return;
        }

        gran = sysctl_sched_wakeup_granularity;
        /*
         * Batch tasks prefer throughput over latency:
         */
        if (unlikely(p->policy == SCHED_BATCH))
                gran = sysctl_sched_batch_wakeup_granularity;

        if (is_same_group(curr, p))
                __check_preempt_curr_fair(cfs_rq, &p->se, &curr->se, gran);
}
static struct task_struct *pick_next_task_fair(struct rq *rq)
{
        struct cfs_rq *cfs_rq = &rq->cfs;
        struct sched_entity *se;

        if (unlikely(!cfs_rq->nr_running))
                return NULL;

        do {
                se = pick_next_entity(cfs_rq);
                cfs_rq = group_cfs_rq(se);
        } while (cfs_rq);

        return task_of(se);
}
/*
 * Account for a descheduled task:
 */
static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
{
        struct sched_entity *se = &prev->se;
        struct cfs_rq *cfs_rq;

        for_each_sched_entity(se) {
                cfs_rq = cfs_rq_of(se);
                put_prev_entity(cfs_rq, se);
        }
}
/**************************************************
 * Fair scheduling class load-balancing methods:
 */

/*
 * Load-balancing iterator. Note: while the runqueue stays locked
 * during the whole iteration, the current task might be
 * dequeued so the iterator has to be dequeue-safe. Here we
 * achieve that by always pre-iterating before returning
 * the current task:
 */
static inline struct task_struct *
__load_balance_iterator(struct cfs_rq *cfs_rq, struct rb_node *curr)
{
        struct task_struct *p;

        if (!curr)
                return NULL;

        p = rb_entry(curr, struct task_struct, se.run_node);
        cfs_rq->rb_load_balance_curr = rb_next(curr);

        return p;
}

static struct task_struct *load_balance_start_fair(void *arg)
{
        struct cfs_rq *cfs_rq = arg;

        return __load_balance_iterator(cfs_rq, first_fair(cfs_rq));
}

static struct task_struct *load_balance_next_fair(void *arg)
{
        struct cfs_rq *cfs_rq = arg;

        return __load_balance_iterator(cfs_rq, cfs_rq->rb_load_balance_curr);
}
#ifdef CONFIG_FAIR_GROUP_SCHED
static int cfs_rq_best_prio(struct cfs_rq *cfs_rq)
{
        struct sched_entity *curr;
        struct task_struct *p;

        if (!cfs_rq->nr_running)
                return MAX_PRIO;

        curr = __pick_next_entity(cfs_rq);
        p = task_of(curr);

        return p->prio;
}
#endif
static unsigned long
load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
                  unsigned long max_nr_move, unsigned long max_load_move,
                  struct sched_domain *sd, enum cpu_idle_type idle,
                  int *all_pinned, int *this_best_prio)
{
        struct cfs_rq *busy_cfs_rq;
        unsigned long load_moved, total_nr_moved = 0, nr_moved;
        long rem_load_move = max_load_move;
        struct rq_iterator cfs_rq_iterator;

        cfs_rq_iterator.start = load_balance_start_fair;
        cfs_rq_iterator.next = load_balance_next_fair;

        for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
#ifdef CONFIG_FAIR_GROUP_SCHED
                struct cfs_rq *this_cfs_rq;
                long imbalance;
                unsigned long maxload;

                this_cfs_rq = cpu_cfs_rq(busy_cfs_rq, this_cpu);

                imbalance = busy_cfs_rq->load.weight - this_cfs_rq->load.weight;
                /* Don't pull if this_cfs_rq has more load than busy_cfs_rq */
                if (imbalance <= 0)
                        continue;

                /* Don't pull more than imbalance/2 */
                imbalance /= 2;
                maxload = min(rem_load_move, imbalance);

                *this_best_prio = cfs_rq_best_prio(this_cfs_rq);
#else
# define maxload rem_load_move
#endif
                /*
                 * pass busy_cfs_rq argument into
                 * load_balance_[start|next]_fair iterators
                 */
                cfs_rq_iterator.arg = busy_cfs_rq;
                nr_moved = balance_tasks(this_rq, this_cpu, busiest,
                                max_nr_move, maxload, sd, idle, all_pinned,
                                &load_moved, this_best_prio, &cfs_rq_iterator);

                total_nr_moved += nr_moved;
                max_nr_move -= nr_moved;
                rem_load_move -= load_moved;

                if (max_nr_move <= 0 || rem_load_move <= 0)
                        break;
        }

        return max_load_move - rem_load_move;
}
/*
 * scheduler tick hitting a task of our scheduling class:
 */
static void task_tick_fair(struct rq *rq, struct task_struct *curr)
{
        struct cfs_rq *cfs_rq;
        struct sched_entity *se = &curr->se;

        for_each_sched_entity(se) {
                cfs_rq = cfs_rq_of(se);
                entity_tick(cfs_rq, se);
        }
}
/*
 * Share the fairness runtime between parent and child, thus the
 * total amount of pressure for CPU stays equal - new tasks
 * get a chance to run but frequent forkers are not allowed to
 * monopolize the CPU. Note: the parent runqueue is locked,
 * the child is not running yet.
 */
static void task_new_fair(struct rq *rq, struct task_struct *p)
{
        struct cfs_rq *cfs_rq = task_cfs_rq(p);
        struct sched_entity *se = &p->se, *curr = cfs_rq_curr(cfs_rq);

        sched_info_queued(p);

        update_curr(cfs_rq);
        update_stats_enqueue(cfs_rq, se);
        /*
         * Child runs first: we let it run before the parent
         * until it reschedules once. We set up the key so that
         * it will preempt the parent:
         */
        se->fair_key = curr->fair_key -
                niced_granularity(curr, sched_granularity(cfs_rq)) - 1;
        /*
         * The first wait is dominated by the child-runs-first logic,
         * so do not credit it with that waiting time yet:
         */
        if (sysctl_sched_features & SCHED_FEAT_SKIP_INITIAL)
                se->wait_start_fair = 0;

        /*
         * The statistical average of wait_runtime is about
         * -granularity/2, so initialize the task with that:
         */
        if (sysctl_sched_features & SCHED_FEAT_START_DEBIT)
                se->wait_runtime = -(sched_granularity(cfs_rq) / 2);

        __enqueue_entity(cfs_rq, se);
        resched_task(rq->curr);
}
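/*
 * Illustrative arithmetic (not part of the original file): with a
 * computed granularity of 4ms, a freshly forked nice-0 child gets
 * fair_key = parent_key - gran - 1, so it sorts left of the parent
 * and runs first; with START_DEBIT set it additionally starts out
 * owing 2ms of wait_runtime.
 */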
#ifdef CONFIG_FAIR_GROUP_SCHED
/*
 * Account for a task changing its policy or group.
 *
 * This routine is mostly called to set cfs_rq->curr field when a task
 * migrates between groups/classes.
 */
static void set_curr_task_fair(struct rq *rq)
{
        struct sched_entity *se = &rq->curr->se;

        for_each_sched_entity(se)
                set_next_entity(cfs_rq_of(se), se);
}
#else
static void set_curr_task_fair(struct rq *rq)
{
}
#endif
/*
 * All the scheduling class methods:
 */
struct sched_class fair_sched_class __read_mostly = {
        .enqueue_task           = enqueue_task_fair,
        .dequeue_task           = dequeue_task_fair,
        .yield_task             = yield_task_fair,

        .check_preempt_curr     = check_preempt_curr_fair,

        .pick_next_task         = pick_next_task_fair,
        .put_prev_task          = put_prev_task_fair,

        .load_balance           = load_balance_fair,

        .set_curr_task          = set_curr_task_fair,
        .task_tick              = task_tick_fair,
        .task_new               = task_new_fair,
};
#ifdef CONFIG_SCHED_DEBUG
static void print_cfs_stats(struct seq_file *m, int cpu)
{
        struct cfs_rq *cfs_rq;

        for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
                print_cfs_rq(m, cpu, cfs_rq);
}
#endif