* (default: 20ms, units: nanoseconds)
*
* NOTE: this latency value is not the same as the concept of
- * 'timeslice length' - timeslices in CFS are of variable length.
- * (to see the precise effective timeslice length of your workload,
- * run vmstat and monitor the context-switches field)
+ * 'timeslice length' - timeslices in CFS are of variable length
+ * and there is no persistent notion of a per-task timeslice as in
+ * traditional, time-slice based scheduling.
*
- * On SMP systems the value of this is multiplied by the log2 of the
- * number of CPUs. (i.e. factor 2x on 2-way systems, 3x on 4-way
- * systems, 4x on 8-way systems, 5x on 16-way systems, etc.)
- * Targeted preemption latency for CPU-bound tasks:
+ * (to see the precise effective timeslice length of your workload,
+ * run vmstat and monitor the context-switches (cs) field)
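+ *
+ * (e.g. "vmstat 1" reports context switches per second in the 'cs'
+ *  column; on a fully busy system, nr_cpus / cs gives a rough estimate
+ *  of the average effective timeslice in seconds)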
*/
const_debug unsigned int sysctl_sched_latency = 20000000ULL;
- * Minimal preemption granularity for CPU-bound tasks:
- * (default: 2 msec, units: nanoseconds)
+ * Number of runnable tasks that fit within the targeted preemption
+ * latency; with more runnable tasks the scheduling period is stretched:
+ * (default: 20, i.e. a minimal granularity of sysctl_sched_latency / 20)
 */
-unsigned int sysctl_sched_min_granularity __read_mostly = 2000000ULL;
+const_debug unsigned int sysctl_sched_nr_latency = 20;
/*
* sys_sched_yield() compat mode
/*
* SCHED_BATCH wake-up granularity.
- * (default: 25 msec, units: nanoseconds)
+ * (default: 10 msec, units: nanoseconds)
*
* This option delays the preemption effects of decoupled workloads
* and reduces their over-scheduling. Synchronous workloads will still
* have immediate wakeup/sleep latencies.
*/
-const_debug unsigned int sysctl_sched_batch_wakeup_granularity = 25000000UL;
+const_debug unsigned int sysctl_sched_batch_wakeup_granularity = 10000000UL;
/*
* SCHED_OTHER wake-up granularity.
- * (default: 1 msec, units: nanoseconds)
+ * (default: 10 msec, units: nanoseconds)
*
* This option delays the preemption effects of decoupled workloads
* and reduces their over-scheduling. Synchronous workloads will still
* have immediate wakeup/sleep latencies.
*/
-const_debug unsigned int sysctl_sched_wakeup_granularity = 2000000UL;
-
-unsigned int sysctl_sched_runtime_limit __read_mostly;
+const_debug unsigned int sysctl_sched_wakeup_granularity = 10000000UL;
-extern struct sched_class fair_sched_class;
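+
+/*
+ * Estimated cost of migrating a task between CPUs
+ * (default: 0.5 msec, units: nanoseconds); tasks that ran more recently
+ * than this are considered cache-hot by the load balancer and are
+ * preferably not migrated.
+ */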
+const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
/**************************************************************
* CFS operations on generic schedulable entities:
* Scheduling class tree data structure manipulation methods:
*/
-static inline u64
-max_vruntime(u64 min_vruntime, u64 vruntime)
+static inline u64 max_vruntime(u64 min_vruntime, u64 vruntime)
{
- if ((vruntime > min_vruntime) ||
- (min_vruntime > (1ULL << 61) && vruntime < (1ULL << 50)))
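+	/*
+	 * Compute the difference as a signed value: the comparison then
+	 * remains correct even when the u64 vruntime counters wrap.
+	 */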
+ s64 delta = (s64)(vruntime - min_vruntime);
+ if (delta > 0)
min_vruntime = vruntime;
return min_vruntime;
}
-static inline void
-set_leftmost(struct cfs_rq *cfs_rq, struct rb_node *leftmost)
+static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
{
- struct sched_entity *se;
+ s64 delta = (s64)(vruntime - min_vruntime);
+ if (delta < 0)
+ min_vruntime = vruntime;
- cfs_rq->rb_leftmost = leftmost;
- if (leftmost)
- se = rb_entry(leftmost, struct sched_entity, run_node);
+ return min_vruntime;
}
-static inline s64
-entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
+static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
return se->vruntime - cfs_rq->min_vruntime;
}
/*
* Enqueue an entity into the rb-tree:
*/
-static void
-__enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
+static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
struct rb_node *parent = NULL;
* used):
*/
if (leftmost)
- set_leftmost(cfs_rq, &se->run_node);
+ cfs_rq->rb_leftmost = &se->run_node;
rb_link_node(&se->run_node, parent, link);
rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
}
-static void
-__dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
+static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
if (cfs_rq->rb_leftmost == &se->run_node)
- set_leftmost(cfs_rq, rb_next(&se->run_node));
+ cfs_rq->rb_leftmost = rb_next(&se->run_node);
rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
}
* Scheduling class statistics methods:
*/
+
+/*
+ * The idea is to set a period in which each task runs once.
+ *
+ * When there are more runnable tasks than sysctl_sched_nr_latency we
+ * have to stretch this period, because otherwise the slices get too small.
+ *
+ * p = (nr <= nl) ? l : l*nr/nl
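+ *
+ * For example, with the defaults (l = sysctl_sched_latency = 20ms and
+ * nl = sysctl_sched_nr_latency = 20), 5 runnable tasks keep p at 20ms,
+ * while 40 runnable tasks stretch it to 20ms * 40/20 = 40ms, so each
+ * equally-weighted task still gets a 1ms slice.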
+ */
static u64 __sched_period(unsigned long nr_running)
{
u64 period = sysctl_sched_latency;
- unsigned long nr_latency =
- sysctl_sched_latency / sysctl_sched_min_granularity;
+ unsigned long nr_latency = sysctl_sched_nr_latency;
	if (unlikely(nr_running > nr_latency)) {
		period *= nr_running;
		do_div(period, nr_latency);
	}

	return period;
}
+/*
+ * We calculate the wall-time slice from the period by taking a part
+ * proportional to the weight.
+ *
+ * s = p*w/rw
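+ *
+ * For example, two runnable nice-0 tasks (w = rw/2) each get half the
+ * period: s = 20ms * 1/2 = 10ms with the default latency.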
+ */
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
- u64 period = __sched_period(cfs_rq->nr_running);
+ u64 slice = __sched_period(cfs_rq->nr_running);
- period *= se->load.weight;
- do_div(period, cfs_rq->load.weight);
+ slice *= se->load.weight;
+ do_div(slice, cfs_rq->load.weight);
- return period;
+ return slice;
+}
+
+/*
+ * We calculate the vruntime slice.
+ *
+ * vs = s/w = p/rw
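+ *
+ * i.e. the slice expressed in vruntime terms: since vruntime advances
+ * inversely proportional to an entity's weight, the entity's own weight
+ * cancels out and vs depends only on the period and total queue weight.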
+ */
+static u64 __sched_vslice(unsigned long rq_weight, unsigned long nr_running)
+{
+ u64 vslice = __sched_period(nr_running);
+
+ do_div(vslice, rq_weight);
+
+ return vslice;
+}
+
+static u64 sched_vslice(struct cfs_rq *cfs_rq)
+{
+ return __sched_vslice(cfs_rq->load.weight, cfs_rq->nr_running);
+}
+
+static u64 sched_vslice_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+ return __sched_vslice(cfs_rq->load.weight + se->load.weight,
+ cfs_rq->nr_running + 1);
}
/*
unsigned long delta_exec)
{
unsigned long delta_exec_weighted;
- u64 next_vruntime, min_vruntime;
+ u64 vruntime;
schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max));
* value tracking the leftmost vruntime in the tree.
*/
if (first_fair(cfs_rq)) {
- next_vruntime = __pick_next_entity(cfs_rq)->vruntime;
-
- /* min_vruntime() := !max_vruntime() */
- min_vruntime = max_vruntime(curr->vruntime, next_vruntime);
- if (min_vruntime == next_vruntime)
- min_vruntime = curr->vruntime;
- else
- min_vruntime = next_vruntime;
+ vruntime = min_vruntime(curr->vruntime,
+ __pick_next_entity(cfs_rq)->vruntime);
} else
- min_vruntime = curr->vruntime;
+ vruntime = curr->vruntime;
cfs_rq->min_vruntime =
- max_vruntime(cfs_rq->min_vruntime, min_vruntime);
+ max_vruntime(cfs_rq->min_vruntime, vruntime);
}
static void update_curr(struct cfs_rq *cfs_rq)
schedstat_set(se->wait_start, rq_of(cfs_rq)->clock);
}
-static inline unsigned long
-calc_weighted(unsigned long delta, struct sched_entity *se)
-{
- unsigned long weight = se->load.weight;
-
- if (unlikely(weight != NICE_0_LOAD))
- return (u64)delta * se->load.weight >> NICE_0_SHIFT;
- else
- return delta;
-}
-
/*
* Task is being enqueued - update stats:
*/
static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
- update_curr(cfs_rq);
/*
* Mark the end of the wait period if dequeueing a
* waiting task:
se->exec_start = rq_of(cfs_rq)->clock;
}
-/*
- * We are descheduling a task - update its stats:
- */
-static inline void
-update_stats_curr_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
- se->exec_start = 0;
-}
-
/**************************************************
* Scheduling class queueing methods:
*/
#endif
}
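+
+/*
+ * Debugging aid: count (via schedstats) how often an entity's vruntime
+ * ends up more than three latency periods away from the queue's
+ * min_vruntime.
+ */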
+static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+#ifdef CONFIG_SCHED_DEBUG
+ s64 d = se->vruntime - cfs_rq->min_vruntime;
+
+ if (d < 0)
+ d = -d;
+
+ if (d > 3*sysctl_sched_latency)
+ schedstat_inc(cfs_rq, nr_spread_over);
+#endif
+}
+
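+/*
+ * Place a (re)queued entity on the timeline, relative to the queue's
+ * min_vruntime: new tasks are debited one slice (START_DEBIT) so they
+ * cannot immediately monopolize the CPU, while waking tasks are credited
+ * up to one latency period so they can preempt - but are never placed at
+ * an earlier vruntime than they went to sleep with.
+ */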
static void
place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
{
- u64 min_runtime, latency;
+ u64 vruntime;
- min_runtime = cfs_rq->min_vruntime;
+ vruntime = cfs_rq->min_vruntime;
- if (sched_feat(USE_TREE_AVG)) {
+ if (sched_feat(TREE_AVG)) {
struct sched_entity *last = __pick_last_entity(cfs_rq);
if (last) {
- min_runtime = __pick_next_entity(cfs_rq)->vruntime;
- min_runtime += last->vruntime;
- min_runtime >>= 1;
+ vruntime += last->vruntime;
+ vruntime >>= 1;
}
- } else if (sched_feat(APPROX_AVG))
- min_runtime += sysctl_sched_latency/2;
+ } else if (sched_feat(APPROX_AVG) && cfs_rq->nr_running)
+ vruntime += sched_vslice(cfs_rq)/2;
if (initial && sched_feat(START_DEBIT))
- min_runtime += sched_slice(cfs_rq, se);
-
- if (!initial && sched_feat(NEW_FAIR_SLEEPERS)) {
- latency = sysctl_sched_latency;
- if (min_runtime > latency)
- min_runtime -= latency;
- else
- min_runtime = 0;
+ vruntime += sched_vslice_add(cfs_rq, se);
+
+ if (!initial) {
+ if (sched_feat(NEW_FAIR_SLEEPERS) && entity_is_task(se) &&
+ task_of(se)->policy != SCHED_BATCH)
+ vruntime -= sysctl_sched_latency;
+
+ vruntime = max_t(s64, vruntime, se->vruntime);
}
- se->vruntime = max(se->vruntime, min_runtime);
+ se->vruntime = vruntime;
+
}
static void
enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
{
/*
- * Update the fair clock.
+ * Update run-time statistics of the 'current'.
*/
update_curr(cfs_rq);
}
update_stats_enqueue(cfs_rq, se);
+ check_spread(cfs_rq, se);
if (se != cfs_rq->curr)
__enqueue_entity(cfs_rq, se);
account_entity_enqueue(cfs_rq, se);
static void
dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
{
+ /*
+ * Update run-time statistics of the 'current'.
+ */
+ update_curr(cfs_rq);
+
update_stats_dequeue(cfs_rq, se);
-#ifdef CONFIG_SCHEDSTATS
if (sleep) {
+ se->peer_preempt = 0;
+#ifdef CONFIG_SCHEDSTATS
if (entity_is_task(se)) {
struct task_struct *tsk = task_of(se);
if (tsk->state & TASK_UNINTERRUPTIBLE)
se->block_start = rq_of(cfs_rq)->clock;
}
- }
#endif
+ }
+
if (se != cfs_rq->curr)
__dequeue_entity(cfs_rq, se);
account_entity_dequeue(cfs_rq, se);
ideal_runtime = sched_slice(cfs_rq, curr);
delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
- if (delta_exec > ideal_runtime)
+ if (delta_exec > ideal_runtime ||
+ (sched_feat(PREEMPT_RESTRICT) && curr->peer_preempt))
resched_task(rq_of(cfs_rq)->curr);
+ curr->peer_preempt = 0;
}
static void
static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
{
- struct sched_entity *se = __pick_next_entity(cfs_rq);
+ struct sched_entity *se = NULL;
- set_next_entity(cfs_rq, se);
+ if (first_fair(cfs_rq)) {
+ se = __pick_next_entity(cfs_rq);
+ set_next_entity(cfs_rq, se);
+ }
return se;
}
if (prev->on_rq)
update_curr(cfs_rq);
- update_stats_curr_end(cfs_rq, prev);
-
+ check_spread(cfs_rq, prev);
if (prev->on_rq) {
update_stats_wait_start(cfs_rq, prev);
/* Put 'current' back into the tree. */
*/
update_curr(cfs_rq);
- if (cfs_rq->nr_running > 1)
+ if (cfs_rq->nr_running > 1 || !sched_feat(WAKEUP_PREEMPT))
check_preempt_tick(cfs_rq, curr);
}
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
list_for_each_entry(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
-/* Do the two (enqueued) tasks belong to the same group ? */
-static inline int is_same_group(struct task_struct *curr, struct task_struct *p)
+/* Do the two (enqueued) entities belong to the same group ? */
+static inline int
+is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
- if (curr->se.cfs_rq == p->se.cfs_rq)
+ if (se->cfs_rq == pse->cfs_rq)
return 1;
return 0;
}
+static inline struct sched_entity *parent_entity(struct sched_entity *se)
+{
+ return se->parent;
+}
+
#else /* CONFIG_FAIR_GROUP_SCHED */
#define for_each_sched_entity(se) \
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
-static inline int is_same_group(struct task_struct *curr, struct task_struct *p)
+static inline int
+is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
return 1;
}
+static inline struct sched_entity *parent_entity(struct sched_entity *se)
+{
+ return NULL;
+}
+
#endif /* CONFIG_FAIR_GROUP_SCHED */
/*
break;
cfs_rq = cfs_rq_of(se);
enqueue_entity(cfs_rq, se, wakeup);
+ wakeup = 1;
}
}
/* Don't dequeue parent if it has other entities besides us */
if (cfs_rq->load.weight)
break;
+ sleep = 1;
}
}
static void yield_task_fair(struct rq *rq)
{
struct cfs_rq *cfs_rq = task_cfs_rq(rq->curr);
- struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
struct sched_entity *rightmost, *se = &rq->curr->se;
- struct rb_node *parent;
/*
* Are we the only task in the tree?
if (likely(!sysctl_sched_compat_yield)) {
__update_rq_clock(rq);
/*
- * Dequeue and enqueue the task to update its
- * position within the tree:
+ * Update run-time statistics of the 'current'.
*/
- dequeue_entity(cfs_rq, se, 0);
- enqueue_entity(cfs_rq, se, 0);
+ update_curr(cfs_rq);
return;
}
/*
* Find the rightmost entry in the rbtree:
*/
- do {
- parent = *link;
- link = &parent->rb_right;
- } while (*link);
-
- rightmost = rb_entry(parent, struct sched_entity, run_node);
+ rightmost = __pick_last_entity(cfs_rq);
/*
* Already in the rightmost position?
*/
- if (unlikely(rightmost == se))
+ if (unlikely(rightmost->vruntime < se->vruntime))
return;
/*
* Minimally necessary key value to be last in the tree:
+ * Upon rescheduling, sched_class::put_prev_task() will place
+ * 'current' within the tree based on its new key value.
*/
se->vruntime = rightmost->vruntime + 1;
-
- if (cfs_rq->rb_leftmost == &se->run_node)
- cfs_rq->rb_leftmost = rb_next(&se->run_node);
- /*
- * Relink the task to the rightmost position:
- */
- rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
- rb_link_node(&se->run_node, parent, link);
- rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
}
/*
{
struct task_struct *curr = rq->curr;
struct cfs_rq *cfs_rq = task_cfs_rq(curr);
+ struct sched_entity *se = &curr->se, *pse = &p->se;
+ s64 delta, gran;
if (unlikely(rt_prio(p->prio))) {
update_rq_clock(rq);
resched_task(curr);
return;
}
- if (is_same_group(curr, p)) {
- s64 delta = curr->se.vruntime - p->se.vruntime;
+ /*
+ * Batch tasks do not preempt (their preemption is driven by
+ * the tick):
+ */
+ if (unlikely(p->policy == SCHED_BATCH))
+ return;
+
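+	/*
+	 * Walk up the hierarchy until both entities are in the same
+	 * cfs_rq, so their vruntimes are comparable; then preempt the
+	 * current task only if it is ahead of the woken task by more
+	 * than the (weight-scaled) wakeup granularity.
+	 */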
+ if (sched_feat(WAKEUP_PREEMPT)) {
+ while (!is_same_group(se, pse)) {
+ se = parent_entity(se);
+ pse = parent_entity(pse);
+ }
+
+ delta = se->vruntime - pse->vruntime;
+ gran = sysctl_sched_wakeup_granularity;
+ if (unlikely(se->load.weight != NICE_0_LOAD))
+ gran = calc_delta_fair(gran, &se->load);
+
+ if (delta > gran) {
+ int now = !sched_feat(PREEMPT_RESTRICT);
- if (delta > (s64)sysctl_sched_wakeup_granularity)
- resched_task(curr);
+ if (now || p->prio < curr->prio || !se->peer_preempt++)
+ resched_task(curr);
+ }
}
}
* achieve that by always pre-iterating before returning
* the current task:
*/
-static inline struct task_struct *
+static struct task_struct *
__load_balance_iterator(struct cfs_rq *cfs_rq, struct rb_node *curr)
{
struct task_struct *p;
{
struct cfs_rq *cfs_rq = task_cfs_rq(p);
struct sched_entity *se = &p->se, *curr = cfs_rq->curr;
+ int this_cpu = smp_processor_id();
sched_info_queued(p);
update_curr(cfs_rq);
place_entity(cfs_rq, se, 1);
- if (sysctl_sched_child_runs_first &&
+ if (sysctl_sched_child_runs_first && this_cpu == task_cpu(p) &&
curr->vruntime < se->vruntime) {
/*
* Upon rescheduling, sched_class::put_prev_task() will place
swap(curr->vruntime, se->vruntime);
}
- update_stats_enqueue(cfs_rq, se);
- __enqueue_entity(cfs_rq, se);
- account_entity_enqueue(cfs_rq, se);
+ se->peer_preempt = 0;
+ enqueue_task_fair(rq, p, 0);
resched_task(rq->curr);
}
/*
* All the scheduling class methods:
*/
-struct sched_class fair_sched_class __read_mostly = {
+static const struct sched_class fair_sched_class = {
+ .next = &idle_sched_class,
.enqueue_task = enqueue_task_fair,
.dequeue_task = dequeue_task_fair,
.yield_task = yield_task_fair,