X-Git-Url: http://pilppa.org/gitweb/gitweb.cgi?a=blobdiff_plain;f=kernel%2Fsched_fair.c;h=fb8994c6d4bb4bbe90a71f89341baee3cc6e9806;hb=13b3c3fa27f8f4ed306ce624f446fab000dd8ee4;hp=bb61fe26b62cc50bd0e60a84781ff658f29e22e2;hpb=eb6a12c2428d21a9f3e0f1a50e927d5fd80fc3d0;p=linux-2.6-omap-h63xx.git

diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index bb61fe26b62..fb8994c6d4b 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -878,7 +878,6 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
 #ifdef CONFIG_SCHED_HRTICK
 static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
 {
-	int requeue = rq->curr == p;
 	struct sched_entity *se = &p->se;
 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
@@ -899,10 +898,10 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
 		 * Don't schedule slices shorter than 10000ns, that just
 		 * doesn't make sense. Rely on vruntime for fairness.
 		 */
-		if (!requeue)
-			delta = max(10000LL, delta);
+		if (rq->curr != p)
+			delta = max_t(s64, 10000LL, delta);
 
-		hrtick_start(rq, delta, requeue);
+		hrtick_start(rq, delta);
 	}
 }
 #else /* !CONFIG_SCHED_HRTICK */
@@ -1004,6 +1003,8 @@ static void yield_task_fair(struct rq *rq)
  * not idle and an idle cpu is available. The span of cpus to
  * search starts with cpus closest then further out as needed,
  * so we always favor a closer, idle cpu.
+ * Domains may include CPUs that are not usable for migration,
+ * hence we need to mask them out (cpu_active_map)
  *
  * Returns the CPU we should wake onto.
  */
@@ -1031,6 +1032,7 @@ static int wake_idle(int cpu, struct task_struct *p)
 		    || ((sd->flags & SD_WAKE_IDLE_FAR)
 			&& !task_hot(p, task_rq(p)->clock, sd))) {
 			cpus_and(tmp, sd->span, p->cpus_allowed);
+			cpus_and(tmp, tmp, cpu_active_map);
 			for_each_cpu_mask_nr(i, tmp) {
 				if (idle_cpu(i)) {
 					if (i != task_cpu(p)) {
@@ -1440,18 +1442,23 @@ __load_balance_iterator(struct cfs_rq *cfs_rq, struct list_head *next)
 	struct task_struct *p = NULL;
 	struct sched_entity *se;
 
-	while (next != &cfs_rq->tasks) {
+	if (next == &cfs_rq->tasks)
+		return NULL;
+
+	/* Skip over entities that are not tasks */
+	do {
 		se = list_entry(next, struct sched_entity, group_node);
 		next = next->next;
+	} while (next != &cfs_rq->tasks && !entity_is_task(se));
 
-		/* Skip over entities that are not tasks */
-		if (entity_is_task(se)) {
-			p = task_of(se);
-			break;
-		}
-	}
+	if (next == &cfs_rq->tasks)
+		return NULL;
 
 	cfs_rq->balance_iterator = next;
+
+	if (entity_is_task(se))
+		p = task_of(se);
+
 	return p;
 }
 