X-Git-Url: http://pilppa.org/gitweb/gitweb.cgi?a=blobdiff_plain;f=kernel%2Fsched.c;h=7581e331b139cfb13a1be3c435735c9535a6b802;hb=6888c1ecd63a3e5e206048602b3f59b58f4ffd8f;hp=7fefd8ab8b58bc5e8450118ff6d30c927db7781b;hpb=26797a34a24cfeab9951a6f42f27432c0b2546af;p=linux-2.6-omap-h63xx.git diff --git a/kernel/sched.c b/kernel/sched.c index 7fefd8ab8b5..7581e331b13 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -44,6 +44,7 @@ #include #include #include +#include #include #include #include @@ -51,6 +52,7 @@ #include #include #include +#include #include #include #include @@ -153,15 +155,22 @@ struct rt_prio_array { #ifdef CONFIG_FAIR_GROUP_SCHED +#include + struct cfs_rq; /* task group related information */ -struct task_grp { +struct task_group { +#ifdef CONFIG_FAIR_CGROUP_SCHED + struct cgroup_subsys_state css; +#endif /* schedulable entities of this group on each cpu */ struct sched_entity **se; /* runqueue "owned" by this group on each cpu */ struct cfs_rq **cfs_rq; unsigned long shares; + /* spinlock to serialize modification to shares */ + spinlock_t lock; }; /* Default task group's sched entity on each cpu */ @@ -175,7 +184,7 @@ static struct cfs_rq *init_cfs_rq_p[NR_CPUS]; /* Default task group. * Every task in system belong to this group at bootup. */ -struct task_grp init_task_grp = { +struct task_group init_task_group = { .se = init_sched_entity_p, .cfs_rq = init_cfs_rq_p, }; @@ -186,17 +195,20 @@ struct task_grp init_task_grp = { # define INIT_TASK_GRP_LOAD NICE_0_LOAD #endif -static int init_task_grp_load = INIT_TASK_GRP_LOAD; +static int init_task_group_load = INIT_TASK_GRP_LOAD; /* return group to which a task belongs */ -static inline struct task_grp *task_grp(struct task_struct *p) +static inline struct task_group *task_group(struct task_struct *p) { - struct task_grp *tg; + struct task_group *tg; #ifdef CONFIG_FAIR_USER_SCHED tg = p->user->tg; +#elif defined(CONFIG_FAIR_CGROUP_SCHED) + tg = container_of(task_subsys_state(p, cpu_cgroup_subsys_id), + struct task_group, css); #else - tg = &init_task_grp; + tg = &init_task_group; #endif return tg; @@ -205,8 +217,8 @@ static inline struct task_grp *task_grp(struct task_struct *p) /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */ static inline void set_task_cfs_rq(struct task_struct *p) { - p->se.cfs_rq = task_grp(p)->cfs_rq[task_cpu(p)]; - p->se.parent = task_grp(p)->se[task_cpu(p)]; + p->se.cfs_rq = task_group(p)->cfs_rq[task_cpu(p)]; + p->se.parent = task_group(p)->se[task_cpu(p)]; } #else @@ -244,7 +256,7 @@ struct cfs_rq { * list is used during load balance. */ struct list_head leaf_cfs_rq_list; /* Better name : task_cfs_rq_list? */ - struct task_grp *tg; /* group that "owns" this runqueue */ + struct task_group *tg; /* group that "owns" this runqueue */ struct rcu_head rcu; #endif }; @@ -264,7 +276,8 @@ struct rt_rq { * acquire operations must be ordered by ascending &runqueue. 
*/ struct rq { - spinlock_t lock; /* runqueue lock */ + /* runqueue lock: */ + spinlock_t lock; /* * nr_running and cpu_load should be in the same cacheline because @@ -277,13 +290,15 @@ struct rq { #ifdef CONFIG_NO_HZ unsigned char in_nohz_recently; #endif - struct load_weight load; /* capture load from *all* tasks on this cpu */ + /* capture load from *all* tasks on this cpu: */ + struct load_weight load; unsigned long nr_load_updates; u64 nr_switches; struct cfs_rq cfs; #ifdef CONFIG_FAIR_GROUP_SCHED - struct list_head leaf_cfs_rq_list; /* list of leaf cfs_rq on this cpu */ + /* list of leaf cfs_rq on this cpu: */ + struct list_head leaf_cfs_rq_list; #endif struct rt_rq rt; @@ -315,7 +330,8 @@ struct rq { /* For active balancing */ int active_balance; int push_cpu; - int cpu; /* cpu of this runqueue */ + /* cpu of this runqueue: */ + int cpu; struct task_struct *migration_thread; struct list_head migration_queue; @@ -326,22 +342,22 @@ struct rq { struct sched_info rq_sched_info; /* sys_sched_yield() stats */ - unsigned long yld_exp_empty; - unsigned long yld_act_empty; - unsigned long yld_both_empty; - unsigned long yld_count; + unsigned int yld_exp_empty; + unsigned int yld_act_empty; + unsigned int yld_both_empty; + unsigned int yld_count; /* schedule() stats */ - unsigned long sched_switch; - unsigned long sched_count; - unsigned long sched_goidle; + unsigned int sched_switch; + unsigned int sched_count; + unsigned int sched_goidle; /* try_to_wake_up() stats */ - unsigned long ttwu_count; - unsigned long ttwu_local; + unsigned int ttwu_count; + unsigned int ttwu_local; /* BKL stats */ - unsigned long bkl_count; + unsigned int bkl_count; #endif struct lock_class_key rq_lock_key; }; @@ -440,15 +456,19 @@ static void update_rq_clock(struct rq *rq) enum { SCHED_FEAT_NEW_FAIR_SLEEPERS = 1, SCHED_FEAT_START_DEBIT = 2, - SCHED_FEAT_USE_TREE_AVG = 4, + SCHED_FEAT_TREE_AVG = 4, SCHED_FEAT_APPROX_AVG = 8, + SCHED_FEAT_WAKEUP_PREEMPT = 16, + SCHED_FEAT_PREEMPT_RESTRICT = 32, }; const_debug unsigned int sysctl_sched_features = - SCHED_FEAT_NEW_FAIR_SLEEPERS *1 | - SCHED_FEAT_START_DEBIT *1 | - SCHED_FEAT_USE_TREE_AVG *0 | - SCHED_FEAT_APPROX_AVG *0; + SCHED_FEAT_NEW_FAIR_SLEEPERS * 1 | + SCHED_FEAT_START_DEBIT * 1 | + SCHED_FEAT_TREE_AVG * 0 | + SCHED_FEAT_APPROX_AVG * 0 | + SCHED_FEAT_WAKEUP_PREEMPT * 1 | + SCHED_FEAT_PREEMPT_RESTRICT * 1; #define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x) @@ -470,6 +490,7 @@ unsigned long long cpu_clock(int cpu) return now; } +EXPORT_SYMBOL_GPL(cpu_clock); #ifndef prepare_arch_switch # define prepare_arch_switch(next) do { } while (0) @@ -555,16 +576,13 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) static inline struct rq *__task_rq_lock(struct task_struct *p) __acquires(rq->lock) { - struct rq *rq; - -repeat_lock_task: - rq = task_rq(p); - spin_lock(&rq->lock); - if (unlikely(rq != task_rq(p))) { + for (;;) { + struct rq *rq = task_rq(p); + spin_lock(&rq->lock); + if (likely(rq == task_rq(p))) + return rq; spin_unlock(&rq->lock); - goto repeat_lock_task; } - return rq; } /* @@ -577,15 +595,14 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags) { struct rq *rq; -repeat_lock_task: - local_irq_save(*flags); - rq = task_rq(p); - spin_lock(&rq->lock); - if (unlikely(rq != task_rq(p))) { + for (;;) { + local_irq_save(*flags); + rq = task_rq(p); + spin_lock(&rq->lock); + if (likely(rq == task_rq(p))) + return rq; spin_unlock_irqrestore(&rq->lock, *flags); - goto repeat_lock_task; } - return 
rq; } static void __task_rq_unlock(struct rq *rq) @@ -1002,6 +1019,28 @@ static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu) #ifdef CONFIG_SMP +/* + * Is this task likely cache-hot: + */ +static inline int +task_hot(struct task_struct *p, u64 now, struct sched_domain *sd) +{ + s64 delta; + + if (p->sched_class != &fair_sched_class) + return 0; + + if (sysctl_sched_migration_cost == -1) + return 1; + if (sysctl_sched_migration_cost == 0) + return 0; + + delta = now - p->se.exec_start; + + return delta < (s64)sysctl_sched_migration_cost; +} + + void set_task_cpu(struct task_struct *p, unsigned int new_cpu) { int old_cpu = task_cpu(p); @@ -1019,6 +1058,11 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu) p->se.sleep_start -= clock_offset; if (p->se.block_start) p->se.block_start -= clock_offset; + if (old_cpu != new_cpu) { + schedstat_inc(p, se.nr_migrations); + if (task_hot(p, old_rq->clock, NULL)) + schedstat_inc(p, se.nr_forced2_migrations); + } #endif p->se.vruntime -= old_cfsrq->min_vruntime - new_cfsrq->min_vruntime; @@ -1076,69 +1120,71 @@ void wait_task_inactive(struct task_struct *p) int running, on_rq; struct rq *rq; -repeat: - /* - * We do the initial early heuristics without holding - * any task-queue locks at all. We'll only try to get - * the runqueue lock when things look like they will - * work out! - */ - rq = task_rq(p); + for (;;) { + /* + * We do the initial early heuristics without holding + * any task-queue locks at all. We'll only try to get + * the runqueue lock when things look like they will + * work out! + */ + rq = task_rq(p); - /* - * If the task is actively running on another CPU - * still, just relax and busy-wait without holding - * any locks. - * - * NOTE! Since we don't hold any locks, it's not - * even sure that "rq" stays as the right runqueue! - * But we don't care, since "task_running()" will - * return false if the runqueue has changed and p - * is actually now running somewhere else! - */ - while (task_running(rq, p)) - cpu_relax(); + /* + * If the task is actively running on another CPU + * still, just relax and busy-wait without holding + * any locks. + * + * NOTE! Since we don't hold any locks, it's not + * even sure that "rq" stays as the right runqueue! + * But we don't care, since "task_running()" will + * return false if the runqueue has changed and p + * is actually now running somewhere else! + */ + while (task_running(rq, p)) + cpu_relax(); - /* - * Ok, time to look more closely! We need the rq - * lock now, to be *sure*. If we're wrong, we'll - * just go back and repeat. - */ - rq = task_rq_lock(p, &flags); - running = task_running(rq, p); - on_rq = p->se.on_rq; - task_rq_unlock(rq, &flags); + /* + * Ok, time to look more closely! We need the rq + * lock now, to be *sure*. If we're wrong, we'll + * just go back and repeat. + */ + rq = task_rq_lock(p, &flags); + running = task_running(rq, p); + on_rq = p->se.on_rq; + task_rq_unlock(rq, &flags); - /* - * Was it really running after all now that we - * checked with the proper locks actually held? - * - * Oops. Go back and try again.. - */ - if (unlikely(running)) { - cpu_relax(); - goto repeat; - } + /* + * Was it really running after all now that we + * checked with the proper locks actually held? + * + * Oops. Go back and try again.. + */ + if (unlikely(running)) { + cpu_relax(); + continue; + } - /* - * It's not enough that it's not actively running, - * it must be off the runqueue _entirely_, and not - * preempted! 
- * - * So if it wa still runnable (but just not actively - * running right now), it's preempted, and we should - * yield - it could be a while. - */ - if (unlikely(on_rq)) { - yield(); - goto repeat; - } + /* + * It's not enough that it's not actively running, + * it must be off the runqueue _entirely_, and not + * preempted! + * + * So if it wa still runnable (but just not actively + * running right now), it's preempted, and we should + * yield - it could be a while. + */ + if (unlikely(on_rq)) { + schedule_timeout_uninterruptible(1); + continue; + } - /* - * Ahh, all good. It wasn't running, and it wasn't - * runnable, which means that it will never become - * running in the future either. We're all done! - */ + /* + * Ahh, all good. It wasn't running, and it wasn't + * runnable, which means that it will never become + * running in the future either. We're all done! + */ + break; + } } /*** @@ -1229,7 +1275,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu) /* Skip over this group if it has no CPUs allowed */ if (!cpus_intersects(group->cpumask, p->cpus_allowed)) - goto nextgroup; + continue; local_group = cpu_isset(this_cpu, group->cpumask); @@ -1257,9 +1303,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu) min_load = avg_load; idlest = group; } -nextgroup: - group = group->next; - } while (group != sd->groups); + } while (group = group->next, group != sd->groups); if (!idlest || 100*this_load < imbalance*min_load) return NULL; @@ -1391,8 +1435,13 @@ static int wake_idle(int cpu, struct task_struct *p) if (sd->flags & SD_WAKE_IDLE) { cpus_and(tmp, sd->span, p->cpus_allowed); for_each_cpu_mask(i, tmp) { - if (idle_cpu(i)) + if (idle_cpu(i)) { + if (i != task_cpu(p)) { + schedstat_inc(p, + se.nr_wakeups_idle); + } return i; + } } } else { break; @@ -1423,7 +1472,7 @@ static inline int wake_idle(int cpu, struct task_struct *p) */ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync) { - int cpu, this_cpu, success = 0; + int cpu, orig_cpu, this_cpu, success = 0; unsigned long flags; long old_state; struct rq *rq; @@ -1442,6 +1491,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync) goto out_running; cpu = task_cpu(p); + orig_cpu = cpu; this_cpu = smp_processor_id(); #ifdef CONFIG_SMP @@ -1485,6 +1535,13 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync) unsigned long tl = this_load; unsigned long tl_per_task; + /* + * Attract cache-cold tasks on sync wakeups: + */ + if (sync && !task_hot(p, rq->clock, this_sd)) + goto out_set_cpu; + + schedstat_inc(p, se.nr_wakeups_affine_attempts); tl_per_task = cpu_avg_load_per_task(this_cpu); /* @@ -1504,6 +1561,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync) * there is no bad imbalance. 
*/ schedstat_inc(this_sd, ttwu_move_affine); + schedstat_inc(p, se.nr_wakeups_affine); goto out_set_cpu; } } @@ -1515,6 +1573,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync) if (this_sd->flags & SD_WAKE_BALANCE) { if (imbalance*this_load <= 100*load) { schedstat_inc(this_sd, ttwu_move_balance); + schedstat_inc(p, se.nr_wakeups_passive); goto out_set_cpu; } } @@ -1540,18 +1599,18 @@ out_set_cpu: out_activate: #endif /* CONFIG_SMP */ + schedstat_inc(p, se.nr_wakeups); + if (sync) + schedstat_inc(p, se.nr_wakeups_sync); + if (orig_cpu != cpu) + schedstat_inc(p, se.nr_wakeups_migrate); + if (cpu == this_cpu) + schedstat_inc(p, se.nr_wakeups_local); + else + schedstat_inc(p, se.nr_wakeups_remote); update_rq_clock(rq); activate_task(rq, p, 1); - /* - * Sync wakeups (i.e. those types of wakeups where the waker - * has indicated that it will leave the CPU in short order) - * don't trigger a preemption, if the woken up task will run on - * this cpu. (in this case the 'I will reschedule' promise of - * the waker guarantees that the freshly woken up task is going - * to be considered on this CPU.) - */ - if (!sync || cpu != this_cpu) - check_preempt_curr(rq, p); + check_preempt_curr(rq, p); success = 1; out_running: @@ -1660,17 +1719,14 @@ void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags) { unsigned long flags; struct rq *rq; - int this_cpu; rq = task_rq_lock(p, &flags); BUG_ON(p->state != TASK_RUNNING); - this_cpu = smp_processor_id(); /* parent's CPU */ update_rq_clock(rq); p->prio = effective_prio(p); - if (task_cpu(p) != this_cpu || !p->sched_class->task_new || - !current->se.on_rq) { + if (!p->sched_class->task_new || !current->se.on_rq) { activate_task(rq, p, 0); } else { /* @@ -1829,7 +1885,7 @@ asmlinkage void schedule_tail(struct task_struct *prev) preempt_enable(); #endif if (current->set_child_tid) - put_user(current->pid, current->set_child_tid); + put_user(task_pid_vnr(current), current->set_child_tid); } /* @@ -2132,13 +2188,38 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu, * 2) cannot be migrated to this CPU due to cpus_allowed, or * 3) are cache-hot on their current CPU. */ - if (!cpu_isset(this_cpu, p->cpus_allowed)) + if (!cpu_isset(this_cpu, p->cpus_allowed)) { + schedstat_inc(p, se.nr_failed_migrations_affine); return 0; + } *all_pinned = 0; - if (task_running(rq, p)) + if (task_running(rq, p)) { + schedstat_inc(p, se.nr_failed_migrations_running); return 0; + } + + /* + * Aggressive migration if: + * 1) task is cache cold, or + * 2) too many balance attempts have failed. 
+ */ + if (!task_hot(p, rq->clock, sd) || + sd->nr_balance_failed > sd->cache_nice_tries) { +#ifdef CONFIG_SCHEDSTATS + if (task_hot(p, rq->clock, sd)) { + schedstat_inc(sd, lb_hot_gained[idle]); + schedstat_inc(p, se.nr_forced_migrations); + } +#endif + return 1; + } + + if (task_hot(p, rq->clock, sd)) { + schedstat_inc(p, se.nr_failed_migrations_hot); + return 0; + } return 1; } @@ -2269,7 +2350,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, unsigned long max_pull; unsigned long busiest_load_per_task, busiest_nr_running; unsigned long this_load_per_task, this_nr_running; - int load_idx; + int load_idx, group_imb = 0; #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) int power_savings_balance = 1; unsigned long leader_nr_running = 0, min_load_per_task = 0; @@ -2288,9 +2369,10 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, load_idx = sd->idle_idx; do { - unsigned long load, group_capacity; + unsigned long load, group_capacity, max_cpu_load, min_cpu_load; int local_group; int i; + int __group_imb = 0; unsigned int balance_cpu = -1, first_idle_cpu = 0; unsigned long sum_nr_running, sum_weighted_load; @@ -2301,6 +2383,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, /* Tally up the load of all CPUs in the group */ sum_weighted_load = sum_nr_running = avg_load = 0; + max_cpu_load = 0; + min_cpu_load = ~0UL; for_each_cpu_mask(i, group->cpumask) { struct rq *rq; @@ -2321,8 +2405,13 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, } load = target_load(i, load_idx); - } else + } else { load = source_load(i, load_idx); + if (load > max_cpu_load) + max_cpu_load = load; + if (min_cpu_load > load) + min_cpu_load = load; + } avg_load += load; sum_nr_running += rq->nr_running; @@ -2348,6 +2437,9 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, avg_load = sg_div_cpu_power(group, avg_load * SCHED_LOAD_SCALE); + if ((max_cpu_load - min_cpu_load) > SCHED_LOAD_SCALE) + __group_imb = 1; + group_capacity = group->__cpu_power / SCHED_LOAD_SCALE; if (local_group) { @@ -2356,11 +2448,12 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, this_nr_running = sum_nr_running; this_load_per_task = sum_weighted_load; } else if (avg_load > max_load && - sum_nr_running > group_capacity) { + (sum_nr_running > group_capacity || __group_imb)) { max_load = avg_load; busiest = group; busiest_nr_running = sum_nr_running; busiest_load_per_task = sum_weighted_load; + group_imb = __group_imb; } #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) @@ -2432,6 +2525,9 @@ group_next: goto out_balanced; busiest_load_per_task /= busiest_nr_running; + if (group_imb) + busiest_load_per_task = min(busiest_load_per_task, avg_load); + /* * We're trying to get all the cpus to the average_load, so we don't * want to push ourselves above the average load, nor do we wish to @@ -3214,16 +3310,19 @@ unsigned long long task_sched_runtime(struct task_struct *p) /* * Account user cpu time to a process. * @p: the process that the cpu time gets accounted to - * @hardirq_offset: the offset to subtract from hardirq_count() * @cputime: the cpu time spent in user space since the last update */ void account_user_time(struct task_struct *p, cputime_t cputime) { struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; cputime64_t tmp; + struct rq *rq = this_rq(); p->utime = cputime_add(p->utime, cputime); + if (p != rq->idle) + cpuacct_charge(p, cputime); + /* Add user time to cpustat. 
*/ tmp = cputime_to_cputime64(cputime); if (TASK_NICE(p) > 0) @@ -3232,6 +3331,35 @@ void account_user_time(struct task_struct *p, cputime_t cputime) cpustat->user = cputime64_add(cpustat->user, tmp); } +/* + * Account guest cpu time to a process. + * @p: the process that the cpu time gets accounted to + * @cputime: the cpu time spent in virtual machine since the last update + */ +void account_guest_time(struct task_struct *p, cputime_t cputime) +{ + cputime64_t tmp; + struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; + + tmp = cputime_to_cputime64(cputime); + + p->utime = cputime_add(p->utime, cputime); + p->gtime = cputime_add(p->gtime, cputime); + + cpustat->user = cputime64_add(cpustat->user, tmp); + cpustat->guest = cputime64_add(cpustat->guest, tmp); +} + +/* + * Account scaled user cpu time to a process. + * @p: the process that the cpu time gets accounted to + * @cputime: the cpu time spent in user space since the last update + */ +void account_user_time_scaled(struct task_struct *p, cputime_t cputime) +{ + p->utimescaled = cputime_add(p->utimescaled, cputime); +} + /* * Account system cpu time to a process. * @p: the process that the cpu time gets accounted to @@ -3245,6 +3373,12 @@ void account_system_time(struct task_struct *p, int hardirq_offset, struct rq *rq = this_rq(); cputime64_t tmp; + if (p->flags & PF_VCPU) { + account_guest_time(p, cputime); + p->flags &= ~PF_VCPU; + return; + } + p->stime = cputime_add(p->stime, cputime); /* Add system time to cpustat. */ @@ -3253,9 +3387,10 @@ void account_system_time(struct task_struct *p, int hardirq_offset, cpustat->irq = cputime64_add(cpustat->irq, tmp); else if (softirq_count()) cpustat->softirq = cputime64_add(cpustat->softirq, tmp); - else if (p != rq->idle) + else if (p != rq->idle) { cpustat->system = cputime64_add(cpustat->system, tmp); - else if (atomic_read(&rq->nr_iowait) > 0) + cpuacct_charge(p, cputime); + } else if (atomic_read(&rq->nr_iowait) > 0) cpustat->iowait = cputime64_add(cpustat->iowait, tmp); else cpustat->idle = cputime64_add(cpustat->idle, tmp); @@ -3263,6 +3398,17 @@ void account_system_time(struct task_struct *p, int hardirq_offset, acct_update_integrals(p); } +/* + * Account scaled system cpu time to a process. + * @p: the process that the cpu time gets accounted to + * @hardirq_offset: the offset to subtract from hardirq_count() + * @cputime: the cpu time spent in kernel space since the last update + */ +void account_system_time_scaled(struct task_struct *p, cputime_t cputime) +{ + p->stimescaled = cputime_add(p->stimescaled, cputime); +} + /* * Account for involuntary wait time. 
* @p: the process from which the cpu time has been stolen @@ -3280,8 +3426,10 @@ void account_steal_time(struct task_struct *p, cputime_t steal) cpustat->iowait = cputime64_add(cpustat->iowait, tmp); else cpustat->idle = cputime64_add(cpustat->idle, tmp); - } else + } else { cpustat->steal = cputime64_add(cpustat->steal, tmp); + cpuacct_charge(p, -tmp); + } } /* @@ -3361,7 +3509,7 @@ EXPORT_SYMBOL(sub_preempt_count); static noinline void __schedule_bug(struct task_struct *prev) { printk(KERN_ERR "BUG: scheduling while atomic: %s/0x%08x/%d\n", - prev->comm, preempt_count(), prev->pid); + prev->comm, preempt_count(), task_pid_nr(prev)); debug_show_held_locks(prev); if (irqs_disabled()) print_irqtrace_events(prev); @@ -3513,27 +3661,30 @@ asmlinkage void __sched preempt_schedule(void) if (likely(ti->preempt_count || irqs_disabled())) return; -need_resched: - add_preempt_count(PREEMPT_ACTIVE); - /* - * We keep the big kernel semaphore locked, but we - * clear ->lock_depth so that schedule() doesnt - * auto-release the semaphore: - */ + do { + add_preempt_count(PREEMPT_ACTIVE); + + /* + * We keep the big kernel semaphore locked, but we + * clear ->lock_depth so that schedule() doesnt + * auto-release the semaphore: + */ #ifdef CONFIG_PREEMPT_BKL - saved_lock_depth = task->lock_depth; - task->lock_depth = -1; + saved_lock_depth = task->lock_depth; + task->lock_depth = -1; #endif - schedule(); + schedule(); #ifdef CONFIG_PREEMPT_BKL - task->lock_depth = saved_lock_depth; + task->lock_depth = saved_lock_depth; #endif - sub_preempt_count(PREEMPT_ACTIVE); + sub_preempt_count(PREEMPT_ACTIVE); - /* we could miss a preemption opportunity between schedule and now */ - barrier(); - if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) - goto need_resched; + /* + * Check again in case we missed a preemption opportunity + * between schedule and now. + */ + barrier(); + } while (unlikely(test_thread_flag(TIF_NEED_RESCHED))); } EXPORT_SYMBOL(preempt_schedule); @@ -3553,29 +3704,32 @@ asmlinkage void __sched preempt_schedule_irq(void) /* Catch callers which need to be fixed */ BUG_ON(ti->preempt_count || !irqs_disabled()); -need_resched: - add_preempt_count(PREEMPT_ACTIVE); - /* - * We keep the big kernel semaphore locked, but we - * clear ->lock_depth so that schedule() doesnt - * auto-release the semaphore: - */ + do { + add_preempt_count(PREEMPT_ACTIVE); + + /* + * We keep the big kernel semaphore locked, but we + * clear ->lock_depth so that schedule() doesnt + * auto-release the semaphore: + */ #ifdef CONFIG_PREEMPT_BKL - saved_lock_depth = task->lock_depth; - task->lock_depth = -1; + saved_lock_depth = task->lock_depth; + task->lock_depth = -1; #endif - local_irq_enable(); - schedule(); - local_irq_disable(); + local_irq_enable(); + schedule(); + local_irq_disable(); #ifdef CONFIG_PREEMPT_BKL - task->lock_depth = saved_lock_depth; + task->lock_depth = saved_lock_depth; #endif - sub_preempt_count(PREEMPT_ACTIVE); + sub_preempt_count(PREEMPT_ACTIVE); - /* we could miss a preemption opportunity between schedule and now */ - barrier(); - if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) - goto need_resched; + /* + * Check again in case we missed a preemption opportunity + * between schedule and now. 
+ */ + barrier(); + } while (unlikely(test_thread_flag(TIF_NEED_RESCHED))); } #endif /* CONFIG_PREEMPT */ @@ -3691,206 +3845,119 @@ void fastcall complete_all(struct completion *x) } EXPORT_SYMBOL(complete_all); -void fastcall __sched wait_for_completion(struct completion *x) +static inline long __sched +do_wait_for_common(struct completion *x, long timeout, int state) { - might_sleep(); - - spin_lock_irq(&x->wait.lock); if (!x->done) { DECLARE_WAITQUEUE(wait, current); wait.flags |= WQ_FLAG_EXCLUSIVE; __add_wait_queue_tail(&x->wait, &wait); do { - __set_current_state(TASK_UNINTERRUPTIBLE); - spin_unlock_irq(&x->wait.lock); - schedule(); - spin_lock_irq(&x->wait.lock); - } while (!x->done); - __remove_wait_queue(&x->wait, &wait); - } - x->done--; - spin_unlock_irq(&x->wait.lock); -} -EXPORT_SYMBOL(wait_for_completion); - -unsigned long fastcall __sched -wait_for_completion_timeout(struct completion *x, unsigned long timeout) -{ - might_sleep(); - - spin_lock_irq(&x->wait.lock); - if (!x->done) { - DECLARE_WAITQUEUE(wait, current); - - wait.flags |= WQ_FLAG_EXCLUSIVE; - __add_wait_queue_tail(&x->wait, &wait); - do { - __set_current_state(TASK_UNINTERRUPTIBLE); + if (state == TASK_INTERRUPTIBLE && + signal_pending(current)) { + __remove_wait_queue(&x->wait, &wait); + return -ERESTARTSYS; + } + __set_current_state(state); spin_unlock_irq(&x->wait.lock); timeout = schedule_timeout(timeout); spin_lock_irq(&x->wait.lock); if (!timeout) { __remove_wait_queue(&x->wait, &wait); - goto out; + return timeout; } } while (!x->done); __remove_wait_queue(&x->wait, &wait); } x->done--; -out: - spin_unlock_irq(&x->wait.lock); return timeout; } -EXPORT_SYMBOL(wait_for_completion_timeout); -int fastcall __sched wait_for_completion_interruptible(struct completion *x) +static long __sched +wait_for_common(struct completion *x, long timeout, int state) { - int ret = 0; - might_sleep(); spin_lock_irq(&x->wait.lock); - if (!x->done) { - DECLARE_WAITQUEUE(wait, current); - - wait.flags |= WQ_FLAG_EXCLUSIVE; - __add_wait_queue_tail(&x->wait, &wait); - do { - if (signal_pending(current)) { - ret = -ERESTARTSYS; - __remove_wait_queue(&x->wait, &wait); - goto out; - } - __set_current_state(TASK_INTERRUPTIBLE); - spin_unlock_irq(&x->wait.lock); - schedule(); - spin_lock_irq(&x->wait.lock); - } while (!x->done); - __remove_wait_queue(&x->wait, &wait); - } - x->done--; -out: + timeout = do_wait_for_common(x, timeout, state); spin_unlock_irq(&x->wait.lock); + return timeout; +} - return ret; +void fastcall __sched wait_for_completion(struct completion *x) +{ + wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE); } -EXPORT_SYMBOL(wait_for_completion_interruptible); +EXPORT_SYMBOL(wait_for_completion); unsigned long fastcall __sched -wait_for_completion_interruptible_timeout(struct completion *x, - unsigned long timeout) +wait_for_completion_timeout(struct completion *x, unsigned long timeout) { - might_sleep(); - - spin_lock_irq(&x->wait.lock); - if (!x->done) { - DECLARE_WAITQUEUE(wait, current); - - wait.flags |= WQ_FLAG_EXCLUSIVE; - __add_wait_queue_tail(&x->wait, &wait); - do { - if (signal_pending(current)) { - timeout = -ERESTARTSYS; - __remove_wait_queue(&x->wait, &wait); - goto out; - } - __set_current_state(TASK_INTERRUPTIBLE); - spin_unlock_irq(&x->wait.lock); - timeout = schedule_timeout(timeout); - spin_lock_irq(&x->wait.lock); - if (!timeout) { - __remove_wait_queue(&x->wait, &wait); - goto out; - } - } while (!x->done); - __remove_wait_queue(&x->wait, &wait); - } - x->done--; -out: - 
spin_unlock_irq(&x->wait.lock); - return timeout; + return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE); } -EXPORT_SYMBOL(wait_for_completion_interruptible_timeout); +EXPORT_SYMBOL(wait_for_completion_timeout); -static inline void -sleep_on_head(wait_queue_head_t *q, wait_queue_t *wait, unsigned long *flags) +int __sched wait_for_completion_interruptible(struct completion *x) { - spin_lock_irqsave(&q->lock, *flags); - __add_wait_queue(q, wait); - spin_unlock(&q->lock); + long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE); + if (t == -ERESTARTSYS) + return t; + return 0; } +EXPORT_SYMBOL(wait_for_completion_interruptible); -static inline void -sleep_on_tail(wait_queue_head_t *q, wait_queue_t *wait, unsigned long *flags) +unsigned long fastcall __sched +wait_for_completion_interruptible_timeout(struct completion *x, + unsigned long timeout) { - spin_lock_irq(&q->lock); - __remove_wait_queue(q, wait); - spin_unlock_irqrestore(&q->lock, *flags); + return wait_for_common(x, timeout, TASK_INTERRUPTIBLE); } +EXPORT_SYMBOL(wait_for_completion_interruptible_timeout); -void __sched interruptible_sleep_on(wait_queue_head_t *q) +static long __sched +sleep_on_common(wait_queue_head_t *q, int state, long timeout) { unsigned long flags; wait_queue_t wait; init_waitqueue_entry(&wait, current); - current->state = TASK_INTERRUPTIBLE; + __set_current_state(state); - sleep_on_head(q, &wait, &flags); - schedule(); - sleep_on_tail(q, &wait, &flags); + spin_lock_irqsave(&q->lock, flags); + __add_wait_queue(q, &wait); + spin_unlock(&q->lock); + timeout = schedule_timeout(timeout); + spin_lock_irq(&q->lock); + __remove_wait_queue(q, &wait); + spin_unlock_irqrestore(&q->lock, flags); + + return timeout; +} + +void __sched interruptible_sleep_on(wait_queue_head_t *q) +{ + sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); } EXPORT_SYMBOL(interruptible_sleep_on); long __sched interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout) { - unsigned long flags; - wait_queue_t wait; - - init_waitqueue_entry(&wait, current); - - current->state = TASK_INTERRUPTIBLE; - - sleep_on_head(q, &wait, &flags); - timeout = schedule_timeout(timeout); - sleep_on_tail(q, &wait, &flags); - - return timeout; + return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout); } EXPORT_SYMBOL(interruptible_sleep_on_timeout); void __sched sleep_on(wait_queue_head_t *q) { - unsigned long flags; - wait_queue_t wait; - - init_waitqueue_entry(&wait, current); - - current->state = TASK_UNINTERRUPTIBLE; - - sleep_on_head(q, &wait, &flags); - schedule(); - sleep_on_tail(q, &wait, &flags); + sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); } EXPORT_SYMBOL(sleep_on); long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout) { - unsigned long flags; - wait_queue_t wait; - - init_waitqueue_entry(&wait, current); - - current->state = TASK_UNINTERRUPTIBLE; - - sleep_on_head(q, &wait, &flags); - timeout = schedule_timeout(timeout); - sleep_on_tail(q, &wait, &flags); - - return timeout; + return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout); } EXPORT_SYMBOL(sleep_on_timeout); @@ -4108,7 +4175,7 @@ struct task_struct *idle_task(int cpu) */ static struct task_struct *find_process_by_pid(pid_t pid) { - return pid ? find_task_by_pid(pid) : current; + return pid ? find_task_by_vpid(pid) : current; } /* Actually do priority change: must hold rq lock. 
*/ @@ -4320,10 +4387,10 @@ asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param __user *param) asmlinkage long sys_sched_getscheduler(pid_t pid) { struct task_struct *p; - int retval = -EINVAL; + int retval; if (pid < 0) - goto out_nounlock; + return -EINVAL; retval = -ESRCH; read_lock(&tasklist_lock); @@ -4334,8 +4401,6 @@ asmlinkage long sys_sched_getscheduler(pid_t pid) retval = p->policy; } read_unlock(&tasklist_lock); - -out_nounlock: return retval; } @@ -4348,10 +4413,10 @@ asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param) { struct sched_param lp; struct task_struct *p; - int retval = -EINVAL; + int retval; if (!param || pid < 0) - goto out_nounlock; + return -EINVAL; read_lock(&tasklist_lock); p = find_process_by_pid(pid); @@ -4371,7 +4436,6 @@ asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param) */ retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0; -out_nounlock: return retval; out_unlock: @@ -4414,8 +4478,21 @@ long sched_setaffinity(pid_t pid, cpumask_t new_mask) cpus_allowed = cpuset_cpus_allowed(p); cpus_and(new_mask, new_mask, cpus_allowed); + again: retval = set_cpus_allowed(p, new_mask); + if (!retval) { + cpus_allowed = cpuset_cpus_allowed(p); + if (!cpus_subset(new_mask, cpus_allowed)) { + /* + * We must have raced with a concurrent cpuset + * update. Just reset the cpus_allowed to the + * cpuset's cpus_allowed + */ + new_mask = cpus_allowed; + goto again; + } + } out_unlock: put_task_struct(p); mutex_unlock(&sched_hotcpu_mutex); @@ -4727,11 +4804,11 @@ long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval) { struct task_struct *p; unsigned int time_slice; - int retval = -EINVAL; + int retval; struct timespec t; if (pid < 0) - goto out_nounlock; + return -EINVAL; retval = -ESRCH; read_lock(&tasklist_lock); @@ -4759,8 +4836,8 @@ long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval) read_unlock(&tasklist_lock); jiffies_to_timespec(time_slice, &t); retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0; -out_nounlock: return retval; + out_unlock: read_unlock(&tasklist_lock); return retval; @@ -4774,18 +4851,18 @@ static void show_task(struct task_struct *p) unsigned state; state = p->state ? __ffs(p->state) + 1 : 0; - printk("%-13.13s %c", p->comm, + printk(KERN_INFO "%-13.13s %c", p->comm, state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?'); #if BITS_PER_LONG == 32 if (state == TASK_RUNNING) - printk(" running "); + printk(KERN_CONT " running "); else - printk(" %08lx ", thread_saved_pc(p)); + printk(KERN_CONT " %08lx ", thread_saved_pc(p)); #else if (state == TASK_RUNNING) - printk(" running task "); + printk(KERN_CONT " running task "); else - printk(" %016lx ", thread_saved_pc(p)); + printk(KERN_CONT " %016lx ", thread_saved_pc(p)); #endif #ifdef CONFIG_DEBUG_STACK_USAGE { @@ -4795,7 +4872,8 @@ static void show_task(struct task_struct *p) free = (unsigned long)n - (unsigned long)end_of_stack(p); } #endif - printk("%5lu %5d %6d\n", free, p->pid, p->parent->pid); + printk(KERN_CONT "%5lu %5d %6d\n", free, + task_pid_nr(p), task_pid_nr(p->parent)); if (state != TASK_RUNNING) show_stack(p, NULL); @@ -5055,8 +5133,19 @@ wait_to_die: } #ifdef CONFIG_HOTPLUG_CPU + +static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu) +{ + int ret; + + local_irq_disable(); + ret = __migrate_task(p, src_cpu, dest_cpu); + local_irq_enable(); + return ret; +} + /* - * Figure out where task on dead CPU should go, use force if neccessary. 
+ * Figure out where task on dead CPU should go, use force if necessary. * NOTE: interrupts should be disabled by the caller */ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) @@ -5066,35 +5155,42 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) struct rq *rq; int dest_cpu; -restart: - /* On same node? */ - mask = node_to_cpumask(cpu_to_node(dead_cpu)); - cpus_and(mask, mask, p->cpus_allowed); - dest_cpu = any_online_cpu(mask); - - /* On any allowed CPU? */ - if (dest_cpu == NR_CPUS) - dest_cpu = any_online_cpu(p->cpus_allowed); - - /* No more Mr. Nice Guy. */ - if (dest_cpu == NR_CPUS) { - rq = task_rq_lock(p, &flags); - cpus_setall(p->cpus_allowed); - dest_cpu = any_online_cpu(p->cpus_allowed); - task_rq_unlock(rq, &flags); + do { + /* On same node? */ + mask = node_to_cpumask(cpu_to_node(dead_cpu)); + cpus_and(mask, mask, p->cpus_allowed); + dest_cpu = any_online_cpu(mask); + + /* On any allowed CPU? */ + if (dest_cpu == NR_CPUS) + dest_cpu = any_online_cpu(p->cpus_allowed); + + /* No more Mr. Nice Guy. */ + if (dest_cpu == NR_CPUS) { + cpumask_t cpus_allowed = cpuset_cpus_allowed_locked(p); + /* + * Try to stay on the same cpuset, where the + * current cpuset may be a subset of all cpus. + * The cpuset_cpus_allowed_locked() variant of + * cpuset_cpus_allowed() will not block. It must be + * called within calls to cpuset_lock/cpuset_unlock. + */ + rq = task_rq_lock(p, &flags); + p->cpus_allowed = cpus_allowed; + dest_cpu = any_online_cpu(p->cpus_allowed); + task_rq_unlock(rq, &flags); - /* - * Don't tell them about moving exiting tasks or - * kernel threads (both mm NULL), since they never - * leave kernel. - */ - if (p->mm && printk_ratelimit()) - printk(KERN_INFO "process %d (%s) no " - "longer affine to cpu%d\n", - p->pid, p->comm, dead_cpu); - } - if (!__migrate_task(p, dead_cpu, dest_cpu)) - goto restart; + /* + * Don't tell them about moving exiting tasks or + * kernel threads (both mm NULL), since they never + * leave kernel. + */ + if (p->mm && printk_ratelimit()) + printk(KERN_INFO "process %d (%s) no " + "longer affine to cpu%d\n", + task_pid_nr(p), p->comm, dead_cpu); + } + } while (!__migrate_task_irq(p, dead_cpu, dest_cpu)); } /* @@ -5122,7 +5218,7 @@ static void migrate_live_tasks(int src_cpu) { struct task_struct *p, *t; - write_lock_irq(&tasklist_lock); + read_lock(&tasklist_lock); do_each_thread(t, p) { if (p == current) @@ -5132,7 +5228,7 @@ static void migrate_live_tasks(int src_cpu) move_task_off_dead_cpu(src_cpu, p); } while_each_thread(t, p); - write_unlock_irq(&tasklist_lock); + read_unlock(&tasklist_lock); } /* @@ -5199,7 +5295,7 @@ static void migrate_dead(unsigned int dead_cpu, struct task_struct *p) struct rq *rq = cpu_rq(dead_cpu); /* Must be exiting, otherwise would be on tasklist. */ - BUG_ON(p->exit_state != EXIT_ZOMBIE && p->exit_state != EXIT_DEAD); + BUG_ON(!p->exit_state); /* Cannot have done final schedule yet: would have vanished. */ BUG_ON(p->state == TASK_DEAD); @@ -5210,11 +5306,10 @@ static void migrate_dead(unsigned int dead_cpu, struct task_struct *p) * Drop lock around migration; if someone else moves it, * that's OK. No task can be added to this CPU, so iteration is * fine. 
- * NOTE: interrupts should be left disabled --dev@ */ - spin_unlock(&rq->lock); + spin_unlock_irq(&rq->lock); move_task_off_dead_cpu(dead_cpu, p); - spin_lock(&rq->lock); + spin_lock_irq(&rq->lock); put_task_struct(p); } @@ -5261,14 +5356,32 @@ static struct ctl_table sd_ctl_root[] = { static struct ctl_table *sd_alloc_ctl_entry(int n) { struct ctl_table *entry = - kmalloc(n * sizeof(struct ctl_table), GFP_KERNEL); - - BUG_ON(!entry); - memset(entry, 0, n * sizeof(struct ctl_table)); + kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL); return entry; } +static void sd_free_ctl_entry(struct ctl_table **tablep) +{ + struct ctl_table *entry; + + /* + * In the intermediate directories, both the child directory and + * procname are dynamically allocated and could fail but the mode + * will always be set. In the lowest directory the names are + * static strings and all have proc handlers. + */ + for (entry = *tablep; entry->mode; entry++) { + if (entry->child) + sd_free_ctl_entry(&entry->child); + if (entry->proc_handler == NULL) + kfree(entry->procname); + } + + kfree(*tablep); + *tablep = NULL; +} + static void set_table_entry(struct ctl_table *entry, const char *procname, void *data, int maxlen, @@ -5284,7 +5397,10 @@ set_table_entry(struct ctl_table *entry, static struct ctl_table * sd_alloc_ctl_domain_table(struct sched_domain *sd) { - struct ctl_table *table = sd_alloc_ctl_entry(14); + struct ctl_table *table = sd_alloc_ctl_entry(12); + + if (table == NULL) + return NULL; set_table_entry(&table[0], "min_interval", &sd->min_interval, sizeof(long), 0644, proc_doulongvec_minmax); @@ -5304,16 +5420,17 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd) sizeof(int), 0644, proc_dointvec_minmax); set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct, sizeof(int), 0644, proc_dointvec_minmax); - set_table_entry(&table[10], "cache_nice_tries", + set_table_entry(&table[9], "cache_nice_tries", &sd->cache_nice_tries, sizeof(int), 0644, proc_dointvec_minmax); - set_table_entry(&table[12], "flags", &sd->flags, + set_table_entry(&table[10], "flags", &sd->flags, sizeof(int), 0644, proc_dointvec_minmax); + /* &table[11] is terminator */ return table; } -static ctl_table *sd_alloc_ctl_cpu_table(int cpu) +static ctl_table * sd_alloc_ctl_cpu_table(int cpu) { struct ctl_table *entry, *table; struct sched_domain *sd; @@ -5323,6 +5440,8 @@ static ctl_table *sd_alloc_ctl_cpu_table(int cpu) for_each_domain(cpu, sd) domain_num++; entry = table = sd_alloc_ctl_entry(domain_num + 1); + if (table == NULL) + return NULL; i = 0; for_each_domain(cpu, sd) { @@ -5337,24 +5456,38 @@ static ctl_table *sd_alloc_ctl_cpu_table(int cpu) } static struct ctl_table_header *sd_sysctl_header; -static void init_sched_domain_sysctl(void) +static void register_sched_domain_sysctl(void) { int i, cpu_num = num_online_cpus(); struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1); char buf[32]; + if (entry == NULL) + return; + sd_ctl_dir[0].child = entry; - for (i = 0; i < cpu_num; i++, entry++) { + for_each_online_cpu(i) { snprintf(buf, 32, "cpu%d", i); entry->procname = kstrdup(buf, GFP_KERNEL); entry->mode = 0555; entry->child = sd_alloc_ctl_cpu_table(i); + entry++; } sd_sysctl_header = register_sysctl_table(sd_ctl_root); } + +static void unregister_sched_domain_sysctl(void) +{ + unregister_sysctl_table(sd_sysctl_header); + sd_sysctl_header = NULL; + sd_free_ctl_entry(&sd_ctl_dir[0].child); +} #else -static void init_sched_domain_sysctl(void) +static void register_sched_domain_sysctl(void) +{ +} +static void 
unregister_sched_domain_sysctl(void) { } #endif @@ -5391,7 +5524,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) case CPU_ONLINE: case CPU_ONLINE_FROZEN: - /* Strictly unneccessary, as first user will wake it. */ + /* Strictly unnecessary, as first user will wake it. */ wake_up_process(cpu_rq(cpu)->migration_thread); break; @@ -5409,19 +5542,21 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) case CPU_DEAD: case CPU_DEAD_FROZEN: + cpuset_lock(); /* around calls to cpuset_cpus_allowed_lock() */ migrate_live_tasks(cpu); rq = cpu_rq(cpu); kthread_stop(rq->migration_thread); rq->migration_thread = NULL; /* Idle task back to normal (off runqueue, low prio) */ - rq = task_rq_lock(rq->idle, &flags); + spin_lock_irq(&rq->lock); update_rq_clock(rq); deactivate_task(rq, rq->idle, 0); rq->idle->static_prio = MAX_PRIO; __setscheduler(rq, rq->idle, SCHED_NORMAL, 0); rq->idle->sched_class = &idle_sched_class; migrate_dead_tasks(cpu); - task_rq_unlock(rq, &flags); + spin_unlock_irq(&rq->lock); + cpuset_unlock(); migrate_nr_uninterruptible(rq); BUG_ON(rq->nr_running != 0); @@ -5531,20 +5666,20 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu) } if (!group->__cpu_power) { - printk("\n"); + printk(KERN_CONT "\n"); printk(KERN_ERR "ERROR: domain->cpu_power not " "set\n"); break; } if (!cpus_weight(group->cpumask)) { - printk("\n"); + printk(KERN_CONT "\n"); printk(KERN_ERR "ERROR: empty group\n"); break; } if (cpus_intersects(groupmask, group->cpumask)) { - printk("\n"); + printk(KERN_CONT "\n"); printk(KERN_ERR "ERROR: repeated CPUs\n"); break; } @@ -5552,11 +5687,11 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu) cpus_or(groupmask, groupmask, group->cpumask); cpumask_scnprintf(str, NR_CPUS, group->cpumask); - printk(" %s", str); + printk(KERN_CONT " %s", str); group = group->next; } while (group != sd->groups); - printk("\n"); + printk(KERN_CONT "\n"); if (!cpus_equal(sd->span, groupmask)) printk(KERN_ERR "ERROR: groups don't span " @@ -5680,7 +5815,7 @@ static int __init isolated_cpu_setup(char *str) return 1; } -__setup ("isolcpus=", isolated_cpu_setup); +__setup("isolcpus=", isolated_cpu_setup); /* * init_sched_build_groups takes the cpumask we wish to span, and a pointer @@ -5836,7 +5971,7 @@ static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg) { int group; - cpumask_t mask = cpu_sibling_map[cpu]; + cpumask_t mask = per_cpu(cpu_sibling_map, cpu); cpus_and(mask, mask, *cpu_map); group = first_cpu(mask); if (sg) @@ -5865,7 +6000,7 @@ static int cpu_to_phys_group(int cpu, const cpumask_t *cpu_map, cpus_and(mask, mask, *cpu_map); group = first_cpu(mask); #elif defined(CONFIG_SCHED_SMT) - cpumask_t mask = cpu_sibling_map[cpu]; + cpumask_t mask = per_cpu(cpu_sibling_map, cpu); cpus_and(mask, mask, *cpu_map); group = first_cpu(mask); #else @@ -5909,24 +6044,23 @@ static void init_numa_sched_groups_power(struct sched_group *group_head) if (!sg) return; -next_sg: - for_each_cpu_mask(j, sg->cpumask) { - struct sched_domain *sd; + do { + for_each_cpu_mask(j, sg->cpumask) { + struct sched_domain *sd; - sd = &per_cpu(phys_domains, j); - if (j != first_cpu(sd->groups->cpumask)) { - /* - * Only add "power" once for each - * physical package. - */ - continue; - } + sd = &per_cpu(phys_domains, j); + if (j != first_cpu(sd->groups->cpumask)) { + /* + * Only add "power" once for each + * physical package. 
+ */ + continue; + } - sg_inc_cpu_power(sg, sd->groups->__cpu_power); - } - sg = sg->next; - if (sg != group_head) - goto next_sg; + sg_inc_cpu_power(sg, sd->groups->__cpu_power); + } + sg = sg->next; + } while (sg != group_head); } #endif @@ -6037,7 +6171,7 @@ static int build_sched_domains(const cpumask_t *cpu_map) /* * Allocate the per-node list of sched groups */ - sched_group_nodes = kzalloc(sizeof(struct sched_group *)*MAX_NUMNODES, + sched_group_nodes = kcalloc(MAX_NUMNODES, sizeof(struct sched_group *), GFP_KERNEL); if (!sched_group_nodes) { printk(KERN_WARNING "Can not alloc sched group node list\n"); @@ -6100,7 +6234,7 @@ static int build_sched_domains(const cpumask_t *cpu_map) p = sd; sd = &per_cpu(cpu_domains, i); *sd = SD_SIBLING_INIT; - sd->span = cpu_sibling_map[i]; + sd->span = per_cpu(cpu_sibling_map, i); cpus_and(sd->span, sd->span, *cpu_map); sd->parent = p; p->child = sd; @@ -6111,7 +6245,7 @@ static int build_sched_domains(const cpumask_t *cpu_map) #ifdef CONFIG_SCHED_SMT /* Set up CPU (sibling) groups */ for_each_cpu_mask(i, *cpu_map) { - cpumask_t this_sibling_map = cpu_sibling_map[i]; + cpumask_t this_sibling_map = per_cpu(cpu_sibling_map, i); cpus_and(this_sibling_map, this_sibling_map, *cpu_map); if (i != first_cpu(this_sibling_map)) continue; @@ -6273,24 +6407,31 @@ error: return -ENOMEM; #endif } + +static cpumask_t *doms_cur; /* current sched domains */ +static int ndoms_cur; /* number of sched domains in 'doms_cur' */ + +/* + * Special case: If a kmalloc of a doms_cur partition (array of + * cpumask_t) fails, then fallback to a single sched domain, + * as determined by the single cpumask_t fallback_doms. + */ +static cpumask_t fallback_doms; + /* * Set up scheduler domains and groups. Callers must hold the hotplug lock. + * For now this just excludes isolated cpus, but could be used to + * exclude other special cases in the future. */ static int arch_init_sched_domains(const cpumask_t *cpu_map) { - cpumask_t cpu_default_map; - int err; - - /* - * Setup mask for cpus without special case scheduling requirements. - * For now this just excludes isolated cpus, but could be used to - * exclude other special cases in the future. - */ - cpus_andnot(cpu_default_map, *cpu_map, cpu_isolated_map); - - err = build_sched_domains(&cpu_default_map); - - return err; + ndoms_cur = 1; + doms_cur = kmalloc(sizeof(cpumask_t), GFP_KERNEL); + if (!doms_cur) + doms_cur = &fallback_doms; + cpus_andnot(*doms_cur, *cpu_map, cpu_isolated_map); + register_sched_domain_sysctl(); + return build_sched_domains(doms_cur); } static void arch_destroy_sched_domains(const cpumask_t *cpu_map) @@ -6306,6 +6447,8 @@ static void detach_destroy_domains(const cpumask_t *cpu_map) { int i; + unregister_sched_domain_sysctl(); + for_each_cpu_mask(i, *cpu_map) cpu_attach_domain(NULL, i); synchronize_sched(); @@ -6313,30 +6456,65 @@ static void detach_destroy_domains(const cpumask_t *cpu_map) } /* - * Partition sched domains as specified by the cpumasks below. - * This attaches all cpus from the cpumasks to the NULL domain, - * waits for a RCU quiescent period, recalculates sched - * domain information and then attaches them back to the - * correct sched domains + * Partition sched domains as specified by the 'ndoms_new' + * cpumasks in the array doms_new[] of cpumasks. This compares + * doms_new[] to the current sched domain partitioning, doms_cur[]. + * It destroys each deleted domain and builds each new domain. + * + * 'doms_new' is an array of cpumask_t's of length 'ndoms_new'. 
+ * The masks don't intersect (don't overlap.) We should setup one + * sched domain for each mask. CPUs not in any of the cpumasks will + * not be load balanced. If the same cpumask appears both in the + * current 'doms_cur' domains and in the new 'doms_new', we can leave + * it as it is. + * + * The passed in 'doms_new' should be kmalloc'd. This routine takes + * ownership of it and will kfree it when done with it. If the caller + * failed the kmalloc call, then it can pass in doms_new == NULL, + * and partition_sched_domains() will fallback to the single partition + * 'fallback_doms'. + * * Call with hotplug lock held */ -int partition_sched_domains(cpumask_t *partition1, cpumask_t *partition2) +void partition_sched_domains(int ndoms_new, cpumask_t *doms_new) { - cpumask_t change_map; - int err = 0; + int i, j; - cpus_and(*partition1, *partition1, cpu_online_map); - cpus_and(*partition2, *partition2, cpu_online_map); - cpus_or(change_map, *partition1, *partition2); + if (doms_new == NULL) { + ndoms_new = 1; + doms_new = &fallback_doms; + cpus_andnot(doms_new[0], cpu_online_map, cpu_isolated_map); + } - /* Detach sched domains from all of the affected cpus */ - detach_destroy_domains(&change_map); - if (!cpus_empty(*partition1)) - err = build_sched_domains(partition1); - if (!err && !cpus_empty(*partition2)) - err = build_sched_domains(partition2); + /* Destroy deleted domains */ + for (i = 0; i < ndoms_cur; i++) { + for (j = 0; j < ndoms_new; j++) { + if (cpus_equal(doms_cur[i], doms_new[j])) + goto match1; + } + /* no match - a current sched domain not in new doms_new[] */ + detach_destroy_domains(doms_cur + i); +match1: + ; + } - return err; + /* Build new domains */ + for (i = 0; i < ndoms_new; i++) { + for (j = 0; j < ndoms_cur; j++) { + if (cpus_equal(doms_new[i], doms_cur[j])) + goto match2; + } + /* no match - add a new doms_new */ + build_sched_domains(doms_new + i); +match2: + ; + } + + /* Remember the new sched domains */ + if (doms_cur != &fallback_doms) + kfree(doms_cur); + doms_cur = doms_new; + ndoms_cur = ndoms_new; } #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) @@ -6467,8 +6645,6 @@ void __init sched_init_smp(void) /* XXX: Theoretical race here - CPU may be hotplugged now */ hotcpu_notifier(update_sched_domains, 0); - init_sched_domain_sysctl(); - /* Move init over to a non-isolated CPU */ if (set_cpus_allowed(current, non_isolated_cpus) < 0) BUG(); @@ -6522,19 +6698,20 @@ void __init sched_init(void) init_cfs_rq_p[i] = cfs_rq; init_cfs_rq(cfs_rq, rq); - cfs_rq->tg = &init_task_grp; + cfs_rq->tg = &init_task_group; list_add(&cfs_rq->leaf_cfs_rq_list, &rq->leaf_cfs_rq_list); init_sched_entity_p[i] = se; se->cfs_rq = &rq->cfs; se->my_q = cfs_rq; - se->load.weight = init_task_grp_load; + se->load.weight = init_task_group_load; se->load.inv_weight = - div64_64(1ULL<<32, init_task_grp_load); + div64_64(1ULL<<32, init_task_group_load); se->parent = NULL; } - init_task_grp.shares = init_task_grp_load; + init_task_group.shares = init_task_group_load; + spin_lock_init(&init_task_group.lock); #endif for (j = 0; j < CPU_LOAD_IDX_MAX; j++) @@ -6620,15 +6797,34 @@ EXPORT_SYMBOL(__might_sleep); #endif #ifdef CONFIG_MAGIC_SYSRQ +static void normalize_task(struct rq *rq, struct task_struct *p) +{ + int on_rq; + update_rq_clock(rq); + on_rq = p->se.on_rq; + if (on_rq) + deactivate_task(rq, p, 0); + __setscheduler(rq, p, SCHED_NORMAL, 0); + if (on_rq) { + activate_task(rq, p, 0); + resched_task(rq->curr); + } +} + void normalize_rt_tasks(void) { struct task_struct *g, *p; 
unsigned long flags; struct rq *rq; - int on_rq; read_lock_irq(&tasklist_lock); do_each_thread(g, p) { + /* + * Only normalize user tasks: + */ + if (!p->mm) + continue; + p->se.exec_start = 0; #ifdef CONFIG_SCHEDSTATS p->se.wait_start = 0; @@ -6649,26 +6845,9 @@ void normalize_rt_tasks(void) spin_lock_irqsave(&p->pi_lock, flags); rq = __task_rq_lock(p); -#ifdef CONFIG_SMP - /* - * Do not touch the migration thread: - */ - if (p == rq->migration_thread) - goto out_unlock; -#endif - update_rq_clock(rq); - on_rq = p->se.on_rq; - if (on_rq) - deactivate_task(rq, p, 0); - __setscheduler(rq, p, SCHED_NORMAL, 0); - if (on_rq) { - activate_task(rq, p, 0); - resched_task(rq->curr); - } -#ifdef CONFIG_SMP - out_unlock: -#endif + normalize_task(rq, p); + __task_rq_unlock(rq); spin_unlock_irqrestore(&p->pi_lock, flags); } while_each_thread(g, p); @@ -6725,9 +6904,9 @@ void set_curr_task(int cpu, struct task_struct *p) #ifdef CONFIG_FAIR_GROUP_SCHED /* allocate runqueue etc for a new task group */ -struct task_grp *sched_create_group(void) +struct task_group *sched_create_group(void) { - struct task_grp *tg; + struct task_group *tg; struct cfs_rq *cfs_rq; struct sched_entity *se; struct rq *rq; @@ -6779,22 +6958,20 @@ struct task_grp *sched_create_group(void) } tg->shares = NICE_0_LOAD; + spin_lock_init(&tg->lock); return tg; err: for_each_possible_cpu(i) { - if (tg->cfs_rq && tg->cfs_rq[i]) + if (tg->cfs_rq) kfree(tg->cfs_rq[i]); - if (tg->se && tg->se[i]) + if (tg->se) kfree(tg->se[i]); } - if (tg->cfs_rq) - kfree(tg->cfs_rq); - if (tg->se) - kfree(tg->se); - if (tg) - kfree(tg); + kfree(tg->cfs_rq); + kfree(tg->se); + kfree(tg); return ERR_PTR(-ENOMEM); } @@ -6803,7 +6980,7 @@ err: static void free_sched_group(struct rcu_head *rhp) { struct cfs_rq *cfs_rq = container_of(rhp, struct cfs_rq, rcu); - struct task_grp *tg = cfs_rq->tg; + struct task_group *tg = cfs_rq->tg; struct sched_entity *se; int i; @@ -6822,7 +6999,7 @@ static void free_sched_group(struct rcu_head *rhp) } /* Destroy runqueue etc associated with a task group */ -void sched_destroy_group(struct task_grp *tg) +void sched_destroy_group(struct task_group *tg) { struct cfs_rq *cfs_rq; int i; @@ -6898,20 +7075,139 @@ static void set_se_shares(struct sched_entity *se, unsigned long shares) spin_unlock_irq(&rq->lock); } -int sched_group_set_shares(struct task_grp *tg, unsigned long shares) +int sched_group_set_shares(struct task_group *tg, unsigned long shares) { int i; + spin_lock(&tg->lock); if (tg->shares == shares) - return 0; - - /* return -EINVAL if the new value is not sane */ + goto done; tg->shares = shares; for_each_possible_cpu(i) set_se_shares(tg->se[i], shares); +done: + spin_unlock(&tg->lock); return 0; } +unsigned long sched_group_shares(struct task_group *tg) +{ + return tg->shares; +} + #endif /* CONFIG_FAIR_GROUP_SCHED */ + +#ifdef CONFIG_FAIR_CGROUP_SCHED + +/* return corresponding task_group object of a cgroup */ +static inline struct task_group *cgroup_tg(struct cgroup *cont) +{ + return container_of(cgroup_subsys_state(cont, cpu_cgroup_subsys_id), + struct task_group, css); +} + +static struct cgroup_subsys_state * +cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont) +{ + struct task_group *tg; + + if (!cont->parent) { + /* This is early initialization for the top cgroup */ + init_task_group.css.cgroup = cont; + return &init_task_group.css; + } + + /* we support only 1-level deep hierarchical scheduler atm */ + if (cont->parent->parent) + return ERR_PTR(-EINVAL); + + tg = sched_create_group(); + if 
(IS_ERR(tg)) + return ERR_PTR(-ENOMEM); + + /* Bind the cgroup to task_group object we just created */ + tg->css.cgroup = cont; + + return &tg->css; +} + +static void cpu_cgroup_destroy(struct cgroup_subsys *ss, + struct cgroup *cont) +{ + struct task_group *tg = cgroup_tg(cont); + + sched_destroy_group(tg); +} + +static int cpu_cgroup_can_attach(struct cgroup_subsys *ss, + struct cgroup *cont, struct task_struct *tsk) +{ + /* We don't support RT-tasks being in separate groups */ + if (tsk->sched_class != &fair_sched_class) + return -EINVAL; + + return 0; +} + +static void +cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cont, + struct cgroup *old_cont, struct task_struct *tsk) +{ + sched_move_task(tsk); +} + +static ssize_t cpu_shares_write(struct cgroup *cont, struct cftype *cftype, + struct file *file, const char __user *userbuf, + size_t nbytes, loff_t *ppos) +{ + unsigned long shareval; + struct task_group *tg = cgroup_tg(cont); + char buffer[2*sizeof(unsigned long) + 1]; + int rc; + + if (nbytes > 2*sizeof(unsigned long)) /* safety check */ + return -E2BIG; + + if (copy_from_user(buffer, userbuf, nbytes)) + return -EFAULT; + + buffer[nbytes] = 0; /* nul-terminate */ + shareval = simple_strtoul(buffer, NULL, 10); + + rc = sched_group_set_shares(tg, shareval); + + return (rc < 0 ? rc : nbytes); +} + +static u64 cpu_shares_read_uint(struct cgroup *cont, struct cftype *cft) +{ + struct task_group *tg = cgroup_tg(cont); + + return (u64) tg->shares; +} + +static struct cftype cpu_shares = { + .name = "shares", + .read_uint = cpu_shares_read_uint, + .write = cpu_shares_write, +}; + +static int cpu_cgroup_populate(struct cgroup_subsys *ss, struct cgroup *cont) +{ + return cgroup_add_file(cont, ss, &cpu_shares); +} + +struct cgroup_subsys cpu_cgroup_subsys = { + .name = "cpu", + .create = cpu_cgroup_create, + .destroy = cpu_cgroup_destroy, + .can_attach = cpu_cgroup_can_attach, + .attach = cpu_cgroup_attach, + .populate = cpu_cgroup_populate, + .subsys_id = cpu_cgroup_subsys_id, + .early_init = 1, +}; + +#endif /* CONFIG_FAIR_CGROUP_SCHED */
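
The final hunks above wire the fair-group scheduler into the cgroup filesystem: a "cpu" subsystem whose per-group "shares" control file is parsed by cpu_shares_write() (a plain decimal string via simple_strtoul) and applied with sched_group_set_shares(), with the default weight being NICE_0_LOAD (1024). As a rough, hypothetical illustration of driving that interface from userspace, not something contained in the patch itself (the cgroup mount point, group name, and exact visible file name are assumptions here), a minimal sketch in C:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Assumed paths: wherever the cgroup fs is mounted and the group's name. */
	const char *path = "/dev/cgroup/mygroup/cpu.shares";
	const char *val = "2048";	/* twice the default NICE_0_LOAD (1024) weight */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* cpu_shares_write() expects a short decimal string; no newline needed. */
	if (write(fd, val, strlen(val)) != (ssize_t)strlen(val))
		perror("write");
	close(fd);
	return 0;
}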