X-Git-Url: http://pilppa.org/gitweb/?a=blobdiff_plain;f=kernel%2Fsched.c;h=9474b23c28bf41f5989df3b94c5e810b0f1e971a;hb=801e4062fda6496fe9bee3e6915a2aa108f974e5;hp=524285e46fa788e7e0a04612a611965b7650a2d5;hpb=ef3f2de2b5496f721b12f21a157e19eac816394b;p=linux-2.6-omap-h63xx.git

diff --git a/kernel/sched.c b/kernel/sched.c
index 524285e46fa..9474b23c28b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1255,12 +1255,12 @@ static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
 
 #define sched_class_highest (&rt_sched_class)
 
-static void inc_nr_running(struct task_struct *p, struct rq *rq)
+static void inc_nr_running(struct rq *rq)
 {
 	rq->nr_running++;
 }
 
-static void dec_nr_running(struct task_struct *p, struct rq *rq)
+static void dec_nr_running(struct rq *rq)
 {
 	rq->nr_running--;
 }
@@ -1350,11 +1350,11 @@ static int effective_prio(struct task_struct *p)
  */
 static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
 {
-	if (p->state == TASK_UNINTERRUPTIBLE)
+	if (task_contributes_to_load(p))
 		rq->nr_uninterruptible--;
 
 	enqueue_task(rq, p, wakeup);
-	inc_nr_running(p, rq);
+	inc_nr_running(rq);
 }
 
 /*
@@ -1362,11 +1362,11 @@ static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
  */
 static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
 {
-	if (p->state == TASK_UNINTERRUPTIBLE)
+	if (task_contributes_to_load(p))
 		rq->nr_uninterruptible++;
 
 	dequeue_task(rq, p, sleep);
-	dec_nr_running(p, rq);
+	dec_nr_running(rq);
 }
 
 /**
@@ -1895,8 +1895,7 @@ out:
 
 int fastcall wake_up_process(struct task_struct *p)
 {
-	return try_to_wake_up(p, TASK_STOPPED | TASK_TRACED |
-			      TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0);
+	return try_to_wake_up(p, TASK_ALL, 0);
 }
 EXPORT_SYMBOL(wake_up_process);
 
@@ -2006,7 +2005,7 @@ void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 		 * management (if any):
 		 */
 		p->sched_class->task_new(rq, p);
-		inc_nr_running(p, rq);
+		inc_nr_running(rq);
 	}
 	check_preempt_curr(rq, p);
 #ifdef CONFIG_SMP
@@ -4124,8 +4123,7 @@ void complete(struct completion *x)
 
 	spin_lock_irqsave(&x->wait.lock, flags);
 	x->done++;
-	__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
-			 1, 0, NULL);
+	__wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
 	spin_unlock_irqrestore(&x->wait.lock, flags);
 }
 EXPORT_SYMBOL(complete);
@@ -4136,8 +4134,7 @@ void complete_all(struct completion *x)
 
 	spin_lock_irqsave(&x->wait.lock, flags);
 	x->done += UINT_MAX/2;
-	__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
-			 0, 0, NULL);
+	__wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
 	spin_unlock_irqrestore(&x->wait.lock, flags);
 }
 EXPORT_SYMBOL(complete_all);
@@ -4151,8 +4148,10 @@ do_wait_for_common(struct completion *x, long timeout, int state)
 		wait.flags |= WQ_FLAG_EXCLUSIVE;
 		__add_wait_queue_tail(&x->wait, &wait);
 		do {
-			if (state == TASK_INTERRUPTIBLE &&
-			    signal_pending(current)) {
+			if ((state == TASK_INTERRUPTIBLE &&
+			     signal_pending(current)) ||
+			    (state == TASK_KILLABLE &&
+			     fatal_signal_pending(current))) {
 				__remove_wait_queue(&x->wait, &wait);
 				return -ERESTARTSYS;
 			}
@@ -4212,6 +4211,15 @@ wait_for_completion_interruptible_timeout(struct completion *x,
 }
 EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
 
+int __sched wait_for_completion_killable(struct completion *x)
+{
+	long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
+	if (t == -ERESTARTSYS)
+		return t;
+	return 0;
+}
+EXPORT_SYMBOL(wait_for_completion_killable);
+
 static long __sched
 sleep_on_common(wait_queue_head_t *q, int state, long timeout)
 {
@@ -4945,19 +4953,15 @@ EXPORT_SYMBOL(_cond_resched);
  */
 int cond_resched_lock(spinlock_t *lock)
 {
+	int resched = need_resched() && system_state == SYSTEM_RUNNING;
 	int ret = 0;
 
-	if (need_lockbreak(lock)) {
+	if (spin_needbreak(lock) || resched) {
 		spin_unlock(lock);
-		cpu_relax();
-		ret = 1;
-		spin_lock(lock);
-	}
-	if (need_resched() && system_state == SYSTEM_RUNNING) {
-		spin_release(&lock->dep_map, 1, _THIS_IP_);
-		_raw_spin_unlock(lock);
-		preempt_enable_no_resched();
-		__cond_resched();
+		if (resched && need_resched())
+			__cond_resched();
+		else
+			cpu_relax();
 		ret = 1;
 		spin_lock(lock);
 	}
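
For context, the completion hunks above wire the new TASK_KILLABLE state (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE) into the completion API: a task can sleep uninterruptibly yet still be woken by a fatal signal, and since task_contributes_to_load() keys off TASK_UNINTERRUPTIBLE, such sleepers still count toward the load average. A minimal usage sketch follows; the dev_ready completion and my_dev_wait_ready() helper are invented for illustration and are not part of this patch.

#include <linux/completion.h>

static DECLARE_COMPLETION(dev_ready);	/* hypothetical: signalled when HW is up */

/* Hypothetical helper: block until the device is ready, but let a
 * fatal signal (e.g. SIGKILL) abort the wait instead of leaving the
 * caller stuck in an unkillable D-state sleep. */
static int my_dev_wait_ready(void)
{
	int ret = wait_for_completion_killable(&dev_ready);

	if (ret)		/* -ERESTARTSYS: fatal signal pending */
		return ret;	/* back out; do not touch the device */
	return 0;		/* completion arrived normally */
}

The init thread or IRQ handler would signal readiness with complete(&dev_ready); per the hunks above, complete() now wakes TASK_NORMAL sleepers, which covers TASK_INTERRUPTIBLE, TASK_UNINTERRUPTIBLE and therefore TASK_KILLABLE waiters.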
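The final hunk reworks cond_resched_lock(): instead of the old need_lockbreak() check plus a separate reschedule path, it drops the lock whenever another CPU is spinning on it (spin_needbreak()) or a reschedule was due on entry, then either calls __cond_resched() or just cpu_relax() before retaking the lock. A usage sketch, with an invented lock, list and element type; a real caller must revalidate its iteration state whenever cond_resched_lock() returns 1, since the lock was dropped and re-acquired.

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/sched.h>

static DEFINE_SPINLOCK(my_lock);	/* hypothetical */
static LIST_HEAD(my_list);		/* hypothetical */

struct my_item {			/* hypothetical element type */
	struct list_head node;
};

static void my_scan(void)		/* hypothetical long walk under my_lock */
{
	struct my_item *it;

	spin_lock(&my_lock);
	list_for_each_entry(it, &my_list, node) {
		/* ... per-item work under the lock ... */

		/* Returns 1 if the lock was dropped (to yield to a spinning
		 * waiter or to reschedule) and re-taken; the list may have
		 * changed meanwhile, so this sketch simply bails out. */
		if (cond_resched_lock(&my_lock))
			break;
	}
	spin_unlock(&my_lock);
}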