[PATCH] Keys: Pass session keyring to call_usermodehelper()
[linux-2.6-omap-h63xx.git] / kernel / sched.c
index 0dc3158667a2c7aba86f65fad19989e32a1baf8e..76080d142e3d9c08897e7b713d23dd0e2f809db0 100644 (file)
@@ -2576,7 +2576,7 @@ void fastcall add_preempt_count(int val)
        /*
         * Underflow?
         */
-       BUG_ON(((int)preempt_count() < 0));
+       BUG_ON((preempt_count() < 0));
        preempt_count() += val;
        /*
         * Spinlock count overflowing soon?
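For context (an assumption about the headers of that era, not shown in this diff): preempt_count() expands to an lvalue, which is why add_preempt_count() can both test it and increment it in place.

/* include/linux/preempt.h, roughly as it looked at the time (quoted from memory) */
#define preempt_count()	(current_thread_info()->preempt_count)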
@@ -2869,7 +2869,7 @@ need_resched:
 
 int default_wake_function(wait_queue_t *curr, unsigned mode, int sync, void *key)
 {
-       task_t *p = curr->task;
+       task_t *p = curr->private;
        return try_to_wake_up(p, mode, sync);
 }
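A minimal sketch of where the renamed field comes from (an assumption about include/linux/wait.h after this rename, not part of the diff shown here): the wait-queue entry carries the task pointer in ->private, as set up by init_waitqueue_entry().

/* Sketch, assuming the post-rename wait.h layout; not taken from this diff. */
static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
{
	q->flags = 0;
	q->private = p;		/* was q->task before the rename */
	q->func = default_wake_function;
}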
 
@@ -3755,19 +3755,22 @@ EXPORT_SYMBOL(cond_resched);
  */
 int cond_resched_lock(spinlock_t * lock)
 {
+       int ret = 0;
+
        if (need_lockbreak(lock)) {
                spin_unlock(lock);
                cpu_relax();
+               ret = 1;
                spin_lock(lock);
        }
        if (need_resched()) {
                _raw_spin_unlock(lock);
                preempt_enable_no_resched();
                __cond_resched();
+               ret = 1;
                spin_lock(lock);
-               return 1;
        }
-       return 0;
+       return ret;
 }
 
 EXPORT_SYMBOL(cond_resched_lock);
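With the hunk above, cond_resched_lock() returns 1 whenever it dropped and reacquired the lock, whether because of lock contention (need_lockbreak) or because a reschedule was needed. Below is a hypothetical caller sketch: scan_under_lock() and its list are made up for illustration, while the locking and list primitives are the standard kernel ones.

#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

/* Hypothetical example, not part of the patch: restart a long scan when
 * cond_resched_lock() reports that the lock was dropped. */
static void scan_under_lock(spinlock_t *lock, struct list_head *head)
{
	struct list_head *pos;

	spin_lock(lock);
restart:
	list_for_each(pos, head) {
		/* ... process one entry while holding the lock ... */

		if (cond_resched_lock(lock))
			goto restart;	/* lock was dropped; the list may have changed */
	}
	spin_unlock(lock);
}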
@@ -3811,7 +3814,7 @@ EXPORT_SYMBOL(yield);
  */
 void __sched io_schedule(void)
 {
-       struct runqueue *rq = &per_cpu(runqueues, _smp_processor_id());
+       struct runqueue *rq = &per_cpu(runqueues, raw_smp_processor_id());
 
        atomic_inc(&rq->nr_iowait);
        schedule();
@@ -3822,7 +3825,7 @@ EXPORT_SYMBOL(io_schedule);
 
 long __sched io_schedule_timeout(long timeout)
 {
-       struct runqueue *rq = &per_cpu(runqueues, _smp_processor_id());
+       struct runqueue *rq = &per_cpu(runqueues, raw_smp_processor_id());
        long ret;
 
        atomic_inc(&rq->nr_iowait);
@@ -4243,7 +4246,7 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *tsk)
 
        /* No more Mr. Nice Guy. */
        if (dest_cpu == NR_CPUS) {
-               tsk->cpus_allowed = cpuset_cpus_allowed(tsk);
+               cpus_setall(tsk->cpus_allowed);
                dest_cpu = any_online_cpu(tsk->cpus_allowed);
 
                /*