[PATCH] lockdep: annotate timer base locks
[linux-2.6-omap-h63xx.git] / kernel / sched.c
index 2629c1711fd62be84574153e0ae62077895f3b36..ae4db0185bb2dec2f863acda5490a784e6af3a3c 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/capability.h>
 #include <linux/completion.h>
 #include <linux/kernel_stat.h>
+#include <linux/debug_locks.h>
 #include <linux/security.h>
 #include <linux/notifier.h>
 #include <linux/profile.h>
@@ -307,6 +308,13 @@ static inline void finish_lock_switch(runqueue_t *rq, task_t *prev)
        /* this is a valid case when another task releases the spinlock */
        rq->lock.owner = current;
 #endif
+       /*
+        * If we are tracking spinlock dependencies then we have to
+        * fix up the runqueue lock - which gets 'carried over' from
+        * prev into current:
+        */
+       spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
+
        spin_unlock_irq(&rq->lock);
 }
 
@@ -1777,6 +1785,7 @@ task_t * context_switch(runqueue_t *rq, task_t *prev, task_t *next)
                WARN_ON(rq->prev_mm);
                rq->prev_mm = oldmm;
        }
+       spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
 
        /* Here we just switch the register state and the stack. */
        switch_to(prev, next, prev);
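Lockdep keeps its held-lock bookkeeping per task, but rq->lock is acquired by the outgoing task and only released by the incoming one once the switch has happened (the existing "another task releases the spinlock" comment above refers to the same situation). The spin_release() added before switch_to() retires the lock from prev's bookkeeping, and the spin_acquire() added in finish_lock_switch() registers it for the new current, so the spin_unlock_irq() that follows matches state recorded for the right task. A minimal user-space sketch of that hand-over idea (purely hypothetical toy_* names, not kernel code):

/*
 * Illustrative user-space sketch only.  It models why a lock that is
 * locked by one task but unlocked by another needs an explicit ownership
 * hand-over in per-task lock bookkeeping, which is what the
 * spin_release()/spin_acquire() pair around the context switch does.
 */
#include <stdio.h>

struct toy_lock { const char *name; };

struct toy_task {
	const char *comm;
	struct toy_lock *held[8];	/* per-task "held locks" list */
	int nr_held;
};

static void toy_acquire(struct toy_task *t, struct toy_lock *l)
{
	t->held[t->nr_held++] = l;	/* record the lock against this task */
}

static void toy_release(struct toy_task *t, struct toy_lock *l)
{
	int i;

	for (i = 0; i < t->nr_held; i++) {
		if (t->held[i] == l) {
			t->held[i] = t->held[--t->nr_held];
			return;
		}
	}
	/*
	 * Unlocking something this task never recorded: the inconsistency
	 * the scheduler would report without the hand-over.
	 */
	printf("WARNING: %s releases %s it does not hold\n", t->comm, l->name);
}

/* Rough analogue of spin_release() in prev plus spin_acquire() in next. */
static void toy_hand_over(struct toy_task *prev, struct toy_task *next,
			  struct toy_lock *l)
{
	toy_release(prev, l);
	toy_acquire(next, l);
}

int main(void)
{
	struct toy_task prev = { .comm = "prev" }, next = { .comm = "next" };
	struct toy_lock rq_lock = { .name = "rq->lock" };

	toy_acquire(&prev, &rq_lock);		/* outgoing task locks    */
	toy_hand_over(&prev, &next, &rq_lock);	/* "context switch"       */
	toy_release(&next, &rq_lock);		/* incoming task unlocks  */
	return 0;
}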
@@ -3142,12 +3151,13 @@ void fastcall add_preempt_count(int val)
        /*
         * Underflow?
         */
-       BUG_ON((preempt_count() < 0));
+       if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
+               return;
        preempt_count() += val;
        /*
         * Spinlock count overflowing soon?
         */
-       BUG_ON((preempt_count() & PREEMPT_MASK) >= PREEMPT_MASK-10);
+       DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >= PREEMPT_MASK-10);
 }
 EXPORT_SYMBOL(add_preempt_count);
 
@@ -3156,11 +3166,15 @@ void fastcall sub_preempt_count(int val)
        /*
         * Underflow?
         */
-       BUG_ON(val > preempt_count());
+       if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
+               return;
        /*
         * Is the spinlock portion underflowing?
         */
-       BUG_ON((val < PREEMPT_MASK) && !(preempt_count() & PREEMPT_MASK));
+       if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
+                       !(preempt_count() & PREEMPT_MASK)))
+               return;
+
        preempt_count() -= val;
 }
 EXPORT_SYMBOL(sub_preempt_count);
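The two hunks above downgrade the preempt-count sanity checks from BUG_ON(), which takes the machine down, to DEBUG_LOCKS_WARN_ON() from the newly included <linux/debug_locks.h>; the macro reports the problem and evaluates to the condition, so the callers can simply return before touching preempt_count() any further. A simplified user-space stand-in for that warn-and-bail pattern (toy_* names are invented, and the real macro does more):

/*
 * Simplified stand-in for the DEBUG_LOCKS_WARN_ON() pattern used above.
 * The point: report the bad condition, hand the result back to the caller
 * so it can return early, and keep running instead of BUG_ON()'s hard stop.
 */
#include <stdio.h>

static int debug_locks = 1;		/* is lock debugging still trusted? */

#define TOY_DEBUG_LOCKS_WARN_ON(cond)					\
({									\
	int __failed = !!(cond);					\
	if (__failed && debug_locks) {					\
		debug_locks = 0;	/* report only the first hit */	\
		printf("lock debugging: %s failed\n", #cond);		\
	}								\
	__failed;							\
})

static int toy_preempt_count;

static void toy_add_preempt_count(int val)
{
	if (TOY_DEBUG_LOCKS_WARN_ON(toy_preempt_count < 0))
		return;			/* bail out instead of crashing */
	toy_preempt_count += val;
}

int main(void)
{
	toy_preempt_count = -1;		/* simulate an underflow */
	toy_add_preempt_count(1);	/* warns once, leaves the count alone */
	printf("toy_preempt_count = %d\n", toy_preempt_count);
	return 0;
}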
@@ -4378,6 +4392,7 @@ asmlinkage long sys_sched_yield(void)
         * no need to preempt or enable interrupts:
         */
        __release(rq->lock);
+       spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
        _raw_spin_unlock(&rq->lock);
        preempt_enable_no_resched();
 
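sys_sched_yield() releases rq->lock with _raw_spin_unlock(), which bypasses the lockdep-aware spin_unlock() wrapper, so the added spin_release() tells the lock validator explicitly that the lock is being dropped (the __release() line above it is the older sparse annotation playing the same role for static checking); cond_resched_lock() further down gains the same pairing. A toy sketch of why the raw unlock needs the separate annotation (invented names, not kernel code):

/*
 * "annotated" is what the lock validator believes, "locked" is the real
 * lock word.  A lockdep-aware unlock would update both; a raw unlock
 * updates only the lock word, so the validator has to be told separately,
 * which is what spin_release() before _raw_spin_unlock() does above.
 */
#include <assert.h>
#include <stdio.h>

struct toy_spinlock {
	int locked;		/* real lock state                  */
	int annotated;		/* what the dependency checker sees */
};

static void toy_spin_lock(struct toy_spinlock *l)
{
	l->locked = 1;
	l->annotated = 1;	/* full API keeps both in sync */
}

static void toy_spin_release(struct toy_spinlock *l)
{
	l->annotated = 0;	/* manual annotation */
}

static void toy_raw_spin_unlock(struct toy_spinlock *l)
{
	l->locked = 0;		/* raw API: the checker is not told */
}

int main(void)
{
	struct toy_spinlock lock = { 0, 0 };

	toy_spin_lock(&lock);
	toy_spin_release(&lock);	/* tell the checker first ...       */
	toy_raw_spin_unlock(&lock);	/* ... then do the low-level unlock */
	assert(lock.locked == lock.annotated);
	printf("checker and lock state agree\n");
	return 0;
}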
@@ -4386,7 +4401,16 @@ asmlinkage long sys_sched_yield(void)
        return 0;
 }
 
-static inline void __cond_resched(void)
+static inline int __resched_legal(void)
+{
+       if (unlikely(preempt_count()))
+               return 0;
+       if (unlikely(system_state != SYSTEM_RUNNING))
+               return 0;
+       return 1;
+}
+
+static void __cond_resched(void)
 {
 #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
        __might_sleep(__FILE__, __LINE__);
@@ -4396,10 +4420,6 @@ static inline void __cond_resched(void)
         * PREEMPT_ACTIVE, which could trigger a second
         * cond_resched() call.
         */
-       if (unlikely(preempt_count()))
-               return;
-       if (unlikely(system_state != SYSTEM_RUNNING))
-               return;
        do {
                add_preempt_count(PREEMPT_ACTIVE);
                schedule();
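The preempt_count()/system_state tests that used to live inside __cond_resched() are factored out into __resched_legal(), and the hunks below make cond_resched(), cond_resched_lock() and cond_resched_softirq() evaluate it alongside need_resched() before they give anything up, so a caller no longer drops its spinlock or softirq protection only to have __cond_resched() decide not to schedule. A compressed illustration of that ordering (toy_* names are invented):

/*
 * The "is a reschedule legal here?" predicate is evaluated before any
 * state is given up, instead of inside the helper that runs after the
 * unlock.
 */
#include <stdbool.h>
#include <stdio.h>

static int toy_preempt_count;		/* stands in for preempt_count() */
static bool toy_system_running = true;	/* stands in for system_state    */

static bool toy_resched_legal(void)
{
	return toy_preempt_count == 0 && toy_system_running;
}

static int toy_cond_resched_lock(bool need_resched)
{
	if (need_resched && toy_resched_legal()) {
		printf("drop lock, reschedule, retake lock\n");
		return 1;
	}
	return 0;		/* nothing dropped, nothing retaken */
}

int main(void)
{
	toy_preempt_count = 1;		/* reschedule not legal         */
	toy_cond_resched_lock(true);	/* no-op                        */
	toy_preempt_count = 0;
	toy_cond_resched_lock(true);	/* now worth dropping the lock  */
	return 0;
}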
@@ -4409,13 +4429,12 @@ static inline void __cond_resched(void)
 
 int __sched cond_resched(void)
 {
-       if (need_resched()) {
+       if (need_resched() && __resched_legal()) {
                __cond_resched();
                return 1;
        }
        return 0;
 }
-
 EXPORT_SYMBOL(cond_resched);
 
 /*
@@ -4436,7 +4455,8 @@ int cond_resched_lock(spinlock_t *lock)
                ret = 1;
                spin_lock(lock);
        }
-       if (need_resched()) {
+       if (need_resched() && __resched_legal()) {
+               spin_release(&lock->dep_map, 1, _THIS_IP_);
                _raw_spin_unlock(lock);
                preempt_enable_no_resched();
                __cond_resched();
@@ -4445,25 +4465,24 @@ int cond_resched_lock(spinlock_t *lock)
        }
        return ret;
 }
-
 EXPORT_SYMBOL(cond_resched_lock);
 
 int __sched cond_resched_softirq(void)
 {
        BUG_ON(!in_softirq());
 
-       if (need_resched()) {
-               __local_bh_enable();
+       if (need_resched() && __resched_legal()) {
+               raw_local_irq_disable();
+               _local_bh_enable();
+               raw_local_irq_enable();
                __cond_resched();
                local_bh_disable();
                return 1;
        }
        return 0;
 }
-
 EXPORT_SYMBOL(cond_resched_softirq);
 
-
 /**
  * yield - yield the current processor to other threads.
  *
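In the cond_resched_softirq() hunk above, the old __local_bh_enable() call is replaced by _local_bh_enable() bracketed with raw_local_irq_disable()/raw_local_irq_enable(); the raw_ variants are the non-tracing irq operations, and the bracketing appears to be there because _local_bh_enable() wants to run with hardirqs off. A generic sketch of that pattern, calling an interrupts-off-only helper from a path where interrupts are otherwise on (invented names, not kernel code):

/*
 * A helper documented as "must be called with interrupts off" is bracketed
 * by disable/enable at a call site where interrupts are otherwise on,
 * mirroring the raw_local_irq_disable()/_local_bh_enable()/
 * raw_local_irq_enable() sequence above.
 */
#include <assert.h>
#include <stdio.h>

static int toy_irqs_enabled = 1;
static int toy_softirq_count = 1;	/* bh currently disabled */

static void toy_irq_disable(void) { toy_irqs_enabled = 0; }
static void toy_irq_enable(void)  { toy_irqs_enabled = 1; }

/* Must run with "interrupts" off: it updates state that is not irq-safe. */
static void toy_bh_enable_no_softirq_run(void)
{
	assert(!toy_irqs_enabled);
	toy_softirq_count--;
}

int main(void)
{
	/* Call site where interrupts are on, as in cond_resched_softirq(). */
	toy_irq_disable();
	toy_bh_enable_no_softirq_run();
	toy_irq_enable();
	printf("softirq count now %d\n", toy_softirq_count);
	return 0;
}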
@@ -4689,7 +4708,7 @@ void show_state(void)
        } while_each_thread(g, p);
 
        read_unlock(&tasklist_lock);
-       mutex_debug_show_all_locks();
+       debug_show_all_locks();
 }
 
 /**