__raw_spin_unlock(&lockdep_lock);
return 0;
}
+ /* prevent any recursions within lockdep from causing deadlocks */
+ current->lockdep_recursion++;
return 1;
}
if (debug_locks && !__raw_spin_is_locked(&lockdep_lock))
return DEBUG_LOCKS_WARN_ON(1);
+ current->lockdep_recursion--;
__raw_spin_unlock(&lockdep_lock);
return 0;
}
((key1) >> (64-MAX_LOCKDEP_KEYS_BITS)) ^ \
(key2))
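
The fragment above appears to be the tail of the chain-key folding macro: each newly acquired class is mixed into the running 64-bit chain key with a rotate-and-xor, so the same sequence of classes always hashes to the same chain_key. A minimal user-space sketch of that folding, where KEY_BITS stands in for MAX_LOCKDEP_KEYS_BITS (the value here is illustrative) and all names are hypothetical:

/*
 * Rolling chain-key sketch: fold one class id at a time into a 64-bit key.
 * KEY_BITS and the class indices are illustrative, not the kernel's values.
 */
#include <stdint.h>
#include <stdio.h>

#define KEY_BITS 13

static uint64_t iterate_key(uint64_t key, uint64_t class_id)
{
    return ((key << KEY_BITS) ^ (key >> (64 - KEY_BITS))) ^ class_id;
}

int main(void)
{
    uint64_t key = 0;
    uint64_t classes[] = { 3, 7, 11 };   /* class indices acquired in order */

    for (int i = 0; i < 3; i++)
        key = iterate_key(key, classes[i]);
    printf("chain_key = %#llx\n", (unsigned long long)key);
    return 0;
}
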
-notrace void lockdep_off(void)
+void lockdep_off(void)
{
current->lockdep_recursion++;
}
EXPORT_SYMBOL(lockdep_off);
-notrace void lockdep_on(void)
+void lockdep_on(void)
{
current->lockdep_recursion--;
}
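
Both the graph_lock()/graph_unlock() hunk above and lockdep_off()/lockdep_on() lean on the same per-task counter, current->lockdep_recursion: while it is non-zero, lockdep's entry points bail out early, so nothing the checker calls internally can re-enter it and deadlock on its own data structures. A minimal user-space sketch of that guard pattern, with purely illustrative names:

/*
 * Recursion-guard sketch in the spirit of current->lockdep_recursion.
 * checker_off/checker_on/checked_lock are hypothetical stand-ins.
 */
#include <stdio.h>

static __thread int checker_recursion;   /* per-thread, like the per-task counter */

static void checker_off(void) { checker_recursion++; }
static void checker_on(void)  { checker_recursion--; }

static void expensive_validation(const char *name)
{
    /* imagine this allocates, prints, or takes the checker's own locks */
    printf("validating %s\n", name);
}

static void checked_lock(const char *name)
{
    if (checker_recursion)      /* checker already on the stack: skip it */
        return;
    checker_off();              /* anything we call below cannot re-enter us */
    expensive_validation(name);
    checker_on();
}

int main(void)
{
    checked_lock("demo");
    return 0;
}
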
* Return 1 otherwise and keep <backwards_match> unchanged.
* Return 0 on error.
*/
-static noinline notrace int
+static noinline int
find_usage_backwards(struct lock_class *source, unsigned int depth)
{
struct lock_list *entry;
}
unsigned long nr_lock_chains;
-static struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
+struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
+int nr_chain_hlocks;
+static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
+
+struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
+{
+ return lock_classes + chain_hlocks[chain->base + i];
+}
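
lock_chain_get_class() decodes the flat encoding added here: every chain owns a contiguous window [base, base + depth) of chain_hlocks[], and each 16-bit slot is an index into lock_classes[]. A self-contained sketch of that indexing scheme, using toy sizes and hypothetical names:

/*
 * chain_hlocks indexing sketch: a chain records (base, depth) into a shared
 * array of class indices; the i-th class of a chain is lock_classes[slot].
 * Sizes and names are illustrative, not the kernel's.
 */
#include <stdio.h>

#define MAX_CLASSES 8
#define MAX_HLOCKS  32

struct lock_class { const char *name; };
struct lock_chain { unsigned short base, depth; };

static struct lock_class lock_classes[MAX_CLASSES] = {
    { "A" }, { "B" }, { "C" },
};
static unsigned short chain_hlocks[MAX_HLOCKS];

static struct lock_class *chain_class(struct lock_chain *chain, int i)
{
    return lock_classes + chain_hlocks[chain->base + i];
}

int main(void)
{
    /* record the chain A -> C -> B starting at offset 0 */
    struct lock_chain chain = { .base = 0, .depth = 3 };
    chain_hlocks[0] = 0;  /* A */
    chain_hlocks[1] = 2;  /* C */
    chain_hlocks[2] = 1;  /* B */

    for (int i = 0; i < chain.depth; i++)
        printf("%s ", chain_class(&chain, i)->name);
    printf("\n");
    return 0;
}
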
/*
* Look up a dependency chain. If the key is not present yet then
* add it and return 1 - in this case the new dependency chain is
* validated. If the key is already hashed, return 0.
* (On return with 1 graph_lock is held.)
*/
-static inline int lookup_chain_cache(u64 chain_key, struct lock_class *class)
+static inline int lookup_chain_cache(struct task_struct *curr,
+ struct held_lock *hlock,
+ u64 chain_key)
{
+ struct lock_class *class = hlock->class;
struct list_head *hash_head = chainhashentry(chain_key);
struct lock_chain *chain;
+ struct held_lock *hlock_curr, *hlock_next;
+ int i, j, n, cn;
if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
return 0;
}
chain = lock_chains + nr_lock_chains++;
chain->chain_key = chain_key;
+ chain->irq_context = hlock->irq_context;
+ /* Find the first held_lock of current chain */
+ hlock_next = hlock;
+ for (i = curr->lockdep_depth - 1; i >= 0; i--) {
+ hlock_curr = curr->held_locks + i;
+ if (hlock_curr->irq_context != hlock_next->irq_context)
+ break;
+ hlock_next = hlock;
+ }
+ i++;
+ chain->depth = curr->lockdep_depth + 1 - i;
+ cn = nr_chain_hlocks;
+ while (cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS) {
+ n = cmpxchg(&nr_chain_hlocks, cn, cn + chain->depth);
+ if (n == cn)
+ break;
+ cn = n;
+ }
+ if (likely(cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
+ chain->base = cn;
+ for (j = 0; j < chain->depth - 1; j++, i++) {
+ int lock_id = curr->held_locks[i].class - lock_classes;
+ chain_hlocks[chain->base + j] = lock_id;
+ }
+ chain_hlocks[chain->base + j] = class - lock_classes;
+ }
list_add_tail_rcu(&chain->entry, hash_head);
debug_atomic_inc(&chain_lookup_misses);
inc_chains();
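
The cmpxchg() loop above reserves chain->depth consecutive chain_hlocks[] slots without extra locking: re-read nr_chain_hlocks until the compare-and-swap lands, or give up once the array can no longer hold the chain. A user-space sketch of the same reservation pattern using C11 atomics in place of the kernel's cmpxchg(); names and sizes are illustrative:

/*
 * Lock-free slot reservation sketch: claim [cn, cn + depth) from a shared
 * cursor, or return -1 if fewer than depth slots remain.
 */
#include <stdatomic.h>
#include <stdio.h>

#define MAX_SLOTS 32

static atomic_int next_slot;

static int reserve_slots(int depth)
{
    int cn = atomic_load(&next_slot);

    while (cn + depth <= MAX_SLOTS) {
        /* on failure cn is refreshed with the current value and we retry */
        if (atomic_compare_exchange_weak(&next_slot, &cn, cn + depth))
            return cn;
    }
    return -1;
}

int main(void)
{
    printf("first chain at %d\n", reserve_slots(3));   /* 0 */
    printf("second chain at %d\n", reserve_slots(2));  /* 3 */
    return 0;
}
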
* graph_lock for us)
*/
if (!hlock->trylock && (hlock->check == 2) &&
- lookup_chain_cache(chain_key, hlock->class)) {
+ lookup_chain_cache(curr, hlock, chain_key)) {
/*
* Check whether last held lock:
*
* We are building curr_chain_key incrementally, so double-check
* it from scratch, to make sure that it's done correctly:
*/
-static notrace void check_chain_key(struct task_struct *curr)
+static void check_chain_key(struct task_struct *curr)
{
#ifdef CONFIG_DEBUG_LOCKDEP
struct held_lock *hlock, *prev_hlock = NULL;
/*
* Mark all held locks with a usage bit:
*/
-static notrace int
+static int
mark_held_locks(struct task_struct *curr, int hardirq)
{
enum lock_usage_bit usage_bit;
/*
* Hardirqs will be enabled:
*/
-void notrace trace_hardirqs_on_caller(unsigned long a0)
+void trace_hardirqs_on_caller(unsigned long a0)
{
struct task_struct *curr = current;
unsigned long ip;
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);
-void notrace trace_hardirqs_on(void)
+void trace_hardirqs_on(void)
{
trace_hardirqs_on_caller(CALLER_ADDR0);
}
/*
* Hardirqs were disabled:
*/
-void notrace trace_hardirqs_off_caller(unsigned long a0)
+void trace_hardirqs_off_caller(unsigned long a0)
{
struct task_struct *curr = current;
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);
-void notrace trace_hardirqs_off(void)
+void trace_hardirqs_off(void)
{
trace_hardirqs_off_caller(CALLER_ADDR0);
}
/*
* Mark a lock with a usage bit, and validate the state transition:
*/
-static notrace int mark_lock(struct task_struct *curr, struct held_lock *this,
+static int mark_lock(struct task_struct *curr, struct held_lock *this,
enum lock_usage_bit new_bit)
{
unsigned int new_mask = 1 << new_bit, ret = 1;
/*
* Check whether we follow the irq-flags state precisely:
*/
-static notrace void check_flags(unsigned long flags)
+static void check_flags(unsigned long flags)
{
-#if defined(CONFIG_DEBUG_LOCKDEP) && defined(CONFIG_TRACE_IRQFLAGS)
+#if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP) && \
+ defined(CONFIG_TRACE_IRQFLAGS)
if (!debug_locks)
return;
* We are not always called with irqs disabled - do that here,
* and also avoid lockdep recursion:
*/
-notrace void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
+void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
int trylock, int read, int check, unsigned long ip)
{
unsigned long flags;
EXPORT_SYMBOL_GPL(lock_acquire);
-notrace void lock_release(struct lockdep_map *lock, int nested,
+void lock_release(struct lockdep_map *lock, int nested,
unsigned long ip)
{
unsigned long flags;
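
For context, lock_acquire() and lock_release() are the entry points a locking primitive uses to report its operations to lockdep. Below is a rough sketch of an annotated lock built on the signatures shown in this patch; the mylock type and helpers are hypothetical, and the check value 2 mirrors the hlock->check == 2 test above. This is a sketch under those assumptions, not a drop-in kernel example.

#include <linux/lockdep.h>
#include <linux/kernel.h>

/* Hypothetical lock type carrying a lockdep_map for the annotations. */
struct mylock {
    int                 owned;
    struct lockdep_map  dep_map;
};

static struct lock_class_key mylock_key;

static void mylock_init(struct mylock *l)
{
    l->owned = 0;
    lockdep_init_map(&l->dep_map, "mylock", &mylock_key, 0);
}

static void mylock_lock(struct mylock *l)
{
    /* subclass 0, not a trylock, exclusive (read = 0), full checks (2) */
    lock_acquire(&l->dep_map, 0, 0, 0, 2, _RET_IP_);
    l->owned = 1;           /* stand-in for the real acquisition */
}

static void mylock_unlock(struct mylock *l)
{
    l->owned = 0;
    lock_release(&l->dep_map, 0, _RET_IP_);
}
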