4 * Runtime locking correctness validator
6 * Started by Ingo Molnar:
8 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
9 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
11 * this code maps all the lock dependencies as they occur in a live kernel
12 * and will warn about the following classes of locking bugs:
14 * - lock inversion scenarios
15 * - circular lock dependencies
16 * - hardirq/softirq safe/unsafe locking bugs
18 * Bugs are reported even if the current locking scenario does not cause
19 * any deadlock at this point.
21 * I.e. if at any time in the past two locks were taken in a different order,
22 * even if it happened in another task, and even if those were different
23 * locks (but of the same class as this lock), this code will detect it.
25 * Thanks to Arjan van de Ven for coming up with the initial idea of
26 * mapping lock dependencies at runtime.
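/*
 * An illustrative sketch (editorial addition; lock names are hypothetical)
 * of the classic AB-BA inversion that the dependency map catches:
 *
 *	task A				task B
 *	------				------
 *	spin_lock(&lock_a);
 *					spin_lock(&lock_b);
 *	spin_lock(&lock_b);		// records lock_a -> lock_b
 *					spin_lock(&lock_a);
 *					// records lock_b -> lock_a: inversion reported
 *
 * The report is emitted even if the two tasks never actually deadlock,
 * because the dependency graph already contains both orderings.
 */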
28 #define DISABLE_BRANCH_PROFILING
29 #include <linux/mutex.h>
30 #include <linux/sched.h>
31 #include <linux/delay.h>
32 #include <linux/module.h>
33 #include <linux/proc_fs.h>
34 #include <linux/seq_file.h>
35 #include <linux/spinlock.h>
36 #include <linux/kallsyms.h>
37 #include <linux/interrupt.h>
38 #include <linux/stacktrace.h>
39 #include <linux/debug_locks.h>
40 #include <linux/irqflags.h>
41 #include <linux/utsname.h>
42 #include <linux/hash.h>
43 #include <linux/ftrace.h>
45 #include <asm/sections.h>
47 #include "lockdep_internals.h"
49 #ifdef CONFIG_PROVE_LOCKING
50 int prove_locking = 1;
51 module_param(prove_locking, int, 0644);
53 #define prove_locking 0
56 #ifdef CONFIG_LOCK_STAT
58 module_param(lock_stat, int, 0644);
64 * lockdep_lock: protects the lockdep graph, the hashes and the
65 * class/list/hash allocators.
67 * This is one of the rare exceptions where it's justified
68 * to use a raw spinlock - we really don't want the spinlock
69 * code to recurse back into the lockdep code...
71 static raw_spinlock_t lockdep_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
73 static int graph_lock(void)
75 __raw_spin_lock(&lockdep_lock);
77 * Make sure that if another CPU detected a bug while
78 * walking the graph we don't change it (while the other
79 * CPU is busy printing out stuff with the graph lock
83 __raw_spin_unlock(&lockdep_lock);
86 /* prevent any recursions within lockdep from causing deadlocks */
87 current->lockdep_recursion++;
91 static inline int graph_unlock(void)
93 if (debug_locks && !__raw_spin_is_locked(&lockdep_lock))
94 return DEBUG_LOCKS_WARN_ON(1);
96 current->lockdep_recursion--;
97 __raw_spin_unlock(&lockdep_lock);
102 * Turn lock debugging off and return with 0 if it was off already,
103 * and also release the graph lock:
105 static inline int debug_locks_off_graph_unlock(void)
107 int ret = debug_locks_off();
109 __raw_spin_unlock(&lockdep_lock);
114 static int lockdep_initialized;
116 unsigned long nr_list_entries;
117 static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
120 * All data structures here are protected by the graph lock (lockdep_lock).
122 * Mutex key structs only get allocated once, during bootup, and never
123 * get freed - this significantly simplifies the debugging code.
125 unsigned long nr_lock_classes;
126 static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
128 static inline struct lock_class *hlock_class(struct held_lock *hlock)
130 if (!hlock->class_idx) {
131 DEBUG_LOCKS_WARN_ON(1);
134 return lock_classes + hlock->class_idx - 1;
137 #ifdef CONFIG_LOCK_STAT
138 static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);
140 static int lock_point(unsigned long points[], unsigned long ip)
144 for (i = 0; i < LOCKSTAT_POINTS; i++) {
145 if (points[i] == 0) {
156 static void lock_time_inc(struct lock_time *lt, s64 time)
161 if (time < lt->min || !lt->min)
168 static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
170 dst->min += src->min;
171 dst->max += src->max;
172 dst->total += src->total;
176 struct lock_class_stats lock_stats(struct lock_class *class)
178 struct lock_class_stats stats;
181 memset(&stats, 0, sizeof(struct lock_class_stats));
182 for_each_possible_cpu(cpu) {
183 struct lock_class_stats *pcs =
184 &per_cpu(lock_stats, cpu)[class - lock_classes];
186 for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
187 stats.contention_point[i] += pcs->contention_point[i];
189 for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
190 stats.contending_point[i] += pcs->contending_point[i];
192 lock_time_add(&pcs->read_waittime, &stats.read_waittime);
193 lock_time_add(&pcs->write_waittime, &stats.write_waittime);
195 lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
196 lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);
198 for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
199 stats.bounces[i] += pcs->bounces[i];
205 void clear_lock_stats(struct lock_class *class)
209 for_each_possible_cpu(cpu) {
210 struct lock_class_stats *cpu_stats =
211 &per_cpu(lock_stats, cpu)[class - lock_classes];
213 memset(cpu_stats, 0, sizeof(struct lock_class_stats));
215 memset(class->contention_point, 0, sizeof(class->contention_point));
216 memset(class->contending_point, 0, sizeof(class->contending_point));
219 static struct lock_class_stats *get_lock_stats(struct lock_class *class)
221 return &get_cpu_var(lock_stats)[class - lock_classes];
224 static void put_lock_stats(struct lock_class_stats *stats)
226 put_cpu_var(lock_stats);
229 static void lock_release_holdtime(struct held_lock *hlock)
231 struct lock_class_stats *stats;
237 holdtime = sched_clock() - hlock->holdtime_stamp;
239 stats = get_lock_stats(hlock_class(hlock));
241 lock_time_inc(&stats->read_holdtime, holdtime);
243 lock_time_inc(&stats->write_holdtime, holdtime);
244 put_lock_stats(stats);
247 static inline void lock_release_holdtime(struct held_lock *hlock)
253 * We keep a global list of all lock classes. The list only grows,
254 * never shrinks. The list is only accessed with the lockdep
255 * spinlock held.
257 LIST_HEAD(all_lock_classes);
260 * The lockdep classes are in a hash-table as well, for fast lookup:
262 #define CLASSHASH_BITS (MAX_LOCKDEP_KEYS_BITS - 1)
263 #define CLASSHASH_SIZE (1UL << CLASSHASH_BITS)
264 #define __classhashfn(key) hash_long((unsigned long)key, CLASSHASH_BITS)
265 #define classhashentry(key) (classhash_table + __classhashfn((key)))
267 static struct list_head classhash_table[CLASSHASH_SIZE];
270 * We put the lock dependency chains into a hash-table as well, to cache
273 #define CHAINHASH_BITS (MAX_LOCKDEP_CHAINS_BITS - 1)
274 #define CHAINHASH_SIZE (1UL << CHAINHASH_BITS)
275 #define __chainhashfn(chain) hash_long(chain, CHAINHASH_BITS)
276 #define chainhashentry(chain) (chainhash_table + __chainhashfn((chain)))
278 static struct list_head chainhash_table[CHAINHASH_SIZE];
281 * The hash key of the lock dependency chains is a hash itself too:
282 * it's a hash of all locks taken up to that lock, including that lock.
283 * It's a 64-bit hash, because it's important for the keys to be unique.
286 #define iterate_chain_key(key1, key2) \
287 (((key1) << MAX_LOCKDEP_KEYS_BITS) ^ \
288 ((key1) >> (64-MAX_LOCKDEP_KEYS_BITS)) ^ \
289 (key2))
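/*
 * A minimal sketch (editorial, not from the original source) of how the
 * chain key accumulates while a task acquires locks; "id" is the class
 * index (class - lock_classes), exactly as check_chain_key() recomputes it:
 *
 *	u64 chain_key = 0;
 *	chain_key = iterate_chain_key(chain_key, id_of_first_class);
 *	chain_key = iterate_chain_key(chain_key, id_of_second_class);
 *
 * The resulting value indexes chainhash_table via chainhashentry(), so a
 * previously validated sequence of lock classes can be recognized without
 * re-running the full dependency checks.
 */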
291 void lockdep_off(void)
293 current->lockdep_recursion++;
296 EXPORT_SYMBOL(lockdep_off);
298 void lockdep_on(void)
300 current->lockdep_recursion--;
303 EXPORT_SYMBOL(lockdep_on);
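/*
 * Usage sketch (hypothetical caller): code that would otherwise trigger
 * known-false positives can bracket the region, which only bumps and drops
 * current->lockdep_recursion, so lockdep ignores events in between:
 *
 *	lockdep_off();
 *	...	// locking that lockdep should not track
 *	lockdep_on();
 *
 * The calls must be balanced and are per-task, not global.
 */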
306 * Debugging switches:
310 #define VERY_VERBOSE 0
313 # define HARDIRQ_VERBOSE 1
314 # define SOFTIRQ_VERBOSE 1
316 # define HARDIRQ_VERBOSE 0
317 # define SOFTIRQ_VERBOSE 0
320 #if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE
322 * Quick filtering for interesting events:
324 static int class_filter(struct lock_class *class)
328 if (class->name_version == 1 &&
329 !strcmp(class->name, "lockname"))
331 if (class->name_version == 1 &&
332 !strcmp(class->name, "&struct->lockfield"))
335 /* Filter everything else. Returning 1 here would allow everything else. */
340 static int verbose(struct lock_class *class)
343 return class_filter(class);
349 * Stack-trace: tightly packed array of stack backtrace
350 * addresses. Protected by the graph_lock.
352 unsigned long nr_stack_trace_entries;
353 static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
355 static int save_trace(struct stack_trace *trace)
357 trace->nr_entries = 0;
358 trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
359 trace->entries = stack_trace + nr_stack_trace_entries;
363 save_stack_trace(trace);
365 trace->max_entries = trace->nr_entries;
367 nr_stack_trace_entries += trace->nr_entries;
369 if (nr_stack_trace_entries == MAX_STACK_TRACE_ENTRIES) {
370 if (!debug_locks_off_graph_unlock())
373 printk("BUG: MAX_STACK_TRACE_ENTRIES too low!\n");
374 printk("turning off the locking correctness validator.\n");
383 unsigned int nr_hardirq_chains;
384 unsigned int nr_softirq_chains;
385 unsigned int nr_process_chains;
386 unsigned int max_lockdep_depth;
387 unsigned int max_recursion_depth;
389 static unsigned int lockdep_dependency_gen_id;
391 static bool lockdep_dependency_visit(struct lock_class *source,
395 lockdep_dependency_gen_id++;
396 if (source->dep_gen_id == lockdep_dependency_gen_id)
398 source->dep_gen_id = lockdep_dependency_gen_id;
402 #ifdef CONFIG_DEBUG_LOCKDEP
404 * We cannot printk in early bootup code. Not even early_printk()
405 * might work. So we mark any initialization errors and printk
406 * about it later on, in lockdep_info().
408 static int lockdep_init_error;
409 static unsigned long lockdep_init_trace_data[20];
410 static struct stack_trace lockdep_init_trace = {
411 .max_entries = ARRAY_SIZE(lockdep_init_trace_data),
412 .entries = lockdep_init_trace_data,
416 * Various lockdep statistics:
418 atomic_t chain_lookup_hits;
419 atomic_t chain_lookup_misses;
420 atomic_t hardirqs_on_events;
421 atomic_t hardirqs_off_events;
422 atomic_t redundant_hardirqs_on;
423 atomic_t redundant_hardirqs_off;
424 atomic_t softirqs_on_events;
425 atomic_t softirqs_off_events;
426 atomic_t redundant_softirqs_on;
427 atomic_t redundant_softirqs_off;
428 atomic_t nr_unused_locks;
429 atomic_t nr_cyclic_checks;
430 atomic_t nr_cyclic_check_recursions;
431 atomic_t nr_find_usage_forwards_checks;
432 atomic_t nr_find_usage_forwards_recursions;
433 atomic_t nr_find_usage_backwards_checks;
434 atomic_t nr_find_usage_backwards_recursions;
435 # define debug_atomic_inc(ptr) atomic_inc(ptr)
436 # define debug_atomic_dec(ptr) atomic_dec(ptr)
437 # define debug_atomic_read(ptr) atomic_read(ptr)
439 # define debug_atomic_inc(ptr) do { } while (0)
440 # define debug_atomic_dec(ptr) do { } while (0)
441 # define debug_atomic_read(ptr) 0
448 static const char *usage_str[] =
450 [LOCK_USED] = "initial-use ",
451 [LOCK_USED_IN_HARDIRQ] = "in-hardirq-W",
452 [LOCK_USED_IN_SOFTIRQ] = "in-softirq-W",
453 [LOCK_ENABLED_SOFTIRQS] = "softirq-on-W",
454 [LOCK_ENABLED_HARDIRQS] = "hardirq-on-W",
455 [LOCK_USED_IN_HARDIRQ_READ] = "in-hardirq-R",
456 [LOCK_USED_IN_SOFTIRQ_READ] = "in-softirq-R",
457 [LOCK_ENABLED_SOFTIRQS_READ] = "softirq-on-R",
458 [LOCK_ENABLED_HARDIRQS_READ] = "hardirq-on-R",
461 const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
463 return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
467 get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3, char *c4)
469 *c1 = '.', *c2 = '.', *c3 = '.', *c4 = '.';
471 if (class->usage_mask & LOCKF_USED_IN_HARDIRQ)
474 if (class->usage_mask & LOCKF_ENABLED_HARDIRQS)
477 if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ)
480 if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS)
483 if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
485 if (class->usage_mask & LOCKF_USED_IN_HARDIRQ_READ) {
487 if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
491 if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
493 if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ_READ) {
495 if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
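/*
 * Informal legend (editorial; inferred from the checks above, some of the
 * write-side assignments are not shown here) for the four characters that
 * print_lock_name() emits as {....}:
 *
 *	c1: hardirq write usage		c2: softirq write usage
 *	c3: hardirq read usage		c4: softirq read usage
 *
 *	'.'	neither used in that irq context nor enabled with it held
 *	'+'	the lock was used in that irq context
 *	'-'	that irq type was enabled while the lock was held
 *	'?'	both of the above (read-side positions)
 */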
500 static void print_lock_name(struct lock_class *class)
502 char str[KSYM_NAME_LEN], c1, c2, c3, c4;
505 get_usage_chars(class, &c1, &c2, &c3, &c4);
509 name = __get_key_name(class->key, str);
510 printk(" (%s", name);
512 printk(" (%s", name);
513 if (class->name_version > 1)
514 printk("#%d", class->name_version);
516 printk("/%d", class->subclass);
518 printk("){%c%c%c%c}", c1, c2, c3, c4);
521 static void print_lockdep_cache(struct lockdep_map *lock)
524 char str[KSYM_NAME_LEN];
528 name = __get_key_name(lock->key->subkeys, str);
533 static void print_lock(struct held_lock *hlock)
535 print_lock_name(hlock_class(hlock));
537 print_ip_sym(hlock->acquire_ip);
540 static void lockdep_print_held_locks(struct task_struct *curr)
542 int i, depth = curr->lockdep_depth;
545 printk("no locks held by %s/%d.\n", curr->comm, task_pid_nr(curr));
548 printk("%d lock%s held by %s/%d:\n",
549 depth, depth > 1 ? "s" : "", curr->comm, task_pid_nr(curr));
551 for (i = 0; i < depth; i++) {
553 print_lock(curr->held_locks + i);
557 static void print_lock_class_header(struct lock_class *class, int depth)
561 printk("%*s->", depth, "");
562 print_lock_name(class);
563 printk(" ops: %lu", class->ops);
566 for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
567 if (class->usage_mask & (1 << bit)) {
570 len += printk("%*s %s", depth, "", usage_str[bit]);
571 len += printk(" at:\n");
572 print_stack_trace(class->usage_traces + bit, len);
575 printk("%*s }\n", depth, "");
577 printk("%*s ... key at: ",depth,"");
578 print_ip_sym((unsigned long)class->key);
582 * printk all lock dependencies starting at <entry>:
584 static void print_lock_dependencies(struct lock_class *class, int depth)
586 struct lock_list *entry;
588 if (lockdep_dependency_visit(class, depth))
591 if (DEBUG_LOCKS_WARN_ON(depth >= 20))
594 print_lock_class_header(class, depth);
596 list_for_each_entry(entry, &class->locks_after, entry) {
597 if (DEBUG_LOCKS_WARN_ON(!entry->class))
600 print_lock_dependencies(entry->class, depth + 1);
602 printk("%*s ... acquired at:\n",depth,"");
603 print_stack_trace(&entry->trace, 2);
608 static void print_kernel_version(void)
610 printk("%s %.*s\n", init_utsname()->release,
611 (int)strcspn(init_utsname()->version, " "),
612 init_utsname()->version);
615 static int very_verbose(struct lock_class *class)
618 return class_filter(class);
624 * Is this the address of a static object:
626 static int static_obj(void *obj)
628 unsigned long start = (unsigned long) &_stext,
629 end = (unsigned long) &_end,
630 addr = (unsigned long) obj;
638 if ((addr >= start) && (addr < end))
645 for_each_possible_cpu(i) {
646 start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
647 end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
650 if ((addr >= start) && (addr < end))
658 return is_module_address(addr);
662 * To make lock name printouts unique, we calculate a per-name
663 * class->name_version generation counter:
665 static int count_matching_names(struct lock_class *new_class)
667 struct lock_class *class;
670 if (!new_class->name)
673 list_for_each_entry(class, &all_lock_classes, lock_entry) {
674 if (new_class->key - new_class->subclass == class->key)
675 return class->name_version;
676 if (class->name && !strcmp(class->name, new_class->name))
677 count = max(count, class->name_version);
684 * Look up a lock's class in the hash-table. (New classes are registered
685 * by register_lock_class() below, which also caches the result in the lock
686 * object itself, so the actual hash lookup happens once per lock object.)
688 static inline struct lock_class *
689 look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
691 struct lockdep_subclass_key *key;
692 struct list_head *hash_head;
693 struct lock_class *class;
695 #ifdef CONFIG_DEBUG_LOCKDEP
697 * If the architecture calls into lockdep before initializing
698 * the hashes then we'll warn about it later. (we cannot printk
701 if (unlikely(!lockdep_initialized)) {
703 lockdep_init_error = 1;
704 save_stack_trace(&lockdep_init_trace);
709 * Static locks do not have their class-keys yet - for them the key
710 * is the lock object itself:
712 if (unlikely(!lock->key))
713 lock->key = (void *)lock;
716 * NOTE: the class-key must be unique. For dynamic locks, a static
717 * lock_class_key variable is passed in through the mutex_init()
718 * (or spin_lock_init()) call - which acts as the key. For static
719 * locks we use the lock object itself as the key.
721 BUILD_BUG_ON(sizeof(struct lock_class_key) >
722 sizeof(struct lockdep_map));
724 key = lock->key->subkeys + subclass;
726 hash_head = classhashentry(key);
729 * We can walk the hash lock-free, because the hash only
730 * grows, and we are careful when adding entries to the end:
732 list_for_each_entry(class, hash_head, hash_entry) {
733 if (class->key == key) {
734 WARN_ON_ONCE(class->name != lock->name);
743 * Register a lock's class in the hash-table, if the class is not present
744 * yet. Otherwise we look it up. We cache the result in the lock object
745 * itself, so actual lookup of the hash should be once per lock object.
747 static inline struct lock_class *
748 register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
750 struct lockdep_subclass_key *key;
751 struct list_head *hash_head;
752 struct lock_class *class;
755 class = look_up_lock_class(lock, subclass);
760 * Debug-check: all keys must be persistent!
762 if (!static_obj(lock->key)) {
764 printk("INFO: trying to register non-static key.\n");
765 printk("the code is fine but needs lockdep annotation.\n");
766 printk("turning off the locking correctness validator.\n");
772 key = lock->key->subkeys + subclass;
773 hash_head = classhashentry(key);
775 raw_local_irq_save(flags);
777 raw_local_irq_restore(flags);
781 * We have to do the hash-walk again, to avoid races
784 list_for_each_entry(class, hash_head, hash_entry)
785 if (class->key == key)
788 * Allocate a new key from the static array, and add it to
791 if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
792 if (!debug_locks_off_graph_unlock()) {
793 raw_local_irq_restore(flags);
796 raw_local_irq_restore(flags);
798 printk("BUG: MAX_LOCKDEP_KEYS too low!\n");
799 printk("turning off the locking correctness validator.\n");
802 class = lock_classes + nr_lock_classes++;
803 debug_atomic_inc(&nr_unused_locks);
805 class->name = lock->name;
806 class->subclass = subclass;
807 INIT_LIST_HEAD(&class->lock_entry);
808 INIT_LIST_HEAD(&class->locks_before);
809 INIT_LIST_HEAD(&class->locks_after);
810 class->name_version = count_matching_names(class);
812 * We use RCU's safe list-add method to make
813 * parallel walking of the hash-list safe:
815 list_add_tail_rcu(&class->hash_entry, hash_head);
817 * Add it to the global list of classes:
819 list_add_tail_rcu(&class->lock_entry, &all_lock_classes);
821 if (verbose(class)) {
823 raw_local_irq_restore(flags);
825 printk("\nnew class %p: %s", class->key, class->name);
826 if (class->name_version > 1)
827 printk("#%d", class->name_version);
831 raw_local_irq_save(flags);
833 raw_local_irq_restore(flags);
839 raw_local_irq_restore(flags);
841 if (!subclass || force)
842 lock->class_cache = class;
844 if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
850 #ifdef CONFIG_PROVE_LOCKING
852 * Allocate a lockdep entry. (Assumes the graph_lock is held; returns
853 * NULL on failure.)
855 static struct lock_list *alloc_list_entry(void)
857 if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
858 if (!debug_locks_off_graph_unlock())
861 printk("BUG: MAX_LOCKDEP_ENTRIES too low!\n");
862 printk("turning off the locking correctness validator.\n");
865 return list_entries + nr_list_entries++;
869 * Add a new dependency to the head of the list:
871 static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
872 struct list_head *head, unsigned long ip, int distance)
874 struct lock_list *entry;
876 * Lock not present yet - get a new dependency struct and
877 * add it to the list:
879 entry = alloc_list_entry();
883 if (!save_trace(&entry->trace))
887 entry->distance = distance;
889 * Since we never remove from the dependency list, the list can
890 * be walked locklessly by other CPUs; it's only the allocation
891 * that must be protected by the spinlock. But this also means
892 * we must make new entries visible only once the writes to the
893 * entry itself become visible - hence the RCU op:
895 list_add_tail_rcu(&entry->entry, head);
901 * Recursive, forwards-direction lock-dependency checking, used for
902 * both noncyclic checking and for hardirq-unsafe/softirq-unsafe
905 * (to keep the stackframe of the recursive functions small we
906 * use these global variables, and we also mark various helper
907 * functions as noinline.)
909 static struct held_lock *check_source, *check_target;
912 * Print a dependency chain entry (this is only done when a deadlock
913 * has been detected):
916 print_circular_bug_entry(struct lock_list *target, unsigned int depth)
918 if (debug_locks_silent)
920 printk("\n-> #%u", depth);
921 print_lock_name(target->class);
923 print_stack_trace(&target->trace, 6);
929 * When a circular dependency is detected, print the
933 print_circular_bug_header(struct lock_list *entry, unsigned int depth)
935 struct task_struct *curr = current;
937 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
940 printk("\n=======================================================\n");
941 printk( "[ INFO: possible circular locking dependency detected ]\n");
942 print_kernel_version();
943 printk( "-------------------------------------------------------\n");
944 printk("%s/%d is trying to acquire lock:\n",
945 curr->comm, task_pid_nr(curr));
946 print_lock(check_source);
947 printk("\nbut task is already holding lock:\n");
948 print_lock(check_target);
949 printk("\nwhich lock already depends on the new lock.\n\n");
950 printk("\nthe existing dependency chain (in reverse order) is:\n");
952 print_circular_bug_entry(entry, depth);
957 static noinline int print_circular_bug_tail(void)
959 struct task_struct *curr = current;
960 struct lock_list this;
962 if (debug_locks_silent)
965 this.class = hlock_class(check_source);
966 if (!save_trace(&this.trace))
969 print_circular_bug_entry(&this, 0);
971 printk("\nother info that might help us debug this:\n\n");
972 lockdep_print_held_locks(curr);
974 printk("\nstack backtrace:\n");
980 #define RECURSION_LIMIT 40
982 static int noinline print_infinite_recursion_bug(void)
984 if (!debug_locks_off_graph_unlock())
992 unsigned long __lockdep_count_forward_deps(struct lock_class *class,
995 struct lock_list *entry;
996 unsigned long ret = 1;
998 if (lockdep_dependency_visit(class, depth))
1002 * Recurse this class's dependency list:
1004 list_for_each_entry(entry, &class->locks_after, entry)
1005 ret += __lockdep_count_forward_deps(entry->class, depth + 1);
1010 unsigned long lockdep_count_forward_deps(struct lock_class *class)
1012 unsigned long ret, flags;
1014 local_irq_save(flags);
1015 __raw_spin_lock(&lockdep_lock);
1016 ret = __lockdep_count_forward_deps(class, 0);
1017 __raw_spin_unlock(&lockdep_lock);
1018 local_irq_restore(flags);
1023 unsigned long __lockdep_count_backward_deps(struct lock_class *class,
1026 struct lock_list *entry;
1027 unsigned long ret = 1;
1029 if (lockdep_dependency_visit(class, depth))
1032 * Recurse this class's dependency list:
1034 list_for_each_entry(entry, &class->locks_before, entry)
1035 ret += __lockdep_count_backward_deps(entry->class, depth + 1);
1040 unsigned long lockdep_count_backward_deps(struct lock_class *class)
1042 unsigned long ret, flags;
1044 local_irq_save(flags);
1045 __raw_spin_lock(&lockdep_lock);
1046 ret = __lockdep_count_backward_deps(class, 0);
1047 __raw_spin_unlock(&lockdep_lock);
1048 local_irq_restore(flags);
1054 * Prove that the dependency graph starting at <entry> cannot
1055 * lead to <target>. Print an error and return 0 if it does.
1058 check_noncircular(struct lock_class *source, unsigned int depth)
1060 struct lock_list *entry;
1062 if (lockdep_dependency_visit(source, depth))
1065 debug_atomic_inc(&nr_cyclic_check_recursions);
1066 if (depth > max_recursion_depth)
1067 max_recursion_depth = depth;
1068 if (depth >= RECURSION_LIMIT)
1069 return print_infinite_recursion_bug();
1071 * Check this lock's dependency list:
1073 list_for_each_entry(entry, &source->locks_after, entry) {
1074 if (entry->class == hlock_class(check_target))
1075 return print_circular_bug_header(entry, depth+1);
1076 debug_atomic_inc(&nr_cyclic_checks);
1077 if (!check_noncircular(entry->class, depth+1))
1078 return print_circular_bug_entry(entry, depth+1);
1083 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
1085 * Forwards and backwards subgraph searching, for the purposes of
1086 * proving that two subgraphs can be connected by a new dependency
1087 * without creating any illegal irq-safe -> irq-unsafe lock dependency.
1089 static enum lock_usage_bit find_usage_bit;
1090 static struct lock_class *forwards_match, *backwards_match;
1093 * Find a node in the forwards-direction dependency sub-graph starting
1094 * at <source> that matches <find_usage_bit>.
1096 * Return 2 if such a node exists in the subgraph, and put that node
1097 * into <forwards_match>.
1099 * Return 1 otherwise and keep <forwards_match> unchanged.
1100 * Return 0 on error.
1103 find_usage_forwards(struct lock_class *source, unsigned int depth)
1105 struct lock_list *entry;
1108 if (lockdep_dependency_visit(source, depth))
1111 if (depth > max_recursion_depth)
1112 max_recursion_depth = depth;
1113 if (depth >= RECURSION_LIMIT)
1114 return print_infinite_recursion_bug();
1116 debug_atomic_inc(&nr_find_usage_forwards_checks);
1117 if (source->usage_mask & (1 << find_usage_bit)) {
1118 forwards_match = source;
1123 * Check this lock's dependency list:
1125 list_for_each_entry(entry, &source->locks_after, entry) {
1126 debug_atomic_inc(&nr_find_usage_forwards_recursions);
1127 ret = find_usage_forwards(entry->class, depth+1);
1128 if (ret == 2 || ret == 0)
1135 * Find a node in the backwards-direction dependency sub-graph starting
1136 * at <source> that matches <find_usage_bit>.
1138 * Return 2 if such a node exists in the subgraph, and put that node
1139 * into <backwards_match>.
1141 * Return 1 otherwise and keep <backwards_match> unchanged.
1142 * Return 0 on error.
1145 find_usage_backwards(struct lock_class *source, unsigned int depth)
1147 struct lock_list *entry;
1150 if (lockdep_dependency_visit(source, depth))
1153 if (!__raw_spin_is_locked(&lockdep_lock))
1154 return DEBUG_LOCKS_WARN_ON(1);
1156 if (depth > max_recursion_depth)
1157 max_recursion_depth = depth;
1158 if (depth >= RECURSION_LIMIT)
1159 return print_infinite_recursion_bug();
1161 debug_atomic_inc(&nr_find_usage_backwards_checks);
1162 if (source->usage_mask & (1 << find_usage_bit)) {
1163 backwards_match = source;
1167 if (!source && debug_locks_off_graph_unlock()) {
1173 * Check this lock's dependency list:
1175 list_for_each_entry(entry, &source->locks_before, entry) {
1176 debug_atomic_inc(&nr_find_usage_backwards_recursions);
1177 ret = find_usage_backwards(entry->class, depth+1);
1178 if (ret == 2 || ret == 0)
1185 print_bad_irq_dependency(struct task_struct *curr,
1186 struct held_lock *prev,
1187 struct held_lock *next,
1188 enum lock_usage_bit bit1,
1189 enum lock_usage_bit bit2,
1190 const char *irqclass)
1192 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1195 printk("\n======================================================\n");
1196 printk( "[ INFO: %s-safe -> %s-unsafe lock order detected ]\n",
1197 irqclass, irqclass);
1198 print_kernel_version();
1199 printk( "------------------------------------------------------\n");
1200 printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
1201 curr->comm, task_pid_nr(curr),
1202 curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT,
1203 curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
1204 curr->hardirqs_enabled,
1205 curr->softirqs_enabled);
1208 printk("\nand this task is already holding:\n");
1210 printk("which would create a new lock dependency:\n");
1211 print_lock_name(hlock_class(prev));
1213 print_lock_name(hlock_class(next));
1216 printk("\nbut this new dependency connects a %s-irq-safe lock:\n",
1218 print_lock_name(backwards_match);
1219 printk("\n... which became %s-irq-safe at:\n", irqclass);
1221 print_stack_trace(backwards_match->usage_traces + bit1, 1);
1223 printk("\nto a %s-irq-unsafe lock:\n", irqclass);
1224 print_lock_name(forwards_match);
1225 printk("\n... which became %s-irq-unsafe at:\n", irqclass);
1228 print_stack_trace(forwards_match->usage_traces + bit2, 1);
1230 printk("\nother info that might help us debug this:\n\n");
1231 lockdep_print_held_locks(curr);
1233 printk("\nthe %s-irq-safe lock's dependencies:\n", irqclass);
1234 print_lock_dependencies(backwards_match, 0);
1236 printk("\nthe %s-irq-unsafe lock's dependencies:\n", irqclass);
1237 print_lock_dependencies(forwards_match, 0);
1239 printk("\nstack backtrace:\n");
1246 check_usage(struct task_struct *curr, struct held_lock *prev,
1247 struct held_lock *next, enum lock_usage_bit bit_backwards,
1248 enum lock_usage_bit bit_forwards, const char *irqclass)
1252 find_usage_bit = bit_backwards;
1253 /* fills in <backwards_match> */
1254 ret = find_usage_backwards(hlock_class(prev), 0);
1255 if (!ret || ret == 1)
1258 find_usage_bit = bit_forwards;
1259 ret = find_usage_forwards(hlock_class(next), 0);
1260 if (!ret || ret == 1)
1263 return print_bad_irq_dependency(curr, prev, next,
1264 bit_backwards, bit_forwards, irqclass);
1268 check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
1269 struct held_lock *next)
1272 * Prove that the new dependency does not connect a hardirq-safe
1273 * lock with a hardirq-unsafe lock - to achieve this we search
1274 * the backwards-subgraph starting at <prev>, and the
1275 * forwards-subgraph starting at <next>:
1277 if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ,
1278 LOCK_ENABLED_HARDIRQS, "hard"))
1282 * Prove that the new dependency does not connect a hardirq-safe-read
1283 * lock with a hardirq-unsafe lock - to achieve this we search
1284 * the backwards-subgraph starting at <prev>, and the
1285 * forwards-subgraph starting at <next>:
1287 if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ_READ,
1288 LOCK_ENABLED_HARDIRQS, "hard-read"))
1292 * Prove that the new dependency does not connect a softirq-safe
1293 * lock with a softirq-unsafe lock - to achieve this we search
1294 * the backwards-subgraph starting at <prev>, and the
1295 * forwards-subgraph starting at <next>:
1297 if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ,
1298 LOCK_ENABLED_SOFTIRQS, "soft"))
1301 * Prove that the new dependency does not connect a softirq-safe-read
1302 * lock with a softirq-unsafe lock - to achieve this we search
1303 * the backwards-subgraph starting at <prev>, and the
1304 * forwards-subgraph starting at <next>:
1306 if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ_READ,
1307 LOCK_ENABLED_SOFTIRQS, "soft-read"))
1313 static void inc_chains(void)
1315 if (current->hardirq_context)
1316 nr_hardirq_chains++;
1318 if (current->softirq_context)
1319 nr_softirq_chains++;
1321 nr_process_chains++;
1328 check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
1329 struct held_lock *next)
1334 static inline void inc_chains(void)
1336 nr_process_chains++;
1342 print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
1343 struct held_lock *next)
1345 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1348 printk("\n=============================================\n");
1349 printk( "[ INFO: possible recursive locking detected ]\n");
1350 print_kernel_version();
1351 printk( "---------------------------------------------\n");
1352 printk("%s/%d is trying to acquire lock:\n",
1353 curr->comm, task_pid_nr(curr));
1355 printk("\nbut task is already holding lock:\n");
1358 printk("\nother info that might help us debug this:\n");
1359 lockdep_print_held_locks(curr);
1361 printk("\nstack backtrace:\n");
1368 * Check whether we are holding such a class already.
1370 * (Note that this has to be done separately, because the graph cannot
1371 * detect such classes of deadlocks.)
1373 * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
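/*
 * Illustrative example (editorial; "lock" is a hypothetical rwlock_t) of
 * what this check rejects and what it permits:
 *
 *	read_lock(&lock);  read_lock(&lock);	// ok: recursive read, returns 2
 *	write_lock(&lock); write_lock(&lock);	// same class held twice: deadlock
 *
 * Re-acquiring a class that is already held is only tolerated for recursive
 * readers, or when the nest_lock annotation serializes the nesting.
 */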
1376 check_deadlock(struct task_struct *curr, struct held_lock *next,
1377 struct lockdep_map *next_instance, int read)
1379 struct held_lock *prev;
1380 struct held_lock *nest = NULL;
1383 for (i = 0; i < curr->lockdep_depth; i++) {
1384 prev = curr->held_locks + i;
1386 if (prev->instance == next->nest_lock)
1389 if (hlock_class(prev) != hlock_class(next))
1393 * Allow read-after-read recursion of the same
1394 * lock class (i.e. read_lock(lock)+read_lock(lock)):
1396 if ((read == 2) && prev->read)
1400 * We're holding the nest_lock, which serializes this lock's
1401 * nesting behaviour.
1406 return print_deadlock_bug(curr, prev, next);
1412 * There was a chain-cache miss, and we are about to add a new dependency
1413 * to a previous lock. We recursively validate the following rules:
1415 * - would the adding of the <prev> -> <next> dependency create a
1416 * circular dependency in the graph? [== circular deadlock]
1418 * - does the new prev->next dependency connect any hardirq-safe lock
1419 * (in the full backwards-subgraph starting at <prev>) with any
1420 * hardirq-unsafe lock (in the full forwards-subgraph starting at
1421 * <next>)? [== illegal lock inversion with hardirq contexts]
1423 * - does the new prev->next dependency connect any softirq-safe lock
1424 * (in the full backwards-subgraph starting at <prev>) with any
1425 * softirq-unsafe lock (in the full forwards-subgraph starting at
1426 * <next>)? [== illegal lock inversion with softirq contexts]
1428 * any of these scenarios could lead to a deadlock.
1430 * Then if all the validations pass, we add the forwards and backwards
1434 check_prev_add(struct task_struct *curr, struct held_lock *prev,
1435 struct held_lock *next, int distance)
1437 struct lock_list *entry;
1441 * Prove that the new <prev> -> <next> dependency would not
1442 * create a circular dependency in the graph. (We do this by
1443 * forward-recursing into the graph starting at <next>, and
1444 * checking whether we can reach <prev>.)
1446 * We are using global variables to control the recursion, to
1447 * keep the stackframe size of the recursive functions low:
1449 check_source = next;
1450 check_target = prev;
1451 if (!(check_noncircular(hlock_class(next), 0)))
1452 return print_circular_bug_tail();
1454 if (!check_prev_add_irq(curr, prev, next))
1458 * For recursive read-locks we do all the dependency checks,
1459 * but we don't store read-triggered dependencies (only
1460 * write-triggered dependencies). This ensures that only the
1461 * write-side dependencies matter, and that if for example a
1462 * write-lock never takes any other locks, then the reads are
1463 * equivalent to a NOP.
1465 if (next->read == 2 || prev->read == 2)
1468 * Is the <prev> -> <next> dependency already present?
1470 * (this may occur even though this is a new chain: consider
1471 * e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3
1472 * chains - the second one will be new, but L1 already has
1473 * L2 added to its dependency list, due to the first chain.)
1475 list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) {
1476 if (entry->class == hlock_class(next)) {
1478 entry->distance = 1;
1484 * Ok, all validations passed, add the new lock
1485 * to the previous lock's dependency list:
1487 ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
1488 &hlock_class(prev)->locks_after,
1489 next->acquire_ip, distance);
1494 ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
1495 &hlock_class(next)->locks_before,
1496 next->acquire_ip, distance);
1501 * Debugging printouts:
1503 if (verbose(hlock_class(prev)) || verbose(hlock_class(next))) {
1505 printk("\n new dependency: ");
1506 print_lock_name(hlock_class(prev));
1508 print_lock_name(hlock_class(next));
1511 return graph_lock();
1517 * Add the dependency to all directly-previous locks that are 'relevant'.
1518 * The ones that are relevant are (in increasing distance from curr):
1519 * all consecutive trylock entries and the final non-trylock entry - or
1520 * the end of this context's lock-chain - whichever comes first.
1523 check_prevs_add(struct task_struct *curr, struct held_lock *next)
1525 int depth = curr->lockdep_depth;
1526 struct held_lock *hlock;
1531 * Depth must not be zero for a non-head lock:
1536 * At least two relevant locks must exist for this
1539 if (curr->held_locks[depth].irq_context !=
1540 curr->held_locks[depth-1].irq_context)
1544 int distance = curr->lockdep_depth - depth + 1;
1545 hlock = curr->held_locks + depth-1;
1547 * Only non-recursive-read entries get new dependencies
1550 if (hlock->read != 2) {
1551 if (!check_prev_add(curr, hlock, next, distance))
1554 * Stop after the first non-trylock entry,
1555 * as non-trylock entries have added their
1556 * own direct dependencies already, so this
1557 * lock is connected to them indirectly:
1559 if (!hlock->trylock)
1564 * End of lock-stack?
1569 * Stop the search if we cross into another context:
1571 if (curr->held_locks[depth].irq_context !=
1572 curr->held_locks[depth-1].irq_context)
1577 if (!debug_locks_off_graph_unlock())
1585 unsigned long nr_lock_chains;
1586 struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
1587 int nr_chain_hlocks;
1588 static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
1590 struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
1592 return lock_classes + chain_hlocks[chain->base + i];
1596 * Look up a dependency chain. If the key is not present yet then
1597 * add it and return 1 - in this case the new dependency chain is
1598 * validated. If the key is already hashed, return 0.
1599 * (On return with 1 graph_lock is held.)
1601 static inline int lookup_chain_cache(struct task_struct *curr,
1602 struct held_lock *hlock,
1605 struct lock_class *class = hlock_class(hlock);
1606 struct list_head *hash_head = chainhashentry(chain_key);
1607 struct lock_chain *chain;
1608 struct held_lock *hlock_curr, *hlock_next;
1611 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
1614 * We can walk it lock-free, because entries only get added to the hash:
1617 list_for_each_entry(chain, hash_head, entry) {
1618 if (chain->chain_key == chain_key) {
1620 debug_atomic_inc(&chain_lookup_hits);
1621 if (very_verbose(class))
1622 printk("\nhash chain already cached, key: "
1623 "%016Lx tail class: [%p] %s\n",
1624 (unsigned long long)chain_key,
1625 class->key, class->name);
1629 if (very_verbose(class))
1630 printk("\nnew hash chain, key: %016Lx tail class: [%p] %s\n",
1631 (unsigned long long)chain_key, class->key, class->name);
1633 * Allocate a new chain entry from the static array, and add
1639 * We have to walk the chain again locked - to avoid duplicates:
1641 list_for_each_entry(chain, hash_head, entry) {
1642 if (chain->chain_key == chain_key) {
1647 if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) {
1648 if (!debug_locks_off_graph_unlock())
1651 printk("BUG: MAX_LOCKDEP_CHAINS too low!\n");
1652 printk("turning off the locking correctness validator.\n");
1655 chain = lock_chains + nr_lock_chains++;
1656 chain->chain_key = chain_key;
1657 chain->irq_context = hlock->irq_context;
1658 /* Find the first held_lock of current chain */
1660 for (i = curr->lockdep_depth - 1; i >= 0; i--) {
1661 hlock_curr = curr->held_locks + i;
1662 if (hlock_curr->irq_context != hlock_next->irq_context)
1667 chain->depth = curr->lockdep_depth + 1 - i;
1668 cn = nr_chain_hlocks;
1669 while (cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS) {
1670 n = cmpxchg(&nr_chain_hlocks, cn, cn + chain->depth);
1675 if (likely(cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
1677 for (j = 0; j < chain->depth - 1; j++, i++) {
1678 int lock_id = curr->held_locks[i].class_idx - 1;
1679 chain_hlocks[chain->base + j] = lock_id;
1681 chain_hlocks[chain->base + j] = class - lock_classes;
1683 list_add_tail_rcu(&chain->entry, hash_head);
1684 debug_atomic_inc(&chain_lookup_misses);
1690 static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
1691 struct held_lock *hlock, int chain_head, u64 chain_key)
1694 * Trylock needs to maintain the stack of held locks, but it
1695 * does not add new dependencies, because trylock can be done in any order.
1698 * We look up the chain_key and do the O(N^2) check and update of
1699 * the dependencies only if this is a new dependency chain.
1700 * (If lookup_chain_cache() returns with 1 it acquires
1701 * graph_lock for us)
1703 if (!hlock->trylock && (hlock->check == 2) &&
1704 lookup_chain_cache(curr, hlock, chain_key)) {
1706 * Check whether last held lock:
1708 * - is irq-safe, if this lock is irq-unsafe
1709 * - is softirq-safe, if this lock is hardirq-unsafe
1711 * And check whether the new lock's dependency graph
1712 * could lead back to the previous lock.
1714 * any of these scenarios could lead to a deadlock. If
1717 int ret = check_deadlock(curr, hlock, lock, hlock->read);
1722 * Mark recursive read, as we jump over it when
1723 * building dependencies (just like we jump over
1729 * Add dependency only if this lock is not the head
1730 * of the chain, and if it's not a secondary read-lock:
1732 if (!chain_head && ret != 2)
1733 if (!check_prevs_add(curr, hlock))
1737 /* after lookup_chain_cache(): */
1738 if (unlikely(!debug_locks))
1744 static inline int validate_chain(struct task_struct *curr,
1745 struct lockdep_map *lock, struct held_lock *hlock,
1746 int chain_head, u64 chain_key)
1753 * We are building curr_chain_key incrementally, so double-check
1754 * it from scratch, to make sure that it's done correctly:
1756 static void check_chain_key(struct task_struct *curr)
1758 #ifdef CONFIG_DEBUG_LOCKDEP
1759 struct held_lock *hlock, *prev_hlock = NULL;
1763 for (i = 0; i < curr->lockdep_depth; i++) {
1764 hlock = curr->held_locks + i;
1765 if (chain_key != hlock->prev_chain_key) {
1767 WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n",
1768 curr->lockdep_depth, i,
1769 (unsigned long long)chain_key,
1770 (unsigned long long)hlock->prev_chain_key);
1773 id = hlock->class_idx - 1;
1774 if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
1777 if (prev_hlock && (prev_hlock->irq_context !=
1778 hlock->irq_context))
1780 chain_key = iterate_chain_key(chain_key, id);
1783 if (chain_key != curr->curr_chain_key) {
1785 WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n",
1786 curr->lockdep_depth, i,
1787 (unsigned long long)chain_key,
1788 (unsigned long long)curr->curr_chain_key);
1794 print_usage_bug(struct task_struct *curr, struct held_lock *this,
1795 enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
1797 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1800 printk("\n=================================\n");
1801 printk( "[ INFO: inconsistent lock state ]\n");
1802 print_kernel_version();
1803 printk( "---------------------------------\n");
1805 printk("inconsistent {%s} -> {%s} usage.\n",
1806 usage_str[prev_bit], usage_str[new_bit]);
1808 printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
1809 curr->comm, task_pid_nr(curr),
1810 trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
1811 trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
1812 trace_hardirqs_enabled(curr),
1813 trace_softirqs_enabled(curr));
1816 printk("{%s} state was registered at:\n", usage_str[prev_bit]);
1817 print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1);
1819 print_irqtrace_events(curr);
1820 printk("\nother info that might help us debug this:\n");
1821 lockdep_print_held_locks(curr);
1823 printk("\nstack backtrace:\n");
1830 * Print out an error if an invalid bit is set:
1833 valid_state(struct task_struct *curr, struct held_lock *this,
1834 enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
1836 if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit)))
1837 return print_usage_bug(curr, this, bad_bit, new_bit);
1841 static int mark_lock(struct task_struct *curr, struct held_lock *this,
1842 enum lock_usage_bit new_bit);
1844 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
1847 * print irq inversion bug:
1850 print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
1851 struct held_lock *this, int forwards,
1852 const char *irqclass)
1854 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1857 printk("\n=========================================================\n");
1858 printk( "[ INFO: possible irq lock inversion dependency detected ]\n");
1859 print_kernel_version();
1860 printk( "---------------------------------------------------------\n");
1861 printk("%s/%d just changed the state of lock:\n",
1862 curr->comm, task_pid_nr(curr));
1865 printk("but this lock took another, %s-irq-unsafe lock in the past:\n", irqclass);
1867 printk("but this lock was taken by another, %s-irq-safe lock in the past:\n", irqclass);
1868 print_lock_name(other);
1869 printk("\n\nand interrupts could create inverse lock ordering between them.\n\n");
1871 printk("\nother info that might help us debug this:\n");
1872 lockdep_print_held_locks(curr);
1874 printk("\nthe first lock's dependencies:\n");
1875 print_lock_dependencies(hlock_class(this), 0);
1877 printk("\nthe second lock's dependencies:\n");
1878 print_lock_dependencies(other, 0);
1880 printk("\nstack backtrace:\n");
1887 * Prove that in the forwards-direction subgraph starting at <this>
1888 * there is no lock matching <mask>:
1891 check_usage_forwards(struct task_struct *curr, struct held_lock *this,
1892 enum lock_usage_bit bit, const char *irqclass)
1896 find_usage_bit = bit;
1897 /* fills in <forwards_match> */
1898 ret = find_usage_forwards(hlock_class(this), 0);
1899 if (!ret || ret == 1)
1902 return print_irq_inversion_bug(curr, forwards_match, this, 1, irqclass);
1906 * Prove that in the backwards-direction subgraph starting at <this>
1907 * there is no lock matching <mask>:
1910 check_usage_backwards(struct task_struct *curr, struct held_lock *this,
1911 enum lock_usage_bit bit, const char *irqclass)
1915 find_usage_bit = bit;
1916 /* fills in <backwards_match> */
1917 ret = find_usage_backwards(hlock_class(this), 0);
1918 if (!ret || ret == 1)
1921 return print_irq_inversion_bug(curr, backwards_match, this, 0, irqclass);
1924 void print_irqtrace_events(struct task_struct *curr)
1926 printk("irq event stamp: %u\n", curr->irq_events);
1927 printk("hardirqs last enabled at (%u): ", curr->hardirq_enable_event);
1928 print_ip_sym(curr->hardirq_enable_ip);
1929 printk("hardirqs last disabled at (%u): ", curr->hardirq_disable_event);
1930 print_ip_sym(curr->hardirq_disable_ip);
1931 printk("softirqs last enabled at (%u): ", curr->softirq_enable_event);
1932 print_ip_sym(curr->softirq_enable_ip);
1933 printk("softirqs last disabled at (%u): ", curr->softirq_disable_event);
1934 print_ip_sym(curr->softirq_disable_ip);
1937 static int hardirq_verbose(struct lock_class *class)
1940 return class_filter(class);
1945 static int softirq_verbose(struct lock_class *class)
1948 return class_filter(class);
1953 #define STRICT_READ_CHECKS 1
1955 static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
1956 enum lock_usage_bit new_bit)
1961 case LOCK_USED_IN_HARDIRQ:
1962 if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS))
1964 if (!valid_state(curr, this, new_bit,
1965 LOCK_ENABLED_HARDIRQS_READ))
1968 * just marked it hardirq-safe, check that this lock
1969 * took no hardirq-unsafe lock in the past:
1971 if (!check_usage_forwards(curr, this,
1972 LOCK_ENABLED_HARDIRQS, "hard"))
1974 #if STRICT_READ_CHECKS
1976 * just marked it hardirq-safe, check that this lock
1977 * took no hardirq-unsafe-read lock in the past:
1979 if (!check_usage_forwards(curr, this,
1980 LOCK_ENABLED_HARDIRQS_READ, "hard-read"))
1983 if (hardirq_verbose(hlock_class(this)))
1986 case LOCK_USED_IN_SOFTIRQ:
1987 if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQS))
1989 if (!valid_state(curr, this, new_bit,
1990 LOCK_ENABLED_SOFTIRQS_READ))
1993 * just marked it softirq-safe, check that this lock
1994 * took no softirq-unsafe lock in the past:
1996 if (!check_usage_forwards(curr, this,
1997 LOCK_ENABLED_SOFTIRQS, "soft"))
1999 #if STRICT_READ_CHECKS
2001 * just marked it softirq-safe, check that this lock
2002 * took no softirq-unsafe-read lock in the past:
2004 if (!check_usage_forwards(curr, this,
2005 LOCK_ENABLED_SOFTIRQS_READ, "soft-read"))
2008 if (softirq_verbose(hlock_class(this)))
2011 case LOCK_USED_IN_HARDIRQ_READ:
2012 if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS))
2015 * just marked it hardirq-read-safe, check that this lock
2016 * took no hardirq-unsafe lock in the past:
2018 if (!check_usage_forwards(curr, this,
2019 LOCK_ENABLED_HARDIRQS, "hard"))
2021 if (hardirq_verbose(hlock_class(this)))
2024 case LOCK_USED_IN_SOFTIRQ_READ:
2025 if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQS))
2028 * just marked it softirq-read-safe, check that this lock
2029 * took no softirq-unsafe lock in the past:
2031 if (!check_usage_forwards(curr, this,
2032 LOCK_ENABLED_SOFTIRQS, "soft"))
2034 if (softirq_verbose(hlock_class(this)))
2037 case LOCK_ENABLED_HARDIRQS:
2038 if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ))
2040 if (!valid_state(curr, this, new_bit,
2041 LOCK_USED_IN_HARDIRQ_READ))
2044 * just marked it hardirq-unsafe, check that no hardirq-safe
2045 * lock in the system ever took it in the past:
2047 if (!check_usage_backwards(curr, this,
2048 LOCK_USED_IN_HARDIRQ, "hard"))
2050 #if STRICT_READ_CHECKS
2052 * just marked it hardirq-unsafe, check that no
2053 * hardirq-safe-read lock in the system ever took
2056 if (!check_usage_backwards(curr, this,
2057 LOCK_USED_IN_HARDIRQ_READ, "hard-read"))
2060 if (hardirq_verbose(hlock_class(this)))
2063 case LOCK_ENABLED_SOFTIRQS:
2064 if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ))
2066 if (!valid_state(curr, this, new_bit,
2067 LOCK_USED_IN_SOFTIRQ_READ))
2070 * just marked it softirq-unsafe, check that no softirq-safe
2071 * lock in the system ever took it in the past:
2073 if (!check_usage_backwards(curr, this,
2074 LOCK_USED_IN_SOFTIRQ, "soft"))
2076 #if STRICT_READ_CHECKS
2078 * just marked it softirq-unsafe, check that no
2079 * softirq-safe-read lock in the system ever took
2082 if (!check_usage_backwards(curr, this,
2083 LOCK_USED_IN_SOFTIRQ_READ, "soft-read"))
2086 if (softirq_verbose(hlock_class(this)))
2089 case LOCK_ENABLED_HARDIRQS_READ:
2090 if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ))
2092 #if STRICT_READ_CHECKS
2094 * just marked it hardirq-read-unsafe, check that no
2095 * hardirq-safe lock in the system ever took it in the past:
2097 if (!check_usage_backwards(curr, this,
2098 LOCK_USED_IN_HARDIRQ, "hard"))
2101 if (hardirq_verbose(hlock_class(this)))
2104 case LOCK_ENABLED_SOFTIRQS_READ:
2105 if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ))
2107 #if STRICT_READ_CHECKS
2109 * just marked it softirq-read-unsafe, check that no
2110 * softirq-safe lock in the system ever took it in the past:
2112 if (!check_usage_backwards(curr, this,
2113 LOCK_USED_IN_SOFTIRQ, "soft"))
2116 if (softirq_verbose(hlock_class(this)))
2128 * Mark all held locks with a usage bit:
2131 mark_held_locks(struct task_struct *curr, int hardirq)
2133 enum lock_usage_bit usage_bit;
2134 struct held_lock *hlock;
2137 for (i = 0; i < curr->lockdep_depth; i++) {
2138 hlock = curr->held_locks + i;
2142 usage_bit = LOCK_ENABLED_HARDIRQS_READ;
2144 usage_bit = LOCK_ENABLED_HARDIRQS;
2147 usage_bit = LOCK_ENABLED_SOFTIRQS_READ;
2149 usage_bit = LOCK_ENABLED_SOFTIRQS;
2151 if (!mark_lock(curr, hlock, usage_bit))
2159 * Debugging helper: via this flag we know that we are in
2160 * 'early bootup code', and will warn about any invalid irqs-on event:
2162 static int early_boot_irqs_enabled;
2164 void early_boot_irqs_off(void)
2166 early_boot_irqs_enabled = 0;
2169 void early_boot_irqs_on(void)
2171 early_boot_irqs_enabled = 1;
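/*
 * Sketch of the intended call sequence (the caller named here is an
 * assumption; typically early boot code such as start_kernel()):
 *
 *	early_boot_irqs_off();		// from now on, any irqs-on event is a bug
 *	...				// early setup, hardirqs must stay disabled
 *	early_boot_irqs_on();		// right before interrupts are first enabled
 *	local_irq_enable();
 */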
2175 * Hardirqs will be enabled:
2177 void trace_hardirqs_on_caller(unsigned long ip)
2179 struct task_struct *curr = current;
2181 time_hardirqs_on(CALLER_ADDR0, ip);
2183 if (unlikely(!debug_locks || current->lockdep_recursion))
2186 if (DEBUG_LOCKS_WARN_ON(unlikely(!early_boot_irqs_enabled)))
2189 if (unlikely(curr->hardirqs_enabled)) {
2190 debug_atomic_inc(&redundant_hardirqs_on);
2193 /* we'll do an OFF -> ON transition: */
2194 curr->hardirqs_enabled = 1;
2196 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2198 if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
2201 * We are going to turn hardirqs on, so set the
2202 * usage bit for all held locks:
2204 if (!mark_held_locks(curr, 1))
2207 * If we have softirqs enabled, then set the usage
2208 * bit for all held locks. (disabled hardirqs prevented
2209 * this bit from being set before)
2211 if (curr->softirqs_enabled)
2212 if (!mark_held_locks(curr, 0))
2215 curr->hardirq_enable_ip = ip;
2216 curr->hardirq_enable_event = ++curr->irq_events;
2217 debug_atomic_inc(&hardirqs_on_events);
2219 EXPORT_SYMBOL(trace_hardirqs_on_caller);
2221 void trace_hardirqs_on(void)
2223 trace_hardirqs_on_caller(CALLER_ADDR0);
2225 EXPORT_SYMBOL(trace_hardirqs_on);
2228 * Hardirqs were disabled:
2230 void trace_hardirqs_off_caller(unsigned long ip)
2232 struct task_struct *curr = current;
2234 time_hardirqs_off(CALLER_ADDR0, ip);
2236 if (unlikely(!debug_locks || current->lockdep_recursion))
2239 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2242 if (curr->hardirqs_enabled) {
2244 * We have done an ON -> OFF transition:
2246 curr->hardirqs_enabled = 0;
2247 curr->hardirq_disable_ip = ip;
2248 curr->hardirq_disable_event = ++curr->irq_events;
2249 debug_atomic_inc(&hardirqs_off_events);
2251 debug_atomic_inc(&redundant_hardirqs_off);
2253 EXPORT_SYMBOL(trace_hardirqs_off_caller);
2255 void trace_hardirqs_off(void)
2257 trace_hardirqs_off_caller(CALLER_ADDR0);
2259 EXPORT_SYMBOL(trace_hardirqs_off);
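/*
 * These hooks are not called directly by ordinary kernel code; the
 * irq-flags tracing wrappers invoke them around the raw operations.
 * A rough sketch of that pattern (the exact wrappers live in the
 * irqflags/arch headers; shown here only as an assumption):
 *
 *	#define local_irq_enable() \
 *		do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0)
 *	#define local_irq_disable() \
 *		do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0)
 *
 * i.e. "on" is announced before interrupts are really enabled and "off"
 * after they are really disabled, so lockdep's view never goes stale.
 */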
2262 * Softirqs will be enabled:
2264 void trace_softirqs_on(unsigned long ip)
2266 struct task_struct *curr = current;
2268 if (unlikely(!debug_locks))
2271 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2274 if (curr->softirqs_enabled) {
2275 debug_atomic_inc(&redundant_softirqs_on);
2280 * We'll do an OFF -> ON transition:
2282 curr->softirqs_enabled = 1;
2283 curr->softirq_enable_ip = ip;
2284 curr->softirq_enable_event = ++curr->irq_events;
2285 debug_atomic_inc(&softirqs_on_events);
2287 * We are going to turn softirqs on, so set the
2288 * usage bit for all held locks, if hardirqs are
2291 if (curr->hardirqs_enabled)
2292 mark_held_locks(curr, 0);
2296 * Softirqs were disabled:
2298 void trace_softirqs_off(unsigned long ip)
2300 struct task_struct *curr = current;
2302 if (unlikely(!debug_locks))
2305 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2308 if (curr->softirqs_enabled) {
2310 * We have done an ON -> OFF transition:
2312 curr->softirqs_enabled = 0;
2313 curr->softirq_disable_ip = ip;
2314 curr->softirq_disable_event = ++curr->irq_events;
2315 debug_atomic_inc(&softirqs_off_events);
2316 DEBUG_LOCKS_WARN_ON(!softirq_count());
2318 debug_atomic_inc(&redundant_softirqs_off);
2321 static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
2324 * If this is a non-trylock acquisition in a hardirq or softirq
2325 * context, then mark the lock as used in these contexts:
2327 if (!hlock->trylock) {
2329 if (curr->hardirq_context)
2330 if (!mark_lock(curr, hlock,
2331 LOCK_USED_IN_HARDIRQ_READ))
2333 if (curr->softirq_context)
2334 if (!mark_lock(curr, hlock,
2335 LOCK_USED_IN_SOFTIRQ_READ))
2338 if (curr->hardirq_context)
2339 if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
2341 if (curr->softirq_context)
2342 if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ))
2346 if (!hlock->hardirqs_off) {
2348 if (!mark_lock(curr, hlock,
2349 LOCK_ENABLED_HARDIRQS_READ))
2351 if (curr->softirqs_enabled)
2352 if (!mark_lock(curr, hlock,
2353 LOCK_ENABLED_SOFTIRQS_READ))
2356 if (!mark_lock(curr, hlock,
2357 LOCK_ENABLED_HARDIRQS))
2359 if (curr->softirqs_enabled)
2360 if (!mark_lock(curr, hlock,
2361 LOCK_ENABLED_SOFTIRQS))
2369 static int separate_irq_context(struct task_struct *curr,
2370 struct held_lock *hlock)
2372 unsigned int depth = curr->lockdep_depth;
2375 * Keep track of points where we cross into an interrupt context:
2377 hlock->irq_context = 2*(curr->hardirq_context ? 1 : 0) +
2378 curr->softirq_context;
2380 struct held_lock *prev_hlock;
2382 prev_hlock = curr->held_locks + depth-1;
2384 * If we cross into another context, reset the
2385 * hash key (this also prevents the checking and the
2386 * adding of the dependency to 'prev'):
2388 if (prev_hlock->irq_context != hlock->irq_context)
2397 int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
2398 enum lock_usage_bit new_bit)
2404 static inline int mark_irqflags(struct task_struct *curr,
2405 struct held_lock *hlock)
2410 static inline int separate_irq_context(struct task_struct *curr,
2411 struct held_lock *hlock)
2419 * Mark a lock with a usage bit, and validate the state transition:
2421 static int mark_lock(struct task_struct *curr, struct held_lock *this,
2422 enum lock_usage_bit new_bit)
2424 unsigned int new_mask = 1 << new_bit, ret = 1;
2427 * If already set then do not dirty the cacheline,
2428 * nor do any checks:
2430 if (likely(hlock_class(this)->usage_mask & new_mask))
2436 * Make sure we didn't race:
2438 if (unlikely(hlock_class(this)->usage_mask & new_mask)) {
2443 hlock_class(this)->usage_mask |= new_mask;
2445 if (!save_trace(hlock_class(this)->usage_traces + new_bit))
2449 case LOCK_USED_IN_HARDIRQ:
2450 case LOCK_USED_IN_SOFTIRQ:
2451 case LOCK_USED_IN_HARDIRQ_READ:
2452 case LOCK_USED_IN_SOFTIRQ_READ:
2453 case LOCK_ENABLED_HARDIRQS:
2454 case LOCK_ENABLED_SOFTIRQS:
2455 case LOCK_ENABLED_HARDIRQS_READ:
2456 case LOCK_ENABLED_SOFTIRQS_READ:
2457 ret = mark_lock_irq(curr, this, new_bit);
2462 debug_atomic_dec(&nr_unused_locks);
2465 if (!debug_locks_off_graph_unlock())
2474 * We must printk outside of the graph_lock:
2477 printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
2479 print_irqtrace_events(curr);
2487 * Initialize a lock instance's lock-class mapping info:
2489 void lockdep_init_map(struct lockdep_map *lock, const char *name,
2490 struct lock_class_key *key, int subclass)
2492 if (unlikely(!debug_locks))
2495 if (DEBUG_LOCKS_WARN_ON(!key))
2497 if (DEBUG_LOCKS_WARN_ON(!name))
2500 * Sanity check, the lock-class key must be persistent:
2502 if (!static_obj(key)) {
2503 printk("BUG: key %p not in .data!\n", key);
2504 DEBUG_LOCKS_WARN_ON(1);
2509 lock->class_cache = NULL;
2510 #ifdef CONFIG_LOCK_STAT
2511 lock->cpu = raw_smp_processor_id();
2514 register_lock_class(lock, subclass, 1);
2517 EXPORT_SYMBOL_GPL(lockdep_init_map);
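/*
 * Illustrative usage sketch for lockdep_init_map(): a hypothetical lock
 * type wiring itself up to the validator.  The key must be a persistent
 * (static) object - which is why init helpers typically hide a static
 * struct lock_class_key behind a macro, so that every init site gets its
 * own lock class.  struct my_lock and my_lock_init() are made-up names;
 * low-level lock word initialization is omitted.
 */
struct my_lock {
	raw_spinlock_t		raw;
	struct lockdep_map	dep_map;
};

#define my_lock_init(l)						\
do {								\
	static struct lock_class_key __key;			\
								\
	/* one static key per init site -> one class per site */ \
	lockdep_init_map(&(l)->dep_map, #l, &__key, 0);		\
} while (0)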
2520 * This gets called for every mutex_lock*()/spin_lock*() operation.
2521 * We maintain the dependency maps and validate the locking attempt:
2523 static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
2524 int trylock, int read, int check, int hardirqs_off,
2525 struct lockdep_map *nest_lock, unsigned long ip)
2527 struct task_struct *curr = current;
2528 struct lock_class *class = NULL;
2529 struct held_lock *hlock;
2530 unsigned int depth, id;
2537 if (unlikely(!debug_locks))
2540 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2543 if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
2545 printk("BUG: MAX_LOCKDEP_SUBCLASSES too low!\n");
2546 printk("turning off the locking correctness validator.\n");
2551 class = lock->class_cache;
2553 * Not cached yet or subclass?
2555 if (unlikely(!class)) {
2556 class = register_lock_class(lock, subclass, 0);
2560 debug_atomic_inc((atomic_t *)&class->ops);
2561 if (very_verbose(class)) {
2562 printk("\nacquire class [%p] %s", class->key, class->name);
2563 if (class->name_version > 1)
2564 printk("#%d", class->name_version);
2570 * Add the lock to the list of currently held locks.
2571 * (we don't increase the depth just yet, up until the
2572 * dependency checks are done)
2574 depth = curr->lockdep_depth;
2575 if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
2578 hlock = curr->held_locks + depth;
2579 if (DEBUG_LOCKS_WARN_ON(!class))
2581 hlock->class_idx = class - lock_classes + 1;
2582 hlock->acquire_ip = ip;
2583 hlock->instance = lock;
2584 hlock->nest_lock = nest_lock;
2585 hlock->trylock = trylock;
2587 hlock->check = check;
2588 hlock->hardirqs_off = !!hardirqs_off;
2589 #ifdef CONFIG_LOCK_STAT
2590 hlock->waittime_stamp = 0;
2591 hlock->holdtime_stamp = sched_clock();
2594 if (check == 2 && !mark_irqflags(curr, hlock))
2597 /* mark it as used: */
2598 if (!mark_lock(curr, hlock, LOCK_USED))
2602 * Calculate the chain hash: it's the combined hash of all the
2603 * lock keys along the dependency chain. We save the hash value
2604 * at every step so that we can get the current hash easily
2605 * after unlock. The chain hash is then used to cache dependency
2606 * chains (see the illustrative sketch after this function).
2608 * The 'key ID' is the most compact key value to drive
2609 * the hash, not class->key.
2611 id = class - lock_classes;
2612 if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
2615 chain_key = curr->curr_chain_key;
2617 if (DEBUG_LOCKS_WARN_ON(chain_key != 0))
2622 hlock->prev_chain_key = chain_key;
2623 if (separate_irq_context(curr, hlock)) {
2627 chain_key = iterate_chain_key(chain_key, id);
2629 if (!validate_chain(curr, lock, hlock, chain_head, chain_key))
2632 curr->curr_chain_key = chain_key;
2633 curr->lockdep_depth++;
2634 check_chain_key(curr);
2635 #ifdef CONFIG_DEBUG_LOCKDEP
2636 if (unlikely(!debug_locks))
2639 if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
2641 printk("BUG: MAX_LOCK_DEPTH too low!\n");
2642 printk("turning off the locking correctness validator.\n");
2646 if (unlikely(curr->lockdep_depth > max_lockdep_depth))
2647 max_lockdep_depth = curr->lockdep_depth;
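/*
 * Illustrative sketch of the chain-key idea referenced above; the real
 * iterate_chain_key() is defined earlier in this file and uses different
 * mixing constants.  The key is a 64-bit rolling hash folded over the
 * class ids of the held locks in acquisition order, so an identical
 * sequence of classes always yields the same key and can be looked up in
 * the chain hash without re-walking the dependency graph.
 */
static inline u64 example_iterate_chain_key(u64 key, unsigned int class_id)
{
	/* rotate the previous key, then mix in the new class id */
	return ((key << 13) | (key >> (64 - 13))) ^ class_id;
}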
2653 print_unlock_inbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
2656 if (!debug_locks_off())
2658 if (debug_locks_silent)
2661 printk("\n=====================================\n");
2662 printk( "[ BUG: bad unlock balance detected! ]\n");
2663 printk( "-------------------------------------\n");
2664 printk("%s/%d is trying to release lock (",
2665 curr->comm, task_pid_nr(curr));
2666 print_lockdep_cache(lock);
2669 printk("but there are no more locks to release!\n");
2670 printk("\nother info that might help us debug this:\n");
2671 lockdep_print_held_locks(curr);
2673 printk("\nstack backtrace:\n");
2680 * Common debugging checks for both nested and non-nested unlock:
2682 static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
2685 if (unlikely(!debug_locks))
2687 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2690 if (curr->lockdep_depth <= 0)
2691 return print_unlock_inbalance_bug(curr, lock, ip);
2697 __lock_set_subclass(struct lockdep_map *lock,
2698 unsigned int subclass, unsigned long ip)
2700 struct task_struct *curr = current;
2701 struct held_lock *hlock, *prev_hlock;
2702 struct lock_class *class;
2706 depth = curr->lockdep_depth;
2707 if (DEBUG_LOCKS_WARN_ON(!depth))
2711 for (i = depth-1; i >= 0; i--) {
2712 hlock = curr->held_locks + i;
2714 * We must not cross into another context:
2716 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
2718 if (hlock->instance == lock)
2722 return print_unlock_inbalance_bug(curr, lock, ip);
2725 class = register_lock_class(lock, subclass, 0);
2726 hlock->class_idx = class - lock_classes + 1;
2728 curr->lockdep_depth = i;
2729 curr->curr_chain_key = hlock->prev_chain_key;
2731 for (; i < depth; i++) {
2732 hlock = curr->held_locks + i;
2733 if (!__lock_acquire(hlock->instance,
2734 hlock_class(hlock)->subclass, hlock->trylock,
2735 hlock->read, hlock->check, hlock->hardirqs_off,
2736 hlock->nest_lock, hlock->acquire_ip))
2740 if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
2746 * Remove the lock from the list of currently held locks in a
2747 * potentially non-nested (out of order) manner. This is a
2748 * relatively rare operation, as all the unlock APIs default
2749 * to nested mode (which uses lock_release()):
2752 lock_release_non_nested(struct task_struct *curr,
2753 struct lockdep_map *lock, unsigned long ip)
2755 struct held_lock *hlock, *prev_hlock;
2760 * Check whether the lock exists in the current stack of held locks:
2763 depth = curr->lockdep_depth;
2764 if (DEBUG_LOCKS_WARN_ON(!depth))
2768 for (i = depth-1; i >= 0; i--) {
2769 hlock = curr->held_locks + i;
2771 * We must not cross into another context:
2773 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
2775 if (hlock->instance == lock)
2779 return print_unlock_inbalance_bug(curr, lock, ip);
2782 lock_release_holdtime(hlock);
2785 * We have the right lock to unlock, 'hlock' points to it.
2786 * Now we remove it from the stack, and add back the other
2787 * entries (if any), recalculating the hash along the way:
2789 curr->lockdep_depth = i;
2790 curr->curr_chain_key = hlock->prev_chain_key;
2792 for (i++; i < depth; i++) {
2793 hlock = curr->held_locks + i;
2794 if (!__lock_acquire(hlock->instance,
2795 hlock_class(hlock)->subclass, hlock->trylock,
2796 hlock->read, hlock->check, hlock->hardirqs_off,
2797 hlock->nest_lock, hlock->acquire_ip))
2801 if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1))
2807 * Remove the lock from the list of currently held locks - this gets
2808 * called on mutex_unlock()/spin_unlock*() (or on a failed
2809 * mutex_lock_interruptible()). This is done for unlocks that nest
2810 * perfectly. (i.e. the current top of the lock-stack is unlocked)
2812 static int lock_release_nested(struct task_struct *curr,
2813 struct lockdep_map *lock, unsigned long ip)
2815 struct held_lock *hlock;
2819 * Pop off the top of the lock stack:
2821 depth = curr->lockdep_depth - 1;
2822 hlock = curr->held_locks + depth;
2825 * Is the unlock non-nested:
2827 if (hlock->instance != lock)
2828 return lock_release_non_nested(curr, lock, ip);
2829 curr->lockdep_depth--;
2831 if (DEBUG_LOCKS_WARN_ON(!depth && (hlock->prev_chain_key != 0)))
2834 curr->curr_chain_key = hlock->prev_chain_key;
2836 lock_release_holdtime(hlock);
2838 #ifdef CONFIG_DEBUG_LOCKDEP
2839 hlock->prev_chain_key = 0;
2840 hlock->class_idx = 0;
2841 hlock->acquire_ip = 0;
2842 hlock->irq_context = 0;
2848 * Remove the lock from the list of currently held locks - this gets
2849 * called on mutex_unlock()/spin_unlock*() (or on a failed
2850 * mutex_lock_interruptible()). This is done for unlocks that nest
2851 * perfectly. (i.e. the current top of the lock-stack is unlocked)
2854 __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
2856 struct task_struct *curr = current;
2858 if (!check_unlock(curr, lock, ip))
2862 if (!lock_release_nested(curr, lock, ip))
2865 if (!lock_release_non_nested(curr, lock, ip))
2869 check_chain_key(curr);
2873 * Check whether we follow the irq-flags state precisely:
2875 static void check_flags(unsigned long flags)
2877 #if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP) && \
2878 defined(CONFIG_TRACE_IRQFLAGS)
2882 if (irqs_disabled_flags(flags)) {
2883 if (DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled)) {
2884 printk("possible reason: unannotated irqs-off.\n");
2887 if (DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled)) {
2888 printk("possible reason: unannotated irqs-on.\n");
2893 * We don't accurately track softirq state in e.g.
2894 * hardirq contexts (such as on 4KSTACKS), so only
2895 * check if not in hardirq contexts:
2897 if (!hardirq_count()) {
2898 if (softirq_count())
2899 DEBUG_LOCKS_WARN_ON(current->softirqs_enabled);
2901 DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
2905 print_irqtrace_events(current);
2910 lock_set_subclass(struct lockdep_map *lock,
2911 unsigned int subclass, unsigned long ip)
2913 unsigned long flags;
2915 if (unlikely(current->lockdep_recursion))
2918 raw_local_irq_save(flags);
2919 current->lockdep_recursion = 1;
2921 if (__lock_set_subclass(lock, subclass, ip))
2922 check_chain_key(current);
2923 current->lockdep_recursion = 0;
2924 raw_local_irq_restore(flags);
2927 EXPORT_SYMBOL_GPL(lock_set_subclass);
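/*
 * Illustrative usage sketch for lock_set_subclass(), assuming a
 * lockdep-enabled spinlock_t (i.e. one that carries a dep_map): a caller
 * that took 'kept' with a nested subclass (e.g. spin_lock_nested(...,
 * SINGLE_DEPTH_NESTING) while another lock of the same class was held)
 * can switch it back to subclass 0 once the other lock is dropped, so
 * later dependencies are recorded against the normal class again.  The
 * function name is hypothetical.
 */
static void example_unlock_pair(spinlock_t *kept, spinlock_t *dropped)
{
	spin_unlock(dropped);
	/* 'kept' is still held; re-annotate it as a plain (subclass 0) lock */
	lock_set_subclass(&kept->dep_map, 0, _RET_IP_);
}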
2930 * We are not always called with irqs disabled - do that here,
2931 * and also avoid lockdep recursion:
2933 void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
2934 int trylock, int read, int check,
2935 struct lockdep_map *nest_lock, unsigned long ip)
2937 unsigned long flags;
2939 if (unlikely(current->lockdep_recursion))
2942 raw_local_irq_save(flags);
2945 current->lockdep_recursion = 1;
2946 __lock_acquire(lock, subclass, trylock, read, check,
2947 irqs_disabled_flags(flags), nest_lock, ip);
2948 current->lockdep_recursion = 0;
2949 raw_local_irq_restore(flags);
2952 EXPORT_SYMBOL_GPL(lock_acquire);
2954 void lock_release(struct lockdep_map *lock, int nested,
2957 unsigned long flags;
2959 if (unlikely(current->lockdep_recursion))
2962 raw_local_irq_save(flags);
2964 current->lockdep_recursion = 1;
2965 __lock_release(lock, nested, ip);
2966 current->lockdep_recursion = 0;
2967 raw_local_irq_restore(flags);
2970 EXPORT_SYMBOL_GPL(lock_release);
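/*
 * Illustrative sketch (not a verbatim copy of <linux/lockdep.h>) of how
 * the per-lock-type annotation wrappers are expected to funnel into the
 * two entry points above.  Argument order follows lock_acquire():
 * subclass, trylock, read, check, nest_lock, ip; read=0 means an
 * exclusive acquisition and check=2 requests full validation (see the
 * mark_irqflags() call in __lock_acquire()).  The example_* names are
 * made up.
 */
#define example_spin_acquire(l, s, t, i)	lock_acquire(l, s, t, 0, 2, NULL, i)
#define example_spin_release(l, n, i)		lock_release(l, n, i)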
2972 #ifdef CONFIG_LOCK_STAT
2974 print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
2977 if (!debug_locks_off())
2979 if (debug_locks_silent)
2982 printk("\n=================================\n");
2983 printk( "[ BUG: bad contention detected! ]\n");
2984 printk( "---------------------------------\n");
2985 printk("%s/%d is trying to contend lock (",
2986 curr->comm, task_pid_nr(curr));
2987 print_lockdep_cache(lock);
2990 printk("but there are no locks held!\n");
2991 printk("\nother info that might help us debug this:\n");
2992 lockdep_print_held_locks(curr);
2994 printk("\nstack backtrace:\n");
3001 __lock_contended(struct lockdep_map *lock, unsigned long ip)
3003 struct task_struct *curr = current;
3004 struct held_lock *hlock, *prev_hlock;
3005 struct lock_class_stats *stats;
3007 int i, contention_point, contending_point;
3009 depth = curr->lockdep_depth;
3010 if (DEBUG_LOCKS_WARN_ON(!depth))
3014 for (i = depth-1; i >= 0; i--) {
3015 hlock = curr->held_locks + i;
3017 * We must not cross into another context:
3019 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
3021 if (hlock->instance == lock)
3025 print_lock_contention_bug(curr, lock, ip);
3029 hlock->waittime_stamp = sched_clock();
3031 contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
3032 contending_point = lock_point(hlock_class(hlock)->contending_point,
3035 stats = get_lock_stats(hlock_class(hlock));
3036 if (contention_point < LOCKSTAT_POINTS)
3037 stats->contention_point[contention_point]++;
3038 if (contending_point < LOCKSTAT_POINTS)
3039 stats->contending_point[contending_point]++;
3040 if (lock->cpu != smp_processor_id())
3041 stats->bounces[bounce_contended + !!hlock->read]++;
3042 put_lock_stats(stats);
3046 __lock_acquired(struct lockdep_map *lock, unsigned long ip)
3048 struct task_struct *curr = current;
3049 struct held_lock *hlock, *prev_hlock;
3050 struct lock_class_stats *stats;
3056 depth = curr->lockdep_depth;
3057 if (DEBUG_LOCKS_WARN_ON(!depth))
3061 for (i = depth-1; i >= 0; i--) {
3062 hlock = curr->held_locks + i;
3064 * We must not cross into another context:
3066 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
3068 if (hlock->instance == lock)
3072 print_lock_contention_bug(curr, lock, _RET_IP_);
3076 cpu = smp_processor_id();
3077 if (hlock->waittime_stamp) {
3078 now = sched_clock();
3079 waittime = now - hlock->waittime_stamp;
3080 hlock->holdtime_stamp = now;
3083 stats = get_lock_stats(hlock_class(hlock));
3086 lock_time_inc(&stats->read_waittime, waittime);
3088 lock_time_inc(&stats->write_waittime, waittime);
3090 if (lock->cpu != cpu)
3091 stats->bounces[bounce_acquired + !!hlock->read]++;
3092 put_lock_stats(stats);
3098 void lock_contended(struct lockdep_map *lock, unsigned long ip)
3100 unsigned long flags;
3102 if (unlikely(!lock_stat))
3105 if (unlikely(current->lockdep_recursion))
3108 raw_local_irq_save(flags);
3110 current->lockdep_recursion = 1;
3111 __lock_contended(lock, ip);
3112 current->lockdep_recursion = 0;
3113 raw_local_irq_restore(flags);
3115 EXPORT_SYMBOL_GPL(lock_contended);
3117 void lock_acquired(struct lockdep_map *lock, unsigned long ip)
3119 unsigned long flags;
3121 if (unlikely(!lock_stat))
3124 if (unlikely(current->lockdep_recursion))
3127 raw_local_irq_save(flags);
3129 current->lockdep_recursion = 1;
3130 __lock_acquired(lock, ip);
3131 current->lockdep_recursion = 0;
3132 raw_local_irq_restore(flags);
3134 EXPORT_SYMBOL_GPL(lock_acquired);
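/*
 * Illustrative sketch of the intended call pattern for the two hooks
 * above, modelled on the LOCK_CONTENDED() helper (the EXAMPLE_ name is
 * made up, and the lock type is assumed to embed a dep_map): contention
 * is reported only when the fast path fails, and the final acquisition is
 * always reported so hold-time accounting can start.
 */
#define EXAMPLE_LOCK_CONTENDED(_lock, try, lock)		\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
} while (0)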
3138 * Used by the testsuite to sanitize the validator state
3139 * after a simulated failure:
3142 void lockdep_reset(void)
3144 unsigned long flags;
3147 raw_local_irq_save(flags);
3148 current->curr_chain_key = 0;
3149 current->lockdep_depth = 0;
3150 current->lockdep_recursion = 0;
3151 memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock));
3152 nr_hardirq_chains = 0;
3153 nr_softirq_chains = 0;
3154 nr_process_chains = 0;
3156 for (i = 0; i < CHAINHASH_SIZE; i++)
3157 INIT_LIST_HEAD(chainhash_table + i);
3158 raw_local_irq_restore(flags);
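/*
 * Illustrative sketch of the expected caller (the locking self-tests):
 * after a test case has deliberately tripped the validator, reset the
 * recorded state and switch checking back on so the next case starts
 * clean.  The function name is hypothetical.
 */
static void example_after_failed_testcase(void)
{
	lockdep_reset();	/* forget held locks, chains and per-task state */
	debug_locks = 1;	/* re-enable the validator for the next case */
}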
3161 static void zap_class(struct lock_class *class)
3166 * Remove all dependencies this lock is involved in:
3169 for (i = 0; i < nr_list_entries; i++) {
3170 if (list_entries[i].class == class)
3171 list_del_rcu(&list_entries[i].entry);
3174 * Unhash the class and remove it from the all_lock_classes list:
3176 list_del_rcu(&class->hash_entry);
3177 list_del_rcu(&class->lock_entry);
3182 static inline int within(const void *addr, void *start, unsigned long size)
3184 return addr >= start && addr < start + size;
3187 void lockdep_free_key_range(void *start, unsigned long size)
3189 struct lock_class *class, *next;
3190 struct list_head *head;
3191 unsigned long flags;
3195 raw_local_irq_save(flags);
3196 locked = graph_lock();
3199 * Unhash all classes that were created by this module:
3201 for (i = 0; i < CLASSHASH_SIZE; i++) {
3202 head = classhash_table + i;
3203 if (list_empty(head))
3205 list_for_each_entry_safe(class, next, head, hash_entry) {
3206 if (within(class->key, start, size))
3208 else if (within(class->name, start, size))
3215 raw_local_irq_restore(flags);
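/*
 * Illustrative sketch of the expected caller: when a module is unloaded,
 * lock-class keys and class names that live in the module image become
 * dangling, so the unload path should zap every class whose key or name
 * points into the range being freed.  The struct module field names used
 * here are an assumption for the example.
 */
static void example_free_module_classes(struct module *mod)
{
	lockdep_free_key_range(mod->module_init, mod->init_size);
	lockdep_free_key_range(mod->module_core, mod->core_size);
}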
3218 void lockdep_reset_lock(struct lockdep_map *lock)
3220 struct lock_class *class, *next;
3221 struct list_head *head;
3222 unsigned long flags;
3226 raw_local_irq_save(flags);
3229 * Remove all classes this lock might have:
3231 for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
3233 * If the class exists we look it up and zap it:
3235 class = look_up_lock_class(lock, j);
3240 * Debug check: in the end all mapped classes should be gone.
3243 locked = graph_lock();
3244 for (i = 0; i < CLASSHASH_SIZE; i++) {
3245 head = classhash_table + i;
3246 if (list_empty(head))
3248 list_for_each_entry_safe(class, next, head, hash_entry) {
3249 if (unlikely(class == lock->class_cache)) {
3250 if (debug_locks_off_graph_unlock())
3260 raw_local_irq_restore(flags);
3263 void lockdep_init(void)
3268 * Some architectures have their own start_kernel()
3269 * code which calls lockdep_init(), while we also
3270 * call lockdep_init() from start_kernel() itself,
3271 * and we want to initialize the hashes only once:
3273 if (lockdep_initialized)
3276 for (i = 0; i < CLASSHASH_SIZE; i++)
3277 INIT_LIST_HEAD(classhash_table + i);
3279 for (i = 0; i < CHAINHASH_SIZE; i++)
3280 INIT_LIST_HEAD(chainhash_table + i);
3282 lockdep_initialized = 1;
3285 void __init lockdep_info(void)
3287 printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");
3289 printk("... MAX_LOCKDEP_SUBCLASSES: %lu\n", MAX_LOCKDEP_SUBCLASSES);
3290 printk("... MAX_LOCK_DEPTH: %lu\n", MAX_LOCK_DEPTH);
3291 printk("... MAX_LOCKDEP_KEYS: %lu\n", MAX_LOCKDEP_KEYS);
3292 printk("... CLASSHASH_SIZE: %lu\n", CLASSHASH_SIZE);
3293 printk("... MAX_LOCKDEP_ENTRIES: %lu\n", MAX_LOCKDEP_ENTRIES);
3294 printk("... MAX_LOCKDEP_CHAINS: %lu\n", MAX_LOCKDEP_CHAINS);
3295 printk("... CHAINHASH_SIZE: %lu\n", CHAINHASH_SIZE);
3297 printk(" memory used by lock dependency info: %lu kB\n",
3298 (sizeof(struct lock_class) * MAX_LOCKDEP_KEYS +
3299 sizeof(struct list_head) * CLASSHASH_SIZE +
3300 sizeof(struct lock_list) * MAX_LOCKDEP_ENTRIES +
3301 sizeof(struct lock_chain) * MAX_LOCKDEP_CHAINS +
3302 sizeof(struct list_head) * CHAINHASH_SIZE) / 1024);
3304 printk(" per task-struct memory footprint: %lu bytes\n",
3305 sizeof(struct held_lock) * MAX_LOCK_DEPTH);
3307 #ifdef CONFIG_DEBUG_LOCKDEP
3308 if (lockdep_init_error) {
3309 printk("WARNING: lockdep init error! Arch code didn't call lockdep_init() early enough?\n");
3310 printk("Call stack leading to lockdep invocation was:\n");
3311 print_stack_trace(&lockdep_init_trace, 0);
3317 print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
3318 const void *mem_to, struct held_lock *hlock)
3320 if (!debug_locks_off())
3322 if (debug_locks_silent)
3325 printk("\n=========================\n");
3326 printk( "[ BUG: held lock freed! ]\n");
3327 printk( "-------------------------\n");
3328 printk("%s/%d is freeing memory %p-%p, with a lock still held there!\n",
3329 curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
3331 lockdep_print_held_locks(curr);
3333 printk("\nstack backtrace:\n");
3337 static inline int not_in_range(const void* mem_from, unsigned long mem_len,
3338 const void* lock_from, unsigned long lock_len)
3340 return lock_from + lock_len <= mem_from ||
3341 mem_from + mem_len <= lock_from;
3345 * Called when kernel memory is freed (or unmapped), or if a lock
3346 * is destroyed or reinitialized - this code checks whether there is
3347 * any held lock in the memory range of <from> to <to>:
3349 void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
3351 struct task_struct *curr = current;
3352 struct held_lock *hlock;
3353 unsigned long flags;
3356 if (unlikely(!debug_locks))
3359 local_irq_save(flags);
3360 for (i = 0; i < curr->lockdep_depth; i++) {
3361 hlock = curr->held_locks + i;
3363 if (not_in_range(mem_from, mem_len, hlock->instance,
3364 sizeof(*hlock->instance)))
3367 print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
3370 local_irq_restore(flags);
3372 EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
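/*
 * Illustrative sketch: an object embedding a lock should be checked before
 * its memory is released, otherwise a task still holding that lock is left
 * with a dangling held_locks entry.  The common allocator free paths
 * already do this; a hand-rolled release path can do the same.  All names
 * below are hypothetical, and kfree() needs <linux/slab.h>.
 */
struct example_obj {
	spinlock_t	lock;
	void		*payload;
};

static void example_obj_destroy(struct example_obj *obj)
{
	/* complain if any task still holds a lock inside *obj */
	debug_check_no_locks_freed(obj, sizeof(*obj));
	kfree(obj);
}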
3374 static void print_held_locks_bug(struct task_struct *curr)
3376 if (!debug_locks_off())
3378 if (debug_locks_silent)
3381 printk("\n=====================================\n");
3382 printk( "[ BUG: lock held at task exit time! ]\n");
3383 printk( "-------------------------------------\n");
3384 printk("%s/%d is exiting with locks still held!\n",
3385 curr->comm, task_pid_nr(curr));
3386 lockdep_print_held_locks(curr);
3388 printk("\nstack backtrace:\n");
3392 void debug_check_no_locks_held(struct task_struct *task)
3394 if (unlikely(task->lockdep_depth > 0))
3395 print_held_locks_bug(task);
3398 void debug_show_all_locks(void)
3400 struct task_struct *g, *p;
3404 if (unlikely(!debug_locks)) {
3405 printk("INFO: lockdep is turned off.\n");
3408 printk("\nShowing all locks held in the system:\n");
3411 * Here we try to get the tasklist_lock as hard as possible;
3412 * if not successful after 2 seconds we ignore it (but keep
3413 * trying). This is to enable a debug printout even if a
3414 * tasklist_lock-holding task deadlocks or crashes.
3417 if (!read_trylock(&tasklist_lock)) {
3419 printk("hm, tasklist_lock locked, retrying... ");
3422 printk(" #%d", 10-count);
3426 printk(" ignoring it.\n");
3430 printk(KERN_CONT " locked it.\n");
3433 do_each_thread(g, p) {
3435 * It's not reliable to print a task's held locks
3436 * if it's not sleeping (or if it's not the current task):
3439 if (p->state == TASK_RUNNING && p != current)
3441 if (p->lockdep_depth)
3442 lockdep_print_held_locks(p);
3444 if (read_trylock(&tasklist_lock))
3446 } while_each_thread(g, p);
3449 printk("=============================================\n\n");
3452 read_unlock(&tasklist_lock);
3455 EXPORT_SYMBOL_GPL(debug_show_all_locks);
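/*
 * Illustrative sketch: a debugging hook such as a SysRq handler can dump
 * every lock currently held in the system.  The handler signature below is
 * an assumption; only the debug_show_all_locks() call is the API shown
 * here.
 */
static void example_sysrq_show_locks(int key, struct tty_struct *tty)
{
	debug_show_all_locks();
}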
3458 * Careful: only use this function if you are sure that
3459 * the task cannot run in parallel!
3461 void __debug_show_held_locks(struct task_struct *task)
3463 if (unlikely(!debug_locks)) {
3464 printk("INFO: lockdep is turned off.\n");
3467 lockdep_print_held_locks(task);
3469 EXPORT_SYMBOL_GPL(__debug_show_held_locks);
3471 void debug_show_held_locks(struct task_struct *task)
3473 __debug_show_held_locks(task);
3476 EXPORT_SYMBOL_GPL(debug_show_held_locks);
3478 void lockdep_sys_exit(void)
3480 struct task_struct *curr = current;
3482 if (unlikely(curr->lockdep_depth)) {
3483 if (!debug_locks_off())
3485 printk("\n================================================\n");
3486 printk( "[ BUG: lock held when returning to user space! ]\n");
3487 printk( "------------------------------------------------\n");
3488 printk("%s/%d is leaving the kernel with locks still held!\n",
3489 curr->comm, curr->pid);
3490 lockdep_print_held_locks(curr);