X-Git-Url: http://pilppa.org/gitweb/?a=blobdiff_plain;f=include%2Flinux%2Flockdep.h;h=0e843bf658777114a3a664e1c978588ee623cc3f;hb=547307420931344a868275bd7ea7a30f117a15a9;hp=ea097dddc44f791d34a599aa12b3a251413aa47d;hpb=ee2fae03d68e702866a8661fbee7ff2f2f3754d7;p=linux-2.6-omap-h63xx.git

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index ea097dddc44..0e843bf6587 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -1,13 +1,17 @@
 /*
  * Runtime locking correctness validator
  *
- * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar
+ * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar
+ * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
  *
  * see Documentation/lockdep-design.txt for more details.
  */
 #ifndef __LINUX_LOCKDEP_H
 #define __LINUX_LOCKDEP_H
 
+struct task_struct;
+struct lockdep_map;
+
 #ifdef CONFIG_LOCKDEP
 
 #include <linux/linkage.h>
@@ -112,8 +116,44 @@ struct lock_class {
 
 	const char			*name;
 	int				name_version;
+
+#ifdef CONFIG_LOCK_STAT
+	unsigned long			contention_point[4];
+#endif
 };
 
+#ifdef CONFIG_LOCK_STAT
+struct lock_time {
+	s64				min;
+	s64				max;
+	s64				total;
+	unsigned long			nr;
+};
+
+enum bounce_type {
+	bounce_acquired_write,
+	bounce_acquired_read,
+	bounce_contended_write,
+	bounce_contended_read,
+	nr_bounce_types,
+
+	bounce_acquired = bounce_acquired_write,
+	bounce_contended = bounce_contended_write,
+};
+
+struct lock_class_stats {
+	unsigned long			contention_point[4];
+	struct lock_time		read_waittime;
+	struct lock_time		write_waittime;
+	struct lock_time		read_holdtime;
+	struct lock_time		write_holdtime;
+	unsigned long			bounces[nr_bounce_types];
+};
+
+struct lock_class_stats lock_stats(struct lock_class *class);
+void clear_lock_stats(struct lock_class *class);
+#endif
+
 /*
  * Map the lock object (the lock instance) to the lock-class object.
  * This is embedded into specific lock instances:
@@ -122,6 +162,9 @@ struct lockdep_map {
 	struct lock_class_key		*key;
 	struct lock_class		*class_cache;
 	const char			*name;
+#ifdef CONFIG_LOCK_STAT
+	int				cpu;
+#endif
 };
 
 /*
@@ -132,6 +175,7 @@ struct lock_list {
 	struct list_head		entry;
 	struct lock_class		*class;
 	struct stack_trace		trace;
+	int				distance;
 };
 
 /*
@@ -162,6 +206,10 @@ struct held_lock {
 	unsigned long			acquire_ip;
 	struct lockdep_map		*instance;
 
+#ifdef CONFIG_LOCK_STAT
+	u64				waittime_stamp;
+	u64				holdtime_stamp;
+#endif
 	/*
 	 * The lock-stack is unified in that the lock chains of interrupt
 	 * contexts nest ontop of process context chains, but we 'separate'
@@ -242,7 +290,7 @@ extern void lock_release(struct lockdep_map *lock, int nested,
 
 # define INIT_LOCKDEP				.lockdep_recursion = 0,
 
-#define lockdep_depth(tsk)	((tsk)->lockdep_depth)
+#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)
 
 #else /* !LOCKDEP */
 
@@ -278,6 +326,30 @@ struct lock_class_key { };
 
 #endif /* !LOCKDEP */
 
+#ifdef CONFIG_LOCK_STAT
+
+extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
+extern void lock_acquired(struct lockdep_map *lock);
+
+#define LOCK_CONTENDED(_lock, try, lock) \
+do { \
+	if (!try(_lock)) { \
+		lock_contended(&(_lock)->dep_map, _RET_IP_); \
+		lock(_lock); \
+	} \
+	lock_acquired(&(_lock)->dep_map); \
+} while (0)
+
+#else /* CONFIG_LOCK_STAT */
+
+#define lock_contended(lockdep_map, ip) do {} while (0)
+#define lock_acquired(lockdep_map) do {} while (0)
+
+#define LOCK_CONTENDED(_lock, try, lock) \
+	lock(_lock)
+
+#endif /* CONFIG_LOCK_STAT */
+
 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_GENERIC_HARDIRQS)
 extern void early_init_irq_lock_class(void);
 #else
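
Usage note (not part of the diff above): LOCK_CONTENDED() is meant to wrap the slow path of the existing lock primitives so that a failed trylock marks the start of a contention interval and the eventual acquisition closes it. A minimal sketch of the intended call pattern, modelled on the in-tree spinlock wrapper of that era; the function shown is illustrative and is not added by this header change:

	/* Sketch: how a lock wrapper consumes LOCK_CONTENDED().
	 * If the trylock succeeds, only lock_acquired() fires; otherwise
	 * lock_contended() stamps the wait start, the blocking lock() runs,
	 * and lock_acquired() records the wait time into the class stats. */
	void __lockfunc _spin_lock(spinlock_t *lock)
	{
		preempt_disable();
		spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
		LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
	}

When CONFIG_LOCK_STAT is off, the macro collapses to a plain lock(_lock) call, so the wrappers compile to the same code as before.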