X-Git-Url: http://pilppa.org/gitweb/gitweb.cgi?a=blobdiff_plain;f=include%2Flinux%2Flockdep.h;h=0e843bf658777114a3a664e1c978588ee623cc3f;hb=46f555f2731a14545a09ec06d27bd18e8e07069f;hp=06fe93a3e916d882fa09fe4275ed9cbf922ac8c0;hpb=b0138a6cb7923a997d278b47c176778534d1095b;p=linux-2.6-omap-h63xx.git

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 06fe93a3e91..0e843bf6587 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -1,7 +1,8 @@
 /*
  * Runtime locking correctness validator
  *
- * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar
+ * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar
+ * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
  *
  * see Documentation/lockdep-design.txt for more details.
  */
@@ -9,6 +10,7 @@
 #define __LINUX_LOCKDEP_H
 
 struct task_struct;
+struct lockdep_map;
 
 #ifdef CONFIG_LOCKDEP
 
@@ -114,8 +116,44 @@ struct lock_class {
 
 	const char			*name;
 	int				name_version;
+
+#ifdef CONFIG_LOCK_STAT
+	unsigned long			contention_point[4];
+#endif
 };
 
+#ifdef CONFIG_LOCK_STAT
+struct lock_time {
+	s64				min;
+	s64				max;
+	s64				total;
+	unsigned long			nr;
+};
+
+enum bounce_type {
+	bounce_acquired_write,
+	bounce_acquired_read,
+	bounce_contended_write,
+	bounce_contended_read,
+	nr_bounce_types,
+
+	bounce_acquired = bounce_acquired_write,
+	bounce_contended = bounce_contended_write,
+};
+
+struct lock_class_stats {
+	unsigned long			contention_point[4];
+	struct lock_time		read_waittime;
+	struct lock_time		write_waittime;
+	struct lock_time		read_holdtime;
+	struct lock_time		write_holdtime;
+	unsigned long			bounces[nr_bounce_types];
+};
+
+struct lock_class_stats lock_stats(struct lock_class *class);
+void clear_lock_stats(struct lock_class *class);
+#endif
+
 /*
  * Map the lock object (the lock instance) to the lock-class object.
  * This is embedded into specific lock instances:
@@ -124,6 +162,9 @@ struct lockdep_map {
 	struct lock_class_key		*key;
 	struct lock_class		*class_cache;
 	const char			*name;
+#ifdef CONFIG_LOCK_STAT
+	int				cpu;
+#endif
 };
 
 /*
@@ -165,6 +206,10 @@ struct held_lock {
 	unsigned long			acquire_ip;
 	struct lockdep_map		*instance;
 
+#ifdef CONFIG_LOCK_STAT
+	u64				waittime_stamp;
+	u64				holdtime_stamp;
+#endif
 	/*
 	 * The lock-stack is unified in that the lock chains of interrupt
 	 * contexts nest ontop of process context chains, but we 'separate'
@@ -245,7 +290,7 @@ extern void lock_release(struct lockdep_map *lock, int nested,
 
 # define INIT_LOCKDEP				.lockdep_recursion = 0,
 
-#define lockdep_depth(tsk)	((tsk)->lockdep_depth)
+#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)
 
 #else /* !LOCKDEP */
 
@@ -281,6 +326,30 @@ struct lock_class_key { };
 
 #endif /* !LOCKDEP */
 
+#ifdef CONFIG_LOCK_STAT
+
+extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
+extern void lock_acquired(struct lockdep_map *lock);
+
+#define LOCK_CONTENDED(_lock, try, lock)			\
+do {								\
+	if (!try(_lock)) {					\
+		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
+		lock(_lock);					\
+	}							\
+	lock_acquired(&(_lock)->dep_map);			\
+} while (0)
+
+#else /* CONFIG_LOCK_STAT */
+
+#define lock_contended(lockdep_map, ip) do {} while (0)
+#define lock_acquired(lockdep_map) do {} while (0)
+
+#define LOCK_CONTENDED(_lock, try, lock) \
+	lock(_lock)
+
+#endif /* CONFIG_LOCK_STAT */
+
 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_GENERIC_HARDIRQS)
 extern void early_init_irq_lock_class(void);
 #else
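
For reference, the LOCK_CONTENDED() helper added above is meant to wrap a lock type's acquire path: the fast-path trylock is attempted first, and only when it fails is a contention event recorded before falling back to the blocking slow path, with lock_acquired() stamping the eventual acquisition. A minimal sketch of how a spinlock-style primitive might use it is shown below; it is not part of this patch, and the _raw_spin_trylock/_raw_spin_lock names simply stand in for whatever fast- and slow-path functions the lock type provides.

	/*
	 * Hypothetical usage sketch (not part of the diff above): a
	 * spinlock acquire path instrumented with LOCK_CONTENDED().
	 */
	void __lockfunc _spin_lock(spinlock_t *lock)
	{
		preempt_disable();
		/* lockdep bookkeeping for the acquire itself */
		spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
		/* try the fast path; record contention only if it fails */
		LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
	}

With CONFIG_LOCK_STAT disabled, the macro collapses to a plain lock(_lock) call, so uncontended builds pay no extra cost.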