 include/linux/lockdep.h            |  7 +------
 kernel/locking/lockdep.c           | 11 ++++++++---
 kernel/locking/lockdep_internals.h | 27 +++++++++++++++++++++++++++
 kernel/locking/lockdep_proc.c      |  2 +-
 4 files changed, 37 insertions(+), 10 deletions(-)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index b0d0b51c4d85..1fd82ff99c65 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -99,13 +99,8 @@ struct lock_class {
 	 */
 	unsigned int			version;
 
-	/*
-	 * Statistics counter:
-	 */
-	unsigned long			ops;
-
-	const char			*name;
 	int				name_version;
+	const char			*name;
 
 #ifdef CONFIG_LOCK_STAT
 	unsigned long			contention_point[LOCKSTAT_POINTS];
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 511d30f88bce..a0f83058d6aa 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -139,7 +139,7 @@ static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
  * get freed - this significantly simplifies the debugging code.
  */
 unsigned long nr_lock_classes;
-static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
+struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
 
 static inline struct lock_class *hlock_class(struct held_lock *hlock)
 {
@@ -436,6 +436,7 @@ unsigned int max_lockdep_depth;
  * Various lockdep statistics:
  */
 DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats);
+DEFINE_PER_CPU(unsigned long [MAX_LOCKDEP_KEYS], lock_class_ops);
 #endif
 
 /*
@@ -1392,7 +1393,9 @@ static void print_lock_class_header(struct lock_class *class, int depth)
 
 	printk("%*s->", depth, "");
 	print_lock_name(class);
-	printk(KERN_CONT " ops: %lu", class->ops);
+#ifdef CONFIG_DEBUG_LOCKDEP
+	printk(KERN_CONT " ops: %lu", debug_class_ops_read(class));
+#endif
 	printk(KERN_CONT " {\n");
 
 	for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
@@ -3227,7 +3230,9 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 		if (!class)
 			return 0;
 	}
-	atomic_inc((atomic_t *)&class->ops);
+
+	debug_class_ops_inc(class);
+
 	if (very_verbose(class)) {
 		printk("\nacquire class [%px] %s", class->key, class->name);
 		if (class->name_version > 1)
diff --git a/kernel/locking/lockdep_internals.h b/kernel/locking/lockdep_internals.h
index d459d624ba2a..88c847a41c8a 100644
--- a/kernel/locking/lockdep_internals.h
+++ b/kernel/locking/lockdep_internals.h
@@ -152,9 +152,15 @@ struct lockdep_stats {
 	int	nr_find_usage_forwards_recursions;
 	int	nr_find_usage_backwards_checks;
 	int	nr_find_usage_backwards_recursions;
+
+	/*
+	 * Per lock class locking operation stat counts
+	 */
+	unsigned long lock_class_ops[MAX_LOCKDEP_KEYS];
 };
 
 DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);
+extern struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
 
 #define __debug_atomic_inc(ptr)					\
 	this_cpu_inc(lockdep_stats.ptr);
@@ -179,9 +185,30 @@ DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);
 	}								\
 	__total;							\
 })
+
+static inline void debug_class_ops_inc(struct lock_class *class)
+{
+	int idx;
+
+	idx = class - lock_classes;
+	__debug_atomic_inc(lock_class_ops[idx]);
+}
+
+static inline unsigned long debug_class_ops_read(struct lock_class *class)
+{
+	int idx, cpu;
+	unsigned long ops = 0;
+
+	idx = class - lock_classes;
+	for_each_possible_cpu(cpu)
+		ops += per_cpu(lockdep_stats.lock_class_ops[idx], cpu);
+	return ops;
+}
+
 #else
 # define __debug_atomic_inc(ptr)	do { } while (0)
 # define debug_atomic_inc(ptr)		do { } while (0)
 # define debug_atomic_dec(ptr)		do { } while (0)
 # define debug_atomic_read(ptr)		0
+# define debug_class_ops_inc(ptr)	do { } while (0)
 #endif
diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
index 3dd980dfba2d..3d31f9b0059e 100644
--- a/kernel/locking/lockdep_proc.c
+++ b/kernel/locking/lockdep_proc.c
@@ -68,7 +68,7 @@ static int l_show(struct seq_file *m, void *v)
 
 	seq_printf(m, "%p", class->key);
 #ifdef CONFIG_DEBUG_LOCKDEP
-	seq_printf(m, " OPS:%8ld", class->ops);
+	seq_printf(m, " OPS:%8ld", debug_class_ops_read(class));
 #endif
 #ifdef CONFIG_PROVE_LOCKING
 	seq_printf(m, " FD:%5ld", lockdep_count_forward_deps(class));
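The patch follows a common scalability pattern: a single counter that every CPU updates (previously an atomic_inc() on class->ops) becomes an array of per-CPU slots, indexed by the class's offset in the static lock_classes[] array, and the slots are summed only when someone actually reads the value. Below is a minimal userspace sketch of that striped-counter idea, using POSIX threads and a padded array standing in for per-CPU data; every name in it (ops_inc, ops_read, NSLOTS, and so on) is invented for the illustration and is not part of the kernel patch.

/*
 * Sketch of a striped counter: increments touch only the slot owned by
 * the calling thread (standing in for a per-CPU variable), reads fold
 * all slots into one total, like debug_class_ops_read() above.
 * Build with: cc -O2 -pthread striped_counter.c
 */
#include <pthread.h>
#include <stdio.h>

#define NSLOTS   8          /* stand-in for the number of CPUs */
#define NTHREADS 4
#define NOPS     100000

struct slot {
	unsigned long count;
	char pad[64 - sizeof(unsigned long)];  /* keep slots on separate cache lines */
};

static struct slot ops[NSLOTS];

/* Fast path: bump this thread's private slot. */
static void ops_inc(int self)
{
	__atomic_fetch_add(&ops[self].count, 1, __ATOMIC_RELAXED);
}

/* Slow path: sum every slot to get the logical counter value. */
static unsigned long ops_read(void)
{
	unsigned long sum = 0;

	for (int i = 0; i < NSLOTS; i++)
		sum += __atomic_load_n(&ops[i].count, __ATOMIC_RELAXED);
	return sum;
}

static void *worker(void *arg)
{
	int self = (int)(long)arg % NSLOTS;

	for (int i = 0; i < NOPS; i++)
		ops_inc(self);
	return NULL;
}

int main(void)
{
	pthread_t tid[NTHREADS];

	for (long i = 0; i < NTHREADS; i++)
		pthread_create(&tid[i], NULL, worker, (void *)i);
	for (int i = 0; i < NTHREADS; i++)
		pthread_join(tid[i], NULL);

	printf("total ops: %lu\n", ops_read());   /* expect 400000 */
	return 0;
}

The kernel version is cheaper still: this_cpu_inc() performs a plain, preemption-safe increment of the CPU's own storage, whereas this userspace sketch needs relaxed atomics because threads are not pinned to "their" slot the way kernel code is pinned to its CPU's per-CPU data.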