author		Bart Van Assche <bvanassche@acm.org>	2019-02-14 15:00:42 -0800
committer	Ingo Molnar <mingo@kernel.org>		2019-02-28 07:55:41 +0100
commit		feb0a3865ed2f7d66a1f2686f7ad784422c249ad (patch)
tree		5ac8689b20d9798c8e4d710dee2691978ddb5121 /kernel/locking
parent		86cffb80a525f7b8f969c8c79669d383e02f17d1 (diff)
locking/lockdep: Initialize the locks_before and locks_after lists earlier
This patch does not change any functionality. A later patch will reuse
lock classes that have been freed. In combination with that patch, this
patch will have the effect of initializing the lock class order lists
once instead of every time a lock class structure is reinitialized.

Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Johannes Berg <johannes@sipsolutions.net>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Waiman Long <longman@redhat.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: johannes.berg@intel.com
Cc: tj@kernel.org
Link: https://lkml.kernel.org/r/20190214230058.196511-8-bvanassche@acm.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
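The pattern the patch introduces is a guarded, idempotent initializer: a
static flag turns every call after the first into a cheap no-op, so the
list heads in the statically allocated lock_classes[] array are set up
exactly once rather than on every (re)registration. Below is a minimal
user-space sketch of the same idiom; the struct, array size, and names
are illustrative stand-ins, not lockdep's actual definitions.

/* Sketch of a one-time, idempotent initializer (illustrative, not kernel code). */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static void INIT_LIST_HEAD(struct list_head *h)
{
	h->next = h;
	h->prev = h;
}

struct class_sketch {			/* stand-in for struct lock_class */
	struct list_head locks_after;
	struct list_head locks_before;
};

static struct class_sketch classes[8];	/* static storage: starts out zeroed */

static void init_data_structures_once(void)
{
	static bool initialization_happened;
	size_t i;

	if (initialization_happened)	/* every later call returns here */
		return;

	initialization_happened = true;

	for (i = 0; i < ARRAY_SIZE(classes); i++) {
		INIT_LIST_HEAD(&classes[i].locks_after);
		INIT_LIST_HEAD(&classes[i].locks_before);
	}
}

int main(void)
{
	init_data_structures_once();
	init_data_structures_once();	/* no-op: lists are initialized only once */
	printf("self-linked: %d\n",
	       classes[0].locks_after.next == &classes[0].locks_after);
	return 0;
}

Every call site can therefore invoke init_data_structures_once()
defensively; only the first caller pays the initialization cost.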
Diffstat (limited to 'kernel/locking')
-rw-r--r--	kernel/locking/lockdep.c	| 29 +++++++++++++++++++++++++++--
1 file changed, 27 insertions(+), 2 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 28fbeb2a10cc..d1a6daf1f51f 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -736,6 +736,25 @@ static bool assign_lock_key(struct lockdep_map *lock)
 }
 
 /*
+ * Initialize the lock_classes[] array elements.
+ */
+static void init_data_structures_once(void)
+{
+	static bool initialization_happened;
+	int i;
+
+	if (likely(initialization_happened))
+		return;
+
+	initialization_happened = true;
+
+	for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
+		INIT_LIST_HEAD(&lock_classes[i].locks_after);
+		INIT_LIST_HEAD(&lock_classes[i].locks_before);
+	}
+}
+
+/*
  * Register a lock's class in the hash-table, if the class is not present
  * yet. Otherwise we look it up. We cache the result in the lock object
  * itself, so actual lookup of the hash should be once per lock object.
@@ -775,6 +794,8 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 			goto out_unlock_set;
 	}
 
+	init_data_structures_once();
+
 	/*
 	 * Allocate a new key from the static array, and add it to
 	 * the hash:
@@ -793,8 +814,8 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 	class->key = key;
 	class->name = lock->name;
 	class->subclass = subclass;
-	INIT_LIST_HEAD(&class->locks_before);
-	INIT_LIST_HEAD(&class->locks_after);
+	WARN_ON_ONCE(!list_empty(&class->locks_before));
+	WARN_ON_ONCE(!list_empty(&class->locks_after));
 	class->name_version = count_matching_names(class);
 	/*
 	 * We use RCU's safe list-add method to make
@@ -4155,6 +4176,8 @@ void lockdep_free_key_range(void *start, unsigned long size)
 	int i;
 	int locked;
 
+	init_data_structures_once();
+
 	raw_local_irq_save(flags);
 	locked = graph_lock();
 
@@ -4218,6 +4241,8 @@ void lockdep_reset_lock(struct lockdep_map *lock)
 	unsigned long flags;
 	int j, locked;
 
+	init_data_structures_once();
+
 	raw_local_irq_save(flags);
 	locked = graph_lock();
 
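Two consequences of the one-time initialization are visible in the hunks
above. First, register_lock_class() now asserts with WARN_ON_ONCE() that
a class's order lists are already empty rather than reinitializing them:
once the later patch starts reusing freed classes, a non-empty list at
this point would indicate a bug that reinitialization would silently
hide. Second, lockdep_free_key_range() and lockdep_reset_lock() call
init_data_structures_once() themselves because they can run before any
class was ever registered (for example, when a module is unloaded before
any of its locks were used), and the cleanup they trigger inspects the
per-class order lists. The subtle point is that a zeroed struct
list_head is not a valid empty list: list_empty() tests
head->next == head, and in zeroed static storage next is NULL. A small
user-space sketch of that pitfall (local re-definitions, not kernel
headers):

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

/* Same test the kernel's list_empty() performs. */
static int list_empty(const struct list_head *head)
{
	return head->next == head;
}

int main(void)
{
	struct list_head zeroed = { 0 };		/* static storage before init */
	struct list_head inited = { &inited, &inited };	/* after INIT_LIST_HEAD() */

	/* Prints 0: a zeroed head does not read as empty, and walking it
	 * would dereference NULL. Hence the one-time INIT_LIST_HEAD() pass. */
	printf("zeroed reads as empty: %d\n", list_empty(&zeroed));
	/* Prints 1: an initialized head is a proper empty list. */
	printf("inited reads as empty: %d\n", list_empty(&inited));
	return 0;
}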