author | Sebastian Andrzej Siewior <bigeasy@linutronix.de> | 2020-05-27 22:11:14 +0200
---|---|---
committer | Ingo Molnar <mingo@kernel.org> | 2020-05-28 10:31:09 +0200
commit | cfa6705d89b6562f79c40c249f8d94073c4276e4 (patch) |
tree | 5279a46bfde01387460f5f23756435c7ad8a2a2f /lib |
parent | 91710728d1725de51d06b40674abf6e860d592c7 (diff) |
download | linux-cfa6705d89b6562f79c40c249f8d94073c4276e4.tar.bz2 |
radix-tree: Use local_lock for protection
The radix-tree and idr preload mechanisms use preempt_disable() to protect
the complete operation between xxx_preload() and xxx_preload_end().
The code inside the preempt-disabled section acquires regular spinlocks,
which are converted to 'sleeping' spinlocks on a PREEMPT_RT kernel, and
eventually calls into a memory allocator. This conflicts with the RT
semantics.
Convert it to a local_lock, which allows RT kernels to substitute it with
a real per-CPU lock. On non-RT kernels this maps to preempt_disable() as
before, but also provides lockdep coverage of the critical region (see the
sketch below).
No functional change.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Link: https://lore.kernel.org/r/20200527201119.1692513-3-bigeasy@linutronix.de
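For readers unfamiliar with the primitive: a local_lock_t is embedded in the
per-CPU data it protects and initialized with INIT_LOCAL_LOCK(). Below is a
minimal sketch of that pattern, using hypothetical names (example_pcpu_state,
example_state, example_update) rather than anything from this patch:

```c
#include <linux/local_lock.h>
#include <linux/percpu.h>

struct example_pcpu_state {
	local_lock_t lock;	/* protects the fields below */
	int counter;		/* hypothetical per-CPU data */
};

static DEFINE_PER_CPU(struct example_pcpu_state, example_state) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

static void example_update(void)
{
	struct example_pcpu_state *s;

	/*
	 * On !PREEMPT_RT this compiles down to preempt_disable(); on
	 * PREEMPT_RT it acquires a per-CPU spinlock. Lockdep tracks the
	 * critical section in both configurations.
	 */
	local_lock(&example_state.lock);
	s = this_cpu_ptr(&example_state);
	s->counter++;
	local_unlock(&example_state.lock);
}
```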
Diffstat (limited to 'lib')

```
 lib/radix-tree.c | 20 +++++++++-----------
 1 file changed, 9 insertions(+), 11 deletions(-)
```
```diff
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 2ee6ae3b0ade..34e406fe561f 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -20,6 +20,7 @@
 #include <linux/kernel.h>
 #include <linux/kmemleak.h>
 #include <linux/percpu.h>
+#include <linux/local_lock.h>
 #include <linux/preempt.h>		/* in_interrupt() */
 #include <linux/radix-tree.h>
 #include <linux/rcupdate.h>
@@ -27,7 +28,6 @@
 #include <linux/string.h>
 #include <linux/xarray.h>
 
-
 /*
  * Radix tree node cache.
  */
@@ -58,12 +58,10 @@ struct kmem_cache *radix_tree_node_cachep;
 /*
  * Per-cpu pool of preloaded nodes
  */
-struct radix_tree_preload {
-	unsigned nr;
-	/* nodes->parent points to next preallocated node */
-	struct radix_tree_node *nodes;
+DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = {
+	.lock = INIT_LOCAL_LOCK(lock),
 };
-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
+EXPORT_PER_CPU_SYMBOL_GPL(radix_tree_preloads);
 
 static inline struct radix_tree_node *entry_to_node(void *ptr)
 {
@@ -332,14 +330,14 @@ static __must_check int __radix_tree_preload(gfp_t gfp_mask, unsigned nr)
 	 */
 	gfp_mask &= ~__GFP_ACCOUNT;
 
-	preempt_disable();
+	local_lock(&radix_tree_preloads.lock);
 	rtp = this_cpu_ptr(&radix_tree_preloads);
 	while (rtp->nr < nr) {
-		preempt_enable();
+		local_unlock(&radix_tree_preloads.lock);
 		node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
 		if (node == NULL)
 			goto out;
-		preempt_disable();
+		local_lock(&radix_tree_preloads.lock);
 		rtp = this_cpu_ptr(&radix_tree_preloads);
 		if (rtp->nr < nr) {
 			node->parent = rtp->nodes;
@@ -381,7 +379,7 @@ int radix_tree_maybe_preload(gfp_t gfp_mask)
 	if (gfpflags_allow_blocking(gfp_mask))
 		return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
 	/* Preloading doesn't help anything with this gfp mask, skip it */
-	preempt_disable();
+	local_lock(&radix_tree_preloads.lock);
 	return 0;
 }
 EXPORT_SYMBOL(radix_tree_maybe_preload);
@@ -1470,7 +1468,7 @@ EXPORT_SYMBOL(radix_tree_tagged);
 void idr_preload(gfp_t gfp_mask)
 {
 	if (__radix_tree_preload(gfp_mask, IDR_PRELOAD_SIZE))
-		preempt_disable();
+		local_lock(&radix_tree_preloads.lock);
 }
 EXPORT_SYMBOL(idr_preload);
 
```
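Two things are worth noting about the hunks above. First, the matching
radix_tree_preload_end() helper is an inline in include/linux/radix-tree.h
(updated by the same commit, outside the 'lib' diffstat shown here), which is
why radix_tree_preloads loses its static qualifier and gains
EXPORT_PER_CPU_SYMBOL_GPL. Second, the caller-side contract is unchanged:
radix_tree_preload() still returns with the per-CPU pool protected. A sketch
of the usual caller pattern, with example_insert as a hypothetical helper:

```c
#include <linux/gfp.h>
#include <linux/radix-tree.h>

/*
 * Hypothetical caller: the preload API is used exactly as before; only
 * the protection behind it changed from preempt_disable() to the
 * local_lock in radix_tree_preloads.
 */
static int example_insert(struct radix_tree_root *root,
			  unsigned long index, void *item)
{
	int ret;

	ret = radix_tree_preload(GFP_KERNEL);	/* acquires the local_lock */
	if (ret)
		return ret;	/* allocation failed, lock is not held */

	ret = radix_tree_insert(root, index, item);
	radix_tree_preload_end();	/* local_unlock() since this patch */
	return ret;
}
```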