author		Peter Zijlstra <peterz@infradead.org>	2019-10-31 12:34:23 +0100
committer	Ingo Molnar <mingo@kernel.org>	2020-02-11 13:10:55 +0100
commit		75ff64572e497578e238fefbdff221c96f29067a (patch)
tree		1ad12e94e5eee534c451c298642444d79a4c082b /kernel/locking/percpu-rwsem.c
parent		71365d40232110f7b029befc9033ea311d680611 (diff)
download	linux-75ff64572e497578e238fefbdff221c96f29067a.tar.bz2
locking/percpu-rwsem: Extract __percpu_down_read_trylock()
In preparation for removing the embedded rwsem and building a custom
lock, extract the read-trylock primitive.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Davidlohr Bueso <dbueso@suse.de>
Acked-by: Will Deacon <will@kernel.org>
Acked-by: Waiman Long <longman@redhat.com>
Tested-by: Juri Lelli <juri.lelli@redhat.com>
Link: https://lkml.kernel.org/r/20200131151540.098485539@infradead.org
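For context, a reader normally never reaches __percpu_down_read(): the fast
path in include/linux/percpu-rwsem.h only bumps the per-CPU read_count while
the writer side is idle, and drops into the slow path otherwise. The sketch
below is a simplified paraphrase of that fast path as it looked around this
series, not the verbatim header; lockdep annotations and the original comments
are omitted.

/*
 * Simplified paraphrase of the reader fast path
 * (include/linux/percpu-rwsem.h, roughly as of this series);
 * not the verbatim kernel source.
 */
static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
{
	might_sleep();

	preempt_disable();
	/*
	 * While no writer is around (rcu_sync idle), taking the lock for
	 * read is a single per-CPU increment.  Otherwise we call
	 * __percpu_down_read(), whose leading portion is what this patch
	 * splits out as __percpu_down_read_trylock().
	 */
	if (likely(rcu_sync_is_idle(&sem->rss)))
		__this_cpu_inc(*sem->read_count);
	else
		__percpu_down_read(sem, false);	/* Unconditional memory barrier */
	preempt_enable();
}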
Diffstat (limited to 'kernel/locking/percpu-rwsem.c')
-rw-r--r--	kernel/locking/percpu-rwsem.c	19
1 file changed, 13 insertions(+), 6 deletions(-)
diff --git a/kernel/locking/percpu-rwsem.c b/kernel/locking/percpu-rwsem.c
index becf925b27b5..b155e8e7ac39 100644
--- a/kernel/locking/percpu-rwsem.c
+++ b/kernel/locking/percpu-rwsem.c
@@ -45,7 +45,7 @@ void percpu_free_rwsem(struct percpu_rw_semaphore *sem)
 }
 EXPORT_SYMBOL_GPL(percpu_free_rwsem);
 
-bool __percpu_down_read(struct percpu_rw_semaphore *sem, bool try)
+static bool __percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
 {
 	__this_cpu_inc(*sem->read_count);
 
@@ -73,11 +73,18 @@ bool __percpu_down_read(struct percpu_rw_semaphore *sem, bool try)
 	if (likely(!smp_load_acquire(&sem->readers_block)))
 		return true;
 
-	/*
-	 * Per the above comment; we still have preemption disabled and
-	 * will thus decrement on the same CPU as we incremented.
-	 */
-	__percpu_up_read(sem);
+	__this_cpu_dec(*sem->read_count);
+
+	/* Prod writer to re-evaluate readers_active_check() */
+	rcuwait_wake_up(&sem->writer);
+
+	return false;
+}
+
+bool __percpu_down_read(struct percpu_rw_semaphore *sem, bool try)
+{
+	if (__percpu_down_read_trylock(sem))
+		return true;
 
 	if (try)
 		return false;