author     Nicholas Piggin <npiggin@gmail.com>      2022-11-26 19:59:27 +1000
committer  Michael Ellerman <mpe@ellerman.id.au>    2022-12-02 17:48:50 +1100
commit     f61ab43cc1a6146d6eef7e0713a452c3677ad13e (patch)
tree       5c492373691342daebbb8422523b23aed23a3171 /arch/powerpc/lib
parent     be742c573fdafcfa1752642ca1c7aaf08c258128 (diff)
download   linux-f61ab43cc1a6146d6eef7e0713a452c3677ad13e.tar.bz2
powerpc/qspinlock: allow lock stealing in trylock and lock fastpath
This change allows trylock to steal the lock. It also allows the initial lock
attempt to steal the lock rather than bailing out and going to the slow path.

This gives trylock more strength: without this a continually-contended lock
will never permit a trylock to succeed. With this change, the trylock has a
small but non-zero chance.

It also gives the lock fastpath most of the benefit of passing the reservation
back through to the steal loop in the slow path without the complexity.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20221126095932.1234527-13-npiggin@gmail.com
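For readers new to the term, "stealing" means taking the lock even though other
CPUs are already queued behind it. The toy C11 sketch below contrasts a
non-stealing trylock, which gives up unless the lock word is completely free,
with a stealing trylock, which only requires the locked bit to be clear. The
lock word layout and all names here are invented for illustration; they are not
the kernel's qspinlock definitions.

/*
 * Illustrative only: a toy lock word with a LOCKED bit and "tail" bits that
 * stand in for queued waiters.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define LOCKED		(1u << 0)
#define TAIL_MASK	(~0u << 16)	/* nonzero tail => waiters are queued */

struct toy_lock {
	_Atomic uint32_t val;
};

/* Non-stealing trylock: fail unless the lock word is entirely zero. */
static bool trylock_nosteal(struct toy_lock *lock)
{
	uint32_t old = 0;

	return atomic_compare_exchange_strong(&lock->val, &old, LOCKED);
}

/*
 * Stealing trylock: only require the LOCKED bit to be clear and preserve the
 * tail bits, so the lock can be taken ahead of queued waiters. On a
 * continually-contended lock this still has a chance of succeeding whenever
 * the owner releases the lock, which the non-stealing variant never does.
 */
static bool trylock_steal(struct toy_lock *lock)
{
	uint32_t old = atomic_load(&lock->val);

	while (!(old & LOCKED)) {
		if (atomic_compare_exchange_weak(&lock->val, &old,
						 old | LOCKED))
			return true;
		/* old was refreshed by the failed CAS; retry while unlocked */
	}
	return false;
}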
Diffstat (limited to 'arch/powerpc/lib')
-rw-r--r--  arch/powerpc/lib/qspinlock.c  9
1 file changed, 9 insertions, 0 deletions
diff --git a/arch/powerpc/lib/qspinlock.c b/arch/powerpc/lib/qspinlock.c
index 2f6c0bed25ea..8e5b8bc3f094 100644
--- a/arch/powerpc/lib/qspinlock.c
+++ b/arch/powerpc/lib/qspinlock.c
@@ -24,7 +24,11 @@ struct qnodes {
 
 /* Tuning parameters */
 static int steal_spins __read_mostly = (1 << 5);
+#if _Q_SPIN_TRY_LOCK_STEAL == 1
+static const bool maybe_stealers = true;
+#else
 static bool maybe_stealers __read_mostly = true;
+#endif
 static int head_spins __read_mostly = (1 << 8);
 
 static bool pv_yield_owner __read_mostly = true;
@@ -483,6 +487,10 @@ void pv_spinlocks_init(void)
 #include <linux/debugfs.h>
 static int steal_spins_set(void *data, u64 val)
 {
+#if _Q_SPIN_TRY_LOCK_STEAL == 1
+	/* MAYBE_STEAL remains true */
+	steal_spins = val;
+#else
 	static DEFINE_MUTEX(lock);
 
 	/*
@@ -507,6 +515,7 @@ static int steal_spins_set(void *data, u64 val)
 		steal_spins = val;
 	}
 	mutex_unlock(&lock);
+#endif
 
 	return 0;
 }
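A note on the first hunk, as the diff itself shows: when _Q_SPIN_TRY_LOCK_STEAL
is 1, maybe_stealers becomes a const true, so stealers must always be assumed
and the debugfs setter can simply assign steal_spins instead of taking the
mutex and synchronizing to flip maybe_stealers. Below is a minimal, hypothetical
sketch of that general const-versus-tunable pattern; the macro and variable
names are invented for illustration and are not taken from the kernel.

/* Hypothetical illustration of a compile-time constant vs. a runtime knob. */
#include <stdbool.h>
#include <stdio.h>

#ifdef ALWAYS_ALLOW_STEAL			/* invented name, not a kernel macro */
static const bool stealing_possible = true;	/* constant: checks can fold away */
#else
static bool stealing_possible = true;		/* tunable: changing it needs care */
#endif

int main(void)
{
	/* With the const form, the compiler can drop the untaken branch entirely. */
	if (stealing_possible)
		puts("lock paths must tolerate stealers");
	else
		puts("queue head may assume no concurrent stealers");
	return 0;
}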