path: root/arch/powerpc/include
author		Nicholas Piggin <npiggin@gmail.com>	2022-11-26 19:59:27 +1000
committer	Michael Ellerman <mpe@ellerman.id.au>	2022-12-02 17:48:50 +1100
commit		f61ab43cc1a6146d6eef7e0713a452c3677ad13e (patch)
tree		5c492373691342daebbb8422523b23aed23a3171 /arch/powerpc/include
parent		be742c573fdafcfa1752642ca1c7aaf08c258128 (diff)
download	linux-f61ab43cc1a6146d6eef7e0713a452c3677ad13e.tar.bz2
powerpc/qspinlock: allow lock stealing in trylock and lock fastpath
This change allows trylock to steal the lock. It also allows the initial lock
attempt to steal the lock rather than bailing out and going to the slow path.

This gives trylock more strength: without this a continually-contended lock
will never permit a trylock to succeed. With this change, the trylock has a
small but non-zero chance.

It also gives the lock fastpath most of the benefit of passing the reservation
back through to the steal loop in the slow path without the complexity.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20221126095932.1234527-13-npiggin@gmail.com
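To illustrate the behavioural difference outside the kernel, here is a minimal
user-space sketch of the two trylock flavours using C11 atomics. The
toy_qspinlock layout, TOY_LOCKED_BIT mask and function names are illustrative
assumptions and not the kernel's definitions (the real lock word also encodes
the owner CPU and a tail of queued waiters): the non-stealing variant succeeds
only on a completely idle lock word, while the stealing variant succeeds
whenever the locked bit is clear, even if waiters are queued in the other bits.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define TOY_LOCKED_BIT 0x1u	/* hypothetical locked bit; other bits stand in for the waiter queue */

struct toy_qspinlock { _Atomic uint32_t val; };

/* Non-stealing trylock: succeeds only when the whole word is zero (no owner, no queue). */
static bool toy_trylock_nosteal(struct toy_qspinlock *lock)
{
	uint32_t old = 0;

	return atomic_compare_exchange_strong(&lock->val, &old, TOY_LOCKED_BIT);
}

/* Stealing trylock: succeeds whenever the locked bit is clear, ignoring queued waiters. */
static bool toy_trylock_steal(struct toy_qspinlock *lock)
{
	uint32_t old = atomic_load(&lock->val);

	do {
		if (old & TOY_LOCKED_BIT)
			return false;	/* currently owned: fail immediately */
	} while (!atomic_compare_exchange_weak(&lock->val, &old,
					       old | TOY_LOCKED_BIT));
	return true;
}

With a continually-contended lock the waiter-queue bits are rarely all zero, so
the nosteal variant essentially never succeeds, while the stealing variant still
wins whenever the owner has just released the lock, which is the "small but
non-zero chance" the commit message refers to.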
Diffstat (limited to 'arch/powerpc/include')
-rw-r--r--	arch/powerpc/include/asm/qspinlock.h | 22 ++++++++++++++++++++--
1 file changed, 20 insertions(+), 2 deletions(-)
diff --git a/arch/powerpc/include/asm/qspinlock.h b/arch/powerpc/include/asm/qspinlock.h
index 9572a2ef974d..93b1c976db8a 100644
--- a/arch/powerpc/include/asm/qspinlock.h
+++ b/arch/powerpc/include/asm/qspinlock.h
@@ -6,6 +6,15 @@
#include <asm/qspinlock_types.h>
#include <asm/paravirt.h>
+/*
+ * The trylock itself may steal. This makes trylocks slightly stronger, and
+ * might make spin locks slightly more efficient when stealing.
+ *
+ * This is compile-time, so if true then there may always be stealers, so the
+ * nosteal paths become unused.
+ */
+#define _Q_SPIN_TRY_LOCK_STEAL 1
+
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
return READ_ONCE(lock->val);
@@ -27,13 +36,14 @@ static __always_inline u32 queued_spin_encode_locked_val(void)
return _Q_LOCKED_VAL | (smp_processor_id() << _Q_OWNER_CPU_OFFSET);
}
-static __always_inline int queued_spin_trylock(struct qspinlock *lock)
+static __always_inline int __queued_spin_trylock_nosteal(struct qspinlock *lock)
{
u32 new = queued_spin_encode_locked_val();
u32 prev;
+ /* Trylock succeeds only when unlocked and no queued nodes */
asm volatile(
-"1: lwarx %0,0,%1,%3 # queued_spin_trylock \n"
+"1: lwarx %0,0,%1,%3 # __queued_spin_trylock_nosteal \n"
" cmpwi 0,%0,0 \n"
" bne- 2f \n"
" stwcx. %2,0,%1 \n"
@@ -72,6 +82,14 @@ static __always_inline int __queued_spin_trylock_steal(struct qspinlock *lock)
return likely(!(prev & ~_Q_TAIL_CPU_MASK));
}
+static __always_inline int queued_spin_trylock(struct qspinlock *lock)
+{
+ if (!_Q_SPIN_TRY_LOCK_STEAL)
+ return __queued_spin_trylock_nosteal(lock);
+ else
+ return __queued_spin_trylock_steal(lock);
+}
+
void queued_spin_lock_slowpath(struct qspinlock *lock);
static __always_inline void queued_spin_lock(struct qspinlock *lock)