author		Nicholas Piggin <npiggin@gmail.com>	2022-11-26 19:59:21 +1000
committer	Michael Ellerman <mpe@ellerman.id.au>	2022-12-02 17:48:49 +1100
commit		e1a31e7fd7130628cfd229253da2b4630e7a809c (patch)
tree		3c328928ed0e74716e71ccee0458f3f75ccd7131 /arch/powerpc/include/asm
parent		0944534ef4d5cf39c8133575524be0be3337dd62 (diff)
download	linux-e1a31e7fd7130628cfd229253da2b4630e7a809c.tar.bz2
powerpc/qspinlock: store owner CPU in lock word
Store the owner CPU number in the lock word so it may be yielded to, as
powerpc's paravirtualised simple spinlocks do.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20221126095932.1234527-7-npiggin@gmail.com
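As a sketch of the idea (not part of this patch: lock_owner_cpu() is a
hypothetical helper, only the _Q_OWNER_CPU_* constants appearing in the
diff below are real), the lock word now round-trips the owner's CPU number:

	/*
	 * Sketch only: the owner CPU rides in bits 1-14 of the lock word.
	 * encode_locked_val() mirrors queued_spin_encode_locked_val() added
	 * below; lock_owner_cpu() is a hypothetical helper a waiter could
	 * use to pick a yield target.
	 */
	static inline u32 encode_locked_val(int cpu)
	{
		return _Q_LOCKED_VAL | ((u32)cpu << _Q_OWNER_CPU_OFFSET);
	}

	static inline int lock_owner_cpu(u32 val)
	{
		return (val & _Q_OWNER_CPU_MASK) >> _Q_OWNER_CPU_OFFSET;
	}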
Diffstat (limited to 'arch/powerpc/include/asm')
-rw-r--r--	arch/powerpc/include/asm/qspinlock.h		| 12
-rw-r--r--	arch/powerpc/include/asm/qspinlock_types.h	| 12
2 files changed, 21 insertions(+), 3 deletions(-)
diff --git a/arch/powerpc/include/asm/qspinlock.h b/arch/powerpc/include/asm/qspinlock.h
index cebd2c89c08d..9572a2ef974d 100644
--- a/arch/powerpc/include/asm/qspinlock.h
+++ b/arch/powerpc/include/asm/qspinlock.h
@@ -21,8 +21,15 @@ static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
 	return !!(READ_ONCE(lock->val) & _Q_TAIL_CPU_MASK);
 }
 
+static __always_inline u32 queued_spin_encode_locked_val(void)
+{
+	/* XXX: make this use lock value in paca like simple spinlocks? */
+	return _Q_LOCKED_VAL | (smp_processor_id() << _Q_OWNER_CPU_OFFSET);
+}
+
 static __always_inline int queued_spin_trylock(struct qspinlock *lock)
 {
+	u32 new = queued_spin_encode_locked_val();
 	u32 prev;
 
 	asm volatile(
@@ -34,7 +41,7 @@ static __always_inline int queued_spin_trylock(struct qspinlock *lock)
 "\t" PPC_ACQUIRE_BARRIER "						\n"
 "2:									\n"
 	: "=&r" (prev)
-	: "r" (&lock->val), "r" (_Q_LOCKED_VAL),
+	: "r" (&lock->val), "r" (new),
 	  "i" (IS_ENABLED(CONFIG_PPC64))
 	: "cr0", "memory");
 
@@ -43,6 +50,7 @@ static __always_inline int queued_spin_trylock(struct qspinlock *lock)
 
 static __always_inline int __queued_spin_trylock_steal(struct qspinlock *lock)
 {
+	u32 new = queued_spin_encode_locked_val();
 	u32 prev, tmp;
 
 	/* Trylock may get ahead of queued nodes if it finds unlocked */
@@ -57,7 +65,7 @@ static __always_inline int __queued_spin_trylock_steal(struct qspinlock *lock)
 "\t" PPC_ACQUIRE_BARRIER "						\n"
 "2:									\n"
 	: "=&r" (prev), "=&r" (tmp)
-	: "r" (&lock->val), "r" (_Q_LOCKED_VAL), "r" (_Q_TAIL_CPU_MASK),
+	: "r" (&lock->val), "r" (new), "r" (_Q_TAIL_CPU_MASK),
 	  "i" (IS_ENABLED(CONFIG_PPC64))
 	: "cr0", "memory");
 
diff --git a/arch/powerpc/include/asm/qspinlock_types.h b/arch/powerpc/include/asm/qspinlock_types.h
index 1911a8a16237..adfeed4aa495 100644
--- a/arch/powerpc/include/asm/qspinlock_types.h
+++ b/arch/powerpc/include/asm/qspinlock_types.h
@@ -29,7 +29,8 @@ typedef struct qspinlock {
  * Bitfields in the lock word:
  *
  *     0: locked bit
- *  1-15: unused bits
+ *  1-14: lock holder cpu
+ *    15: unused bit
  *    16: must queue bit
  * 17-31: tail cpu (+1)
  */
@@ -40,6 +41,15 @@ typedef struct qspinlock {
 #define _Q_LOCKED_BITS		1
 #define _Q_LOCKED_VAL		(1U << _Q_LOCKED_OFFSET)
 
+/* 0x00007ffe */
+#define _Q_OWNER_CPU_OFFSET	1
+#define _Q_OWNER_CPU_BITS	14
+#define _Q_OWNER_CPU_MASK	_Q_SET_MASK(OWNER_CPU)
+
+#if CONFIG_NR_CPUS > (1U << _Q_OWNER_CPU_BITS)
+#error "qspinlock does not support such large CONFIG_NR_CPUS"
+#endif
+
 /* 0x00010000 */
 #define _Q_MUST_Q_OFFSET	16
 #define _Q_MUST_Q_BITS		1
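For reference, the arithmetic behind the new constants (a worked example,
assuming the usual _Q_SET_MASK(type) definition of
(((1U << _Q_##type##_BITS) - 1) << _Q_##type##_OFFSET)):

	/* _Q_OWNER_CPU_MASK = ((1U << 14) - 1) << 1 = 0x00007ffe,
	 * matching the comment above the defines.  14 owner bits cap
	 * CONFIG_NR_CPUS at 1U << 14 = 16384, hence the #error guard.
	 * Example: CPU 5 taking the lock stores
	 *   _Q_LOCKED_VAL | (5 << _Q_OWNER_CPU_OFFSET) = 0x1 | 0xa = 0xb.
	 */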