author     Davidlohr Bueso <dave@stgolabs.net>       2021-03-08 17:59:49 -0800
committer  Michael Ellerman <mpe@ellerman.id.au>     2021-03-26 23:19:43 +1100
commit     66f60522138c2e0d8a3518edd4979df11a2d7525 (patch)
tree       e5d9e3abaa84b75a49555ab45ee6a20ccf889fe6 /arch
parent     2bf3604c415c9d75311141b8eb6ac8780ef74420 (diff)
powerpc/spinlock: Unserialize spin_is_locked
c6f5d02b6a0f (locking/spinlocks/arm64: Remove smp_mb() from
arch_spin_is_locked()) made it pretty official that the call semantics do
not imply any sort of barriers, and any user that gets creative must
explicitly do any serialization.

This creativity, however, is nowadays pretty limited:

1. spin_unlock_wait() has been removed from the kernel in favor of a
   lock/unlock combo. Furthermore, queued spinlocks have for a number of
   years no longer relied on _Q_LOCKED_VAL for the call, but on any
   non-zero value to indicate a locked state. There were cases where the
   delayed locked store could lead to breaking mutual exclusion with
   crossed locking, with sysv ipc and netfilter being the most extreme.

2. The audit Andrea did verified that the remaining spin_is_locked()
   callers no longer rely on such semantics. Most callers just use it to
   assert that a lock is taken, in a debug nature. The only user that
   gets cute is the NOLOCK qdisc, as of:

   96009c7d500e (sched: replace __QDISC_STATE_RUNNING bit with a spin lock)

   ... which ironically went in the next day after c6f5d02b6a0f. This
   change replaces test_bit() with spin_is_locked() to know whether to
   take the busylock heuristic to reduce contention on the main qdisc
   lock. So any races against spin_is_locked() for archs that use LL/SC
   for spin_lock() will be benign and not break any mutual exclusion;
   furthermore, both the seqlock and busylock have the same scope.

Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210309015950.27688-3-dave@stgolabs.net
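For context, the busylock heuristic referred to above follows roughly the pattern sketched here. This is an illustrative, hypothetical snippet (the struct and function names are made up; it is not the actual net/core/dev.c code): spin_is_locked() serves only as a contention hint, and mutual exclusion still comes from the spin_lock() calls themselves, which is why a stale, unordered answer is benign.

#include <linux/spinlock.h>

/* Hypothetical stand-in for the lockless qdisc, for illustration only. */
struct sketch_qdisc {
	spinlock_t seqlock;	/* held by the CPU currently running the qdisc */
	spinlock_t busylock;	/* backoff lock for contended enqueuers */
};

static void sketch_enqueue(struct sketch_qdisc *q)
{
	/* Hint only: a stale answer just changes who backs off first. */
	bool contended = spin_is_locked(&q->seqlock);

	if (contended)
		spin_lock(&q->busylock);

	spin_lock(&q->seqlock);		/* the real serialization happens here */
	/* ... enqueue/transmit work ... */
	spin_unlock(&q->seqlock);

	if (contended)
		spin_unlock(&q->busylock);
}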
Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/include/asm/qspinlock.h        12
-rw-r--r--  arch/powerpc/include/asm/simple_spinlock.h   3
2 files changed, 1 insertion(+), 14 deletions(-)
diff --git a/arch/powerpc/include/asm/qspinlock.h b/arch/powerpc/include/asm/qspinlock.h
index 3ce1a0bee4fe..b052b0624816 100644
--- a/arch/powerpc/include/asm/qspinlock.h
+++ b/arch/powerpc/include/asm/qspinlock.h
@@ -44,18 +44,6 @@ static __always_inline void queued_spin_lock(struct qspinlock *lock)
 }
 #define queued_spin_lock queued_spin_lock
 
-static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
-{
-	/*
-	 * This barrier was added to simple spinlocks by commit 51d7d5205d338,
-	 * but it should now be possible to remove it, asm arm64 has done with
-	 * commit c6f5d02b6a0f.
-	 */
-	smp_mb();
-	return atomic_read(&lock->val);
-}
-#define queued_spin_is_locked queued_spin_is_locked
-
 #ifdef CONFIG_PARAVIRT_SPINLOCKS
 #define SPIN_THRESHOLD (1<<15) /* not tuned */
 
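With the powerpc override above removed, queued_spin_is_locked() falls back to the generic definition in include/asm-generic/qspinlock.h, which at this point is essentially an unordered read of the lock word, roughly as sketched below (paraphrased for illustration, not a verbatim copy of that header):

static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
	/* Any non-zero value means locked; no barrier is implied. */
	return atomic_read(&lock->val);
}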
diff --git a/arch/powerpc/include/asm/simple_spinlock.h b/arch/powerpc/include/asm/simple_spinlock.h
index da5d40cb8de0..552f325412cc 100644
--- a/arch/powerpc/include/asm/simple_spinlock.h
+++ b/arch/powerpc/include/asm/simple_spinlock.h
@@ -38,8 +38,7 @@ static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
 
 static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
-	smp_mb();
-	return !arch_spin_value_unlocked(*lock);
+	return !arch_spin_value_unlocked(READ_ONCE(*lock));
 }
 
 /*
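The net effect of both hunks is that spin_is_locked() no longer acts as a full barrier on powerpc. A caller that genuinely needs the read of the lock word ordered against other accesses must now say so explicitly, for example along these hypothetical, purely illustrative lines (kernel context assumed):

static bool sketch_lock_held_and_flag_set(spinlock_t *lock, int *flag)
{
	bool locked = spin_is_locked(lock);	/* no ordering implied */

	smp_mb();	/* caller-supplied ordering vs. the *flag read below */
	return locked && READ_ONCE(*flag);
}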