Diffstat (limited to 'include')
 -rw-r--r--  include/asm-generic/atomic-long.h      | 19
 -rw-r--r--  include/asm-generic/barrier.h          | 27
 -rw-r--r--  include/asm-generic/qspinlock.h        |  4
 -rw-r--r--  include/asm-generic/qspinlock_types.h  | 32
 -rw-r--r--  include/linux/atomic.h                 |  2
 -rw-r--r--  include/linux/delayacct.h              |  2
 -rw-r--r--  include/linux/mutex.h                  |  3
 -rw-r--r--  include/linux/spinlock.h               | 18
 8 files changed, 92 insertions, 15 deletions
diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
index 34a028a7bcc5..87d14476edc2 100644
--- a/include/asm-generic/atomic-long.h
+++ b/include/asm-generic/atomic-long.h
@@ -25,6 +25,7 @@ typedef atomic64_t atomic_long_t;
 
 #define ATOMIC_LONG_INIT(i)	ATOMIC64_INIT(i)
 #define ATOMIC_LONG_PFX(x)	atomic64 ## x
+#define ATOMIC_LONG_TYPE	s64
 
 #else
 
@@ -32,6 +33,7 @@ typedef atomic_t atomic_long_t;
 
 #define ATOMIC_LONG_INIT(i)	ATOMIC_INIT(i)
 #define ATOMIC_LONG_PFX(x)	atomic ## x
+#define ATOMIC_LONG_TYPE	int
 
 #endif
 
@@ -90,6 +92,21 @@ ATOMIC_LONG_ADD_SUB_OP(sub, _release)
 #define atomic_long_cmpxchg(l, old, new) \
 	(ATOMIC_LONG_PFX(_cmpxchg)((ATOMIC_LONG_PFX(_t) *)(l), (old), (new)))
 
+
+#define atomic_long_try_cmpxchg_relaxed(l, old, new) \
+	(ATOMIC_LONG_PFX(_try_cmpxchg_relaxed)((ATOMIC_LONG_PFX(_t) *)(l), \
+					   (ATOMIC_LONG_TYPE *)(old), (ATOMIC_LONG_TYPE)(new)))
+#define atomic_long_try_cmpxchg_acquire(l, old, new) \
+	(ATOMIC_LONG_PFX(_try_cmpxchg_acquire)((ATOMIC_LONG_PFX(_t) *)(l), \
+					   (ATOMIC_LONG_TYPE *)(old), (ATOMIC_LONG_TYPE)(new)))
+#define atomic_long_try_cmpxchg_release(l, old, new) \
+	(ATOMIC_LONG_PFX(_try_cmpxchg_release)((ATOMIC_LONG_PFX(_t) *)(l), \
+					   (ATOMIC_LONG_TYPE *)(old), (ATOMIC_LONG_TYPE)(new)))
+#define atomic_long_try_cmpxchg(l, old, new) \
+	(ATOMIC_LONG_PFX(_try_cmpxchg)((ATOMIC_LONG_PFX(_t) *)(l), \
+					   (ATOMIC_LONG_TYPE *)(old), (ATOMIC_LONG_TYPE)(new)))
+
+
 #define atomic_long_xchg_relaxed(v, new) \
 	(ATOMIC_LONG_PFX(_xchg_relaxed)((ATOMIC_LONG_PFX(_t) *)(v), (new)))
 #define atomic_long_xchg_acquire(v, new) \
@@ -244,6 +261,8 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
 #define atomic_long_inc_not_zero(l) \
 	ATOMIC_LONG_PFX(_inc_not_zero)((ATOMIC_LONG_PFX(_t) *)(l))
 
+#define atomic_long_cond_read_relaxed(v, c) \
+	ATOMIC_LONG_PFX(_cond_read_relaxed)((ATOMIC_LONG_PFX(_t) *)(v), (c))
 #define atomic_long_cond_read_acquire(v, c) \
 	ATOMIC_LONG_PFX(_cond_read_acquire)((ATOMIC_LONG_PFX(_t) *)(v), (c))
 
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index 29458bbb2fa0..2cafdbb9ae4c 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -221,18 +221,17 @@ do {									\
 #endif
 
 /**
- * smp_cond_load_acquire() - (Spin) wait for cond with ACQUIRE ordering
+ * smp_cond_load_relaxed() - (Spin) wait for cond with no ordering guarantees
  * @ptr: pointer to the variable to wait on
  * @cond: boolean expression to wait for
  *
- * Equivalent to using smp_load_acquire() on the condition variable but employs
- * the control dependency of the wait to reduce the barrier on many platforms.
+ * Equivalent to using READ_ONCE() on the condition variable.
  *
  * Due to C lacking lambda expressions we load the value of *ptr into a
  * pre-named variable @VAL to be used in @cond.
  */
-#ifndef smp_cond_load_acquire
-#define smp_cond_load_acquire(ptr, cond_expr) ({		\
+#ifndef smp_cond_load_relaxed
+#define smp_cond_load_relaxed(ptr, cond_expr) ({		\
 	typeof(ptr) __PTR = (ptr);				\
 	typeof(*ptr) VAL;					\
 	for (;;) {						\
@@ -241,10 +240,26 @@ do {									\
 			break;					\
 		cpu_relax();					\
 	}							\
-	smp_acquire__after_ctrl_dep();				\
 	VAL;							\
 })
 #endif
 
+/**
+ * smp_cond_load_acquire() - (Spin) wait for cond with ACQUIRE ordering
+ * @ptr: pointer to the variable to wait on
+ * @cond: boolean expression to wait for
+ *
+ * Equivalent to using smp_load_acquire() on the condition variable but employs
+ * the control dependency of the wait to reduce the barrier on many platforms.
+ */
+#ifndef smp_cond_load_acquire
+#define smp_cond_load_acquire(ptr, cond_expr) ({		\
+	typeof(*ptr) _val;					\
+	_val = smp_cond_load_relaxed(ptr, cond_expr);		\
+	smp_acquire__after_ctrl_dep();				\
+	_val;							\
+})
+#endif
+
 #endif /* !__ASSEMBLY__ */
 #endif /* __ASM_GENERIC_BARRIER_H */
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index b37b4ad7eb94..9cc457597ddf 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -26,7 +26,6 @@
  * @lock: Pointer to queued spinlock structure
  * Return: 1 if it is locked, 0 otherwise
  */
-#ifndef queued_spin_is_locked
 static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
 {
 	/*
@@ -35,7 +34,6 @@ static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
 	 */
 	return atomic_read(&lock->val);
 }
-#endif
 
 /**
  * queued_spin_value_unlocked - is the spinlock structure unlocked?
@@ -100,7 +98,7 @@ static __always_inline void queued_spin_unlock(struct qspinlock *lock)
 	/*
 	 * unlock() needs release semantics:
 	 */
-	(void)atomic_sub_return_release(_Q_LOCKED_VAL, &lock->val);
+	smp_store_release(&lock->locked, 0);
 }
 #endif
 
diff --git a/include/asm-generic/qspinlock_types.h b/include/asm-generic/qspinlock_types.h
index 034acd0c4956..0763f065b975 100644
--- a/include/asm-generic/qspinlock_types.h
+++ b/include/asm-generic/qspinlock_types.h
@@ -29,13 +29,41 @@
 #endif
 
 typedef struct qspinlock {
-	atomic_t	val;
+	union {
+		atomic_t val;
+
+		/*
+		 * By using the whole 2nd least significant byte for the
+		 * pending bit, we can allow better optimization of the lock
+		 * acquisition for the pending bit holder.
+		 */
+#ifdef __LITTLE_ENDIAN
+		struct {
+			u8	locked;
+			u8	pending;
+		};
+		struct {
+			u16	locked_pending;
+			u16	tail;
+		};
+#else
+		struct {
+			u16	tail;
+			u16	locked_pending;
+		};
+		struct {
+			u8	reserved[2];
+			u8	pending;
+			u8	locked;
+		};
+#endif
+	};
 } arch_spinlock_t;
 
 /*
  * Initializier
  */
-#define	__ARCH_SPIN_LOCK_UNLOCKED	{ ATOMIC_INIT(0) }
+#define	__ARCH_SPIN_LOCK_UNLOCKED	{ .val = ATOMIC_INIT(0) }
 
 /*
  * Bitfields in the atomic value:
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index 8b276fd9a127..01ce3997cb42 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -654,6 +654,7 @@ static inline int atomic_dec_if_positive(atomic_t *v)
 }
 #endif
 
+#define atomic_cond_read_relaxed(v, c)	smp_cond_load_relaxed(&(v)->counter, (c))
 #define atomic_cond_read_acquire(v, c)	smp_cond_load_acquire(&(v)->counter, (c))
 
 #ifdef CONFIG_GENERIC_ATOMIC64
@@ -1075,6 +1076,7 @@ static inline long long atomic64_fetch_andnot_release(long long i, atomic64_t *v
 }
 #endif
 
+#define atomic64_cond_read_relaxed(v, c)	smp_cond_load_relaxed(&(v)->counter, (c))
 #define atomic64_cond_read_acquire(v, c)	smp_cond_load_acquire(&(v)->counter, (c))
 
 #include <asm-generic/atomic-long.h>
diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h
index 5e335b6203f4..e6c0448ebcc7 100644
--- a/include/linux/delayacct.h
+++ b/include/linux/delayacct.h
@@ -29,7 +29,7 @@
 
 #ifdef CONFIG_TASK_DELAY_ACCT
 struct task_delay_info {
-	spinlock_t	lock;
+	raw_spinlock_t	lock;
 	unsigned int	flags;	/* Private per-task flags */
 
 	/* For each stat XXX, add following, aligned appropriately
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 14bc0d5d0ee5..3093dd162424 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -146,9 +146,6 @@ extern void __mutex_init(struct mutex *lock, const char *name,
  */
 static inline bool mutex_is_locked(struct mutex *lock)
 {
-	/*
-	 * XXX think about spin_is_locked
-	 */
 	return __mutex_owner(lock) != NULL;
 }
 
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 4894d322d258..1e8a46435838 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -380,6 +380,24 @@ static __always_inline int spin_trylock_irq(spinlock_t *lock)
 	raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
 })
 
+/**
+ * spin_is_locked() - Check whether a spinlock is locked.
+ * @lock: Pointer to the spinlock.
+ *
+ * This function is NOT required to provide any memory ordering
+ * guarantees; it could be used for debugging purposes or, when
+ * additional synchronization is needed, accompanied with other
+ * constructs (memory barriers) enforcing the synchronization.
+ *
+ * Returns: 1 if @lock is locked, 0 otherwise.
+ *
+ * Note that the function only tells you that the spinlock is
+ * seen to be locked, not that it is locked on your CPU.
+ *
+ * Further, on CONFIG_SMP=n builds with CONFIG_DEBUG_SPINLOCK=n,
+ * the return value is always 0 (see include/linux/spinlock_up.h).
+ * Therefore you should not rely heavily on the return value.
+ */
 static __always_inline int spin_is_locked(spinlock_t *lock)
 {
 	return raw_spin_is_locked(&lock->rlock);
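
The atomic_long_try_cmpxchg*() wrappers added in the asm-generic/atomic-long.h hunk follow the usual try_cmpxchg() convention: they return true on success and, on failure, write the value actually observed back through the "old" pointer. A minimal usage sketch under that assumption follows; the helper name is hypothetical and not part of this diff.

#include <linux/atomic.h>

/*
 * Hypothetical example, not part of the patch: add "delta" with a
 * try_cmpxchg loop. On failure, "old" is refreshed with the current
 * value of the counter, so no explicit re-read is needed.
 */
static void example_atomic_long_add(atomic_long_t *counter, long delta)
{
	long old = atomic_long_read(counter);

	do {
		/* nothing to recompute: "old" already holds the latest value */
	} while (!atomic_long_try_cmpxchg_relaxed(counter, &old, old + delta));
}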
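
The barrier.h change splits the spin-wait primitive: smp_cond_load_relaxed() spins with only READ_ONCE() semantics, while smp_cond_load_acquire() is now built on top of it and adds smp_acquire__after_ctrl_dep(). Together with the new atomic_cond_read_relaxed()/atomic64_cond_read_relaxed() wrappers in linux/atomic.h, callers can pick exactly the ordering they need. A rough usage sketch; the struct and functions below are made up for illustration and assume the producer publishes with smp_store_release().

#include <linux/atomic.h>

/*
 * Hypothetical message published by another CPU; the producer is assumed
 * to write ->data first and then set ->ready with smp_store_release().
 */
struct example_msg {
	int	data;
	int	ready;
};

static int example_consume(struct example_msg *m)
{
	/*
	 * Spin until ->ready becomes non-zero. The ACQUIRE ordering of
	 * smp_cond_load_acquire() guarantees the later read of ->data
	 * observes the producer's write.
	 */
	smp_cond_load_acquire(&m->ready, VAL != 0);

	return m->data;
}

static void example_wait_until_zero(atomic_t *v)
{
	/* No ordering needed here: expands to smp_cond_load_relaxed(). */
	atomic_cond_read_relaxed(v, VAL == 0);
}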
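
The union introduced in qspinlock_types.h is what makes the simplified queued_spin_unlock() legal: on both byte orders the "locked" field overlays the least significant byte of "val" (offset 0 on little-endian, offset 3 on big-endian), so a release store of 0 to that byte drops _Q_LOCKED_VAL without an atomic read-modify-write. The compile-time sketch below captures that layout assumption; it is illustrative only, not part of the patch, and assumes an architecture that uses queued spinlocks.

#include <linux/spinlock.h>
#include <linux/build_bug.h>
#include <linux/stddef.h>
#include <asm/byteorder.h>

/*
 * Illustrative check only: "locked" must alias the byte of "val" that
 * carries _Q_LOCKED_VAL, and the union must not grow the lock word.
 */
static inline void example_qspinlock_layout_check(void)
{
#ifdef __LITTLE_ENDIAN
	BUILD_BUG_ON(offsetof(struct qspinlock, locked) != 0);
#else
	BUILD_BUG_ON(offsetof(struct qspinlock, locked) != 3);
#endif
	BUILD_BUG_ON(sizeof(struct qspinlock) != sizeof(atomic_t));
}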