author     Linus Torvalds <torvalds@linux-foundation.org>  2021-06-28 11:45:29 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2021-06-28 11:45:29 -0700
commit     a15286c63d113d4296c58867994cd266a28f5d6d (patch)
tree       aaebbc86918c0c1943c26115d5bb1dd6c2fc2d9b /arch/arc
parent     b89c07dea16137696d0f2d479ef665ef7c1022ab (diff)
parent     0e8a89d49d45197770f2e57fb15f1bc9ded96eb0 (diff)
download   linux-a15286c63d113d4296c58867994cd266a28f5d6d.tar.bz2
Merge tag 'locking-core-2021-06-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
- Core locking & atomics:
- Convert all architectures to ARCH_ATOMIC: move every architecture
over to ARCH_ATOMIC, then remove ARCH_ATOMIC itself together with all
the transitory facilities and #ifdefs.
The conversion brought a large reduction in complexity:
63 files changed, 756 insertions(+), 4094 deletions(-)
(A sketch of the resulting atomic_*() -> arch_atomic_*() layering
follows this list.)
- Self-test enhancements
- Futexes:
- Add the new FUTEX_LOCK_PI2 ABI, a variant of FUTEX_LOCK_PI that does
not set FLAGS_CLOCKRT, i.e. it times out against CLOCK_MONOTONIC
rather than CLOCK_REALTIME. (A user-space usage sketch follows the
commit list below.)
[ The temptation to repurpose FUTEX_LOCK_PI's implicit setting of
FLAGS_CLOCKRT & invert the flag's meaning to avoid having to
introduce a new variant was resisted successfully. ]
- Enhance futex self-tests
- Lockdep:
- Fix dependency path printouts
- Optimize trace saving
- Broaden & fix wait-context checks
- Misc cleanups and fixes.
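To make the ARCH_ATOMIC item concrete, here is a minimal sketch (not the
verbatim kernel code) of the layering the conversion leaves behind: each
architecture supplies arch_atomic_*() operations, and the generic
instrumented layer generates the atomic_*() API on top of them, adding
sanitizer instrumentation (simplified from the wrappers generated in
include/asm-generic/atomic-instrumented.h):

	/* Architecture code, e.g. arch/arc/include/asm/atomic.h, now defines
	 * only the arch_-prefixed operations: */
	#define arch_atomic_read(v)	READ_ONCE((v)->counter)

	/* The generic layer wraps each arch op with KASAN/KCSAN instrumentation
	 * and exposes it under the old name (simplified sketch): */
	static __always_inline int
	atomic_read(const atomic_t *v)
	{
		instrument_atomic_read(v, sizeof(*v));
		return arch_atomic_read(v);
	}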
* tag 'locking-core-2021-06-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (52 commits)
locking/lockdep: Correct the description error for check_redundant()
futex: Provide FUTEX_LOCK_PI2 to support clock selection
futex: Prepare futex_lock_pi() for runtime clock selection
lockdep/selftest: Remove wait-type RCU_CALLBACK tests
lockdep/selftests: Fix selftests vs PROVE_RAW_LOCK_NESTING
lockdep: Fix wait-type for empty stack
locking/selftests: Add a selftest for check_irq_usage()
lockding/lockdep: Avoid to find wrong lock dep path in check_irq_usage()
locking/lockdep: Remove the unnecessary trace saving
locking/lockdep: Fix the dep path printing for backwards BFS
selftests: futex: Add futex compare requeue test
selftests: futex: Add futex wait test
seqlock: Remove trailing semicolon in macros
locking/lockdep: Reduce LOCKDEP dependency list
locking/lockdep,doc: Improve readability of the block matrix
locking/atomics: atomic-instrumented: simplify ifdeffery
locking/atomic: delete !ARCH_ATOMIC remnants
locking/atomic: xtensa: move to ARCH_ATOMIC
locking/atomic: sparc: move to ARCH_ATOMIC
locking/atomic: sh: move to ARCH_ATOMIC
...
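As a rough illustration of the new FUTEX_LOCK_PI2 ABI mentioned above, the
following hypothetical user-space sketch acquires an uncontended PI futex
with an absolute CLOCK_MONOTONIC deadline. It is illustration only: error
handling is minimal, and the fallback FUTEX_LOCK_PI2 value is an assumption
for headers that predate this release.

	#include <errno.h>
	#include <linux/futex.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <sys/syscall.h>
	#include <time.h>
	#include <unistd.h>

	#ifndef FUTEX_LOCK_PI2
	#define FUTEX_LOCK_PI2 13	/* assumed value if <linux/futex.h> is older */
	#endif

	static uint32_t futex_word;	/* PI futex word; 0 means unlocked */

	int main(void)
	{
		struct timespec deadline;

		/* FUTEX_LOCK_PI2 takes an absolute timeout; without the
		 * FUTEX_CLOCK_REALTIME flag it is measured against
		 * CLOCK_MONOTONIC, which is the point of the new operation. */
		clock_gettime(CLOCK_MONOTONIC, &deadline);
		deadline.tv_sec += 1;

		if (syscall(SYS_futex, &futex_word, FUTEX_LOCK_PI2, 0,
			    &deadline, NULL, 0) == -1)
			perror("FUTEX_LOCK_PI2");	/* e.g. ENOSYS on older kernels */
		else
			puts("acquired PI futex");
		return 0;
	}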
Diffstat (limited to 'arch/arc')
-rw-r--r--  arch/arc/include/asm/atomic.h   | 60
-rw-r--r--  arch/arc/include/asm/cmpxchg.h  | 10
2 files changed, 35 insertions(+), 35 deletions(-)
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index 5afc79c9b2f5..7a36d79b5b2f 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -14,14 +14,14 @@
 #include <asm/barrier.h>
 #include <asm/smp.h>

-#define atomic_read(v)	READ_ONCE((v)->counter)
+#define arch_atomic_read(v)	READ_ONCE((v)->counter)

 #ifdef CONFIG_ARC_HAS_LLSC

-#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
+#define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

 #define ATOMIC_OP(op, c_op, asm_op)				\
-static inline void atomic_##op(int i, atomic_t *v)		\
+static inline void arch_atomic_##op(int i, atomic_t *v)	\
 {								\
 	unsigned int val;					\
 								\
@@ -37,7 +37,7 @@ static inline void atomic_##op(int i, atomic_t *v)	\
 }								\

 #define ATOMIC_OP_RETURN(op, c_op, asm_op)			\
-static inline int atomic_##op##_return(int i, atomic_t *v)	\
+static inline int arch_atomic_##op##_return(int i, atomic_t *v)	\
 {								\
 	unsigned int val;					\
 								\
@@ -63,7 +63,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v)	\
 }

 #define ATOMIC_FETCH_OP(op, c_op, asm_op)			\
-static inline int atomic_fetch_##op(int i, atomic_t *v)	\
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v)	\
 {								\
 	unsigned int val, orig;					\
 								\
@@ -94,11 +94,11 @@ static inline int atomic_fetch_##op(int i, atomic_t *v)	\
 #ifndef CONFIG_SMP

 /* violating atomic_xxx API locking protocol in UP for optimization sake */
-#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
+#define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

 #else

-static inline void atomic_set(atomic_t *v, int i)
+static inline void arch_atomic_set(atomic_t *v, int i)
 {
 	/*
 	 * Independent of hardware support, all of the atomic_xxx() APIs need
@@ -116,7 +116,7 @@ static inline void atomic_set(atomic_t *v, int i)
 	atomic_ops_unlock(flags);
 }

-#define atomic_set_release(v, i)	atomic_set((v), (i))
+#define arch_atomic_set_release(v, i)	arch_atomic_set((v), (i))

 #endif

@@ -126,7 +126,7 @@ static inline void atomic_set(atomic_t *v, int i)
  */

 #define ATOMIC_OP(op, c_op, asm_op)				\
-static inline void atomic_##op(int i, atomic_t *v)		\
+static inline void arch_atomic_##op(int i, atomic_t *v)	\
 {								\
 	unsigned long flags;					\
 								\
@@ -136,7 +136,7 @@ static inline void atomic_##op(int i, atomic_t *v)	\
 }

 #define ATOMIC_OP_RETURN(op, c_op, asm_op)			\
-static inline int atomic_##op##_return(int i, atomic_t *v)	\
+static inline int arch_atomic_##op##_return(int i, atomic_t *v)	\
 {								\
 	unsigned long flags;					\
 	unsigned long temp;					\
@@ -154,7 +154,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v)	\
 }

 #define ATOMIC_FETCH_OP(op, c_op, asm_op)			\
-static inline int atomic_fetch_##op(int i, atomic_t *v)	\
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v)	\
 {								\
 	unsigned long flags;					\
 	unsigned long orig;					\
@@ -180,9 +180,6 @@ static inline int atomic_fetch_##op(int i, atomic_t *v)	\
 ATOMIC_OPS(add, +=, add)
 ATOMIC_OPS(sub, -=, sub)

-#define atomic_andnot		atomic_andnot
-#define atomic_fetch_andnot	atomic_fetch_andnot
-
 #undef ATOMIC_OPS
 #define ATOMIC_OPS(op, c_op, asm_op)				\
 	ATOMIC_OP(op, c_op, asm_op)				\
@@ -193,6 +190,9 @@ ATOMIC_OPS(andnot, &= ~, bic)
 ATOMIC_OPS(or, |=, or)
 ATOMIC_OPS(xor, ^=, xor)

+#define arch_atomic_andnot		arch_atomic_andnot
+#define arch_atomic_fetch_andnot	arch_atomic_fetch_andnot
+
 #undef ATOMIC_OPS
 #undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN
@@ -220,7 +220,7 @@ typedef struct {

 #define ATOMIC64_INIT(a)	{ (a) }

-static inline s64 atomic64_read(const atomic64_t *v)
+static inline s64 arch_atomic64_read(const atomic64_t *v)
 {
 	s64 val;
@@ -232,7 +232,7 @@ static inline s64 atomic64_read(const atomic64_t *v)
 	return val;
 }

-static inline void atomic64_set(atomic64_t *v, s64 a)
+static inline void arch_atomic64_set(atomic64_t *v, s64 a)
 {
 	/*
 	 * This could have been a simple assignment in "C" but would need
@@ -253,7 +253,7 @@ static inline void atomic64_set(atomic64_t *v, s64 a)
 }

 #define ATOMIC64_OP(op, op1, op2)				\
-static inline void atomic64_##op(s64 a, atomic64_t *v)		\
+static inline void arch_atomic64_##op(s64 a, atomic64_t *v)	\
 {								\
 	s64 val;						\
 								\
@@ -270,7 +270,7 @@ static inline void atomic64_##op(s64 a, atomic64_t *v)	\
 }								\

 #define ATOMIC64_OP_RETURN(op, op1, op2)			\
-static inline s64 atomic64_##op##_return(s64 a, atomic64_t *v)	\
+static inline s64 arch_atomic64_##op##_return(s64 a, atomic64_t *v)	\
 {								\
 	s64 val;						\
 								\
@@ -293,7 +293,7 @@ static inline s64 atomic64_##op##_return(s64 a, atomic64_t *v)	\
 }

 #define ATOMIC64_FETCH_OP(op, op1, op2)				\
-static inline s64 atomic64_fetch_##op(s64 a, atomic64_t *v)	\
+static inline s64 arch_atomic64_fetch_##op(s64 a, atomic64_t *v)	\
 {								\
 	s64 val, orig;						\
 								\
@@ -320,9 +320,6 @@ static inline s64 atomic64_fetch_##op(s64 a, atomic64_t *v)	\
 	ATOMIC64_OP_RETURN(op, op1, op2)			\
 	ATOMIC64_FETCH_OP(op, op1, op2)

-#define atomic64_andnot		atomic64_andnot
-#define atomic64_fetch_andnot	atomic64_fetch_andnot
-
 ATOMIC64_OPS(add, add.f, adc)
 ATOMIC64_OPS(sub, sub.f, sbc)
 ATOMIC64_OPS(and, and, and)
@@ -330,13 +327,16 @@ ATOMIC64_OPS(andnot, bic, bic)
 ATOMIC64_OPS(or, or, or)
 ATOMIC64_OPS(xor, xor, xor)

+#define arch_atomic64_andnot		arch_atomic64_andnot
+#define arch_atomic64_fetch_andnot	arch_atomic64_fetch_andnot
+
 #undef ATOMIC64_OPS
 #undef ATOMIC64_FETCH_OP
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP

 static inline s64
-atomic64_cmpxchg(atomic64_t *ptr, s64 expected, s64 new)
+arch_atomic64_cmpxchg(atomic64_t *ptr, s64 expected, s64 new)
 {
 	s64 prev;
@@ -358,7 +358,7 @@ atomic64_cmpxchg(atomic64_t *ptr, s64 expected, s64 new)
 	return prev;
 }

-static inline s64 atomic64_xchg(atomic64_t *ptr, s64 new)
+static inline s64 arch_atomic64_xchg(atomic64_t *ptr, s64 new)
 {
 	s64 prev;
@@ -379,14 +379,14 @@ static inline s64 atomic64_xchg(atomic64_t *ptr, s64 new)
 }

 /**
- * atomic64_dec_if_positive - decrement by 1 if old value positive
+ * arch_atomic64_dec_if_positive - decrement by 1 if old value positive
  * @v: pointer of type atomic64_t
  *
  * The function returns the old value of *v minus 1, even if
  * the atomic variable, v, was not decremented.
  */
-static inline s64 atomic64_dec_if_positive(atomic64_t *v)
+static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
 {
 	s64 val;
@@ -408,10 +408,10 @@ static inline s64 atomic64_dec_if_positive(atomic64_t *v)
 	return val;
 }

-#define atomic64_dec_if_positive atomic64_dec_if_positive
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive

 /**
- * atomic64_fetch_add_unless - add unless the number is a given value
+ * arch_atomic64_fetch_add_unless - add unless the number is a given value
  * @v: pointer of type atomic64_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
@@ -419,7 +419,7 @@ static inline s64 atomic64_dec_if_positive(atomic64_t *v)
  * Atomically adds @a to @v, if it was not @u.
  * Returns the old value of @v
  */
-static inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+static inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
 {
 	s64 old, temp;
@@ -443,7 +443,7 @@ static inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
 	return old;
 }

-#define atomic64_fetch_add_unless atomic64_fetch_add_unless
+#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless

 #endif	/* !CONFIG_GENERIC_ATOMIC64 */
diff --git a/arch/arc/include/asm/cmpxchg.h b/arch/arc/include/asm/cmpxchg.h
index dfeffa25499b..d42917e803e1 100644
--- a/arch/arc/include/asm/cmpxchg.h
+++ b/arch/arc/include/asm/cmpxchg.h
@@ -63,7 +63,7 @@ __cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)

 #endif

-#define cmpxchg(ptr, o, n) ({				\
+#define arch_cmpxchg(ptr, o, n) ({			\
 	(typeof(*(ptr)))__cmpxchg((ptr),		\
 				  (unsigned long)(o),	\
 				  (unsigned long)(n));	\
@@ -75,7 +75,7 @@ __cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
 * !LLSC: cmpxchg() has to use an external lock atomic_ops_lock to guarantee
 * semantics, and this lock also happens to be used by atomic_*()
 */
-#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define arch_atomic_cmpxchg(v, o, n) ((int)arch_cmpxchg(&((v)->counter), (o), (n)))

 /*
@@ -123,7 +123,7 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,

 #if !defined(CONFIG_ARC_HAS_LLSC) && defined(CONFIG_SMP)

-#define xchg(ptr, with)			\
+#define arch_xchg(ptr, with)		\
 ({					\
 	unsigned long flags;		\
 	typeof(*(ptr)) old_val;		\
@@ -136,7 +136,7 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,

 #else

-#define xchg(ptr, with) _xchg(ptr, with)
+#define arch_xchg(ptr, with) _xchg(ptr, with)

 #endif

@@ -153,6 +153,6 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
 * can't be clobbered by others. Thus no serialization required when
 * atomic_xchg is involved.
 */
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))

 #endif
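For reference, the following is a user-space approximation of the
fetch_add_unless semantics documented in the hunk above (add @a to @v unless
@v equals @u, returning the old value), written with GCC/Clang __atomic
builtins instead of the ARC LL/SC assembly the kernel uses; it is an
illustration of the contract, not the kernel implementation.

	#include <stdint.h>
	#include <stdio.h>

	/* Add 'a' to '*v' unless '*v' == 'u'; return the value observed before
	 * the add. Implemented as a compare-and-swap retry loop. */
	static int64_t fetch_add_unless(int64_t *v, int64_t a, int64_t u)
	{
		int64_t old = __atomic_load_n(v, __ATOMIC_RELAXED);

		while (old != u &&
		       !__atomic_compare_exchange_n(v, &old, old + a, 1,
						    __ATOMIC_SEQ_CST, __ATOMIC_RELAXED))
			;	/* 'old' was reloaded by the failed CAS; retry */

		return old;
	}

	int main(void)
	{
		int64_t refcount = 1;

		printf("%lld\n", (long long)fetch_add_unless(&refcount, 1, 0)); /* 1: incremented */
		refcount = 0;
		printf("%lld\n", (long long)fetch_add_unless(&refcount, 1, 0)); /* 0: left untouched */
		return 0;
	}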