author | Linus Torvalds <torvalds@linux-foundation.org> | 2018-08-13 12:23:39 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-08-13 12:23:39 -0700
commit | de5d1b39ea0b38a9f4dfb08966042b7b91e2df30 (patch)
tree | 3591bdac4fe6756b4e3dc68b2ed1c792c4104218 /arch/arc/include
parent | 1c594774283a7cfe6dc0f8ffdfb2dbfc497502c4 (diff)
parent | fd2efaa4eb5317c3a86357a83a7d456a1b86a0ac (diff)
download | linux-de5d1b39ea0b38a9f4dfb08966042b7b91e2df30.tar.bz2
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking/atomics update from Thomas Gleixner:
"The locking, atomics and memory model brains delivered:
- A larger update to the atomics code which reworks the ordering
barriers, consolidates the atomic primitives, provides the new
atomic64_fetch_add_unless() primitive and cleans up the include
hell.
- Simplify cmpxchg() instrumentation and add instrumentation for
xchg() and cmpxchg_double().
- Updates to the memory model and documentation"
* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (48 commits)
locking/atomics: Rework ordering barriers
locking/atomics: Instrument cmpxchg_double*()
locking/atomics: Instrument xchg()
locking/atomics: Simplify cmpxchg() instrumentation
locking/atomics/x86: Reduce arch_cmpxchg64*() instrumentation
tools/memory-model: Rename litmus tests to comply to norm7
tools/memory-model/Documentation: Fix typo, smb->smp
sched/Documentation: Update wake_up() & co. memory-barrier guarantees
locking/spinlock, sched/core: Clarify requirements for smp_mb__after_spinlock()
sched/core: Use smp_mb() in wake_woken_function()
tools/memory-model: Add informal LKMM documentation to MAINTAINERS
locking/atomics/Documentation: Describe atomic_set() as a write operation
tools/memory-model: Make scripts executable
tools/memory-model: Remove ACCESS_ONCE() from model
tools/memory-model: Remove ACCESS_ONCE() from recipes
locking/memory-barriers.txt/kokr: Update Korean translation to fix broken DMA vs. MMIO ordering example
MAINTAINERS: Add Daniel Lustig as an LKMM reviewer
tools/memory-model: Fix ISA2+pooncelock+pooncelock+pombonce name
tools/memory-model: Add litmus test for full multicopy atomicity
locking/refcount: Always allow checked forms
...
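A note on the new primitive mentioned in the pull message above: atomic64_fetch_add_unless(v, a, u) atomically adds a to v unless v currently equals u, and returns the value v held before the operation (rather than a 1/0 success flag). Below is a minimal sketch of those semantics in plain C, written as a cmpxchg loop; it illustrates the shape of the generic fallback, not the ARC LLOCKD/SCONDD implementation in the diff further down, and the sketch_ name is made up for this example.

#include <linux/atomic.h>

/* Illustrative only: add @a to @v unless @v == @u; return the old value. */
static inline long long sketch_atomic64_fetch_add_unless(atomic64_t *v,
							  long long a,
							  long long u)
{
	long long c = atomic64_read(v);

	/* Retry until we either observe @u or our cmpxchg wins the race. */
	while (c != u) {
		long long old = atomic64_cmpxchg(v, c, c + a);

		if (old == c)
			break;		/* exchange succeeded; c is the prior value */
		c = old;		/* lost a race; retry with the fresh value */
	}

	return c;
}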
Diffstat (limited to 'arch/arc/include')
-rw-r--r-- | arch/arc/include/asm/atomic.h | 86
1 file changed, 17 insertions(+), 69 deletions(-)
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index 11859287c52a..4e0072730241 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -187,7 +187,8 @@ static inline int atomic_fetch_##op(int i, atomic_t *v)	\
 ATOMIC_OPS(add, +=, add)
 ATOMIC_OPS(sub, -=, sub)
 
-#define atomic_andnot atomic_andnot
+#define atomic_andnot		atomic_andnot
+#define atomic_fetch_andnot	atomic_fetch_andnot
 
 #undef ATOMIC_OPS
 #define ATOMIC_OPS(op, c_op, asm_op)					\
@@ -296,8 +297,6 @@ ATOMIC_OPS(add, +=, CTOP_INST_AADD_DI_R2_R2_R3)
 	ATOMIC_FETCH_OP(op, c_op, asm_op)
 
 ATOMIC_OPS(and, &=, CTOP_INST_AAND_DI_R2_R2_R3)
-#define atomic_andnot(mask, v) atomic_and(~(mask), (v))
-#define atomic_fetch_andnot(mask, v) atomic_fetch_and(~(mask), (v))
 ATOMIC_OPS(or, |=, CTOP_INST_AOR_DI_R2_R2_R3)
 ATOMIC_OPS(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)
 
@@ -308,48 +307,6 @@ ATOMIC_OPS(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
-/**
- * __atomic_add_unless - add unless the number is a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as it was not @u.
- * Returns the old value of @v
- */
-#define __atomic_add_unless(v, a, u)					\
-({									\
-	int c, old;							\
-									\
-	/*								\
-	 * Explicit full memory barrier needed before/after as		\
-	 * LLOCK/SCOND thmeselves don't provide any such semantics	\
-	 */								\
-	smp_mb();							\
-									\
-	c = atomic_read(v);						\
-	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c)\
-		c = old;						\
-									\
-	smp_mb();							\
-									\
-	c;								\
-})
-
-#define atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)
-
-#define atomic_inc(v)			atomic_add(1, v)
-#define atomic_dec(v)			atomic_sub(1, v)
-
-#define atomic_inc_and_test(v)		(atomic_add_return(1, v) == 0)
-#define atomic_dec_and_test(v)		(atomic_sub_return(1, v) == 0)
-#define atomic_inc_return(v)		atomic_add_return(1, (v))
-#define atomic_dec_return(v)		atomic_sub_return(1, (v))
-#define atomic_sub_and_test(i, v)	(atomic_sub_return(i, v) == 0)
-
-#define atomic_add_negative(i, v)	(atomic_add_return(i, v) < 0)
-
-
 #ifdef CONFIG_GENERIC_ATOMIC64
 
 #include <asm-generic/atomic64.h>
@@ -472,7 +429,8 @@ static inline long long atomic64_fetch_##op(long long a, atomic64_t *v)	\
 	ATOMIC64_OP_RETURN(op, op1, op2)				\
 	ATOMIC64_FETCH_OP(op, op1, op2)
 
-#define atomic64_andnot atomic64_andnot
+#define atomic64_andnot		atomic64_andnot
+#define atomic64_fetch_andnot	atomic64_fetch_andnot
 
 ATOMIC64_OPS(add, add.f, adc)
 ATOMIC64_OPS(sub, sub.f, sbc)
@@ -559,53 +517,43 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
 
 	return val;
 }
+#define atomic64_dec_if_positive atomic64_dec_if_positive
 
 /**
- * atomic64_add_unless - add unless the number is a given value
+ * atomic64_fetch_add_unless - add unless the number is a given value
  * @v: pointer of type atomic64_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
  *
- * if (v != u) { v += a; ret = 1} else {ret = 0}
- * Returns 1 iff @v was not @u (i.e. if add actually happened)
+ * Atomically adds @a to @v, if it was not @u.
+ * Returns the old value of @v
 */
-static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+static inline long long atomic64_fetch_add_unless(atomic64_t *v, long long a,
+						  long long u)
 {
-	long long val;
-	int op_done;
+	long long old, temp;
 
 	smp_mb();
 
 	__asm__ __volatile__(
 	"1:	llockd  %0, [%2]	\n"
-	"	mov	%1, 1	\n"
 	"	brne	%L0, %L4, 2f	# continue to add since v != u \n"
 	"	breq.d	%H0, %H4, 3f	# return since v == u \n"
-	"	mov	%1, 0	\n"
 	"2:				\n"
-	"	add.f   %L0, %L0, %L3	\n"
-	"	adc     %H0, %H0, %H3	\n"
-	"	scondd  %0, [%2]	\n"
+	"	add.f   %L1, %L0, %L3	\n"
+	"	adc     %H1, %H0, %H3	\n"
+	"	scondd  %1, [%2]	\n"
 	"	bnz     1b		\n"
 	"3:				\n"
-	: "=&r"(val), "=&r" (op_done)
+	: "=&r"(old), "=&r" (temp)
 	: "r"(&v->counter), "r"(a), "r"(u)
 	: "cc");	/* memory clobber comes from smp_mb() */
 
 	smp_mb();
 
-	return op_done;
+	return old;
 }
-
-#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
-#define atomic64_inc(v)			atomic64_add(1LL, (v))
-#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
-#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
-#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
-#define atomic64_dec(v)			atomic64_sub(1LL, (v))
-#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
-#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
-#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)
+#define atomic64_fetch_add_unless atomic64_fetch_add_unless
 
 #endif	/* !CONFIG_GENERIC_ATOMIC64 */
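For reference on how the helpers removed above remain available after this series: the boolean-returning add_unless and inc_not_zero forms can be built generically on top of the fetch_ variant, since the add happened exactly when the returned old value differs from the limit. A rough sketch of that relationship follows (illustrative wrappers, not the actual include/linux/atomic.h text; the sketch_ names are made up for this example).

#include <linux/atomic.h>

/* Boolean add_unless expressed via the fetch_ variant. */
static inline bool sketch_atomic64_add_unless(atomic64_t *v, long long a,
					      long long u)
{
	/* The add took place iff the old value was not @u. */
	return atomic64_fetch_add_unless(v, a, u) != u;
}

static inline bool sketch_atomic64_inc_not_zero(atomic64_t *v)
{
	/* inc_not_zero is just add_unless(v, 1, 0). */
	return sketch_atomic64_add_unless(v, 1LL, 0LL);
}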