From 356701329fb391184618eda7b7fb68cb35271506 Mon Sep 17 00:00:00 2001
From: Mark Rutland <mark.rutland@arm.com>
Date: Thu, 21 Jun 2018 13:13:17 +0100
Subject: atomics/treewide: Make atomic64_fetch_add_unless() optional

Architectures with atomic64_fetch_add_unless() provide a preprocessor
symbol if they do so, and all other architectures have trivial C
implementations of atomic64_add_unless() which are near-identical.

Let's unify the trivial definitions of atomic64_fetch_add_unless() in
<linux/atomic.h>, so that we always have both
atomic64_fetch_add_unless() and atomic64_add_unless() with less
boilerplate code.

This means that atomic64_add_unless() is always implemented in core
code, and the instrumented atomics are updated accordingly.

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Will Deacon
Acked-by: Peter Zijlstra (Intel)
Cc: Boqun Feng
Cc: Linus Torvalds
Cc: Thomas Gleixner
Link: https://lore.kernel.org/lkml/20180621121321.4761-15-mark.rutland@arm.com
Signed-off-by: Ingo Molnar
---
 include/asm-generic/atomic-instrumented.h |  6 ------
 include/linux/atomic.h                    | 26 ++++++++++++++++++++++++--
 2 files changed, 24 insertions(+), 8 deletions(-)

(limited to 'include')

diff --git a/include/asm-generic/atomic-instrumented.h b/include/asm-generic/atomic-instrumented.h
index 444bf2f9d54d..2b487f28ef35 100644
--- a/include/asm-generic/atomic-instrumented.h
+++ b/include/asm-generic/atomic-instrumented.h
@@ -100,12 +100,6 @@ static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u
 	kasan_check_write(v, sizeof(*v));
 	return arch_atomic64_fetch_add_unless(v, a, u);
 }
-#else
-static __always_inline bool atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
-{
-	kasan_check_write(v, sizeof(*v));
-	return arch_atomic64_add_unless(v, a, u);
-}
 #endif
 
 static __always_inline void atomic_inc(atomic_t *v)
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index 3c03de648007..530562ac7909 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -1042,6 +1042,30 @@ static inline int atomic_dec_if_positive(atomic_t *v)
 #define atomic64_try_cmpxchg_release	atomic64_try_cmpxchg
 #endif /* atomic64_try_cmpxchg */
 
+/**
+ * atomic64_fetch_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic64_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, if @v was not already @u.
+ * Returns the original value of @v.
+ */
+#ifndef atomic64_fetch_add_unless
+static inline long long atomic64_fetch_add_unless(atomic64_t *v, long long a,
+						  long long u)
+{
+	long long c = atomic64_read(v);
+
+	do {
+		if (unlikely(c == u))
+			break;
+	} while (!atomic64_try_cmpxchg(v, &c, c + a));
+
+	return c;
+}
+#endif
+
 /**
  * atomic64_add_unless - add unless the number is already a given value
  * @v: pointer of type atomic_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
  *
@@ -1051,12 +1075,10 @@ static inline int atomic_dec_if_positive(atomic_t *v)
  * Atomically adds @a to @v, if @v was not already @u.
  * Returns true if the addition was done.
  */
-#ifdef atomic64_fetch_add_unless
 static inline bool atomic64_add_unless(atomic64_t *v, long long a, long long u)
 {
 	return atomic64_fetch_add_unless(v, a, u) != u;
 }
-#endif
 
 /**
  * atomic64_inc_not_zero - increment unless the number is zero
--
cgit v1.2.3
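
For readers unfamiliar with the idiom, the following standalone C11
sketch (not part of the patch) mirrors the try_cmpxchg loop that the
patch adds to <linux/atomic.h>; fetch_add_unless() and add_unless() are
illustrative names, not kernel API:

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	static long long fetch_add_unless(_Atomic long long *v, long long a,
					  long long u)
	{
		long long c = atomic_load(v);

		do {
			if (c == u)
				break;
			/*
			 * On failure, compare_exchange_weak() reloads c
			 * with the current value, just as
			 * atomic64_try_cmpxchg() updates its old value,
			 * so the loop re-checks against u.
			 */
		} while (!atomic_compare_exchange_weak(v, &c, c + a));

		return c;
	}

	static bool add_unless(_Atomic long long *v, long long a, long long u)
	{
		/* The bool variant falls out of the fetch variant. */
		return fetch_add_unless(v, a, u) != u;
	}

	int main(void)
	{
		_Atomic long long refs = 1;

		printf("%d\n", add_unless(&refs, 1, 0));	/* 1: 1 -> 2 */
		atomic_store(&refs, 0);
		printf("%d\n", add_unless(&refs, 1, 0));	/* 0: 0 stays 0 */
		return 0;
	}

This is why the patch can drop per-architecture atomic64_add_unless()
definitions: once fetch_add_unless() exists, the boolean form is a
one-line wrapper that compares the returned old value against u.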