From 18cc1814d4e7560412c9c8c6d28f9d6782c8b402 Mon Sep 17 00:00:00 2001
From: Mark Rutland <mark.rutland@arm.com>
Date: Thu, 21 Jun 2018 13:13:18 +0100
Subject: atomics/treewide: Make test ops optional

Some of the atomics return the result of a test applied after the atomic
operation, and almost all architectures implement these as trivial
wrappers around the underlying atomic. Specifically:

 * <atomic>_inc_and_test(v)    is (<atomic>_inc_return(v)    == 0)
 * <atomic>_dec_and_test(v)    is (<atomic>_dec_return(v)    == 0)
 * <atomic>_sub_and_test(i, v) is (<atomic>_sub_return(i, v) == 0)
 * <atomic>_add_negative(i, v) is (<atomic>_add_return(i, v) <  0)

Rather than have these definitions duplicated in all architectures, with
minor inconsistencies in formatting and documentation, let's make these
operations optional, with default fallbacks as above. Implementations
must now provide a preprocessor symbol.

The instrumented atomics are updated accordingly.

Both x86 and m68k have custom implementations, which are left as-is,
given preprocessor symbols to avoid being overridden.

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Will Deacon
Acked-by: Geert Uytterhoeven
Acked-by: Peter Zijlstra (Intel)
Acked-by: Palmer Dabbelt
Cc: Boqun Feng
Cc: Linus Torvalds
Cc: Thomas Gleixner
Link: https://lore.kernel.org/lkml/20180621121321.4761-16-mark.rutland@arm.com
Signed-off-by: Ingo Molnar
---
 include/asm-generic/atomic-instrumented.h | 24 ++++++++++++++++++++++++
 include/asm-generic/atomic.h              |  9 ---------
 include/asm-generic/atomic64.h            |  4 ----
 3 files changed, 24 insertions(+), 13 deletions(-)

(limited to 'include/asm-generic')

diff --git a/include/asm-generic/atomic-instrumented.h b/include/asm-generic/atomic-instrumented.h
index 2b487f28ef35..6b64c200de73 100644
--- a/include/asm-generic/atomic-instrumented.h
+++ b/include/asm-generic/atomic-instrumented.h
@@ -225,29 +225,41 @@ static __always_inline s64 atomic64_dec_if_positive(atomic64_t *v)
 	return arch_atomic64_dec_if_positive(v);
 }
 
+#ifdef arch_atomic_dec_and_test
+#define atomic_dec_and_test atomic_dec_and_test
 static __always_inline bool atomic_dec_and_test(atomic_t *v)
 {
 	kasan_check_write(v, sizeof(*v));
 	return arch_atomic_dec_and_test(v);
 }
+#endif
 
+#ifdef arch_atomic64_dec_and_test
+#define atomic64_dec_and_test atomic64_dec_and_test
 static __always_inline bool atomic64_dec_and_test(atomic64_t *v)
 {
 	kasan_check_write(v, sizeof(*v));
 	return arch_atomic64_dec_and_test(v);
 }
+#endif
 
+#ifdef arch_atomic_inc_and_test
+#define atomic_inc_and_test atomic_inc_and_test
 static __always_inline bool atomic_inc_and_test(atomic_t *v)
 {
 	kasan_check_write(v, sizeof(*v));
 	return arch_atomic_inc_and_test(v);
 }
+#endif
 
+#ifdef arch_atomic64_inc_and_test
+#define atomic64_inc_and_test atomic64_inc_and_test
 static __always_inline bool atomic64_inc_and_test(atomic64_t *v)
 {
 	kasan_check_write(v, sizeof(*v));
 	return arch_atomic64_inc_and_test(v);
 }
+#endif
 
 static __always_inline int atomic_add_return(int i, atomic_t *v)
 {
@@ -333,29 +345,41 @@ static __always_inline s64 atomic64_fetch_xor(s64 i, atomic64_t *v)
 	return arch_atomic64_fetch_xor(i, v);
 }
 
+#ifdef arch_atomic_sub_and_test
+#define atomic_sub_and_test atomic_sub_and_test
 static __always_inline bool atomic_sub_and_test(int i, atomic_t *v)
 {
 	kasan_check_write(v, sizeof(*v));
 	return arch_atomic_sub_and_test(i, v);
 }
+#endif
 
+#ifdef arch_atomic64_sub_and_test
+#define atomic64_sub_and_test atomic64_sub_and_test
 static __always_inline bool atomic64_sub_and_test(s64 i, atomic64_t *v)
 {
 	kasan_check_write(v, sizeof(*v));
 	return arch_atomic64_sub_and_test(i, v);
 }
+#endif
 
+#ifdef arch_atomic_add_negative
+#define atomic_add_negative atomic_add_negative
 static __always_inline bool atomic_add_negative(int i, atomic_t *v)
 {
 	kasan_check_write(v, sizeof(*v));
 	return arch_atomic_add_negative(i, v);
 }
+#endif
 
+#ifdef arch_atomic64_add_negative
+#define atomic64_add_negative atomic64_add_negative
 static __always_inline bool atomic64_add_negative(s64 i, atomic64_t *v)
 {
 	kasan_check_write(v, sizeof(*v));
 	return arch_atomic64_add_negative(i, v);
 }
+#endif
 
 static __always_inline unsigned long
 cmpxchg_size(volatile void *ptr, unsigned long old, unsigned long new, int size)
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index 757e45821220..40cab858aaaa 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -186,11 +186,6 @@ ATOMIC_OP(xor, ^)
 
 #include <linux/irqflags.h>
 
-static inline int atomic_add_negative(int i, atomic_t *v)
-{
-	return atomic_add_return(i, v) < 0;
-}
-
 static inline void atomic_add(int i, atomic_t *v)
 {
 	atomic_add_return(i, v);
@@ -214,10 +209,6 @@ static inline void atomic_dec(atomic_t *v)
 #define atomic_dec_return(v)		atomic_sub_return(1, (v))
 #define atomic_inc_return(v)		atomic_add_return(1, (v))
 
-#define atomic_sub_and_test(i, v)	(atomic_sub_return((i), (v)) == 0)
-#define atomic_dec_and_test(v)		(atomic_dec_return(v) == 0)
-#define atomic_inc_and_test(v)		(atomic_inc_return(v) == 0)
-
 #define atomic_xchg(ptr, v)		(xchg(&(ptr)->counter, (v)))
 #define atomic_cmpxchg(v, old, new)	(cmpxchg(&((v)->counter), (old), (new)))
 
diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
index 49460107b29a..d3827ab97aa4 100644
--- a/include/asm-generic/atomic64.h
+++ b/include/asm-generic/atomic64.h
@@ -56,13 +56,9 @@ extern long long atomic64_xchg(atomic64_t *v, long long new);
 extern long long atomic64_fetch_add_unless(atomic64_t *v, long long a, long long u);
 #define atomic64_fetch_add_unless atomic64_fetch_add_unless
 
-#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
 #define atomic64_inc(v)			atomic64_add(1LL, (v))
 #define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
-#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
-#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
 #define atomic64_dec(v)			atomic64_sub(1LL, (v))
 #define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
-#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
 
 #endif /* _ASM_GENERIC_ATOMIC64_H */
--
cgit v1.2.3
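
As context for the "default fallbacks" mentioned in the commit message, below is a minimal, self-contained sketch of the conditional-definition pattern being introduced. It is not taken from this patch: the kernel's real fallbacks live in its generic atomic headers and operate on the kernel's own atomic_t via the arch-provided arch_atomic_*() ops, whereas this sketch uses a hypothetical toy_atomic_t and non-atomic stand-in return ops purely so the example compiles and runs on its own.

/*
 * Illustrative sketch only: toy_atomic_t and the toy_*_return() helpers are
 * stand-ins (not actually atomic) for the arch-provided return ops, so the
 * example is buildable as a standalone program.
 */
#include <stdio.h>

typedef struct { int counter; } toy_atomic_t;

static inline int toy_atomic_inc_return(toy_atomic_t *v) { return ++v->counter; }
static inline int toy_atomic_sub_return(int i, toy_atomic_t *v) { return v->counter -= i; }

/*
 * The pattern made optional by this patch: when no architecture-specific
 * definition exists, derive the test op from the corresponding return op,
 * exactly as listed in the commit message.
 */
#ifndef toy_atomic_inc_and_test
#define toy_atomic_inc_and_test(v)	(toy_atomic_inc_return(v) == 0)
#endif

#ifndef toy_atomic_sub_and_test
#define toy_atomic_sub_and_test(i, v)	(toy_atomic_sub_return(i, v) == 0)
#endif

int main(void)
{
	toy_atomic_t v = { .counter = -1 };

	/* -1 incremented to 0, so inc_and_test reports true (1). */
	printf("inc_and_test: %d\n", toy_atomic_inc_and_test(&v));

	/* 0 - 5 = -5, not zero, so sub_and_test reports false (0). */
	printf("sub_and_test: %d\n", toy_atomic_sub_and_test(5, &v));
	return 0;
}

The kernel-side equivalent works the other way around, as the instrumented wrappers above show: an architecture that does provide its own implementation defines the corresponding preprocessor symbol (e.g. arch_atomic_inc_and_test), and only in that case is the arch version exposed; otherwise the generic comparison-based fallback is used.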