From a9ebf306f52c756c4f9e50ee9a60cd6389d71344 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 1 Feb 2017 16:39:38 +0100
Subject: locking/atomic: Introduce atomic_try_cmpxchg()

Add a new cmpxchg interface:

  bool try_cmpxchg(u{8,16,32,64} *ptr, u{8,16,32,64} *val, u{8,16,32,64} new);

The boolean return value reports the result of the compare, and thus
whether the exchange happened; in case of failure, the current value
of *ptr is returned in *val.

This allows simplification/improvement of loops like:

	for (;;) {
		new = val $op $imm;
		old = cmpxchg(ptr, val, new);
		if (old == val)
			break;
		val = old;
	}

into:

	do {
	} while (!try_cmpxchg(ptr, &val, val $op $imm));

while also generating better code (GCC-6 and onwards).

Signed-off-by: Peter Zijlstra (Intel)
Cc: Andrew Morton
Cc: Andy Lutomirski
Cc: Borislav Petkov
Cc: Brian Gerst
Cc: Denys Vlasenko
Cc: H. Peter Anvin
Cc: Josh Poimboeuf
Cc: Linus Torvalds
Cc: Paul E. McKenney
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar
---
 arch/x86/include/asm/atomic.h      |  6 ++++
 arch/x86/include/asm/atomic64_64.h |  6 ++++
 arch/x86/include/asm/cmpxchg.h     | 69 ++++++++++++++++++++++++++++++++++++++
 3 files changed, 81 insertions(+)

diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index 14635c5ea025..8410377c6869 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -186,6 +186,12 @@ static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 	return cmpxchg(&v->counter, old, new);
 }
 
+#define atomic_try_cmpxchg atomic_try_cmpxchg
+static __always_inline bool atomic_try_cmpxchg(atomic_t *v, int *old, int new)
+{
+	return try_cmpxchg(&v->counter, old, new);
+}
+
 static inline int atomic_xchg(atomic_t *v, int new)
 {
 	return xchg(&v->counter, new);
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
index 89ed2f6ae2f7..12fb57413732 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -176,6 +176,12 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
 	return cmpxchg(&v->counter, old, new);
 }
 
+#define atomic64_try_cmpxchg atomic64_try_cmpxchg
+static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, long *old, long new)
+{
+	return try_cmpxchg(&v->counter, old, new);
+}
+
 static inline long atomic64_xchg(atomic64_t *v, long new)
 {
 	return xchg(&v->counter, new);
diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
index 97848cdfcb1a..fb961db51a2a 100644
--- a/arch/x86/include/asm/cmpxchg.h
+++ b/arch/x86/include/asm/cmpxchg.h
@@ -153,6 +153,75 @@ extern void __add_wrong_size(void)
 #define cmpxchg_local(ptr, old, new)					\
 	__cmpxchg_local(ptr, old, new, sizeof(*(ptr)))
 
+
+#define __raw_try_cmpxchg(_ptr, _pold, _new, size, lock)		\
+({									\
+	bool success;							\
+	__typeof__(_ptr) _old = (_pold);				\
+	__typeof__(*(_ptr)) __old = *_old;				\
+	__typeof__(*(_ptr)) __new = (_new);				\
+	switch (size) {							\
+	case __X86_CASE_B:						\
+	{								\
+		volatile u8 *__ptr = (volatile u8 *)(_ptr);		\
+		asm volatile(lock "cmpxchgb %[new], %[ptr]"		\
+			     CC_SET(z)					\
+			     : CC_OUT(z) (success),			\
+			       [ptr] "+m" (*__ptr),			\
+			       [old] "+a" (__old)			\
+			     : [new] "q" (__new)			\
+			     : "memory");				\
+		break;							\
+	}								\
+	case __X86_CASE_W:						\
+	{								\
+		volatile u16 *__ptr = (volatile u16 *)(_ptr);		\
+		asm volatile(lock "cmpxchgw %[new], %[ptr]"		\
+			     CC_SET(z)					\
+			     : CC_OUT(z) (success),			\
+			       [ptr] "+m" (*__ptr),			\
+			       [old] "+a" (__old)			\
+			     : [new] "r" (__new)			\
+			     : "memory");				\
+		break;							\
+	}								\
+	case __X86_CASE_L:						\
+	{								\
+		volatile u32 *__ptr = (volatile u32 *)(_ptr);		\
+		asm volatile(lock "cmpxchgl %[new], %[ptr]"		\
+			     CC_SET(z)					\
+			     : CC_OUT(z) (success),			\
+			       [ptr] "+m" (*__ptr),			\
+			       [old] "+a" (__old)			\
+			     : [new] "r" (__new)			\
+			     : "memory");				\
+		break;							\
+	}								\
+	case __X86_CASE_Q:						\
+	{								\
+		volatile u64 *__ptr = (volatile u64 *)(_ptr);		\
+		asm volatile(lock "cmpxchgq %[new], %[ptr]"		\
+			     CC_SET(z)					\
+			     : CC_OUT(z) (success),			\
+			       [ptr] "+m" (*__ptr),			\
+			       [old] "+a" (__old)			\
+			     : [new] "r" (__new)			\
+			     : "memory");				\
+		break;							\
+	}								\
+	default:							\
+		__cmpxchg_wrong_size();					\
+	}								\
+	*_old = __old;							\
+	success;							\
+})
+
+#define __try_cmpxchg(ptr, pold, new, size)				\
+	__raw_try_cmpxchg((ptr), (pold), (new), (size), LOCK_PREFIX)
+
+#define try_cmpxchg(ptr, pold, new)					\
+	__try_cmpxchg((ptr), (pold), (new), sizeof(*(ptr)))
+
 /*
  * xadd() adds "inc" to "*ptr" and atomically returns the previous
  * value of "*ptr".
--
cgit v1.2.3
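As an illustration of the intended usage pattern, here is a minimal,
hypothetical sketch (not part of the patch; atomic_add_max() is an
invented name) of a bounded-add helper built on the primitives
introduced above:

	/*
	 * Hypothetical example: add @a to @v unless the result would
	 * exceed @max.  When atomic_try_cmpxchg() fails, it has already
	 * refreshed @val with the current counter value, so the loop
	 * retries without an explicit re-read.
	 */
	static inline bool atomic_add_max(atomic_t *v, int a, int max)
	{
		int val = atomic_read(v);

		do {
			if (val + a > max)
				return false;
		} while (!atomic_try_cmpxchg(v, &val, val + a));

		return true;
	}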
From e6790e4b5d5e97dc287f3496dd2cf2dbabdfdb35 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Fri, 17 Mar 2017 20:44:45 +0100
Subject: locking/atomic/x86: Use atomic_try_cmpxchg()

Better code generation:

      text     data     bss  name
  10665111  4530096  843776  defconfig-build/vmlinux.3
  10655703  4530096  843776  defconfig-build/vmlinux.4

Signed-off-by: Peter Zijlstra (Intel)
Cc: Andrew Morton
Cc: Andy Lutomirski
Cc: Borislav Petkov
Cc: Brian Gerst
Cc: Denys Vlasenko
Cc: H. Peter Anvin
Cc: Josh Poimboeuf
Cc: Linus Torvalds
Cc: Paul E. McKenney
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar
---
 arch/x86/include/asm/atomic.h      | 27 +++++++++----------------
 arch/x86/include/asm/atomic64_64.h | 40 +++++++++++++-------------------------
 2 files changed, 22 insertions(+), 45 deletions(-)

diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index 8410377c6869..caa5798c92f4 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -207,16 +207,12 @@ static inline void atomic_##op(int i, atomic_t *v)		\
 }
 
 #define ATOMIC_FETCH_OP(op, c_op)					\
-static inline int atomic_fetch_##op(int i, atomic_t *v)	\
+static inline int atomic_fetch_##op(int i, atomic_t *v)		\
 {									\
-	int old, val = atomic_read(v);					\
-	for (;;) {							\
-		old = atomic_cmpxchg(v, val, val c_op i);		\
-		if (old == val)						\
-			break;						\
-		val = old;						\
-	}								\
-	return old;							\
+	int val = atomic_read(v);					\
+	do {								\
+	} while (!atomic_try_cmpxchg(v, &val, val c_op i));		\
+	return val;							\
 }
 
 #define ATOMIC_OPS(op, c_op)						\
@@ -242,16 +238,11 @@ ATOMIC_OPS(xor, ^)
  */
 static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
-	int c, old;
-	c = atomic_read(v);
-	for (;;) {
-		if (unlikely(c == (u)))
+	int c = atomic_read(v);
+	do {
+		if (unlikely(c == u))
 			break;
-		old = atomic_cmpxchg((v), c, c + (a));
-		if (likely(old == c))
-			break;
-		c = old;
-	}
+	} while (!atomic_try_cmpxchg(v, &c, c + a));
 	return c;
 }
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
index 12fb57413732..6189a433c9a9 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -198,17 +198,12 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
  */
 static inline bool atomic64_add_unless(atomic64_t *v, long a, long u)
 {
-	long c, old;
-	c = atomic64_read(v);
-	for (;;) {
-		if (unlikely(c == (u)))
-			break;
-		old = atomic64_cmpxchg((v), c, c + (a));
-		if (likely(old == c))
-			break;
-		c = old;
-	}
-	return c != (u);
+	long c = atomic64_read(v);
+	do {
+		if (unlikely(c == u))
+			return false;
+	} while (!atomic64_try_cmpxchg(v, &c, c + a));
+	return true;
 }
 
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
@@ -222,17 +217,12 @@ static inline bool atomic64_add_unless(atomic64_t *v, long a, long u)
  */
 static inline long atomic64_dec_if_positive(atomic64_t *v)
 {
-	long c, old, dec;
-	c = atomic64_read(v);
-	for (;;) {
+	long dec, c = atomic64_read(v);
+	do {
 		dec = c - 1;
 		if (unlikely(dec < 0))
 			break;
-		old = atomic64_cmpxchg((v), c, dec);
-		if (likely(old == c))
-			break;
-		c = old;
-	}
+	} while (!atomic64_try_cmpxchg(v, &c, dec));
 	return dec;
 }
@@ -248,14 +238,10 @@ static inline void atomic64_##op(long i, atomic64_t *v)	\
 }
 
 #define ATOMIC64_FETCH_OP(op, c_op)					\
 static inline long atomic64_fetch_##op(long i, atomic64_t *v)		\
 {									\
-	long old, val = atomic64_read(v);				\
-	for (;;) {							\
-		old = atomic64_cmpxchg(v, val, val c_op i);		\
-		if (old == val)						\
-			break;						\
-		val = old;						\
-	}								\
-	return old;							\
+	long val = atomic64_read(v);					\
+	do {								\
+	} while (!atomic64_try_cmpxchg(v, &val, val c_op i));		\
+	return val;							\
 }
 
 #define ATOMIC64_OPS(op, c_op)						\
--
cgit v1.2.3
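The converted loops have the same shape as the C11 compare-exchange
idiom, in which atomic_compare_exchange_strong() writes the observed
value back into the expected-value argument on failure (the next patch
below brings the kernel primitive fully in line with that semantic).
As a rough user-space analogue of the ATOMIC_FETCH_OP() conversion
above (an illustrative sketch, not kernel code):

	#include <stdatomic.h>

	/* Fetch-and-or via a compare-exchange loop.  On failure, the
	 * compare-exchange refreshes 'val' with the currently stored
	 * value, so the loop body can stay empty, exactly like the
	 * kernel's do { } while (!atomic_try_cmpxchg(...)) form. */
	static int fetch_or(_Atomic int *p, int i)
	{
		int val = atomic_load(p);

		do {
		} while (!atomic_compare_exchange_strong(p, &val, val | i));

		return val;
	}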
From 44fe84459faf1a7781595b7c64cd36daf2f2827d Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Mon, 27 Mar 2017 13:54:38 +0200
Subject: locking/atomic: Fix atomic_try_cmpxchg() semantics

Dmitry noted that the new atomic_try_cmpxchg() primitive is broken when
the old pointer doesn't point to the local stack. He writes:

  "Consider a classical lock-free stack push:

     node->next = atomic_read(&head);
     do {
     } while (!atomic_try_cmpxchg(&head, &node->next, node));

   This code is broken with the current implementation; the problem is
   the unconditional update of *__po.  In case of success it writes the
   same value back into *__po, but after a successful cmpxchg we may
   have lost ownership of some memory locations, potentially including
   the one __po points to.  The same holds for the re-read of *__po."

He also points out that this makes it surprisingly different from the
similar C/C++ atomic operation.

After investigating the code-gen differences caused by this patch, and
a number of alternatives (Linus dislikes this interface a lot), we
arrived at these results (vmlinux size, x86_64 defconfig):

  GCC-6.3.0:

  10735757	cmpxchg
  10726413	try_cmpxchg
  10730509	try_cmpxchg + patch
  10730445	try_cmpxchg-linus

  GCC-7 (20170327):

  10709514	cmpxchg
  10704266	try_cmpxchg
  10704266	try_cmpxchg + patch
  10704394	try_cmpxchg-linus

From this we see that the patch has the advantage of better code-gen
on GCC-7 and keeps the interface roughly consistent with the C language
variant.

Reported-by: Dmitry Vyukov
Signed-off-by: Peter Zijlstra (Intel)
Cc: Andrew Morton
Cc: Linus Torvalds
Cc: Paul E. McKenney
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: linux-kernel@vger.kernel.org
Fixes: a9ebf306f52c ("locking/atomic: Introduce atomic_try_cmpxchg()")
Signed-off-by: Ingo Molnar
---
 arch/x86/include/asm/cmpxchg.h |  5 +++--
 include/linux/atomic.h         | 16 ++++++++++------
 2 files changed, 13 insertions(+), 8 deletions(-)

diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
index fb961db51a2a..d90296d061e8 100644
--- a/arch/x86/include/asm/cmpxchg.h
+++ b/arch/x86/include/asm/cmpxchg.h
@@ -212,8 +212,9 @@ extern void __add_wrong_size(void)
 	default:							\
 		__cmpxchg_wrong_size();					\
 	}								\
-	*_old = __old;							\
-	success;							\
+	if (unlikely(!success))						\
+		*_old = __old;						\
+	likely(success);						\
 })
 
 #define __try_cmpxchg(ptr, pold, new, size)				\
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index aae5953817d6..c56be7410130 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -428,9 +428,11 @@
 #define __atomic_try_cmpxchg(type, _p, _po, _n)			\
 ({									\
 	typeof(_po) __po = (_po);					\
-	typeof(*(_po)) __o = *__po;					\
-	*__po = atomic_cmpxchg##type((_p), __o, (_n));			\
-	(*__po == __o);							\
+	typeof(*(_po)) __r, __o = *__po;				\
+	__r = atomic_cmpxchg##type((_p), __o, (_n));			\
+	if (unlikely(__r != __o))					\
+		*__po = __r;						\
+	likely(__r == __o);						\
 })
 
 #define atomic_try_cmpxchg(_p, _po, _n) __atomic_try_cmpxchg(, _p, _po, _n)
@@ -1022,9 +1024,11 @@ static inline int atomic_dec_if_positive(atomic_t *v)
 #define __atomic64_try_cmpxchg(type, _p, _po, _n)			\
 ({									\
 	typeof(_po) __po = (_po);					\
-	typeof(*(_po)) __o = *__po;					\
-	*__po = atomic64_cmpxchg##type((_p), __o, (_n));		\
-	(*__po == __o);							\
+	typeof(*(_po)) __r, __o = *__po;				\
+	__r = atomic64_cmpxchg##type((_p), __o, (_n));			\
+	if (unlikely(__r != __o))					\
+		*__po = __r;						\
+	likely(__r == __o);						\
 })
 
 #define atomic64_try_cmpxchg(_p, _po, _n) __atomic64_try_cmpxchg(, _p, _po, _n)
--
cgit v1.2.3
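To make Dmitry's scenario concrete, here is a stand-alone user-space
sketch of the lock-free stack push (illustrative only; it uses C11
atomics rather than the kernel API). Once the compare-exchange
succeeds the node is published and another thread may immediately pop
and free it, which is why the fix above writes *_old back only on
failure, while the caller still owns the memory behind the
expected-value pointer:

	#include <stdatomic.h>

	struct node {
		struct node *next;
	};

	static struct node *_Atomic head;

	/* Classical lock-free push: the expected value lives inside the
	 * node being pushed.  node->next may only be updated on CAS
	 * failure, while the node is still private to this thread; after
	 * a successful CAS the node belongs to the stack and may already
	 * have been popped and freed by another thread. */
	static void push(struct node *node)
	{
		node->next = atomic_load(&head);
		do {
		} while (!atomic_compare_exchange_weak(&head, &node->next, node));
	}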