author		Will Deacon <will.deacon@arm.com>	2015-05-29 14:47:59 +0100
committer	Will Deacon <will.deacon@arm.com>	2015-07-27 15:28:52 +0100
commit		4e39715f4b5cb3b44576fedb2d38aca87de3cf48 (patch)
tree		1e4c60ef5714188ba60fe309c7feada21dcff076 /arch
parent		0bc671d3f4bee9c31110d096ada0de52380e693d (diff)
download	linux-4e39715f4b5cb3b44576fedb2d38aca87de3cf48.tar.bz2
arm64: cmpxchg: avoid memory barrier on comparison failure
cmpxchg doesn't require memory barrier semantics when the value
comparison fails, so make the barrier conditional on success.
Reviewed-by: Steve Capper <steve.capper@arm.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
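For readers who don't want to wade through the LL/SC assembly below, the idea can be sketched with portable C11 atomics (a hypothetical analogue for illustration only; this is not kernel code and the function name is invented). The patch turns the exclusive store into a release operation (stlxr) and issues the full barrier (dmb ish) only after the store has succeeded; if the comparison fails, the routine returns with no barrier at all:

	#include <stdatomic.h>

	/* Sketch: pay for ordering only when the exchange succeeds. */
	static int cmpxchg_barrier_on_success(atomic_int *ptr, int old, int new)
	{
		int expected = old;

		/* release semantics on the store, like the stlxr */
		if (atomic_compare_exchange_strong_explicit(ptr, &expected, new,
							    memory_order_release,
							    memory_order_relaxed)) {
			/* full barrier on success only, like the trailing dmb ish */
			atomic_thread_fence(memory_order_seq_cst);
			return old;
		}

		/* comparison failed: no barrier, just the value we observed */
		return expected;
	}

Moving the barrier inside the asm block is also why the patch adds a "memory" clobber to atomic_cmpxchg and atomic64_cmpxchg: the ordering previously supplied by the surrounding smp_mb() calls must now be made visible to the compiler by the asm itself.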
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm64/include/asm/atomic_ll_sc.h	48
1 file changed, 22 insertions, 26 deletions
diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h
index c02684d1eab3..5a9fb37272d4 100644
--- a/arch/arm64/include/asm/atomic_ll_sc.h
+++ b/arch/arm64/include/asm/atomic_ll_sc.h
@@ -97,19 +97,18 @@ __LL_SC_PREFIX(atomic_cmpxchg(atomic_t *ptr, int old, int new))
 	unsigned long tmp;
 	int oldval;
 
-	smp_mb();
-
 	asm volatile("// atomic_cmpxchg\n"
 "1:	ldxr	%w1, %2\n"
 "	eor	%w0, %w1, %w3\n"
 "	cbnz	%w0, 2f\n"
-"	stxr	%w0, %w4, %2\n"
+"	stlxr	%w0, %w4, %2\n"
 "	cbnz	%w0, 1b\n"
+"	dmb	ish\n"
 "2:"
 	: "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter)
-	: "Lr" (old), "r" (new));
+	: "Lr" (old), "r" (new)
+	: "memory");
 
-	smp_mb();
 	return oldval;
 }
 __LL_SC_EXPORT(atomic_cmpxchg);
@@ -174,19 +173,18 @@ __LL_SC_PREFIX(atomic64_cmpxchg(atomic64_t *ptr, long old, long new))
 	long oldval;
 	unsigned long res;
 
-	smp_mb();
-
 	asm volatile("// atomic64_cmpxchg\n"
 "1:	ldxr	%1, %2\n"
 "	eor	%0, %1, %3\n"
 "	cbnz	%w0, 2f\n"
-"	stxr	%w0, %4, %2\n"
+"	stlxr	%w0, %4, %2\n"
 "	cbnz	%w0, 1b\n"
+"	dmb	ish\n"
 "2:"
 	: "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter)
-	: "Lr" (old), "r" (new));
+	: "Lr" (old), "r" (new)
+	: "memory");
 
-	smp_mb();
 	return oldval;
 }
 __LL_SC_EXPORT(atomic64_cmpxchg);
@@ -213,7 +211,7 @@ __LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v))
 }
 __LL_SC_EXPORT(atomic64_dec_if_positive);
 
-#define __CMPXCHG_CASE(w, sz, name, mb, cl)				\
+#define __CMPXCHG_CASE(w, sz, name, mb, rel, cl)			\
 __LL_SC_INLINE unsigned long						\
 __LL_SC_PREFIX(__cmpxchg_case_##name(volatile void *ptr,		\
 				     unsigned long old,			\
@@ -222,11 +220,10 @@ __LL_SC_PREFIX(__cmpxchg_case_##name(volatile void *ptr,		\
 	unsigned long tmp, oldval;					\
 									\
 	asm volatile(							\
-	"	" #mb "\n"						\
 	"1:	ldxr" #sz "\t%" #w "[oldval], %[v]\n"			\
 	"	eor	%" #w "[tmp], %" #w "[oldval], %" #w "[old]\n"	\
 	"	cbnz	%" #w "[tmp], 2f\n"				\
-	"	stxr" #sz "\t%w[tmp], %" #w "[new], %[v]\n"		\
+	"	st" #rel "xr" #sz "\t%w[tmp], %" #w "[new], %[v]\n"	\
 	"	cbnz	%w[tmp], 1b\n"					\
 	"	" #mb "\n"						\
 	"	mov	%" #w "[oldval], %" #w "[old]\n"		\
@@ -240,18 +237,18 @@ __LL_SC_PREFIX(__cmpxchg_case_##name(volatile void *ptr,		\
 }									\
 __LL_SC_EXPORT(__cmpxchg_case_##name);
 
-__CMPXCHG_CASE(w, b,    1,        ,         )
-__CMPXCHG_CASE(w, h,    2,        ,         )
-__CMPXCHG_CASE(w,  ,    4,        ,         )
-__CMPXCHG_CASE( ,  ,    8,        ,         )
-__CMPXCHG_CASE(w, b, mb_1, dmb ish, "memory")
-__CMPXCHG_CASE(w, h, mb_2, dmb ish, "memory")
-__CMPXCHG_CASE(w,  , mb_4, dmb ish, "memory")
-__CMPXCHG_CASE( ,  , mb_8, dmb ish, "memory")
+__CMPXCHG_CASE(w, b,    1,        ,  ,         )
+__CMPXCHG_CASE(w, h,    2,        ,  ,         )
+__CMPXCHG_CASE(w,  ,    4,        ,  ,         )
+__CMPXCHG_CASE( ,  ,    8,        ,  ,         )
+__CMPXCHG_CASE(w, b, mb_1, dmb ish, l, "memory")
+__CMPXCHG_CASE(w, h, mb_2, dmb ish, l, "memory")
+__CMPXCHG_CASE(w,  , mb_4, dmb ish, l, "memory")
+__CMPXCHG_CASE( ,  , mb_8, dmb ish, l, "memory")
 
 #undef __CMPXCHG_CASE
 
-#define __CMPXCHG_DBL(name, mb, cl)					\
+#define __CMPXCHG_DBL(name, mb, rel, cl)				\
 __LL_SC_INLINE int							\
 __LL_SC_PREFIX(__cmpxchg_double##name(unsigned long old1,		\
 				      unsigned long old2,		\
@@ -262,13 +259,12 @@ __LL_SC_PREFIX(__cmpxchg_double##name(unsigned long old1,		\
 	unsigned long tmp, ret;						\
 									\
 	asm volatile("// __cmpxchg_double" #name "\n"			\
-	"	" #mb "\n"						\
 	"1:	ldxp	%0, %1, %2\n"					\
 	"	eor	%0, %0, %3\n"					\
 	"	eor	%1, %1, %4\n"					\
 	"	orr	%1, %0, %1\n"					\
 	"	cbnz	%1, 2f\n"					\
-	"	stxp	%w0, %5, %6, %2\n"				\
+	"	st" #rel "xp	%w0, %5, %6, %2\n"			\
 	"	cbnz	%w0, 1b\n"					\
 	"	" #mb "\n"						\
 	"2:"								\
@@ -280,8 +276,8 @@ __LL_SC_PREFIX(__cmpxchg_double##name(unsigned long old1,		\
 }									\
 __LL_SC_EXPORT(__cmpxchg_double##name);
 
-__CMPXCHG_DBL(   ,        ,         )
-__CMPXCHG_DBL(_mb, dmb ish, "memory")
+__CMPXCHG_DBL(   ,        ,  ,         )
+__CMPXCHG_DBL(_mb, dmb ish, l, "memory")
 
 #undef __CMPXCHG_DBL
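As a sanity check on the new rel macro argument (a hand expansion for illustration; operand lists elided), the stringizing in st" #rel "xr" #sz pastes the release mnemonic together, so __CMPXCHG_CASE(w, b, mb_1, dmb ish, l, "memory") now generates

	1:	ldxrb	%w[oldval], %[v]
		eor	%w[tmp], %w[oldval], %w[old]
		cbnz	%w[tmp], 2f
		stlxrb	%w[tmp], %w[new], %[v]
		cbnz	%w[tmp], 1b
		dmb	ish
		mov	%w[oldval], %w[old]
	2:

while the barrier-free variants pass an empty rel and mb, keeping a plain stxrb with no barrier on either path.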