author     Will Deacon <will.deacon@arm.com>            2014-02-04 12:29:12 +0000
committer  Catalin Marinas <catalin.marinas@arm.com>    2014-02-07 16:45:43 +0000
commit     8e86f0b409a44193f1587e87b69c5dcf8f65be67 (patch)
tree       6d8bd30d19ce3a392428d86cfd93003057d6aeb8 /arch/arm64/include/asm/atomic.h
parent     4a7ac12eedd190cdf071e61145defa73df1675c0 (diff)
download   linux-8e86f0b409a44193f1587e87b69c5dcf8f65be67.tar.bz2
arm64: atomics: fix use of acquire + release for full barrier semantics
Linux requires a number of atomic operations to provide full barrier
semantics, that is no memory accesses after the operation can be observed
before any accesses up to and including the operation in program order.

On arm64, these operations have been incorrectly implemented as follows:

  // A, B, C are independent memory locations

  <Access [A]>

  // atomic_op (B)
1:  ldaxr   x0, [B]         // Exclusive load with acquire
    <op(B)>
    stlxr   w1, x0, [B]     // Exclusive store with release
    cbnz    w1, 1b

  <Access [C]>

The assumption here being that two half barriers are equivalent to a full
barrier, so the only permitted ordering would be A -> B -> C (where B is the
atomic operation involving both a load and a store).

Unfortunately, this is not the case by the letter of the architecture and, in
fact, the accesses to A and C are permitted to pass their nearest half
barrier resulting in orderings such as Bl -> A -> C -> Bs or
Bl -> C -> A -> Bs (where Bl is the load-acquire on B and Bs is the
store-release on B). This is a clear violation of the full barrier
requirement.

The simple way to fix this is to implement the same algorithm as ARMv7 using
explicit barriers:

  <Access [A]>

  // atomic_op (B)
    dmb     ish             // Full barrier
1:  ldxr    x0, [B]         // Exclusive load
    <op(B)>
    stxr    w1, x0, [B]     // Exclusive store
    cbnz    w1, 1b
    dmb     ish             // Full barrier

  <Access [C]>

but this has the undesirable effect of introducing *two* full barrier
instructions. A better approach is actually the following, non-intuitive
sequence:

  <Access [A]>

  // atomic_op (B)
1:  ldxr    x0, [B]         // Exclusive load
    <op(B)>
    stlxr   w1, x0, [B]     // Exclusive store with release
    cbnz    w1, 1b
    dmb     ish             // Full barrier

  <Access [C]>

The simple observations here are:

  - The dmb ensures that no subsequent accesses (e.g. the access to C) can
    enter or pass the atomic sequence.

  - The dmb also ensures that no prior accesses (e.g. the access to A) can
    pass the atomic sequence.

  - Therefore, no prior access can pass a subsequent access, or vice-versa
    (i.e. A is strictly ordered before C).

  - The stlxr ensures that no prior access can pass the store component of
    the atomic operation.

The only tricky part remaining is the ordering between the ldxr and the
access to A, since the absence of the first dmb means that we're now
permitting re-ordering between the ldxr and any prior accesses.

From an (arbitrary) observer's point of view, there are two scenarios:

  1. We have observed the ldxr. This means that if we perform a store to [B],
     the ldxr will still return older data. If we can observe the ldxr, then
     we can potentially observe the permitted re-ordering with the access to
     A, which is clearly an issue when compared to the dmb variant of the
     code. Thankfully, the exclusive monitor will save us here since it will
     be cleared as a result of the store and the ldxr will retry. Notice that
     any use of a later memory observation to imply observation of the ldxr
     will also imply observation of the access to A, since the stlxr/dmb
     ensure strict ordering.

  2. We have not observed the ldxr. This means we can perform a store and
     influence the later ldxr. However, that doesn't actually tell us
     anything about the access to [A], so we've not lost anything here either
     when compared to the dmb variant.

This patch implements this solution for our barriered atomic operations,
ensuring that we satisfy the full barrier requirements where they are needed.

Cc: <stable@vger.kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
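For reference, the final sequence can also be written as a stand-alone C
helper. This is only an illustrative sketch, not the kernel code: it assumes
a plain int counter and a hypothetical add_return_full_barrier() name, and it
emits the trailing dmb ish directly instead of calling smp_mb(), but it
follows the same ldxr / op / stlxr / dmb pattern that the diff below applies
to the barriered atomics.

  /* Illustrative sketch only: full-barrier add-return on a plain int,
   * mirroring the post-patch kernel sequence (ldxr; op; stlxr; dmb ish).
   * Build for arm64 with GCC or Clang.
   */
  static inline int add_return_full_barrier(int i, int *counter)
  {
          unsigned long tmp;
          int result;

          asm volatile(
  "1:     ldxr    %w0, %2\n"      /* exclusive load, no acquire */
  "       add     %w0, %w0, %w3\n"
  "       stlxr   %w1, %w0, %2\n" /* exclusive store with release */
  "       cbnz    %w1, 1b\n"
  "       dmb     ish"            /* trailing full barrier (what smp_mb() emits) */
          : "=&r" (result), "=&r" (tmp), "+Q" (*counter)
          : "Ir" (i)
          : "cc", "memory");

          return result;
  }

Prior accesses cannot pass the stlxr and no access on either side can pass
the dmb, giving the A -> atomic -> C ordering argued above.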
Diffstat (limited to 'arch/arm64/include/asm/atomic.h')
-rw-r--r--   arch/arm64/include/asm/atomic.h   29
1 file changed, 20 insertions(+), 9 deletions(-)
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index 01de5aaa3edc..e32893e005d4 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -64,7 +64,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
int result;
asm volatile("// atomic_add_return\n"
-"1: ldaxr %w0, %2\n"
+"1: ldxr %w0, %2\n"
" add %w0, %w0, %w3\n"
" stlxr %w1, %w0, %2\n"
" cbnz %w1, 1b"
@@ -72,6 +72,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
: "Ir" (i)
: "cc", "memory");
+ smp_mb();
return result;
}
@@ -96,7 +97,7 @@ static inline int atomic_sub_return(int i, atomic_t *v)
int result;
asm volatile("// atomic_sub_return\n"
-"1: ldaxr %w0, %2\n"
+"1: ldxr %w0, %2\n"
" sub %w0, %w0, %w3\n"
" stlxr %w1, %w0, %2\n"
" cbnz %w1, 1b"
@@ -104,6 +105,7 @@ static inline int atomic_sub_return(int i, atomic_t *v)
: "Ir" (i)
: "cc", "memory");
+ smp_mb();
return result;
}
@@ -112,17 +114,20 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
unsigned long tmp;
int oldval;
+ smp_mb();
+
asm volatile("// atomic_cmpxchg\n"
-"1: ldaxr %w1, %2\n"
+"1: ldxr %w1, %2\n"
" cmp %w1, %w3\n"
" b.ne 2f\n"
-" stlxr %w0, %w4, %2\n"
+" stxr %w0, %w4, %2\n"
" cbnz %w0, 1b\n"
"2:"
: "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter)
: "Ir" (old), "r" (new)
: "cc", "memory");
+ smp_mb();
return oldval;
}
@@ -183,7 +188,7 @@ static inline long atomic64_add_return(long i, atomic64_t *v)
unsigned long tmp;
asm volatile("// atomic64_add_return\n"
-"1: ldaxr %0, %2\n"
+"1: ldxr %0, %2\n"
" add %0, %0, %3\n"
" stlxr %w1, %0, %2\n"
" cbnz %w1, 1b"
@@ -191,6 +196,7 @@ static inline long atomic64_add_return(long i, atomic64_t *v)
: "Ir" (i)
: "cc", "memory");
+ smp_mb();
return result;
}
@@ -215,7 +221,7 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
unsigned long tmp;
asm volatile("// atomic64_sub_return\n"
-"1: ldaxr %0, %2\n"
+"1: ldxr %0, %2\n"
" sub %0, %0, %3\n"
" stlxr %w1, %0, %2\n"
" cbnz %w1, 1b"
@@ -223,6 +229,7 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
: "Ir" (i)
: "cc", "memory");
+ smp_mb();
return result;
}
@@ -231,17 +238,20 @@ static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
long oldval;
unsigned long res;
+ smp_mb();
+
asm volatile("// atomic64_cmpxchg\n"
-"1: ldaxr %1, %2\n"
+"1: ldxr %1, %2\n"
" cmp %1, %3\n"
" b.ne 2f\n"
-" stlxr %w0, %4, %2\n"
+" stxr %w0, %4, %2\n"
" cbnz %w0, 1b\n"
"2:"
: "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter)
: "Ir" (old), "r" (new)
: "cc", "memory");
+ smp_mb();
return oldval;
}
@@ -253,11 +263,12 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
unsigned long tmp;
asm volatile("// atomic64_dec_if_positive\n"
-"1: ldaxr %0, %2\n"
+"1: ldxr %0, %2\n"
" subs %0, %0, #1\n"
" b.mi 2f\n"
" stlxr %w1, %0, %2\n"
" cbnz %w1, 1b\n"
+" dmb ish\n"
"2:"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
: