author		Mark Rutland <mark.rutland@arm.com>	2017-05-03 16:09:37 +0100
committer	Catalin Marinas <catalin.marinas@arm.com>	2017-05-09 17:47:17 +0100
commit		8997c93452d16aac11d3b0cc53940c94330273a4 (patch)
tree		91552b40605f27ca1850a0a18be64c768e83a2f5
parent		55de49f9aa17b0b2b144dd2af587177b9aadf429 (diff)
arm64: atomic_lse: match asm register sizes
The LSE atomic code uses asm register variables to ensure that
parameters are allocated in specific registers. In the majority of
cases we specifically ask for an x register when using 64-bit values,
but in a couple of cases we use a w register for a 64-bit value.

For asm register variables, the compiler only cares about the register
index, with wN and xN having the same meaning. The compiler determines
the register size to use based on the type of the variable. Thus, this
inconsistency is merely confusing, and not harmful to code generation.

For consistency, this patch updates those cases to use the x register
alias. There should be no functional change as a result of this patch.

Acked-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
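The behaviour described above can be seen in a small standalone
example (a minimal sketch, not kernel code; it assumes an AArch64
GCC or Clang toolchain, and double_it is a hypothetical name):

/*
 * The asm name only pins the register *index*: pinning this 64-bit
 * variable to "w0" or to "x0" produces identical code, because the
 * access size comes from the variable's C type, not from the name.
 * Using "x0" simply matches the type, which is the point of the patch.
 */
static long double_it(long i)
{
	register long x0 asm ("x0") = i;

	asm ("add	%[i], %[i], %[i]"	/* emitted as: add x0, x0, x0 */
	     : [i] "+r" (x0));

	return x0;
}

int main(void)
{
	return double_it(21) == 42 ? 0 : 1;
}

Because the operand is a 64-bit long, the "r" constraint prints it as
x0 either way; only the source-level register name was inconsistent.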
-rw-r--r--	arch/arm64/include/asm/atomic_lse.h	4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
index 7457ce082b5f..99fa69c9c3cf 100644
--- a/arch/arm64/include/asm/atomic_lse.h
+++ b/arch/arm64/include/asm/atomic_lse.h
@@ -322,7 +322,7 @@ static inline void atomic64_and(long i, atomic64_t *v)
#define ATOMIC64_FETCH_OP_AND(name, mb, cl...) \
static inline long atomic64_fetch_and##name(long i, atomic64_t *v) \
{ \
- register long x0 asm ("w0") = i; \
+ register long x0 asm ("x0") = i; \
register atomic64_t *x1 asm ("x1") = v; \
\
asm volatile(ARM64_LSE_ATOMIC_INSN( \
@@ -394,7 +394,7 @@ ATOMIC64_OP_SUB_RETURN( , al, "memory")
#define ATOMIC64_FETCH_OP_SUB(name, mb, cl...) \
static inline long atomic64_fetch_sub##name(long i, atomic64_t *v) \
{ \
- register long x0 asm ("w0") = i; \
+ register long x0 asm ("x0") = i; \
register atomic64_t *x1 asm ("x1") = v; \
\
asm volatile(ARM64_LSE_ATOMIC_INSN( \
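For context, the inline asm bodies that these macros wrap operate on
whole 64-bit registers, so the x alias is the natural spelling. A
hedged standalone sketch of the same pattern (assuming an ARMv8.1-A
toolchain with LSE, e.g. built with -march=armv8.1-a; fetch_add_lse
is a hypothetical name, not the kernel's macro expansion):

/*
 * 64-bit fetch-add in the style of the patched macros.  LDADDAL takes
 * whole X registers; because x0 below is a 64-bit long, %[i] renders
 * as x0.  Pinning the variable to "w0" would generate the same code,
 * but would read inconsistently with the emitted instruction.
 */
static long fetch_add_lse(long i, long *v)
{
	register long x0 asm ("x0") = i;
	register long *x1 asm ("x1") = v;

	asm volatile(
	"	ldaddal	%[i], %[i], %[v]"	/* emitted as: ldaddal x0, x0, [x1] */
	: [i] "+r" (x0), [v] "+Q" (*v)
	: "r" (x1)
	: "memory");

	return x0;	/* old value of *v */
}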