From e9a4b795652f654a7870727e5333c1b709b8736c Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Thu, 14 May 2015 18:05:50 +0100
Subject: arm64: cmpxchg_dbl: patch in lse instructions when supported by the CPU

On CPUs which support the LSE atomic instructions introduced in ARMv8.1,
it makes sense to use them in preference to ll/sc sequences.

This patch introduces runtime patching of our cmpxchg_double
primitives so that the LSE casp instruction is used instead.

Reviewed-by: Catalin Marinas
Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/atomic_ll_sc.h | 34 ++++++++++++++++++++++++++++++++++
 1 file changed, 34 insertions(+)

(limited to 'arch/arm64/include/asm/atomic_ll_sc.h')

diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h
index 4864158d486e..f89f1e4ba577 100644
--- a/arch/arm64/include/asm/atomic_ll_sc.h
+++ b/arch/arm64/include/asm/atomic_ll_sc.h
@@ -253,4 +253,38 @@ __CMPXCHG_CASE( ,  ,  mb_8, dmb ish, "memory")
 
 #undef __CMPXCHG_CASE
 
+#define __CMPXCHG_DBL(name, mb, cl)					\
+__LL_SC_INLINE int							\
+__LL_SC_PREFIX(__cmpxchg_double##name(unsigned long old1,		\
+				      unsigned long old2,		\
+				      unsigned long new1,		\
+				      unsigned long new2,		\
+				      volatile void *ptr))		\
+{									\
+	unsigned long tmp, ret;						\
+									\
+	asm volatile("// __cmpxchg_double" #name "\n"			\
+	"	" #mb "\n"						\
+	"1:	ldxp	%0, %1, %2\n"					\
+	"	eor	%0, %0, %3\n"					\
+	"	eor	%1, %1, %4\n"					\
+	"	orr	%1, %0, %1\n"					\
+	"	cbnz	%1, 2f\n"					\
+	"	stxp	%w0, %5, %6, %2\n"				\
+	"	cbnz	%w0, 1b\n"					\
+	"	" #mb "\n"						\
+	"2:"								\
+	: "=&r" (tmp), "=&r" (ret), "+Q" (*(unsigned long *)ptr)	\
+	: "r" (old1), "r" (old2), "r" (new1), "r" (new2)		\
+	: cl);								\
+									\
+	return ret;							\
+}									\
+__LL_SC_EXPORT(__cmpxchg_double##name);
+
+__CMPXCHG_DBL(   ,        ,         )
+__CMPXCHG_DBL(_mb, dmb ish, "memory")
+
+#undef __CMPXCHG_DBL
+
 #endif	/* __ASM_ATOMIC_LL_SC_H */
--
cgit v1.2.3
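
For context, the ll/sc sequence above compares two adjacent unsigned longs
against (old1, old2) and, only if both match, stores (new1, new2) as a single
atomic operation, returning 0 on success and non-zero on a mismatch (kernel
callers normally go through the cmpxchg_double() wrapper, which inverts that
return value so 1 means success). The sketch below illustrates the same
semantics in portable userspace C using the GCC/Clang
__atomic_compare_exchange builtin on a 16-byte pair; the identifiers
(struct pair, cmpxchg_double_sketch) are illustrative only and are not part
of the kernel or of this patch. On targets without a native 16-byte CAS,
link with -latomic.

	/*
	 * Userspace sketch of double-word compare-and-swap semantics.
	 * Not the kernel API: names and layout here are illustrative.
	 */
	#include <stdio.h>

	struct pair {
		unsigned long first;
		unsigned long second;
	};

	/* Returns 0 on success, non-zero on mismatch, mirroring __cmpxchg_double. */
	static int cmpxchg_double_sketch(struct pair *p,
					 unsigned long old1, unsigned long old2,
					 unsigned long new1, unsigned long new2)
	{
		struct pair expected = { old1, old2 };
		struct pair desired  = { new1, new2 };

		/* Atomically replace *p with desired iff *p == expected. */
		return !__atomic_compare_exchange(p, &expected, &desired, 0,
						  __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
	}

	int main(void)
	{
		/* 16-byte alignment mirrors what the hardware CAS-pair requires. */
		struct pair p __attribute__((aligned(16))) = { 1, 2 };

		/* Succeeds: both words match, so {3, 4} is installed atomically. */
		printf("first attempt:  %d\n", cmpxchg_double_sketch(&p, 1, 2, 3, 4));

		/* Fails: p is now {3, 4}, so the expected values no longer match. */
		printf("second attempt: %d\n", cmpxchg_double_sketch(&p, 1, 2, 5, 6));

		printf("final: {%lu, %lu}\n", p.first, p.second);
		return 0;
	}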