From 8e6082e94aac6d0338883b5953631b662a5a9188 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Fri, 10 Dec 2021 15:14:06 +0000
Subject: arm64: atomics: format whitespace consistently

The code for the atomic ops is formatted inconsistently, and while this
is not a functional problem it is rather distracting when working on
them.

Some ops have consistent indentation, e.g.

| #define ATOMIC_OP_ADD_RETURN(name, mb, cl...)				\
| static inline int __lse_atomic_add_return##name(int i, atomic_t *v)	\
| {									\
| 	u32 tmp;							\
| 									\
| 	asm volatile(							\
| 	__LSE_PREAMBLE							\
| 	"	ldadd" #mb "	%w[i], %w[tmp], %[v]\n"			\
| 	"	add	%w[i], %w[i], %w[tmp]"				\
| 	: [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)	\
| 	: "r" (v)							\
| 	: cl);								\
| 									\
| 	return i;							\
| }

While others have negative indentation for some lines, and/or have
misaligned trailing backslashes, e.g.

| static inline void __lse_atomic_##op(int i, atomic_t *v)		\
| {									\
| 	asm volatile(							\
| 	__LSE_PREAMBLE							\
| "	" #asm_op "	%w[i], %[v]\n"					\
| 	: [i] "+r" (i), [v] "+Q" (v->counter)				\
| 	: "r" (v));							\
| }

This patch makes the indentation consistent and also aligns the
trailing backslashes. This makes the code easier to read for those
(like myself) who are easily distracted by these inconsistencies.

This is intended as a cleanup. There should be no functional change as
a result of this patch.

Signed-off-by: Mark Rutland
Cc: Boqun Feng
Cc: Peter Zijlstra
Cc: Will Deacon
Acked-by: Will Deacon
Acked-by: Peter Zijlstra (Intel)
Link: https://lore.kernel.org/r/20211210151410.2782645-2-mark.rutland@arm.com
Signed-off-by: Catalin Marinas
---
 arch/arm64/include/asm/atomic_ll_sc.h | 86 +++++++++++++++++------------------
 1 file changed, 43 insertions(+), 43 deletions(-)

(limited to 'arch/arm64/include/asm/atomic_ll_sc.h')

diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h
index 13869b76b58c..fe0db8d416fb 100644
--- a/arch/arm64/include/asm/atomic_ll_sc.h
+++ b/arch/arm64/include/asm/atomic_ll_sc.h
@@ -44,11 +44,11 @@ __ll_sc_atomic_##op(int i, atomic_t *v)			\
 									\
 	asm volatile("// atomic_" #op "\n"				\
 	__LL_SC_FALLBACK(						\
-"	prfm	pstl1strm, %2\n"					\
-"1:	ldxr	%w0, %2\n"						\
-"	" #asm_op "	%w0, %w0, %w3\n"				\
-"	stxr	%w1, %w0, %2\n"						\
-"	cbnz	%w1, 1b\n")						\
+	"	prfm	pstl1strm, %2\n"				\
+	"1:	ldxr	%w0, %2\n"					\
+	"	" #asm_op "	%w0, %w0, %w3\n"			\
+	"	stxr	%w1, %w0, %2\n"					\
+	"	cbnz	%w1, 1b\n")					\
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
 	: __stringify(constraint) "r" (i));				\
 }
@@ -62,12 +62,12 @@ __ll_sc_atomic_##op##_return##name(int i, atomic_t *v)	\
 									\
 	asm volatile("// atomic_" #op "_return" #name "\n"		\
 	__LL_SC_FALLBACK(						\
-"	prfm	pstl1strm, %2\n"					\
-"1:	ld" #acq "xr	%w0, %2\n"					\
-"	" #asm_op "	%w0, %w0, %w3\n"				\
-"	st" #rel "xr	%w1, %w0, %2\n"					\
-"	cbnz	%w1, 1b\n"						\
-"	" #mb )								\
+	"	prfm	pstl1strm, %2\n"				\
+	"1:	ld" #acq "xr	%w0, %2\n"				\
+	"	" #asm_op "	%w0, %w0, %w3\n"			\
+	"	st" #rel "xr	%w1, %w0, %2\n"				\
+	"	cbnz	%w1, 1b\n"					\
+	"	" #mb )							\
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
 	: __stringify(constraint) "r" (i)				\
 	: cl);								\
@@ -84,12 +84,12 @@ __ll_sc_atomic_fetch_##op##name(int i, atomic_t *v)	\
 									\
 	asm volatile("// atomic_fetch_" #op #name "\n"			\
 	__LL_SC_FALLBACK(						\
-"	prfm	pstl1strm, %3\n"					\
-"1:	ld" #acq "xr	%w0, %3\n"					\
-"	" #asm_op "	%w1, %w0, %w4\n"				\
-"	st" #rel "xr	%w2, %w1, %3\n"					\
-"	cbnz	%w2, 1b\n"						\
-"	" #mb )								\
+	"	prfm	pstl1strm, %3\n"				\
+	"1:	ld" #acq "xr	%w0, %3\n"				\
+	"	" #asm_op "	%w1, %w0, %w4\n"			\
+	"	st" #rel "xr	%w2, %w1, %3\n"				\
+	"	cbnz	%w2, 1b\n"					\
+	"	" #mb )							\
 	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter)	\
 	: __stringify(constraint) "r" (i)				\
 	: cl);								\
@@ -143,11 +143,11 @@ __ll_sc_atomic64_##op(s64 i, atomic64_t *v)		\
 									\
 	asm volatile("// atomic64_" #op "\n"				\
 	__LL_SC_FALLBACK(						\
-"	prfm	pstl1strm, %2\n"					\
-"1:	ldxr	%0, %2\n"						\
-"	" #asm_op "	%0, %0, %3\n"					\
-"	stxr	%w1, %0, %2\n"						\
-"	cbnz	%w1, 1b")						\
+	"	prfm	pstl1strm, %2\n"				\
+	"1:	ldxr	%0, %2\n"					\
+	"	" #asm_op "	%0, %0, %3\n"				\
+	"	stxr	%w1, %0, %2\n"					\
+	"	cbnz	%w1, 1b")					\
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
 	: __stringify(constraint) "r" (i));				\
 }
@@ -161,12 +161,12 @@ __ll_sc_atomic64_##op##_return##name(s64 i, atomic64_t *v)\
 									\
 	asm volatile("// atomic64_" #op "_return" #name "\n"		\
 	__LL_SC_FALLBACK(						\
-"	prfm	pstl1strm, %2\n"					\
-"1:	ld" #acq "xr	%0, %2\n"					\
-"	" #asm_op "	%0, %0, %3\n"					\
-"	st" #rel "xr	%w1, %0, %2\n"					\
-"	cbnz	%w1, 1b\n"						\
-"	" #mb )								\
+	"	prfm	pstl1strm, %2\n"				\
+	"1:	ld" #acq "xr	%0, %2\n"				\
+	"	" #asm_op "	%0, %0, %3\n"				\
+	"	st" #rel "xr	%w1, %0, %2\n"				\
+	"	cbnz	%w1, 1b\n"					\
+	"	" #mb )							\
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
 	: __stringify(constraint) "r" (i)				\
 	: cl);								\
@@ -176,19 +176,19 @@ __ll_sc_atomic64_##op##_return##name(s64 i, atomic64_t *v)\
 
 #define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint)\
 static inline long							\
-__ll_sc_atomic64_fetch_##op##name(s64 i, atomic64_t *v)		\
+__ll_sc_atomic64_fetch_##op##name(s64 i, atomic64_t *v)			\
 {									\
 	s64 result, val;						\
 	unsigned long tmp;						\
 									\
 	asm volatile("// atomic64_fetch_" #op #name "\n"		\
 	__LL_SC_FALLBACK(						\
-"	prfm	pstl1strm, %3\n"					\
-"1:	ld" #acq "xr	%0, %3\n"					\
-"	" #asm_op "	%1, %0, %4\n"					\
-"	st" #rel "xr	%w2, %1, %3\n"					\
-"	cbnz	%w2, 1b\n"						\
-"	" #mb )								\
+	"	prfm	pstl1strm, %3\n"				\
+	"1:	ld" #acq "xr	%0, %3\n"				\
+	"	" #asm_op "	%1, %0, %4\n"				\
+	"	st" #rel "xr	%w2, %1, %3\n"				\
+	"	cbnz	%w2, 1b\n"					\
+	"	" #mb )							\
 	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter)	\
 	: __stringify(constraint) "r" (i)				\
 	: cl);								\
@@ -241,14 +241,14 @@ __ll_sc_atomic64_dec_if_positive(atomic64_t *v)
 
 	asm volatile("// atomic64_dec_if_positive\n"
 	__LL_SC_FALLBACK(
-"	prfm	pstl1strm, %2\n"
-"1:	ldxr	%0, %2\n"
-"	subs	%0, %0, #1\n"
-"	b.lt	2f\n"
-"	stlxr	%w1, %0, %2\n"
-"	cbnz	%w1, 1b\n"
-"	dmb	ish\n"
-"2:")
+	"	prfm	pstl1strm, %2\n"
+	"1:	ldxr	%0, %2\n"
+	"	subs	%0, %0, #1\n"
+	"	b.lt	2f\n"
+	"	stlxr	%w1, %0, %2\n"
+	"	cbnz	%w1, 1b\n"
+	"	dmb	ish\n"
+	"2:")
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
 	:
 	: "cc", "memory");
-- 
cgit v1.2.3