From 77ec462536a13d4b428a1eead725c4818a49f0b1 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Thu, 18 Mar 2021 17:07:37 +0000
Subject: arm64: vdso: Avoid ISB after reading from cntvct_el0

We can avoid the expensive ISB instruction after reading the counter in
the vDSO gettime functions by creating a fake address hazard against a
dummy stack read, just like we do inside the kernel.

Signed-off-by: Will Deacon
Reviewed-by: Vincenzo Frascino
Link: https://lore.kernel.org/r/20210318170738.7756-5-will@kernel.org
Signed-off-by: Catalin Marinas
---
 arch/arm64/include/asm/barrier.h | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index c3009b0e5239..37d891af8ea5 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -70,6 +70,25 @@ static inline unsigned long array_index_mask_nospec(unsigned long idx,
 	return mask;
 }
 
+/*
+ * Ensure that reads of the counter are treated the same as memory reads
+ * for the purposes of ordering by subsequent memory barriers.
+ *
+ * This insanity brought to you by speculative system register reads,
+ * out-of-order memory accesses, sequence locks and Thomas Gleixner.
+ *
+ * http://lists.infradead.org/pipermail/linux-arm-kernel/2019-February/631195.html
+ */
+#define arch_counter_enforce_ordering(val) do {			\
+	u64 tmp, _val = (val);						\
+									\
+	asm volatile(							\
+	"	eor	%0, %1, %1\n"					\
+	"	add	%0, sp, %0\n"					\
+	"	ldr	xzr, [%0]"					\
+	: "=r" (tmp) : "r" (_val));					\
+} while (0)
+
 #define __smp_mb()	dmb(ish)
 #define __smp_rmb()	dmb(ishld)
 #define __smp_wmb()	dmb(ishst)
-- 
cgit v1.2.3
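
For readers following along, below is a minimal user-space sketch of the
same technique. It is not part of the patch: the helper names
read_cntvct() and counter_enforce_ordering() are hypothetical, and real
callers live in the kernel's vDSO gettime path. The trick is that EOR'ing
the counter value with itself always yields zero, but the CPU cannot know
that until the MRS has completed; using that zero to form the address of
a dummy stack load creates an address dependency, so later memory
barriers order the counter read like an ordinary load, with no ISB.

/*
 * Hypothetical stand-alone sketch of the fake-address-hazard trick.
 * Build with an arm64 toolchain, e.g. aarch64-linux-gnu-gcc -O2 sketch.c
 * (cntvct_el0 is readable from EL0 when the kernel enables it, as Linux
 * does by default).
 */
#include <stdint.h>
#include <stdio.h>

#define counter_enforce_ordering(val) do {			\
	uint64_t tmp, _val = (val);				\
								\
	asm volatile(						\
	"	eor	%0, %1, %1\n" /* tmp = val ^ val == 0 */\
	"	add	%0, sp, %0\n" /* tmp = sp + 0 */	\
	"	ldr	xzr, [%0]"    /* dummy stack load */	\
	: "=r" (tmp) : "r" (_val));				\
} while (0)

/* Hypothetical helper: read the virtual counter and give the read the
 * same ordering guarantees as a memory load, mirroring the vDSO. */
static inline uint64_t read_cntvct(void)
{
	uint64_t cval;

	asm volatile("mrs %0, cntvct_el0" : "=r" (cval));
	counter_enforce_ordering(cval);
	return cval;
}

int main(void)
{
	uint64_t t0 = read_cntvct();
	uint64_t t1 = read_cntvct();

	printf("cntvct_el0 delta: %llu ticks\n",
	       (unsigned long long)(t1 - t0));
	return 0;
}

The win over ISB comes from the cost model: ISB flushes the pipeline,
whereas the dependency chain above is two ALU instructions plus a load
from the current stack, which is almost certainly cache-hot.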