author     Suzuki K Poulose <suzuki.poulose@arm.com>    2021-10-19 17:31:41 +0100
committer  Will Deacon <will@kernel.org>                2021-10-21 17:49:16 +0100
commit     fa82d0b4b833790ac4572377fb777dcea24a9d69 (patch)
tree       08780ba656ecb6d78224b14a6434ce25f7c33ded /arch/arm64/include/asm/barrier.h
parent     b9d216fcef4298de76519e2baeed69ba482467bd (diff)
arm64: errata: Add workaround for TSB flush failures
Arm Neoverse-N2 (#2067961) and Cortex-A710 (#2054223) suffer from errata whereby a TSB (trace synchronization barrier) fails to flush the trace data completely when executed from a trace prohibited region. In Linux we always execute it after we have moved the PE to a trace prohibited region, so we can apply the workaround every time a TSB is executed. The workaround is to issue two TSBs consecutively.

NOTE: This erratum is defined as a LOCAL_CPU_ERRATUM, implying that a late CPU could be blocked from booting if it is the first CPU that requires the workaround. This is because we do not allow setting cpu_hwcaps after SMP boot. The alternative is to use "this_cpu_has_cap()" instead of the faster system-wide check, which may be a bit of an overhead, given we may have to do this in the nVHE KVM host before a guest entry.

Cc: Will Deacon <will@kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Mathieu Poirier <mathieu.poirier@linaro.org>
Cc: Mike Leach <mike.leach@linaro.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Marc Zyngier <maz@kernel.org>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: Mathieu Poirier <mathieu.poirier@linaro.org>
Reviewed-by: Anshuman Khandual <anshuman.khandual@arm.com>
Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Link: https://lore.kernel.org/r/20211019163153.3692640-4-suzuki.poulose@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
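For context, the ARM64_WORKAROUND_TSB_FLUSH_FAILURE capability referenced in the diff below is the kind of erratum that gets registered in arch/arm64/kernel/cpu_errata.c. The following is only a minimal sketch, assuming the common MIDR-range-list pattern used for arm64 errata; the array names and the standalone table are illustrative and not taken from this patch:

	/*
	 * Illustrative only -- a sketch of how such an erratum entry is
	 * usually wired up; names below are hypothetical.
	 */
	static const struct midr_range tsb_flush_fail_cpus[] = {
	#ifdef CONFIG_ARM64_ERRATUM_2067961
		MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
	#endif
	#ifdef CONFIG_ARM64_ERRATUM_2054223
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
	#endif
		{},
	};

	static const struct arm64_cpu_capabilities tsb_flush_workaround[] = {
		{
			.desc = "ARM erratum 2067961 or 2054223",
			.capability = ARM64_WORKAROUND_TSB_FLUSH_FAILURE,
			ERRATA_MIDR_RANGE_LIST(tsb_flush_fail_cpus),
		},
		{},
	};

In the real kernel such an entry would live in the arm64_errata[] table rather than a separate array; the sketch only shows the shape of the registration.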
Diffstat (limited to 'arch/arm64/include/asm/barrier.h')
-rw-r--r--  arch/arm64/include/asm/barrier.h | 16 +++++++++++++++-
1 file changed, 15 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 451e11e5fd23..1c5a00598458 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -23,7 +23,7 @@
#define dsb(opt) asm volatile("dsb " #opt : : : "memory")
#define psb_csync() asm volatile("hint #17" : : : "memory")
-#define tsb_csync() asm volatile("hint #18" : : : "memory")
+#define __tsb_csync() asm volatile("hint #18" : : : "memory")
#define csdb() asm volatile("hint #20" : : : "memory")
#ifdef CONFIG_ARM64_PSEUDO_NMI
@@ -46,6 +46,20 @@
#define dma_rmb() dmb(oshld)
#define dma_wmb() dmb(oshst)
+
+#define tsb_csync() \
+ do { \
+ /* \
+ * CPUs affected by Arm Erratum 2054223 or 2067961 needs \
+ * another TSB to ensure the trace is flushed. The barriers \
+ * don't have to be strictly back to back, as long as the \
+ * CPU is in trace prohibited state. \
+ */ \
+ if (cpus_have_final_cap(ARM64_WORKAROUND_TSB_FLUSH_FAILURE)) \
+ __tsb_csync(); \
+ __tsb_csync(); \
+ } while (0)
+
/*
* Generate a mask for array_index__nospec() that is ~0UL when 0 <= idx < sz
* and 0 otherwise.
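
For reference, a hypothetical caller-side sketch of how tsb_csync() is typically paired with a DSB when draining trace to memory once the PE is in a trace prohibited state. The helper name is illustrative; actual trace sink drivers have their own sequences:

	/*
	 * Illustrative drain sequence (not part of this patch): with trace
	 * generation already prohibited, flush any trace still in flight and
	 * make sure it has reached memory before the buffer is reused.
	 */
	static inline void trace_drain(void)
	{
		tsb_csync();	/* one or two TSB CSYNCs, per the erratum workaround */
		dsb(nsh);	/* ensure the flushed trace data is visible in memory */
	}

Because the workaround is folded into the tsb_csync() macro itself, callers like this need no changes when the erratum applies.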