path: root/arch/arm64/include/asm/mmu.h
author		Mark Brown <broonie@kernel.org>		2019-12-09 18:12:15 +0000
committer	Will Deacon <will@kernel.org>		2020-01-15 14:11:17 +0000
commit		c2d92353b28f8542c6b3150900b38c94b1d61480
tree		052c44f61d24b70c1e5a8d303c14f51f8518a5db	/arch/arm64/include/asm/mmu.h
parent		3e6c69a058deaa50d33c3dac36cde80b4ce590e8
arm64: Factor out checks for KASLR in KPTI code into separate function
In preparation for integrating E0PD support with KASLR, factor out the checks for interaction between KASLR and KPTI done in boot context into a new function kaslr_requires_kpti(), in the process clarifying the distinction between what we do in boot context and what we do at runtime.

Signed-off-by: Mark Brown <broonie@kernel.org>
Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
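For readers skimming the diff below, the following is a minimal stand-alone sketch of the decision flow that the new helper encapsulates. It is an illustration only, not kernel code: randomize_base_enabled, cpu_has_cavium_27456 and the plain kaslr_offset variable are stand-ins for the kernel's CONFIG_RANDOMIZE_BASE option, the erratum 27456 MIDR/capability check and kaslr_offset() respectively.

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins (illustrative, not kernel APIs) for the checks used below. */
static bool randomize_base_enabled = true;      /* models CONFIG_RANDOMIZE_BASE */
static bool cpu_has_cavium_27456   = false;     /* models the MIDR/cpucap check */
static unsigned long kaslr_offset  = 0x200000;  /* models kaslr_offset()        */

/* Mirrors the shape of the new kaslr_requires_kpti() helper. */
static bool kaslr_requires_kpti(void)
{
        /* No KASLR support built in: KASLR can never force KPTI. */
        if (!randomize_base_enabled)
                return false;

        /* CPUs affected by Cavium erratum 27456 cannot run with KPTI. */
        if (cpu_has_cavium_27456)
                return false;

        /* KASLR only forces KPTI if a non-zero offset was actually applied. */
        return kaslr_offset > 0;
}

int main(void)
{
        printf("kaslr_requires_kpti: %s\n", kaslr_requires_kpti() ? "true" : "false");
        return 0;
}

The real helper additionally has to cope with how far boot has progressed: before cpufeature capabilities are finalised it falls back to a MIDR list check on the local CPU, as visible in the diff.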
Diffstat (limited to 'arch/arm64/include/asm/mmu.h')
-rw-r--r--	arch/arm64/include/asm/mmu.h	62
1 file changed, 45 insertions(+), 17 deletions(-)
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index f217e3292919..2a93d34cc0ca 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -35,10 +35,46 @@ static inline bool arm64_kernel_unmapped_at_el0(void)
 	       cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
 }
 
-static inline bool arm64_kernel_use_ng_mappings(void)
+/*
+ * This check is triggered during the early boot before the cpufeature
+ * is initialised. Checking the status on the local CPU allows the boot
+ * CPU to detect the need for non-global mappings and thus avoiding a
+ * pagetable re-write after all the CPUs are booted. This check will be
+ * anyway run on individual CPUs, allowing us to get the consistent
+ * state once the SMP CPUs are up and thus make the switch to non-global
+ * mappings if required.
+ */
+static inline bool kaslr_requires_kpti(void)
 {
 	bool tx1_bug;
 
+	if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
+		return false;
+
+	/*
+	 * Systems affected by Cavium erratum 27456 are incompatible
+	 * with KPTI.
+	 */
+	if (!IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456)) {
+		tx1_bug = false;
+#ifndef MODULE
+	} else if (!static_branch_likely(&arm64_const_caps_ready)) {
+		extern const struct midr_range cavium_erratum_27456_cpus[];
+
+		tx1_bug = is_midr_in_range_list(read_cpuid_id(),
+						cavium_erratum_27456_cpus);
+#endif
+	} else {
+		tx1_bug = __cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456);
+	}
+	if (tx1_bug)
+		return false;
+
+	return kaslr_offset() > 0;
+}
+
+static inline bool arm64_kernel_use_ng_mappings(void)
+{
 	/* What's a kpti? Use global mappings if we don't know. */
 	if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0))
 		return false;
@@ -52,29 +88,21 @@ static inline bool arm64_kernel_use_ng_mappings(void)
 	if (arm64_kernel_unmapped_at_el0())
 		return true;
 
-	if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
+	/*
+	 * Once we are far enough into boot for capabilities to be
+	 * ready we will have confirmed if we are using non-global
+	 * mappings so don't need to consider anything else here.
+	 */
+	if (static_branch_likely(&arm64_const_caps_ready))
 		return false;
 
 	/*
 	 * KASLR is enabled so we're going to be enabling kpti on non-broken
 	 * CPUs regardless of their susceptibility to Meltdown. Rather
 	 * than force everybody to go through the G -> nG dance later on,
-	 * just put down non-global mappings from the beginning.
+	 * just put down non-global mappings from the beginning
 	 */
-	if (!IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456)) {
-		tx1_bug = false;
-#ifndef MODULE
-	} else if (!static_branch_likely(&arm64_const_caps_ready)) {
-		extern const struct midr_range cavium_erratum_27456_cpus[];
-
-		tx1_bug = is_midr_in_range_list(read_cpuid_id(),
-						cavium_erratum_27456_cpus);
-#endif
-	} else {
-		tx1_bug = __cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456);
-	}
-
-	return !tx1_bug && kaslr_offset() > 0;
+	return kaslr_requires_kpti();
 }
 
 typedef void (*bp_hardening_cb_t)(void);