author		Will Deacon <will@kernel.org>	2020-11-13 11:38:44 +0000
committer	Marc Zyngier <maz@kernel.org>	2020-11-16 10:43:05 +0000
commit		b881cdce77b48bd488f268041f32951bab89bb0f (patch)
tree		ee08e27ab59bdf43f567d3edaa8e0ea5151d14a3 /arch/arm64/kvm
parent		da592e68a5a333b81111bd6336838764732f723e (diff)
KVM: arm64: Allocate hyp vectors statically
The EL2 vectors installed when a guest is running point at one of the
following configurations for a given CPU:

  - Straight at __kvm_hyp_vector

  - A trampoline containing an SMC sequence to mitigate Spectre-v2 and
    then a direct branch to __kvm_hyp_vector

  - A dynamically-allocated trampoline which has an indirect branch to
    __kvm_hyp_vector

  - A dynamically-allocated trampoline containing an SMC sequence to
    mitigate Spectre-v2 and then an indirect branch to __kvm_hyp_vector

The indirect branches mean that VA randomization at EL2 isn't trivially
bypassable using Spectre-v3a (where the vector base is readable by the
guest).

Rather than populate these vectors dynamically, configure everything
statically and use an enumerated type to identify the vector "slot"
corresponding to one of the configurations above. This not only
simplifies the code, but also makes it much easier to implement at EL2
later on.

Signed-off-by: Will Deacon <will@kernel.org>
[maz: fixed double call to kvm_init_vector_slots() on nVHE]
Signed-off-by: Marc Zyngier <maz@kernel.org>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Quentin Perret <qperret@google.com>
Link: https://lore.kernel.org/r/20201113113847.21619-8-will@kernel.org
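For reference, the vector "slot" enumeration the patch relies on pairs one
enumerator with each configuration above. A minimal sketch of its shape,
inferred from the names used in the diff below (the exact declaration and
the value of BP_HARDEN_EL2_SLOTS live in the arm64 headers and are
assumptions here):

/*
 * Sketch: one slot per vector flavour. The ordering must match the
 * generate_vectors invocations in hyp-entry.S, since a slot's code is
 * found at base + slot * SZ_2K.
 */
enum arm64_hyp_spectre_vector {
	HYP_VECTOR_DIRECT,		/* straight at __kvm_hyp_vector */
	HYP_VECTOR_SPECTRE_DIRECT,	/* SMC sequence, then direct branch */
	HYP_VECTOR_INDIRECT,		/* indirect branch to __kvm_hyp_vector */
	HYP_VECTOR_SPECTRE_INDIRECT,	/* SMC sequence, then indirect branch */
};

#define BP_HARDEN_EL2_SLOTS	4	/* assumed: one 2K slot per enumerator */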
Diffstat (limited to 'arch/arm64/kvm')
-rw-r--r--	arch/arm64/kvm/arm.c		86
-rw-r--r--	arch/arm64/kvm/hyp/Makefile	2
-rw-r--r--	arch/arm64/kvm/hyp/hyp-entry.S	72
-rw-r--r--	arch/arm64/kvm/hyp/smccc_wa.S	32
-rw-r--r--	arch/arm64/kvm/va_layout.c	11
5 files changed, 77 insertions, 126 deletions
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 044c5fc81f90..5e6fe5eef3ec 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -51,14 +51,6 @@ DECLARE_KVM_HYP_PER_CPU(unsigned long, kvm_hyp_vector);
static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
unsigned long kvm_arm_hyp_percpu_base[NR_CPUS];
-/* Hypervisor VA of the indirect vector trampoline page */
-static void *__kvm_bp_vect_base;
-/*
- * Slot in the hyp vector page for use by the indirect vector trampoline
- * when mitigation against Spectre-v2 is not required.
- */
-static int __kvm_harden_el2_vector_slot;
-
/* The VMID used in the VTTBR */
static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
static u32 kvm_next_vmid;
@@ -1303,33 +1295,38 @@ static unsigned long nvhe_percpu_order(void)
return size ? get_order(size) : 0;
}
-static int kvm_map_vectors(void)
+/* A lookup table holding the hypervisor VA for each vector slot */
+static void *hyp_spectre_vector_selector[BP_HARDEN_EL2_SLOTS];
+
+static void kvm_init_vector_slot(void *base, enum arm64_hyp_spectre_vector slot)
{
- int slot;
+ hyp_spectre_vector_selector[slot] = base + (slot * SZ_2K);
+}
+
+static int kvm_init_vector_slots(void)
+{
+ int err;
+ void *base;
+
+ base = kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
+ kvm_init_vector_slot(base, HYP_VECTOR_DIRECT);
+
+ base = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs));
+ kvm_init_vector_slot(base, HYP_VECTOR_SPECTRE_DIRECT);
- /*
- * SV2 = ARM64_SPECTRE_V2
- * HEL2 = ARM64_HARDEN_EL2_VECTORS
- *
- * !SV2 + !HEL2 -> use direct vectors
- * SV2 + !HEL2 -> use hardened vectors in place
- * !SV2 + HEL2 -> allocate one vector slot and use exec mapping
- * SV2 + HEL2 -> use hardened vectors and use exec mapping
- */
if (!cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS))
return 0;
- /*
- * Always allocate a spare vector slot, as we don't know yet which CPUs
- * have a BP hardening slot that we can reuse.
- */
- slot = atomic_inc_return(&arm64_el2_vector_last_slot);
- BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
- __kvm_harden_el2_vector_slot = slot;
+ if (!has_vhe()) {
+ err = create_hyp_exec_mappings(__pa_symbol(__bp_harden_hyp_vecs),
+ __BP_HARDEN_HYP_VECS_SZ, &base);
+ if (err)
+ return err;
+ }
- return create_hyp_exec_mappings(__pa_symbol(__bp_harden_hyp_vecs),
- __BP_HARDEN_HYP_VECS_SZ,
- &__kvm_bp_vect_base);
+ kvm_init_vector_slot(base, HYP_VECTOR_INDIRECT);
+ kvm_init_vector_slot(base, HYP_VECTOR_SPECTRE_INDIRECT);
+ return 0;
}
static void cpu_init_hyp_mode(void)
@@ -1406,24 +1403,9 @@ static void cpu_hyp_reset(void)
static void cpu_set_hyp_vector(void)
{
struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);
- void *vect = kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
- int slot = -1;
-
- if (cpus_have_const_cap(ARM64_SPECTRE_V2) && data->fn) {
- vect = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs));
- slot = data->hyp_vectors_slot;
- }
-
- if (this_cpu_has_cap(ARM64_HARDEN_EL2_VECTORS) && !has_vhe()) {
- vect = __kvm_bp_vect_base;
- if (slot == -1)
- slot = __kvm_harden_el2_vector_slot;
- }
-
- if (slot != -1)
- vect += slot * SZ_2K;
+ void *vector = hyp_spectre_vector_selector[data->slot];
- *this_cpu_ptr_hyp_sym(kvm_hyp_vector) = (unsigned long)vect;
+ *this_cpu_ptr_hyp_sym(kvm_hyp_vector) = (unsigned long)vector;
}
static void cpu_hyp_reinit(void)
@@ -1661,12 +1643,6 @@ static int init_hyp_mode(void)
goto out_err;
}
- err = kvm_map_vectors();
- if (err) {
- kvm_err("Cannot map vectors\n");
- goto out_err;
- }
-
/*
* Map the Hyp stack pages
*/
@@ -1810,6 +1786,12 @@ int kvm_arch_init(void *opaque)
goto out_err;
}
+ err = kvm_init_vector_slots();
+ if (err) {
+ kvm_err("Cannot initialise vector slots\n");
+ goto out_err;
+ }
+
err = init_subsystems();
if (err)
goto out_hyp;
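The slot arithmetic above is worth spelling out. kvm_init_vector_slot(base,
slot) records base + slot * SZ_2K, and the base differs per slot, so the
selector table ends up as follows (a worked sketch; SZ_2K == 0x800):

/*
 * hyp_spectre_vector_selector[] after kvm_init_vector_slots(), a sketch:
 *
 *	HYP_VECTOR_DIRECT           (0): __kvm_hyp_vector     + 0x0000
 *	HYP_VECTOR_SPECTRE_DIRECT   (1): __bp_harden_hyp_vecs + 0x0800
 *	HYP_VECTOR_INDIRECT         (2): exec mapping of
 *	                                 __bp_harden_hyp_vecs + 0x1000
 *	HYP_VECTOR_SPECTRE_INDIRECT (3): exec mapping of
 *	                                 __bp_harden_hyp_vecs + 0x1800
 *
 * Each offset lands on the generate_vectors block of the matching flavour
 * in __bp_harden_hyp_vecs (or, for slot 0, on __kvm_hyp_vector itself).
 * On VHE, no exec mapping is created, so slots 2 and 3 would fall back to
 * the regular hyp VA of __bp_harden_hyp_vecs.
 */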
diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile
index 4a81eddabcd8..687598e41b21 100644
--- a/arch/arm64/kvm/hyp/Makefile
+++ b/arch/arm64/kvm/hyp/Makefile
@@ -10,4 +10,4 @@ subdir-ccflags-y := -I$(incdir) \
-DDISABLE_BRANCH_PROFILING \
$(DISABLE_STACKLEAK_PLUGIN)
-obj-$(CONFIG_KVM) += vhe/ nvhe/ pgtable.o smccc_wa.o
+obj-$(CONFIG_KVM) += vhe/ nvhe/ pgtable.o
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index 874eacdabc64..d0a3660c7256 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -188,52 +188,62 @@ SYM_CODE_START(__kvm_hyp_vector)
valid_vect el1_error // Error 32-bit EL1
SYM_CODE_END(__kvm_hyp_vector)
-.macro hyp_ventry
- .align 7
+.macro spectrev2_smccc_wa1_smc
+ sub sp, sp, #(8 * 4)
+ stp x2, x3, [sp, #(8 * 0)]
+ stp x0, x1, [sp, #(8 * 2)]
+ mov w0, #ARM_SMCCC_ARCH_WORKAROUND_1
+ smc #0
+ ldp x2, x3, [sp, #(8 * 0)]
+ add sp, sp, #(8 * 2)
+.endm
+
+.macro hyp_ventry indirect, spectrev2
+ .align 7
1: esb
- .rept 26
- nop
- .endr
-/*
- * The default sequence is to directly branch to the KVM vectors,
- * using the computed offset. This applies for VHE as well as
- * !ARM64_HARDEN_EL2_VECTORS. The first vector must always run the preamble.
- *
- * For ARM64_HARDEN_EL2_VECTORS configurations, this gets replaced
- * with:
- *
- * stp x0, x1, [sp, #-16]!
- * movz x0, #(addr & 0xffff)
- * movk x0, #((addr >> 16) & 0xffff), lsl #16
- * movk x0, #((addr >> 32) & 0xffff), lsl #32
- * br x0
- *
- * Where:
- * addr = kern_hyp_va(__kvm_hyp_vector) + vector-offset + KVM_VECTOR_PREAMBLE.
- * See kvm_patch_vector_branch for details.
- */
-alternative_cb kvm_patch_vector_branch
+ .if \spectrev2 != 0
+ spectrev2_smccc_wa1_smc
+ .else
stp x0, x1, [sp, #-16]!
- b __kvm_hyp_vector + (1b - 0b + KVM_VECTOR_PREAMBLE)
+ .endif
+ .if \indirect != 0
+ alternative_cb kvm_patch_vector_branch
+ /*
+ * For ARM64_HARDEN_EL2_VECTORS configurations, these NOPs get replaced
+ * with:
+ *
+ * movz x0, #(addr & 0xffff)
+ * movk x0, #((addr >> 16) & 0xffff), lsl #16
+ * movk x0, #((addr >> 32) & 0xffff), lsl #32
+ * br x0
+ *
+ * Where:
+ * addr = kern_hyp_va(__kvm_hyp_vector) + vector-offset + KVM_VECTOR_PREAMBLE.
+ * See kvm_patch_vector_branch for details.
+ */
nop
nop
nop
-alternative_cb_end
+ nop
+ alternative_cb_end
+ .endif
+ b __kvm_hyp_vector + (1b - 0b + KVM_VECTOR_PREAMBLE)
.endm
-.macro generate_vectors
+.macro generate_vectors indirect, spectrev2
0:
.rept 16
- hyp_ventry
+ hyp_ventry \indirect, \spectrev2
.endr
.org 0b + SZ_2K // Safety measure
.endm
.align 11
SYM_CODE_START(__bp_harden_hyp_vecs)
- .rept BP_HARDEN_EL2_SLOTS
- generate_vectors
- .endr
+ generate_vectors indirect = 0, spectrev2 = 0 // HYP_VECTOR_DIRECT
+ generate_vectors indirect = 0, spectrev2 = 1 // HYP_VECTOR_SPECTRE_DIRECT
+ generate_vectors indirect = 1, spectrev2 = 0 // HYP_VECTOR_INDIRECT
+ generate_vectors indirect = 1, spectrev2 = 1 // HYP_VECTOR_SPECTRE_INDIRECT
1: .org __bp_harden_hyp_vecs + __BP_HARDEN_HYP_VECS_SZ
.org 1b
SYM_CODE_END(__bp_harden_hyp_vecs)
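The sizing here is rigid by construction: each hyp_ventry is .align 7,
i.e. 128 bytes, and generate_vectors emits 16 of them, so every slot is
exactly 16 * 128 = 2048 bytes, which the `.org 0b + SZ_2K` directive
enforces. A sketch of the overall arithmetic (the exact definition of
__BP_HARDEN_HYP_VECS_SZ is an assumption here):

/*
 * Layout of __bp_harden_hyp_vecs, a sketch:
 *
 *	16 vectors/slot * 128 bytes/vector = SZ_2K per slot
 *	4 slots         * SZ_2K            = 8 KiB total
 *
 * i.e. __BP_HARDEN_HYP_VECS_SZ is presumably
 * (BP_HARDEN_EL2_SLOTS * SZ_2K), which the final .org checks against.
 */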
diff --git a/arch/arm64/kvm/hyp/smccc_wa.S b/arch/arm64/kvm/hyp/smccc_wa.S
deleted file mode 100644
index b0441dbdf68b..000000000000
--- a/arch/arm64/kvm/hyp/smccc_wa.S
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2015-2018 - ARM Ltd
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- */
-
-#include <linux/arm-smccc.h>
-#include <linux/linkage.h>
-
-#include <asm/kvm_asm.h>
-#include <asm/kvm_mmu.h>
-
- /*
- * This is not executed directly and is instead copied into the vectors
- * by install_bp_hardening_cb().
- */
- .data
- .pushsection .rodata
- .global __smccc_workaround_1_smc
-SYM_DATA_START(__smccc_workaround_1_smc)
- esb
- sub sp, sp, #(8 * 4)
- stp x2, x3, [sp, #(8 * 0)]
- stp x0, x1, [sp, #(8 * 2)]
- mov w0, #ARM_SMCCC_ARCH_WORKAROUND_1
- smc #0
- ldp x2, x3, [sp, #(8 * 0)]
- ldp x0, x1, [sp, #(8 * 2)]
- add sp, sp, #(8 * 4)
-1: .org __smccc_workaround_1_smc + __SMCCC_WORKAROUND_1_SMC_SZ
- .org 1b
-SYM_DATA_END(__smccc_workaround_1_smc)
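One subtlety in dropping this file: the removed template restored x0/x1
and rewound the full 32 bytes, because it was copied in front of vectors
that saved x0/x1 themselves. The new spectrev2_smccc_wa1_smc macro in
hyp-entry.S instead pops only x2/x3 and rewinds 16 bytes, deliberately
leaving x0/x1 on the stack so that both entry flavours reach the common
tail in the same state (a sketch of the equivalence):

/*
 * Stack state on reaching the branch at the end of hyp_ventry, a sketch:
 *
 *   spectrev2 path:  sub sp, #32; stp x2,x3; stp x0,x1; smc;
 *                    ldp x2,x3; add sp, #16
 *                      -> sp points at the saved x0/x1 pair
 *   plain path:      stp x0, x1, [sp, #-16]!
 *                      -> sp points at the saved x0/x1 pair
 *
 * Either way, 16 bytes of saved x0/x1 remain on the stack, matching what
 * the skipped preamble of __kvm_hyp_vector would have pushed.
 */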
diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c
index 760db2c84b9b..cc8e8756600f 100644
--- a/arch/arm64/kvm/va_layout.c
+++ b/arch/arm64/kvm/va_layout.c
@@ -137,7 +137,7 @@ void kvm_patch_vector_branch(struct alt_instr *alt,
u64 addr;
u32 insn;
- BUG_ON(nr_inst != 5);
+ BUG_ON(nr_inst != 4);
if (!cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS) ||
WARN_ON_ONCE(has_vhe())) {
@@ -160,15 +160,6 @@ void kvm_patch_vector_branch(struct alt_instr *alt,
*/
addr += KVM_VECTOR_PREAMBLE;
- /* stp x0, x1, [sp, #-16]! */
- insn = aarch64_insn_gen_load_store_pair(AARCH64_INSN_REG_0,
- AARCH64_INSN_REG_1,
- AARCH64_INSN_REG_SP,
- -16,
- AARCH64_INSN_VARIANT_64BIT,
- AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX);
- *updptr++ = cpu_to_le32(insn);
-
/* movz x0, #(addr & 0xffff) */
insn = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0,
(u16)addr,
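With the `stp` now assembled statically by hyp_ventry, the BUG_ON drops
from 5 to 4 instructions and kvm_patch_vector_branch() only has the
address-materializing sequence left to generate, matching the comment
block in hyp-entry.S above:

/*
 * The four instructions patched over the NOPs in hyp_ventry:
 *
 *	movz	x0, #(addr & 0xffff)
 *	movk	x0, #((addr >> 16) & 0xffff), lsl #16
 *	movk	x0, #((addr >> 32) & 0xffff), lsl #32
 *	br	x0
 *
 * where addr = kern_hyp_va(__kvm_hyp_vector) + vector-offset
 *	       + KVM_VECTOR_PREAMBLE
 */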