From 7ae2f3db6167aa7184529fdacd1de72619baf93b Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sat, 30 May 2020 17:22:19 +0100 Subject: KVM: arm64: Flush the instruction cache if not unmapping the VM on reboot On a system with FWB, we don't need to unmap Stage-2 on reboot, as even if userspace takes this opportunity to repaint the whole of memory, FWB ensures that the data side stays consistent even if the guest uses non-cacheable mappings. However, the I-side is not necessarily coherent with the D-side if CTR_EL0.DIC is 0. In this case, invalidate the I-cache to preserve coherency. Reported-by: Alexandru Elisei Reviewed-by: Alexandru Elisei Fixes: 892713e97ca1 ("KVM: arm64: Sidestep stage2_unmap_vm() on vcpu reset when S2FWB is supported") Signed-off-by: Marc Zyngier --- arch/arm64/kvm/arm.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index b0b569f2cdd0..d6988401c22a 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -989,11 +989,17 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu, * Ensure a rebooted VM will fault in RAM pages and detect if the * guest MMU is turned off and flush the caches as needed. * - * S2FWB enforces all memory accesses to RAM being cacheable, we - * ensure that the cache is always coherent. + * S2FWB enforces all memory accesses to RAM being cacheable, + * ensuring that the data side is always coherent. We still + * need to invalidate the I-cache though, as FWB does *not* + * imply CTR_EL0.DIC. */ - if (vcpu->arch.has_run_once && !cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) - stage2_unmap_vm(vcpu->kvm); + if (vcpu->arch.has_run_once) { + if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB)) + stage2_unmap_vm(vcpu->kvm); + else + __flush_icache_all(); + } vcpu_reset_hcr(vcpu); -- cgit v1.2.3 From 7c582bf4ed84f3eb58bdd1f63024a14c17551e7d Mon Sep 17 00:00:00 2001 From: James Morse Date: Fri, 29 May 2020 15:06:54 +0000 Subject: KVM: arm64: Stop writing aarch32's CSSELR into ACTLR aarch32 has pairs of registers to access the high and low parts of 64bit registers. KVM has a union of 64bit sys_regs[] and 32bit copro[]. The 32bit accessors read the high or low part of the 64bit sys_regs[] value through the union. Both sys_reg_descs[] and cp15_regs[] list access_csselr() as the accessor for CSSELR{,_EL1}. access_csselr() is only aware of the 64bit sys_regs[], and expects r->reg to be 'CSSELR_EL1' in the enum, index 2 of the 64bit array. cp15_regs[] uses the 32bit copro[] alias of sys_regs[]. Here CSSELR is c0_CSSELR, which is the same location in sys_regs[]. r->reg is 'c0_CSSELR', index 4 in the 32bit array. access_csselr() uses the 32bit r->reg value to access the 64bit array, so it reads and writes the wrong value. sys_regs[4] is ACTLR_EL1, which is subsequently saved/restored when we enter the guest. ACTLR_EL1 is supposed to be read-only for the guest. This register only affects execution at EL1, and the host's value is restored before we return to host EL1. Convert the 32bit register index back to the 64bit version.
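
To make the union aliasing concrete, here is a minimal stand-alone sketch (illustrative userspace code, not the kernel's; array sizes and the written value are invented) of how the 32bit copro[] view overlays the 64bit sys_regs[] view on a little-endian host, and why the 32bit index must be halved, as the fix below does:

#include <stdio.h>
#include <stdint.h>

/* Toy model of KVM's register union: 32bit copro[] overlays 64bit sys_regs[]. */
union vcpu_regs {
	uint64_t sys_regs[16];	/* 64bit view: CSSELR_EL1 at index 2, ACTLR_EL1 at index 4 */
	uint32_t copro[32];	/* 32bit view: c0_CSSELR at index 4 */
};

int main(void)
{
	union vcpu_regs r = { { 0 } };

	r.copro[4] = 0xdeadbeef;	/* a write through the 32bit alias */

	/* On little-endian, copro[4] is the low half of sys_regs[4 / 2]:
	 * halving the 32bit index recovers the 64bit one. Using index 4
	 * against sys_regs[] directly would hit ACTLR_EL1 instead. */
	printf("sys_regs[2] = %#llx\n", (unsigned long long)r.sys_regs[4 / 2]);
	return 0;
}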
Suggested-by: Marc Zyngier Signed-off-by: James Morse Signed-off-by: Marc Zyngier Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20200529150656.7339-2-james.morse@arm.com --- arch/arm64/kvm/sys_regs.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index ad1d57501d6d..12f8d57a3cb8 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1319,10 +1319,16 @@ static bool access_clidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, static bool access_csselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) { + int reg = r->reg; + + /* See the 32bit mapping in kvm_host.h */ + if (p->is_aarch32) + reg = r->reg / 2; + if (p->is_write) - vcpu_write_sys_reg(vcpu, p->regval, r->reg); + vcpu_write_sys_reg(vcpu, p->regval, reg); else - p->regval = vcpu_read_sys_reg(vcpu, r->reg); + p->regval = vcpu_read_sys_reg(vcpu, reg); return true; } -- cgit v1.2.3 From ef5a294be8d0e17b91d23be905f69368b0d3952e Mon Sep 17 00:00:00 2001 From: James Morse Date: Fri, 29 May 2020 15:06:55 +0000 Subject: KVM: arm64: Add emulation for 32bit guests accessing ACTLR2 ACTLR_EL1 is a 64bit register while the 32bit ACTLR is obviously 32bit. For 32bit software, the extra bits are accessible via ACTLR2... which KVM doesn't emulate. Suggested-by: Marc Zyngier Signed-off-by: James Morse Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20200529150656.7339-3-james.morse@arm.com --- arch/arm64/kvm/sys_regs_generic_v8.c | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'arch') diff --git a/arch/arm64/kvm/sys_regs_generic_v8.c b/arch/arm64/kvm/sys_regs_generic_v8.c index 9cb6b4c8355a..aa9d356451eb 100644 --- a/arch/arm64/kvm/sys_regs_generic_v8.c +++ b/arch/arm64/kvm/sys_regs_generic_v8.c @@ -27,6 +27,14 @@ static bool access_actlr(struct kvm_vcpu *vcpu, return ignore_write(vcpu, p); p->regval = vcpu_read_sys_reg(vcpu, ACTLR_EL1); + + if (p->is_aarch32) { + if (r->Op2 & 2) + p->regval = upper_32_bits(p->regval); + else + p->regval = lower_32_bits(p->regval); + } + return true; } @@ -47,6 +55,8 @@ static const struct sys_reg_desc genericv8_cp15_regs[] = { /* ACTLR */ { Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b001), access_actlr }, + { Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b011), + access_actlr }, }; static struct kvm_sys_reg_target_table genericv8_target_table = { -- cgit v1.2.3 From e8679fedd026eb3b4655af83829d9036e32c9b97 Mon Sep 17 00:00:00 2001 From: James Morse Date: Fri, 29 May 2020 15:06:56 +0000 Subject: KVM: arm64: Stop save/restoring ACTLR_EL1 KVM sets HCR_EL2.TACR via HCR_GUEST_FLAGS. This means ACTLR* accesses from the guest are always trapped, and always return the value in the sys_regs array. The guest can't change the value of these registers, so we are save/restoring the reset value, which came from the host. Stop save/restoring this register. Keep the storage for this register in sys_regs[] as this is how the value is exposed to user-space; removing it would break migration.
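
The Op2-based half selection in access_actlr() above is easy to model in isolation. A stand-alone sketch (the helper names mirror the kernel's upper_32_bits()/lower_32_bits(); the register value is invented): ACTLR is Op2=0b001 and ACTLR2 is Op2=0b011, so bit 1 of Op2 picks the half.

#include <stdio.h>
#include <stdint.h>

static inline uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }
static inline uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }

/* ACTLR is Op2=0b001, ACTLR2 is Op2=0b011: bit 1 of Op2 selects the half. */
static uint32_t read_actlr32(uint64_t actlr_el1, unsigned int op2)
{
	return (op2 & 2) ? upper_32_bits(actlr_el1) : lower_32_bits(actlr_el1);
}

int main(void)
{
	uint64_t actlr_el1 = 0x1122334455667788ULL;	/* invented value */

	printf("ACTLR  = %#x\n", read_actlr32(actlr_el1, 1));	/* Op2=0b001 */
	printf("ACTLR2 = %#x\n", read_actlr32(actlr_el1, 3));	/* Op2=0b011 */
	return 0;
}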
Signed-off-by: James Morse Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20200529150656.7339-4-james.morse@arm.com --- arch/arm64/kvm/hyp/sysreg-sr.c | 2 -- arch/arm64/kvm/sys_regs.c | 2 -- 2 files changed, 4 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c index 6d2df9fe0b5d..0d60e6ee96b2 100644 --- a/arch/arm64/kvm/hyp/sysreg-sr.c +++ b/arch/arm64/kvm/hyp/sysreg-sr.c @@ -39,7 +39,6 @@ static void __hyp_text __sysreg_save_el1_state(struct kvm_cpu_context *ctxt) { ctxt->sys_regs[CSSELR_EL1] = read_sysreg(csselr_el1); ctxt->sys_regs[SCTLR_EL1] = read_sysreg_el1(SYS_SCTLR); - ctxt->sys_regs[ACTLR_EL1] = read_sysreg(actlr_el1); ctxt->sys_regs[CPACR_EL1] = read_sysreg_el1(SYS_CPACR); ctxt->sys_regs[TTBR0_EL1] = read_sysreg_el1(SYS_TTBR0); ctxt->sys_regs[TTBR1_EL1] = read_sysreg_el1(SYS_TTBR1); @@ -122,7 +121,6 @@ static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt) isb(); } - write_sysreg(ctxt->sys_regs[ACTLR_EL1], actlr_el1); write_sysreg_el1(ctxt->sys_regs[CPACR_EL1], SYS_CPACR); write_sysreg_el1(ctxt->sys_regs[TTBR0_EL1], SYS_TTBR0); write_sysreg_el1(ctxt->sys_regs[TTBR1_EL1], SYS_TTBR1); diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 12f8d57a3cb8..dfb966f3863a 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -78,7 +78,6 @@ static bool __vcpu_read_sys_reg_from_cpu(int reg, u64 *val) switch (reg) { case CSSELR_EL1: *val = read_sysreg_s(SYS_CSSELR_EL1); break; case SCTLR_EL1: *val = read_sysreg_s(SYS_SCTLR_EL12); break; - case ACTLR_EL1: *val = read_sysreg_s(SYS_ACTLR_EL1); break; case CPACR_EL1: *val = read_sysreg_s(SYS_CPACR_EL12); break; case TTBR0_EL1: *val = read_sysreg_s(SYS_TTBR0_EL12); break; case TTBR1_EL1: *val = read_sysreg_s(SYS_TTBR1_EL12); break; @@ -118,7 +117,6 @@ static bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg) switch (reg) { case CSSELR_EL1: write_sysreg_s(val, SYS_CSSELR_EL1); break; case SCTLR_EL1: write_sysreg_s(val, SYS_SCTLR_EL12); break; - case ACTLR_EL1: write_sysreg_s(val, SYS_ACTLR_EL1); break; case CPACR_EL1: write_sysreg_s(val, SYS_CPACR_EL12); break; case TTBR0_EL1: write_sysreg_s(val, SYS_TTBR0_EL12); break; case TTBR1_EL1: write_sysreg_s(val, SYS_TTBR1_EL12); break; -- cgit v1.2.3 From ef3e40a7ea8dbe2abd0a345032cd7d5023b9684f Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 3 Jun 2020 18:24:01 +0100 Subject: KVM: arm64: Save the host's PtrAuth keys in non-preemptible context When using the PtrAuth feature in a guest, we need to save the host's keys before allowing the guest to program them. For that, we dump them in a per-CPU data structure (the so-called host context). But both call sites that do this are in preemptible context, which may end up in disaster should the vcpu thread get preempted before reentering the guest. Instead, save the keys eagerly on each vcpu_load(). This has an increased overhead, but is at least safe.
Cc: stable@vger.kernel.org Reviewed-by: Mark Rutland Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_emulate.h | 6 ------ arch/arm64/kvm/arm.c | 18 +++++++++++++++++- arch/arm64/kvm/handle_exit.c | 19 ++----------------- 3 files changed, 19 insertions(+), 24 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index a30b4eec7cb4..977843e4d5fb 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -112,12 +112,6 @@ static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu) vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK); } -static inline void vcpu_ptrauth_setup_lazy(struct kvm_vcpu *vcpu) -{ - if (vcpu_has_ptrauth(vcpu)) - vcpu_ptrauth_disable(vcpu); -} - static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu) { return vcpu->arch.vsesr_el2; diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index d6988401c22a..152049c5055d 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -337,6 +337,12 @@ void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) preempt_enable(); } +#define __ptrauth_save_key(regs, key) \ +({ \ + regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1); \ + regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1); \ +}) + void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { int *last_ran; @@ -370,7 +376,17 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) else vcpu_set_wfx_traps(vcpu); - vcpu_ptrauth_setup_lazy(vcpu); + if (vcpu_has_ptrauth(vcpu)) { + struct kvm_cpu_context *ctxt = vcpu->arch.host_cpu_context; + + __ptrauth_save_key(ctxt->sys_regs, APIA); + __ptrauth_save_key(ctxt->sys_regs, APIB); + __ptrauth_save_key(ctxt->sys_regs, APDA); + __ptrauth_save_key(ctxt->sys_regs, APDB); + __ptrauth_save_key(ctxt->sys_regs, APGA); + + vcpu_ptrauth_disable(vcpu); + } } void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index eb194696ef62..065251efa2e6 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c @@ -162,31 +162,16 @@ static int handle_sve(struct kvm_vcpu *vcpu, struct kvm_run *run) return 1; } -#define __ptrauth_save_key(regs, key) \ -({ \ - regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1); \ - regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1); \ -}) - /* * Handle the guest trying to use a ptrauth instruction, or trying to access a * ptrauth register. */ void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu) { - struct kvm_cpu_context *ctxt; - - if (vcpu_has_ptrauth(vcpu)) { + if (vcpu_has_ptrauth(vcpu)) vcpu_ptrauth_enable(vcpu); - ctxt = vcpu->arch.host_cpu_context; - __ptrauth_save_key(ctxt->sys_regs, APIA); - __ptrauth_save_key(ctxt->sys_regs, APIB); - __ptrauth_save_key(ctxt->sys_regs, APDA); - __ptrauth_save_key(ctxt->sys_regs, APDB); - __ptrauth_save_key(ctxt->sys_regs, APGA); - } else { + else kvm_inject_undefined(vcpu); - } } /* -- cgit v1.2.3 From 29eb5a3c57f7e06d803bb44a0ce2f9ed79f39cd9 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 4 Jun 2020 11:14:00 +0100 Subject: KVM: arm64: Handle PtrAuth traps early The current way we deal with PtrAuth is a bit heavy handed: - We forcefully save the host's keys on each vcpu_load() - Handling the PtrAuth trap forces us to go all the way back to the exit handling code to just set the HCR bits Overall, this is pretty cumbersome. 
A better approach would be to handle it the same way we deal with the FPSIMD registers: - On vcpu_load() disable PtrAuth for the guest - On first use, save the host's keys, enable PtrAuth in the guest Crucially, this can happen as a fixup, which is done very early on exit. We can then reenter the guest immediately without leaving the hypervisor role. Another thing is that it simplify the rest of the host handling: exiting all the way to the host means that the only possible outcome for this trap is to inject an UNDEF. Reviewed-by: Mark Rutland Signed-off-by: Marc Zyngier --- arch/arm64/kvm/arm.c | 17 +----------- arch/arm64/kvm/handle_exit.c | 17 +++--------- arch/arm64/kvm/hyp/switch.c | 61 ++++++++++++++++++++++++++++++++++++++++++++ arch/arm64/kvm/sys_regs.c | 13 ++++------ 4 files changed, 70 insertions(+), 38 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 152049c5055d..14b747266607 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -337,12 +337,6 @@ void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) preempt_enable(); } -#define __ptrauth_save_key(regs, key) \ -({ \ - regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1); \ - regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1); \ -}) - void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { int *last_ran; @@ -376,17 +370,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) else vcpu_set_wfx_traps(vcpu); - if (vcpu_has_ptrauth(vcpu)) { - struct kvm_cpu_context *ctxt = vcpu->arch.host_cpu_context; - - __ptrauth_save_key(ctxt->sys_regs, APIA); - __ptrauth_save_key(ctxt->sys_regs, APIB); - __ptrauth_save_key(ctxt->sys_regs, APDA); - __ptrauth_save_key(ctxt->sys_regs, APDB); - __ptrauth_save_key(ctxt->sys_regs, APGA); - + if (vcpu_has_ptrauth(vcpu)) vcpu_ptrauth_disable(vcpu); - } } void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index 065251efa2e6..5a02d4c90559 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c @@ -162,25 +162,14 @@ static int handle_sve(struct kvm_vcpu *vcpu, struct kvm_run *run) return 1; } -/* - * Handle the guest trying to use a ptrauth instruction, or trying to access a - * ptrauth register. - */ -void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu) -{ - if (vcpu_has_ptrauth(vcpu)) - vcpu_ptrauth_enable(vcpu); - else - kvm_inject_undefined(vcpu); -} - /* * Guest usage of a ptrauth instruction (which the guest EL1 did not turn into - * a NOP). + * a NOP). If we get here, it is that we didn't fixup ptrauth on exit, and all + * that we can do is give the guest an UNDEF. 
*/ static int kvm_handle_ptrauth(struct kvm_vcpu *vcpu, struct kvm_run *run) { - kvm_arm_vcpu_ptrauth_trap(vcpu); + kvm_inject_undefined(vcpu); return 1; } diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index c07a45643cd4..d60c2ef0fe8c 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -490,6 +490,64 @@ static bool __hyp_text handle_tx2_tvm(struct kvm_vcpu *vcpu) return true; } +static bool __hyp_text esr_is_ptrauth_trap(u32 esr) +{ + u32 ec = ESR_ELx_EC(esr); + + if (ec == ESR_ELx_EC_PAC) + return true; + + if (ec != ESR_ELx_EC_SYS64) + return false; + + switch (esr_sys64_to_sysreg(esr)) { + case SYS_APIAKEYLO_EL1: + case SYS_APIAKEYHI_EL1: + case SYS_APIBKEYLO_EL1: + case SYS_APIBKEYHI_EL1: + case SYS_APDAKEYLO_EL1: + case SYS_APDAKEYHI_EL1: + case SYS_APDBKEYLO_EL1: + case SYS_APDBKEYHI_EL1: + case SYS_APGAKEYLO_EL1: + case SYS_APGAKEYHI_EL1: + return true; + } + + return false; +} + +#define __ptrauth_save_key(regs, key) \ +({ \ + regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1); \ + regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1); \ +}) + +static bool __hyp_text __hyp_handle_ptrauth(struct kvm_vcpu *vcpu) +{ + struct kvm_cpu_context *ctxt; + u64 val; + + if (!vcpu_has_ptrauth(vcpu) || + !esr_is_ptrauth_trap(kvm_vcpu_get_hsr(vcpu))) + return false; + + ctxt = kern_hyp_va(vcpu->arch.host_cpu_context); + __ptrauth_save_key(ctxt->sys_regs, APIA); + __ptrauth_save_key(ctxt->sys_regs, APIB); + __ptrauth_save_key(ctxt->sys_regs, APDA); + __ptrauth_save_key(ctxt->sys_regs, APDB); + __ptrauth_save_key(ctxt->sys_regs, APGA); + + vcpu_ptrauth_enable(vcpu); + + val = read_sysreg(hcr_el2); + val |= (HCR_API | HCR_APK); + write_sysreg(val, hcr_el2); + + return true; +} + /* * Return true when we were able to fixup the guest exit and should return to * the guest, false when we should restore the host state and return to the @@ -524,6 +582,9 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code) if (__hyp_handle_fpsimd(vcpu)) return true; + if (__hyp_handle_ptrauth(vcpu)) + return true; + if (!__populate_fault_info(vcpu)) return true; diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index ad1d57501d6d..564995084cf8 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1034,16 +1034,13 @@ static bool trap_ptrauth(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *rd) { - kvm_arm_vcpu_ptrauth_trap(vcpu); - /* - * Return false for both cases as we never skip the trapped - * instruction: - * - * - Either we re-execute the same key register access instruction - * after enabling ptrauth. - * - Or an UNDEF is injected as ptrauth is not supported/enabled. + * If we land here, that is because we didn't fixup the access on exit + * by allowing the PtrAuth sysregs. The only way this happens is when + * the guest does not have PtrAuth support enabled. */ + kvm_inject_undefined(vcpu); + return false; } -- cgit v1.2.3 From b990d37fdf6781fd0907ffd14d0dff16b5d58ffa Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sun, 7 Jun 2020 10:55:28 +0100 Subject: KVM: arm64: Stop sparse from moaning at __hyp_this_cpu_ptr Sparse complains that __hyp_this_cpu_ptr() returns something that is flagged noderef and not in the correct address space (both being the result of the __percpu annotation). Pretend that __hyp_this_cpu_ptr() knows what it is doing by forcefully casting the pointer with __kernel __force. 
Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_asm.h | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index 0c9b5fc4ba0a..d9b7da15dbca 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -81,12 +81,19 @@ extern u32 __kvm_get_mdcr_el2(void); extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ]; -/* Home-grown __this_cpu_{ptr,read} variants that always work at HYP */ +/* + * Home-grown __this_cpu_{ptr,read} variants that always work at HYP, + * provided that sym is really a *symbol* and not a pointer obtained from + * a data structure. As for SHIFT_PERCPU_PTR(), the creative casting keeps + * sparse quiet. + */ #define __hyp_this_cpu_ptr(sym) \ ({ \ - void *__ptr = hyp_symbol_addr(sym); \ + void *__ptr; \ + __verify_pcpu_ptr(&sym); \ + __ptr = hyp_symbol_addr(sym); \ __ptr += read_sysreg(tpidr_el2); \ - (typeof(&sym))__ptr; \ + (typeof(sym) __kernel __force *)__ptr; \ }) #define __hyp_this_cpu_read(sym) \ -- cgit v1.2.3 From 07da1ffaa1373f99331712faa67a00b5b807dfe8 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Fri, 5 Jun 2020 14:08:13 +0100 Subject: KVM: arm64: Remove host_cpu_context member from vcpu structure For very long, we have kept this pointer back to the per-cpu host state, despite having working per-cpu accessors at EL2 for some time now. Recent investigations have shown that this pointer is easy to abuse in preemptible context, which is a sure sign that it would better be gone. Not to mention that a per-cpu pointer is faster to access at all times. Reported-by: Andrew Scull Acked-by: Mark Rutland Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_host.h | 3 --- arch/arm64/kvm/arm.c | 3 --- arch/arm64/kvm/hyp/debug-sr.c | 4 ++-- arch/arm64/kvm/hyp/switch.c | 6 +++--- arch/arm64/kvm/hyp/sysreg-sr.c | 6 ++++-- arch/arm64/kvm/pmu.c | 8 ++------ 6 files changed, 11 insertions(+), 19 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 59029e90b557..ada1faa92211 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -284,9 +284,6 @@ struct kvm_vcpu_arch { struct kvm_guest_debug_arch vcpu_debug_state; struct kvm_guest_debug_arch external_debug_state; - /* Pointer to host CPU context */ - struct kvm_cpu_context *host_cpu_context; - struct thread_info *host_thread_info; /* hyp VA */ struct user_fpsimd_state *host_fpsimd_state; /* hyp VA */ diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 14b747266607..6ddaa23ef346 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -340,10 +340,8 @@ void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { int *last_ran; - kvm_host_data_t *cpu_data; last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran); - cpu_data = this_cpu_ptr(&kvm_host_data); /* * We might get preempted before the vCPU actually runs, but @@ -355,7 +353,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) } vcpu->cpu = cpu; - vcpu->arch.host_cpu_context = &cpu_data->host_ctxt; kvm_vgic_load(vcpu); kvm_timer_vcpu_load(vcpu); diff --git a/arch/arm64/kvm/hyp/debug-sr.c b/arch/arm64/kvm/hyp/debug-sr.c index 0fc9872a1467..e95af204fec7 100644 --- a/arch/arm64/kvm/hyp/debug-sr.c +++ b/arch/arm64/kvm/hyp/debug-sr.c @@ -185,7 +185,7 @@ void __hyp_text __debug_switch_to_guest(struct kvm_vcpu *vcpu) if 
(!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY)) return; - host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context); + host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt; guest_ctxt = &vcpu->arch.ctxt; host_dbg = &vcpu->arch.host_debug_state.regs; guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr); @@ -207,7 +207,7 @@ void __hyp_text __debug_switch_to_host(struct kvm_vcpu *vcpu) if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY)) return; - host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context); + host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt; guest_ctxt = &vcpu->arch.ctxt; host_dbg = &vcpu->arch.host_debug_state.regs; guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr); diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index d60c2ef0fe8c..1853c1788e0c 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -532,7 +532,7 @@ static bool __hyp_text __hyp_handle_ptrauth(struct kvm_vcpu *vcpu) !esr_is_ptrauth_trap(kvm_vcpu_get_hsr(vcpu))) return false; - ctxt = kern_hyp_va(vcpu->arch.host_cpu_context); + ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt; __ptrauth_save_key(ctxt->sys_regs, APIA); __ptrauth_save_key(ctxt->sys_regs, APIB); __ptrauth_save_key(ctxt->sys_regs, APDA); @@ -703,7 +703,7 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu) struct kvm_cpu_context *guest_ctxt; u64 exit_code; - host_ctxt = vcpu->arch.host_cpu_context; + host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt; host_ctxt->__hyp_running_vcpu = vcpu; guest_ctxt = &vcpu->arch.ctxt; @@ -808,7 +808,7 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu) vcpu = kern_hyp_va(vcpu); - host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context); + host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt; host_ctxt->__hyp_running_vcpu = vcpu; guest_ctxt = &vcpu->arch.ctxt; diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c index 6d2df9fe0b5d..143d7b7358f2 100644 --- a/arch/arm64/kvm/hyp/sysreg-sr.c +++ b/arch/arm64/kvm/hyp/sysreg-sr.c @@ -265,12 +265,13 @@ void __hyp_text __sysreg32_restore_state(struct kvm_vcpu *vcpu) */ void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu) { - struct kvm_cpu_context *host_ctxt = vcpu->arch.host_cpu_context; struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt; + struct kvm_cpu_context *host_ctxt; if (!has_vhe()) return; + host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt; __sysreg_save_user_state(host_ctxt); /* @@ -301,12 +302,13 @@ void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu) */ void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu) { - struct kvm_cpu_context *host_ctxt = vcpu->arch.host_cpu_context; struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt; + struct kvm_cpu_context *host_ctxt; if (!has_vhe()) return; + host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt; deactivate_traps_vhe_put(); __sysreg_save_el1_state(guest_ctxt); diff --git a/arch/arm64/kvm/pmu.c b/arch/arm64/kvm/pmu.c index e71d00bb5271..b5ae3a5d509e 100644 --- a/arch/arm64/kvm/pmu.c +++ b/arch/arm64/kvm/pmu.c @@ -163,15 +163,13 @@ static void kvm_vcpu_pmu_disable_el0(unsigned long events) */ void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) { - struct kvm_cpu_context *host_ctxt; struct kvm_host_data *host; u32 events_guest, events_host; if (!has_vhe()) return; - host_ctxt = vcpu->arch.host_cpu_context; - host = container_of(host_ctxt, struct kvm_host_data, host_ctxt); + host = this_cpu_ptr(&kvm_host_data); events_guest = host->pmu_events.events_guest; events_host = host->pmu_events.events_host; @@ -184,15 +182,13 @@ void 
kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) */ void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) { - struct kvm_cpu_context *host_ctxt; struct kvm_host_data *host; u32 events_guest, events_host; if (!has_vhe()) return; - host_ctxt = vcpu->arch.host_cpu_context; - host = container_of(host_ctxt, struct kvm_host_data, host_ctxt); + host = this_cpu_ptr(&kvm_host_data); events_guest = host->pmu_events.events_guest; events_host = host->pmu_events.events_host; -- cgit v1.2.3 From 3204be4109ad681523e3461ce64454c79278450a Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 9 Jun 2020 08:40:35 +0100 Subject: KVM: arm64: Make vcpu_cp1x() work on Big Endian hosts AArch32 CP1x registers are overlayed on their AArch64 counterparts in the vcpu struct. This leads to an interesting problem as they are stored in their CPU-local format, and thus a CP1x register doesn't "hit" the lower 32bit portion of the AArch64 register on a BE host. To workaround this unfortunate situation, introduce a bias trick in the vcpu_cp1x() accessors which picks the correct half of the 64bit register. Cc: stable@vger.kernel.org Reported-by: James Morse Tested-by: James Morse Acked-by: James Morse Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_host.h | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 59029e90b557..521eaedfdcc3 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -404,8 +404,10 @@ void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg); * CP14 and CP15 live in the same array, as they are backed by the * same system registers. */ -#define vcpu_cp14(v,r) ((v)->arch.ctxt.copro[(r)]) -#define vcpu_cp15(v,r) ((v)->arch.ctxt.copro[(r)]) +#define CPx_BIAS IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) + +#define vcpu_cp14(v,r) ((v)->arch.ctxt.copro[(r) ^ CPx_BIAS]) +#define vcpu_cp15(v,r) ((v)->arch.ctxt.copro[(r) ^ CPx_BIAS]) struct kvm_vm_stat { ulong remote_tlb_flush; -- cgit v1.2.3 From 0370964dd3ff7d3d406f292cb443a927952cbd05 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 9 Jun 2020 08:50:29 +0100 Subject: KVM: arm64: Synchronize sysreg state on injecting an AArch32 exception On a VHE system, the EL1 state is left in the CPU most of the time, and only syncronized back to memory when vcpu_put() is called (most of the time on preemption). Which means that when injecting an exception, we'd better have a way to either: (1) write directly to the EL1 sysregs (2) synchronize the state back to memory, and do the changes there For an AArch64, we already do (1), so we are safe. Unfortunately, doing the same thing for AArch32 would be pretty invasive. Instead, we can easily implement (2) by calling the put/load architectural backends, and keep preemption disabled. We can then reload the state back into EL1. 
Cc: stable@vger.kernel.org Reported-by: James Morse Signed-off-by: Marc Zyngier --- arch/arm64/kvm/aarch32.c | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) (limited to 'arch') diff --git a/arch/arm64/kvm/aarch32.c b/arch/arm64/kvm/aarch32.c index 0a356aa91aa1..40a62a99fbf8 100644 --- a/arch/arm64/kvm/aarch32.c +++ b/arch/arm64/kvm/aarch32.c @@ -33,6 +33,26 @@ static const u8 return_offsets[8][2] = { [7] = { 4, 4 }, /* FIQ, unused */ }; +static bool pre_fault_synchronize(struct kvm_vcpu *vcpu) +{ + preempt_disable(); + if (vcpu->arch.sysregs_loaded_on_cpu) { + kvm_arch_vcpu_put(vcpu); + return true; + } + + preempt_enable(); + return false; +} + +static void post_fault_synchronize(struct kvm_vcpu *vcpu, bool loaded) +{ + if (loaded) { + kvm_arch_vcpu_load(vcpu, smp_processor_id()); + preempt_enable(); + } +} + /* * When an exception is taken, most CPSR fields are left unchanged in the * handler. However, some are explicitly overridden (e.g. M[4:0]). @@ -155,7 +175,10 @@ static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset) void kvm_inject_undef32(struct kvm_vcpu *vcpu) { + bool loaded = pre_fault_synchronize(vcpu); + prepare_fault32(vcpu, PSR_AA32_MODE_UND, 4); + post_fault_synchronize(vcpu, loaded); } /* @@ -168,6 +191,9 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt, u32 vect_offset; u32 *far, *fsr; bool is_lpae; + bool loaded; + + loaded = pre_fault_synchronize(vcpu); if (is_pabt) { vect_offset = 12; @@ -191,6 +217,8 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt, /* no need to shuffle FS[4] into DFSR[10] as its 0 */ *fsr = DFSR_FSC_EXTABT_nLPAE; } + + post_fault_synchronize(vcpu, loaded); } void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr) -- cgit v1.2.3 From 304e2989c93e941fa55b38c59c975d4acfb6e4a2 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 10 Jun 2020 16:27:46 +0100 Subject: KVM: arm64: Move hyp_symbol_addr() to kvm_asm.h Recent refactoring of the arm64 code make it awkward to have hyp_symbol_addr() in kvm_mmu.h. Instead, move it next to its main user, which is __hyp_this_cpu_ptr(). Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_asm.h | 20 ++++++++++++++++++++ arch/arm64/include/asm/kvm_mmu.h | 20 -------------------- 2 files changed, 20 insertions(+), 20 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index d9b7da15dbca..352aaebf4198 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -81,6 +81,26 @@ extern u32 __kvm_get_mdcr_el2(void); extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ]; +/* + * Obtain the PC-relative address of a kernel symbol + * s: symbol + * + * The goal of this macro is to return a symbol's address based on a + * PC-relative computation, as opposed to a loading the VA from a + * constant pool or something similar. This works well for HYP, as an + * absolute VA is guaranteed to be wrong. Only use this if trying to + * obtain the address of a symbol (i.e. not something you obtained by + * following a pointer). 
+ */ +#define hyp_symbol_addr(s) \ + ({ \ + typeof(s) *addr; \ + asm("adrp %0, %1\n" \ + "add %0, %0, :lo12:%1\n" \ + : "=r" (addr) : "S" (&s)); \ + addr; \ + }) + /* * Home-grown __this_cpu_{ptr,read} variants that always work at HYP, * provided that sym is really a *symbol* and not a pointer obtained from diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index 53bd4d517a4d..df485840005c 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -107,26 +107,6 @@ static __always_inline unsigned long __kern_hyp_va(unsigned long v) #define kern_hyp_va(v) ((typeof(v))(__kern_hyp_va((unsigned long)(v)))) -/* - * Obtain the PC-relative address of a kernel symbol - * s: symbol - * - * The goal of this macro is to return a symbol's address based on a - * PC-relative computation, as opposed to a loading the VA from a - * constant pool or something similar. This works well for HYP, as an - * absolute VA is guaranteed to be wrong. Only use this if trying to - * obtain the address of a symbol (i.e. not something you obtained by - * following a pointer). - */ -#define hyp_symbol_addr(s) \ - ({ \ - typeof(s) *addr; \ - asm("adrp %0, %1\n" \ - "add %0, %0, :lo12:%1\n" \ - : "=r" (addr) : "S" (&s)); \ - addr; \ - }) - /* * We currently support using a VM-specified IPA size. For backward * compatibility, the default IPA size is fixed to 40bits. -- cgit v1.2.3