author	Marc Zyngier <maz@kernel.org>	2020-10-13 11:56:50 +0100
committer	Marc Zyngier <maz@kernel.org>	2020-11-10 08:34:23 +0000
commit	c22588c99635ac4dace0ce2d55c1e2dc4f13cb54 (patch)
tree	211c4ef56a7f3680df90bbb60f73645339a12cd3
parent	f8394f232b1eab649ce2df5c5f15b0e528c92091 (diff)
download	linux-c22588c99635ac4dace0ce2d55c1e2dc4f13cb54.tar.bz2
KVM: arm64: Don't adjust PC on SError during SMC trap
On SMC trap, the preferred return address is set to that of the SMC instruction itself. It is thus wrong to try to roll it back when an SError occurs while trapping on SMC.

It is still necessary on HVC, though, as HVC doesn't cause a trap and sets ELR to return *after* the HVC.

It also became apparent that there is no 16-bit encoding for an AArch32 HVC instruction, meaning that the displacement is always 4 bytes, no matter what the ISA is. Take this opportunity to simplify it.

Acked-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
-rw-r--r--	arch/arm64/kvm/handle_exit.c	16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 5d690d60ccad..79a720657c47 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -245,15 +245,15 @@ int handle_exit(struct kvm_vcpu *vcpu, int exception_index)
 		u8 esr_ec = ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
 
 		/*
-		 * HVC/SMC already have an adjusted PC, which we need
-		 * to correct in order to return to after having
-		 * injected the SError.
+		 * HVC already have an adjusted PC, which we need to
+		 * correct in order to return to after having injected
+		 * the SError.
+		 *
+		 * SMC, on the other hand, is *trapped*, meaning its
+		 * preferred return address is the SMC itself.
 		 */
-		if (esr_ec == ESR_ELx_EC_HVC32 || esr_ec == ESR_ELx_EC_HVC64 ||
-		    esr_ec == ESR_ELx_EC_SMC32 || esr_ec == ESR_ELx_EC_SMC64) {
-			u32 adj = kvm_vcpu_trap_il_is32bit(vcpu) ? 4 : 2;
-			*vcpu_pc(vcpu) -= adj;
-		}
+		if (esr_ec == ESR_ELx_EC_HVC32 || esr_ec == ESR_ELx_EC_HVC64)
+			*vcpu_pc(vcpu) -= 4;
 
 		return 1;
 	}
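For context, here is a minimal standalone sketch of the architectural behaviour the patch relies on: a completed HVC sets ELR to the instruction *after* it, while a trapped SMC leaves ELR pointing at the SMC itself. This is illustrative C, not kernel code; the enum and function names are hypothetical.

#include <stdint.h>

/*
 * Illustrative model only (hypothetical names, not kernel code):
 * compute the PC to return to when injecting an SError, mirroring
 * the logic this patch leaves in handle_exit().
 */
enum guest_exit_reason {
	EXIT_HVC,	/* hypercall completed, then exception taken */
	EXIT_SMC_TRAP,	/* SMC trapped to EL2, never executed        */
};

static uint64_t serror_return_pc(enum guest_exit_reason why, uint64_t elr)
{
	switch (why) {
	case EXIT_HVC:
		/*
		 * HVC completed, so ELR holds the address *after* it.
		 * There is no 16-bit AArch32 HVC encoding, so the
		 * rollback is always 4 bytes, regardless of ISA.
		 */
		return elr - 4;
	case EXIT_SMC_TRAP:
		/*
		 * A trapped SMC's preferred return address is the SMC
		 * itself; ELR already points where we want to return.
		 */
		return elr;
	}
	return elr;
}

This is why the SMC32/SMC64 cases, along with the conditional 2-byte adjustment for 16-bit instructions, could be dropped from the hunk above.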