path: root/arch/arm64/kvm/handle_exit.c
author	James Morse <james.morse@arm.com>	2018-01-15 19:39:04 +0000
committer	Catalin Marinas <catalin.marinas@arm.com>	2018-01-16 15:09:13 +0000
commit	3368bd809764d3ef0810e16c1e1531fec32e8d8e (patch)
tree	7a7baf0049b8dd2b1d8b22a7a89ce3bbe80b7e37 /arch/arm64/kvm/handle_exit.c
parent	c60590b552bdf682043579b9b965e6224fbf65d9 (diff)
download	linux-3368bd809764d3ef0810e16c1e1531fec32e8d8e.tar.bz2
KVM: arm64: Handle RAS SErrors from EL1 on guest exit
We expect to have firmware-first handling of RAS SErrors, with errors
notified via an APEI method. For systems without firmware-first, add
some minimal handling to KVM.

There are two ways KVM can take an SError due to a guest, either may
be a RAS error: we exit the guest due to an SError routed to EL2 by
HCR_EL2.AMO, or we take an SError from EL2 when we unmask PSTATE.A
from __guest_exit.

For SError that interrupt a guest and are routed to EL2 the existing
behaviour is to inject an impdef SError into the guest.

Add code to handle RAS SError based on the ESR. For uncontained and
uncategorized errors arm64_is_fatal_ras_serror() will panic(), as these
errors compromise the host too. All other error types are contained:
for the fatal errors the vCPU can't make progress, so we inject a
virtual SError. We ignore contained errors where we can make progress,
as if we're lucky, we may not hit them again.

If only some of the CPUs support RAS the guest will see the cpufeature
sanitised version of the id registers, but we may still take RAS SError
on this CPU. Move the SError handling out of handle_exit() into a new
handler that runs before we can be preempted. This allows us to use
this_cpu_has_cap(), via arm64_is_ras_serror().

Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: James Morse <james.morse@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
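The severity test the message refers to keys off the AET field of the ESR.
For illustration only, a minimal sketch of that classification, assuming the
ESR_ELx_AET_* encodings from asm/esr.h; serror_is_fatal() is a hypothetical
name, and the real helpers (arm64_is_ras_serror(), arm64_is_fatal_ras_serror())
live in asm/traps.h and differ in detail:

/*
 * Hypothetical sketch of ESR-based SError severity classification;
 * not the in-tree implementation.
 */
#include <linux/kernel.h>
#include <asm/esr.h>

static bool serror_is_fatal(u32 esr)
{
	switch (esr & ESR_ELx_AET) {
	case ESR_ELx_AET_CE:	/* Corrected error */
	case ESR_ELx_AET_UEO:	/* Restartable: not yet consumed */
		return false;	/* contained; the vCPU can make progress */
	case ESR_ELx_AET_UEU:	/* Unrecoverable */
	case ESR_ELx_AET_UER:	/* Recoverable */
		return true;	/* contained, but fatal to the vCPU */
	default:		/* Uncontained or uncategorized */
		panic("Uncontained RAS SError");	/* host state is suspect */
	}
}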
Diffstat (limited to 'arch/arm64/kvm/handle_exit.c')
-rw-r--r--	arch/arm64/kvm/handle_exit.c	18
1 file changed, 17 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 304203fa9e33..6a5a5db4292f 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -29,12 +29,19 @@
 #include <asm/kvm_mmu.h>
 #include <asm/kvm_psci.h>
 #include <asm/debug-monitors.h>
+#include <asm/traps.h>
 
 #define CREATE_TRACE_POINTS
 #include "trace.h"
 
 typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);
 
+static void kvm_handle_guest_serror(struct kvm_vcpu *vcpu, u32 esr)
+{
+	if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(NULL, esr))
+		kvm_inject_vabt(vcpu);
+}
+
 static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
 	int ret;
@@ -252,7 +259,6 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
 	case ARM_EXCEPTION_IRQ:
 		return 1;
 	case ARM_EXCEPTION_EL1_SERROR:
-		kvm_inject_vabt(vcpu);
 		/* We may still need to return for single-step */
 		if (!(*vcpu_cpsr(vcpu) & DBG_SPSR_SS)
 			&& kvm_arm_handle_step_debug(vcpu, run))
@@ -275,3 +281,13 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
 		return 0;
 	}
 }
+
+/* For exit types that need handling before we can be preempted */
+void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
+		       int exception_index)
+{
+	exception_index = ARM_EXCEPTION_CODE(exception_index);
+
+	if (exception_index == ARM_EXCEPTION_EL1_SERROR)
+		kvm_handle_guest_serror(vcpu, kvm_vcpu_get_hsr(vcpu));
+}
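The new hook only pays off if it runs before the host can be preempted; its
caller sits outside this diffstat-limited view (in the arch vcpu run loop).
A sketch of the ordering the patch relies on, with the surrounding run-loop
code elided and approximate:

	/* Sketch of the expected run-loop ordering; not the full caller. */
	ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);

	/* Still non-preemptible here, so this_cpu_has_cap() is safe */
	handle_exit_early(vcpu, run, ret);

	preempt_enable();

	ret = handle_exit(vcpu, run, ret);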