author     Oliver Upton <oupton@google.com>    2022-05-04 03:24:40 +0000
committer  Marc Zyngier <maz@kernel.org>       2022-05-04 09:28:45 +0100
commit     7b33a09d036ffd9a04506122840629c7e870cf08
tree       3ee3d0cf1d939f6c7ecfb1f02357bf7ec0ba54ae
parent     3fdd04592d38bb31a0bea567d9a66672b484bed3
KVM: arm64: Add support for userspace to suspend a vCPU
Introduce a new MP state, KVM_MP_STATE_SUSPENDED, which indicates a vCPU
is in a suspended state. In the suspended state the vCPU will block
until a wakeup event (pending interrupt) is recognized.

Add a new system event type, KVM_SYSTEM_EVENT_WAKEUP, to indicate to
userspace that KVM has recognized one such wakeup event. It is the
responsibility of userspace to then make the vCPU runnable, or leave it
suspended until the next wakeup event.

Signed-off-by: Oliver Upton <oupton@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20220504032446.4133305-7-oupton@google.com
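
As an illustration of the userspace side of this contract (not part of the
patch), a minimal sketch of how a VMM might place a vCPU into the new state
via KVM_SET_MP_STATE; "vcpu_fd" is assumed to be an open KVM vCPU file
descriptor, and suspend_vcpu() is a hypothetical helper:

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Hypothetical helper: ask KVM to suspend the vCPU behind vcpu_fd. */
static int suspend_vcpu(int vcpu_fd)
{
	struct kvm_mp_state mp_state = {
		.mp_state = KVM_MP_STATE_SUSPENDED,
	};

	/* Fails (errno == EINVAL) on kernels without this MP state. */
	return ioctl(vcpu_fd, KVM_SET_MP_STATE, &mp_state);
}
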
Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/include/asm/kvm_host.h    1
-rw-r--r--  arch/arm64/kvm/arm.c                51
2 files changed, 52 insertions, 0 deletions
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index f3f93d48e21a..46027b9b80ca 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -46,6 +46,7 @@
 #define KVM_REQ_RECORD_STEAL	KVM_ARCH_REQ(3)
 #define KVM_REQ_RELOAD_GICv4	KVM_ARCH_REQ(4)
 #define KVM_REQ_RELOAD_PMU	KVM_ARCH_REQ(5)
+#define KVM_REQ_SUSPEND		KVM_ARCH_REQ(6)
 
 #define KVM_DIRTY_LOG_MANUAL_CAPS   (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
 				     KVM_DIRTY_LOG_INITIALLY_SET)
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index efe54aba5cce..abd32a84ed7a 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -444,6 +444,18 @@ bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu)
 	return vcpu->arch.mp_state.mp_state == KVM_MP_STATE_STOPPED;
 }
 
+static void kvm_arm_vcpu_suspend(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.mp_state.mp_state = KVM_MP_STATE_SUSPENDED;
+	kvm_make_request(KVM_REQ_SUSPEND, vcpu);
+	kvm_vcpu_kick(vcpu);
+}
+
+static bool kvm_arm_vcpu_suspended(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.mp_state.mp_state == KVM_MP_STATE_SUSPENDED;
+}
+
 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
 				    struct kvm_mp_state *mp_state)
 {
@@ -464,6 +476,9 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
 	case KVM_MP_STATE_STOPPED:
 		kvm_arm_vcpu_power_off(vcpu);
 		break;
+	case KVM_MP_STATE_SUSPENDED:
+		kvm_arm_vcpu_suspend(vcpu);
+		break;
 	default:
 		ret = -EINVAL;
 	}
@@ -648,6 +663,39 @@ void kvm_vcpu_wfi(struct kvm_vcpu *vcpu)
 	preempt_enable();
 }
 
+static int kvm_vcpu_suspend(struct kvm_vcpu *vcpu)
+{
+	if (!kvm_arm_vcpu_suspended(vcpu))
+		return 1;
+
+	kvm_vcpu_wfi(vcpu);
+
+	/*
+	 * The suspend state is sticky; we do not leave it until userspace
+	 * explicitly marks the vCPU as runnable. Request that we suspend again
+	 * later.
+	 */
+	kvm_make_request(KVM_REQ_SUSPEND, vcpu);
+
+	/*
+	 * Check to make sure the vCPU is actually runnable. If so, exit to
+	 * userspace informing it of the wakeup condition.
+	 */
+	if (kvm_arch_vcpu_runnable(vcpu)) {
+		memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
+		vcpu->run->system_event.type = KVM_SYSTEM_EVENT_WAKEUP;
+		vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
+		return 0;
+	}
+
+	/*
+	 * Otherwise, we were unblocked to process a different event, such as a
+	 * pending signal. Return 1 and allow kvm_arch_vcpu_ioctl_run() to
+	 * process the event.
+	 */
+	return 1;
+}
+
 /**
  * check_vcpu_requests - check and handle pending vCPU requests
  * @vcpu:	the VCPU pointer
@@ -686,6 +734,9 @@ static int check_vcpu_requests(struct kvm_vcpu *vcpu)
 		if (kvm_check_request(KVM_REQ_RELOAD_PMU, vcpu))
 			kvm_pmu_handle_pmcr(vcpu,
 					    __vcpu_sys_reg(vcpu, PMCR_EL0));
+
+		if (kvm_check_request(KVM_REQ_SUSPEND, vcpu))
+			return kvm_vcpu_suspend(vcpu);
 	}
 
 	return 1;
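
For completeness, a hedged sketch (not part of the patch) of how a VMM run
loop might react to the resulting exit. "run" is assumed to be the vCPU's
mmap'ed struct kvm_run, "vcpu_fd" its file descriptor, and want_resume() a
stand-in for VMM-specific wakeup policy:

	/* After ioctl(vcpu_fd, KVM_RUN, 0) returns: */
	if (run->exit_reason == KVM_EXIT_SYSTEM_EVENT &&
	    run->system_event.type == KVM_SYSTEM_EVENT_WAKEUP) {
		if (want_resume()) {
			struct kvm_mp_state mp_state = {
				.mp_state = KVM_MP_STATE_RUNNABLE,
			};

			/* Make the vCPU runnable; the next KVM_RUN resumes the guest. */
			ioctl(vcpu_fd, KVM_SET_MP_STATE, &mp_state);
		}
		/*
		 * Otherwise leave it suspended: the next KVM_RUN blocks again
		 * until another wakeup event is recognized.
		 */
	}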