commit d241aac798eb042e605f78c31a4122e583b2cd13
Author:    Marc Zyngier <marc.zyngier@arm.com>  2013-08-02 11:41:13 +0100
Committer: Marc Zyngier <marc.zyngier@arm.com>  2013-10-29 18:25:25 +0000
Parent:    4a10c2ac2f368583138b774ca41fac4207911983
arm64: KVM: Yield CPU when vcpu executes a WFE
On an (even slightly) oversubscribed system, spinlocks quickly become a bottleneck: some vcpus spin, waiting for a lock to be released, while the vcpu holding the lock may not be running at all.

The solution is to trap blocking WFEs and tell KVM that we're now spinning. This ensures that other vcpus get a scheduling boost, allowing the lock to be released more quickly. Also, using CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT slightly improves performance when the VM is severely overcommitted.

Acked-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
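For context: the WFE trap itself is enabled by setting HCR_EL2.TWE in the guest's HCR_EL2 value. That change lives in arch/arm64/include/asm/kvm_arm.h and is therefore not visible in the diffstat below, which is limited to arch/arm64/kvm. A rough, illustrative sketch of the relevant bits (not the patch's actual hunk):

/* Illustrative sketch only -- the real definitions live in
 * arch/arm64/include/asm/kvm_arm.h, outside the directory shown here.
 * HCR_EL2.TWI (bit 13) traps guest WFI to EL2; HCR_EL2.TWE (bit 14)
 * traps guest WFE to EL2.
 */
#define HCR_TWI		(UL(1) << 13)
#define HCR_TWE		(UL(1) << 14)

/* Folding HCR_TWE into the guest's HCR flags makes every blocking WFE
 * exit to the hypervisor, where the handler below can yield the CPU.
 */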
Diffstat (limited to 'arch/arm64/kvm')
 arch/arm64/kvm/Kconfig       |  1 +
 arch/arm64/kvm/handle_exit.c | 18 +++++++++++++-----
 2 files changed, 14 insertions(+), 5 deletions(-)
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index 21e90820bd23..4480ab339a00 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -21,6 +21,7 @@ config KVM
select MMU_NOTIFIER
select PREEMPT_NOTIFIERS
select ANON_INODES
+ select HAVE_KVM_CPU_RELAX_INTERCEPT
select KVM_MMIO
select KVM_ARM_HOST
select KVM_ARM_VGIC
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 9beaca033437..8da56067c304 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -47,21 +47,29 @@ static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
}
/**
- * kvm_handle_wfi - handle a wait-for-interrupts instruction executed by a guest
+ * kvm_handle_wfx - handle a wait-for-interrupts or wait-for-event
+ * instruction executed by a guest
+ *
* @vcpu: the vcpu pointer
*
- * Simply call kvm_vcpu_block(), which will halt execution of
+ * WFE: Yield the CPU and come back to this vcpu when the scheduler
+ * decides to.
+ * WFI: Simply call kvm_vcpu_block(), which will halt execution of
* world-switches and schedule other host processes until there is an
* incoming IRQ or FIQ to the VM.
*/
-static int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
- kvm_vcpu_block(vcpu);
+ if (kvm_vcpu_get_hsr(vcpu) & ESR_EL2_EC_WFI_ISS_WFE)
+ kvm_vcpu_on_spin(vcpu);
+ else
+ kvm_vcpu_block(vcpu);
+
return 1;
}
static exit_handle_fn arm_exit_handlers[] = {
- [ESR_EL2_EC_WFI] = kvm_handle_wfi,
+ [ESR_EL2_EC_WFI] = kvm_handle_wfx,
[ESR_EL2_EC_CP15_32] = kvm_handle_cp15_32,
[ESR_EL2_EC_CP15_64] = kvm_handle_cp15_64,
[ESR_EL2_EC_CP14_MR] = kvm_handle_cp14_access,
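The WFI/WFE distinction in kvm_handle_wfx() relies on the ISS encoding of the trapped WFx instruction: bit 0 of the ESR_EL2 ISS field is 0 for WFI and 1 for WFE. A minimal sketch of the definition the handler assumes (again defined in arch/arm64/include/asm/kvm_arm.h, outside the arch/arm64/kvm diff above):

/* Sketch: for a trapped WFI/WFE, ISS bit 0 selects the instruction --
 * 0 means WFI, 1 means WFE.  The real definition lives in
 * arch/arm64/include/asm/kvm_arm.h.
 */
#define ESR_EL2_EC_WFI_ISS_WFE	(1 << 0)

On a WFE trap, kvm_vcpu_on_spin() asks the scheduler for a directed yield towards another runnable vcpu of the same VM. Selecting HAVE_KVM_CPU_RELAX_INTERCEPT in the Kconfig hunk additionally lets the core KVM code track whether a vcpu is itself spinning, which, roughly, the yield heuristics use to avoid boosting vcpus that are also just waiting on a lock.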