| author | Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com> | 2012-07-19 15:17:52 +0530 | 
|---|---|---|
| committer | Avi Kivity <avi@redhat.com> | 2012-07-23 13:02:37 +0300 | 
| commit | 06e48c510aa37f6e791602e6420422ea7071fe94 (patch) | |
| tree | 083e13a1a1b58d32369adc6eaa42c2a22d17d95d /virt | |
| parent | 4c088493c8d07e4e27bad53a99dcfdc14cdf45f8 (diff) | |
| download | linux-06e48c510aa37f6e791602e6420422ea7071fe94.tar.bz2 | |
KVM: Choose better candidate for directed yield
Currently, on guests with a large number of VCPUs, there is a high
probability of yielding to the same VCPU that recently did a pause-loop
exit or had cpu relax intercepted. Such a yield can lead to that VCPU
spinning again and hence degrades performance.
The patchset keeps track of pause-loop exits / cpu relax interceptions
and gives a chance to a VCPU which:
 (a) has not done a pause-loop exit or had cpu relax intercepted at all
     (it is probably a preempted lock holder), or
 (b) was skipped in the last iteration because it did a pause-loop exit
     or had cpu relax intercepted, and has probably become eligible now
     (the next eligible lock holder).
(A simplified sketch of this selection heuristic follows the sign-off
tags below.)
Signed-off-by: Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
Reviewed-by: Marcelo Tosatti <mtosatti@redhat.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Tested-by: Christian Borntraeger <borntraeger@de.ibm.com> # on s390x
Signed-off-by: Avi Kivity <avi@redhat.com>
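As a reading aid, here is a minimal, userspace-only sketch of the eligibility heuristic described in points (a) and (b) above. The names `vcpu_state` and `directed_yield_eligible` are illustrative only, not KVM symbols; the actual implementation is `kvm_vcpu_eligible_for_directed_yield()` in the diff below.

```c
/*
 * Toy model of the directed-yield eligibility heuristic (not kernel code).
 * A VCPU is eligible if it has not recently taken a PLE/cpu-relax exit,
 * or if it was skipped on the previous pass (dy_eligible was toggled).
 */
#include <stdbool.h>
#include <stdio.h>

struct vcpu_state {
	bool in_spin_loop;	/* set while the VCPU is in the PLE/cpu-relax handler */
	bool dy_eligible;	/* flipped on every eligibility check */
};

static bool directed_yield_eligible(struct vcpu_state *v)
{
	/* (a) not spinning recently, or (b) skipped last time around */
	bool eligible = !v->in_spin_loop || v->dy_eligible;

	/* Toggle so a skipped spinner becomes eligible on the next pass. */
	if (v->in_spin_loop)
		v->dy_eligible = !v->dy_eligible;

	return eligible;
}

int main(void)
{
	struct vcpu_state holder  = { .in_spin_loop = false, .dy_eligible = false };
	struct vcpu_state spinner = { .in_spin_loop = true,  .dy_eligible = false };

	printf("holder:  %d\n", directed_yield_eligible(&holder));	/* 1: likely lock holder */
	printf("spinner: %d\n", directed_yield_eligible(&spinner));	/* 0: skipped this round */
	printf("spinner: %d\n", directed_yield_eligible(&spinner));	/* 1: eligible next round */
	return 0;
}
```

Note how a recently spinning VCPU is skipped on one pass and allowed on the next; that alternation is exactly what toggling dy_eligible in the patch achieves.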
Diffstat (limited to 'virt')
| -rw-r--r-- | virt/kvm/kvm_main.c | 42 | 
1 file changed, 42 insertions, 0 deletions
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 0892b75eeedd..1e10ebe1a370 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1579,6 +1579,43 @@ bool kvm_vcpu_yield_to(struct kvm_vcpu *target)
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to);
 
+#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
+/*
+ * Helper that checks whether a VCPU is eligible for directed yield.
+ * Most eligible candidate to yield is decided by following heuristics:
+ *
+ *  (a) VCPU which has not done pl-exit or cpu relax intercepted recently
+ *  (preempted lock holder), indicated by @in_spin_loop.
+ *  Set at the beginning and cleared at the end of interception/PLE handler.
+ *
+ *  (b) VCPU which has done pl-exit/ cpu relax intercepted but did not get
+ *  chance last time (mostly it has become eligible now since we have probably
+ *  yielded to lockholder in last iteration. This is done by toggling
+ *  @dy_eligible each time a VCPU checked for eligibility.)
+ *
+ *  Yielding to a recently pl-exited/cpu relax intercepted VCPU before yielding
+ *  to preempted lock-holder could result in wrong VCPU selection and CPU
+ *  burning. Giving priority for a potential lock-holder increases lock
+ *  progress.
+ *
+ *  Since algorithm is based on heuristics, accessing another VCPU data without
+ *  locking does not harm. It may result in trying to yield to same VCPU, fail
+ *  and continue with next VCPU and so on.
+ */
+bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
+{
+	bool eligible;
+
+	eligible = !vcpu->spin_loop.in_spin_loop ||
+			(vcpu->spin_loop.in_spin_loop &&
+			 vcpu->spin_loop.dy_eligible);
+
+	if (vcpu->spin_loop.in_spin_loop)
+		kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible);
+
+	return eligible;
+}
+#endif
 void kvm_vcpu_on_spin(struct kvm_vcpu *me)
 {
 	struct kvm *kvm = me->kvm;
@@ -1607,6 +1644,8 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me)
 				continue;
 			if (waitqueue_active(&vcpu->wq))
 				continue;
+			if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
+				continue;
 			if (kvm_vcpu_yield_to(vcpu)) {
 				kvm->last_boosted_vcpu = i;
 				yielded = 1;
@@ -1615,6 +1654,9 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me)
 		}
 	}
 	kvm_vcpu_set_in_spin_loop(me, false);
+
+	/* Ensure vcpu is not eligible during next spinloop */
+	kvm_vcpu_set_dy_eligible(me, false);
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);
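The spin_loop fields and the kvm_vcpu_set_in_spin_loop()/kvm_vcpu_set_dy_eligible() setters used above come from the parent commit (4c088493, linked in the header). As an orientation aid only, the sketch below mocks the lifecycle the comment block describes ("Set at the beginning and cleared at the end of interception/PLE handler") plus the clearing this patch adds at the end of kvm_vcpu_on_spin(); it is an assumption-laden reconstruction, not kernel code, and the real declarations may differ.

```c
/*
 * Hypothetical mock of the per-VCPU tracking state and its lifecycle across
 * one PLE / cpu-relax interception. Field names mirror vcpu->spin_loop.* as
 * accessed in the diff above; everything else here is invented for clarity.
 */
#include <stdbool.h>
#include <stdio.h>

struct mock_vcpu {
	bool in_spin_loop;	/* true while handling a PLE / cpu-relax exit */
	bool dy_eligible;	/* may be picked as a directed-yield target */
};

static void mock_handle_ple_exit(struct mock_vcpu *me)
{
	me->in_spin_loop = true;	/* set at the beginning of the handler (parent commit) */

	/* ... candidate scan of kvm_vcpu_on_spin() would run here ... */

	me->in_spin_loop = false;	/* cleared at the end of the handler */
	me->dy_eligible = false;	/* this patch: not eligible during the next spin loop */
}

int main(void)
{
	struct mock_vcpu me = { .in_spin_loop = false, .dy_eligible = true };

	mock_handle_ple_exit(&me);
	printf("in_spin_loop=%d dy_eligible=%d\n", me.in_spin_loop, me.dy_eligible);	/* 0 0 */
	return 0;
}
```

Clearing dy_eligible at the end means the VCPU that just burned cycles spinning is deprioritized as a yield target on the immediately following pass, which is the intent of the "Ensure vcpu is not eligible during next spinloop" hunk above.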