author     Sean Christopherson <sean.j.christopherson@intel.com>   2020-05-01 21:32:30 -0700
committer  Paolo Bonzini <pbonzini@redhat.com>                     2020-05-13 12:15:10 -0400
commit     f98c1e77127de7d9ff558570c25d02ef077df50f (patch)
tree       047dd57f09a83b3abf03ad2a179f804ca53e113a /arch/x86/kvm/kvm_cache_regs.h
parent     0cc69204e77275105d5b0fc4cf3c970e3579457f (diff)
download   linux-f98c1e77127de7d9ff558570c25d02ef077df50f.tar.bz2
KVM: VMX: Add proper cache tracking for CR4
Move CR4 caching into the standard register caching mechanism in order to
take advantage of the availability checks provided by regs_avail.  This
avoids multiple VMREADs and retpolines (when configured) during nested VMX
transitions, as kvm_read_cr4_bits() is invoked multiple times on each
transition, e.g. when stuffing CR0 and CR3.  As an added bonus, this
eliminates a kvm_x86_ops hook, saves a retpoline on SVM when reading CR4,
and squashes the confusing naming discrepancy of "cache_reg" vs.
"decache_cr4_guest_bits".

No functional change intended.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200502043234.12481-7-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
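The availability check referenced above is the per-vCPU regs_avail bitmap: a register is read back from hardware only when its bit is clear, and the bit is set once the value is cached. As a rough sketch of that gate (reproduced from memory for context, not part of this patch; see kvm_cache_regs.h for the authoritative definitions), the helpers look approximately like this:

	/* Sketch of the regs_avail helpers kvm_read_cr4_bits() now relies on. */
	static inline bool kvm_register_is_available(struct kvm_vcpu *vcpu,
						     enum kvm_reg reg)
	{
		/* Bit set => the value cached in vcpu->arch is current. */
		return test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
	}

	static inline void kvm_register_mark_available(struct kvm_vcpu *vcpu,
						       enum kvm_reg reg)
	{
		/* Record that the register has been read back from the VMCS/VMCB. */
		__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
	}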
Diffstat (limited to 'arch/x86/kvm/kvm_cache_regs.h')
-rw-r--r--  arch/x86/kvm/kvm_cache_regs.h  |  5
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
index 62558b9bdda7..921a539bcb96 100644
--- a/arch/x86/kvm/kvm_cache_regs.h
+++ b/arch/x86/kvm/kvm_cache_regs.h
@@ -129,8 +129,9 @@ static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
 static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
 {
 	ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS;
-	if (tmask & vcpu->arch.cr4_guest_owned_bits)
-		kvm_x86_ops.decache_cr4_guest_bits(vcpu);
+	if ((tmask & vcpu->arch.cr4_guest_owned_bits) &&
+	    !kvm_register_is_available(vcpu, VCPU_EXREG_CR4))
+		kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_CR4);
 	return vcpu->arch.cr4 & mask;
 }
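Because the diffstat is limited to kvm_cache_regs.h, the VMX side of the change is not shown here. The VCPU_EXREG_CR4 handling that kvm_x86_ops.cache_reg() dispatches to looks roughly like the sketch below (an illustrative reconstruction from the commit description, not the literal patch hunk): the register is first marked available, then only the guest-owned CR4 bits are refreshed from the VMCS.

	/* Approximate shape of the VMX .cache_reg handling for CR4. */
	static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
	{
		unsigned long guest_owned_bits;

		kvm_register_mark_available(vcpu, reg);

		switch (reg) {
		case VCPU_EXREG_CR4:
			guest_owned_bits = vcpu->arch.cr4_guest_owned_bits;

			/* Refresh only the bits the guest is allowed to own. */
			vcpu->arch.cr4 &= ~guest_owned_bits;
			vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & guest_owned_bits;
			break;
		default:
			/* Other registers (RSP, RIP, PDPTRs, CR0, CR3) elided. */
			break;
		}
	}

With this in place, repeated kvm_read_cr4_bits() calls during a nested transition hit the cached vcpu->arch.cr4 instead of issuing a VMREAD (and a retpoline) each time.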