author     Sean Christopherson <sean.j.christopherson@intel.com>  2018-12-03 13:53:07 -0800
committer  Paolo Bonzini <pbonzini@redhat.com>                    2018-12-14 17:17:27 +0100
commit     89b0c9f58350f6820f062ea12000e8a171177f3b (patch)
tree       fb16a10237cb096eb9ce234698e8e1a6a94c302d /arch/x86/kvm/vmx/vmx.h
parent     75edce8a45486fe5fa5becdb43a7c36354b2a379 (diff)
download   linux-89b0c9f58350f6820f062ea12000e8a171177f3b.tar.bz2
KVM: VMX: Move VMX instruction wrappers to a dedicated header file
VMX has a few hundred lines of code just to wrap various VMX specific
instructions, e.g. VMWRITE, INVVPID, etc... Move them to a dedicated
header so it's easier to find/isolate the boilerplate. With this
change, more inlines can be moved from vmx.c to vmx.h.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
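For reference, the wrappers being relocated are thin static inlines around
single VMX instructions. A minimal sketch of the VMREAD path, for
illustration only (the real ops.h additionally folds in exception fixups
and the full set of sized variants):

static __always_inline unsigned long __vmcs_readl(unsigned long field)
{
        unsigned long value;

        /* VMREAD: load the VMCS field encoded by 'field' into 'value'. */
        asm volatile("vmread %1, %0" : "=r"(value) : "r"(field) : "cc");
        return value;
}

static __always_inline u16 vmcs_read16(unsigned long field)
{
        /* 16-bit fields are read via the natural-width primitive. */
        return __vmcs_readl(field);
}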
Diffstat (limited to 'arch/x86/kvm/vmx/vmx.h')
-rw-r--r--  arch/x86/kvm/vmx/vmx.h  108
1 file changed, 108 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index c9aa150aa014..4c7fcf1571c3 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -7,6 +7,7 @@
#include <asm/kvm.h>
#include "capabilities.h"
+#include "ops.h"
#include "vmcs.h"
#define MSR_TYPE_R 1
@@ -312,6 +313,75 @@ static inline int pi_test_sn(struct pi_desc *pi_desc)
(unsigned long *)&pi_desc->control);
}
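+/*
+ * RVI (Requesting Virtual Interrupt) is reported in the low byte of the
+ * guest interrupt status field; the high byte holds SVI.
+ */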
+static inline u8 vmx_get_rvi(void)
+{
+ return vmcs_read16(GUEST_INTR_STATUS) & 0xff;
+}
+
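+/*
+ * VM_ENTRY_CONTROLS is shadowed in vcpu_vmx so that reads avoid a VMREAD
+ * and writes that match the cached value skip the VMWRITE entirely.
+ */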
+static inline void vm_entry_controls_reset_shadow(struct vcpu_vmx *vmx)
+{
+ vmx->vm_entry_controls_shadow = vmcs_read32(VM_ENTRY_CONTROLS);
+}
+
+static inline void vm_entry_controls_init(struct vcpu_vmx *vmx, u32 val)
+{
+ vmcs_write32(VM_ENTRY_CONTROLS, val);
+ vmx->vm_entry_controls_shadow = val;
+}
+
+static inline void vm_entry_controls_set(struct vcpu_vmx *vmx, u32 val)
+{
+ if (vmx->vm_entry_controls_shadow != val)
+ vm_entry_controls_init(vmx, val);
+}
+
+static inline u32 vm_entry_controls_get(struct vcpu_vmx *vmx)
+{
+ return vmx->vm_entry_controls_shadow;
+}
+
+static inline void vm_entry_controls_setbit(struct vcpu_vmx *vmx, u32 val)
+{
+ vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) | val);
+}
+
+static inline void vm_entry_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
+{
+ vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) & ~val);
+}
+
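+/* VM_EXIT_CONTROLS accessors, mirroring the VM-entry helpers above. */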
+static inline void vm_exit_controls_reset_shadow(struct vcpu_vmx *vmx)
+{
+ vmx->vm_exit_controls_shadow = vmcs_read32(VM_EXIT_CONTROLS);
+}
+
+static inline void vm_exit_controls_init(struct vcpu_vmx *vmx, u32 val)
+{
+ vmcs_write32(VM_EXIT_CONTROLS, val);
+ vmx->vm_exit_controls_shadow = val;
+}
+
+static inline void vm_exit_controls_set(struct vcpu_vmx *vmx, u32 val)
+{
+ if (vmx->vm_exit_controls_shadow != val)
+ vm_exit_controls_init(vmx, val);
+}
+
+static inline u32 vm_exit_controls_get(struct vcpu_vmx *vmx)
+{
+ return vmx->vm_exit_controls_shadow;
+}
+
+static inline void vm_exit_controls_setbit(struct vcpu_vmx *vmx, u32 val)
+{
+ vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) | val);
+}
+
+static inline void vm_exit_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
+{
+ vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) & ~val);
+}
+
static inline void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
{
vmx->segment_cache.bitmask = 0;
@@ -348,4 +418,42 @@ static inline struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu)
return &(to_vmx(vcpu)->pi_desc);
}
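+/* VMCS allocation and teardown helpers, implemented in vmx.c. */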
+struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu);
+void free_vmcs(struct vmcs *vmcs);
+int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
+void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
+void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs);
+void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs);
+
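+/* Allocate a VMCS (optionally a shadow VMCS) for the current CPU. */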
+static inline struct vmcs *alloc_vmcs(bool shadow)
+{
+ return alloc_vmcs_cpu(shadow, raw_smp_processor_id());
+}
+
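+/* Build an EPTP value from the root HPA plus memory-type/walk-length bits. */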
+u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa);
+
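+/*
+ * With EPT enabled, flushing guest-physical mappings (or running without
+ * VPID) requires INVEPT on the current EPTP; otherwise INVVPID flushes
+ * the linear mappings tagged with the vCPU's VPID.
+ */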
+static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid,
+ bool invalidate_gpa)
+{
+ if (enable_ept && (invalidate_gpa || !enable_vpid)) {
+ if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
+ return;
+ ept_sync_context(construct_eptp(vcpu,
+ vcpu->arch.mmu->root_hpa));
+ } else {
+ vpid_sync_context(vpid);
+ }
+}
+
+static inline void vmx_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
+{
+ __vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid, invalidate_gpa);
+}
+
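+/* Push the vCPU's TSC scaling ratio into TSC_MULTIPLIER and cache it. */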
+static inline void decache_tsc_multiplier(struct vcpu_vmx *vmx)
+{
+ vmx->current_tsc_ratio = vmx->vcpu.arch.tsc_scaling_ratio;
+ vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
+}
+
#endif /* __KVM_X86_VMX_H */