author     Linus Torvalds <torvalds@linux-foundation.org>   2021-01-26 11:10:14 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2021-01-26 11:10:14 -0800
commit     4992eb41ab1b5d08479193afdc94f9678e5ded13 (patch)
tree       923ade0380fb76883019095a07b3a242b3025fe8 /arch/x86/kvm/vmx/nested.c
parent     c7230a48ed5ebdda54867816303e974c154841d1 (diff)
parent     9a78e15802a87de2b08dfd1bd88e855201d2c8fa (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull kvm fixes from Paolo Bonzini:

 - x86 bugfixes
 - Documentation fixes
 - Avoid performance regression due to SEV-ES patches
 - ARM:
   - Don't allow tagged pointers to point to memslots
   - Filter out ARMv8.1+ PMU events on v8.0 hardware
   - Hide PMU registers from userspace when no PMU is configured
   - More PMU cleanups
   - Don't try to handle broken PSCI firmware
   - More sys_reg() to reg_to_encoding() conversions

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: x86: allow KVM_REQ_GET_NESTED_STATE_PAGES outside guest mode for VMX
  KVM: x86: Revert "KVM: x86: Mark GPRs dirty when written"
  KVM: SVM: Unconditionally sync GPRs to GHCB on VMRUN of SEV-ES guest
  KVM: nVMX: Sync unsync'd vmcs02 state to vmcs12 on migration
  kvm: tracing: Fix unmatched kvm_entry and kvm_exit events
  KVM: Documentation: Update description of KVM_{GET,CLEAR}_DIRTY_LOG
  KVM: x86: get smi pending status correctly
  KVM: x86/pmu: Fix HW_REF_CPU_CYCLES event pseudo-encoding in intel_arch_events[]
  KVM: x86/pmu: Fix UBSAN shift-out-of-bounds warning in intel_pmu_refresh()
  KVM: x86: Add more protection against undefined behavior in rsvd_bits()
  KVM: Documentation: Fix spec for KVM_CAP_ENABLE_CAP_VM
  KVM: Forbid the use of tagged userspace addresses for memslots
  KVM: arm64: Filter out v8.1+ events on v8.0 HW
  KVM: arm64: Compute TPIDR_EL2 ignoring MTE tag
  KVM: arm64: Use the reg_to_encoding() macro instead of sys_reg()
  KVM: arm64: Allow PSCI SYSTEM_OFF/RESET to return
  KVM: arm64: Simplify handling of absent PMU system registers
  KVM: arm64: Hide PMU registers from userspace when not available
Diffstat (limited to 'arch/x86/kvm/vmx/nested.c')
-rw-r--r--  arch/x86/kvm/vmx/nested.c | 44 +++++++++++++++++++++++++++++++++-----------
1 file changed, 33 insertions(+), 11 deletions(-)
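The change to this file implements the first shortlog entry above. KVM does not fetch nested-state pages synchronously from KVM_SET_NESTED_STATE; it raises KVM_REQ_GET_NESTED_STATE_PAGES and services the request on the next KVM_RUN, by which point the vCPU may or may not be in guest mode. The snippet below is an approximate sketch of that caller side (it lives in arch/x86/kvm/x86.c, is not part of this diff, and the exact code may differ); it illustrates why the .get_nested_state_pages hook must tolerate being invoked outside guest mode, which is what the refactoring in the diff below provides.

	/* Approximate request handling in vcpu_enter_guest() -- illustrative only. */
	if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) {
		/*
		 * After this merge, vmx_get_nested_state_pages() maps the
		 * eVMCS page regardless of guest mode and fetches vmcs12
		 * pages only while L2 is running, so the request is safe
		 * to handle right after KVM_SET_NESTED_STATE as well.
		 */
		if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) {
			r = 0;	/* bail out to userspace */
			goto out;
		}
	}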
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 0fbb46990dfc..f2b9bfb58206 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -3124,13 +3124,9 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
-static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
+static bool nested_get_evmcs_page(struct kvm_vcpu *vcpu)
 {
-	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	struct kvm_host_map *map;
-	struct page *page;
-	u64 hpa;
 
 	/*
 	 * hv_evmcs may end up being not mapped after migration (when
@@ -3153,6 +3149,17 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
 		}
 	}
 
+	return true;
+}
+
+static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
+{
+	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	struct kvm_host_map *map;
+	struct page *page;
+	u64 hpa;
+
 	if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
 		/*
 		 * Translate L1 physical address to host physical
@@ -3221,6 +3228,18 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
 		exec_controls_setbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
 	else
 		exec_controls_clearbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
+
+	return true;
+}
+
+static bool vmx_get_nested_state_pages(struct kvm_vcpu *vcpu)
+{
+	if (!nested_get_evmcs_page(vcpu))
+		return false;
+
+	if (is_guest_mode(vcpu) && !nested_get_vmcs12_pages(vcpu))
+		return false;
+
 	return true;
 }
 
@@ -6077,11 +6096,14 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
 	if (is_guest_mode(vcpu)) {
 		sync_vmcs02_to_vmcs12(vcpu, vmcs12);
 		sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
-	} else if (!vmx->nested.need_vmcs12_to_shadow_sync) {
-		if (vmx->nested.hv_evmcs)
-			copy_enlightened_to_vmcs12(vmx);
-		else if (enable_shadow_vmcs)
-			copy_shadow_to_vmcs12(vmx);
+	} else {
+		copy_vmcs02_to_vmcs12_rare(vcpu, get_vmcs12(vcpu));
+		if (!vmx->nested.need_vmcs12_to_shadow_sync) {
+			if (vmx->nested.hv_evmcs)
+				copy_enlightened_to_vmcs12(vmx);
+			else if (enable_shadow_vmcs)
+				copy_shadow_to_vmcs12(vmx);
+		}
 	}
 
 	BUILD_BUG_ON(sizeof(user_vmx_nested_state->vmcs12) < VMCS12_SIZE);
@@ -6602,7 +6624,7 @@ struct kvm_x86_nested_ops vmx_nested_ops = {
 	.hv_timer_pending = nested_vmx_preemption_timer_pending,
 	.get_state = vmx_get_nested_state,
 	.set_state = vmx_set_nested_state,
-	.get_nested_state_pages = nested_get_vmcs12_pages,
+	.get_nested_state_pages = vmx_get_nested_state_pages,
 	.write_log_dirty = nested_vmx_write_pml_buffer,
 	.enable_evmcs = nested_enable_evmcs,
 	.get_evmcs_version = nested_get_evmcs_version,