author    | Michael Roth <michael.roth@amd.com>       | 2021-02-02 13:01:25 -0600
committer | Paolo Bonzini <pbonzini@redhat.com>       | 2021-02-04 05:27:34 -0500
commit    | 553cc15f6e8d1467dc09a1fe6e51fcdea5f96471 (patch)
tree      | b25a38fb3150a1a499bdf627600038b32eff7603
parent    | e79b91bb3c916a52ce823ab60489c717c925c49f (diff)
download  | linux-553cc15f6e8d1467dc09a1fe6e51fcdea5f96471.tar.bz2
KVM: SVM: remove unneeded fields from host_save_users_msrs
Now that the set of host user MSRs that need to be individually
saved/restored is the same with/without SEV-ES, we can drop the
.sev_es_restored flag and just iterate through the list unconditionally
for both cases. A subsequent patch can then move these loops to a
common path.
Signed-off-by: Michael Roth <michael.roth@amd.com>
Message-Id: <20210202190126.2185715-3-michael.roth@amd.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-rw-r--r-- | arch/x86/kvm/svm/sev.c | 16
-rw-r--r-- | arch/x86/kvm/svm/svm.c |  6
-rw-r--r-- | arch/x86/kvm/svm/svm.h |  7
3 files changed, 8 insertions, 21 deletions
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index a3e2b29f484d..87167ef8ca23 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -2083,12 +2083,8 @@ void sev_es_vcpu_load(struct vcpu_svm *svm, int cpu)
 	 * Certain MSRs are restored on VMEXIT, only save ones that aren't
 	 * restored.
 	 */
-	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) {
-		if (host_save_user_msrs[i].sev_es_restored)
-			continue;
-
-		rdmsrl(host_save_user_msrs[i].index, svm->host_user_msrs[i]);
-	}
+	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
+		rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
 
 	/* XCR0 is restored on VMEXIT, save the current host value */
 	hostsa = (struct vmcb_save_area *)(page_address(sd->save_area) + 0x400);
@@ -2109,12 +2105,8 @@ void sev_es_vcpu_put(struct vcpu_svm *svm)
 	 * Certain MSRs are restored on VMEXIT and were saved with vmsave in
 	 * sev_es_vcpu_load() above. Only restore ones that weren't.
 	 */
-	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) {
-		if (host_save_user_msrs[i].sev_es_restored)
-			continue;
-
-		wrmsrl(host_save_user_msrs[i].index, svm->host_user_msrs[i]);
-	}
+	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
+		wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
 }
 
 void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index fc4c3cc60d69..8b2cbcb50239 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1428,8 +1428,7 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		sev_es_vcpu_load(svm, cpu);
 	} else {
 		for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
-			rdmsrl(host_save_user_msrs[i].index,
-			       svm->host_user_msrs[i]);
+			rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
 
 		vmsave(__sme_page_pa(sd->save_area));
 	}
@@ -1464,8 +1463,7 @@ static void svm_vcpu_put(struct kvm_vcpu *vcpu)
 		sev_es_vcpu_put(svm);
 	} else {
 		for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
-			wrmsrl(host_save_user_msrs[i].index,
-			       svm->host_user_msrs[i]);
+			wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
 	}
 }
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 2d227ba34368..42c1faaddd17 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -23,11 +23,8 @@
 
 #define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)
 
-static const struct svm_host_save_msrs {
-	u32 index;		/* Index of the MSR */
-	bool sev_es_restored;	/* True if MSR is restored on SEV-ES VMEXIT */
-} host_save_user_msrs[] = {
-	{ .index = MSR_TSC_AUX, .sev_es_restored = false },
+static const u32 host_save_user_msrs[] = {
+	MSR_TSC_AUX,
 };
 #define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)
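
As a rough illustration of the commit message's note that "a subsequent patch can then move these loops to a common path", the sketch below shows what shared helpers might look like once both the SEV-ES and non-SEV-ES cases iterate the list the same way. The helper names svm_save_host_user_msrs() and svm_restore_host_user_msrs() are hypothetical and not taken from this patch; only host_save_user_msrs[], NR_HOST_SAVE_USER_MSRS, rdmsrl() and wrmsrl() come from the diff above.

/*
 * Hypothetical sketch only (assumed helper names, not part of this
 * patch): with the per-MSR .sev_es_restored flag removed, the SEV-ES
 * and non-SEV-ES paths run identical loops, so the vcpu load/put
 * callers could share helpers along these lines.
 */
static void svm_save_host_user_msrs(struct vcpu_svm *svm)
{
	int i;

	/* Snapshot each host-owned MSR before switching to the guest. */
	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
		rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
}

static void svm_restore_host_user_msrs(struct vcpu_svm *svm)
{
	int i;

	/* Put the saved host values back after running the guest. */
	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
		wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
}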