author    Linus Torvalds <torvalds@linux-foundation.org>  2019-08-21 11:48:38 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2019-08-21 11:48:38 -0700
commit    bb7ba8069de933d69cb45dd0a5806b61033796a3 (patch)
tree      a6efe79c2ec068f0445206aedde6bbc839c9dca0 /arch/x86/kvm/mmu.c
parent    2babd34df2294a72df02dc4a3745df3408147eba (diff)
parent    e4427372398c31f57450565de277f861a4db5b3b (diff)
download  linux-bb7ba8069de933d69cb45dd0a5806b61033796a3.tar.bz2
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM fixes from Paolo Bonzini:
 "A couple bugfixes, and mostly selftests changes"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  selftests/kvm: make platform_info_test pass on AMD
  Revert "KVM: x86/mmu: Zap only the relevant pages when removing a memslot"
  selftests: kvm: fix state save/load on processors without XSAVE
  selftests: kvm: fix vmx_set_nested_state_test
  selftests: kvm: provide common function to enable eVMCS
  selftests: kvm: do not try running the VM in vmx_set_nested_state_test
  KVM: x86: svm: remove redundant assignment of var new_entry
  MAINTAINERS: add KVM x86 reviewers
  MAINTAINERS: change list for KVM/s390
  kvm: x86: skip populating logical dest map if apic is not sw enabled
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--	arch/x86/kvm/mmu.c	33
1 file changed, 1 insertion(+), 32 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 24843cf49579..218b277bfda3 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -5653,38 +5653,7 @@ static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
 			struct kvm_memory_slot *slot,
 			struct kvm_page_track_notifier_node *node)
 {
-	struct kvm_mmu_page *sp;
-	LIST_HEAD(invalid_list);
-	unsigned long i;
-	bool flush;
-	gfn_t gfn;
-
-	spin_lock(&kvm->mmu_lock);
-
-	if (list_empty(&kvm->arch.active_mmu_pages))
-		goto out_unlock;
-
-	flush = slot_handle_all_level(kvm, slot, kvm_zap_rmapp, false);
-
-	for (i = 0; i < slot->npages; i++) {
-		gfn = slot->base_gfn + i;
-
-		for_each_valid_sp(kvm, sp, gfn) {
-			if (sp->gfn != gfn)
-				continue;
-
-			kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
-		}
-		if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
-			kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
-			flush = false;
-			cond_resched_lock(&kvm->mmu_lock);
-		}
-	}
-	kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
-
-out_unlock:
-	spin_unlock(&kvm->mmu_lock);
+	kvm_mmu_zap_all(kvm);
 }
 
 void kvm_mmu_init_vm(struct kvm *kvm)
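
For readability, here is the notifier as it stands after this revert, reconstructed purely from the hunk above (the function name, its parameters, and kvm_mmu_zap_all() all appear in the diff; nothing else is assumed). Removing a memslot once again zaps every shadow page in the VM instead of walking the slot's gfn range and zapping only the pages that map it:

static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
			struct kvm_memory_slot *slot,
			struct kvm_page_track_notifier_node *node)
{
	/* Reverted behaviour: drop all shadow pages for this VM. */
	kvm_mmu_zap_all(kvm);
}

The reverted variant held kvm->mmu_lock while iterating over slot->npages gfns, batching zaps into invalid_list and using cond_resched_lock() to bound lock hold times; this revert trades that precision for the simpler, known-correct full zap.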