summaryrefslogtreecommitdiffstats
path: root/arch/x86
diff options
context:
space:
mode:
authorSean Christopherson <sean.j.christopherson@intel.com>2019-09-12 19:46:08 -0700
committerPaolo Bonzini <pbonzini@redhat.com>2019-09-24 14:35:41 +0200
commit4506ecf4855e53c3e5a2b40312a240696863582c (patch)
tree4741d26769bb601c42bb5c731bdbe6433e081c7c /arch/x86
parentfbb158cb88b6e4f8279707a1842ee0332f64b1a3 (diff)
downloadlinux-4506ecf4855e53c3e5a2b40312a240696863582c.tar.bz2
KVM: x86/mmu: Revert "Revert "KVM: MMU: collapse TLB flushes when zap all pages""
Now that the fast invalidate mechanism has been reintroduced, restore the performance tweaks for fast invalidation that existed prior to its removal. Paraphrasing the original changelog: Reload the mmu on all vCPUs after updating the generation number so that obsolete pages are not used by any vCPUs. This allows collapsing all TLB flushes during obsolete page zapping into a single flush, as there is no need to flush when dropping mmu_lock (to reschedule). Note: a remote TLB flush is still needed before freeing the pages as other vCPUs may be doing a lockless shadow page walk. Opportunistically improve the comments restored by the revert (the code itself is a true revert). This reverts commit f34d251d66ba263c077ed9d2bbd1874339a4c887. Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'arch/x86')
-rw-r--r--arch/x86/kvm/mmu.c25
1 file changed, 22 insertions, 3 deletions
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 3acc4b046d47..652f2ce14583 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -5696,11 +5696,15 @@ restart:
if (sp->role.invalid)
continue;
+ /*
+ * No need to flush the TLB since we're only zapping shadow
+ * pages with an obsolete generation number and all vCPUS have
+ * loaded a new root, i.e. the shadow pages being zapped cannot
+ * be in active use by the guest.
+ */
if (batch >= BATCH_ZAP_PAGES &&
- (need_resched() || spin_needbreak(&kvm->mmu_lock))) {
+ cond_resched_lock(&kvm->mmu_lock)) {
batch = 0;
- kvm_mmu_commit_zap_page(kvm, &invalid_list);
- cond_resched_lock(&kvm->mmu_lock);
goto restart;
}
@@ -5711,6 +5715,11 @@ restart:
}
}
+ /*
+ * Trigger a remote TLB flush before freeing the page tables to ensure
+ * KVM is not in the middle of a lockless shadow page table walk, which
+ * may reference the pages.
+ */
kvm_mmu_commit_zap_page(kvm, &invalid_list);
}
@@ -5729,6 +5738,16 @@ static void kvm_mmu_zap_all_fast(struct kvm *kvm)
trace_kvm_mmu_zap_all_fast(kvm);
kvm->arch.mmu_valid_gen++;
+ /*
+ * Notify all vcpus to reload its shadow page table and flush TLB.
+ * Then all vcpus will switch to new shadow page table with the new
+ * mmu_valid_gen.
+ *
+ * Note: we need to do this under the protection of mmu_lock,
+ * otherwise, vcpu would purge shadow page but miss tlb flush.
+ */
+ kvm_reload_remote_mmus(kvm);
+
kvm_zap_obsolete_pages(kvm);
spin_unlock(&kvm->mmu_lock);
}