author      Sean Christopherson <seanjc@google.com>   2022-02-26 00:15:23 +0000
committer   Paolo Bonzini <pbonzini@redhat.com>       2022-03-08 09:31:36 -0500
commit      7ae5840e6f3325b52ee46437d46ba0465016584d (patch)
tree        46cf6d49f9fbfde1fba9d64c6fbacf1661cc4fd4 /arch/x86/kvm/mmu
parent      db01416b22d98b8c8474a418a97cdacfd947469d (diff)
download    linux-7ae5840e6f3325b52ee46437d46ba0465016584d.tar.bz2
KVM: x86/mmu: Document that zapping invalidated roots doesn't need to flush
Remove the misleading flush "handling" when zapping invalidated TDP MMU
roots, and document that flushing is unnecessary for all flavors of MMUs
when zapping invalid/obsolete roots/pages.  The "handling" in the TDP MMU
is dead code, as zap_gfn_range() is called with shared=true, in which case
it will never return true due to the flushing being handled by
tdp_mmu_zap_spte_atomic().

No functional change intended.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Reviewed-by: Ben Gardon <bgardon@google.com>
Message-Id: <20220226001546.360188-6-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
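The dead-code argument above comes down to zap_gfn_range() never handing a
deferred flush back to this particular caller: with shared=true, every SPTE
is zapped via tdp_mmu_zap_spte_atomic(), which does its own flushing.  A
minimal sketch of that control flow, using illustrative stand-in names
rather than the real KVM helpers:

    #include <stdbool.h>

    /* Stand-in for tdp_mmu_zap_spte_atomic(): flushes as it zaps. */
    static void zap_spte_atomic_sketch(void)
    {
    }

    /*
     * Sketch of the shape of zap_gfn_range()'s return value: only the
     * non-shared path defers the TLB flush to the caller, so a caller
     * passing shared=true and flush=false can never get true back.
     */
    static bool zap_range_sketch(bool shared, bool flush)
    {
            if (shared) {
                    zap_spte_atomic_sketch();
                    return flush;   /* stays false for this caller */
            }

            return true;            /* caller must flush later */
    }

    int main(void)
    {
            /* Mirrors kvm_tdp_mmu_zap_invalidated_roots(): shared=true, flush=false. */
            return zap_range_sketch(true, false) ? 1 : 0;
    }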
Diffstat (limited to 'arch/x86/kvm/mmu')
-rw-r--r--   arch/x86/kvm/mmu/mmu.c       10
-rw-r--r--   arch/x86/kvm/mmu/tdp_mmu.c   15
2 files changed, 17 insertions(+), 8 deletions(-)
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 43f92b111d7b..a3154f33e803 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -5685,9 +5685,13 @@ restart:
}
/*
- * Trigger a remote TLB flush before freeing the page tables to ensure
- * KVM is not in the middle of a lockless shadow page table walk, which
- * may reference the pages.
+ * Kick all vCPUs (via remote TLB flush) before freeing the page tables
+ * to ensure KVM is not in the middle of a lockless shadow page table
+ * walk, which may reference the pages. The remote TLB flush itself is
+ * not required and is simply a convenient way to kick vCPUs as needed.
+ * KVM performs a local TLB flush when allocating a new root (see
+ * kvm_mmu_load()), and the reload in the caller ensures no vCPUs are
+ * running with an obsolete MMU.
*/
kvm_mmu_commit_zap_page(kvm, &kvm->arch.zapped_obsolete_pages);
}
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 921fa386df99..2ce6915b70fe 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -851,7 +851,6 @@ void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
{
struct kvm_mmu_page *next_root;
struct kvm_mmu_page *root;
- bool flush = false;
lockdep_assert_held_read(&kvm->mmu_lock);
@@ -864,7 +863,16 @@ void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
rcu_read_unlock();
- flush = zap_gfn_range(kvm, root, 0, -1ull, true, flush, true);
+ /*
+ * A TLB flush is unnecessary, invalidated roots are guaranteed
+ * to be unreachable by the guest (see kvm_tdp_mmu_put_root()
+ * for more details), and unlike the legacy MMU, no vCPU kick
+ * is needed to play nice with lockless shadow walks as the TDP
+ * MMU protects its paging structures via RCU. Note, zapping
+ * will still flush on yield, but that's a minor performance
+ * blip and not a functional issue.
+ */
+ (void)zap_gfn_range(kvm, root, 0, -1ull, true, false, true);
/*
* Put the reference acquired in
@@ -878,9 +886,6 @@ void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
}
rcu_read_unlock();
-
- if (flush)
- kvm_flush_remote_tlbs(kvm);
}
/*