author | Kai Huang <kai.huang@linux.intel.com> | 2015-01-12 15:28:54 +0800 |
---|---|---|
committer | Paolo Bonzini <pbonzini@redhat.com> | 2015-01-19 11:09:37 +0100 |
commit | d91ffee9ec633c6523ef1ea39c36ace578760258 (patch) | |
tree | a07fcaed7d217743a0a79aa0e8d4915b0fe770f9 /arch | |
parent | 0c55d6d931484a747dbba5c9766379c7720a8520 (diff) | |
download | linux-d91ffee9ec633c6523ef1ea39c36ace578760258.tar.bz2 | |
Optimize TLB flush in kvm_mmu_slot_remove_write_access.
No TLB flush is needed when there is no valid rmap in the memory slot.
Signed-off-by: Kai Huang <kai.huang@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'arch')
-rw-r--r-- | arch/x86/kvm/mmu.c | 7 |
1 file changed, 5 insertions, 2 deletions
```diff
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 0d0fdd6f002c..97898abe8386 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4302,6 +4302,7 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
 	struct kvm_memory_slot *memslot;
 	gfn_t last_gfn;
 	int i;
+	bool flush = false;
 
 	memslot = id_to_memslot(kvm->memslots, slot);
 	last_gfn = memslot->base_gfn + memslot->npages - 1;
@@ -4318,7 +4319,8 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
 
 		for (index = 0; index <= last_index; ++index, ++rmapp) {
 			if (*rmapp)
-				__rmap_write_protect(kvm, rmapp, false);
+				flush |= __rmap_write_protect(kvm, rmapp,
+						false);
 
 			if (need_resched() || spin_needbreak(&kvm->mmu_lock))
 				cond_resched_lock(&kvm->mmu_lock);
@@ -4345,7 +4347,8 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
 	 * instead of PT_WRITABLE_MASK, that means it does not depend
 	 * on PT_WRITABLE_MASK anymore.
 	 */
-	kvm_flush_remote_tlbs(kvm);
+	if (flush)
+		kvm_flush_remote_tlbs(kvm);
 }
 
 #define BATCH_ZAP_PAGES			10
```
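For readers outside the kernel tree, the pattern the patch applies (accumulate a flush flag while walking the entries and issue a single TLB flush only if something actually changed) can be sketched as follows. This is a minimal illustration under stated assumptions, not the kernel code: `write_protect_entry()`, `entries[]`, `flush_tlbs()`, and `WRITABLE_BIT` are hypothetical stand-ins for __rmap_write_protect(), the slot's rmap array, kvm_flush_remote_tlbs(), and the writable SPTE bit.

```c
/*
 * Minimal sketch of the pattern the patch applies, with hypothetical
 * names: write_protect_entry() stands in for __rmap_write_protect(),
 * entries[] for the slot's rmap array, and flush_tlbs() for
 * kvm_flush_remote_tlbs().
 */
#include <stdbool.h>
#include <stddef.h>

#define WRITABLE_BIT 0x1UL

/* Clear the writable bit and report whether it was actually set. */
static bool write_protect_entry(unsigned long *entry)
{
	bool was_writable = (*entry & WRITABLE_BIT) != 0;

	*entry &= ~WRITABLE_BIT;
	return was_writable;
}

/* Write-protect every populated entry, flushing the TLB at most once. */
static void write_protect_all(unsigned long *entries, size_t n,
			      void (*flush_tlbs)(void))
{
	bool flush = false;
	size_t i;

	for (i = 0; i < n; i++)
		if (entries[i])
			flush |= write_protect_entry(&entries[i]);

	/* Skip the costly flush when nothing was write-protected. */
	if (flush)
		flush_tlbs();
}
```

The |= accumulation is the key point: the flush decision is driven by whether any call actually cleared a writable bit, not merely by having walked the slot, which is what lets the patch skip kvm_flush_remote_tlbs() for slots with no valid rmaps.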