From 03133347b4452ef9b1f1456b92f5fafa467c0655 Mon Sep 17 00:00:00 2001
From: Claudio Imbrenda
Date: Mon, 30 Apr 2018 18:33:24 +0200
Subject: KVM: s390: a utility function for migration

Introduce a utility function that will be used later on for storage
attributes migration, and use it in kvm_main.c to replace existing code
that does the same thing.

Signed-off-by: Claudio Imbrenda
Message-Id: <1525106005-13931-2-git-send-email-imbrenda@linux.vnet.ibm.com>
Signed-off-by: Christian Borntraeger
---
 virt/kvm/kvm_main.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'virt')

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 8b47507faab5..f519eb8d06b1 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1169,7 +1169,7 @@ int kvm_get_dirty_log_protect(struct kvm *kvm,
 
 	n = kvm_dirty_bitmap_bytes(memslot);
 
-	dirty_bitmap_buffer = dirty_bitmap + n / sizeof(long);
+	dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
 	memset(dirty_bitmap_buffer, 0, n);
 
 	spin_lock(&kvm->mmu_lock);
--
cgit v1.2.3
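The new helper itself lands in include/linux/kvm_host.h, which is outside
the 'virt' path filter of this listing, so only the kvm_main.c caller is
shown above. Based on the open-coded expression being replaced -- the
dirty bitmap is allocated at twice kvm_dirty_bitmap_bytes() so that its
second half can serve as a transfer buffer -- the helper plausibly has the
following shape; treat this as a sketch inferred from the hunk, not a
quote of the patch:

	/* Assumed shape of the helper used above: return a pointer to the
	 * second half of the double-sized dirty bitmap allocation. */
	static inline unsigned long *kvm_second_dirty_bitmap(struct kvm_memory_slot *memslot)
	{
		unsigned long len = kvm_dirty_bitmap_bytes(memslot);

		return memslot->dirty_bitmap + len / sizeof(*memslot->dirty_bitmap);
	}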
From 50c28f21d045dde8c52548f8482d456b3f0956f5 Mon Sep 17 00:00:00 2001
From: Junaid Shahid
Date: Wed, 27 Jun 2018 14:59:11 -0700
Subject: kvm: x86: Use fast CR3 switch for nested VMX

Use the fast CR3 switch mechanism to locklessly change the MMU root
page when switching between L1 and L2. The switch from L2 to L1 should
always go through the fast path, while the switch from L1 to L2 should
go through the fast path if L1's CR3/EPTP for L2 hasn't changed since
the last time.

Signed-off-by: Junaid Shahid
Signed-off-by: Paolo Bonzini
---
 arch/x86/kvm/mmu.c  |  6 ++++--
 arch/x86/kvm/mmu.h  |  2 +-
 arch/x86/kvm/vmx.c  | 16 ++++++++++------
 virt/kvm/kvm_main.c | 14 ++++++++++----
 4 files changed, 25 insertions(+), 13 deletions(-)

(limited to 'virt')

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 3d822a97dfa1..9b73cfcef917 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4054,7 +4054,7 @@ static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3,
 			return false;
 
 		swap(mmu->root_hpa, mmu->prev_root.hpa);
-		mmu->prev_root.cr3 = kvm_read_cr3(vcpu);
+		mmu->prev_root.cr3 = mmu->get_cr3(vcpu);
 
 		if (new_cr3 == prev_cr3 &&
 		    VALID_PAGE(mmu->root_hpa) &&
@@ -4091,6 +4091,7 @@ void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3)
 {
 	__kvm_mmu_new_cr3(vcpu, new_cr3, kvm_mmu_calc_root_page_role(vcpu));
 }
+EXPORT_SYMBOL_GPL(kvm_mmu_new_cr3);
 
 static unsigned long get_cr3(struct kvm_vcpu *vcpu)
 {
@@ -4725,12 +4726,13 @@ kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty)
 }
 
 void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
-			     bool accessed_dirty)
+			     bool accessed_dirty, gpa_t new_eptp)
 {
 	struct kvm_mmu *context = &vcpu->arch.mmu;
 	union kvm_mmu_page_role root_page_role =
 		kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty);
 
+	__kvm_mmu_new_cr3(vcpu, new_eptp, root_page_role);
 	context->shadow_root_level = PT64_ROOT_4LEVEL;
 
 	context->nx = true;
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 11ab3d62ad65..3177127cee96 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -64,7 +64,7 @@ reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
 void kvm_init_mmu(struct kvm_vcpu *vcpu, bool reset_roots);
 void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
 void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
-			     bool accessed_dirty);
+			     bool accessed_dirty, gpa_t new_eptp);
 bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
 int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code, u64
 			  fault_address, char *insn, int insn_len);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index cbe31b9d2b81..c6ea892a610e 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -10741,11 +10741,11 @@ static int nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
 	if (!valid_ept_address(vcpu, nested_ept_get_cr3(vcpu)))
 		return 1;
 
-	kvm_mmu_unload(vcpu);
 	kvm_init_shadow_ept_mmu(vcpu,
 			to_vmx(vcpu)->nested.msrs.ept_caps &
 			VMX_EPT_EXECUTE_ONLY_BIT,
-			nested_ept_ad_enabled(vcpu));
+			nested_ept_ad_enabled(vcpu),
+			nested_ept_get_cr3(vcpu));
 	vcpu->arch.mmu.set_cr3           = vmx_set_cr3;
 	vcpu->arch.mmu.get_cr3           = nested_ept_get_cr3;
 	vcpu->arch.mmu.inject_page_fault = nested_ept_inject_page_fault;
@@ -11342,12 +11342,16 @@ static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool ne
 				return 1;
 			}
 		}
-
-		vcpu->arch.cr3 = cr3;
-		__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
 	}
 
-	kvm_mmu_reset_context(vcpu);
+	if (!nested_ept)
+		kvm_mmu_new_cr3(vcpu, cr3);
+
+	vcpu->arch.cr3 = cr3;
+	__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
+
+	kvm_init_mmu(vcpu, false);
+
 	return 0;
 }
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index f519eb8d06b1..8f461e0ed382 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2127,16 +2127,22 @@ static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu)
 
 static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
 {
+	int ret = -EINTR;
+	int idx = srcu_read_lock(&vcpu->kvm->srcu);
+
 	if (kvm_arch_vcpu_runnable(vcpu)) {
 		kvm_make_request(KVM_REQ_UNHALT, vcpu);
-		return -EINTR;
+		goto out;
 	}
 	if (kvm_cpu_has_pending_timer(vcpu))
-		return -EINTR;
+		goto out;
 	if (signal_pending(current))
-		return -EINTR;
+		goto out;
 
-	return 0;
+	ret = 0;
+out:
+	srcu_read_unlock(&vcpu->kvm->srcu, idx);
+	return ret;
 }
 
 /*
--
cgit v1.2.3
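For readers following the mmu.c hunks: the mechanism is a one-entry cache
of the previously used root. The sketch below is a self-contained
userspace model of that idea, not kernel code; names such as mmu_model,
root_entry and fast_root_switch are invented, and the locking and
page-role checks done by the real fast_cr3_switch()/__kvm_mmu_new_cr3()
are omitted.

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define INVALID_PAGE ((uint64_t)-1)

	/* Invented names: a minimal model of the MMU root plus one cached
	 * previous root (mmu->root_hpa and mmu->prev_root in the hunks above). */
	struct root_entry {
		uint64_t hpa;	/* host address of the shadow/EPT root page */
		uint64_t cr3;	/* guest CR3 or EPTP the root was built for */
	};

	struct mmu_model {
		struct root_entry cur;
		struct root_entry prev;
	};

	/* Swap the cached previous root in; succeed only if it was built for
	 * the requested CR3/EPTP and still points at a valid root page.  On
	 * failure the caller rebuilds the root, but the old root stays cached. */
	static bool fast_root_switch(struct mmu_model *mmu, uint64_t new_cr3)
	{
		struct root_entry tmp = mmu->cur;

		mmu->cur = mmu->prev;
		mmu->prev = tmp;

		return mmu->cur.cr3 == new_cr3 && mmu->cur.hpa != INVALID_PAGE;
	}

	int main(void)
	{
		struct mmu_model mmu = {
			.cur  = { .hpa = 0x1000, .cr3 = 0xaaaa },	/* L1 root in use */
			.prev = { .hpa = 0x2000, .cr3 = 0xbbbb },	/* cached L2 root */
		};

		printf("L1 -> L2: %s\n", fast_root_switch(&mmu, 0xbbbb) ? "fast" : "slow");
		printf("L2 -> L1: %s\n", fast_root_switch(&mmu, 0xaaaa) ? "fast" : "slow");
		return 0;
	}

The unconditional swap is the interesting design choice: even when the
fast path fails, the root that was just replaced remains cached, so a
later switch back to it (for example the L2 -> L1 exit) can still avoid a
full rebuild.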
From b08660e59dbdb600c55953787ed2265a0b510f77 Mon Sep 17 00:00:00 2001
From: Tianyu Lan
Date: Thu, 19 Jul 2018 08:40:17 +0000
Subject: KVM: x86: Add tlb remote flush callback in kvm_x86_ops.

Provide a way for platforms to register an hv tlb remote flush
callback; this helps optimize TLB flushes among vCPUs in the nested
virtualization case.

Signed-off-by: Lan Tianyu
Signed-off-by: Paolo Bonzini
---
 arch/x86/include/asm/kvm_host.h | 11 +++++++++++
 include/linux/kvm_host.h        |  7 +++++++
 virt/kvm/kvm_main.c             |  3 ++-
 3 files changed, 20 insertions(+), 1 deletion(-)

(limited to 'virt')

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 75ab578c3f6a..150937e64f63 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -985,6 +985,7 @@ struct kvm_x86_ops {
 	void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
 
 	void (*tlb_flush)(struct kvm_vcpu *vcpu, bool invalidate_gpa);
+	int  (*tlb_remote_flush)(struct kvm *kvm);
 
 	/*
 	 * Flush any TLB entries associated with the given GVA.
@@ -1145,6 +1146,16 @@ static inline void kvm_arch_free_vm(struct kvm *kvm)
 	return kvm_x86_ops->vm_free(kvm);
 }
 
+#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
+static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
+{
+	if (kvm_x86_ops->tlb_remote_flush &&
+	    !kvm_x86_ops->tlb_remote_flush(kvm))
+		return 0;
+	else
+		return -ENOTSUPP;
+}
+
 int kvm_mmu_module_init(void);
 void kvm_mmu_module_exit(void);
 
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 2840ac74a3d7..7c7362dd2faa 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -834,6 +834,13 @@ static inline void kvm_arch_free_vm(struct kvm *kvm)
 }
 #endif
 
+#ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
+static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
+{
+	return -ENOTSUPP;
+}
+#endif
+
 #ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
 void kvm_arch_register_noncoherent_dma(struct kvm *kvm);
 void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 8f461e0ed382..74544d20bbf8 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -273,7 +273,8 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
 	 * kvm_make_all_cpus_request() reads vcpu->mode. We reuse that
 	 * barrier here.
 	 */
-	if (kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
+	if (!kvm_arch_flush_remote_tlb(kvm)
+	    || kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
 		++kvm->stat.remote_tlb_flush;
 	cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
 }
--
cgit v1.2.3
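The change to kvm_flush_remote_tlbs() reads as a short-circuit: if
kvm_arch_flush_remote_tlb() succeeds (an architecture registered a
tlb_remote_flush callback, e.g. a hypervisor-assisted flush, and it
returned 0), the per-vCPU KVM_REQ_TLB_FLUSH requests are skipped;
otherwise the existing request-based flush runs. The sketch below is a
self-contained userspace model of that fallback logic; flush_ops,
request_flush_on_all_vcpus and hv_remote_flush are invented stand-ins for
the kernel hooks.

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	#ifndef ENOTSUPP
	#define ENOTSUPP 524	/* kernel-internal errno used by the patch */
	#endif

	/* Invented stand-in for kvm_x86_ops: an optional remote-flush callback. */
	struct flush_ops {
		int (*tlb_remote_flush)(void *vm);	/* returns 0 on success */
	};

	/* Model of kvm_arch_flush_remote_tlb(): 0 if a callback exists and
	 * succeeded, -ENOTSUPP (so the caller falls back) otherwise. */
	static int arch_flush_remote_tlb(const struct flush_ops *ops, void *vm)
	{
		if (ops->tlb_remote_flush && !ops->tlb_remote_flush(vm))
			return 0;
		return -ENOTSUPP;
	}

	/* Invented stand-in for kvm_make_all_cpus_request(..., KVM_REQ_TLB_FLUSH). */
	static bool request_flush_on_all_vcpus(void *vm)
	{
		(void)vm;
		printf("fallback: posting a TLB-flush request to every vCPU\n");
		return true;
	}

	/* Same condition shape as the patched kvm_flush_remote_tlbs(). */
	static void flush_remote_tlbs(const struct flush_ops *ops, void *vm,
				      unsigned int *remote_tlb_flush_stat)
	{
		if (!arch_flush_remote_tlb(ops, vm) || request_flush_on_all_vcpus(vm))
			++*remote_tlb_flush_stat;
	}

	static int hv_remote_flush(void *vm)
	{
		(void)vm;
		printf("hypervisor-assisted remote TLB flush for the whole VM\n");
		return 0;
	}

	int main(void)
	{
		unsigned int stat = 0;
		struct flush_ops assisted = { .tlb_remote_flush = hv_remote_flush };
		struct flush_ops plain = { .tlb_remote_flush = NULL };

		flush_remote_tlbs(&assisted, NULL, &stat);	/* callback path */
		flush_remote_tlbs(&plain, NULL, &stat);		/* fallback path */
		printf("remote_tlb_flush stat: %u\n", stat);
		return 0;
	}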
From b9b33da2aa7429b0f61bcd218d34e1a277459fb4 Mon Sep 17 00:00:00 2001
From: Paolo Bonzini
Date: Fri, 27 Jul 2018 17:44:41 +0200
Subject: KVM: try __get_user_pages_fast even if not in atomic context

We are currently cutting hva_to_pfn_fast short if we do not want an
immediate exit, which is represented by !async && !atomic. However,
this is unnecessary, and __get_user_pages_fast is *much* faster because
the regular get_user_pages takes pmd_lock/pte_lock. In fact, when many
CPUs take a nested vmexit at the same time the contention on those locks
is visible, and this patch removes about 25% (compared to 4.18) from
vmexit.flat on a 16 vCPU nested guest.

Suggested-by: Andrea Arcangeli
Signed-off-by: Paolo Bonzini
---
 virt/kvm/kvm_main.c | 14 ++++++--------
 1 file changed, 6 insertions(+), 8 deletions(-)

(limited to 'virt')

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 74544d20bbf8..f83239ac8be1 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1343,18 +1343,16 @@ static inline int check_user_page_hwpoison(unsigned long addr)
 }
 
 /*
- * The atomic path to get the writable pfn which will be stored in @pfn,
- * true indicates success, otherwise false is returned.
+ * The fast path to get the writable pfn which will be stored in @pfn,
+ * true indicates success, otherwise false is returned.  It's also the
+ * only part that runs if we can be in atomic context.
 */
-static bool hva_to_pfn_fast(unsigned long addr, bool atomic, bool *async,
-			    bool write_fault, bool *writable, kvm_pfn_t *pfn)
+static bool hva_to_pfn_fast(unsigned long addr, bool write_fault,
+			    bool *writable, kvm_pfn_t *pfn)
 {
 	struct page *page[1];
 	int npages;
 
-	if (!(async || atomic))
-		return false;
-
 	/*
 	 * Fast pin a writable pfn only if it is a write fault request
 	 * or the caller allows to map a writable pfn for a read fault
@@ -1498,7 +1496,7 @@ static kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
 	/* we can do it either atomically or asynchronously, not both */
 	BUG_ON(atomic && async);
 
-	if (hva_to_pfn_fast(addr, atomic, async, write_fault, writable, &pfn))
+	if (hva_to_pfn_fast(addr, write_fault, writable, &pfn))
 		return pfn;
 
 	if (atomic)
--
cgit v1.2.3
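The behavioural change is that the lockless fast path is now attempted
unconditionally; only what happens after a fast-path miss depends on the
atomic/async constraints. A self-contained userspace model of the
resulting hva_to_pfn() flow is sketched below; gup_fast and gup_slow are
invented stand-ins for __get_user_pages_fast() and the sleeping
get_user_pages path, and the async handling is left out.

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	typedef uint64_t pfn_t;
	#define PFN_ERR ((pfn_t)-1)

	/* Stand-in for __get_user_pages_fast(): lockless, may fail and force a
	 * retry on the slow path (here it "fails" for read faults). */
	static bool gup_fast(uint64_t addr, bool write_fault, pfn_t *pfn)
	{
		if (!write_fault)
			return false;
		*pfn = addr >> 12;
		return true;
	}

	/* Stand-in for the sleeping get_user_pages path, which takes mmap and
	 * page-table locks and therefore cannot run in atomic context. */
	static bool gup_slow(uint64_t addr, pfn_t *pfn)
	{
		*pfn = addr >> 12;
		return true;
	}

	/* Model of hva_to_pfn() after the patch: the fast path is always tried
	 * first, even for atomic callers; only the fallback depends on 'atomic'. */
	static pfn_t hva_to_pfn_model(uint64_t addr, bool atomic, bool write_fault)
	{
		pfn_t pfn;

		if (gup_fast(addr, write_fault, &pfn))
			return pfn;		/* lockless, no pmd_lock/pte_lock */

		if (atomic)
			return PFN_ERR;		/* must not sleep: give up here */

		if (gup_slow(addr, &pfn))
			return pfn;		/* may sleep, takes the locks */

		return PFN_ERR;
	}

	int main(void)
	{
		uint64_t hva = 0x7f0000001000ULL;

		printf("write fault, atomic:   %#llx\n",
		       (unsigned long long)hva_to_pfn_model(hva, true, true));
		printf("read fault, atomic:    %#llx\n",
		       (unsigned long long)hva_to_pfn_model(hva, true, false));
		printf("read fault, can sleep: %#llx\n",
		       (unsigned long long)hva_to_pfn_model(hva, false, false));
		return 0;
	}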