author		Paolo Bonzini <pbonzini@redhat.com>	2018-07-27 17:44:41 +0200
committer	Paolo Bonzini <pbonzini@redhat.com>	2018-08-06 17:59:07 +0200
commit		b9b33da2aa7429b0f61bcd218d34e1a277459fb4 (patch)
tree		e96543535148bb56df1d33bcfdd4886108005121 /virt
parent		877ad952be3d51445f6a74dd63708a9327c8f19d (diff)
download	linux-b9b33da2aa7429b0f61bcd218d34e1a277459fb4.tar.bz2
KVM: try __get_user_pages_fast even if not in atomic context
We are currently cutting hva_to_pfn_fast short if we do not want an
immediate exit, which is represented by !async && !atomic. However,
this is unnecessary, and __get_user_pages_fast is *much* faster
because the regular get_user_pages takes pmd_lock/pte_lock. In fact,
when many CPUs take a nested vmexit at the same time the contention
on those locks is visible, and this patch removes about 25% (compared
to 4.18) from vmexit.flat on a 16 vCPU nested guest.

Suggested-by: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
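For illustration, here is a minimal sketch of the lookup order this patch establishes. The function name pfn_lookup_sketch is hypothetical, error handling is simplified, and the async and *writable bookkeeping of the real hva_to_pfn is omitted; the APIs used (__get_user_pages_fast, get_user_pages_unlocked) are the real ones from the 4.18-era kernel.

	#include <linux/kvm_host.h>	/* kvm_pfn_t, KVM_PFN_ERR_FAULT */
	#include <linux/mm.h>		/* __get_user_pages_fast(), get_user_pages_unlocked() */

	/*
	 * Hypothetical helper illustrating the pattern of this patch;
	 * not the actual hva_to_pfn, which also handles async faults
	 * and writable-pfn tracking.
	 */
	static kvm_pfn_t pfn_lookup_sketch(unsigned long addr, bool atomic,
					   bool write_fault)
	{
		struct page *page;

		/*
		 * Lockless page-table walk: safe in atomic context and,
		 * per the commit message, avoids contending on
		 * pmd_lock/pte_lock.  After this patch it runs first for
		 * every caller, not only atomic/async ones.
		 */
		if (__get_user_pages_fast(addr, 1, write_fault, &page) == 1)
			return page_to_pfn(page);

		/* Atomic callers cannot sleep, so they must give up here. */
		if (atomic)
			return KVM_PFN_ERR_FAULT;

		/* Slow path: may sleep and takes the page-table locks. */
		if (get_user_pages_unlocked(addr, 1, &page,
					    write_fault ? FOLL_WRITE : 0) == 1)
			return page_to_pfn(page);

		return KVM_PFN_ERR_FAULT;
	}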
Diffstat (limited to 'virt')
-rw-r--r--	virt/kvm/kvm_main.c	14
1 file changed, 6 insertions, 8 deletions
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 74544d20bbf8..f83239ac8be1 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1343,18 +1343,16 @@ static inline int check_user_page_hwpoison(unsigned long addr)
 }
 
 /*
- * The atomic path to get the writable pfn which will be stored in @pfn,
- * true indicates success, otherwise false is returned.
+ * The fast path to get the writable pfn which will be stored in @pfn,
+ * true indicates success, otherwise false is returned.  It's also the
+ * only part that runs if we can be in atomic context.
  */
-static bool hva_to_pfn_fast(unsigned long addr, bool atomic, bool *async,
-			    bool write_fault, bool *writable, kvm_pfn_t *pfn)
+static bool hva_to_pfn_fast(unsigned long addr, bool write_fault,
+			    bool *writable, kvm_pfn_t *pfn)
 {
 	struct page *page[1];
 	int npages;
 
-	if (!(async || atomic))
-		return false;
-
 	/*
 	 * Fast pin a writable pfn only if it is a write fault request
 	 * or the caller allows to map a writable pfn for a read fault
@@ -1498,7 +1496,7 @@ static kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
 	/* we can do it either atomically or asynchronously, not both */
 	BUG_ON(atomic && async);
 
-	if (hva_to_pfn_fast(addr, atomic, async, write_fault, writable, &pfn))
+	if (hva_to_pfn_fast(addr, write_fault, writable, &pfn))
 		return pfn;
 
 	if (atomic)