author      John Hubbard <jhubbard@nvidia.com>      2020-05-25 23:22:07 -0700
committer   Paolo Bonzini <pbonzini@redhat.com>     2020-07-08 16:21:41 -0400
commit      dc42c8ae0a7762378102dd043779d19331804cce (patch)
tree        9fea2b85409e757d77f13a021f5d8fe0037c340e /arch/x86/kvm/svm
parent      78824fabc72e5e37d51e6e567fde70a4fc41a6d7 (diff)
download    linux-dc42c8ae0a7762378102dd043779d19331804cce.tar.bz2
KVM: SVM: convert get_user_pages() --> pin_user_pages()
This code was using get_user_pages*(), in a "Case 2" scenario
(DMA/RDMA), using the categorization from [1]. That means that it's time
to convert the get_user_pages*() + put_page() calls to
pin_user_pages*() + unpin_user_pages() calls.

There is some helpful background in [2]: basically, this is a small
part of fixing a long-standing disconnect between pinning pages, and
file systems' use of those pages.

[1] Documentation/core-api/pin_user_pages.rst
[2] "Explicit pinning of user-space pages": https://lwn.net/Articles/807108/

Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Sean Christopherson <sean.j.christopherson@intel.com>
Cc: Vitaly Kuznetsov <vkuznets@redhat.com>
Cc: Wanpeng Li <wanpengli@tencent.com>
Cc: Jim Mattson <jmattson@google.com>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: x86@kernel.org
Cc: kvm@vger.kernel.org
Signed-off-by: John Hubbard <jhubbard@nvidia.com>
Message-Id: <20200526062207.1360225-3-jhubbard@nvidia.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
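For readers unfamiliar with the conversion pattern, here is a minimal,
self-contained sketch of the pin side (not code from this commit;
demo_pin_range() is a hypothetical name, modeled on sev_pin_memory()
below). The key rule is that pages pinned via pin_user_pages*() must be
released via unpin_user_pages(), never via put_page() or
release_pages():

#include <linux/mm.h>
#include <linux/slab.h>

static struct page **demo_pin_range(unsigned long uaddr, unsigned long npages,
				    bool write)
{
	struct page **pages;
	int npinned;

	pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return NULL;

	/* "Case 2" (DMA/RDMA) users pin, rather than merely get, the pages. */
	npinned = pin_user_pages_fast(uaddr, npages,
				      write ? FOLL_WRITE : 0, pages);
	if (npinned == npages)
		return pages;

	/* Partial failure: unpin exactly what was pinned, nothing else. */
	if (npinned > 0)
		unpin_user_pages(pages, npinned);
	kvfree(pages);
	return NULL;
}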
Diffstat (limited to 'arch/x86/kvm/svm')
-rw-r--r--  arch/x86/kvm/svm/sev.c  6
1 file changed, 3 insertions, 3 deletions
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index ceeee4bb6150..a893624b9275 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -348,7 +348,7 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
 		return NULL;
 
 	/* Pin the user virtual address. */
-	npinned = get_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
+	npinned = pin_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
 	if (npinned != npages) {
 		pr_err("SEV: Failure locking %lu pages.\n", npages);
 		goto err;
@@ -361,7 +361,7 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
 
 err:
 	if (npinned > 0)
-		release_pages(pages, npinned);
+		unpin_user_pages(pages, npinned);
 
 	kvfree(pages);
 	return NULL;
@@ -372,7 +372,7 @@ static void sev_unpin_memory(struct kvm *kvm, struct page **pages,
 {
 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
 
-	release_pages(pages, npages);
+	unpin_user_pages(pages, npages);
 	kvfree(pages);
 	sev->pages_locked -= npages;
 }
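A hedged sketch of the matching teardown side, mirroring
sev_unpin_memory() above (demo_unpin_range() is again a hypothetical
name, not code from this commit):

static void demo_unpin_range(struct page **pages, unsigned long npages)
{
	/* unpin_user_pages() pairs with pin_user_pages_fast() above;
	 * mixing in put_page() or release_pages() would unbalance the
	 * elevated pin accounting that pin_user_pages*() maintains.
	 */
	unpin_user_pages(pages, npages);
	kvfree(pages);
}

Pins and unpins must balance exactly per page, which is why both the
error path and the teardown path release with unpin_user_pages() only.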