author     Ralph Campbell <rcampbell@nvidia.com>  2020-07-23 15:30:03 -0700
committer  Jason Gunthorpe <jgg@nvidia.com>       2020-07-28 16:20:33 -0300
commit     7d17e83abec1be3355260b3e4812044c65c32907 (patch)
tree       4115d8e16f9b420235d7d4c0cefa0130918e29c0 /lib
parent     f8477ce6b5923ef9829e32f2165ea9c2b4b754d2 (diff)
download   linux-7d17e83abec1be3355260b3e4812044c65c32907.tar.bz2
mm/hmm/test: use the new migration invalidation
Use the new MMU_NOTIFY_MIGRATE event to skip MMU invalidations of device
private memory and handle the invalidation in the driver as part of
migrating device private memory.

Link: https://lore.kernel.org/r/20200723223004.9586-6-rcampbell@nvidia.com
Signed-off-by: Ralph Campbell <rcampbell@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
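In outline, the scheme is: the driver tags migrations it starts by pointing
args.pgmap_owner at its own device structure when calling migrate_vma_setup(),
and its mmu_interval_notifier callback then recognizes the resulting
MMU_NOTIFY_MIGRATE event and returns early, since the device page table is
updated directly during the copy step. A minimal sketch of such a callback,
assuming a hypothetical struct my_device with an embedded notifier (only the
event/owner test is taken from the patch itself):

static bool my_invalidate(struct mmu_interval_notifier *mni,
			  const struct mmu_notifier_range *range,
			  unsigned long cur_seq)
{
	/* Hypothetical driver state; "notifier" is the embedded mni. */
	struct my_device *mdev = container_of(mni, struct my_device, notifier);

	/*
	 * A migration this driver started (it passed mdev as
	 * args.pgmap_owner to migrate_vma_setup()) invalidates the
	 * device page table in its own copy step, so skip it here.
	 */
	if (range->event == MMU_NOTIFY_MIGRATE &&
	    range->migrate_pgmap_owner == mdev)
		return true;

	mmu_interval_set_seq(mni, cur_seq);
	/* ... tear down device mappings for range->start..range->end ... */
	return true;
}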
Diffstat (limited to 'lib')
-rw-r--r--  lib/test_hmm.c  30
1 file changed, 17 insertions(+), 13 deletions(-)
diff --git a/lib/test_hmm.c b/lib/test_hmm.c
index e78a1414f58e..e7dc3de355b7 100644
--- a/lib/test_hmm.c
+++ b/lib/test_hmm.c
@@ -214,6 +214,14 @@ static bool dmirror_interval_invalidate(struct mmu_interval_notifier *mni,
 {
 	struct dmirror *dmirror = container_of(mni, struct dmirror, notifier);
 
+	/*
+	 * Ignore invalidation callbacks for device private pages since
+	 * the invalidation is handled as part of the migration process.
+	 */
+	if (range->event == MMU_NOTIFY_MIGRATE &&
+	    range->migrate_pgmap_owner == dmirror->mdevice)
+		return true;
+
 	if (mmu_notifier_range_blockable(range))
 		mutex_lock(&dmirror->mutex);
 	else if (!mutex_trylock(&dmirror->mutex))
@@ -693,7 +701,7 @@ static int dmirror_migrate(struct dmirror *dmirror,
 		args.dst = dst_pfns;
 		args.start = addr;
 		args.end = next;
-		args.pgmap_owner = NULL;
+		args.pgmap_owner = dmirror->mdevice;
 		args.flags = MIGRATE_VMA_SELECT_SYSTEM;
 		ret = migrate_vma_setup(&args);
 		if (ret)
@@ -983,7 +991,7 @@ static void dmirror_devmem_free(struct page *page)
 }
 
 static vm_fault_t dmirror_devmem_fault_alloc_and_copy(struct migrate_vma *args,
-						      struct dmirror_device *mdevice)
+						      struct dmirror *dmirror)
 {
 	const unsigned long *src = args->src;
 	unsigned long *dst = args->dst;
@@ -1005,6 +1013,7 @@ static vm_fault_t dmirror_devmem_fault_alloc_and_copy(struct migrate_vma *args,
 			continue;
 
 		lock_page(dpage);
+		xa_erase(&dmirror->pt, addr >> PAGE_SHIFT);
 		copy_highpage(dpage, spage);
 		*dst = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
 		if (*src & MIGRATE_PFN_WRITE)
@@ -1013,15 +1022,6 @@ static vm_fault_t dmirror_devmem_fault_alloc_and_copy(struct migrate_vma *args,
 	return 0;
 }
 
-static void dmirror_devmem_fault_finalize_and_map(struct migrate_vma *args,
-						  struct dmirror *dmirror)
-{
-	/* Invalidate the device's page table mapping. */
-	mutex_lock(&dmirror->mutex);
-	dmirror_do_update(dmirror, args->start, args->end);
-	mutex_unlock(&dmirror->mutex);
-}
-
 static vm_fault_t dmirror_devmem_fault(struct vm_fault *vmf)
 {
 	struct migrate_vma args;
@@ -1051,11 +1051,15 @@ static vm_fault_t dmirror_devmem_fault(struct vm_fault *vmf)
 	if (migrate_vma_setup(&args))
 		return VM_FAULT_SIGBUS;
 
-	ret = dmirror_devmem_fault_alloc_and_copy(&args, dmirror->mdevice);
+	ret = dmirror_devmem_fault_alloc_and_copy(&args, dmirror);
 	if (ret)
 		return ret;
 	migrate_vma_pages(&args);
-	dmirror_devmem_fault_finalize_and_map(&args, dmirror);
+	/*
+	 * No device finalize step is needed since
+	 * dmirror_devmem_fault_alloc_and_copy() will have already
+	 * invalidated the device page table.
+	 */
 	migrate_vma_finalize(&args);
 	return 0;
 }