author    Christoph Hellwig <hch@lst.de>        2019-06-26 14:27:11 +0200
committer Jason Gunthorpe <jgg@mellanox.com>    2019-07-02 14:32:44 -0300
commit    897e6365cda6ba6356e83a3aaa68dec82ef4c548 (patch)
tree      91a2776f8a994aa78d9eb1f91fce9debc51b1727 /mm
parent    f6a55e1a3fe6b3bb294a80a05437fcf86488d819 (diff)
memremap: add a migrate_to_ram method to struct dev_pagemap_ops
This replaces the hacky ->fault callback, which is currently called
directly from common code through an hmm-specific data structure, as an
exercise in layering violations.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Ralph Campbell <rcampbell@nvidia.com>
Reviewed-by: Jason Gunthorpe <jgg@mellanox.com>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Tested-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
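For reference, the new hook this patch adds to struct dev_pagemap_ops (in
include/linux/memremap.h, outside the mm/ diffstat shown below) has roughly
the following shape. This is a sketch inferred from the mm/hmm.c and
mm/memory.c hunks below, with the pre-existing members elided:

	struct dev_pagemap_ops {
		/* existing .page_free, .kill and .cleanup callbacks elided */

		/*
		 * Called from the CPU fault path when a process touches a
		 * device-private page: the driver must migrate the data back
		 * to a CPU-addressable page and return a vm_fault_t code.
		 */
		vm_fault_t (*migrate_to_ram)(struct vm_fault *vmf);
	};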
Diffstat (limited to 'mm')
-rw-r--r--   mm/hmm.c      13
-rw-r--r--   mm/memory.c    9
2 files changed, 7 insertions, 15 deletions
diff --git a/mm/hmm.c b/mm/hmm.c
index 5b0bd5f6a74f..96633ee066d8 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -1366,15 +1366,12 @@ static void hmm_devmem_ref_kill(struct dev_pagemap *pgmap)
 	percpu_ref_kill(pgmap->ref);
 }
 
-static vm_fault_t hmm_devmem_fault(struct vm_area_struct *vma,
-			    unsigned long addr,
-			    const struct page *page,
-			    unsigned int flags,
-			    pmd_t *pmdp)
+static vm_fault_t hmm_devmem_migrate_to_ram(struct vm_fault *vmf)
 {
-	struct hmm_devmem *devmem = page->pgmap->data;
+	struct hmm_devmem *devmem = vmf->page->pgmap->data;
 
-	return devmem->ops->fault(devmem, vma, addr, page, flags, pmdp);
+	return devmem->ops->fault(devmem, vmf->vma, vmf->address, vmf->page,
+			vmf->flags, vmf->pmd);
 }
 
 static void hmm_devmem_free(struct page *page, void *data)
@@ -1388,6 +1385,7 @@ static const struct dev_pagemap_ops hmm_pagemap_ops = {
 	.page_free		= hmm_devmem_free,
 	.kill			= hmm_devmem_ref_kill,
 	.cleanup		= hmm_devmem_ref_exit,
+	.migrate_to_ram		= hmm_devmem_migrate_to_ram,
 };
 
 /*
@@ -1438,7 +1436,6 @@ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
 	devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
 	devmem->pfn_last = devmem->pfn_first +
 			   (resource_size(devmem->resource) >> PAGE_SHIFT);
-	devmem->page_fault = hmm_devmem_fault;
 
 	devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
 	devmem->pagemap.res = *devmem->resource;
diff --git a/mm/memory.c b/mm/memory.c
index 2d14f4c7e152..d437ccdb210c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2748,13 +2748,8 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 			migration_entry_wait(vma->vm_mm, vmf->pmd,
 					     vmf->address);
 		} else if (is_device_private_entry(entry)) {
-			/*
-			 * For un-addressable device memory we call the pgmap
-			 * fault handler callback. The callback must migrate
-			 * the page back to some CPU accessible page.
-			 */
-			ret = device_private_entry_fault(vma, vmf->address, entry,
-				vmf->flags, vmf->pmd);
+			vmf->page = device_private_entry_to_page(entry);
+			ret = vmf->page->pgmap->ops->migrate_to_ram(vmf);
 		} else if (is_hwpoison_entry(entry)) {
 			ret = VM_FAULT_HWPOISON;
 		} else {
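As a rough illustration of how a device-memory driver is expected to use the
new method (the mydrv_* names below are hypothetical and not part of this
patch):

	/* Hypothetical driver-side sketch; names are illustrative only. */
	static vm_fault_t mydrv_migrate_to_ram(struct vm_fault *vmf)
	{
		struct page *page = vmf->page;	/* faulting device-private page */

		/*
		 * Copy the data from @page back to a CPU-accessible page and
		 * fix up the page tables (e.g. with the migrate_vma helpers),
		 * then return 0, or a VM_FAULT_* code on failure.
		 */
		return 0;
	}

	static const struct dev_pagemap_ops mydrv_pagemap_ops = {
		.migrate_to_ram	= mydrv_migrate_to_ram,
		/* .page_free, .kill and .cleanup as the driver requires */
	};

With this in place the core fault path reaches the driver directly through
vmf->page->pgmap->ops->migrate_to_ram(vmf), with no hmm-specific indirection
in between.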