Diffstat (limited to 'drivers')
-rw-r--r--   drivers/dax/dax-private.h               |   4
-rw-r--r--   drivers/dax/device.c                    |  41
-rw-r--r--   drivers/dax/pmem/core.c                 |   2
-rw-r--r--   drivers/gpu/drm/nouveau/Kconfig         |   6
-rw-r--r--   drivers/gpu/drm/nouveau/nouveau_dmem.c  | 103
-rw-r--r--   drivers/gpu/drm/nouveau/nouveau_svm.c   |   2
-rw-r--r--   drivers/nvdimm/pfn_devs.c               |   3
-rw-r--r--   drivers/nvdimm/pmem.c                   |  51
-rw-r--r--   drivers/pci/p2pdma.c                    |  52
9 files changed, 77 insertions(+), 187 deletions(-)
diff --git a/drivers/dax/dax-private.h b/drivers/dax/dax-private.h
index b4177aafbbd1..c915889d1769 100644
--- a/drivers/dax/dax-private.h
+++ b/drivers/dax/dax-private.h
@@ -43,8 +43,6 @@ struct dax_region {
  * @target_node: effective numa node if dev_dax memory range is onlined
  * @dev - device core
  * @pgmap - pgmap for memmap setup / lifetime (driver owned)
- * @ref: pgmap reference count (driver owned)
- * @cmp: @ref final put completion (driver owned)
  */
 struct dev_dax {
         struct dax_region *region;
@@ -52,8 +50,6 @@ struct dev_dax {
         int target_node;
         struct device dev;
         struct dev_pagemap pgmap;
-        struct percpu_ref ref;
-        struct completion cmp;
 };
 
 static inline struct dev_dax *to_dev_dax(struct device *dev)
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index 8465d12fecba..1af823b2fe6b 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -14,37 +14,6 @@
 #include "dax-private.h"
 #include "bus.h"
 
-static struct dev_dax *ref_to_dev_dax(struct percpu_ref *ref)
-{
-        return container_of(ref, struct dev_dax, ref);
-}
-
-static void dev_dax_percpu_release(struct percpu_ref *ref)
-{
-        struct dev_dax *dev_dax = ref_to_dev_dax(ref);
-
-        dev_dbg(&dev_dax->dev, "%s\n", __func__);
-        complete(&dev_dax->cmp);
-}
-
-static void dev_dax_percpu_exit(struct percpu_ref *ref)
-{
-        struct dev_dax *dev_dax = ref_to_dev_dax(ref);
-
-        dev_dbg(&dev_dax->dev, "%s\n", __func__);
-        wait_for_completion(&dev_dax->cmp);
-        percpu_ref_exit(ref);
-}
-
-static void dev_dax_percpu_kill(struct percpu_ref *data)
-{
-        struct percpu_ref *ref = data;
-        struct dev_dax *dev_dax = ref_to_dev_dax(ref);
-
-        dev_dbg(&dev_dax->dev, "%s\n", __func__);
-        percpu_ref_kill(ref);
-}
-
 static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
                 const char *func)
 {
@@ -459,15 +428,7 @@ int dev_dax_probe(struct device *dev)
                 return -EBUSY;
         }
 
-        init_completion(&dev_dax->cmp);
-        rc = percpu_ref_init(&dev_dax->ref, dev_dax_percpu_release, 0,
-                        GFP_KERNEL);
-        if (rc)
-                return rc;
-
-        dev_dax->pgmap.ref = &dev_dax->ref;
-        dev_dax->pgmap.kill = dev_dax_percpu_kill;
-        dev_dax->pgmap.cleanup = dev_dax_percpu_exit;
+        dev_dax->pgmap.type = MEMORY_DEVICE_DEVDAX;
         addr = devm_memremap_pages(dev, &dev_dax->pgmap);
         if (IS_ERR(addr))
                 return PTR_ERR(addr);
diff --git a/drivers/dax/pmem/core.c b/drivers/dax/pmem/core.c
index f9f51786d556..6eb6dfdf19bf 100644
--- a/drivers/dax/pmem/core.c
+++ b/drivers/dax/pmem/core.c
@@ -16,7 +16,7 @@ struct dev_dax *__dax_pmem_probe(struct device *dev, enum dev_dax_subsys subsys)
         struct dev_dax *dev_dax;
         struct nd_namespace_io *nsio;
         struct dax_region *dax_region;
-        struct dev_pagemap pgmap = { 0 };
+        struct dev_pagemap pgmap = { };
         struct nd_namespace_common *ndns;
         struct nd_dax *nd_dax = to_nd_dax(dev);
         struct nd_pfn *nd_pfn = &nd_dax->nd_pfn;
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index dba2613f7180..96b9814e6d06 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -84,11 +84,11 @@ config DRM_NOUVEAU_BACKLIGHT
 
 config DRM_NOUVEAU_SVM
         bool "(EXPERIMENTAL) Enable SVM (Shared Virtual Memory) support"
-        depends on ARCH_HAS_HMM
+        depends on DEVICE_PRIVATE
         depends on DRM_NOUVEAU
+        depends on HMM_MIRROR
         depends on STAGING
-        select HMM_MIRROR
-        select DEVICE_PRIVATE
+        select MIGRATE_VMA_HELPER
         default n
         help
           Say Y here if you want to enable experimental support for
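The dax and Kconfig hunks above set the pattern for the whole series: rather than owning a percpu_ref plus hand-rolled kill/cleanup callbacks, a driver now states what kind of device memory it maps and lets devm_memremap_pages() manage the reference count internally. A minimal sketch of the new calling convention follows; the foo_* names and the base field are illustrative, not taken from any real driver, and pgmap.res is assumed to have been filled in earlier by bus code:

#include <linux/memremap.h>

/* Hypothetical driver state embedding the pagemap. */
struct foo_dev {
        struct dev_pagemap pgmap;       /* pgmap.res already describes the range */
        void *base;
};

static int foo_probe(struct device *dev, struct foo_dev *foo)
{
        void *addr;

        /* The type selects the core's refcount and cleanup behavior. */
        foo->pgmap.type = MEMORY_DEVICE_DEVDAX;

        /*
         * With pgmap->ref left NULL, the core falls back to an internal
         * reference that it kills and drains on device teardown.
         */
        addr = devm_memremap_pages(dev, &foo->pgmap);
        if (IS_ERR(addr))
                return PTR_ERR(addr);

        foo->base = addr;
        return 0;
}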
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index 40c47d6a7d78..42c026010938 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -72,7 +72,8 @@ struct nouveau_dmem_migrate {
 };
 
 struct nouveau_dmem {
-        struct hmm_devmem *devmem;
+        struct nouveau_drm *drm;
+        struct dev_pagemap pagemap;
         struct nouveau_dmem_migrate migrate;
         struct list_head chunk_free;
         struct list_head chunk_full;
@@ -80,6 +81,11 @@ struct nouveau_dmem {
         struct mutex mutex;
 };
 
+static inline struct nouveau_dmem *page_to_dmem(struct page *page)
+{
+        return container_of(page->pgmap, struct nouveau_dmem, pagemap);
+}
+
 struct nouveau_dmem_fault {
         struct nouveau_drm *drm;
         struct nouveau_fence *fence;
@@ -96,14 +102,10 @@ struct nouveau_migrate {
         unsigned long dma_nr;
 };
 
-static void
-nouveau_dmem_free(struct hmm_devmem *devmem, struct page *page)
+static void nouveau_dmem_page_free(struct page *page)
 {
-        struct nouveau_dmem_chunk *chunk;
-        unsigned long idx;
-
-        chunk = (void *)hmm_devmem_page_get_drvdata(page);
-        idx = page_to_pfn(page) - chunk->pfn_first;
+        struct nouveau_dmem_chunk *chunk = page->zone_device_data;
+        unsigned long idx = page_to_pfn(page) - chunk->pfn_first;
 
         /*
          * FIXME:
@@ -148,11 +150,12 @@ nouveau_dmem_fault_alloc_and_copy(struct vm_area_struct *vma,
                 if (!spage || !(src_pfns[i] & MIGRATE_PFN_MIGRATE))
                         continue;
 
-                dpage = hmm_vma_alloc_locked_page(vma, addr);
+                dpage = alloc_page_vma(GFP_HIGHUSER, vma, addr);
                 if (!dpage) {
                         dst_pfns[i] = MIGRATE_PFN_ERROR;
                         continue;
                 }
+                lock_page(dpage);
 
                 dst_pfns[i] = migrate_pfn(page_to_pfn(dpage)) |
                               MIGRATE_PFN_LOCKED;
@@ -194,7 +197,7 @@ nouveau_dmem_fault_alloc_and_copy(struct vm_area_struct *vma,
 
                 dst_addr = fault->dma[fault->npages++];
 
-                chunk = (void *)hmm_devmem_page_get_drvdata(spage);
+                chunk = spage->zone_device_data;
                 src_addr = page_to_pfn(spage) - chunk->pfn_first;
                 src_addr = (src_addr << PAGE_SHIFT) + chunk->bo->bo.offset;
 
@@ -259,29 +262,21 @@ static const struct migrate_vma_ops nouveau_dmem_fault_migrate_ops = {
         .finalize_and_map       = nouveau_dmem_fault_finalize_and_map,
 };
 
-static vm_fault_t
-nouveau_dmem_fault(struct hmm_devmem *devmem,
-                   struct vm_area_struct *vma,
-                   unsigned long addr,
-                   const struct page *page,
-                   unsigned int flags,
-                   pmd_t *pmdp)
+static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
 {
-        struct drm_device *drm_dev = dev_get_drvdata(devmem->device);
+        struct nouveau_dmem *dmem = page_to_dmem(vmf->page);
         unsigned long src[1] = {0}, dst[1] = {0};
-        struct nouveau_dmem_fault fault = {0};
+        struct nouveau_dmem_fault fault = { .drm = dmem->drm };
         int ret;
-
-
         /*
          * FIXME what we really want is to find some heuristic to migrate more
          * than just one page on CPU fault. When such fault happens it is very
          * likely that more surrounding page will CPU fault too.
          */
-        fault.drm = nouveau_drm(drm_dev);
-        ret = migrate_vma(&nouveau_dmem_fault_migrate_ops, vma, addr,
-                        addr + PAGE_SIZE, src, dst, &fault);
+        ret = migrate_vma(&nouveau_dmem_fault_migrate_ops, vmf->vma,
+                        vmf->address, vmf->address + PAGE_SIZE,
+                        src, dst, &fault);
         if (ret)
                 return VM_FAULT_SIGBUS;
 
@@ -291,10 +286,9 @@ nouveau_dmem_fault(struct hmm_devmem *devmem,
         return 0;
 }
 
-static const struct hmm_devmem_ops
-nouveau_dmem_devmem_ops = {
-        .free = nouveau_dmem_free,
-        .fault = nouveau_dmem_fault,
+static const struct dev_pagemap_ops nouveau_dmem_pagemap_ops = {
+        .page_free              = nouveau_dmem_page_free,
+        .migrate_to_ram         = nouveau_dmem_migrate_to_ram,
 };
 
 static int
@@ -580,7 +574,8 @@ void
 nouveau_dmem_init(struct nouveau_drm *drm)
 {
         struct device *device = drm->dev->dev;
-        unsigned long i, size;
+        struct resource *res;
+        unsigned long i, size, pfn_first;
         int ret;
 
         /* This only make sense on PASCAL or newer */
@@ -590,6 +585,7 @@ nouveau_dmem_init(struct nouveau_drm *drm)
         if (!(drm->dmem = kzalloc(sizeof(*drm->dmem), GFP_KERNEL)))
                 return;
 
+        drm->dmem->drm = drm;
         mutex_init(&drm->dmem->mutex);
         INIT_LIST_HEAD(&drm->dmem->chunk_free);
         INIT_LIST_HEAD(&drm->dmem->chunk_full);
@@ -599,11 +595,8 @@ nouveau_dmem_init(struct nouveau_drm *drm)
 
         /* Initialize migration dma helpers before registering memory */
         ret = nouveau_dmem_migrate_init(drm);
-        if (ret) {
-                kfree(drm->dmem);
-                drm->dmem = NULL;
-                return;
-        }
+        if (ret)
+                goto out_free;
 
         /*
          * FIXME we need some kind of policy to decide how much VRAM we
@@ -611,14 +604,16 @@ nouveau_dmem_init(struct nouveau_drm *drm)
          * and latter if we want to do thing like over commit then we
          * could revisit this.
          */
-        drm->dmem->devmem = hmm_devmem_add(&nouveau_dmem_devmem_ops,
-                                           device, size);
-        if (IS_ERR(drm->dmem->devmem)) {
-                kfree(drm->dmem);
-                drm->dmem = NULL;
-                return;
-        }
-
+        res = devm_request_free_mem_region(device, &iomem_resource, size);
+        if (IS_ERR(res))
+                goto out_free;
+        drm->dmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
+        drm->dmem->pagemap.res = *res;
+        drm->dmem->pagemap.ops = &nouveau_dmem_pagemap_ops;
+        if (IS_ERR(devm_memremap_pages(device, &drm->dmem->pagemap)))
+                goto out_free;
+
+        pfn_first = res->start >> PAGE_SHIFT;
         for (i = 0; i < (size / DMEM_CHUNK_SIZE); ++i) {
                 struct nouveau_dmem_chunk *chunk;
                 struct page *page;
@@ -631,17 +626,19 @@ nouveau_dmem_init(struct nouveau_drm *drm)
                 }
 
                 chunk->drm = drm;
-                chunk->pfn_first = drm->dmem->devmem->pfn_first;
-                chunk->pfn_first += (i * DMEM_CHUNK_NPAGES);
+                chunk->pfn_first = pfn_first + (i * DMEM_CHUNK_NPAGES);
                 list_add_tail(&chunk->list, &drm->dmem->chunk_empty);
 
                 page = pfn_to_page(chunk->pfn_first);
-                for (j = 0; j < DMEM_CHUNK_NPAGES; ++j, ++page) {
-                        hmm_devmem_page_set_drvdata(page, (long)chunk);
-                }
+                for (j = 0; j < DMEM_CHUNK_NPAGES; ++j, ++page)
+                        page->zone_device_data = chunk;
         }
 
         NV_INFO(drm, "DMEM: registered %ldMB of device memory\n", size >> 20);
+        return;
+out_free:
+        kfree(drm->dmem);
+        drm->dmem = NULL;
 }
 
 static void
@@ -697,7 +694,7 @@ nouveau_dmem_migrate_alloc_and_copy(struct vm_area_struct *vma,
                 if (!dpage || dst_pfns[i] == MIGRATE_PFN_ERROR)
                         continue;
 
-                chunk = (void *)hmm_devmem_page_get_drvdata(dpage);
+                chunk = dpage->zone_device_data;
                 dst_addr = page_to_pfn(dpage) - chunk->pfn_first;
                 dst_addr = (dst_addr << PAGE_SHIFT) + chunk->bo->bo.offset;
 
@@ -832,13 +829,7 @@ out:
 static inline bool
 nouveau_dmem_page(struct nouveau_drm *drm, struct page *page)
 {
-        if (!is_device_private_page(page))
-                return false;
-
-        if (drm->dmem->devmem != page->pgmap->data)
-                return false;
-
-        return true;
+        return is_device_private_page(page) && drm->dmem == page_to_dmem(page);
 }
 
 void
@@ -867,7 +858,7 @@ nouveau_dmem_convert_pfn(struct nouveau_drm *drm,
                         continue;
                 }
 
-                chunk = (void *)hmm_devmem_page_get_drvdata(page);
+                chunk = page->zone_device_data;
                 addr = page_to_pfn(page) - chunk->pfn_first;
                 addr = (addr + chunk->bo->bo.mem.start) << PAGE_SHIFT;
 
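The nouveau conversion demonstrates the general MEMORY_DEVICE_PRIVATE recipe: embed a dev_pagemap in a driver object, recover that object from a faulting page via page->pgmap, and keep per-page driver state in page->zone_device_data instead of the old hmm_devmem drvdata accessors. A hedged skeleton of that shape, with invented bar_* names standing in for a real driver:

#include <linux/memremap.h>
#include <linux/mm.h>

struct bar_dmem {
        struct dev_pagemap pagemap;
        /* device-private allocator state would live here */
};

/* Map a faulting ZONE_DEVICE page back to its owning driver object. */
static struct bar_dmem *bar_page_to_dmem(struct page *page)
{
        return container_of(page->pgmap, struct bar_dmem, pagemap);
}

static vm_fault_t bar_migrate_to_ram(struct vm_fault *vmf)
{
        struct bar_dmem *dmem = bar_page_to_dmem(vmf->page);

        /* migrate vmf->page back to system RAM here, then return 0 */
        (void)dmem;
        return 0;
}

static void bar_page_free(struct page *page)
{
        /* reclaim whatever page->zone_device_data points at */
}

static const struct dev_pagemap_ops bar_pagemap_ops = {
        .page_free      = bar_page_free,
        .migrate_to_ram = bar_migrate_to_ram,
};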
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index 93ed43c413f0..8c92374afcf2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -649,7 +649,7 @@ nouveau_svm_fault(struct nvif_notify *notify)
                 range.values = nouveau_svm_pfn_values;
                 range.pfn_shift = NVIF_VMM_PFNMAP_V0_ADDR_SHIFT;
 again:
-                ret = hmm_vma_fault(&range, true);
+                ret = hmm_vma_fault(&svmm->mirror, &range, true);
                 if (ret == 0) {
                         mutex_lock(&svmm->mutex);
                         if (!hmm_vma_range_done(&range)) {
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 0f81fc56bbfd..55fb6b7433ed 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -622,7 +622,6 @@ static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
                 if (offset < reserve)
                         return -EINVAL;
                 nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
-                pgmap->altmap_valid = false;
         } else if (nd_pfn->mode == PFN_MODE_PMEM) {
                 nd_pfn->npfns = PFN_SECTION_ALIGN_UP((resource_size(res) - offset)
                                         / PAGE_SIZE);
@@ -634,7 +633,7 @@ static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
                 memcpy(altmap, &__altmap, sizeof(*altmap));
                 altmap->free = PHYS_PFN(offset - reserve);
                 altmap->alloc = 0;
-                pgmap->altmap_valid = true;
+                pgmap->flags |= PGMAP_ALTMAP_VALID;
         } else
                 return -ENXIO;
 
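The pfn_devs.c hunks track the interface change from a boolean pgmap->altmap_valid to a PGMAP_ALTMAP_VALID bit in pgmap->flags, which lets a zero-initialized dev_pagemap mean "no altmap" with no explicit opt-out (the "= { }" initializer in dax/pmem/core.c above relies on the same property). A small illustrative helper, not part of the patch, showing the opt-in side:

#include <linux/memremap.h>
#include <linux/string.h>

/*
 * Illustrative only: install a prepared vmem_altmap and flag it valid;
 * callers that want no altmap simply leave the zeroed pgmap untouched.
 */
static void example_set_altmap(struct dev_pagemap *pgmap,
                               const struct vmem_altmap *altmap)
{
        memcpy(&pgmap->altmap, altmap, sizeof(pgmap->altmap));
        pgmap->flags |= PGMAP_ALTMAP_VALID;
}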
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 24d7fe7c74ed..e7d8cc9f41e8 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -303,24 +303,24 @@ static const struct attribute_group *pmem_attribute_groups[] = {
         NULL,
 };
 
-static void __pmem_release_queue(struct percpu_ref *ref)
+static void pmem_pagemap_cleanup(struct dev_pagemap *pgmap)
 {
-        struct request_queue *q;
+        struct request_queue *q =
+                container_of(pgmap->ref, struct request_queue, q_usage_counter);
 
-        q = container_of(ref, typeof(*q), q_usage_counter);
         blk_cleanup_queue(q);
 }
 
-static void pmem_release_queue(void *ref)
+static void pmem_release_queue(void *pgmap)
 {
-        __pmem_release_queue(ref);
+        pmem_pagemap_cleanup(pgmap);
 }
 
-static void pmem_freeze_queue(struct percpu_ref *ref)
+static void pmem_pagemap_kill(struct dev_pagemap *pgmap)
 {
-        struct request_queue *q;
+        struct request_queue *q =
+                container_of(pgmap->ref, struct request_queue, q_usage_counter);
 
-        q = container_of(ref, typeof(*q), q_usage_counter);
         blk_freeze_queue_start(q);
 }
 
@@ -334,26 +334,16 @@ static void pmem_release_disk(void *__pmem)
         put_disk(pmem->disk);
 }
 
-static void pmem_release_pgmap_ops(void *__pgmap)
-{
-        dev_pagemap_put_ops();
-}
-
-static void fsdax_pagefree(struct page *page, void *data)
+static void pmem_pagemap_page_free(struct page *page)
 {
         wake_up_var(&page->_refcount);
 }
 
-static int setup_pagemap_fsdax(struct device *dev, struct dev_pagemap *pgmap)
-{
-        dev_pagemap_get_ops();
-        if (devm_add_action_or_reset(dev, pmem_release_pgmap_ops, pgmap))
-                return -ENOMEM;
-        pgmap->type = MEMORY_DEVICE_FS_DAX;
-        pgmap->page_free = fsdax_pagefree;
-
-        return 0;
-}
+static const struct dev_pagemap_ops fsdax_pagemap_ops = {
+        .page_free              = pmem_pagemap_page_free,
+        .kill                   = pmem_pagemap_kill,
+        .cleanup                = pmem_pagemap_cleanup,
+};
 
 static int pmem_attach_disk(struct device *dev,
                 struct nd_namespace_common *ndns)
@@ -409,11 +399,9 @@ static int pmem_attach_disk(struct device *dev,
 
         pmem->pfn_flags = PFN_DEV;
         pmem->pgmap.ref = &q->q_usage_counter;
-        pmem->pgmap.kill = pmem_freeze_queue;
-        pmem->pgmap.cleanup = __pmem_release_queue;
         if (is_nd_pfn(dev)) {
-                if (setup_pagemap_fsdax(dev, &pmem->pgmap))
-                        return -ENOMEM;
+                pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
+                pmem->pgmap.ops = &fsdax_pagemap_ops;
                 addr = devm_memremap_pages(dev, &pmem->pgmap);
                 pfn_sb = nd_pfn->pfn_sb;
                 pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
@@ -424,15 +412,14 @@ static int pmem_attach_disk(struct device *dev,
                 bb_res.start += pmem->data_offset;
         } else if (pmem_should_map_pages(dev)) {
                 memcpy(&pmem->pgmap.res, &nsio->res, sizeof(pmem->pgmap.res));
-                pmem->pgmap.altmap_valid = false;
-                if (setup_pagemap_fsdax(dev, &pmem->pgmap))
-                        return -ENOMEM;
+                pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
+                pmem->pgmap.ops = &fsdax_pagemap_ops;
                 addr = devm_memremap_pages(dev, &pmem->pgmap);
                 pmem->pfn_flags |= PFN_MAP;
                 memcpy(&bb_res, &pmem->pgmap.res, sizeof(bb_res));
         } else {
                 if (devm_add_action_or_reset(dev, pmem_release_queue,
-                                        &q->q_usage_counter))
+                                        &pmem->pgmap))
                         return -ENOMEM;
                 addr = devm_memremap(dev, pmem->phys_addr,
                                 pmem->size, ARCH_MEMREMAP_PMEM);
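pmem keeps using the request_queue's q_usage_counter as the pagemap reference, but the kill/cleanup hooks now arrive through dev_pagemap_ops instead of function pointers on the pgmap itself. The .page_free wake-up matters because fs/dax waits for a DAX page to become idle before freeing the filesystem blocks behind it; a hedged sketch of that waiting side (the real helper in fs/dax.c is more involved):

#include <linux/mm.h>
#include <linux/wait_bit.h>

/*
 * Sketch of the waiter that pmem_pagemap_page_free() wakes: a
 * ZONE_DEVICE page counts as idle once only the pagemap's own
 * reference remains.
 */
static void example_wait_for_page_idle(struct page *page)
{
        wait_var_event(&page->_refcount,
                       page_ref_count(page) == 1);
}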
diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
index a4994aa3acc0..a3073ce16520 100644
--- a/drivers/pci/p2pdma.c
+++ b/drivers/pci/p2pdma.c
@@ -25,12 +25,6 @@ struct pci_p2pdma {
         bool p2pmem_published;
 };
 
-struct p2pdma_pagemap {
-        struct dev_pagemap pgmap;
-        struct percpu_ref ref;
-        struct completion ref_done;
-};
-
 static ssize_t size_show(struct device *dev, struct device_attribute *attr,
                          char *buf)
 {
@@ -79,31 +73,6 @@ static const struct attribute_group p2pmem_group = {
         .name = "p2pmem",
 };
 
-static struct p2pdma_pagemap *to_p2p_pgmap(struct percpu_ref *ref)
-{
-        return container_of(ref, struct p2pdma_pagemap, ref);
-}
-
-static void pci_p2pdma_percpu_release(struct percpu_ref *ref)
-{
-        struct p2pdma_pagemap *p2p_pgmap = to_p2p_pgmap(ref);
-
-        complete(&p2p_pgmap->ref_done);
-}
-
-static void pci_p2pdma_percpu_kill(struct percpu_ref *ref)
-{
-        percpu_ref_kill(ref);
-}
-
-static void pci_p2pdma_percpu_cleanup(struct percpu_ref *ref)
-{
-        struct p2pdma_pagemap *p2p_pgmap = to_p2p_pgmap(ref);
-
-        wait_for_completion(&p2p_pgmap->ref_done);
-        percpu_ref_exit(&p2p_pgmap->ref);
-}
-
 static void pci_p2pdma_release(void *data)
 {
         struct pci_dev *pdev = data;
@@ -166,7 +135,6 @@ out:
 int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
                             u64 offset)
 {
-        struct p2pdma_pagemap *p2p_pgmap;
         struct dev_pagemap *pgmap;
         void *addr;
         int error;
@@ -189,27 +157,15 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
                 return error;
         }
 
-        p2p_pgmap = devm_kzalloc(&pdev->dev, sizeof(*p2p_pgmap), GFP_KERNEL);
-        if (!p2p_pgmap)
+        pgmap = devm_kzalloc(&pdev->dev, sizeof(*pgmap), GFP_KERNEL);
+        if (!pgmap)
                 return -ENOMEM;
-
-        init_completion(&p2p_pgmap->ref_done);
-        error = percpu_ref_init(&p2p_pgmap->ref,
-                        pci_p2pdma_percpu_release, 0, GFP_KERNEL);
-        if (error)
-                goto pgmap_free;
-
-        pgmap = &p2p_pgmap->pgmap;
-
         pgmap->res.start = pci_resource_start(pdev, bar) + offset;
         pgmap->res.end = pgmap->res.start + size - 1;
         pgmap->res.flags = pci_resource_flags(pdev, bar);
-        pgmap->ref = &p2p_pgmap->ref;
         pgmap->type = MEMORY_DEVICE_PCI_P2PDMA;
         pgmap->pci_p2pdma_bus_offset = pci_bus_address(pdev, bar) -
                 pci_resource_start(pdev, bar);
-        pgmap->kill = pci_p2pdma_percpu_kill;
-        pgmap->cleanup = pci_p2pdma_percpu_cleanup;
 
         addr = devm_memremap_pages(&pdev->dev, pgmap);
         if (IS_ERR(addr)) {
@@ -220,7 +176,7 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
         error = gen_pool_add_owner(pdev->p2pdma->pool, (unsigned long)addr,
                         pci_bus_address(pdev, bar) + offset,
                         resource_size(&pgmap->res), dev_to_node(&pdev->dev),
-                        &p2p_pgmap->ref);
+                        pgmap->ref);
         if (error)
                 goto pages_free;
 
@@ -232,7 +188,7 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
 pages_free:
         devm_memunmap_pages(&pdev->dev, pgmap);
 pgmap_free:
-        devm_kfree(&pdev->dev, p2p_pgmap);
+        devm_kfree(&pdev->dev, pgmap);
         return error;
 }
 EXPORT_SYMBOL_GPL(pci_p2pdma_add_resource);
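With the driver-owned percpu_ref gone, p2pdma allocates a bare dev_pagemap and hands the core-managed reference (pgmap->ref, filled in by devm_memremap_pages()) straight to gen_pool_add_owner(). Consumers of the API are unaffected; a hedged usage sketch for a hypothetical PCI driver publishing BAR 4 as peer-to-peer memory, with error handling trimmed:

#include <linux/pci.h>
#include <linux/pci-p2pdma.h>

static int example_probe(struct pci_dev *pdev,
                         const struct pci_device_id *id)
{
        int error;

        error = pcim_enable_device(pdev);
        if (error)
                return error;

        /* size 0 requests the whole BAR, at offset 0 from its start */
        error = pci_p2pdma_add_resource(pdev, 4, 0, 0);
        if (error)
                return error;

        /* make the region visible to p2pdma clients */
        pci_p2pmem_publish(pdev, true);
        return 0;
}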