Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/nouveau/Kconfig          |   6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dmem.c   | 103
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_svm.c    |   2
3 files changed, 51 insertions(+), 60 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index dba2613f7180..96b9814e6d06 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -84,11 +84,11 @@ config DRM_NOUVEAU_BACKLIGHT
config DRM_NOUVEAU_SVM
bool "(EXPERIMENTAL) Enable SVM (Shared Virtual Memory) support"
- depends on ARCH_HAS_HMM
+ depends on DEVICE_PRIVATE
depends on DRM_NOUVEAU
+ depends on HMM_MIRROR
depends on STAGING
- select HMM_MIRROR
- select DEVICE_PRIVATE
+ select MIGRATE_VMA_HELPER
default n
help
Say Y here if you want to enable experimental support for
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index 40c47d6a7d78..42c026010938 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -72,7 +72,8 @@ struct nouveau_dmem_migrate {
};
struct nouveau_dmem {
- struct hmm_devmem *devmem;
+ struct nouveau_drm *drm;
+ struct dev_pagemap pagemap;
struct nouveau_dmem_migrate migrate;
struct list_head chunk_free;
struct list_head chunk_full;
@@ -80,6 +81,11 @@ struct nouveau_dmem {
struct mutex mutex;
};
+static inline struct nouveau_dmem *page_to_dmem(struct page *page)
+{
+ return container_of(page->pgmap, struct nouveau_dmem, pagemap);
+}
+
struct nouveau_dmem_fault {
struct nouveau_drm *drm;
struct nouveau_fence *fence;
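The new page_to_dmem() helper works because struct dev_pagemap is now embedded directly in struct nouveau_dmem: every ZONE_DEVICE page carries a page->pgmap pointer back to the pagemap it was created from, so container_of() recovers the driver state without any private lookup. The general pattern, as a minimal sketch (struct my_dev and the helper name are illustrative, not nouveau code):

struct my_dev {
	struct dev_pagemap pagemap;	/* embedded, never a pointer */
	/* ... driver-private state ... */
};

static inline struct my_dev *page_to_my_dev(struct page *page)
{
	/* page->pgmap points at the embedded pagemap of a ZONE_DEVICE page */
	return container_of(page->pgmap, struct my_dev, pagemap);
}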
@@ -96,14 +102,10 @@ struct nouveau_migrate {
unsigned long dma_nr;
};
-static void
-nouveau_dmem_free(struct hmm_devmem *devmem, struct page *page)
+static void nouveau_dmem_page_free(struct page *page)
{
- struct nouveau_dmem_chunk *chunk;
- unsigned long idx;
-
- chunk = (void *)hmm_devmem_page_get_drvdata(page);
- idx = page_to_pfn(page) - chunk->pfn_first;
+ struct nouveau_dmem_chunk *chunk = page->zone_device_data;
+ unsigned long idx = page_to_pfn(page) - chunk->pfn_first;
/*
* FIXME:
@@ -148,11 +150,12 @@ nouveau_dmem_fault_alloc_and_copy(struct vm_area_struct *vma,
if (!spage || !(src_pfns[i] & MIGRATE_PFN_MIGRATE))
continue;
- dpage = hmm_vma_alloc_locked_page(vma, addr);
+ dpage = alloc_page_vma(GFP_HIGHUSER, vma, addr);
if (!dpage) {
dst_pfns[i] = MIGRATE_PFN_ERROR;
continue;
}
+ lock_page(dpage);
dst_pfns[i] = migrate_pfn(page_to_pfn(dpage)) |
MIGRATE_PFN_LOCKED;
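hmm_vma_alloc_locked_page() disappears because it was only a thin wrapper: the driver can allocate an ordinary anonymous page for the faulting address and lock it itself before handing it to migrate_vma(), which expects destination pages to be locked (hence MIGRATE_PFN_LOCKED). A sketch of the equivalent open-coded sequence, assuming a fault-path vma and addr:

	struct page *dpage;

	dpage = alloc_page_vma(GFP_HIGHUSER, vma, addr);	/* may fail under memory pressure */
	if (dpage)
		lock_page(dpage);	/* migrate_vma() requires the destination page locked */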
@@ -194,7 +197,7 @@ nouveau_dmem_fault_alloc_and_copy(struct vm_area_struct *vma,
dst_addr = fault->dma[fault->npages++];
- chunk = (void *)hmm_devmem_page_get_drvdata(spage);
+ chunk = spage->zone_device_data;
src_addr = page_to_pfn(spage) - chunk->pfn_first;
src_addr = (src_addr << PAGE_SHIFT) + chunk->bo->bo.offset;
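hmm_devmem_page_set_drvdata()/get_drvdata() stored the owning chunk behind an unsigned long cast; ZONE_DEVICE pages now expose the same slot directly as page->zone_device_data, so both directions become plain assignments. Illustrative only:

	/* at chunk setup: tag each device page with its owning chunk */
	page->zone_device_data = chunk;

	/* in the free/fault/convert paths: recover the chunk from the page */
	struct nouveau_dmem_chunk *chunk = page->zone_device_data;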
@@ -259,29 +262,21 @@ static const struct migrate_vma_ops nouveau_dmem_fault_migrate_ops = {
.finalize_and_map = nouveau_dmem_fault_finalize_and_map,
};
-static vm_fault_t
-nouveau_dmem_fault(struct hmm_devmem *devmem,
- struct vm_area_struct *vma,
- unsigned long addr,
- const struct page *page,
- unsigned int flags,
- pmd_t *pmdp)
+static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
{
- struct drm_device *drm_dev = dev_get_drvdata(devmem->device);
+ struct nouveau_dmem *dmem = page_to_dmem(vmf->page);
unsigned long src[1] = {0}, dst[1] = {0};
- struct nouveau_dmem_fault fault = {0};
+ struct nouveau_dmem_fault fault = { .drm = dmem->drm };
int ret;
-
-
/*
* FIXME what we really want is to find some heuristic to migrate more
* than just one page on CPU fault. When such fault happens it is very
* likely that more surrounding page will CPU fault too.
*/
- fault.drm = nouveau_drm(drm_dev);
- ret = migrate_vma(&nouveau_dmem_fault_migrate_ops, vma, addr,
- addr + PAGE_SIZE, src, dst, &fault);
+ ret = migrate_vma(&nouveau_dmem_fault_migrate_ops, vmf->vma,
+ vmf->address, vmf->address + PAGE_SIZE,
+ src, dst, &fault);
if (ret)
return VM_FAULT_SIGBUS;
@@ -291,10 +286,9 @@ nouveau_dmem_fault(struct hmm_devmem *devmem,
return 0;
}
-static const struct hmm_devmem_ops
-nouveau_dmem_devmem_ops = {
- .free = nouveau_dmem_free,
- .fault = nouveau_dmem_fault,
+static const struct dev_pagemap_ops nouveau_dmem_pagemap_ops = {
+ .page_free = nouveau_dmem_page_free,
+ .migrate_to_ram = nouveau_dmem_migrate_to_ram,
};
static int
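With hmm_devmem_ops gone, the CPU fault path is expressed as the generic dev_pagemap_ops->migrate_to_ram() callback: it receives only a struct vm_fault, the device page is vmf->page, and the driver state comes from the page via page_to_dmem(). A condensed sketch of the shape of such a handler on top of the migrate_vma() API used here (my_* names are illustrative; the real callback is nouveau_dmem_migrate_to_ram() above, which passes its own fault-tracking structure as the private argument):

static vm_fault_t my_migrate_to_ram(struct vm_fault *vmf)
{
	/* vmf->page is the MEMORY_DEVICE_PRIVATE page the CPU touched */
	struct my_dev *mydev = page_to_my_dev(vmf->page);
	unsigned long src = 0, dst = 0;

	/* migrate the single faulting page back to system memory */
	if (migrate_vma(&my_migrate_ops, vmf->vma, vmf->address,
			vmf->address + PAGE_SIZE, &src, &dst, mydev))
		return VM_FAULT_SIGBUS;
	return 0;
}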
@@ -580,7 +574,8 @@ void
nouveau_dmem_init(struct nouveau_drm *drm)
{
struct device *device = drm->dev->dev;
- unsigned long i, size;
+ struct resource *res;
+ unsigned long i, size, pfn_first;
int ret;
/* This only make sense on PASCAL or newer */
@@ -590,6 +585,7 @@ nouveau_dmem_init(struct nouveau_drm *drm)
if (!(drm->dmem = kzalloc(sizeof(*drm->dmem), GFP_KERNEL)))
return;
+ drm->dmem->drm = drm;
mutex_init(&drm->dmem->mutex);
INIT_LIST_HEAD(&drm->dmem->chunk_free);
INIT_LIST_HEAD(&drm->dmem->chunk_full);
@@ -599,11 +595,8 @@ nouveau_dmem_init(struct nouveau_drm *drm)
/* Initialize migration dma helpers before registering memory */
ret = nouveau_dmem_migrate_init(drm);
- if (ret) {
- kfree(drm->dmem);
- drm->dmem = NULL;
- return;
- }
+ if (ret)
+ goto out_free;
/*
* FIXME we need some kind of policy to decide how much VRAM we
@@ -611,14 +604,16 @@ nouveau_dmem_init(struct nouveau_drm *drm)
* and latter if we want to do thing like over commit then we
* could revisit this.
*/
- drm->dmem->devmem = hmm_devmem_add(&nouveau_dmem_devmem_ops,
- device, size);
- if (IS_ERR(drm->dmem->devmem)) {
- kfree(drm->dmem);
- drm->dmem = NULL;
- return;
- }
-
+ res = devm_request_free_mem_region(device, &iomem_resource, size);
+ if (IS_ERR(res))
+ goto out_free;
+ drm->dmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
+ drm->dmem->pagemap.res = *res;
+ drm->dmem->pagemap.ops = &nouveau_dmem_pagemap_ops;
+ if (IS_ERR(devm_memremap_pages(device, &drm->dmem->pagemap)))
+ goto out_free;
+
+ pfn_first = res->start >> PAGE_SHIFT;
for (i = 0; i < (size / DMEM_CHUNK_SIZE); ++i) {
struct nouveau_dmem_chunk *chunk;
struct page *page;
@@ -631,17 +626,19 @@ nouveau_dmem_init(struct nouveau_drm *drm)
}
chunk->drm = drm;
- chunk->pfn_first = drm->dmem->devmem->pfn_first;
- chunk->pfn_first += (i * DMEM_CHUNK_NPAGES);
+ chunk->pfn_first = pfn_first + (i * DMEM_CHUNK_NPAGES);
list_add_tail(&chunk->list, &drm->dmem->chunk_empty);
page = pfn_to_page(chunk->pfn_first);
- for (j = 0; j < DMEM_CHUNK_NPAGES; ++j, ++page) {
- hmm_devmem_page_set_drvdata(page, (long)chunk);
- }
+ for (j = 0; j < DMEM_CHUNK_NPAGES; ++j, ++page)
+ page->zone_device_data = chunk;
}
NV_INFO(drm, "DMEM: registered %ldMB of device memory\n", size >> 20);
+ return;
+out_free:
+ kfree(drm->dmem);
+ drm->dmem = NULL;
}
static void
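hmm_devmem_add() used to reserve a physical address range and create the struct pages in one call; nouveau_dmem_init() now does both steps explicitly with devm_request_free_mem_region() and devm_memremap_pages(), and derives pfn_first from the reserved resource instead of reading it out of struct hmm_devmem. A condensed sketch of the registration sequence (error handling trimmed; pagemap stands for &drm->dmem->pagemap, and the field names follow the struct dev_pagemap of this era, where the range is still described by a struct resource):

	struct resource *res;

	/* reserve an unused physical address range to back the device pages */
	res = devm_request_free_mem_region(device, &iomem_resource, size);
	if (IS_ERR(res))
		return PTR_ERR(res);

	pagemap->type = MEMORY_DEVICE_PRIVATE;
	pagemap->res = *res;
	pagemap->ops = &nouveau_dmem_pagemap_ops;

	/* create struct pages for the range; torn down automatically via devm */
	if (IS_ERR(devm_memremap_pages(device, pagemap)))
		return -ENOMEM;

	/* first device pfn, replacing hmm_devmem::pfn_first */
	pfn_first = res->start >> PAGE_SHIFT;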
@@ -697,7 +694,7 @@ nouveau_dmem_migrate_alloc_and_copy(struct vm_area_struct *vma,
if (!dpage || dst_pfns[i] == MIGRATE_PFN_ERROR)
continue;
- chunk = (void *)hmm_devmem_page_get_drvdata(dpage);
+ chunk = dpage->zone_device_data;
dst_addr = page_to_pfn(dpage) - chunk->pfn_first;
dst_addr = (dst_addr << PAGE_SHIFT) + chunk->bo->bo.offset;
@@ -832,13 +829,7 @@ out:
static inline bool
nouveau_dmem_page(struct nouveau_drm *drm, struct page *page)
{
- if (!is_device_private_page(page))
- return false;
-
- if (drm->dmem->devmem != page->pgmap->data)
- return false;
-
- return true;
+ return is_device_private_page(page) && drm->dmem == page_to_dmem(page);
}
void
@@ -867,7 +858,7 @@ nouveau_dmem_convert_pfn(struct nouveau_drm *drm,
continue;
}
- chunk = (void *)hmm_devmem_page_get_drvdata(page);
+ chunk = page->zone_device_data;
addr = page_to_pfn(page) - chunk->pfn_first;
addr = (addr + chunk->bo->bo.mem.start) << PAGE_SHIFT;
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index 93ed43c413f0..8c92374afcf2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -649,7 +649,7 @@ nouveau_svm_fault(struct nvif_notify *notify)
range.values = nouveau_svm_pfn_values;
range.pfn_shift = NVIF_VMM_PFNMAP_V0_ADDR_SHIFT;
again:
- ret = hmm_vma_fault(&range, true);
+ ret = hmm_vma_fault(&svmm->mirror, &range, true);
if (ret == 0) {
mutex_lock(&svmm->mutex);
if (!hmm_vma_range_done(&range)) {