author    Ben Skeggs <bskeggs@redhat.com>    2017-11-01 03:56:19 +1000
committer Ben Skeggs <bskeggs@redhat.com>    2017-11-02 13:32:22 +1000
commit    19a82e492c3d71efe8763d50496a1701dfcf3f15 (patch)
tree      d66ab0ad5c2a3f4e6e7af75b88a6c52632e4c31a /drivers/gpu/drm/nouveau/nvkm
parent    7f53d6dc9a728d54cc4e1d70259376e09675f924 (diff)
download  linux-19a82e492c3d71efe8763d50496a1701dfcf3f15.tar.bz2
drm/nouveau/core/memory: change map interface to support upcoming mmu changes
Map flags (access, kind, etc.) are currently defined in either the VMA or the memory object, which turns out not to be ideal for things like suballocated buffers. These will become per-map flags instead, so we need to support passing these arguments in nvkm_memory_map().

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
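To make the interface change concrete, here is a minimal before/after sketch of a call site (variable names are illustrative; the prototypes match the hunks in the diff below). The old form took only the destination VMA and an offset, with access/kind flags implied by the VMA or the memory object; the new form takes an offset into the memory object, the target VMM, the VMA, and an opaque argv/argc buffer for per-map arguments, and returns an error code that callers must check.

	/* before: map flags come from the VMA/memory object */
	nvkm_memory_map(memory, &vma, 0);

	/* after: per-map arguments passed explicitly, result checked */
	ret = nvkm_memory_map(memory, 0 /* offset */, vmm, &vma, NULL /* argv */, 0 /* argc */);
	if (ret)
		return ret;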
Diffstat (limited to 'drivers/gpu/drm/nouveau/nvkm')
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c              43
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c         3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c         3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c  15
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c  15
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c          11
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c      9
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c       8
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c      9
9 files changed, 75 insertions(+), 41 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c b/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
index 04e5b2136f0c..1264d5fc632b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c
@@ -42,6 +42,14 @@ nvkm_gpuobj_wr32_fast(struct nvkm_gpuobj *gpuobj, u32 offset, u32 data)
}
/* accessor functions for gpuobjs allocated directly from instmem */
+static int
+nvkm_gpuobj_heap_map(struct nvkm_gpuobj *gpuobj, u64 offset,
+ struct nvkm_vmm *vmm, struct nvkm_vma *vma,
+ void *argv, u32 argc)
+{
+ return nvkm_memory_map(gpuobj->memory, offset, vmm, vma, argv, argc);
+}
+
static u32
nvkm_gpuobj_heap_rd32(struct nvkm_gpuobj *gpuobj, u32 offset)
{
@@ -67,6 +75,7 @@ nvkm_gpuobj_heap_fast = {
.release = nvkm_gpuobj_heap_release,
.rd32 = nvkm_gpuobj_rd32_fast,
.wr32 = nvkm_gpuobj_wr32_fast,
+ .map = nvkm_gpuobj_heap_map,
};
static const struct nvkm_gpuobj_func
@@ -74,6 +83,7 @@ nvkm_gpuobj_heap_slow = {
.release = nvkm_gpuobj_heap_release,
.rd32 = nvkm_gpuobj_heap_rd32,
.wr32 = nvkm_gpuobj_heap_wr32,
+ .map = nvkm_gpuobj_heap_map,
};
static void *
@@ -90,9 +100,19 @@ nvkm_gpuobj_heap_acquire(struct nvkm_gpuobj *gpuobj)
static const struct nvkm_gpuobj_func
nvkm_gpuobj_heap = {
.acquire = nvkm_gpuobj_heap_acquire,
+ .map = nvkm_gpuobj_heap_map,
};
/* accessor functions for gpuobjs sub-allocated from a parent gpuobj */
+static int
+nvkm_gpuobj_map(struct nvkm_gpuobj *gpuobj, u64 offset,
+ struct nvkm_vmm *vmm, struct nvkm_vma *vma,
+ void *argv, u32 argc)
+{
+ return nvkm_memory_map(gpuobj->parent, gpuobj->node->offset + offset,
+ vmm, vma, argv, argc);
+}
+
static u32
nvkm_gpuobj_rd32(struct nvkm_gpuobj *gpuobj, u32 offset)
{
@@ -118,6 +138,7 @@ nvkm_gpuobj_fast = {
.release = nvkm_gpuobj_release,
.rd32 = nvkm_gpuobj_rd32_fast,
.wr32 = nvkm_gpuobj_wr32_fast,
+ .map = nvkm_gpuobj_map,
};
static const struct nvkm_gpuobj_func
@@ -125,6 +146,7 @@ nvkm_gpuobj_slow = {
.release = nvkm_gpuobj_release,
.rd32 = nvkm_gpuobj_rd32,
.wr32 = nvkm_gpuobj_wr32,
+ .map = nvkm_gpuobj_map,
};
static void *
@@ -143,6 +165,7 @@ nvkm_gpuobj_acquire(struct nvkm_gpuobj *gpuobj)
static const struct nvkm_gpuobj_func
nvkm_gpuobj_func = {
.acquire = nvkm_gpuobj_acquire,
+ .map = nvkm_gpuobj_map,
};
static int
@@ -218,26 +241,6 @@ nvkm_gpuobj_new(struct nvkm_device *device, u32 size, int align, bool zero,
return ret;
}
-int
-nvkm_gpuobj_map(struct nvkm_gpuobj *gpuobj, struct nvkm_vm *vm,
- u32 access, struct nvkm_vma *vma)
-{
- struct nvkm_memory *memory = gpuobj->memory;
- int ret = nvkm_vm_get(vm, gpuobj->size, 12, access, vma);
- if (ret == 0)
- nvkm_memory_map(memory, vma, 0);
- return ret;
-}
-
-void
-nvkm_gpuobj_unmap(struct nvkm_vma *vma)
-{
- if (vma->node) {
- nvkm_vm_unmap(vma);
- nvkm_vm_put(vma);
- }
-}
-
/* the below is basically only here to support sharing the paged dma object
* for PCI(E)GART on <=nv4x chipsets, and should *not* be expected to work
* anywhere else.
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
index b1ab34f1a0e9..14b1a616d26a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
@@ -591,8 +591,7 @@ gf100_fifo_oneinit(struct nvkm_fifo *base)
if (ret)
return ret;
- nvkm_memory_map(fifo->user.mem, &fifo->user.bar, 0);
- return 0;
+ return nvkm_memory_map(fifo->user.mem, 0, bar, &fifo->user.bar, NULL, 0);
}
static void
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index cdadff59bb4c..9629416f4947 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -841,8 +841,7 @@ gk104_fifo_oneinit(struct nvkm_fifo *base)
if (ret)
return ret;
- nvkm_memory_map(fifo->user.mem, &fifo->user.bar, 0);
- return 0;
+ return nvkm_memory_map(fifo->user.mem, 0, bar, &fifo->user.bar, NULL, 0);
}
static void
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c
index f9e0377d3d24..6343e0c84d96 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c
@@ -126,7 +126,11 @@ gf100_fifo_gpfifo_engine_dtor(struct nvkm_fifo_chan *base,
struct nvkm_engine *engine)
{
struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
- nvkm_gpuobj_unmap(&chan->engn[engine->subdev.index].vma);
+ struct nvkm_vma *vma = &chan->engn[engine->subdev.index].vma;
+ if (vma->vm) {
+ nvkm_vm_unmap(vma);
+ nvkm_vm_put(vma);
+ }
nvkm_gpuobj_del(&chan->engn[engine->subdev.index].inst);
}
@@ -146,8 +150,13 @@ gf100_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *base,
if (ret)
return ret;
- return nvkm_gpuobj_map(chan->engn[engn].inst, chan->vm,
- NV_MEM_ACCESS_RW, &chan->engn[engn].vma);
+ ret = nvkm_vm_get(chan->vm, chan->engn[engn].inst->size, 12,
+ NV_MEM_ACCESS_RW, &chan->engn[engn].vma);
+ if (ret)
+ return ret;
+
+ return nvkm_memory_map(chan->engn[engn].inst, 0, chan->vm,
+ &chan->engn[engn].vma, NULL, 0);
}
static void
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
index 8abf6f8ef445..6e93b8652c66 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
@@ -138,7 +138,11 @@ gk104_fifo_gpfifo_engine_dtor(struct nvkm_fifo_chan *base,
struct nvkm_engine *engine)
{
struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
- nvkm_gpuobj_unmap(&chan->engn[engine->subdev.index].vma);
+ struct nvkm_vma *vma = &chan->engn[engine->subdev.index].vma;
+ if (vma->vm) {
+ nvkm_vm_unmap(vma);
+ nvkm_vm_put(vma);
+ }
nvkm_gpuobj_del(&chan->engn[engine->subdev.index].inst);
}
@@ -158,8 +162,13 @@ gk104_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *base,
if (ret)
return ret;
- return nvkm_gpuobj_map(chan->engn[engn].inst, chan->vm,
- NV_MEM_ACCESS_RW, &chan->engn[engn].vma);
+ ret = nvkm_vm_get(chan->vm, chan->engn[engn].inst->size, 12,
+ NV_MEM_ACCESS_RW, &chan->engn[engn].vma);
+ if (ret)
+ return ret;
+
+ return nvkm_memory_map(chan->engn[engn].inst, 0, chan->vm,
+ &chan->engn[engn].vma, NULL, 0);
}
static void
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 49c7d08b0ceb..0c4ca0fa48cc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -403,7 +403,10 @@ gf100_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
if (ret)
return ret;
- nvkm_memory_map(chan->mmio, &chan->mmio_vma, 0);
+ ret = nvkm_memory_map(chan->mmio, 0, fifoch->vm,
+ &chan->mmio_vma, NULL, 0);
+ if (ret)
+ return ret;
/* allocate buffers referenced by mmio list */
for (i = 0; data->size && i < ARRAY_SIZE(gr->mmio_data); i++) {
@@ -419,7 +422,11 @@ gf100_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
if (ret)
return ret;
- nvkm_memory_map(chan->data[i].mem, &chan->data[i].vma, 0);
+ ret = nvkm_memory_map(chan->data[i].mem, 0, fifoch->vm,
+ &chan->data[i].vma, NULL, 0);
+ if (ret)
+ return ret;
+
data++;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
index f2879c48d1e6..73a9511c3101 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
@@ -272,12 +272,13 @@ gk20a_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
node->vaddr[offset / 4] = data;
}
-static void
-gk20a_instobj_map(struct nvkm_memory *memory, struct nvkm_vma *vma, u64 offset)
+static int
+gk20a_instobj_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
+ struct nvkm_vma *vma, void *argv, u32 argc)
{
struct gk20a_instobj *node = gk20a_instobj(memory);
-
- nvkm_vm_map_at(vma, offset, &node->mem);
+ nvkm_vm_map_at(vma, 0, &node->mem);
+ return 0;
}
static void *
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
index bb524e3aa5d9..657257daa74d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
@@ -159,7 +159,7 @@ nv50_instobj_kmap(struct nv50_instobj *iobj, struct nvkm_vmm *vmm)
}
if (ret == 0)
- nvkm_memory_map(memory, &bar, 0);
+ ret = nvkm_memory_map(memory, 0, vmm, &bar, NULL, 0);
mutex_lock(&subdev->mutex);
if (ret || iobj->bar.node) {
/* We either failed, or another thread beat us. */
@@ -179,11 +179,13 @@ nv50_instobj_kmap(struct nv50_instobj *iobj, struct nvkm_vmm *vmm)
}
}
-static void
-nv50_instobj_map(struct nvkm_memory *memory, struct nvkm_vma *vma, u64 offset)
+static int
+nv50_instobj_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
+ struct nvkm_vma *vma, void *argv, u32 argc)
{
struct nv50_instobj *iobj = nv50_instobj(memory);
nvkm_vm_map_at(vma, offset, iobj->mem);
+ return 0;
}
static void
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c
index 73ca1203281d..d35c041fdcec 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c
@@ -48,12 +48,16 @@ gm200_secboot_run_blob(struct nvkm_secboot *sb, struct nvkm_gpuobj *blob,
return ret;
/* Map the HS firmware so the HS bootloader can see it */
- ret = nvkm_gpuobj_map(blob, gsb->vm, NV_MEM_ACCESS_RW, &vma);
+ ret = nvkm_vm_get(gsb->vm, blob->size, 12, NV_MEM_ACCESS_RW, &vma);
if (ret) {
nvkm_falcon_put(falcon, subdev);
return ret;
}
+ ret = nvkm_memory_map(blob, 0, gsb->vm, &vma, NULL, 0);
+ if (ret)
+ goto end;
+
/* Reset and set the falcon up */
ret = nvkm_falcon_reset(falcon);
if (ret)
@@ -91,7 +95,8 @@ end:
nvkm_mc_intr_mask(sb->subdev.device, falcon->owner->index, true);
/* We don't need the ACR firmware anymore */
- nvkm_gpuobj_unmap(&vma);
+ nvkm_vm_unmap(&vma);
+ nvkm_vm_put(&vma);
nvkm_falcon_put(falcon, subdev);
return ret;