Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 343
1 file changed, 156 insertions(+), 187 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index e4424b4db5d3..749420f1ea6f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -126,19 +126,6 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
return 0;
}
-static void amdgpu_job_work_func(struct work_struct *work)
-{
- struct amdgpu_cs_parser *sched_job =
- container_of(work, struct amdgpu_cs_parser,
- job_work);
- mutex_lock(&sched_job->job_lock);
- if (sched_job->free_job)
- sched_job->free_job(sched_job);
- mutex_unlock(&sched_job->job_lock);
- /* after processing job, free memory */
- fence_put(&sched_job->s_fence->base);
- kfree(sched_job);
-}
struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev,
struct drm_file *filp,
struct amdgpu_ctx *ctx,
@@ -157,10 +144,6 @@ struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev,
parser->ctx = ctx;
parser->ibs = ibs;
parser->num_ibs = num_ibs;
- if (amdgpu_enable_scheduler) {
- mutex_init(&parser->job_lock);
- INIT_WORK(&parser->job_work, amdgpu_job_work_func);
- }
for (i = 0; i < num_ibs; i++)
ibs[i].ctx = ctx;
@@ -171,56 +154,41 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
{
union drm_amdgpu_cs *cs = data;
uint64_t *chunk_array_user;
- uint64_t *chunk_array = NULL;
+ uint64_t *chunk_array;
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
- struct amdgpu_bo_list *bo_list = NULL;
unsigned size, i;
- int r = 0;
+ int ret;
- if (!cs->in.num_chunks)
- goto out;
+ if (cs->in.num_chunks == 0)
+ return 0;
+
+ chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
+ if (!chunk_array)
+ return -ENOMEM;
p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
if (!p->ctx) {
- r = -EINVAL;
- goto out;
- }
- bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
- if (!amdgpu_enable_scheduler)
- p->bo_list = bo_list;
- else {
- if (bo_list && !bo_list->has_userptr) {
- p->bo_list = amdgpu_bo_list_clone(bo_list);
- amdgpu_bo_list_put(bo_list);
- if (!p->bo_list)
- return -ENOMEM;
- } else if (bo_list && bo_list->has_userptr)
- p->bo_list = bo_list;
- else
- p->bo_list = NULL;
+ ret = -EINVAL;
+ goto free_chunk;
}
+ p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
+
/* get chunks */
INIT_LIST_HEAD(&p->validated);
- chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
- if (chunk_array == NULL) {
- r = -ENOMEM;
- goto out;
- }
-
chunk_array_user = (uint64_t __user *)(cs->in.chunks);
if (copy_from_user(chunk_array, chunk_array_user,
sizeof(uint64_t)*cs->in.num_chunks)) {
- r = -EFAULT;
- goto out;
+ ret = -EFAULT;
+ goto put_bo_list;
}
p->nchunks = cs->in.num_chunks;
p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
GFP_KERNEL);
- if (p->chunks == NULL) {
- r = -ENOMEM;
- goto out;
+ if (!p->chunks) {
+ ret = -ENOMEM;
+ goto put_bo_list;
}
for (i = 0; i < p->nchunks; i++) {
@@ -231,8 +199,9 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
chunk_ptr = (void __user *)chunk_array[i];
if (copy_from_user(&user_chunk, chunk_ptr,
sizeof(struct drm_amdgpu_cs_chunk))) {
- r = -EFAULT;
- goto out;
+ ret = -EFAULT;
+ i--;
+ goto free_partial_kdata;
}
p->chunks[i].chunk_id = user_chunk.chunk_id;
p->chunks[i].length_dw = user_chunk.length_dw;
@@ -243,13 +212,14 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
if (p->chunks[i].kdata == NULL) {
- r = -ENOMEM;
- goto out;
+ ret = -ENOMEM;
+ i--;
+ goto free_partial_kdata;
}
size *= sizeof(uint32_t);
if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
- r = -EFAULT;
- goto out;
+ ret = -EFAULT;
+ goto free_partial_kdata;
}
switch (p->chunks[i].chunk_id) {
@@ -269,15 +239,15 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
gobj = drm_gem_object_lookup(p->adev->ddev,
p->filp, handle);
if (gobj == NULL) {
- r = -EINVAL;
- goto out;
+ ret = -EINVAL;
+ goto free_partial_kdata;
}
p->uf.bo = gem_to_amdgpu_bo(gobj);
p->uf.offset = fence_data->offset;
} else {
- r = -EINVAL;
- goto out;
+ ret = -EINVAL;
+ goto free_partial_kdata;
}
break;
@@ -285,19 +255,35 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
break;
default:
- r = -EINVAL;
- goto out;
+ ret = -EINVAL;
+ goto free_partial_kdata;
}
}
- p->ibs = kmalloc_array(p->num_ibs, sizeof(struct amdgpu_ib), GFP_KERNEL);
- if (!p->ibs)
- r = -ENOMEM;
+ p->ibs = kcalloc(p->num_ibs, sizeof(struct amdgpu_ib), GFP_KERNEL);
+ if (!p->ibs) {
+ ret = -ENOMEM;
+ goto free_all_kdata;
+ }
-out:
kfree(chunk_array);
- return r;
+ return 0;
+
+free_all_kdata:
+ i = p->nchunks - 1;
+free_partial_kdata:
+ for (; i >= 0; i--)
+ drm_free_large(p->chunks[i].kdata);
+ kfree(p->chunks);
+put_bo_list:
+ if (p->bo_list)
+ amdgpu_bo_list_put(p->bo_list);
+ amdgpu_ctx_put(p->ctx);
+free_chunk:
+ kfree(chunk_array);
+
+ return ret;
}
/* Returns how many bytes TTM can move per IB.
@@ -352,25 +338,17 @@ static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev)
return max(bytes_moved_threshold, 1024*1024ull);
}
-int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p)
+int amdgpu_cs_list_validate(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ struct list_head *validated)
{
- struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
- struct amdgpu_vm *vm = &fpriv->vm;
- struct amdgpu_device *adev = p->adev;
struct amdgpu_bo_list_entry *lobj;
- struct list_head duplicates;
struct amdgpu_bo *bo;
u64 bytes_moved = 0, initial_bytes_moved;
u64 bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(adev);
int r;
- INIT_LIST_HEAD(&duplicates);
- r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true, &duplicates);
- if (unlikely(r != 0)) {
- return r;
- }
-
- list_for_each_entry(lobj, &p->validated, tv.head) {
+ list_for_each_entry(lobj, validated, tv.head) {
bo = lobj->robj;
if (!bo->pin_count) {
u32 domain = lobj->prefered_domains;
@@ -385,7 +363,7 @@ int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p)
* into account. We don't want to disallow buffer moves
* completely.
*/
- if (current_domain != AMDGPU_GEM_DOMAIN_CPU &&
+ if ((lobj->allowed_domains & current_domain) != 0 &&
(domain & current_domain) == 0 && /* will be moved */
bytes_moved > bytes_moved_threshold) {
/* don't move it */
@@ -404,7 +382,6 @@ int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p)
domain = lobj->allowed_domains;
goto retry;
}
- ttm_eu_backoff_reservation(&p->ticket, &p->validated);
return r;
}
}
@@ -417,6 +394,7 @@ static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p)
{
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
struct amdgpu_cs_buckets buckets;
+ struct list_head duplicates;
bool need_mmap_lock = false;
int i, r;
@@ -436,8 +414,22 @@ static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p)
if (need_mmap_lock)
down_read(&current->mm->mmap_sem);
- r = amdgpu_cs_list_validate(p);
+ INIT_LIST_HEAD(&duplicates);
+ r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true, &duplicates);
+ if (unlikely(r != 0))
+ goto error_reserve;
+
+ r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &p->validated);
+ if (r)
+ goto error_validate;
+ r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &duplicates);
+
+error_validate:
+ if (r)
+ ttm_eu_backoff_reservation(&p->ticket, &p->validated);
+
+error_reserve:
if (need_mmap_lock)
up_read(&current->mm->mmap_sem);
@@ -498,25 +490,24 @@ static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser)
unsigned i;
if (parser->ctx)
amdgpu_ctx_put(parser->ctx);
- if (parser->bo_list) {
- if (amdgpu_enable_scheduler && !parser->bo_list->has_userptr)
- amdgpu_bo_list_free(parser->bo_list);
- else
- amdgpu_bo_list_put(parser->bo_list);
- }
+ if (parser->bo_list)
+ amdgpu_bo_list_put(parser->bo_list);
+
drm_free_large(parser->vm_bos);
for (i = 0; i < parser->nchunks; i++)
drm_free_large(parser->chunks[i].kdata);
kfree(parser->chunks);
- if (parser->ibs)
- for (i = 0; i < parser->num_ibs; i++)
- amdgpu_ib_free(parser->adev, &parser->ibs[i]);
- kfree(parser->ibs);
- if (parser->uf.bo)
- drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base);
-
if (!amdgpu_enable_scheduler)
- kfree(parser);
+ {
+ if (parser->ibs)
+ for (i = 0; i < parser->num_ibs; i++)
+ amdgpu_ib_free(parser->adev, &parser->ibs[i]);
+ kfree(parser->ibs);
+ if (parser->uf.bo)
+ drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base);
+ }
+
+ kfree(parser);
}
/**
@@ -533,12 +524,6 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
amdgpu_cs_parser_fini_late(parser);
}
-static int amdgpu_cs_parser_free_job(struct amdgpu_cs_parser *sched_job)
-{
- amdgpu_cs_parser_fini_late(sched_job);
- return 0;
-}
-
static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
struct amdgpu_vm *vm)
{
@@ -810,68 +795,16 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
return 0;
}
-static int amdgpu_cs_parser_prepare_job(struct amdgpu_cs_parser *sched_job)
+static int amdgpu_cs_free_job(struct amdgpu_job *job)
{
- int r, i;
- struct amdgpu_cs_parser *parser = sched_job;
- struct amdgpu_device *adev = sched_job->adev;
- bool reserved_buffers = false;
-
- r = amdgpu_cs_parser_relocs(parser);
- if (r) {
- if (r != -ERESTARTSYS) {
- if (r == -ENOMEM)
- DRM_ERROR("Not enough memory for command submission!\n");
- else
- DRM_ERROR("Failed to process the buffer list %d!\n", r);
- }
- }
-
- if (!r) {
- reserved_buffers = true;
- r = amdgpu_cs_ib_fill(adev, parser);
- }
- if (!r) {
- r = amdgpu_cs_dependencies(adev, parser);
- if (r)
- DRM_ERROR("Failed in the dependencies handling %d!\n", r);
- }
- if (r) {
- amdgpu_cs_parser_fini(parser, r, reserved_buffers);
- return r;
- }
-
- for (i = 0; i < parser->num_ibs; i++)
- trace_amdgpu_cs(parser, i);
-
- r = amdgpu_cs_ib_vm_chunk(adev, parser);
- return r;
-}
-
-static struct amdgpu_ring *amdgpu_cs_parser_get_ring(
- struct amdgpu_device *adev,
- struct amdgpu_cs_parser *parser)
-{
- int i, r;
-
- struct amdgpu_cs_chunk *chunk;
- struct drm_amdgpu_cs_chunk_ib *chunk_ib;
- struct amdgpu_ring *ring;
- for (i = 0; i < parser->nchunks; i++) {
- chunk = &parser->chunks[i];
- chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata;
-
- if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
- continue;
-
- r = amdgpu_cs_get_ring(adev, chunk_ib->ip_type,
- chunk_ib->ip_instance, chunk_ib->ring,
- &ring);
- if (r)
- return NULL;
- break;
- }
- return ring;
+ int i;
+ if (job->ibs)
+ for (i = 0; i < job->num_ibs; i++)
+ amdgpu_ib_free(job->adev, &job->ibs[i]);
+ kfree(job->ibs);
+ if (job->uf.bo)
+ drm_gem_object_unreference_unlocked(&job->uf.bo->gem_base);
+ return 0;
}
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
@@ -879,7 +812,8 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
struct amdgpu_device *adev = dev->dev_private;
union drm_amdgpu_cs *cs = data;
struct amdgpu_cs_parser *parser;
- int r;
+ bool reserved_buffers = false;
+ int i, r;
down_read(&adev->exclusive_lock);
if (!adev->accel_working) {
@@ -893,50 +827,85 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
r = amdgpu_cs_parser_init(parser, data);
if (r) {
DRM_ERROR("Failed to initialize parser !\n");
- amdgpu_cs_parser_fini(parser, r, false);
+ kfree(parser);
up_read(&adev->exclusive_lock);
r = amdgpu_cs_handle_lockup(adev, r);
return r;
}
- if (amdgpu_enable_scheduler && parser->num_ibs) {
- struct amdgpu_ring * ring =
- amdgpu_cs_parser_get_ring(adev, parser);
- r = amdgpu_cs_parser_prepare_job(parser);
+ r = amdgpu_cs_parser_relocs(parser);
+ if (r == -ENOMEM)
+ DRM_ERROR("Not enough memory for command submission!\n");
+ else if (r && r != -ERESTARTSYS)
+ DRM_ERROR("Failed to process the buffer list %d!\n", r);
+ else if (!r) {
+ reserved_buffers = true;
+ r = amdgpu_cs_ib_fill(adev, parser);
+ }
+
+ if (!r) {
+ r = amdgpu_cs_dependencies(adev, parser);
if (r)
- goto out;
- parser->ring = ring;
- parser->free_job = amdgpu_cs_parser_free_job;
- mutex_lock(&parser->job_lock);
- r = amd_sched_push_job(ring->scheduler,
- &parser->ctx->rings[ring->idx].entity,
- parser,
- &parser->s_fence);
+ DRM_ERROR("Failed in the dependencies handling %d!\n", r);
+ }
+
+ if (r)
+ goto out;
+
+ for (i = 0; i < parser->num_ibs; i++)
+ trace_amdgpu_cs(parser, i);
+
+ r = amdgpu_cs_ib_vm_chunk(adev, parser);
+ if (r)
+ goto out;
+
+ if (amdgpu_enable_scheduler && parser->num_ibs) {
+ struct amdgpu_job *job;
+ struct amdgpu_ring * ring = parser->ibs->ring;
+ job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
+ if (!job)
+ return -ENOMEM;
+ job->base.sched = &ring->sched;
+ job->base.s_entity = &parser->ctx->rings[ring->idx].entity;
+ job->adev = parser->adev;
+ job->ibs = parser->ibs;
+ job->num_ibs = parser->num_ibs;
+ job->base.owner = parser->filp;
+ mutex_init(&job->job_lock);
+ if (job->ibs[job->num_ibs - 1].user) {
+ memcpy(&job->uf, &parser->uf,
+ sizeof(struct amdgpu_user_fence));
+ job->ibs[job->num_ibs - 1].user = &job->uf;
+ }
+
+ job->free_job = amdgpu_cs_free_job;
+ mutex_lock(&job->job_lock);
+ r = amd_sched_entity_push_job(&job->base);
if (r) {
- mutex_unlock(&parser->job_lock);
+ mutex_unlock(&job->job_lock);
+ amdgpu_cs_free_job(job);
+ kfree(job);
goto out;
}
- parser->ibs[parser->num_ibs - 1].sequence =
+ cs->out.handle =
amdgpu_ctx_add_fence(parser->ctx, ring,
- &parser->s_fence->base,
- parser->s_fence->v_seq);
- cs->out.handle = parser->s_fence->v_seq;
+ &job->base.s_fence->base);
+ parser->ibs[parser->num_ibs - 1].sequence = cs->out.handle;
+
list_sort(NULL, &parser->validated, cmp_size_smaller_first);
ttm_eu_fence_buffer_objects(&parser->ticket,
&parser->validated,
- &parser->s_fence->base);
+ &job->base.s_fence->base);
- mutex_unlock(&parser->job_lock);
+ mutex_unlock(&job->job_lock);
+ amdgpu_cs_parser_fini_late(parser);
up_read(&adev->exclusive_lock);
return 0;
}
- r = amdgpu_cs_parser_prepare_job(parser);
- if (r)
- goto out;
cs->out.handle = parser->ibs[parser->num_ibs - 1].sequence;
out:
- amdgpu_cs_parser_fini(parser, r, true);
+ amdgpu_cs_parser_fini(parser, r, reserved_buffers);
up_read(&adev->exclusive_lock);
r = amdgpu_cs_handle_lockup(adev, r);
return r;