From e686941a32d31d22ce7c8b7faf9cce17816f7c4d Mon Sep 17 00:00:00 2001 From: Monk Liu Date: Mon, 7 Mar 2016 12:49:55 +0800 Subject: drm/amdgpu: use sched_job_init to initialize sched_job Consolidate job initialization in one place rather than duplicating it in multiple places. Signed-off-by: Monk Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_job.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 9c9b19e2f353..eb0f7890401a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -87,16 +87,19 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring, struct amd_sched_entity *entity, void *owner, struct fence **f) { + struct fence *fence; + int r; job->ring = ring; - job->base.sched = &ring->sched; - job->base.s_entity = entity; - job->base.s_fence = amd_sched_fence_create(job->base.s_entity, owner); - if (!job->base.s_fence) - return -ENOMEM; - *f = fence_get(&job->base.s_fence->base); + if (!f) + return -EINVAL; + + r = amd_sched_job_init(&job->base, &ring->sched, entity, owner, &fence); + if (r) + return r; job->owner = owner; + *f = fence_get(fence); amd_sched_entity_push_job(&job->base); return 0; -- cgit v1.2.3 From e472d2588eef38c2f16f71d6160e58fb5948e84f Mon Sep 17 00:00:00 2001 From: Monk Liu Date: Thu, 3 Mar 2016 19:00:50 +0800 Subject: drm/amdgpu: delay job free to when it's finished (v2) for those jobs submitted through scheduler, do not free it immediately after scheduled, instead free it in global workqueue by its sched fence signaling callback function. v2: call uf's bo_undef after job_run() call job's sync free after job_run() no static inline __amdgpu_job_free() anymore, just use kfree(job) to replace it. 
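For readers following the lifetime change above, the sketch below is a minimal user-space analogue of the pattern these two patches introduce: the submitter no longer frees a job that went through the scheduler; instead a callback registered on the job's scheduler fence queues the free once the fence signals, and the job-free helper only frees jobs on the direct-submission path. The names, the single-callback fence and the synchronous "work" handler are simplifications of this sketch, not the kernel API.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fence_cb;
typedef void (*fence_func_t)(struct fence_cb *cb);

struct fence_cb {
	fence_func_t func;		/* runs when the fence signals */
};

struct fence {
	bool signaled;
	struct fence_cb *cb;		/* single callback, for simplicity */
};

struct job {
	bool use_sched;			/* true once handed to the scheduler */
	struct fence s_fence;		/* scheduler fence of this job */
	struct fence_cb cb_free_job;	/* hooks the deferred free */
};

/* Stand-in for the free handler; the driver defers this to a workqueue. */
static void job_free_cb(struct fence_cb *cb)
{
	struct job *job = container_of(cb, struct job, cb_free_job);

	printf("freeing job after its scheduler fence signaled\n");
	free(job);
}

/* push_job analogue: mark the job and hook the fence callback. */
static void push_job(struct job *job)
{
	job->use_sched = true;
	job->cb_free_job.func = job_free_cb;
	job->s_fence.cb = &job->cb_free_job;
}

/* job_free analogue: only the non-scheduler path frees here. */
static void job_free(struct job *job)
{
	if (!job->use_sched)
		free(job);
}

static void fence_signal(struct fence *f)
{
	f->signaled = true;
	if (f->cb)
		f->cb->func(f->cb);
}

int main(void)
{
	struct job *job = calloc(1, sizeof(*job));

	push_job(job);
	job_free(job);			/* no-op: the scheduler owns the free now */
	fence_signal(&job->s_fence);	/* deferred free happens here */
	return 0;
}

The point of the design is that only the scheduler knows when it has stopped touching the job, so the final free has to be driven by the job's fence rather than by the submitter.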
Signed-off-by: Monk Liu Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 - drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 11 ++++++++++- drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 8 ++++++++ drivers/gpu/drm/amd/scheduler/gpu_scheduler.h | 5 ++++- 4 files changed, 22 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_job.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 412fc2f39fa5..9bf72b24495c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -2401,5 +2401,4 @@ amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser, uint64_t addr, struct amdgpu_bo **bo); #include "amdgpu_object.h" - #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index eb0f7890401a..23468088a995 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -28,6 +28,12 @@ #include "amdgpu.h" #include "amdgpu_trace.h" +static void amdgpu_job_free_handler(struct work_struct *ws) +{ + struct amdgpu_job *job = container_of(ws, struct amdgpu_job, base.work_free_job); + kfree(job); +} + int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, struct amdgpu_job **job) { @@ -45,6 +51,7 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, (*job)->adev = adev; (*job)->ibs = (void *)&(*job)[1]; (*job)->num_ibs = num_ibs; + INIT_WORK(&(*job)->base.work_free_job, amdgpu_job_free_handler); amdgpu_sync_create(&(*job)->sync); @@ -80,7 +87,9 @@ void amdgpu_job_free(struct amdgpu_job *job) amdgpu_bo_unref(&job->uf.bo); amdgpu_sync_free(&job->sync); - kfree(job); + + if (!job->base.use_sched) + kfree(job); } int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring, diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c index b9d5822bece8..8d49ea2e4134 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c @@ -319,6 +319,11 @@ static bool amd_sched_entity_in(struct amd_sched_job *sched_job) return added; } +static void amd_sched_free_job(struct fence *f, struct fence_cb *cb) { + struct amd_sched_job *job = container_of(cb, struct amd_sched_job, cb_free_job); + schedule_work(&job->work_free_job); +} + /** * Submit a job to the job queue * @@ -330,6 +335,9 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job) { struct amd_sched_entity *entity = sched_job->s_entity; + sched_job->use_sched = 1; + fence_add_callback(&sched_job->s_fence->base, + &sched_job->cb_free_job, amd_sched_free_job); trace_amd_sched_job(sched_job); wait_event(entity->sched->job_scheduled, amd_sched_entity_in(sched_job)); diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h index 74bbec837f58..ee1e8127f863 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h @@ -37,7 +37,7 @@ extern atomic_t sched_fence_slab_ref; /** * A scheduler entity is a wrapper around a job queue or a group - * of other entities. Entities take turns emitting jobs from their + * of other entities. Entities take turns emitting jobs from their * job queues to corresponding hardware ring based on scheduling * policy. 
*/ @@ -82,6 +82,9 @@ struct amd_sched_job { struct amd_gpu_scheduler *sched; struct amd_sched_entity *s_entity; struct amd_sched_fence *s_fence; + bool use_sched; /* true if the job goes to scheduler */ + struct fence_cb cb_free_job; + struct work_struct work_free_job; }; extern const struct fence_ops amd_sched_fence_ops; -- cgit v1.2.3 From 0de2479c953ae07fd11e7b1bc8d4fc831e6842bb Mon Sep 17 00:00:00 2001 From: Monk Liu Date: Fri, 4 Mar 2016 18:51:02 +0800 Subject: drm/amdgpu: rework TDR in scheduler (v2) Add two callbacks to scheduler to maintain jobs, and invoked for job timeout calculations. Now TDR measures time gap from job is processed by hw. v2: fix typo Signed-off-by: Monk Liu Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 16 +++++++++++- drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 37 +++++++++++++++++++++++++++ drivers/gpu/drm/amd/scheduler/gpu_scheduler.h | 7 +++++ drivers/gpu/drm/amd/scheduler/sched_fence.c | 1 + 6 files changed, 62 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_job.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 9bf72b24495c..ccb28468ece8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -754,6 +754,7 @@ void amdgpu_job_free(struct amdgpu_job *job); int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring, struct amd_sched_entity *entity, void *owner, struct fence **f); +void amdgpu_job_timeout_func(struct work_struct *work); struct amdgpu_ring { struct amdgpu_device *adev; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 23266b454aec..9025671d21c3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -871,6 +871,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, r = amd_sched_job_init(&job->base, &ring->sched, &p->ctx->rings[ring->idx].entity, + amdgpu_job_timeout_func, p->filp, &fence); if (r) { amdgpu_job_free(job); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 23468088a995..961cae4a1955 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -34,6 +34,15 @@ static void amdgpu_job_free_handler(struct work_struct *ws) kfree(job); } +void amdgpu_job_timeout_func(struct work_struct *work) +{ + struct amdgpu_job *job = container_of(work, struct amdgpu_job, base.work_tdr.work); + DRM_ERROR("ring %s timeout, last signaled seq=%u, last emitted seq=%u\n", + job->base.sched->name, + (uint32_t)atomic_read(&job->ring->fence_drv.last_seq), + job->ring->fence_drv.sync_seq); +} + int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, struct amdgpu_job **job) { @@ -103,7 +112,10 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring, if (!f) return -EINVAL; - r = amd_sched_job_init(&job->base, &ring->sched, entity, owner, &fence); + r = amd_sched_job_init(&job->base, &ring->sched, + entity, owner, + amdgpu_job_timeout_func, + &fence); if (r) return r; @@ -180,4 +192,6 @@ err: struct amd_sched_backend_ops amdgpu_sched_ops = { .dependency = amdgpu_job_dependency, .run_job = amdgpu_job_run, + .begin_job = amd_sched_job_begin, + .finish_job = amd_sched_job_finish, }; diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c 
b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c index 9a9fffdc272b..b7e8071448c6 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c @@ -324,6 +324,40 @@ static void amd_sched_free_job(struct fence *f, struct fence_cb *cb) { schedule_work(&job->work_free_job); } +/* job_finish is called after hw fence signaled, and + * the job had already been deleted from ring_mirror_list + */ +void amd_sched_job_finish(struct amd_sched_job *s_job) +{ + struct amd_sched_job *next; + struct amd_gpu_scheduler *sched = s_job->sched; + + if (sched->timeout != MAX_SCHEDULE_TIMEOUT) { + cancel_delayed_work(&s_job->work_tdr); /*TODO: how to deal the case that tdr is running */ + + /* queue TDR for next job */ + next = list_first_entry_or_null(&sched->ring_mirror_list, + struct amd_sched_job, node); + + if (next) { + INIT_DELAYED_WORK(&next->work_tdr, s_job->timeout_callback); + schedule_delayed_work(&next->work_tdr, sched->timeout); + } + } +} + +void amd_sched_job_begin(struct amd_sched_job *s_job) +{ + struct amd_gpu_scheduler *sched = s_job->sched; + + if (sched->timeout != MAX_SCHEDULE_TIMEOUT && + list_first_entry_or_null(&sched->ring_mirror_list, struct amd_sched_job, node) == s_job) + { + INIT_DELAYED_WORK(&s_job->work_tdr, s_job->timeout_callback); + schedule_delayed_work(&s_job->work_tdr, sched->timeout); + } +} + /** * Submit a job to the job queue * @@ -347,6 +381,7 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job) int amd_sched_job_init(struct amd_sched_job *job, struct amd_gpu_scheduler *sched, struct amd_sched_entity *entity, + void (*timeout_cb)(struct work_struct *work), void *owner, struct fence **fence) { INIT_LIST_HEAD(&job->node); @@ -357,6 +392,7 @@ int amd_sched_job_init(struct amd_sched_job *job, return -ENOMEM; job->s_fence->s_job = job; + job->timeout_callback = timeout_cb; if (fence) *fence = &job->s_fence->base; @@ -415,6 +451,7 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb) /* remove job from ring_mirror_list */ spin_lock_irqsave(&sched->job_list_lock, flags); list_del_init(&s_fence->s_job->node); + sched->ops->finish_job(s_fence->s_job); spin_unlock_irqrestore(&sched->job_list_lock, flags); amd_sched_fence_signal(s_fence); diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h index b26148d24a3d..a5700aded5bf 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h @@ -85,6 +85,8 @@ struct amd_sched_job { struct fence_cb cb_free_job; struct work_struct work_free_job; struct list_head node; + struct delayed_work work_tdr; + void (*timeout_callback) (struct work_struct *work); }; extern const struct fence_ops amd_sched_fence_ops; @@ -105,6 +107,8 @@ static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f) struct amd_sched_backend_ops { struct fence *(*dependency)(struct amd_sched_job *sched_job); struct fence *(*run_job)(struct amd_sched_job *sched_job); + void (*begin_job)(struct amd_sched_job *sched_job); + void (*finish_job)(struct amd_sched_job *sched_job); }; enum amd_sched_priority { @@ -150,7 +154,10 @@ void amd_sched_fence_signal(struct amd_sched_fence *fence); int amd_sched_job_init(struct amd_sched_job *job, struct amd_gpu_scheduler *sched, struct amd_sched_entity *entity, + void (*timeout_cb)(struct work_struct *work), void *owner, struct fence **fence); void amd_sched_job_pre_schedule(struct amd_gpu_scheduler *sched , struct amd_sched_job *s_job); 
+void amd_sched_job_finish(struct amd_sched_job *s_job); +void amd_sched_job_begin(struct amd_sched_job *s_job); #endif diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c index 33ddd38185d5..2a732c490375 100644 --- a/drivers/gpu/drm/amd/scheduler/sched_fence.c +++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c @@ -63,6 +63,7 @@ void amd_sched_job_pre_schedule(struct amd_gpu_scheduler *sched , unsigned long flags; spin_lock_irqsave(&sched->job_list_lock, flags); list_add_tail(&s_job->node, &sched->ring_mirror_list); + sched->ops->begin_job(s_job); spin_unlock_irqrestore(&sched->job_list_lock, flags); } -- cgit v1.2.3 From b6723c8da55af5309cf06e71a5228f3c02846c5a Mon Sep 17 00:00:00 2001 From: Monk Liu Date: Thu, 10 Mar 2016 12:14:44 +0800 Subject: drm/amdgpu: use ref to keep job alive this is to fix fatal page fault error that occured if: job is signaled/released after its timeout work is already put to the global queue (in this case the cancel_delayed_work will return false), which will lead to NX-protection error page fault during job_timeout_func. Signed-off-by: Monk Liu Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 ++ drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 15 ++++++++++++--- drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 8 +++++++- drivers/gpu/drm/amd/scheduler/gpu_scheduler.h | 13 +++++++++++++ 5 files changed, 35 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_job.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index ccb28468ece8..2ac07ebecfd1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -750,7 +750,9 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, struct amdgpu_job **job); int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size, struct amdgpu_job **job); + void amdgpu_job_free(struct amdgpu_job *job); +void amdgpu_job_free_func(struct kref *refcount); int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring, struct amd_sched_entity *entity, void *owner, struct fence **f); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 9025671d21c3..d7e0b0b9a1bc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -872,6 +872,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, r = amd_sched_job_init(&job->base, &ring->sched, &p->ctx->rings[ring->idx].entity, amdgpu_job_timeout_func, + amdgpu_job_free_func, p->filp, &fence); if (r) { amdgpu_job_free(job); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 961cae4a1955..a052ac2b131d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -31,7 +31,7 @@ static void amdgpu_job_free_handler(struct work_struct *ws) { struct amdgpu_job *job = container_of(ws, struct amdgpu_job, base.work_free_job); - kfree(job); + amd_sched_job_put(&job->base); } void amdgpu_job_timeout_func(struct work_struct *work) @@ -41,6 +41,8 @@ void amdgpu_job_timeout_func(struct work_struct *work) job->base.sched->name, (uint32_t)atomic_read(&job->ring->fence_drv.last_seq), job->ring->fence_drv.sync_seq); + + amd_sched_job_put(&job->base); } int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, @@ -101,6 +103,12 @@ void amdgpu_job_free(struct 
amdgpu_job *job) kfree(job); } +void amdgpu_job_free_func(struct kref *refcount) +{ + struct amdgpu_job *job = container_of(refcount, struct amdgpu_job, base.refcount); + kfree(job); +} + int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring, struct amd_sched_entity *entity, void *owner, struct fence **f) @@ -113,9 +121,10 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring, return -EINVAL; r = amd_sched_job_init(&job->base, &ring->sched, - entity, owner, + entity, amdgpu_job_timeout_func, - &fence); + amdgpu_job_free_func, + owner, &fence); if (r) return r; diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c index b7e8071448c6..639c70de217c 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c @@ -333,7 +333,8 @@ void amd_sched_job_finish(struct amd_sched_job *s_job) struct amd_gpu_scheduler *sched = s_job->sched; if (sched->timeout != MAX_SCHEDULE_TIMEOUT) { - cancel_delayed_work(&s_job->work_tdr); /*TODO: how to deal the case that tdr is running */ + if (cancel_delayed_work(&s_job->work_tdr)) + amd_sched_job_put(s_job); /* queue TDR for next job */ next = list_first_entry_or_null(&sched->ring_mirror_list, @@ -341,6 +342,7 @@ void amd_sched_job_finish(struct amd_sched_job *s_job) if (next) { INIT_DELAYED_WORK(&next->work_tdr, s_job->timeout_callback); + amd_sched_job_get(next); schedule_delayed_work(&next->work_tdr, sched->timeout); } } @@ -354,6 +356,7 @@ void amd_sched_job_begin(struct amd_sched_job *s_job) list_first_entry_or_null(&sched->ring_mirror_list, struct amd_sched_job, node) == s_job) { INIT_DELAYED_WORK(&s_job->work_tdr, s_job->timeout_callback); + amd_sched_job_get(s_job); schedule_delayed_work(&s_job->work_tdr, sched->timeout); } } @@ -382,9 +385,11 @@ int amd_sched_job_init(struct amd_sched_job *job, struct amd_gpu_scheduler *sched, struct amd_sched_entity *entity, void (*timeout_cb)(struct work_struct *work), + void (*free_cb)(struct kref *refcount), void *owner, struct fence **fence) { INIT_LIST_HEAD(&job->node); + kref_init(&job->refcount); job->sched = sched; job->s_entity = entity; job->s_fence = amd_sched_fence_create(entity, owner); @@ -393,6 +398,7 @@ int amd_sched_job_init(struct amd_sched_job *job, job->s_fence->s_job = job; job->timeout_callback = timeout_cb; + job->free_callback = free_cb; if (fence) *fence = &job->s_fence->base; diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h index a5700aded5bf..95ebfd069690 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h @@ -78,6 +78,7 @@ struct amd_sched_fence { }; struct amd_sched_job { + struct kref refcount; struct amd_gpu_scheduler *sched; struct amd_sched_entity *s_entity; struct amd_sched_fence *s_fence; @@ -87,6 +88,7 @@ struct amd_sched_job { struct list_head node; struct delayed_work work_tdr; void (*timeout_callback) (struct work_struct *work); + void (*free_callback)(struct kref *refcount); }; extern const struct fence_ops amd_sched_fence_ops; @@ -155,9 +157,20 @@ int amd_sched_job_init(struct amd_sched_job *job, struct amd_gpu_scheduler *sched, struct amd_sched_entity *entity, void (*timeout_cb)(struct work_struct *work), + void (*free_cb)(struct kref* refcount), void *owner, struct fence **fence); void amd_sched_job_pre_schedule(struct amd_gpu_scheduler *sched , struct amd_sched_job *s_job); void amd_sched_job_finish(struct amd_sched_job 
*s_job); void amd_sched_job_begin(struct amd_sched_job *s_job); +static inline void amd_sched_job_get(struct amd_sched_job *job) { + if (job) + kref_get(&job->refcount); +} + +static inline void amd_sched_job_put(struct amd_sched_job *job) { + if (job) + kref_put(&job->refcount, job->free_callback); +} + #endif -- cgit v1.2.3 From 62250a910a4090f88b729e04baf4369d78ba5bdc Mon Sep 17 00:00:00 2001 From: Nils Wallménius Date: Sun, 10 Apr 2016 16:30:00 +0200 Subject: drm/amd/scheduler: Mark amdgpu_sched_ops const MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This marks the struct amdgpu_sched_ops const and adjusts amd_sched_init to take a const pointer for the ops param. The ops member of struct amd_gpu_scheduler is also changed to const. Reviewed-by: Christian König Signed-off-by: Nils Wallménius Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 2 +- drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 2 +- drivers/gpu/drm/amd/scheduler/gpu_scheduler.h | 4 ++-- 4 files changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_job.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 5d05b5d67bbd..660213a1682a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -748,7 +748,7 @@ enum amdgpu_ring_type { AMDGPU_RING_TYPE_VCE }; -extern struct amd_sched_backend_ops amdgpu_sched_ops; +extern const struct amd_sched_backend_ops amdgpu_sched_ops; int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, struct amdgpu_job **job); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index a052ac2b131d..4eea2a18d8bb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -198,7 +198,7 @@ err: return fence; } -struct amd_sched_backend_ops amdgpu_sched_ops = { +const struct amd_sched_backend_ops amdgpu_sched_ops = { .dependency = amdgpu_job_dependency, .run_job = amdgpu_job_run, .begin_job = amd_sched_job_begin, diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c index 639c70de217c..c16248cee779 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c @@ -530,7 +530,7 @@ static int amd_sched_main(void *param) * Return 0 on success, otherwise error code. 
*/ int amd_sched_init(struct amd_gpu_scheduler *sched, - struct amd_sched_backend_ops *ops, + const struct amd_sched_backend_ops *ops, unsigned hw_submission, long timeout, const char *name) { int i; diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h index 95ebfd069690..169f70fe949c 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h @@ -123,7 +123,7 @@ enum amd_sched_priority { * One scheduler is implemented for each hardware ring */ struct amd_gpu_scheduler { - struct amd_sched_backend_ops *ops; + const struct amd_sched_backend_ops *ops; uint32_t hw_submission_limit; long timeout; const char *name; @@ -137,7 +137,7 @@ struct amd_gpu_scheduler { }; int amd_sched_init(struct amd_gpu_scheduler *sched, - struct amd_sched_backend_ops *ops, + const struct amd_sched_backend_ops *ops, uint32_t hw_submission, long timeout, const char *name); void amd_sched_fini(struct amd_gpu_scheduler *sched); -- cgit v1.2.3 From c5637837ba5d5b5e962e73f5a1a7c5456fa85a68 Mon Sep 17 00:00:00 2001 From: Monk Liu Date: Tue, 19 Apr 2016 20:11:32 +0800 Subject: drm/amdgpu: keep vm in job instead of ib (v2) ib.vm is a legacy way to get vm, after scheduler implemented vm should be get from job, and all ibs from one job share the same vm, no need to keep ib.vm just move vm field to job. this patch as well add job as paramter to ib_schedule so it can get vm from job->vm. v2: agd: sqaush in: drm/amdgpu: check if ring emit_vm_flush exists in vm flush No vm flush on engines that don't support VM. bug: https://bugs.freedesktop.org/show_bug.cgi?id=95195 Signed-off-by: Monk Liu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 6 +++--- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 3 ++- drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 16 ++++------------ drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 9 +++++---- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 3 ++- drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 2 +- drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 2 +- drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 2 +- 12 files changed, 25 insertions(+), 30 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_job.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index e72cf4518c30..959008ad65a8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -740,7 +740,6 @@ struct amdgpu_ib { uint64_t gpu_addr; uint32_t *ptr; struct amdgpu_user_fence *user; - struct amdgpu_vm *vm; unsigned vm_id; uint64_t vm_pd_addr; struct amdgpu_ctx *ctx; @@ -763,7 +762,7 @@ enum amdgpu_ring_type { extern const struct amd_sched_backend_ops amdgpu_sched_ops; int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, - struct amdgpu_job **job); + struct amdgpu_job **job, struct amdgpu_vm *vm); int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size, struct amdgpu_job **job); @@ -1191,7 +1190,7 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm, void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib, struct fence *f); int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, struct amdgpu_ib *ib, struct fence *last_vm_update, - struct fence **f); + struct amdgpu_job *job, struct fence **f); int 
amdgpu_ib_pool_init(struct amdgpu_device *adev); void amdgpu_ib_pool_fini(struct amdgpu_device *adev); int amdgpu_ib_ring_tests(struct amdgpu_device *adev); @@ -1247,6 +1246,7 @@ struct amdgpu_cs_parser { struct amdgpu_job { struct amd_sched_job base; struct amdgpu_device *adev; + struct amdgpu_vm *vm; struct amdgpu_ring *ring; struct amdgpu_sync sync; struct amdgpu_ib *ibs; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 2ebba295d0e4..1a065961981a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -120,6 +120,7 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p, int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) { struct amdgpu_fpriv *fpriv = p->filp->driver_priv; + struct amdgpu_vm *vm = &fpriv->vm; union drm_amdgpu_cs *cs = data; uint64_t *chunk_array_user; uint64_t *chunk_array; @@ -214,7 +215,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) } } - ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job); + ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job, vm); if (ret) goto free_all_kdata; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index 0129617a7962..0ed643036361 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -74,7 +74,6 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm, ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo); } - ib->vm = vm; ib->vm_id = 0; return 0; @@ -117,13 +116,13 @@ void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib, struct fen */ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, struct amdgpu_ib *ibs, struct fence *last_vm_update, - struct fence **f) + struct amdgpu_job *job, struct fence **f) { struct amdgpu_device *adev = ring->adev; struct amdgpu_ib *ib = &ibs[0]; struct amdgpu_ctx *ctx, *old_ctx; - struct amdgpu_vm *vm; struct fence *hwf; + struct amdgpu_vm *vm = NULL; unsigned i, patch_offset = ~0; int r = 0; @@ -132,7 +131,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, return -EINVAL; ctx = ibs->ctx; - vm = ibs->vm; + if (job) /* for domain0 job like ring test, ibs->job is not assigned */ + vm = job->vm; if (!ring->ready) { dev_err(adev->dev, "couldn't schedule ib\n"); @@ -174,14 +174,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, old_ctx = ring->current_ctx; for (i = 0; i < num_ibs; ++i) { ib = &ibs[i]; - - if (ib->ctx != ctx || ib->vm != vm) { - ring->current_ctx = old_ctx; - if (ib->vm_id) - amdgpu_vm_reset_id(adev, ib->vm_id); - amdgpu_ring_undo(ring); - return -EINVAL; - } amdgpu_ring_emit_ib(ring, ib); ring->current_ctx = ctx; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 4eea2a18d8bb..917c6f3bfa09 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -46,7 +46,7 @@ void amdgpu_job_timeout_func(struct work_struct *work) } int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, - struct amdgpu_job **job) + struct amdgpu_job **job, struct amdgpu_vm *vm) { size_t size = sizeof(struct amdgpu_job); @@ -60,6 +60,7 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, return -ENOMEM; (*job)->adev = adev; + (*job)->vm = vm; (*job)->ibs = (void *)&(*job)[1]; (*job)->num_ibs = num_ibs; INIT_WORK(&(*job)->base.work_free_job, amdgpu_job_free_handler); @@ -74,7 +75,7 @@ int 
amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size, { int r; - r = amdgpu_job_alloc(adev, 1, job); + r = amdgpu_job_alloc(adev, 1, job, NULL); if (r) return r; @@ -138,7 +139,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring, static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job) { struct amdgpu_job *job = to_amdgpu_job(sched_job); - struct amdgpu_vm *vm = job->ibs->vm; + struct amdgpu_vm *vm = job->vm; struct fence *fence = amdgpu_sync_get_fence(&job->sync); @@ -186,7 +187,7 @@ static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job) trace_amdgpu_sched_run_job(job); r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, - job->sync.last_vm_update, &fence); + job->sync.last_vm_update, job, &fence); if (r) { DRM_ERROR("Error scheduling IBs (%d)\n", r); goto err; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index abda242980ba..3f953759002f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -910,7 +910,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, ib->length_dw = 16; if (direct) { - r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f); + r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f); job->fence = f; if (r) goto err_free; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index 22a4d96fedb7..79ba2aae0d7a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -436,7 +436,7 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, for (i = ib->length_dw; i < ib_size_dw; ++i) ib->ptr[i] = 0x0; - r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f); + r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f); job->fence = f; if (r) goto err; @@ -498,7 +498,7 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, ib->ptr[i] = 0x0; if (direct) { - r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f); + r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f); job->fence = f; if (r) goto err; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 2c3d9557e1a2..692d0d02b644 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -303,7 +303,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed)) amdgpu_ring_emit_pipeline_sync(ring); - if (pd_addr != AMDGPU_VM_NO_FLUSH) { + if (ring->funcs->emit_vm_flush && + pd_addr != AMDGPU_VM_NO_FLUSH) { struct fence *fence; trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id); diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index b7ed9d376001..8d69c6555e02 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c @@ -643,7 +643,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring) ib.ptr[3] = 1; ib.ptr[4] = 0xDEADBEEF; ib.length_dw = 5; - r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f); + r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f); if (r) goto err1; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 6686c9c3005d..03108909a275 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -2136,7 +2136,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring) ib.ptr[2] = 0xDEADBEEF; ib.length_dw = 3; - r = amdgpu_ib_schedule(ring, 1, 
&ib, NULL, &f); + r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f); if (r) goto err2; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 021c17e50d51..a82945f3a5d2 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -800,7 +800,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring) ib.ptr[2] = 0xDEADBEEF; ib.length_dw = 3; - r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f); + r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f); if (r) goto err2; @@ -1551,7 +1551,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4); /* shedule the ib on the ring */ - r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f); + r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f); if (r) { DRM_ERROR("amdgpu: ib submit failed (%d).\n", r); goto fail; diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index e6d3544fda06..27ca46d16bc4 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c @@ -701,7 +701,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring) ib.ptr[7] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP); ib.length_dw = 8; - r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f); + r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f); if (r) goto err1; diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index 00b43700c956..278b1fe35385 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -925,7 +925,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring) ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP); ib.length_dw = 8; - r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f); + r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f); if (r) goto err1; -- cgit v1.2.3 From 92f250989b7098f4b52d50183a7b2fc4e010731b Mon Sep 17 00:00:00 2001 From: Christian König Date: Fri, 6 May 2016 15:57:42 +0200 Subject: drm/amdgpu: move the context from the IBs into the job MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We only have one context for all IBs. 
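As a rough illustration of this change (hypothetical structures, not the driver code), the sketch below shows why the context can live in the job: every IB of a submission shares it, so the preamble-skip and context-switch decision can be made once per job, as the reworked IB scheduling path in the diff below does with the job's ctx.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define IB_FLAG_PREAMBLE 0x1

struct ib  { uint32_t flags; };
struct job { uint64_t ctx; unsigned num_ibs; struct ib ibs[4]; };

static void emit_job(struct job *job, uint64_t *ring_current_ctx)
{
	bool skip_preamble = (*ring_current_ctx == job->ctx);
	bool need_ctx_switch = !skip_preamble;

	for (unsigned i = 0; i < job->num_ibs; ++i) {
		if ((job->ibs[i].flags & IB_FLAG_PREAMBLE) && skip_preamble)
			continue;	/* same context: preamble IB can be dropped */
		printf("emit ib %u (ctx switch: %d)\n", i, need_ctx_switch);
		need_ctx_switch = false;	/* only the first emitted IB switches */
	}
	*ring_current_ctx = job->ctx;
}

int main(void)
{
	uint64_t ring_ctx = 0;
	struct job job = { .ctx = 42, .num_ibs = 2,
			   .ibs = { { IB_FLAG_PREAMBLE }, { 0 } } };

	emit_job(&job, &ring_ctx);	/* new ctx: preamble kept, switch emitted */
	emit_job(&job, &ring_ctx);	/* same ctx: preamble skipped */
	return 0;
}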
Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 10 +++++----- drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 22 +++++++++++++++------- drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 7 +++---- 4 files changed, 24 insertions(+), 17 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_job.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index db87edc72936..9b55ad351602 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -743,7 +743,6 @@ struct amdgpu_ib { struct amdgpu_user_fence *user; unsigned vm_id; uint64_t vm_pd_addr; - uint64_t ctx; uint32_t gds_base, gds_size; uint32_t gws_base, gws_size; uint32_t oa_base, oa_size; @@ -1262,6 +1261,7 @@ struct amdgpu_job { struct fence *fence; /* the hw fence */ uint32_t num_ibs; void *owner; + uint64_t ctx; struct amdgpu_user_fence uf; }; #define to_amdgpu_job(sched_job) \ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 87ec1136a0bb..2895d63c9979 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -741,7 +741,6 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, ib->length_dw = chunk_ib->ib_bytes / 4; ib->flags = chunk_ib->flags; - ib->ctx = parser->ctx->rings[ring->idx].entity.fence_context; j++; } @@ -840,6 +839,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, union drm_amdgpu_cs *cs) { struct amdgpu_ring *ring = p->job->ring; + struct amd_sched_entity *entity = &p->ctx->rings[ring->idx].entity; struct fence *fence; struct amdgpu_job *job; int r; @@ -848,16 +848,16 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, p->job = NULL; r = amd_sched_job_init(&job->base, &ring->sched, - &p->ctx->rings[ring->idx].entity, - amdgpu_job_timeout_func, - amdgpu_job_free_func, - p->filp, &fence); + entity, amdgpu_job_timeout_func, + amdgpu_job_free_func, + p->filp, &fence); if (r) { amdgpu_job_free(job); return r; } job->owner = p->filp; + job->ctx = entity->fence_context; p->fence = fence_get(fence); cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, fence); job->ibs[job->num_ibs - 1].sequence = cs->out.handle; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index 88b8fda7340f..dacbd2e32072 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -121,18 +121,26 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, { struct amdgpu_device *adev = ring->adev; struct amdgpu_ib *ib = &ibs[0]; - struct fence *hwf; - struct amdgpu_vm *vm = NULL; - unsigned i, patch_offset = ~0; bool skip_preamble, need_ctx_switch; + unsigned patch_offset = ~0; + struct amdgpu_vm *vm; + struct fence *hwf; + uint64_t ctx; + unsigned i; int r = 0; if (num_ibs == 0) return -EINVAL; - if (job) /* for domain0 job like ring test, ibs->job is not assigned */ + /* ring tests don't use a job */ + if (job) { vm = job->vm; + ctx = job->ctx; + } else { + vm = NULL; + ctx = 0; + } if (!ring->ready) { dev_err(adev->dev, "couldn't schedule ib\n"); @@ -170,8 +178,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, /* always set cond_exec_polling to CONTINUE */ *ring->cond_exe_cpu_addr = 1; - skip_preamble = ring->current_ctx == ib->ctx; - need_ctx_switch = ring->current_ctx != ib->ctx; + skip_preamble = ring->current_ctx == ctx; + need_ctx_switch = 
ring->current_ctx != ctx; for (i = 0; i < num_ibs; ++i) { ib = &ibs[i]; @@ -209,7 +217,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, if (patch_offset != ~0 && ring->funcs->patch_cond_exec) amdgpu_ring_patch_cond_exec(ring, patch_offset); - ring->current_ctx = ibs->ctx; + ring->current_ctx = ctx; amdgpu_ring_commit(ring); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 917c6f3bfa09..a0961f2a93d2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -122,14 +122,13 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring, return -EINVAL; r = amd_sched_job_init(&job->base, &ring->sched, - entity, - amdgpu_job_timeout_func, - amdgpu_job_free_func, - owner, &fence); + entity, amdgpu_job_timeout_func, + amdgpu_job_free_func, owner, &fence); if (r) return r; job->owner = owner; + job->ctx = entity->fence_context; *f = fence_get(fence); amd_sched_entity_push_job(&job->base); -- cgit v1.2.3 From d88bf583bd06eecb31f82871c90ef6a5a09b5766 Mon Sep 17 00:00:00 2001 From: Christian König Date: Fri, 6 May 2016 17:50:03 +0200 Subject: drm/amdgpu: move VM fields into job MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit They are the same for all IBs. Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 15 +++++++------- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 36 +++++++++++++++------------------ drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 19 +++++++++-------- drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 12 ++--------- drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 3 ++- drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h | 3 ++- drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 5 +++-- drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 10 +++++---- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 10 +++++---- drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 5 +++-- drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 5 +++-- drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | 3 ++- drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | 3 ++- drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 3 ++- 14 files changed, 66 insertions(+), 66 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_job.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 9b55ad351602..d4c1eb7816f0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -283,7 +283,8 @@ struct amdgpu_ring_funcs { int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx); /* command emit functions */ void (*emit_ib)(struct amdgpu_ring *ring, - struct amdgpu_ib *ib, bool ctx_switch); + struct amdgpu_ib *ib, + unsigned vm_id, bool ctx_switch); void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr, uint64_t seq, unsigned flags); void (*emit_pipeline_sync)(struct amdgpu_ring *ring); @@ -741,11 +742,6 @@ struct amdgpu_ib { uint64_t gpu_addr; uint32_t *ptr; struct amdgpu_user_fence *user; - unsigned vm_id; - uint64_t vm_pd_addr; - uint32_t gds_base, gds_size; - uint32_t gws_base, gws_size; - uint32_t oa_base, oa_size; uint32_t flags; /* resulting sequence number */ uint64_t sequence; @@ -1262,6 +1258,11 @@ struct amdgpu_job { uint32_t num_ibs; void *owner; uint64_t ctx; + unsigned vm_id; + uint64_t vm_pd_addr; + uint32_t gds_base, gds_size; + uint32_t gws_base, gws_size; + uint32_t oa_base, oa_size; struct amdgpu_user_fence uf; }; #define to_amdgpu_job(sched_job) \ @@ -2221,7 +2222,7 @@ 
amdgpu_get_sdma_instance(struct amdgpu_ring *ring) #define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r)) #define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r)) #define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r)) -#define amdgpu_ring_emit_ib(r, ib, c) (r)->funcs->emit_ib((r), (ib), (c)) +#define amdgpu_ring_emit_ib(r, ib, vm_id, c) (r)->funcs->emit_ib((r), (ib), (vm_id), (c)) #define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r)) #define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr)) #define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags)) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 2895d63c9979..9ab2f0886a14 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -473,6 +473,9 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, goto error_validate; if (p->bo_list) { + struct amdgpu_bo *gds = p->bo_list->gds_obj; + struct amdgpu_bo *gws = p->bo_list->gws_obj; + struct amdgpu_bo *oa = p->bo_list->oa_obj; struct amdgpu_vm *vm = &fpriv->vm; unsigned i; @@ -481,6 +484,19 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, p->bo_list->array[i].bo_va = amdgpu_vm_bo_find(vm, bo); } + + if (gds) { + p->job->gds_base = amdgpu_bo_gpu_offset(gds); + p->job->gds_size = amdgpu_bo_size(gds); + } + if (gws) { + p->job->gws_base = amdgpu_bo_gpu_offset(gws); + p->job->gws_size = amdgpu_bo_size(gws); + } + if (oa) { + p->job->oa_base = amdgpu_bo_gpu_offset(oa); + p->job->oa_size = amdgpu_bo_size(oa); + } } error_validate: @@ -744,26 +760,6 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, j++; } - /* add GDS resources to first IB */ - if (parser->bo_list) { - struct amdgpu_bo *gds = parser->bo_list->gds_obj; - struct amdgpu_bo *gws = parser->bo_list->gws_obj; - struct amdgpu_bo *oa = parser->bo_list->oa_obj; - struct amdgpu_ib *ib = &parser->job->ibs[0]; - - if (gds) { - ib->gds_base = amdgpu_bo_gpu_offset(gds); - ib->gds_size = amdgpu_bo_size(gds); - } - if (gws) { - ib->gws_base = amdgpu_bo_gpu_offset(gws); - ib->gws_size = amdgpu_bo_size(gws); - } - if (oa) { - ib->oa_base = amdgpu_bo_gpu_offset(oa); - ib->oa_size = amdgpu_bo_size(oa); - } - } /* wrap the last IB with user fence */ if (parser->job->uf.bo) { struct amdgpu_ib *ib = &parser->job->ibs[parser->job->num_ibs - 1]; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index dacbd2e32072..201aceb01d8a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -74,8 +74,6 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm, ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo); } - ib->vm_id = 0; - return 0; } @@ -147,7 +145,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, return -EINVAL; } - if (vm && !ibs->vm_id) { + if (vm && !job->vm_id) { dev_err(adev->dev, "VM IB without ID\n"); return -EINVAL; } @@ -162,10 +160,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, patch_offset = amdgpu_ring_init_cond_exec(ring); if (vm) { - r = amdgpu_vm_flush(ring, ib->vm_id, ib->vm_pd_addr, - ib->gds_base, ib->gds_size, - ib->gws_base, ib->gws_size, - ib->oa_base, ib->oa_size); + r = amdgpu_vm_flush(ring, job->vm_id, job->vm_pd_addr, + job->gds_base, job->gds_size, + job->gws_base, job->gws_size, + job->oa_base, job->oa_size); if (r) { amdgpu_ring_undo(ring); return r; @@ 
-187,7 +185,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && skip_preamble) continue; - amdgpu_ring_emit_ib(ring, ib, need_ctx_switch); + amdgpu_ring_emit_ib(ring, ib, job ? job->vm_id : 0, + need_ctx_switch); need_ctx_switch = false; } @@ -197,8 +196,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, r = amdgpu_fence_emit(ring, &hwf); if (r) { dev_err(adev->dev, "failed to emit fence (%d)\n", r); - if (ib->vm_id) - amdgpu_vm_reset_id(adev, ib->vm_id); + if (job && job->vm_id) + amdgpu_vm_reset_id(adev, job->vm_id); amdgpu_ring_undo(ring); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index a0961f2a93d2..8ea68d0cfad6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -142,23 +142,15 @@ static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job) struct fence *fence = amdgpu_sync_get_fence(&job->sync); - if (fence == NULL && vm && !job->ibs->vm_id) { + if (fence == NULL && vm && !job->vm_id) { struct amdgpu_ring *ring = job->ring; - unsigned i, vm_id; - uint64_t vm_pd_addr; int r; r = amdgpu_vm_grab_id(vm, ring, &job->sync, &job->base.s_fence->base, - &vm_id, &vm_pd_addr); + &job->vm_id, &job->vm_pd_addr); if (r) DRM_ERROR("Error getting VM ID (%d)\n", r); - else { - for (i = 0; i < job->num_ibs; ++i) { - job->ibs[i].vm_id = vm_id; - job->ibs[i].vm_pd_addr = vm_pd_addr; - } - } fence = amdgpu_sync_get_fence(&job->sync); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index ad91664a7649..875626a2eccb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -762,7 +762,8 @@ out: * @ib: the IB to execute * */ -void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib, bool ctx_switch) +void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib, + unsigned vm_id, bool ctx_switch) { amdgpu_ring_write(ring, VCE_CMD_IB); amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h index 40d0650e3a37..f40cf761c66f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h @@ -34,7 +34,8 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, bool direct, struct fence **fence); void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp); int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx); -void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib, bool ctx_switch); +void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib, + unsigned vm_id, bool ctx_switch); void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, unsigned flags); int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring); diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index 6c2aa2b863b2..518dca43b133 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c @@ -210,9 +210,10 @@ static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) * Schedule an IB in the DMA ring (CIK). 
*/ static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring, - struct amdgpu_ib *ib, bool ctx_switch) + struct amdgpu_ib *ib, + unsigned vm_id, bool ctx_switch) { - u32 extra_bits = ib->vm_id & 0xf; + u32 extra_bits = vm_id & 0xf; u32 next_rptr = ring->wptr + 5; while ((next_rptr & 7) != 4) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 189ef2b23668..7f18a53ab53a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -2030,7 +2030,8 @@ static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring, * on the gfx ring for execution by the GPU. */ static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, - struct amdgpu_ib *ib, bool ctx_switch) + struct amdgpu_ib *ib, + unsigned vm_id, bool ctx_switch) { u32 header, control = 0; u32 next_rptr = ring->wptr + 5; @@ -2056,7 +2057,7 @@ static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, else header = PACKET3(PACKET3_INDIRECT_BUFFER, 2); - control |= ib->length_dw | (ib->vm_id << 24); + control |= ib->length_dw | (vm_id << 24); amdgpu_ring_write(ring, header); amdgpu_ring_write(ring, @@ -2069,7 +2070,8 @@ static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, } static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring, - struct amdgpu_ib *ib, bool ctx_switch) + struct amdgpu_ib *ib, + unsigned vm_id, bool ctx_switch) { u32 header, control = 0; u32 next_rptr = ring->wptr + 5; @@ -2084,7 +2086,7 @@ static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring, header = PACKET3(PACKET3_INDIRECT_BUFFER, 2); - control |= ib->length_dw | (ib->vm_id << 24); + control |= ib->length_dw | (vm_id << 24); amdgpu_ring_write(ring, header); amdgpu_ring_write(ring, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 0d556c907ab6..92647fbf5b8b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -5646,7 +5646,8 @@ static void gfx_v8_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring) } static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, - struct amdgpu_ib *ib, bool ctx_switch) + struct amdgpu_ib *ib, + unsigned vm_id, bool ctx_switch) { u32 header, control = 0; u32 next_rptr = ring->wptr + 5; @@ -5672,7 +5673,7 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, else header = PACKET3(PACKET3_INDIRECT_BUFFER, 2); - control |= ib->length_dw | (ib->vm_id << 24); + control |= ib->length_dw | (vm_id << 24); amdgpu_ring_write(ring, header); amdgpu_ring_write(ring, @@ -5685,7 +5686,8 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, } static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring, - struct amdgpu_ib *ib, bool ctx_switch) + struct amdgpu_ib *ib, + unsigned vm_id, bool ctx_switch) { u32 header, control = 0; u32 next_rptr = ring->wptr + 5; @@ -5701,7 +5703,7 @@ static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring, header = PACKET3(PACKET3_INDIRECT_BUFFER, 2); - control |= ib->length_dw | (ib->vm_id << 24); + control |= ib->length_dw | (vm_id << 24); amdgpu_ring_write(ring, header); amdgpu_ring_write(ring, diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index de94adb2b19e..f4c3130d3fdb 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c @@ -242,9 +242,10 @@ static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) * Schedule an IB in the DMA ring (VI). 
*/ static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring, - struct amdgpu_ib *ib, bool ctx_switch) + struct amdgpu_ib *ib, + unsigned vm_id, bool ctx_switch) { - u32 vmid = ib->vm_id & 0xf; + u32 vmid = vm_id & 0xf; u32 next_rptr = ring->wptr + 5; while ((next_rptr & 7) != 2) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index ca2aee3e88a3..063f08a9957a 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -400,9 +400,10 @@ static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) * Schedule an IB in the DMA ring (VI). */ static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring, - struct amdgpu_ib *ib, bool ctx_switch) + struct amdgpu_ib *ib, + unsigned vm_id, bool ctx_switch) { - u32 vmid = ib->vm_id & 0xf; + u32 vmid = vm_id & 0xf; u32 next_rptr = ring->wptr + 5; while ((next_rptr & 7) != 2) diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c index a75ffb5b11b2..f07551476a70 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c @@ -489,7 +489,8 @@ static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring) * Write ring commands to execute the indirect buffer */ static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring, - struct amdgpu_ib *ib, bool ctx_switch) + struct amdgpu_ib *ib, + unsigned vm_id, bool ctx_switch) { amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_BASE, 0)); amdgpu_ring_write(ring, ib->gpu_addr); diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c index ecb81014d836..e0a76a883d46 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c @@ -539,7 +539,8 @@ static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring) * Write ring commands to execute the indirect buffer */ static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring, - struct amdgpu_ib *ib, bool ctx_switch) + struct amdgpu_ib *ib, + unsigned vm_id, bool ctx_switch) { amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0)); amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index a43f1a7c58bc..c9929d665c01 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -631,7 +631,8 @@ static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring) * Write ring commands to execute the indirect buffer */ static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring, - struct amdgpu_ib *ib, bool ctx_switch) + struct amdgpu_ib *ib, + unsigned vm_id, bool ctx_switch) { amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0)); amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); -- cgit v1.2.3 From 758ac17f963f3497aae4e767d3a9eb68fea71f71 Mon Sep 17 00:00:00 2001 From: Christian König Date: Fri, 6 May 2016 22:14:00 +0200 Subject: drm/amdgpu: fix and cleanup user fence handling v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We leaked the BO in the error pass, additional to that we only have one user fence for all IBs in a job. 
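The sketch below is a user-space approximation (invented helper names, not the amdgpu API) of the reference discipline this fix establishes for the user-fence buffer: the reference is taken while parsing the chunk, and the error path that rejects userptr buffers now drops that reference instead of leaking it.

#include <stdio.h>
#include <stdlib.h>

struct bo {
	int refs;
	int is_userptr;		/* user fences must not live in userptr BOs */
};

static struct bo *bo_ref(struct bo *bo)
{
	bo->refs++;
	return bo;
}

static void bo_unref(struct bo **bop)
{
	if (*bop && --(*bop)->refs == 0)
		free(*bop);
	*bop = NULL;
}

/* Analogue of the fixed chunk parsing: take the reference first,
 * then validate, and drop the reference again on failure. */
static int user_fence_chunk(struct bo *gobj, struct bo **uf_bo,
			    unsigned int *uf_offset, unsigned int chunk_offset)
{
	*uf_bo = bo_ref(gobj);
	*uf_offset = chunk_offset;

	if ((*uf_bo)->is_userptr) {
		bo_unref(uf_bo);	/* this unref was missing before */
		return -1;
	}
	return 0;
}

int main(void)
{
	struct bo *gobj = calloc(1, sizeof(*gobj));
	struct bo *uf_bo = NULL;
	unsigned int uf_offset = 0;

	gobj->refs = 1;			/* lookup reference */
	gobj->is_userptr = 1;		/* force the error path */

	if (user_fence_chunk(gobj, &uf_bo, &uf_offset, 0x40))
		printf("rejected, refs left: %d\n", gobj->refs);

	bo_unref(&gobj);		/* drop the lookup reference */
	return 0;
}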
v2: remove white space changes Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 19 +++++------- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 55 +++++++++++++++------------------ drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 9 +++--- drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 2 +- 4 files changed, 38 insertions(+), 47 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_job.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index d4c1eb7816f0..2a009c398dcb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -368,13 +368,6 @@ struct amdgpu_fence_driver { #define AMDGPU_FENCE_FLAG_64BIT (1 << 0) #define AMDGPU_FENCE_FLAG_INT (1 << 1) -struct amdgpu_user_fence { - /* write-back bo */ - struct amdgpu_bo *bo; - /* write-back address offset to bo start */ - uint32_t offset; -}; - int amdgpu_fence_driver_init(struct amdgpu_device *adev); void amdgpu_fence_driver_fini(struct amdgpu_device *adev); void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev); @@ -741,10 +734,7 @@ struct amdgpu_ib { uint32_t length_dw; uint64_t gpu_addr; uint32_t *ptr; - struct amdgpu_user_fence *user; uint32_t flags; - /* resulting sequence number */ - uint64_t sequence; }; enum amdgpu_ring_type { @@ -1219,7 +1209,7 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring); struct amdgpu_cs_chunk { uint32_t chunk_id; uint32_t length_dw; - uint32_t *kdata; + void *kdata; }; struct amdgpu_cs_parser { @@ -1263,7 +1253,12 @@ struct amdgpu_job { uint32_t gds_base, gds_size; uint32_t gws_base, gws_size; uint32_t oa_base, oa_size; - struct amdgpu_user_fence uf; + + /* user fence handling */ + struct amdgpu_bo *uf_bo; + uint32_t uf_offset; + uint64_t uf_sequence; + }; #define to_amdgpu_job(sched_job) \ container_of((sched_job), struct amdgpu_job, base) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 9ab2f0886a14..2bbeeb07c187 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -87,33 +87,30 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type, } static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p, - struct amdgpu_user_fence *uf, - struct drm_amdgpu_cs_chunk_fence *fence_data) + struct drm_amdgpu_cs_chunk_fence *data, + uint32_t *offset) { struct drm_gem_object *gobj; - uint32_t handle; - handle = fence_data->handle; gobj = drm_gem_object_lookup(p->adev->ddev, p->filp, - fence_data->handle); + data->handle); if (gobj == NULL) return -EINVAL; - uf->bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj)); - uf->offset = fence_data->offset; - - if (amdgpu_ttm_tt_get_usermm(uf->bo->tbo.ttm)) { - drm_gem_object_unreference_unlocked(gobj); - return -EINVAL; - } - - p->uf_entry.robj = amdgpu_bo_ref(uf->bo); + p->uf_entry.robj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj)); p->uf_entry.priority = 0; p->uf_entry.tv.bo = &p->uf_entry.robj->tbo; p->uf_entry.tv.shared = true; p->uf_entry.user_pages = NULL; + *offset = data->offset; drm_gem_object_unreference_unlocked(gobj); + + if (amdgpu_ttm_tt_get_usermm(p->uf_entry.robj->tbo.ttm)) { + amdgpu_bo_unref(&p->uf_entry.robj); + return -EINVAL; + } + return 0; } @@ -124,8 +121,8 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) union drm_amdgpu_cs *cs = data; uint64_t *chunk_array_user; uint64_t *chunk_array; - struct amdgpu_user_fence uf = {}; unsigned size, num_ibs = 0; + uint32_t uf_offset = 0; int i; int ret; 
@@ -200,7 +197,8 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) goto free_partial_kdata; } - ret = amdgpu_cs_user_fence_chunk(p, &uf, (void *)p->chunks[i].kdata); + ret = amdgpu_cs_user_fence_chunk(p, p->chunks[i].kdata, + &uf_offset); if (ret) goto free_partial_kdata; @@ -219,7 +217,10 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) if (ret) goto free_all_kdata; - p->job->uf = uf; + if (p->uf_entry.robj) { + p->job->uf_bo = amdgpu_bo_ref(p->uf_entry.robj); + p->job->uf_offset = uf_offset; + } kfree(chunk_array); return 0; @@ -377,7 +378,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, INIT_LIST_HEAD(&duplicates); amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd); - if (p->job->uf.bo) + if (p->uf_entry.robj) list_add(&p->uf_entry.tv.head, &p->validated); if (need_mmap_lock) @@ -760,17 +761,11 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, j++; } - /* wrap the last IB with user fence */ - if (parser->job->uf.bo) { - struct amdgpu_ib *ib = &parser->job->ibs[parser->job->num_ibs - 1]; - - /* UVD & VCE fw doesn't support user fences */ - if (parser->job->ring->type == AMDGPU_RING_TYPE_UVD || - parser->job->ring->type == AMDGPU_RING_TYPE_VCE) - return -EINVAL; - - ib->user = &parser->job->uf; - } + /* UVD & VCE fw doesn't support user fences */ + if (parser->job->uf_bo && ( + parser->job->ring->type == AMDGPU_RING_TYPE_UVD || + parser->job->ring->type == AMDGPU_RING_TYPE_VCE)) + return -EINVAL; return 0; } @@ -856,7 +851,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, job->ctx = entity->fence_context; p->fence = fence_get(fence); cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, fence); - job->ibs[job->num_ibs - 1].sequence = cs->out.handle; + job->uf_sequence = cs->out.handle; trace_amdgpu_cs_ioctl(job); amd_sched_entity_push_job(&job->base); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index 201aceb01d8a..34e35423b78e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -203,10 +203,11 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, } /* wrap the last IB with fence */ - if (ib->user) { - uint64_t addr = amdgpu_bo_gpu_offset(ib->user->bo); - addr += ib->user->offset; - amdgpu_ring_emit_fence(ring, addr, ib->sequence, + if (job && job->uf_bo) { + uint64_t addr = amdgpu_bo_gpu_offset(job->uf_bo); + + addr += job->uf_offset; + amdgpu_ring_emit_fence(ring, addr, job->uf_sequence, AMDGPU_FENCE_FLAG_64BIT); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 8ea68d0cfad6..f0dafa514fe4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -97,7 +97,7 @@ void amdgpu_job_free(struct amdgpu_job *job) amdgpu_sa_bo_free(job->adev, &job->ibs[i].sa_bo, f); fence_put(job->fence); - amdgpu_bo_unref(&job->uf.bo); + amdgpu_bo_unref(&job->uf_bo); amdgpu_sync_free(&job->sync); if (!job->base.use_sched) -- cgit v1.2.3
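Taken together, the series above hangs the job's lifetime on a reference count: arming the TDR delayed work takes a reference, cancelling it or letting the timeout handler run puts one, and the last put invokes the job's free callback. The sketch below is a compact user-space model of that protocol; the names are illustrative and the real driver uses kref and delayed_work rather than these stand-ins.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct sched_job {
	int refcount;
	bool tdr_armed;
	void (*free_cb)(struct sched_job *job);
};

static void job_get(struct sched_job *job) { job->refcount++; }

static void job_put(struct sched_job *job)
{
	if (--job->refcount == 0)
		job->free_cb(job);	/* last reference frees the job */
}

static void job_free_cb(struct sched_job *job)
{
	printf("job freed\n");
	free(job);
}

static void tdr_arm(struct sched_job *job)
{
	job_get(job);			/* the pending work now holds a reference */
	job->tdr_armed = true;
}

static void tdr_timeout(struct sched_job *job)
{
	printf("timeout fired\n");
	job->tdr_armed = false;
	job_put(job);			/* drop the work's reference */
}

static bool tdr_cancel(struct sched_job *job)
{
	if (!job->tdr_armed)
		return false;		/* too late: the handler will do the put */
	job->tdr_armed = false;
	job_put(job);			/* cancelled before it ran */
	return true;
}

int main(void)
{
	/* Path 1: the hw fence signals first, the pending timeout is cancelled. */
	struct sched_job *a = calloc(1, sizeof(*a));
	a->refcount = 1;		/* submission's reference */
	a->free_cb = job_free_cb;
	tdr_arm(a);
	tdr_cancel(a);
	job_put(a);			/* deferred-free put releases the job */

	/* Path 2: the timeout fires and drops the reference the work held. */
	struct sched_job *b = calloc(1, sizeof(*b));
	b->refcount = 1;
	b->free_cb = job_free_cb;
	tdr_arm(b);
	tdr_timeout(b);
	job_put(b);			/* deferred-free put releases the job */
	return 0;
}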