path: root/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
author     Monk Liu <Monk.Liu@amd.com>  2017-10-16 19:46:43 +0800
committer  Alex Deucher <alexander.deucher@amd.com>  2017-12-04 16:33:10 -0500
commit     a8a51a70416baab813606c6014c5f0746958dfb2 (patch)
tree       9198ac8b6d047072804fdd52f367bb4923f05f7e /drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
parent     7716ea564f6538236c34749cdf3a6351c2844c1a (diff)
download   linux-a8a51a70416baab813606c6014c5f0746958dfb2.tar.bz2
drm/amdgpu: cleanup job reset routine (v2)
Merge the setting of the guilty flag on the context into this function, to avoid implementing an extra routine.

v2: go through the entity list and compare the fence_ctx before operating on the entity, otherwise the entity may be just a wild pointer.

Signed-off-by: Monk Liu <Monk.Liu@amd.com>
Reviewed-by: Chunming Zhou <David1.Zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
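The new signature means every caller of amd_sched_hw_job_reset() must now pass the job suspected of hanging, or NULL to skip the guilty bookkeeping. A minimal caller-side sketch, assuming an amdgpu-style recovery path; example_gpu_recover and bad_job are illustrative names, not part of this patch:

	/* Hypothetical recovery path showing the new call signature. */
	static void example_gpu_recover(struct amd_gpu_scheduler *sched,
					struct amd_sched_job *bad_job)
	{
		/* Stops scheduler fence processing and, with this patch,
		 * also marks the offending entity's context as guilty once
		 * the job's karma exceeds sched->hang_limit.  Passing NULL
		 * skips the "if (bad)" branch entirely. */
		amd_sched_hw_job_reset(sched, bad_job);

		/* ... hardware reset and job resubmission would follow ... */
	}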
Diffstat (limited to 'drivers/gpu/drm/amd/scheduler/gpu_scheduler.c')
-rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 31
1 file changed, 30 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 764606ce3541..1474866d9048 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -443,9 +443,18 @@ static void amd_sched_job_timedout(struct work_struct *work)
 	job->sched->ops->timedout_job(job);
 }
 
-void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched)
+static void amd_sched_set_guilty(struct amd_sched_job *s_job)
+{
+	if (atomic_inc_return(&s_job->karma) > s_job->sched->hang_limit)
+		if (s_job->s_entity->guilty)
+			atomic_set(s_job->s_entity->guilty, 1);
+}
+
+void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched, struct amd_sched_job *bad)
 {
 	struct amd_sched_job *s_job;
+	struct amd_sched_entity *entity, *tmp;
+	int i;
 
 	spin_lock(&sched->job_list_lock);
 	list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
@@ -458,6 +467,26 @@ void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched)
 		}
 	}
 	spin_unlock(&sched->job_list_lock);
+
+	if (bad) {
+		bool found = false;
+
+		for (i = AMD_SCHED_PRIORITY_MIN; i < AMD_SCHED_PRIORITY_MAX; i++) {
+			struct amd_sched_rq *rq = &sched->sched_rq[i];
+
+			spin_lock(&rq->lock);
+			list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
+				if (bad->s_fence->scheduled.context == entity->fence_context) {
+					found = true;
+					amd_sched_set_guilty(bad);
+					break;
+				}
+			}
+			spin_unlock(&rq->lock);
+			if (found)
+				break;
+		}
+	}
 }
 
 void amd_sched_job_kickout(struct amd_sched_job *s_job)
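For context, the guilty flag written by amd_sched_set_guilty() is a pointer to an atomic_t (the diff NULL-checks it before setting it), owned by whoever created the entity; the scheduler only writes it. A hedged sketch of how an owner might consult it after a reset; example_ctx_was_guilty is an illustrative name, not an API from this patch:

	/* Hypothetical consumer: returns true once a job from this
	 * entity's context has exceeded sched->hang_limit. */
	static bool example_ctx_was_guilty(atomic_t *guilty)
	{
		return guilty && atomic_read(guilty);
	}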