author		Luben Tuikov <luben.tuikov@amd.com>	2020-12-03 22:17:19 -0500
committer	Christian König <christian.koenig@amd.com>	2020-12-08 14:38:03 +0100
commit		6efa4b465cfdaa9be251ba043253e302ddb9976f (patch)
tree		a3872dddf00e545ff63bf09af9b347f3c12c3472 /include/drm/gpu_scheduler.h
parent		8935ff00e3b110e47e3a291ff11951eb1066b1ae (diff)
gpu/drm: ring_mirror_list --> pending_list
Rename "ring_mirror_list" to "pending_list", to describe what something is, not what it does, how it's used, or how the hardware implements it. This also abstracts the actual hardware implementation, i.e. how the low-level driver communicates with the device it drives, ring, CAM, etc., shouldn't be exposed to DRM. The pending_list keeps jobs submitted, which are out of our control. Usually this means they are pending execution status in hardware, but the latter definition is a more general (inclusive) definition. Signed-off-by: Luben Tuikov <luben.tuikov@amd.com> Acked-by: Christian König <christian.koenig@amd.com> Link: https://patchwork.freedesktop.org/patch/405573/ Cc: Alexander Deucher <Alexander.Deucher@amd.com> Cc: Andrey Grodzovsky <Andrey.Grodzovsky@amd.com> Cc: Christian König <christian.koenig@amd.com> Cc: Daniel Vetter <daniel.vetter@ffwll.ch> Signed-off-by: Christian König <christian.koenig@amd.com>
Diffstat (limited to 'include/drm/gpu_scheduler.h')
-rw-r--r--	include/drm/gpu_scheduler.h	10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 3add0072bd37..2e0c368e19f6 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -174,7 +174,7 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
* @sched: the scheduler instance on which this job is scheduled.
* @s_fence: contains the fences for the scheduling of job.
* @finish_cb: the callback for the finished fence.
- * @node: used to append this struct to the @drm_gpu_scheduler.ring_mirror_list.
+ * @node: used to append this struct to the @drm_gpu_scheduler.pending_list.
* @id: a unique id assigned to each job scheduled on the scheduler.
* @karma: increment on every hang caused by this job. If this exceeds the hang
* limit of the scheduler then the job is marked guilty and will not
@@ -203,7 +203,7 @@ struct drm_sched_job {
static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
int threshold)
{
- return (s_job && atomic_inc_return(&s_job->karma) > threshold);
+ return s_job && atomic_inc_return(&s_job->karma) > threshold;
}
/**
@@ -260,8 +260,8 @@ struct drm_sched_backend_ops {
* @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the
* timeout interval is over.
* @thread: the kthread on which the scheduler which run.
- * @ring_mirror_list: the list of jobs which are currently in the job queue.
- * @job_list_lock: lock to protect the ring_mirror_list.
+ * @pending_list: the list of jobs which are currently in the job queue.
+ * @job_list_lock: lock to protect the pending_list.
* @hang_limit: once the hangs by a job crosses this limit then it is marked
* guilty and it will be considered for scheduling further.
* @score: score to help loadbalancer pick a idle sched
@@ -282,7 +282,7 @@ struct drm_gpu_scheduler {
atomic64_t job_id_count;
struct delayed_work work_tdr;
struct task_struct *thread;
- struct list_head ring_mirror_list;
+ struct list_head pending_list;
spinlock_t job_list_lock;
int hang_limit;
atomic_t score;
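As a usage note (not part of the patch): drm_sched_invalidate_job(), shown in the second hunk above, increments the job's karma and compares it against a threshold. A hypothetical driver recovery path could check it against the scheduler's hang_limit as sketched below; the wrapper name example_job_is_guilty() is illustrative only.

	#include <drm/gpu_scheduler.h>

	/* Hypothetical wrapper: bump the job's karma and report whether it
	 * has crossed the scheduler's hang limit, i.e. whether the job
	 * should be treated as guilty. */
	static bool example_job_is_guilty(struct drm_gpu_scheduler *sched,
					  struct drm_sched_job *s_job)
	{
		return drm_sched_invalidate_job(s_job, sched->hang_limit);
	}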