author    Rob Clark <robdclark@chromium.org>    2021-07-27 18:06:14 -0700
committer Rob Clark <robdclark@chromium.org>    2021-07-28 09:19:00 -0700
commit    1d8a5ca436ee4a28eec14cb813f790f7cdeeee19 (patch)
tree      4e7ef645394d0de1d8fe506ed776e24fc7198611 /drivers/gpu/drm/msm/msm_ringbuffer.c
parent    79341eb74c1fd193fe17b015c856e713e82650a2 (diff)
download  linux-1d8a5ca436ee4a28eec14cb813f790f7cdeeee19.tar.bz2
drm/msm: Conversion to drm scheduler
For existing adrenos, there are one or more ringbuffers, depending on whether preemption is supported. When preemption is supported, each ringbuffer has its own priority. A submitqueue (which maps to a gl context or vk queue in userspace) is mapped to a specific ringbuffer at creation time, based on the submitqueue's priority.

Each ringbuffer has its own drm_gpu_scheduler. Each submitqueue maps to a drm_sched_entity. And each submit maps to a drm_sched_job.

Closes: https://gitlab.freedesktop.org/drm/msm/-/issues/4
Signed-off-by: Rob Clark <robdclark@chromium.org>
Acked-by: Christian König <christian.koenig@amd.com>
Link: https://lore.kernel.org/r/20210728010632.2633470-10-robdclark@gmail.com
Signed-off-by: Rob Clark <robdclark@chromium.org>
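To make that mapping concrete, here is a minimal sketch of how a submitqueue's entity could be bound to the scheduler of the ring selected by its priority. The field names (queue->prio, queue->entity, gpu->rb[]) are illustrative of the layout described above, not quoted from the patch; the drm_sched_entity_init() signature is the one current at this kernel version:

    /* Sketch only: bind one scheduler entity per submitqueue to the
     * drm_gpu_scheduler of the ringbuffer picked by the queue's
     * priority.  Assumes simplified msm_gpu/submitqueue layouts.
     */
    static int submitqueue_bind_entity(struct msm_gpu *gpu,
                                       struct msm_gpu_submitqueue *queue)
    {
        struct msm_ringbuffer *ring = gpu->rb[queue->prio];
        struct drm_gpu_scheduler *sched = &ring->sched;

        /* One entity, feeding exactly one ring's scheduler */
        return drm_sched_entity_init(&queue->entity,
                                     DRM_SCHED_PRIORITY_NORMAL,
                                     &sched, 1, NULL);
    }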
Diffstat (limited to 'drivers/gpu/drm/msm/msm_ringbuffer.c')
-rw-r--r--    drivers/gpu/drm/msm/msm_ringbuffer.c    63
1 file changed, 63 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index 437cca57d005..793dd853b799 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -7,10 +7,61 @@
#include "msm_ringbuffer.h"
#include "msm_gpu.h"
+static uint num_hw_submissions = 8;
+MODULE_PARM_DESC(num_hw_submissions, "The max # of jobs to write into ringbuffer (default 8)");
+module_param(num_hw_submissions, uint, 0600);
+
+static struct dma_fence *msm_job_dependency(struct drm_sched_job *job,
+ struct drm_sched_entity *s_entity)
+{
+ struct msm_gem_submit *submit = to_msm_submit(job);
+
+ if (!xa_empty(&submit->deps))
+ return xa_erase(&submit->deps, submit->last_dep++);
+
+ return NULL;
+}
+
+static struct dma_fence *msm_job_run(struct drm_sched_job *job)
+{
+ struct msm_gem_submit *submit = to_msm_submit(job);
+ struct msm_gpu *gpu = submit->gpu;
+
+ submit->hw_fence = msm_fence_alloc(submit->ring->fctx);
+
+ pm_runtime_get_sync(&gpu->pdev->dev);
+
+ /* TODO move submit path over to using a per-ring lock.. */
+ mutex_lock(&gpu->dev->struct_mutex);
+
+ msm_gpu_submit(gpu, submit);
+
+ mutex_unlock(&gpu->dev->struct_mutex);
+
+ pm_runtime_put(&gpu->pdev->dev);
+
+ return dma_fence_get(submit->hw_fence);
+}
+
+static void msm_job_free(struct drm_sched_job *job)
+{
+ struct msm_gem_submit *submit = to_msm_submit(job);
+
+ drm_sched_job_cleanup(job);
+ msm_gem_submit_put(submit);
+}
+
+const struct drm_sched_backend_ops msm_sched_ops = {
+ .dependency = msm_job_dependency,
+ .run_job = msm_job_run,
+ .free_job = msm_job_free
+};
+
struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
void *memptrs, uint64_t memptrs_iova)
{
struct msm_ringbuffer *ring;
+ long sched_timeout;
char name[32];
int ret;
@@ -45,6 +96,16 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
ring->memptrs = memptrs;
ring->memptrs_iova = memptrs_iova;
+ /* currently managing hangcheck ourselves: */
+ sched_timeout = MAX_SCHEDULE_TIMEOUT;
+
+ ret = drm_sched_init(&ring->sched, &msm_sched_ops,
+ num_hw_submissions, 0, sched_timeout,
+ NULL, to_msm_bo(ring->bo)->name);
+ if (ret) {
+ goto fail;
+ }
+
INIT_LIST_HEAD(&ring->submits);
spin_lock_init(&ring->submit_lock);
spin_lock_init(&ring->preempt_lock);
@@ -65,6 +126,8 @@ void msm_ringbuffer_destroy(struct msm_ringbuffer *ring)
if (IS_ERR_OR_NULL(ring))
return;
+ drm_sched_fini(&ring->sched);
+
msm_fence_context_free(ring->fctx);
msm_gem_kernel_put(ring->bo, ring->gpu->aspace);
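The other half of the conversion (in msm_gem_submit.c, not shown in this file's diff) feeds these per-ring schedulers from the submit path. A rough sketch of that flow, using the drm_sched API of this kernel version; treat it as illustrative rather than the driver's exact code:

    /* Sketch: a submit becomes a drm_sched_job on its submitqueue's
     * entity; once the fences returned by msm_job_dependency() have
     * signaled, the scheduler calls msm_job_run() above.
     */
    static int sketch_queue_submit(struct msm_gem_submit *submit,
                                   struct drm_sched_entity *entity)
    {
        int ret;

        /* submit embeds a drm_sched_job as 'base' (see to_msm_submit()) */
        ret = drm_sched_job_init(&submit->base, entity, submit->queue);
        if (ret)
            return ret;

        drm_sched_entity_push_job(&submit->base, entity);

        return 0;
    }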