From d0e062ebb3a44b56a7e672da568334c76f763552 Mon Sep 17 00:00:00 2001 From: Rodrigo Vivi Date: Fri, 3 Aug 2018 16:27:21 -0700 Subject: drm/i915/cfl: Add a new CFL PCI ID. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit One more CFL ID added to spec. Cc: José Roberto de Souza Signed-off-by: Rodrigo Vivi Reviewed-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20180803232721.20038-1-rodrigo.vivi@intel.com --- include/drm/i915_pciids.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/drm') diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h index fbf5cfc9b352..fd965ffbb92e 100644 --- a/include/drm/i915_pciids.h +++ b/include/drm/i915_pciids.h @@ -386,6 +386,7 @@ INTEL_VGA_DEVICE(0x3E91, info), /* SRV GT2 */ \ INTEL_VGA_DEVICE(0x3E92, info), /* SRV GT2 */ \ INTEL_VGA_DEVICE(0x3E96, info), /* SRV GT2 */ \ + INTEL_VGA_DEVICE(0x3E98, info), /* SRV GT2 */ \ INTEL_VGA_DEVICE(0x3E9A, info) /* SRV GT2 */ /* CFL H */ -- cgit v1.2.3 From ac0a6cf1c6ef91e4af2a9d56eeaee8fca61d6ad7 Mon Sep 17 00:00:00 2001 From: Nayan Deshmukh Date: Wed, 1 Aug 2018 13:49:59 +0530 Subject: drm/scheduler: add a list of run queues to the entity MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit These are the potential run queues on which the jobs from this entity can be scheduled. We will use this to do load balancing. Signed-off-by: Nayan Deshmukh Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/scheduler/gpu_scheduler.c | 8 ++++++++ include/drm/gpu_scheduler.h | 7 ++++++- 2 files changed, 14 insertions(+), 1 deletion(-) (limited to 'include/drm') diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c index 4fc211e19d6e..afffb9a60cdb 100644 --- a/drivers/gpu/drm/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c @@ -179,6 +179,8 @@ int drm_sched_entity_init(struct drm_sched_entity *entity, unsigned int num_rq_list, atomic_t *guilty) { + int i; + if (!(entity && rq_list && num_rq_list > 0 && rq_list[0])) return -EINVAL; @@ -186,6 +188,11 @@ int drm_sched_entity_init(struct drm_sched_entity *entity, INIT_LIST_HEAD(&entity->list); entity->rq = rq_list[0]; entity->guilty = guilty; + entity->num_rq_list = num_rq_list; + entity->rq_list = kcalloc(num_rq_list, sizeof(struct drm_sched_rq *), + GFP_KERNEL); + for (i = 0; i < num_rq_list; ++i) + entity->rq_list[i] = rq_list[i]; entity->last_scheduled = NULL; spin_lock_init(&entity->rq_lock); @@ -348,6 +355,7 @@ void drm_sched_entity_fini(struct drm_sched_entity *entity) dma_fence_put(entity->last_scheduled); entity->last_scheduled = NULL; + kfree(entity->rq_list); } EXPORT_SYMBOL(drm_sched_entity_fini); diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h index 21c648b0b2a1..2419887e25eb 100644 --- a/include/drm/gpu_scheduler.h +++ b/include/drm/gpu_scheduler.h @@ -50,7 +50,10 @@ enum drm_sched_priority { * * @list: used to append this struct to the list of entities in the * runqueue. - * @rq: runqueue to which this entity belongs. + * @rq: runqueue on which this entity is currently scheduled. + * @rq_list: a list of run queues on which jobs from this entity can + * be scheduled + * @num_rq_list: number of run queues in the rq_list * @rq_lock: lock to modify the runqueue to which this entity belongs. * @job_queue: the list of jobs of this entity. 
* @fence_seq: a linearly increasing seqno incremented with each @@ -75,6 +78,8 @@ enum drm_sched_priority { struct drm_sched_entity { struct list_head list; struct drm_sched_rq *rq; + struct drm_sched_rq **rq_list; + unsigned int num_rq_list; spinlock_t rq_lock; struct spsc_queue job_queue; -- cgit v1.2.3 From 249a07c05a8da9637c2eb3205f1fc739c216f707 Mon Sep 17 00:00:00 2001 From: Nayan Deshmukh Date: Wed, 1 Aug 2018 13:50:00 +0530 Subject: drm/scheduler: add counter for total jobs in scheduler MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit To keep track of the scheduler load. Signed-off-by: Nayan Deshmukh Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/scheduler/gpu_scheduler.c | 3 +++ include/drm/gpu_scheduler.h | 2 ++ 2 files changed, 5 insertions(+) (limited to 'include/drm') diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c index afffb9a60cdb..3b9969190927 100644 --- a/drivers/gpu/drm/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c @@ -530,6 +530,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job, trace_drm_sched_job(sched_job, entity); + atomic_inc(&entity->rq->sched->num_jobs); WRITE_ONCE(entity->last_user, current->group_leader); first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node); @@ -821,6 +822,7 @@ static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb) dma_fence_get(&s_fence->finished); atomic_dec(&sched->hw_rq_count); + atomic_dec(&sched->num_jobs); drm_sched_fence_finished(s_fence); trace_drm_sched_process_job(s_fence); @@ -938,6 +940,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched, INIT_LIST_HEAD(&sched->ring_mirror_list); spin_lock_init(&sched->job_list_lock); atomic_set(&sched->hw_rq_count, 0); + atomic_set(&sched->num_jobs, 0); atomic64_set(&sched->job_id_count, 0); /* Each scheduler will run on a seperate kernel thread */ diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h index 2419887e25eb..0c4cfe689d4c 100644 --- a/include/drm/gpu_scheduler.h +++ b/include/drm/gpu_scheduler.h @@ -262,6 +262,7 @@ struct drm_sched_backend_ops { * @job_list_lock: lock to protect the ring_mirror_list. * @hang_limit: once the hangs by a job crosses this limit then it is marked * guilty and it will be considered for scheduling further. + * @num_jobs: the number of jobs in queue in the scheduler * * One scheduler is implemented for each hardware ring. */ @@ -279,6 +280,7 @@ struct drm_gpu_scheduler { struct list_head ring_mirror_list; spinlock_t job_list_lock; int hang_limit; + atomic_t num_jobs; }; int drm_sched_init(struct drm_gpu_scheduler *sched, -- cgit v1.2.3 From 7febe4bfd5d477eba17f70d4879cb81e9787118e Mon Sep 17 00:00:00 2001 From: Christian König Date: Wed, 1 Aug 2018 16:22:39 +0200 Subject: drm/scheduler: fix setting the priorty for entities (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since we now deal with multiple rq we need to update all of them, not just the current one. 
v2: Trivial: Removed unused variable (Alex) Signed-off-by: Christian König Acked-by: Nayan Deshmukh Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 4 +--- drivers/gpu/drm/scheduler/gpu_scheduler.c | 36 ++++++++++++++++++++----------- include/drm/gpu_scheduler.h | 5 ++--- 3 files changed, 26 insertions(+), 19 deletions(-) (limited to 'include/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index df6965761046..02d563cfb4a7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -394,7 +394,6 @@ void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx, { int i; struct amdgpu_device *adev = ctx->adev; - struct drm_sched_rq *rq; struct drm_sched_entity *entity; struct amdgpu_ring *ring; enum drm_sched_priority ctx_prio; @@ -407,12 +406,11 @@ void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx, for (i = 0; i < adev->num_rings; i++) { ring = adev->rings[i]; entity = &ctx->rings[i].entity; - rq = &ring->sched.sched_rq[ctx_prio]; if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ) continue; - drm_sched_entity_set_rq(entity, rq); + drm_sched_entity_set_priority(entity, ctx_prio); } } diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c index 08fa5b65acaf..695a9643f046 100644 --- a/drivers/gpu/drm/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c @@ -416,29 +416,39 @@ static void drm_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb } /** - * drm_sched_entity_set_rq - Sets the run queue for an entity + * drm_sched_entity_set_rq_priority - helper for drm_sched_entity_set_priority + */ +static void drm_sched_entity_set_rq_priority(struct drm_sched_rq **rq, + enum drm_sched_priority priority) +{ + *rq = &(*rq)->sched->sched_rq[priority]; +} + +/** + * drm_sched_entity_set_priority - Sets priority of the entity * * @entity: scheduler entity - * @rq: scheduler run queue + * @priority: scheduler priority * - * Sets the run queue for an entity and removes the entity from the previous - * run queue in which was present. + * Update the priority of runqueus used for the entity. 
*/ -void drm_sched_entity_set_rq(struct drm_sched_entity *entity, - struct drm_sched_rq *rq) +void drm_sched_entity_set_priority(struct drm_sched_entity *entity, + enum drm_sched_priority priority) { - if (entity->rq == rq) - return; - - BUG_ON(!rq); + unsigned int i; spin_lock(&entity->rq_lock); + + for (i = 0; i < entity->num_rq_list; ++i) + drm_sched_entity_set_rq_priority(&entity->rq_list[i], priority); + drm_sched_rq_remove_entity(entity->rq, entity); - entity->rq = rq; - drm_sched_rq_add_entity(rq, entity); + drm_sched_entity_set_rq_priority(&entity->rq, priority); + drm_sched_rq_add_entity(entity->rq, entity); + spin_unlock(&entity->rq_lock); } -EXPORT_SYMBOL(drm_sched_entity_set_rq); +EXPORT_SYMBOL(drm_sched_entity_set_priority); /** * drm_sched_dependency_optimized diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h index 0c4cfe689d4c..22c0f88f7d8f 100644 --- a/include/drm/gpu_scheduler.h +++ b/include/drm/gpu_scheduler.h @@ -298,9 +298,8 @@ void drm_sched_entity_fini(struct drm_sched_entity *entity); void drm_sched_entity_destroy(struct drm_sched_entity *entity); void drm_sched_entity_push_job(struct drm_sched_job *sched_job, struct drm_sched_entity *entity); -void drm_sched_entity_set_rq(struct drm_sched_entity *entity, - struct drm_sched_rq *rq); - +void drm_sched_entity_set_priority(struct drm_sched_entity *entity, + enum drm_sched_priority priority); struct drm_sched_fence *drm_sched_fence_create( struct drm_sched_entity *s_entity, void *owner); void drm_sched_fence_scheduled(struct drm_sched_fence *fence); -- cgit v1.2.3 From 620e762f9a984fc3f77cd6f757581a21605ce125 Mon Sep 17 00:00:00 2001 From: Christian König Date: Mon, 6 Aug 2018 14:25:32 +0200 Subject: drm/scheduler: move entity handling into separate file MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is complex enough on it's own. Move it into a separate C file. Signed-off-by: Christian König Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/scheduler/Makefile | 2 +- drivers/gpu/drm/scheduler/gpu_scheduler.c | 441 +--------------------------- drivers/gpu/drm/scheduler/sched_entity.c | 459 ++++++++++++++++++++++++++++++ include/drm/gpu_scheduler.h | 28 +- 4 files changed, 484 insertions(+), 446 deletions(-) create mode 100644 drivers/gpu/drm/scheduler/sched_entity.c (limited to 'include/drm') diff --git a/drivers/gpu/drm/scheduler/Makefile b/drivers/gpu/drm/scheduler/Makefile index 7665883f81d4..f23785d4b3c8 100644 --- a/drivers/gpu/drm/scheduler/Makefile +++ b/drivers/gpu/drm/scheduler/Makefile @@ -20,6 +20,6 @@ # OTHER DEALINGS IN THE SOFTWARE. # # -gpu-sched-y := gpu_scheduler.o sched_fence.o +gpu-sched-y := gpu_scheduler.o sched_fence.o sched_entity.o obj-$(CONFIG_DRM_SCHED) += gpu-sched.o diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c index 85c1f95752cc..9ca741f3a0bc 100644 --- a/drivers/gpu/drm/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c @@ -58,8 +58,6 @@ #define to_drm_sched_job(sched_job) \ container_of((sched_job), struct drm_sched_job, queue_node) -static bool drm_sched_entity_is_ready(struct drm_sched_entity *entity); -static void drm_sched_wakeup(struct drm_gpu_scheduler *sched); static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb); /** @@ -86,8 +84,8 @@ static void drm_sched_rq_init(struct drm_gpu_scheduler *sched, * * Adds a scheduler entity to the run queue. 
*/ -static void drm_sched_rq_add_entity(struct drm_sched_rq *rq, - struct drm_sched_entity *entity) +void drm_sched_rq_add_entity(struct drm_sched_rq *rq, + struct drm_sched_entity *entity) { if (!list_empty(&entity->list)) return; @@ -104,8 +102,8 @@ static void drm_sched_rq_add_entity(struct drm_sched_rq *rq, * * Removes a scheduler entity from the run queue. */ -static void drm_sched_rq_remove_entity(struct drm_sched_rq *rq, - struct drm_sched_entity *entity) +void drm_sched_rq_remove_entity(struct drm_sched_rq *rq, + struct drm_sched_entity *entity) { if (list_empty(&entity->list)) return; @@ -158,301 +156,6 @@ drm_sched_rq_select_entity(struct drm_sched_rq *rq) return NULL; } -/** - * drm_sched_entity_init - Init a context entity used by scheduler when - * submit to HW ring. - * - * @entity: scheduler entity to init - * @rq_list: the list of run queue on which jobs from this - * entity can be submitted - * @num_rq_list: number of run queue in rq_list - * @guilty: atomic_t set to 1 when a job on this queue - * is found to be guilty causing a timeout - * - * Note: the rq_list should have atleast one element to schedule - * the entity - * - * Returns 0 on success or a negative error code on failure. -*/ -int drm_sched_entity_init(struct drm_sched_entity *entity, - struct drm_sched_rq **rq_list, - unsigned int num_rq_list, - atomic_t *guilty) -{ - int i; - - if (!(entity && rq_list && num_rq_list > 0 && rq_list[0])) - return -EINVAL; - - memset(entity, 0, sizeof(struct drm_sched_entity)); - INIT_LIST_HEAD(&entity->list); - entity->rq = rq_list[0]; - entity->guilty = guilty; - entity->num_rq_list = num_rq_list; - entity->rq_list = kcalloc(num_rq_list, sizeof(struct drm_sched_rq *), - GFP_KERNEL); - if (!entity->rq_list) - return -ENOMEM; - - for (i = 0; i < num_rq_list; ++i) - entity->rq_list[i] = rq_list[i]; - entity->last_scheduled = NULL; - - spin_lock_init(&entity->rq_lock); - spsc_queue_init(&entity->job_queue); - - atomic_set(&entity->fence_seq, 0); - entity->fence_context = dma_fence_context_alloc(2); - - return 0; -} -EXPORT_SYMBOL(drm_sched_entity_init); - -/** - * drm_sched_entity_is_idle - Check if entity is idle - * - * @entity: scheduler entity - * - * Returns true if the entity does not have any unscheduled jobs. - */ -static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity) -{ - rmb(); - - if (list_empty(&entity->list) || - spsc_queue_peek(&entity->job_queue) == NULL) - return true; - - return false; -} - -/** - * drm_sched_entity_is_ready - Check if entity is ready - * - * @entity: scheduler entity - * - * Return true if entity could provide a job. - */ -static bool drm_sched_entity_is_ready(struct drm_sched_entity *entity) -{ - if (spsc_queue_peek(&entity->job_queue) == NULL) - return false; - - if (READ_ONCE(entity->dependency)) - return false; - - return true; -} - -/** - * drm_sched_entity_get_free_sched - Get the rq from rq_list with least load - * - * @entity: scheduler entity - * - * Return the pointer to the rq with least load. 
- */ -static struct drm_sched_rq * -drm_sched_entity_get_free_sched(struct drm_sched_entity *entity) -{ - struct drm_sched_rq *rq = NULL; - unsigned int min_jobs = UINT_MAX, num_jobs; - int i; - - for (i = 0; i < entity->num_rq_list; ++i) { - num_jobs = atomic_read(&entity->rq_list[i]->sched->num_jobs); - if (num_jobs < min_jobs) { - min_jobs = num_jobs; - rq = entity->rq_list[i]; - } - } - - return rq; -} - -static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f, - struct dma_fence_cb *cb) -{ - struct drm_sched_job *job = container_of(cb, struct drm_sched_job, - finish_cb); - drm_sched_fence_finished(job->s_fence); - WARN_ON(job->s_fence->parent); - dma_fence_put(&job->s_fence->finished); - job->sched->ops->free_job(job); -} - - -/** - * drm_sched_entity_flush - Flush a context entity - * - * @entity: scheduler entity - * @timeout: time to wait in for Q to become empty in jiffies. - * - * Splitting drm_sched_entity_fini() into two functions, The first one does the waiting, - * removes the entity from the runqueue and returns an error when the process was killed. - * - * Returns the remaining time in jiffies left from the input timeout - */ -long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout) -{ - struct drm_gpu_scheduler *sched; - struct task_struct *last_user; - long ret = timeout; - - sched = entity->rq->sched; - /** - * The client will not queue more IBs during this fini, consume existing - * queued IBs or discard them on SIGKILL - */ - if (current->flags & PF_EXITING) { - if (timeout) - ret = wait_event_timeout( - sched->job_scheduled, - drm_sched_entity_is_idle(entity), - timeout); - } else - wait_event_killable(sched->job_scheduled, drm_sched_entity_is_idle(entity)); - - - /* For killed process disable any more IBs enqueue right now */ - last_user = cmpxchg(&entity->last_user, current->group_leader, NULL); - if ((!last_user || last_user == current->group_leader) && - (current->flags & PF_EXITING) && (current->exit_code == SIGKILL)) - drm_sched_rq_remove_entity(entity->rq, entity); - - return ret; -} -EXPORT_SYMBOL(drm_sched_entity_flush); - -/** - * drm_sched_entity_cleanup - Destroy a context entity - * - * @entity: scheduler entity - * - * This should be called after @drm_sched_entity_do_release. It goes over the - * entity and signals all jobs with an error code if the process was killed. - * - */ -void drm_sched_entity_fini(struct drm_sched_entity *entity) -{ - struct drm_gpu_scheduler *sched; - - sched = entity->rq->sched; - drm_sched_rq_remove_entity(entity->rq, entity); - - /* Consumption of existing IBs wasn't completed. Forcefully - * remove them here. - */ - if (spsc_queue_peek(&entity->job_queue)) { - struct drm_sched_job *job; - int r; - - /* Park the kernel for a moment to make sure it isn't processing - * our enity. 
- */ - kthread_park(sched->thread); - kthread_unpark(sched->thread); - if (entity->dependency) { - dma_fence_remove_callback(entity->dependency, - &entity->cb); - dma_fence_put(entity->dependency); - entity->dependency = NULL; - } - - while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) { - struct drm_sched_fence *s_fence = job->s_fence; - drm_sched_fence_scheduled(s_fence); - dma_fence_set_error(&s_fence->finished, -ESRCH); - - /* - * When pipe is hanged by older entity, new entity might - * not even have chance to submit it's first job to HW - * and so entity->last_scheduled will remain NULL - */ - if (!entity->last_scheduled) { - drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb); - } else { - r = dma_fence_add_callback(entity->last_scheduled, &job->finish_cb, - drm_sched_entity_kill_jobs_cb); - if (r == -ENOENT) - drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb); - else if (r) - DRM_ERROR("fence add callback failed (%d)\n", r); - } - } - } - - dma_fence_put(entity->last_scheduled); - entity->last_scheduled = NULL; - kfree(entity->rq_list); -} -EXPORT_SYMBOL(drm_sched_entity_fini); - -/** - * drm_sched_entity_fini - Destroy a context entity - * - * @entity: scheduler entity - * - * Calls drm_sched_entity_do_release() and drm_sched_entity_cleanup() - */ -void drm_sched_entity_destroy(struct drm_sched_entity *entity) -{ - drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY); - drm_sched_entity_fini(entity); -} -EXPORT_SYMBOL(drm_sched_entity_destroy); - -static void drm_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb) -{ - struct drm_sched_entity *entity = - container_of(cb, struct drm_sched_entity, cb); - entity->dependency = NULL; - dma_fence_put(f); - drm_sched_wakeup(entity->rq->sched); -} - -static void drm_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb) -{ - struct drm_sched_entity *entity = - container_of(cb, struct drm_sched_entity, cb); - entity->dependency = NULL; - dma_fence_put(f); -} - -/** - * drm_sched_entity_set_rq_priority - helper for drm_sched_entity_set_priority - */ -static void drm_sched_entity_set_rq_priority(struct drm_sched_rq **rq, - enum drm_sched_priority priority) -{ - *rq = &(*rq)->sched->sched_rq[priority]; -} - -/** - * drm_sched_entity_set_priority - Sets priority of the entity - * - * @entity: scheduler entity - * @priority: scheduler priority - * - * Update the priority of runqueus used for the entity. 
- */ -void drm_sched_entity_set_priority(struct drm_sched_entity *entity, - enum drm_sched_priority priority) -{ - unsigned int i; - - spin_lock(&entity->rq_lock); - - for (i = 0; i < entity->num_rq_list; ++i) - drm_sched_entity_set_rq_priority(&entity->rq_list[i], priority); - - drm_sched_rq_remove_entity(entity->rq, entity); - drm_sched_entity_set_rq_priority(&entity->rq, priority); - drm_sched_rq_add_entity(entity->rq, entity); - - spin_unlock(&entity->rq_lock); -} -EXPORT_SYMBOL(drm_sched_entity_set_priority); - /** * drm_sched_dependency_optimized * @@ -479,140 +182,6 @@ bool drm_sched_dependency_optimized(struct dma_fence* fence, } EXPORT_SYMBOL(drm_sched_dependency_optimized); -static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity) -{ - struct drm_gpu_scheduler *sched = entity->rq->sched; - struct dma_fence * fence = entity->dependency; - struct drm_sched_fence *s_fence; - - if (fence->context == entity->fence_context || - fence->context == entity->fence_context + 1) { - /* - * Fence is a scheduled/finished fence from a job - * which belongs to the same entity, we can ignore - * fences from ourself - */ - dma_fence_put(entity->dependency); - return false; - } - - s_fence = to_drm_sched_fence(fence); - if (s_fence && s_fence->sched == sched) { - - /* - * Fence is from the same scheduler, only need to wait for - * it to be scheduled - */ - fence = dma_fence_get(&s_fence->scheduled); - dma_fence_put(entity->dependency); - entity->dependency = fence; - if (!dma_fence_add_callback(fence, &entity->cb, - drm_sched_entity_clear_dep)) - return true; - - /* Ignore it when it is already scheduled */ - dma_fence_put(fence); - return false; - } - - if (!dma_fence_add_callback(entity->dependency, &entity->cb, - drm_sched_entity_wakeup)) - return true; - - dma_fence_put(entity->dependency); - return false; -} - -static struct drm_sched_job * -drm_sched_entity_pop_job(struct drm_sched_entity *entity) -{ - struct drm_gpu_scheduler *sched = entity->rq->sched; - struct drm_sched_job *sched_job = to_drm_sched_job( - spsc_queue_peek(&entity->job_queue)); - - if (!sched_job) - return NULL; - - while ((entity->dependency = sched->ops->dependency(sched_job, entity))) { - if (drm_sched_entity_add_dependency_cb(entity)) { - - trace_drm_sched_job_wait_dep(sched_job, entity->dependency); - return NULL; - } - } - - /* skip jobs from entity that marked guilty */ - if (entity->guilty && atomic_read(entity->guilty)) - dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED); - - dma_fence_put(entity->last_scheduled); - entity->last_scheduled = dma_fence_get(&sched_job->s_fence->finished); - - spsc_queue_pop(&entity->job_queue); - return sched_job; -} - -/** - * drm_sched_entity_select_rq - select a new rq for the entity - * - * @entity: scheduler entity - * - * Check all prerequisites and select a new rq for the entity for load - * balancing. 
- */ -static void drm_sched_entity_select_rq(struct drm_sched_entity *entity) -{ - struct dma_fence *fence; - struct drm_sched_rq *rq; - - if (!spsc_queue_count(&entity->job_queue) == 0 || - entity->num_rq_list <= 1) - return; - - fence = READ_ONCE(entity->last_scheduled); - if (fence && !dma_fence_is_signaled(fence)) - return; - - rq = drm_sched_entity_get_free_sched(entity); - spin_lock(&entity->rq_lock); - drm_sched_rq_remove_entity(entity->rq, entity); - entity->rq = rq; - spin_unlock(&entity->rq_lock); -} - -/** - * drm_sched_entity_push_job - Submit a job to the entity's job queue - * - * @sched_job: job to submit - * @entity: scheduler entity - * - * Note: To guarantee that the order of insertion to queue matches - * the job's fence sequence number this function should be - * called with drm_sched_job_init under common lock. - * - * Returns 0 for success, negative error code otherwise. - */ -void drm_sched_entity_push_job(struct drm_sched_job *sched_job, - struct drm_sched_entity *entity) -{ - bool first; - - trace_drm_sched_job(sched_job, entity); - atomic_inc(&entity->rq->sched->num_jobs); - WRITE_ONCE(entity->last_user, current->group_leader); - first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node); - - /* first job wakes up scheduler */ - if (first) { - /* Add the entity to the run queue */ - spin_lock(&entity->rq_lock); - drm_sched_rq_add_entity(entity->rq, entity); - spin_unlock(&entity->rq_lock); - drm_sched_wakeup(entity->rq->sched); - } -} -EXPORT_SYMBOL(drm_sched_entity_push_job); - /* job_finish is called after hw fence signaled */ static void drm_sched_job_finish(struct work_struct *work) @@ -840,7 +409,7 @@ static bool drm_sched_ready(struct drm_gpu_scheduler *sched) * @sched: scheduler instance * */ -static void drm_sched_wakeup(struct drm_gpu_scheduler *sched) +void drm_sched_wakeup(struct drm_gpu_scheduler *sched) { if (drm_sched_ready(sched)) wake_up_interruptible(&sched->wake_up_worker); diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c new file mode 100644 index 000000000000..1053f27af9df --- /dev/null +++ b/drivers/gpu/drm/scheduler/sched_entity.c @@ -0,0 +1,459 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include +#include + +#include "gpu_scheduler_trace.h" + +#define to_drm_sched_job(sched_job) \ + container_of((sched_job), struct drm_sched_job, queue_node) + +/** + * drm_sched_entity_init - Init a context entity used by scheduler when + * submit to HW ring. + * + * @entity: scheduler entity to init + * @rq_list: the list of run queue on which jobs from this + * entity can be submitted + * @num_rq_list: number of run queue in rq_list + * @guilty: atomic_t set to 1 when a job on this queue + * is found to be guilty causing a timeout + * + * Note: the rq_list should have atleast one element to schedule + * the entity + * + * Returns 0 on success or a negative error code on failure. +*/ +int drm_sched_entity_init(struct drm_sched_entity *entity, + struct drm_sched_rq **rq_list, + unsigned int num_rq_list, + atomic_t *guilty) +{ + int i; + + if (!(entity && rq_list && num_rq_list > 0 && rq_list[0])) + return -EINVAL; + + memset(entity, 0, sizeof(struct drm_sched_entity)); + INIT_LIST_HEAD(&entity->list); + entity->rq = rq_list[0]; + entity->guilty = guilty; + entity->num_rq_list = num_rq_list; + entity->rq_list = kcalloc(num_rq_list, sizeof(struct drm_sched_rq *), + GFP_KERNEL); + if (!entity->rq_list) + return -ENOMEM; + + for (i = 0; i < num_rq_list; ++i) + entity->rq_list[i] = rq_list[i]; + entity->last_scheduled = NULL; + + spin_lock_init(&entity->rq_lock); + spsc_queue_init(&entity->job_queue); + + atomic_set(&entity->fence_seq, 0); + entity->fence_context = dma_fence_context_alloc(2); + + return 0; +} +EXPORT_SYMBOL(drm_sched_entity_init); + +/** + * drm_sched_entity_is_idle - Check if entity is idle + * + * @entity: scheduler entity + * + * Returns true if the entity does not have any unscheduled jobs. + */ +static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity) +{ + rmb(); + + if (list_empty(&entity->list) || + spsc_queue_peek(&entity->job_queue) == NULL) + return true; + + return false; +} + +/** + * drm_sched_entity_is_ready - Check if entity is ready + * + * @entity: scheduler entity + * + * Return true if entity could provide a job. + */ +bool drm_sched_entity_is_ready(struct drm_sched_entity *entity) +{ + if (spsc_queue_peek(&entity->job_queue) == NULL) + return false; + + if (READ_ONCE(entity->dependency)) + return false; + + return true; +} + +/** + * drm_sched_entity_get_free_sched - Get the rq from rq_list with least load + * + * @entity: scheduler entity + * + * Return the pointer to the rq with least load. + */ +static struct drm_sched_rq * +drm_sched_entity_get_free_sched(struct drm_sched_entity *entity) +{ + struct drm_sched_rq *rq = NULL; + unsigned int min_jobs = UINT_MAX, num_jobs; + int i; + + for (i = 0; i < entity->num_rq_list; ++i) { + num_jobs = atomic_read(&entity->rq_list[i]->sched->num_jobs); + if (num_jobs < min_jobs) { + min_jobs = num_jobs; + rq = entity->rq_list[i]; + } + } + + return rq; +} + +static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f, + struct dma_fence_cb *cb) +{ + struct drm_sched_job *job = container_of(cb, struct drm_sched_job, + finish_cb); + drm_sched_fence_finished(job->s_fence); + WARN_ON(job->s_fence->parent); + dma_fence_put(&job->s_fence->finished); + job->sched->ops->free_job(job); +} + + +/** + * drm_sched_entity_flush - Flush a context entity + * + * @entity: scheduler entity + * @timeout: time to wait in for Q to become empty in jiffies. 
+ * + * Splitting drm_sched_entity_fini() into two functions, The first one does the waiting, + * removes the entity from the runqueue and returns an error when the process was killed. + * + * Returns the remaining time in jiffies left from the input timeout + */ +long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout) +{ + struct drm_gpu_scheduler *sched; + struct task_struct *last_user; + long ret = timeout; + + sched = entity->rq->sched; + /** + * The client will not queue more IBs during this fini, consume existing + * queued IBs or discard them on SIGKILL + */ + if (current->flags & PF_EXITING) { + if (timeout) + ret = wait_event_timeout( + sched->job_scheduled, + drm_sched_entity_is_idle(entity), + timeout); + } else { + wait_event_killable(sched->job_scheduled, + drm_sched_entity_is_idle(entity)); + } + + /* For killed process disable any more IBs enqueue right now */ + last_user = cmpxchg(&entity->last_user, current->group_leader, NULL); + if ((!last_user || last_user == current->group_leader) && + (current->flags & PF_EXITING) && (current->exit_code == SIGKILL)) + drm_sched_rq_remove_entity(entity->rq, entity); + + return ret; +} +EXPORT_SYMBOL(drm_sched_entity_flush); + +/** + * drm_sched_entity_cleanup - Destroy a context entity + * + * @entity: scheduler entity + * + * This should be called after @drm_sched_entity_do_release. It goes over the + * entity and signals all jobs with an error code if the process was killed. + * + */ +void drm_sched_entity_fini(struct drm_sched_entity *entity) +{ + struct drm_gpu_scheduler *sched; + + sched = entity->rq->sched; + drm_sched_rq_remove_entity(entity->rq, entity); + + /* Consumption of existing IBs wasn't completed. Forcefully + * remove them here. + */ + if (spsc_queue_peek(&entity->job_queue)) { + struct drm_sched_job *job; + int r; + + /* Park the kernel for a moment to make sure it isn't processing + * our enity. 
+ */ + kthread_park(sched->thread); + kthread_unpark(sched->thread); + if (entity->dependency) { + dma_fence_remove_callback(entity->dependency, + &entity->cb); + dma_fence_put(entity->dependency); + entity->dependency = NULL; + } + + while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) { + struct drm_sched_fence *s_fence = job->s_fence; + drm_sched_fence_scheduled(s_fence); + dma_fence_set_error(&s_fence->finished, -ESRCH); + + /* + * When pipe is hanged by older entity, new entity might + * not even have chance to submit it's first job to HW + * and so entity->last_scheduled will remain NULL + */ + if (!entity->last_scheduled) { + drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb); + } else { + r = dma_fence_add_callback(entity->last_scheduled, &job->finish_cb, + drm_sched_entity_kill_jobs_cb); + if (r == -ENOENT) + drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb); + else if (r) + DRM_ERROR("fence add callback failed (%d)\n", r); + } + } + } + + dma_fence_put(entity->last_scheduled); + entity->last_scheduled = NULL; + kfree(entity->rq_list); +} +EXPORT_SYMBOL(drm_sched_entity_fini); + +/** + * drm_sched_entity_fini - Destroy a context entity + * + * @entity: scheduler entity + * + * Calls drm_sched_entity_do_release() and drm_sched_entity_cleanup() + */ +void drm_sched_entity_destroy(struct drm_sched_entity *entity) +{ + drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY); + drm_sched_entity_fini(entity); +} +EXPORT_SYMBOL(drm_sched_entity_destroy); + +static void drm_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb) +{ + struct drm_sched_entity *entity = + container_of(cb, struct drm_sched_entity, cb); + entity->dependency = NULL; + dma_fence_put(f); + drm_sched_wakeup(entity->rq->sched); +} + +static void drm_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb) +{ + struct drm_sched_entity *entity = + container_of(cb, struct drm_sched_entity, cb); + entity->dependency = NULL; + dma_fence_put(f); +} + +/** + * drm_sched_entity_set_rq_priority - helper for drm_sched_entity_set_priority + */ +static void drm_sched_entity_set_rq_priority(struct drm_sched_rq **rq, + enum drm_sched_priority priority) +{ + *rq = &(*rq)->sched->sched_rq[priority]; +} + +/** + * drm_sched_entity_set_priority - Sets priority of the entity + * + * @entity: scheduler entity + * @priority: scheduler priority + * + * Update the priority of runqueus used for the entity. 
+ */ +void drm_sched_entity_set_priority(struct drm_sched_entity *entity, + enum drm_sched_priority priority) +{ + unsigned int i; + + spin_lock(&entity->rq_lock); + + for (i = 0; i < entity->num_rq_list; ++i) + drm_sched_entity_set_rq_priority(&entity->rq_list[i], priority); + + drm_sched_rq_remove_entity(entity->rq, entity); + drm_sched_entity_set_rq_priority(&entity->rq, priority); + drm_sched_rq_add_entity(entity->rq, entity); + + spin_unlock(&entity->rq_lock); +} +EXPORT_SYMBOL(drm_sched_entity_set_priority); + +static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity) +{ + struct drm_gpu_scheduler *sched = entity->rq->sched; + struct dma_fence * fence = entity->dependency; + struct drm_sched_fence *s_fence; + + if (fence->context == entity->fence_context || + fence->context == entity->fence_context + 1) { + /* + * Fence is a scheduled/finished fence from a job + * which belongs to the same entity, we can ignore + * fences from ourself + */ + dma_fence_put(entity->dependency); + return false; + } + + s_fence = to_drm_sched_fence(fence); + if (s_fence && s_fence->sched == sched) { + + /* + * Fence is from the same scheduler, only need to wait for + * it to be scheduled + */ + fence = dma_fence_get(&s_fence->scheduled); + dma_fence_put(entity->dependency); + entity->dependency = fence; + if (!dma_fence_add_callback(fence, &entity->cb, + drm_sched_entity_clear_dep)) + return true; + + /* Ignore it when it is already scheduled */ + dma_fence_put(fence); + return false; + } + + if (!dma_fence_add_callback(entity->dependency, &entity->cb, + drm_sched_entity_wakeup)) + return true; + + dma_fence_put(entity->dependency); + return false; +} + +struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity) +{ + struct drm_gpu_scheduler *sched = entity->rq->sched; + struct drm_sched_job *sched_job = to_drm_sched_job( + spsc_queue_peek(&entity->job_queue)); + + if (!sched_job) + return NULL; + + while ((entity->dependency = sched->ops->dependency(sched_job, entity))) { + if (drm_sched_entity_add_dependency_cb(entity)) { + + trace_drm_sched_job_wait_dep(sched_job, entity->dependency); + return NULL; + } + } + + /* skip jobs from entity that marked guilty */ + if (entity->guilty && atomic_read(entity->guilty)) + dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED); + + dma_fence_put(entity->last_scheduled); + entity->last_scheduled = dma_fence_get(&sched_job->s_fence->finished); + + spsc_queue_pop(&entity->job_queue); + return sched_job; +} + +/** + * drm_sched_entity_select_rq - select a new rq for the entity + * + * @entity: scheduler entity + * + * Check all prerequisites and select a new rq for the entity for load + * balancing. 
+ */ +void drm_sched_entity_select_rq(struct drm_sched_entity *entity) +{ + struct dma_fence *fence; + struct drm_sched_rq *rq; + + if (!spsc_queue_count(&entity->job_queue) == 0 || + entity->num_rq_list <= 1) + return; + + fence = READ_ONCE(entity->last_scheduled); + if (fence && !dma_fence_is_signaled(fence)) + return; + + rq = drm_sched_entity_get_free_sched(entity); + spin_lock(&entity->rq_lock); + drm_sched_rq_remove_entity(entity->rq, entity); + entity->rq = rq; + spin_unlock(&entity->rq_lock); +} + +/** + * drm_sched_entity_push_job - Submit a job to the entity's job queue + * + * @sched_job: job to submit + * @entity: scheduler entity + * + * Note: To guarantee that the order of insertion to queue matches + * the job's fence sequence number this function should be + * called with drm_sched_job_init under common lock. + * + * Returns 0 for success, negative error code otherwise. + */ +void drm_sched_entity_push_job(struct drm_sched_job *sched_job, + struct drm_sched_entity *entity) +{ + bool first; + + trace_drm_sched_job(sched_job, entity); + atomic_inc(&entity->rq->sched->num_jobs); + WRITE_ONCE(entity->last_user, current->group_leader); + first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node); + + /* first job wakes up scheduler */ + if (first) { + /* Add the entity to the run queue */ + spin_lock(&entity->rq_lock); + drm_sched_rq_add_entity(entity->rq, entity); + spin_unlock(&entity->rq_lock); + drm_sched_wakeup(entity->rq->sched); + } +} +EXPORT_SYMBOL(drm_sched_entity_push_job); diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h index 22c0f88f7d8f..919ae572f775 100644 --- a/include/drm/gpu_scheduler.h +++ b/include/drm/gpu_scheduler.h @@ -288,6 +288,21 @@ int drm_sched_init(struct drm_gpu_scheduler *sched, uint32_t hw_submission, unsigned hang_limit, long timeout, const char *name); void drm_sched_fini(struct drm_gpu_scheduler *sched); +int drm_sched_job_init(struct drm_sched_job *job, + struct drm_sched_entity *entity, + void *owner); +void drm_sched_wakeup(struct drm_gpu_scheduler *sched); +void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, + struct drm_sched_job *job); +void drm_sched_job_recovery(struct drm_gpu_scheduler *sched); +bool drm_sched_dependency_optimized(struct dma_fence* fence, + struct drm_sched_entity *entity); +void drm_sched_job_kickout(struct drm_sched_job *s_job); + +void drm_sched_rq_add_entity(struct drm_sched_rq *rq, + struct drm_sched_entity *entity); +void drm_sched_rq_remove_entity(struct drm_sched_rq *rq, + struct drm_sched_entity *entity); int drm_sched_entity_init(struct drm_sched_entity *entity, struct drm_sched_rq **rq_list, @@ -296,22 +311,17 @@ int drm_sched_entity_init(struct drm_sched_entity *entity, long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout); void drm_sched_entity_fini(struct drm_sched_entity *entity); void drm_sched_entity_destroy(struct drm_sched_entity *entity); +void drm_sched_entity_select_rq(struct drm_sched_entity *entity); +struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity); void drm_sched_entity_push_job(struct drm_sched_job *sched_job, struct drm_sched_entity *entity); void drm_sched_entity_set_priority(struct drm_sched_entity *entity, enum drm_sched_priority priority); +bool drm_sched_entity_is_ready(struct drm_sched_entity *entity); + struct drm_sched_fence *drm_sched_fence_create( struct drm_sched_entity *s_entity, void *owner); void drm_sched_fence_scheduled(struct drm_sched_fence *fence); void 
drm_sched_fence_finished(struct drm_sched_fence *fence); -int drm_sched_job_init(struct drm_sched_job *job, - struct drm_sched_entity *entity, - void *owner); -void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, - struct drm_sched_job *job); -void drm_sched_job_recovery(struct drm_gpu_scheduler *sched); -bool drm_sched_dependency_optimized(struct dma_fence* fence, - struct drm_sched_entity *entity); -void drm_sched_job_kickout(struct drm_sched_job *s_job); #endif -- cgit v1.2.3 From 62347a33001c27b22465361aa4adcaa432497bdf Mon Sep 17 00:00:00 2001 From: Andrey Grodzovsky Date: Fri, 17 Aug 2018 10:32:50 -0400 Subject: drm/scheduler: Add stopped flag to drm_sched_entity The flag will prevent another thread from same process to reinsert the entity queue into scheduler's rq after it was already removewd from there by another thread during drm_sched_entity_flush. Signed-off-by: Andrey Grodzovsky Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/scheduler/sched_entity.c | 12 +++++++++++- include/drm/gpu_scheduler.h | 2 ++ 2 files changed, 13 insertions(+), 1 deletion(-) (limited to 'include/drm') diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c index 1416edb2642a..812e3530ea25 100644 --- a/drivers/gpu/drm/scheduler/sched_entity.c +++ b/drivers/gpu/drm/scheduler/sched_entity.c @@ -177,8 +177,12 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout) /* For killed process disable any more IBs enqueue right now */ last_user = cmpxchg(&entity->last_user, current->group_leader, NULL); if ((!last_user || last_user == current->group_leader) && - (current->flags & PF_EXITING) && (current->exit_code == SIGKILL)) + (current->flags & PF_EXITING) && (current->exit_code == SIGKILL)) { + spin_lock(&entity->rq_lock); + entity->stopped = true; drm_sched_rq_remove_entity(entity->rq, entity); + spin_unlock(&entity->rq_lock); + } return ret; } @@ -504,6 +508,12 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job, if (first) { /* Add the entity to the run queue */ spin_lock(&entity->rq_lock); + if (entity->stopped) { + spin_unlock(&entity->rq_lock); + + DRM_ERROR("Trying to push to a killed entity\n"); + return; + } drm_sched_rq_add_entity(entity->rq, entity); spin_unlock(&entity->rq_lock); drm_sched_wakeup(entity->rq->sched); diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h index 919ae572f775..daec50f887b3 100644 --- a/include/drm/gpu_scheduler.h +++ b/include/drm/gpu_scheduler.h @@ -70,6 +70,7 @@ enum drm_sched_priority { * @fini_status: contains the exit status in case the process was signalled. * @last_scheduled: points to the finished fence of the last scheduled job. * @last_user: last group leader pushing a job into the entity. + * @stopped: Marks the enity as removed from rq and destined for termination. * * Entities will emit jobs in order to their corresponding hardware * ring, and the scheduler will alternate between entities based on @@ -92,6 +93,7 @@ struct drm_sched_entity { atomic_t *guilty; struct dma_fence *last_scheduled; struct task_struct *last_user; + bool stopped; }; /** -- cgit v1.2.3 From 8c7655a0fdd32ab39cfef604403dbe1013df213b Mon Sep 17 00:00:00 2001 From: Christian König Date: Mon, 6 Aug 2018 16:46:26 +0800 Subject: drm/ttm: add helper structures for bulk moves on lru list MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add bulk move pos to store the pointer of first and last buffer object. 
The list in between will be bulk moved on lru list. Signed-off-by: Christian König Signed-off-by: Huang Rui Tested-by: Mike Lothian Tested-by: Dieter Nützel Acked-by: Chunming Zhou Reviewed-by: Junwei Zhang Signed-off-by: Alex Deucher --- include/drm/ttm/ttm_bo_driver.h | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) (limited to 'include/drm') diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 3234cc322e70..e4fee8e02559 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -490,6 +490,34 @@ struct ttm_bo_device { bool no_retry; }; +/** + * struct ttm_lru_bulk_move_pos + * + * @first: first BO in the bulk move range + * @last: last BO in the bulk move range + * + * Positions for a lru bulk move. + */ +struct ttm_lru_bulk_move_pos { + struct ttm_buffer_object *first; + struct ttm_buffer_object *last; +}; + +/** + * struct ttm_lru_bulk_move + * + * @tt: first/last lru entry for BOs in the TT domain + * @vram: first/last lru entry for BOs in the VRAM domain + * @swap: first/last lru entry for BOs on the swap list + * + * Helper structure for bulk moves on the LRU list. + */ +struct ttm_lru_bulk_move { + struct ttm_lru_bulk_move_pos tt[TTM_MAX_BO_PRIORITY]; + struct ttm_lru_bulk_move_pos vram[TTM_MAX_BO_PRIORITY]; + struct ttm_lru_bulk_move_pos swap[TTM_MAX_BO_PRIORITY]; +}; + /** * ttm_flag_masked * -- cgit v1.2.3 From 9a2779528eddacf0123bfd7308b71141b54cc619 Mon Sep 17 00:00:00 2001 From: Christian König Date: Mon, 6 Aug 2018 17:05:30 +0800 Subject: drm/ttm: revise ttm_bo_move_to_lru_tail to support bulk moves MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When move a BO to the end of LRU, it need remember the BO positions. Make sure all moved bo in between "first" and "last". And they will be bulk moving together. 
Signed-off-by: Christian König Signed-off-by: Huang Rui Tested-by: Mike Lothian Tested-by: Dieter Nützel Acked-by: Chunming Zhou Reviewed-by: Junwei Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 8 ++++---- drivers/gpu/drm/ttm/ttm_bo.c | 26 +++++++++++++++++++++++++- include/drm/ttm/ttm_bo_api.h | 6 +++++- 3 files changed, 34 insertions(+), 6 deletions(-) (limited to 'include/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 7d7d7e532246..d12bffa5f70c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -297,9 +297,9 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, if (bo->parent) { spin_lock(&glob->lru_lock); - ttm_bo_move_to_lru_tail(&bo->tbo); + ttm_bo_move_to_lru_tail(&bo->tbo, NULL); if (bo->shadow) - ttm_bo_move_to_lru_tail(&bo->shadow->tbo); + ttm_bo_move_to_lru_tail(&bo->shadow->tbo, NULL); spin_unlock(&glob->lru_lock); } @@ -319,9 +319,9 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, if (!bo->parent) continue; - ttm_bo_move_to_lru_tail(&bo->tbo); + ttm_bo_move_to_lru_tail(&bo->tbo, NULL); if (bo->shadow) - ttm_bo_move_to_lru_tail(&bo->shadow->tbo); + ttm_bo_move_to_lru_tail(&bo->shadow->tbo, NULL); } spin_unlock(&glob->lru_lock); diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 7c484729f9b2..7117b6b1e223 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -214,12 +214,36 @@ void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo) } EXPORT_SYMBOL(ttm_bo_del_sub_from_lru); -void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo) +static void ttm_bo_bulk_move_set_pos(struct ttm_lru_bulk_move_pos *pos, + struct ttm_buffer_object *bo) +{ + if (!pos->first) + pos->first = bo; + pos->last = bo; +} + +void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo, + struct ttm_lru_bulk_move *bulk) { reservation_object_assert_held(bo->resv); ttm_bo_del_from_lru(bo); ttm_bo_add_to_lru(bo); + + if (bulk && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) { + switch (bo->mem.mem_type) { + case TTM_PL_TT: + ttm_bo_bulk_move_set_pos(&bulk->tt[bo->priority], bo); + break; + + case TTM_PL_VRAM: + ttm_bo_bulk_move_set_pos(&bulk->vram[bo->priority], bo); + break; + } + if (bo->ttm && !(bo->ttm->page_flags & + (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED))) + ttm_bo_bulk_move_set_pos(&bulk->swap[bo->priority], bo); + } } EXPORT_SYMBOL(ttm_bo_move_to_lru_tail); diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index a01ba2032f0e..0d4eb81423ee 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -51,6 +51,8 @@ struct ttm_placement; struct ttm_place; +struct ttm_lru_bulk_move; + /** * struct ttm_bus_placement * @@ -405,12 +407,14 @@ void ttm_bo_del_from_lru(struct ttm_buffer_object *bo); * ttm_bo_move_to_lru_tail * * @bo: The buffer object. + * @bulk: optional bulk move structure to remember BO positions * * Move this BO to the tail of all lru lists used to lookup and reserve an * object. This function must be called with struct ttm_bo_global::lru_lock * held, and is used to make a BO less likely to be considered for eviction. 
*/ -void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo); +void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo, + struct ttm_lru_bulk_move *bulk); /** * ttm_bo_lock_delayed_workqueue -- cgit v1.2.3 From 7748e2dcdaad901776c0d78e76e066403e95513c Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Mon, 6 Aug 2018 17:28:35 +0800 Subject: drm/ttm: add bulk move function on LRU MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This function allow us to bulk move a group of BOs to the tail of their LRU. The positions of group of BOs are stored on the (first, last) bulk_move_pos structure. Signed-off-by: Christian König Signed-off-by: Huang Rui Tested-by: Mike Lothian Tested-by: Dieter Nützel Acked-by: Chunming Zhou Reviewed-by: Junwei Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/ttm/ttm_bo.c | 52 ++++++++++++++++++++++++++++++++++++++++++++ include/drm/ttm/ttm_bo_api.h | 10 +++++++++ 2 files changed, 62 insertions(+) (limited to 'include/drm') diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 7117b6b1e223..39d9d559b279 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -247,6 +247,58 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo, } EXPORT_SYMBOL(ttm_bo_move_to_lru_tail); +static void ttm_bo_bulk_move_helper(struct ttm_lru_bulk_move_pos *pos, + struct list_head *lru, bool is_swap) +{ + struct list_head entries, before; + struct list_head *list1, *list2; + + list1 = is_swap ? &pos->last->swap : &pos->last->lru; + list2 = is_swap ? pos->first->swap.prev : pos->first->lru.prev; + + list_cut_position(&entries, lru, list1); + list_cut_position(&before, &entries, list2); + list_splice(&before, lru); + list_splice_tail(&entries, lru); +} + +void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk) +{ + unsigned i; + + for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) { + struct ttm_mem_type_manager *man; + + if (!bulk->tt[i].first) + continue; + + man = &bulk->tt[i].first->bdev->man[TTM_PL_TT]; + ttm_bo_bulk_move_helper(&bulk->tt[i], &man->lru[i], false); + } + + for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) { + struct ttm_mem_type_manager *man; + + if (!bulk->vram[i].first) + continue; + + man = &bulk->vram[i].first->bdev->man[TTM_PL_VRAM]; + ttm_bo_bulk_move_helper(&bulk->vram[i], &man->lru[i], false); + } + + for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) { + struct ttm_lru_bulk_move_pos *pos = &bulk->swap[i]; + struct list_head *lru; + + if (!pos->first) + continue; + + lru = &pos->first->bdev->glob->swap_lru[i]; + ttm_bo_bulk_move_helper(&bulk->swap[i], lru, true); + } +} +EXPORT_SYMBOL(ttm_bo_bulk_move_lru_tail); + static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem, bool evict, struct ttm_operation_ctx *ctx) diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index 0d4eb81423ee..8c19470785e2 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -416,6 +416,16 @@ void ttm_bo_del_from_lru(struct ttm_buffer_object *bo); void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo, struct ttm_lru_bulk_move *bulk); +/** + * ttm_bo_bulk_move_lru_tail + * + * @bulk: bulk move structure + * + * Bulk move BOs to the LRU tail, only valid to use when driver makes sure that + * BO order never changes. Should be called with ttm_bo_global::lru_lock held. 
+ */ +void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk); + /** * ttm_bo_lock_delayed_workqueue * -- cgit v1.2.3 From be9699e3923000ea32c2f4522e1e4de333d21d47 Mon Sep 17 00:00:00 2001 From: Likun Gao Date: Tue, 10 Jul 2018 20:10:05 +0800 Subject: drm/amdgpu: add picasso to asic_type enum Add picasso to amd_asic_type enum and amdgpu_asic_name[]. Signed-off-by: Likun Gao Reviewed-by: Alex Deucher Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 + include/drm/amd_asic_type.h | 1 + 2 files changed, 2 insertions(+) (limited to 'include/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index d4855d1ef51f..e8083ec3fbc2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -89,6 +89,7 @@ static const char *amdgpu_asic_name[] = { "VEGA12", "VEGA20", "RAVEN", + "PICASSO", "LAST", }; diff --git a/include/drm/amd_asic_type.h b/include/drm/amd_asic_type.h index dd63d08cc54e..5644fc679d6f 100644 --- a/include/drm/amd_asic_type.h +++ b/include/drm/amd_asic_type.h @@ -49,6 +49,7 @@ enum amd_asic_type { CHIP_VEGA12, CHIP_VEGA20, CHIP_RAVEN, + CHIP_PICASSO, CHIP_LAST, }; -- cgit v1.2.3 From 741deade2a704a434bd5939118c43d38e9ddac25 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 13 Sep 2018 15:41:57 -0500 Subject: drm/amdgpu: simplify Raven, Raven2, and Picasso handling Treat them all as Raven rather than adding a new picasso asic type. This simplifies a lot of code and also handles the case of rv2 chips with the 0x15d8 pci id. It also fixes dmcu fw handling for picasso. Acked-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 10 +--- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 3 +- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 1 - drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 1 - drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 7 +-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 4 +- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 32 ++--------- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 4 -- drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 11 ++-- drivers/gpu/drm/amd/amdgpu/psp_v10_0.c | 5 +- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 11 +--- drivers/gpu/drm/amd/amdgpu/soc15.c | 66 ++++++++++------------ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 8 +-- drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 1 - .../gpu/drm/amd/powerplay/hwmgr/processpptables.c | 8 +-- include/drm/amd_asic_type.h | 1 - 16 files changed, 60 insertions(+), 113 deletions(-) (limited to 'include/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 762dc5f886cd..354f0557d697 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -91,7 +91,6 @@ static const char *amdgpu_asic_name[] = { "VEGA12", "VEGA20", "RAVEN", - "PICASSO", "LAST", }; @@ -1337,12 +1336,11 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev) case CHIP_RAVEN: if (adev->rev_id >= 8) chip_name = "raven2"; + else if (adev->pdev->device == 0x15d8) + chip_name = "picasso"; else chip_name = "raven"; break; - case CHIP_PICASSO: - chip_name = "picasso"; - break; } snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name); @@ -1468,8 +1466,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) case CHIP_VEGA12: case CHIP_VEGA20: case CHIP_RAVEN: - case CHIP_PICASSO: - if ((adev->asic_type == CHIP_RAVEN) || (adev->asic_type == 
CHIP_PICASSO)) + if (adev->asic_type == CHIP_RAVEN) adev->family = AMDGPU_FAMILY_RV; else adev->family = AMDGPU_FAMILY_AI; @@ -2183,7 +2180,6 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type) case CHIP_VEGA20: #if defined(CONFIG_DRM_AMD_DC_DCN1_0) case CHIP_RAVEN: - case CHIP_PICASSO: #endif return amdgpu_dc != 0; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 33e1856fb8cc..ff10df4f50d3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -874,8 +874,7 @@ static const struct pci_device_id pciidlist[] = { {0x1002, 0x66AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20}, /* Raven */ {0x1002, 0x15dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU}, - /* Picasso */ - {0x1002, 0x15d8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PICASSO|AMD_IS_APU}, + {0x1002, 0x15d8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU}, {0, 0, 0} }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 611c06d3600a..bd397d2916fb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -56,7 +56,6 @@ static int psp_sw_init(void *handle) psp_v3_1_set_psp_funcs(psp); break; case CHIP_RAVEN: - case CHIP_PICASSO: psp_v10_0_set_psp_funcs(psp); break; case CHIP_VEGA20: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c index acb4c66fe89b..1fa8bc337859 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c @@ -303,7 +303,6 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type) return AMDGPU_FW_LOAD_SMU; case CHIP_VEGA10: case CHIP_RAVEN: - case CHIP_PICASSO: case CHIP_VEGA12: case CHIP_VEGA20: if (!load_type) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index a74498ce87ff..a73674f9a0f5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -63,14 +63,13 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) switch (adev->asic_type) { case CHIP_RAVEN: - if (adev->rev_id >= 8) + if (adev->rev_id >= 8) fw_name = FIRMWARE_RAVEN2; + else if (adev->pdev->device == 0x15d8) + fw_name = FIRMWARE_PICASSO; else fw_name = FIRMWARE_RAVEN; break; - case CHIP_PICASSO: - fw_name = FIRMWARE_PICASSO; - break; default: return -EINVAL; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 7a9ffe9eb8bb..a7f9aaa47c49 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2981,7 +2981,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & AMDGPU_VM_USE_CPU_FOR_COMPUTE); - if (adev->asic_type == CHIP_RAVEN || adev->asic_type == CHIP_PICASSO) + if (adev->asic_type == CHIP_RAVEN) vm->pte_support_ats = true; } else { vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & @@ -3073,7 +3073,7 @@ error_free_sched_entity: */ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned int pasid) { - bool pte_support_ats = (adev->asic_type == CHIP_RAVEN || adev->asic_type == CHIP_PICASSO); + bool pte_support_ats = (adev->asic_type == CHIP_RAVEN); int r; r = amdgpu_bo_reserve(vm->root.base.bo, true); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 4991ae00a4ca..75a91663019f 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -277,7 +277,6 @@ static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] = #define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042 #define VEGA12_GB_ADDR_CONFIG_GOLDEN 0x24104041 #define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042 -#define PICASSO_GB_ADDR_CONFIG_GOLDEN 0x24000042 #define RAVEN2_GB_ADDR_CONFIG_GOLDEN 0x26013041 static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev); @@ -329,14 +328,6 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev) golden_settings_gc_9_1_rv1, ARRAY_SIZE(golden_settings_gc_9_1_rv1)); break; - case CHIP_PICASSO: - soc15_program_register_sequence(adev, - golden_settings_gc_9_1, - ARRAY_SIZE(golden_settings_gc_9_1)); - soc15_program_register_sequence(adev, - golden_settings_gc_9_1_rv1, - ARRAY_SIZE(golden_settings_gc_9_1_rv1)); - break; default: break; } @@ -617,12 +608,11 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev) case CHIP_RAVEN: if (adev->rev_id >= 8) chip_name = "raven2"; + else if (adev->pdev->device == 0x15d8) + chip_name = "picasso"; else chip_name = "raven"; break; - case CHIP_PICASSO: - chip_name = "picasso"; - break; default: BUG(); } @@ -1076,7 +1066,7 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev) amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); } - if (adev->asic_type == CHIP_RAVEN || adev->asic_type == CHIP_PICASSO) { + if (adev->asic_type == CHIP_RAVEN) { /* TODO: double check the cp_table_size for RV */ adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */ r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size, @@ -1328,14 +1318,6 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) else gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN; break; - case CHIP_PICASSO: - adev->gfx.config.max_hw_contexts = 8; - adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; - adev->gfx.config.sc_prim_fifo_size_backend = 0x100; - adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; - adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0; - gb_addr_config = PICASSO_GB_ADDR_CONFIG_GOLDEN; - break; default: BUG(); break; @@ -1614,7 +1596,6 @@ static int gfx_v9_0_sw_init(void *handle) case CHIP_VEGA12: case CHIP_VEGA20: case CHIP_RAVEN: - case CHIP_PICASSO: adev->gfx.mec.num_mec = 2; break; default: @@ -1776,7 +1757,7 @@ static int gfx_v9_0_sw_fini(void *handle) amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, &adev->gfx.rlc.clear_state_gpu_addr, (void **)&adev->gfx.rlc.cs_ptr); - if ((adev->asic_type == CHIP_RAVEN) || (adev->asic_type == CHIP_PICASSO)) { + if (adev->asic_type == CHIP_RAVEN) { amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, &adev->gfx.rlc.cp_table_gpu_addr, (void **)&adev->gfx.rlc.cp_table_ptr); @@ -2442,7 +2423,7 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev) return r; } - if (adev->asic_type == CHIP_RAVEN || adev->asic_type == CHIP_PICASSO) { + if (adev->asic_type == CHIP_RAVEN) { if (amdgpu_lbpw != 0) gfx_v9_0_enable_lbpw(adev, true); else @@ -3846,7 +3827,6 @@ static int gfx_v9_0_set_powergating_state(void *handle, switch (adev->asic_type) { case CHIP_RAVEN: - case CHIP_PICASSO: if (!enable) { amdgpu_gfx_off_ctrl(adev, false); cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work); @@ -3901,7 +3881,6 @@ static int gfx_v9_0_set_clockgating_state(void *handle, case CHIP_VEGA12: case CHIP_VEGA20: case CHIP_RAVEN: - case CHIP_PICASSO: gfx_v9_0_update_gfx_clock_gating(adev, state == AMD_CG_STATE_GATE ? 
true : false); break; @@ -4911,7 +4890,6 @@ static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev) case CHIP_VEGA12: case CHIP_VEGA20: case CHIP_RAVEN: - case CHIP_PICASSO: adev->gfx.rlc.funcs = &gfx_v9_0_rlc_funcs; break; default: diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 0ad1586c293f..aad3c7c5fb3a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -846,7 +846,6 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev) adev->gmc.gart_size = 512ULL << 20; break; case CHIP_RAVEN: /* DCE SG support */ - case CHIP_PICASSO: /* DCE SG support */ adev->gmc.gart_size = 1024ULL << 20; break; } @@ -935,7 +934,6 @@ static int gmc_v9_0_sw_init(void *handle) adev->gmc.vram_type = amdgpu_atomfirmware_get_vram_type(adev); switch (adev->asic_type) { case CHIP_RAVEN: - case CHIP_PICASSO: if (adev->rev_id == 0x0 || adev->rev_id == 0x1) { amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); } else { @@ -1062,7 +1060,6 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev) case CHIP_VEGA12: break; case CHIP_RAVEN: - case CHIP_PICASSO: soc15_program_register_sequence(adev, golden_settings_athub_1_0_0, ARRAY_SIZE(golden_settings_athub_1_0_0)); @@ -1097,7 +1094,6 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev) switch (adev->asic_type) { case CHIP_RAVEN: - case CHIP_PICASSO: mmhub_v1_0_update_power_gating(adev, true); break; default: diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index 2a126c6950c7..80698b5ffa4a 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -412,7 +412,7 @@ static void mmhub_v1_0_update_medium_grain_clock_gating(struct amdgpu_device *ad def = data = RREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG); - if (adev->asic_type != CHIP_RAVEN && adev->asic_type != CHIP_PICASSO) { + if (adev->asic_type != CHIP_RAVEN) { def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2); def2 = data2 = RREG32_SOC15(MMHUB, 0, mmDAGB1_CNTL_MISC2); } else @@ -428,7 +428,7 @@ static void mmhub_v1_0_update_medium_grain_clock_gating(struct amdgpu_device *ad DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK | DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK); - if (adev->asic_type != CHIP_RAVEN && adev->asic_type != CHIP_PICASSO) + if (adev->asic_type != CHIP_RAVEN) data2 &= ~(DAGB1_CNTL_MISC2__DISABLE_WRREQ_CG_MASK | DAGB1_CNTL_MISC2__DISABLE_WRRET_CG_MASK | DAGB1_CNTL_MISC2__DISABLE_RDREQ_CG_MASK | @@ -445,7 +445,7 @@ static void mmhub_v1_0_update_medium_grain_clock_gating(struct amdgpu_device *ad DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK | DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK); - if (adev->asic_type != CHIP_RAVEN && adev->asic_type != CHIP_PICASSO) + if (adev->asic_type != CHIP_RAVEN) data2 |= (DAGB1_CNTL_MISC2__DISABLE_WRREQ_CG_MASK | DAGB1_CNTL_MISC2__DISABLE_WRRET_CG_MASK | DAGB1_CNTL_MISC2__DISABLE_RDREQ_CG_MASK | @@ -458,13 +458,13 @@ static void mmhub_v1_0_update_medium_grain_clock_gating(struct amdgpu_device *ad WREG32_SOC15(MMHUB, 0, mmATC_L2_MISC_CG, data); if (def1 != data1) { - if (adev->asic_type != CHIP_RAVEN && adev->asic_type != CHIP_PICASSO) + if (adev->asic_type != CHIP_RAVEN) WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2, data1); else WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_RV, data1); } - if (adev->asic_type != CHIP_RAVEN && adev->asic_type != CHIP_PICASSO && def2 != data2) + if (adev->asic_type != CHIP_RAVEN && def2 != data2) WREG32_SOC15(MMHUB, 0, mmDAGB1_CNTL_MISC2, data2); } @@ 
-528,7 +528,6 @@ int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev, case CHIP_VEGA12: case CHIP_VEGA20: case CHIP_RAVEN: - case CHIP_PICASSO: mmhub_v1_0_update_medium_grain_clock_gating(adev, state == AMD_CG_STATE_GATE ? true : false); athub_update_medium_grain_clock_gating(adev, diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c index 2cfd1bb559dd..295c2205485a 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c @@ -121,12 +121,11 @@ static int psp_v10_0_init_microcode(struct psp_context *psp) case CHIP_RAVEN: if (adev->rev_id >= 0x8) chip_name = "raven2"; + else if (adev->pdev->device == 0x15d8) + chip_name = "picasso"; else chip_name = "raven"; break; - case CHIP_PICASSO: - chip_name = "picasso"; - break; default: BUG(); } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 75be0b9ed2c0..2ea1f0d8f5be 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -229,7 +229,6 @@ static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev) ARRAY_SIZE(golden_settings_sdma1_4_2)); break; case CHIP_RAVEN: - case CHIP_PICASSO: soc15_program_register_sequence(adev, golden_settings_sdma_4_1, ARRAY_SIZE(golden_settings_sdma_4_1)); @@ -283,12 +282,11 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev) case CHIP_RAVEN: if (adev->rev_id >= 8) chip_name = "raven2"; + else if (adev->pdev->device == 0x15d8) + chip_name = "picasso"; else chip_name = "raven"; break; - case CHIP_PICASSO: - chip_name = "picasso"; - break; default: BUG(); } @@ -869,7 +867,6 @@ static void sdma_v4_0_init_pg(struct amdgpu_device *adev) switch (adev->asic_type) { case CHIP_RAVEN: - case CHIP_PICASSO: sdma_v4_1_init_power_gating(adev); sdma_v4_1_update_power_gating(adev, true); break; @@ -1277,7 +1274,7 @@ static int sdma_v4_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (adev->asic_type == CHIP_RAVEN || adev->asic_type == CHIP_PICASSO) + if (adev->asic_type == CHIP_RAVEN) adev->sdma.num_instances = 1; else adev->sdma.num_instances = 2; @@ -1620,7 +1617,6 @@ static int sdma_v4_0_set_clockgating_state(void *handle, case CHIP_VEGA12: case CHIP_VEGA20: case CHIP_RAVEN: - case CHIP_PICASSO: sdma_v4_0_update_medium_grain_clock_gating(adev, state == AMD_CG_STATE_GATE ? true : false); sdma_v4_0_update_medium_grain_light_sleep(adev, @@ -1639,7 +1635,6 @@ static int sdma_v4_0_set_powergating_state(void *handle, switch (adev->asic_type) { case CHIP_RAVEN: - case CHIP_PICASSO: sdma_v4_1_update_power_gating(adev, state == AMD_PG_STATE_GATE ? 
true : false); break; diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index f930e09071d4..c4daf1f93486 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -491,7 +491,6 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) case CHIP_VEGA10: case CHIP_VEGA12: case CHIP_RAVEN: - case CHIP_PICASSO: vega10_reg_base_init(adev); break; case CHIP_VEGA20: @@ -546,7 +545,6 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block); break; case CHIP_RAVEN: - case CHIP_PICASSO: amdgpu_device_ip_block_add(adev, &vega10_common_ip_block); amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block); amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); @@ -698,6 +696,13 @@ static int soc15_common_early_init(void *handle) break; case CHIP_RAVEN: if (adev->rev_id >= 0x8) + adev->external_rev_id = adev->rev_id + 0x81; + else if (adev->pdev->device == 0x15d8) + adev->external_rev_id = adev->rev_id + 0x41; + else + adev->external_rev_id = 0x1; + + if (adev->rev_id >= 0x8) { adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS | AMD_CG_SUPPORT_GFX_CP_LS | @@ -713,7 +718,27 @@ static int soc15_common_early_init(void *handle) AMD_CG_SUPPORT_SDMA_MGCG | AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_VCN_MGCG; - else + + adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN; + } else if (adev->pdev->device == 0x15d8) { + adev->cg_flags = AMD_CG_SUPPORT_GFX_MGLS | + AMD_CG_SUPPORT_GFX_CP_LS | + AMD_CG_SUPPORT_GFX_3D_CGCG | + AMD_CG_SUPPORT_GFX_3D_CGLS | + AMD_CG_SUPPORT_GFX_CGCG | + AMD_CG_SUPPORT_GFX_CGLS | + AMD_CG_SUPPORT_BIF_LS | + AMD_CG_SUPPORT_HDP_LS | + AMD_CG_SUPPORT_ROM_MGCG | + AMD_CG_SUPPORT_MC_MGCG | + AMD_CG_SUPPORT_MC_LS | + AMD_CG_SUPPORT_SDMA_MGCG | + AMD_CG_SUPPORT_SDMA_LS; + + adev->pg_flags = AMD_PG_SUPPORT_SDMA | + AMD_PG_SUPPORT_MMHUB | + AMD_PG_SUPPORT_VCN; + } else { adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS | AMD_CG_SUPPORT_GFX_RLC_LS | @@ -735,43 +760,13 @@ static int soc15_common_early_init(void *handle) AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_VCN_MGCG; - adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN; - - if (adev->powerplay.pp_feature & PP_GFXOFF_MASK) - adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG | - AMD_PG_SUPPORT_CP | - AMD_PG_SUPPORT_RLC_SMU_HS; - - if (adev->rev_id >= 0x8) - adev->external_rev_id = adev->rev_id + 0x81; - else - adev->external_rev_id = 0x1; - break; - case CHIP_PICASSO: - adev->cg_flags = AMD_CG_SUPPORT_GFX_MGLS | - AMD_CG_SUPPORT_GFX_CP_LS | - AMD_CG_SUPPORT_GFX_3D_CGCG | - AMD_CG_SUPPORT_GFX_3D_CGLS | - AMD_CG_SUPPORT_GFX_CGCG | - AMD_CG_SUPPORT_GFX_CGLS | - AMD_CG_SUPPORT_BIF_LS | - AMD_CG_SUPPORT_HDP_LS | - AMD_CG_SUPPORT_ROM_MGCG | - AMD_CG_SUPPORT_MC_MGCG | - AMD_CG_SUPPORT_MC_LS | - AMD_CG_SUPPORT_SDMA_MGCG | - AMD_CG_SUPPORT_SDMA_LS; - - adev->pg_flags = AMD_PG_SUPPORT_SDMA | - AMD_PG_SUPPORT_MMHUB | - AMD_PG_SUPPORT_VCN; + adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN; + } if (adev->powerplay.pp_feature & PP_GFXOFF_MASK) adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG | AMD_PG_SUPPORT_CP | AMD_PG_SUPPORT_RLC_SMU_HS; - - adev->external_rev_id = adev->rev_id + 0x41; break; default: /* FIXME: not supported yet */ @@ -973,7 +968,6 @@ static int soc15_common_set_clockgating_state(void *handle, state == AMD_CG_STATE_GATE ? true : false); break; case CHIP_RAVEN: - case CHIP_PICASSO: adev->nbio_funcs->update_medium_grain_clock_gating(adev, state == AMD_CG_STATE_GATE ? 
true : false); adev->nbio_funcs->update_medium_grain_light_sleep(adev, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 985c6291dbfd..47c3453c688a 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -1215,8 +1215,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev) if (adev->asic_type == CHIP_VEGA10 || adev->asic_type == CHIP_VEGA12 || adev->asic_type == CHIP_VEGA20 || - adev->asic_type == CHIP_RAVEN || - adev->asic_type == CHIP_PICASSO) + adev->asic_type == CHIP_RAVEN) client_id = SOC15_IH_CLIENTID_DCE; int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; @@ -1635,7 +1634,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) break; #if defined(CONFIG_DRM_AMD_DC_DCN1_0) case CHIP_RAVEN: - case CHIP_PICASSO: if (dcn10_register_irq_handlers(dm->adev)) { DRM_ERROR("DM: Failed to initialize IRQ\n"); goto fail; @@ -1862,7 +1860,6 @@ static int dm_early_init(void *handle) break; #if defined(CONFIG_DRM_AMD_DC_DCN1_0) case CHIP_RAVEN: - case CHIP_PICASSO: adev->mode_info.num_crtc = 4; adev->mode_info.num_hpd = 4; adev->mode_info.num_dig = 4; @@ -2111,8 +2108,7 @@ static int fill_plane_attributes_from_fb(struct amdgpu_device *adev, if (adev->asic_type == CHIP_VEGA10 || adev->asic_type == CHIP_VEGA12 || adev->asic_type == CHIP_VEGA20 || - adev->asic_type == CHIP_RAVEN || - adev->asic_type == CHIP_PICASSO) { + adev->asic_type == CHIP_RAVEN) { /* Fill GFX9 params */ plane_state->tiling_info.gfx9.num_pipes = adev->gfx.config.gb_addr_config_fields.num_pipes; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index a45578e6504a..7500a3e61dba 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c @@ -171,7 +171,6 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr) case AMDGPU_FAMILY_RV: switch (hwmgr->chip_id) { case CHIP_RAVEN: - case CHIP_PICASSO: hwmgr->od_enabled = false; hwmgr->smumgr_funcs = &smu10_smu_funcs; smu10_init_function_pointers(hwmgr); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c index f6fe9ce793ad..77c14671866c 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c @@ -832,7 +832,7 @@ static const ATOM_PPLIB_POWERPLAYTABLE *get_powerplay_table( uint16_t size; if (!table_addr) { - if (hwmgr->chip_id == CHIP_RAVEN || hwmgr->chip_id == CHIP_PICASSO) { + if (hwmgr->chip_id == CHIP_RAVEN) { table_addr = &soft_dummy_pp_table[0]; hwmgr->soft_pp_table = &soft_dummy_pp_table[0]; hwmgr->soft_pp_table_size = sizeof(soft_dummy_pp_table); @@ -1055,7 +1055,7 @@ static int init_overdrive_limits(struct pp_hwmgr *hwmgr, hwmgr->platform_descriptor.maxOverdriveVDDC = 0; hwmgr->platform_descriptor.overdriveVDDCStep = 0; - if (hwmgr->chip_id == CHIP_RAVEN || hwmgr->chip_id == CHIP_PICASSO) + if (hwmgr->chip_id == CHIP_RAVEN) return 0; /* We assume here that fw_info is unchanged if this call fails.*/ @@ -1595,7 +1595,7 @@ static int pp_tables_initialize(struct pp_hwmgr *hwmgr) int result; const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table; - if (hwmgr->chip_id == CHIP_RAVEN || hwmgr->chip_id == CHIP_PICASSO) + if (hwmgr->chip_id == CHIP_RAVEN) return 0; hwmgr->need_pp_table_upload = true; @@ -1644,7 +1644,7 @@ static int pp_tables_initialize(struct pp_hwmgr *hwmgr) static int 
pp_tables_uninitialize(struct pp_hwmgr *hwmgr) { - if (hwmgr->chip_id == CHIP_RAVEN || hwmgr->chip_id == CHIP_PICASSO) + if (hwmgr->chip_id == CHIP_RAVEN) return 0; kfree(hwmgr->dyn_state.vddc_dependency_on_sclk); diff --git a/include/drm/amd_asic_type.h b/include/drm/amd_asic_type.h index 5644fc679d6f..dd63d08cc54e 100644 --- a/include/drm/amd_asic_type.h +++ b/include/drm/amd_asic_type.h @@ -49,7 +49,6 @@ enum amd_asic_type { CHIP_VEGA12, CHIP_VEGA20, CHIP_RAVEN, - CHIP_PICASSO, CHIP_LAST, }; -- cgit v1.2.3 From 57078338b2e4c07fb76bef23369cae0acfb1f7bc Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 18 Sep 2018 16:20:18 +1000 Subject: drm: fix drm_drv_uses_atomic_modeset on non modesetting drivers. vgem seems to oops on the intel CI due to the vgem debugfs init hitting this path now. Check that dev->mode_config.funcs is set before dereferencing one of its members. Signed-off-by: Dave Airlie Signed-off-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20180918062018.24942-1-airlied@gmail.com --- include/drm/drm_drv.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/drm') diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h index 46a8009784df..152b3055e9e1 100644 --- a/include/drm/drm_drv.h +++ b/include/drm/drm_drv.h @@ -675,7 +675,7 @@ static inline bool drm_core_check_feature(struct drm_device *dev, int feature) static inline bool drm_drv_uses_atomic_modeset(struct drm_device *dev) { return drm_core_check_feature(dev, DRIVER_ATOMIC) || - dev->mode_config.funcs->atomic_commit != NULL; + (dev->mode_config.funcs && dev->mode_config.funcs->atomic_commit != NULL); } -- cgit v1.2.3
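
For readability, the helper touched by the last patch reads as follows once the one-line hunk above is applied; this is the same change re-wrapped across lines, with a comment added to spell out why the extra check is needed (drivers such as vgem never populate dev->mode_config.funcs, so the pointer has to be checked before its atomic_commit member is inspected):

static inline bool drm_drv_uses_atomic_modeset(struct drm_device *dev)
{
	/* Non-modesetting drivers (e.g. vgem) never set mode_config.funcs,
	 * so check the pointer before looking at atomic_commit. */
	return drm_core_check_feature(dev, DRIVER_ATOMIC) ||
	       (dev->mode_config.funcs &&
	        dev->mode_config.funcs->atomic_commit != NULL);
}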
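
The Picasso changes earlier in this log repeat one idiom across amdgpu_vcn.c, gfx_v9_0.c, psp_v10_0.c and sdma_v4_0.c: with the CHIP_PICASSO enum removed, Picasso is handled under CHIP_RAVEN and told apart from Raven and Raven2 by revision and PCI device ID. A minimal sketch of that idiom follows, assuming a hypothetical stand-alone helper; the function name and its return strings are illustrative, while the rev_id >= 8 and device 0x15d8 checks come straight from the hunks above.

/*
 * Hypothetical helper, for illustration only: it mirrors the variant
 * selection repeated in the CHIP_RAVEN cases of the series above.
 * rev_id >= 8 identifies Raven2; PCI device ID 0x15d8 identifies Picasso;
 * everything else is plain Raven.
 */
static const char *raven_variant_name(struct amdgpu_device *adev)
{
	if (adev->rev_id >= 8)
		return "raven2";
	else if (adev->pdev->device == 0x15d8)
		return "picasso";
	else
		return "raven";
}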