diff options
author | Jack Xiao <Jack.Xiao@amd.com> | 2019-01-23 13:54:26 +0800 |
---|---|---|
committer | Alex Deucher <alexander.deucher@amd.com> | 2019-06-21 18:58:21 -0500 |
commit | 80f8fb9178eda5a16b5ff8e2b2e8304f0a06f5f4 (patch) | |
tree | b80d1686bafa7785872327b61ca436041c0fff63 /drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | |
parent | 6698a3d05fda57f37add68c55a0696bfa7100413 (diff) | |
download | linux-80f8fb9178eda5a16b5ff8e2b2e8304f0a06f5f4.tar.bz2 |
drm/amdgpu: mark the partial job as preempted in mcbp unit test
In mcbp unit test, the test should detect the preempted job which may
be a partial execution ib and mark it as preempted; so that the gfx
block can correctly generate PM4 frame.
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Jack Xiao <Jack.Xiao@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 44 |
1 file changed, 32 insertions(+), 12 deletions(-)
/*
 * amdgpu_ib_preempt_mark_partial_job - flag the job interrupted by a
 * mid-ib preemption (MCBP) so later PM4 frame generation can account
 * for the partially executed IB.
 *
 * @ring: ring that was preempted by the debugfs MCBP unit test
 *
 * Only the GFX ring supports mid-command-buffer preemption, so all
 * other ring types return immediately.  The sequence number is read
 * from the third dword of the ring's fence CPU address
 * (drv->cpu_addr + 2); presumably this slot is where the CP reports
 * the seqno of the preempted job — NOTE(review): confirm against the
 * fence/CP layout for this ASIC.  If that seqno is not newer than the
 * last completed seqno (drv->last_seq), nothing was cut short and
 * there is nothing to mark.
 *
 * Caller context: called from the debugfs ib_preempt path only; the
 * rcu_dereference_protected(*ptr, 1) asserts update-side access
 * without taking RCU read lock — safe here presumably because the
 * test serializes against fence processing (TODO confirm).
 */
static void amdgpu_ib_preempt_mark_partial_job(struct amdgpu_ring *ring)
{
	struct amdgpu_job *job;
	struct drm_sched_job *s_job;
	uint32_t preempt_seq;
	struct dma_fence *fence, **ptr;
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	struct drm_gpu_scheduler *sched = &ring->sched;

	/* MCBP is a GFX-ring-only feature. */
	if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
		return;

	/* Seqno of the job that was in flight when preemption hit. */
	preempt_seq = le32_to_cpu(*(drv->cpu_addr + 2));
	/* Already signalled as complete -> no partial job to mark. */
	if (preempt_seq <= atomic_read(&drv->last_seq))
		return;

	/* Map the seqno into the circular fence array to find its fence. */
	preempt_seq &= drv->num_fences_mask;
	ptr = &drv->fences[preempt_seq];
	fence = rcu_dereference_protected(*ptr, 1);

	/*
	 * Walk the scheduler's in-flight job list under its lock and
	 * tag the job whose hw fence matches the preempted seqno.
	 */
	spin_lock(&sched->job_list_lock);
	list_for_each_entry(s_job, &sched->ring_mirror_list, node) {
		job = to_amdgpu_job(s_job);
		if (job->fence == fence)
			/* mark the job as preempted */
			job->preemption_status |= AMDGPU_IB_PREEMPTED;
	}
	spin_unlock(&sched->job_list_lock);
}
the job as preempted */ - /* job->preemption_status |= - AMDGPU_IB_PREEMPTED; */ - } - /* resubmit unfinished jobs */ amdgpu_ib_preempt_job_recovery(&ring->sched); |