author		Alex Sierra <alex.sierra@amd.com>	2021-10-07 12:04:09 -0500
committer	Alex Deucher <alexander.deucher@amd.com>	2021-10-08 13:20:31 -0400
commit		ec6abe831a843208e99a59adf108adba22166b3f (patch)
tree		2152279f4f059b4d6c7f32924170438154302b2e /drivers/gpu/drm/amd/amdkfd
parent		097cbf2648e08ef7f24d484ce576902d6f86af42 (diff)
drm/amdkfd: rm BO resv on validation to avoid deadlock
This fixes a deadlock with BO reservations during SVM_BO eviction while allocations in VRAM are performed concurrently. More specifically: while TTM waits for a fence to be signaled (ttm_bo_wait), it already has the BO reserved. In parallel, the restore worker may be running, prefetching memory to VRAM; this also requires reserving the BO, but the worker takes the mmap semaphore first. The deadlock happens when the SVM_BO eviction worker kicks in and waits for the mmap semaphore held by the restore worker, which prevents the fence from being signaled, so everything stalls until TTM times out.

We don't need to hold the BO reservation during validation and mapping anymore, since the physical addresses are now taken from hmm_range_fault. We also take the migrate_mutex to prevent range migration while validate_and_map updates the GPU page table.

Signed-off-by: Alex Sierra <alex.sierra@amd.com>
Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Philip Yang <philip.yang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
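The lock cycle is easier to see spelled out. Below is a stand-alone user-space C sketch that simulates the three paths with pthreads; bo_resv, mmap_sem and the fence condition variable are stand-ins for the kernel objects, not real kernel API. Built with `cc demo.c -lpthread`, the three threads wedge in the cycle described above until main() gives up and reports the state.

/*
 * Simulation of the deadlock cycle (illustration only; all names are
 * stand-ins for the kernel primitives, not the real API).
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t bo_resv  = PTHREAD_MUTEX_INITIALIZER; /* BO reservation */
static pthread_mutex_t mmap_sem = PTHREAD_MUTEX_INITIALIZER; /* mmap semaphore */
static pthread_mutex_t fence_lk = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  fence_cv = PTHREAD_COND_INITIALIZER;  /* eviction fence */
static int fence_signaled;

/* Path 1: TTM already holds the BO reservation, then waits for the
 * fence (the ttm_bo_wait() step from the commit message). */
static void *ttm_wait_path(void *arg)
{
	pthread_mutex_lock(&bo_resv);
	pthread_mutex_lock(&fence_lk);
	while (!fence_signaled)
		pthread_cond_wait(&fence_cv, &fence_lk);
	pthread_mutex_unlock(&fence_lk);
	pthread_mutex_unlock(&bo_resv);
	return NULL;
}

/* Path 2: restore worker takes the mmap semaphore first, then needs
 * the BO reservation held by path 1. */
static void *restore_worker(void *arg)
{
	pthread_mutex_lock(&mmap_sem);
	sleep(1);                      /* let path 1 reserve the BO */
	pthread_mutex_lock(&bo_resv);  /* blocks: held by path 1 */
	pthread_mutex_unlock(&bo_resv);
	pthread_mutex_unlock(&mmap_sem);
	return NULL;
}

/* Path 3: SVM_BO eviction worker needs the mmap semaphore (held by
 * path 2) before it can signal the fence path 1 is waiting on. */
static void *svm_bo_evict_worker(void *arg)
{
	sleep(2);                      /* let path 2 take mmap_sem */
	pthread_mutex_lock(&mmap_sem); /* blocks: held by path 2 */
	pthread_mutex_lock(&fence_lk);
	fence_signaled = 1;
	pthread_cond_signal(&fence_cv);
	pthread_mutex_unlock(&fence_lk);
	pthread_mutex_unlock(&mmap_sem);
	return NULL;
}

int main(void)
{
	pthread_t t[3];

	pthread_create(&t[0], NULL, ttm_wait_path, NULL);
	pthread_create(&t[1], NULL, restore_worker, NULL);
	pthread_create(&t[2], NULL, svm_bo_evict_worker, NULL);
	sleep(4);
	pthread_mutex_lock(&fence_lk);	/* free: cond_wait released it */
	printf("deadlocked: fence_signaled=%d\n", fence_signaled);
	pthread_mutex_unlock(&fence_lk);
	return 0;	/* exiting tears down the stuck threads */
}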
Diffstat (limited to 'drivers/gpu/drm/amd/amdkfd')
-rw-r--r--	drivers/gpu/drm/amd/amdkfd/kfd_svm.c	7
1 file changed, 1 insertion(+), 6 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 9d0f65a90002..179080329af8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -1307,7 +1307,7 @@ struct svm_validate_context {
 	struct svm_range *prange;
 	bool intr;
 	unsigned long bitmap[MAX_GPU_INSTANCE];
-	struct ttm_validate_buffer tv[MAX_GPU_INSTANCE+1];
+	struct ttm_validate_buffer tv[MAX_GPU_INSTANCE];
 	struct list_head validate_list;
 	struct ww_acquire_ctx ticket;
 };
@@ -1334,11 +1334,6 @@ static int svm_range_reserve_bos(struct svm_validate_context *ctx)
 		ctx->tv[gpuidx].num_shared = 4;
 		list_add(&ctx->tv[gpuidx].head, &ctx->validate_list);
 	}
-	if (ctx->prange->svm_bo && ctx->prange->ttm_res) {
-		ctx->tv[MAX_GPU_INSTANCE].bo = &ctx->prange->svm_bo->bo->tbo;
-		ctx->tv[MAX_GPU_INSTANCE].num_shared = 1;
-		list_add(&ctx->tv[MAX_GPU_INSTANCE].head, &ctx->validate_list);
-	}
 	r = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->validate_list,
 				   ctx->intr, NULL);
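What replaces the dropped reservation is not visible in this hunk: per the commit message, validation now takes the range's migrate_mutex and gets physical addresses from hmm_range_fault instead. A rough sketch of that intent only (the two helpers below are hypothetical stand-ins, not functions from kfd_svm.c, and the real validate-and-map path does considerably more):

/* Illustrative sketch, not the actual kfd_svm.c code: migration is
 * excluded while the GPU page table is updated, rather than holding
 * the SVM BO reservation across validation and mapping.
 */
static int validate_and_map_sketch(struct mm_struct *mm,
				   struct svm_range *prange)
{
	int r;

	mutex_lock(&prange->migrate_mutex);	/* block concurrent migration */

	/* hypothetical helper: fault pages and collect physical
	 * addresses via hmm_range_fault(), no BO reservation needed
	 */
	r = sketch_hmm_fault_pages(mm, prange);
	if (!r)
		r = sketch_map_to_gpus(prange);	/* hypothetical: update GPU PTEs */

	mutex_unlock(&prange->migrate_mutex);
	return r;
}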