From 90b69cdc5f159be09d799257661e119a26ea4233 Mon Sep 17 00:00:00 2001
From: Christian König
Date: Fri, 29 Nov 2019 12:44:07 +0100
Subject: drm/amdgpu: stop adding VM updates fences to the resv obj
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Don't add the VM update fences to the resv object and remove the
handling to stop implicitly syncing to them.

Ongoing updates prevent page tables from being evicted and we manually
block for all updates to complete before releasing PDs and PTs.

This way we can do updates even without the resv obj locked.

Signed-off-by: Christian König
Reviewed-by: Felix Kuehling
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 30 ++++++++++++++++++++++++++----
 1 file changed, 26 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 145ea95d977a..81cbb8105a68 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -562,8 +562,8 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
 {
 	entry->priority = 0;
 	entry->tv.bo = &vm->root.base.bo->tbo;
-	/* One for the VM updates, one for TTM and one for the CS job */
-	entry->tv.num_shared = 3;
+	/* One for TTM and one for the CS job */
+	entry->tv.num_shared = 2;
 	entry->user_pages = NULL;
 	list_add(&entry->tv.head, validated);
 }
@@ -2518,6 +2518,11 @@ bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
 	if (!dma_resv_test_signaled_rcu(bo->tbo.base.resv, true))
 		return false;
 
+	/* Don't evict VM page tables while they are updated */
+	if (!dma_fence_is_signaled(bo_base->vm->last_direct) ||
+	    !dma_fence_is_signaled(bo_base->vm->last_delayed))
+		return false;
+
 	return true;
 }
 
@@ -2683,8 +2688,16 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
  */
 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
 {
-	return dma_resv_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv,
-					 true, true, timeout);
+	timeout = dma_resv_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv,
+					    true, true, timeout);
+	if (timeout <= 0)
+		return timeout;
+
+	timeout = dma_fence_wait_timeout(vm->last_direct, true, timeout);
+	if (timeout <= 0)
+		return timeout;
+
+	return dma_fence_wait_timeout(vm->last_delayed, true, timeout);
 }
 
 /**
@@ -2753,6 +2766,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	else
 		vm->update_funcs = &amdgpu_vm_sdma_funcs;
 	vm->last_update = NULL;
+	vm->last_direct = dma_fence_get_stub();
+	vm->last_delayed = dma_fence_get_stub();
 
 	amdgpu_vm_bo_param(adev, vm, adev->vm_manager.root_level, false, &bp);
 	if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE)
@@ -2803,6 +2818,8 @@ error_free_root:
 	vm->root.base.bo = NULL;
 
 error_free_delayed:
+	dma_fence_put(vm->last_direct);
+	dma_fence_put(vm->last_delayed);
 	drm_sched_entity_destroy(&vm->delayed);
 
 error_free_direct:
@@ -3003,6 +3020,11 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 		vm->pasid = 0;
 	}
 
+	dma_fence_wait(vm->last_direct, false);
+	dma_fence_put(vm->last_direct);
+	dma_fence_wait(vm->last_delayed, false);
+	dma_fence_put(vm->last_delayed);
+
 	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
 		if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
 			amdgpu_vm_prt_fini(adev, vm);
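
For context, a minimal sketch (not part of this patch) of how a VM update path
could publish its scheduled fence through vm->last_direct / vm->last_delayed
instead of adding it to the root BO's reservation object. The helper name
amdgpu_vm_track_update_fence and its call site are assumptions made for
illustration; only dma_fence_get()/dma_fence_put() and the last_direct /
last_delayed members touched above come from the source.

/*
 * Illustrative only: hypothetical helper showing the fence bookkeeping
 * this patch relies on.  Both fields start out as the always-signaled
 * stub from dma_fence_get_stub(), so amdgpu_vm_wait_idle() and
 * amdgpu_vm_evictable() see a signaled fence until the first update
 * replaces it.
 */
static void amdgpu_vm_track_update_fence(struct amdgpu_vm *vm,
					 struct dma_fence *fence,
					 bool direct)
{
	struct dma_fence **last = direct ? &vm->last_direct
					 : &vm->last_delayed;

	/* Drop the reference to the previous update (or the stub)... */
	dma_fence_put(*last);
	/* ...and hold a reference to the newly scheduled update. */
	*last = dma_fence_get(fence);
}

Keeping these references in the VM rather than in the resv object is what lets
amdgpu_vm_fini() block with dma_fence_wait() on exactly the last direct and
delayed update before the page tables are released.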