| author | Dave Airlie <airlied@redhat.com> | 2017-12-21 11:17:45 +1000 |
|---|---|---|
| committer | Dave Airlie <airlied@redhat.com> | 2017-12-21 11:17:45 +1000 |
| commit | df2869abd92b740af141ee2eb081bfc69bd80877 | |
| tree | 61088b24d70246d16fd3d3e04d022255076decc5 /drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | |
| parent | 991eecc1c0743be7b942367af36637796e578e08 | |
| parent | 4f4b94ee616500f326650f5b25439f1f7d606ea7 | |
Merge branch 'drm-next-4.16' of git://people.freedesktop.org/~agd5f/linux into drm-next
* 'drm-next-4.16' of git://people.freedesktop.org/~agd5f/linux: (171 commits)
drm/amdgpu: fix test for shadow page tables
drm/amd/display: Expose dpp1_set_cursor_attributes
drm/amd/display: Update FMT and OPPBUF functions
drm/amd/display: check for null before calling is_blanked
drm/amd/display: dal 3.1.27
drm/amd/display: Fix unused variable warnings.
drm/amd/display: Only blank DCN when we have set_blank implementation
drm/amd/display: Put dcn_mi_registers with other structs
drm/amd/display: hubp refactor
drm/amd/display: integrating optc pseudocode
drm/amd/display: Call validate_fbc should_enable_fbc
drm/amd/display: Clean up DCN cursor code
drm/amd/display: fix 180 full screen pipe split
drm/amd/display: reprogram surface config on scaling change
drm/amd/display: Remove dwbc from pipe_ctx
drm/amd/display: Use the maximum link setting which EDP reported.
drm/amd/display: Add hdr_supported flag
drm/amd/display: fix global sync param retrieval when not pipe splitting
drm/amd/display: Update HUBP
drm/amd/display: fix rotated surface scaling
...
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 398 |
1 file changed, 191 insertions, 207 deletions
```diff
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 3ecdbdfb04dd..398abbcbf029 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -148,12 +148,23 @@ struct amdgpu_prt_cb {
 static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
                                       unsigned level)
 {
-        if (level != adev->vm_manager.num_level)
-                return 9 * (adev->vm_manager.num_level - level - 1) +
+        unsigned shift = 0xff;
+
+        switch (level) {
+        case AMDGPU_VM_PDB2:
+        case AMDGPU_VM_PDB1:
+        case AMDGPU_VM_PDB0:
+                shift = 9 * (AMDGPU_VM_PDB0 - level) +
                         adev->vm_manager.block_size;
-        else
-                /* For the page tables on the leaves */
-                return 0;
+                break;
+        case AMDGPU_VM_PTB:
+                shift = 0;
+                break;
+        default:
+                dev_err(adev->dev, "the level%d isn't supported.\n", level);
+        }
+
+        return shift;
 }
 
 /**
@@ -166,12 +177,13 @@ static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
 static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
                                       unsigned level)
 {
-        unsigned shift = amdgpu_vm_level_shift(adev, 0);
+        unsigned shift = amdgpu_vm_level_shift(adev,
+                                               adev->vm_manager.root_level);
 
-        if (level == 0)
+        if (level == adev->vm_manager.root_level)
                 /* For the root directory */
                 return round_up(adev->vm_manager.max_pfn, 1 << shift) >> shift;
-        else if (level != adev->vm_manager.num_level)
+        else if (level != AMDGPU_VM_PTB)
                 /* Everything in between */
                 return 512;
         else
@@ -329,9 +341,6 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
             to >= amdgpu_vm_num_entries(adev, level))
                 return -EINVAL;
 
-        if (to > parent->last_entry_used)
-                parent->last_entry_used = to;
-
         ++level;
         saddr = saddr & ((1 << shift) - 1);
         eaddr = eaddr & ((1 << shift) - 1);
@@ -346,7 +355,7 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
 
         if (vm->pte_support_ats) {
                 init_value = AMDGPU_PTE_DEFAULT_ATC;
-                if (level != adev->vm_manager.num_level - 1)
+                if (level != AMDGPU_VM_PTB)
                         init_value |= AMDGPU_PDE_PTE;
         }
 
@@ -386,10 +395,9 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
                         spin_lock(&vm->status_lock);
                         list_add(&entry->base.vm_status, &vm->relocated);
                         spin_unlock(&vm->status_lock);
-                        entry->addr = 0;
                 }
 
-                if (level < adev->vm_manager.num_level) {
+                if (level < AMDGPU_VM_PTB) {
                         uint64_t sub_saddr = (pt_idx == from) ? saddr : 0;
                         uint64_t sub_eaddr = (pt_idx == to) ? eaddr :
                                 ((1 << shift) - 1);
@@ -435,7 +443,8 @@ int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
         saddr /= AMDGPU_GPU_PAGE_SIZE;
         eaddr /= AMDGPU_GPU_PAGE_SIZE;
 
-        return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr, 0);
+        return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr,
+                                      adev->vm_manager.root_level);
 }
 
 /**
@@ -732,7 +741,7 @@ void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
 
         has_compute_vm_bug = false;
 
-        ip_block = amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
+        ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
         if (ip_block) {
                 /* Compute has a VM bug for GFX version < 7.
                    Compute has a VM bug for GFX 8 MEC firmware version < 673.*/
@@ -1060,162 +1069,52 @@ static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 }
 
 /*
- * amdgpu_vm_update_level - update a single level in the hierarchy
+ * amdgpu_vm_update_pde - update a single level in the hierarchy
  *
- * @adev: amdgpu_device pointer
+ * @param: parameters for the update
  * @vm: requested vm
  * @parent: parent directory
+ * @entry: entry to update
  *
- * Makes sure all entries in @parent are up to date.
- * Returns 0 for success, error for failure.
+ * Makes sure the requested entry in parent is up to date.
  */
-static int amdgpu_vm_update_level(struct amdgpu_device *adev,
-                                  struct amdgpu_vm *vm,
-                                  struct amdgpu_vm_pt *parent)
+static void amdgpu_vm_update_pde(struct amdgpu_pte_update_params *params,
+                                 struct amdgpu_vm *vm,
+                                 struct amdgpu_vm_pt *parent,
+                                 struct amdgpu_vm_pt *entry)
 {
-        struct amdgpu_bo *shadow;
-        struct amdgpu_ring *ring = NULL;
+        struct amdgpu_bo *bo = entry->base.bo, *shadow = NULL, *pbo;
         uint64_t pd_addr, shadow_addr = 0;
-        uint64_t last_pde = ~0, last_pt = ~0, last_shadow = ~0;
-        unsigned count = 0, pt_idx, ndw = 0;
-        struct amdgpu_job *job;
-        struct amdgpu_pte_update_params params;
-        struct dma_fence *fence = NULL;
-        uint32_t incr;
-
-        int r;
+        uint64_t pde, pt, flags;
+        unsigned level;
 
-        if (!parent->entries)
-                return 0;
-
-        memset(&params, 0, sizeof(params));
-        params.adev = adev;
-        shadow = parent->base.bo->shadow;
+        /* Don't update huge pages here */
+        if (entry->huge)
+                return;
 
         if (vm->use_cpu_for_update) {
                 pd_addr = (unsigned long)amdgpu_bo_kptr(parent->base.bo);
-                r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM);
-                if (unlikely(r))
-                        return r;
-
-                params.func = amdgpu_vm_cpu_set_ptes;
         } else {
-                ring = container_of(vm->entity.sched, struct amdgpu_ring,
-                                    sched);
-
-                /* padding, etc. */
-                ndw = 64;
-
-                /* assume the worst case */
-                ndw += parent->last_entry_used * 6;
-
                 pd_addr = amdgpu_bo_gpu_offset(parent->base.bo);
-
-                if (shadow) {
+                shadow = parent->base.bo->shadow;
+                if (shadow)
                         shadow_addr = amdgpu_bo_gpu_offset(shadow);
-                        ndw *= 2;
-                } else {
-                        shadow_addr = 0;
-                }
-
-                r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
-                if (r)
-                        return r;
-
-                params.ib = &job->ibs[0];
-                params.func = amdgpu_vm_do_set_ptes;
         }
 
+        for (level = 0, pbo = parent->base.bo->parent; pbo; ++level)
+                pbo = pbo->parent;
 
-        /* walk over the address space and update the directory */
-        for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) {
-                struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
-                struct amdgpu_bo *bo = entry->base.bo;
-                uint64_t pde, pt;
-
-                if (bo == NULL)
-                        continue;
-
-                spin_lock(&vm->status_lock);
-                list_del_init(&entry->base.vm_status);
-                spin_unlock(&vm->status_lock);
-
-                pt = amdgpu_bo_gpu_offset(bo);
-                pt = amdgpu_gart_get_vm_pde(adev, pt);
-                /* Don't update huge pages here */
-                if ((parent->entries[pt_idx].addr & AMDGPU_PDE_PTE) ||
-                    parent->entries[pt_idx].addr == (pt | AMDGPU_PTE_VALID))
-                        continue;
-
-                parent->entries[pt_idx].addr = pt | AMDGPU_PTE_VALID;
-
-                pde = pd_addr + pt_idx * 8;
-                incr = amdgpu_bo_size(bo);
-                if (((last_pde + 8 * count) != pde) ||
-                    ((last_pt + incr * count) != pt) ||
-                    (count == AMDGPU_VM_MAX_UPDATE_SIZE)) {
-
-                        if (count) {
-                                if (shadow)
-                                        params.func(&params,
-                                                    last_shadow,
-                                                    last_pt, count,
-                                                    incr,
-                                                    AMDGPU_PTE_VALID);
-
-                                params.func(&params, last_pde,
-                                            last_pt, count, incr,
-                                            AMDGPU_PTE_VALID);
-                        }
-
-                        count = 1;
-                        last_pde = pde;
-                        last_shadow = shadow_addr + pt_idx * 8;
-                        last_pt = pt;
-                } else {
-                        ++count;
-                }
-        }
-
-        if (count) {
-                if (vm->root.base.bo->shadow)
-                        params.func(&params, last_shadow, last_pt,
-                                    count, incr, AMDGPU_PTE_VALID);
-
-                params.func(&params, last_pde, last_pt,
-                            count, incr, AMDGPU_PTE_VALID);
-        }
-
-        if (!vm->use_cpu_for_update) {
-                if (params.ib->length_dw == 0) {
-                        amdgpu_job_free(job);
-                } else {
-                        amdgpu_ring_pad_ib(ring, params.ib);
-                        amdgpu_sync_resv(adev, &job->sync,
-                                         parent->base.bo->tbo.resv,
-                                         AMDGPU_FENCE_OWNER_VM, false);
-                        if (shadow)
-                                amdgpu_sync_resv(adev, &job->sync,
-                                                 shadow->tbo.resv,
-                                                 AMDGPU_FENCE_OWNER_VM, false);
-
-                        WARN_ON(params.ib->length_dw > ndw);
-                        r = amdgpu_job_submit(job, ring, &vm->entity,
-                                              AMDGPU_FENCE_OWNER_VM, &fence);
-                        if (r)
-                                goto error_free;
-
-                        amdgpu_bo_fence(parent->base.bo, fence, true);
-                        dma_fence_put(vm->last_update);
-                        vm->last_update = fence;
-                }
+        level += params->adev->vm_manager.root_level;
+        pt = amdgpu_bo_gpu_offset(bo);
+        flags = AMDGPU_PTE_VALID;
+        amdgpu_gart_get_vm_pde(params->adev, level, &pt, &flags);
+        if (shadow) {
+                pde = shadow_addr + (entry - parent->entries) * 8;
+                params->func(params, pde, pt, 1, 0, flags);
         }
 
-        return 0;
-
-error_free:
-        amdgpu_job_free(job);
-        return r;
+        pde = pd_addr + (entry - parent->entries) * 8;
+        params->func(params, pde, pt, 1, 0, flags);
 }
 
 /*
@@ -1225,27 +1124,29 @@ error_free:
  *
  * Mark all PD level as invalid after an error.
  */
-static void amdgpu_vm_invalidate_level(struct amdgpu_vm *vm,
-                                       struct amdgpu_vm_pt *parent)
+static void amdgpu_vm_invalidate_level(struct amdgpu_device *adev,
+                                       struct amdgpu_vm *vm,
+                                       struct amdgpu_vm_pt *parent,
+                                       unsigned level)
 {
-        unsigned pt_idx;
+        unsigned pt_idx, num_entries;
 
         /*
          * Recurse into the subdirectories. This recursion is harmless because
          * we only have a maximum of 5 layers.
          */
-        for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) {
+        num_entries = amdgpu_vm_num_entries(adev, level);
+        for (pt_idx = 0; pt_idx < num_entries; ++pt_idx) {
                 struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
 
                 if (!entry->base.bo)
                         continue;
 
-                entry->addr = ~0ULL;
                 spin_lock(&vm->status_lock);
                 if (list_empty(&entry->base.vm_status))
                         list_add(&entry->base.vm_status, &vm->relocated);
                 spin_unlock(&vm->status_lock);
-                amdgpu_vm_invalidate_level(vm, entry);
+                amdgpu_vm_invalidate_level(adev, vm, entry, level + 1);
         }
 }
 
@@ -1261,38 +1162,63 @@ static void amdgpu_vm_invalidate_level(struct amdgpu_vm *vm,
 int amdgpu_vm_update_directories(struct amdgpu_device *adev,
                                  struct amdgpu_vm *vm)
 {
+        struct amdgpu_pte_update_params params;
+        struct amdgpu_job *job;
+        unsigned ndw = 0;
         int r = 0;
 
+        if (list_empty(&vm->relocated))
+                return 0;
+
+restart:
+        memset(&params, 0, sizeof(params));
+        params.adev = adev;
+
+        if (vm->use_cpu_for_update) {
+                r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM);
+                if (unlikely(r))
+                        return r;
+
+                params.func = amdgpu_vm_cpu_set_ptes;
+        } else {
+                ndw = 512 * 8;
+                r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
+                if (r)
+                        return r;
+
+                params.ib = &job->ibs[0];
+                params.func = amdgpu_vm_do_set_ptes;
+        }
+
         spin_lock(&vm->status_lock);
         while (!list_empty(&vm->relocated)) {
-                struct amdgpu_vm_bo_base *bo_base;
+                struct amdgpu_vm_bo_base *bo_base, *parent;
+                struct amdgpu_vm_pt *pt, *entry;
                 struct amdgpu_bo *bo;
 
                 bo_base = list_first_entry(&vm->relocated,
                                            struct amdgpu_vm_bo_base,
                                            vm_status);
+                list_del_init(&bo_base->vm_status);
                 spin_unlock(&vm->status_lock);
 
                 bo = bo_base->bo->parent;
-                if (bo) {
-                        struct amdgpu_vm_bo_base *parent;
-                        struct amdgpu_vm_pt *pt;
-
-                        parent = list_first_entry(&bo->va,
-                                                  struct amdgpu_vm_bo_base,
-                                                  bo_list);
-                        pt = container_of(parent, struct amdgpu_vm_pt, base);
-
-                        r = amdgpu_vm_update_level(adev, vm, pt);
-                        if (r) {
-                                amdgpu_vm_invalidate_level(vm, &vm->root);
-                                return r;
-                        }
-                        spin_lock(&vm->status_lock);
-                } else {
+                if (!bo) {
                         spin_lock(&vm->status_lock);
-                        list_del_init(&bo_base->vm_status);
+                        continue;
                 }
+
+                parent = list_first_entry(&bo->va, struct amdgpu_vm_bo_base,
+                                          bo_list);
+                pt = container_of(parent, struct amdgpu_vm_pt, base);
+                entry = container_of(bo_base, struct amdgpu_vm_pt, base);
+
+                amdgpu_vm_update_pde(&params, vm, pt, entry);
+
+                spin_lock(&vm->status_lock);
+                if (!vm->use_cpu_for_update &&
+                    (ndw - params.ib->length_dw) < 32)
+                        break;
         }
         spin_unlock(&vm->status_lock);
 
@@ -1300,8 +1226,44 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
                 /* Flush HDP */
                 mb();
                 amdgpu_gart_flush_gpu_tlb(adev, 0);
+        } else if (params.ib->length_dw == 0) {
+                amdgpu_job_free(job);
+        } else {
+                struct amdgpu_bo *root = vm->root.base.bo;
+                struct amdgpu_ring *ring;
+                struct dma_fence *fence;
+
+                ring = container_of(vm->entity.sched, struct amdgpu_ring,
+                                    sched);
+
+                amdgpu_ring_pad_ib(ring, params.ib);
+                amdgpu_sync_resv(adev, &job->sync, root->tbo.resv,
+                                 AMDGPU_FENCE_OWNER_VM, false);
+                if (root->shadow)
+                        amdgpu_sync_resv(adev, &job->sync,
+                                         root->shadow->tbo.resv,
+                                         AMDGPU_FENCE_OWNER_VM, false);
+
+                WARN_ON(params.ib->length_dw > ndw);
+                r = amdgpu_job_submit(job, ring, &vm->entity,
+                                      AMDGPU_FENCE_OWNER_VM, &fence);
+                if (r)
+                        goto error;
+
+                amdgpu_bo_fence(root, fence, true);
+                dma_fence_put(vm->last_update);
+                vm->last_update = fence;
         }
 
+        if (!list_empty(&vm->relocated))
+                goto restart;
+
+        return 0;
+
+error:
+        amdgpu_vm_invalidate_level(adev, vm, &vm->root,
+                                   adev->vm_manager.root_level);
+        amdgpu_job_free(job);
         return r;
 }
 
@@ -1319,19 +1281,19 @@ void amdgpu_vm_get_entry(struct amdgpu_pte_update_params *p, uint64_t addr,
                          struct amdgpu_vm_pt **entry,
                          struct amdgpu_vm_pt **parent)
 {
-        unsigned level = 0;
+        unsigned level = p->adev->vm_manager.root_level;
 
         *parent = NULL;
         *entry = &p->vm->root;
         while ((*entry)->entries) {
-                unsigned idx = addr >> amdgpu_vm_level_shift(p->adev, level++);
+                unsigned shift = amdgpu_vm_level_shift(p->adev, level++);
 
-                idx %= amdgpu_bo_size((*entry)->base.bo) / 8;
                 *parent = *entry;
-                *entry = &(*entry)->entries[idx];
+                *entry = &(*entry)->entries[addr >> shift];
+                addr &= (1ULL << shift) - 1;
         }
 
-        if (level != p->adev->vm_manager.num_level)
+        if (level != AMDGPU_VM_PTB)
                 *entry = NULL;
 }
 
@@ -1363,17 +1325,18 @@ static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
             !(flags & AMDGPU_PTE_VALID)) {
 
                 dst = amdgpu_bo_gpu_offset(entry->base.bo);
-                dst = amdgpu_gart_get_vm_pde(p->adev, dst);
                 flags = AMDGPU_PTE_VALID;
         } else {
                 /* Set the huge page flag to stop scanning at this PDE */
                 flags |= AMDGPU_PDE_PTE;
         }
 
-        if (entry->addr == (dst | flags))
+        if (!entry->huge && !(flags & AMDGPU_PDE_PTE))
                 return;
+        entry->huge = !!(flags & AMDGPU_PDE_PTE);
 
-        entry->addr = (dst | flags);
+        amdgpu_gart_get_vm_pde(p->adev, AMDGPU_VM_PDB0,
+                               &dst, &flags);
 
         if (use_cpu_update) {
                 /* In case a huge page is replaced with a system
@@ -1447,7 +1410,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
                 amdgpu_vm_handle_huge_pages(params, entry, parent,
                                             nptes, dst, flags);
                 /* We don't need to update PTEs for huge pages */
-                if (entry->addr & AMDGPU_PDE_PTE)
+                if (entry->huge)
                         continue;
 
                 pt = entry->base.bo;
@@ -1688,7 +1651,8 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 
 error_free:
         amdgpu_job_free(job);
-        amdgpu_vm_invalidate_level(vm, &vm->root);
+        amdgpu_vm_invalidate_level(adev, vm, &vm->root,
+                                   adev->vm_manager.root_level);
         return r;
 }
 
@@ -2604,7 +2568,19 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
         tmp >>= amdgpu_vm_block_size - 9;
         tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
         adev->vm_manager.num_level = min(max_level, (unsigned)tmp);
-
+        switch (adev->vm_manager.num_level) {
+        case 3:
+                adev->vm_manager.root_level = AMDGPU_VM_PDB2;
+                break;
+        case 2:
+                adev->vm_manager.root_level = AMDGPU_VM_PDB1;
+                break;
+        case 1:
+                adev->vm_manager.root_level = AMDGPU_VM_PDB0;
+                break;
+        default:
+                dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
+        }
         /* block size depends on vm size and hw setup*/
         if (amdgpu_vm_block_size != -1)
                 adev->vm_manager.block_size =
@@ -2643,7 +2619,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                 AMDGPU_VM_PTE_COUNT(adev) * 8);
         unsigned ring_instance;
         struct amdgpu_ring *ring;
-        struct amd_sched_rq *rq;
+        struct drm_sched_rq *rq;
         int r, i;
         u64 flags;
         uint64_t init_pde_value = 0;
@@ -2663,8 +2639,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
         ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring);
         ring_instance %= adev->vm_manager.vm_pte_num_rings;
         ring = adev->vm_manager.vm_pte_rings[ring_instance];
-        rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
-        r = amd_sched_entity_init(&ring->sched, &vm->entity,
+        rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+        r = drm_sched_entity_init(&ring->sched, &vm->entity,
                                   rq, amdgpu_sched_jobs, NULL);
         if (r)
                 return r;
@@ -2698,7 +2674,9 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                 flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
                                 AMDGPU_GEM_CREATE_SHADOW);
 
-        r = amdgpu_bo_create(adev, amdgpu_vm_bo_size(adev, 0), align, true,
+        r = amdgpu_bo_create(adev,
+                             amdgpu_vm_bo_size(adev, adev->vm_manager.root_level),
+                             align, true,
                              AMDGPU_GEM_DOMAIN_VRAM,
                              flags,
                              NULL, NULL, init_pde_value, &vm->root.base.bo);
@@ -2744,7 +2722,7 @@ error_free_root:
         vm->root.base.bo = NULL;
 
 error_free_sched_entity:
-        amd_sched_entity_fini(&ring->sched, &vm->entity);
+        drm_sched_entity_fini(&ring->sched, &vm->entity);
 
         return r;
 }
@@ -2752,26 +2730,31 @@ error_free_sched_entity:
 /**
  * amdgpu_vm_free_levels - free PD/PT levels
  *
- * @level: PD/PT starting level to free
+ * @adev: amdgpu device structure
+ * @parent: PD/PT starting level to free
+ * @level: level of parent structure
  *
  * Free the page directory or page table level and all sub levels.
  */
-static void amdgpu_vm_free_levels(struct amdgpu_vm_pt *level)
+static void amdgpu_vm_free_levels(struct amdgpu_device *adev,
+                                  struct amdgpu_vm_pt *parent,
+                                  unsigned level)
 {
-        unsigned i;
+        unsigned i, num_entries = amdgpu_vm_num_entries(adev, level);
 
-        if (level->base.bo) {
-                list_del(&level->base.bo_list);
-                list_del(&level->base.vm_status);
-                amdgpu_bo_unref(&level->base.bo->shadow);
-                amdgpu_bo_unref(&level->base.bo);
+        if (parent->base.bo) {
+                list_del(&parent->base.bo_list);
+                list_del(&parent->base.vm_status);
+                amdgpu_bo_unref(&parent->base.bo->shadow);
+                amdgpu_bo_unref(&parent->base.bo);
         }
 
-        if (level->entries)
-                for (i = 0; i <= level->last_entry_used; i++)
-                        amdgpu_vm_free_levels(&level->entries[i]);
+        if (parent->entries)
+                for (i = 0; i < num_entries; i++)
+                        amdgpu_vm_free_levels(adev, &parent->entries[i],
                                              level + 1);
 
-        kvfree(level->entries);
+        kvfree(parent->entries);
 }
 
 /**
@@ -2803,7 +2786,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
                 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
         }
 
-        amd_sched_entity_fini(vm->entity.sched, &vm->entity);
+        drm_sched_entity_fini(vm->entity.sched, &vm->entity);
 
         if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
                 dev_err(adev->dev, "still active bo inside vm\n");
         }
@@ -2829,7 +2812,8 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
         if (r) {
                 dev_err(adev->dev, "Leaking page tables because BO reservation failed\n");
         } else {
-                amdgpu_vm_free_levels(&vm->root);
+                amdgpu_vm_free_levels(adev, &vm->root,
+                                      adev->vm_manager.root_level);
                 amdgpu_bo_unreserve(root);
         }
         amdgpu_bo_unref(&root);
```
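The core of the rework is easiest to see in the two new switch statements above: page-directory levels get explicit names (PDB2/PDB1/PDB0/PTB) instead of being derived from `num_level` arithmetic, and `root_level` is picked from the configured number of levels in `amdgpu_vm_adjust_size()`. Below is a minimal, standalone C sketch of that mapping; the enum and helper names are local to the sketch, and a 9-bit block size is assumed for illustration rather than taken from any particular ASIC configuration.

```c
/* Standalone illustration of the level -> shift mapping used in the patch.
 * Names are local to this sketch; only the arithmetic mirrors the hunks above.
 */
#include <stdio.h>

enum vm_level { VM_PDB2, VM_PDB1, VM_PDB0, VM_PTB };

/* shift of the address bits selected by an entry at the given level */
static unsigned vm_level_shift(enum vm_level level, unsigned block_size)
{
        switch (level) {
        case VM_PDB2:
        case VM_PDB1:
        case VM_PDB0:
                /* each directory level covers 9 more bits than the one below */
                return 9 * (VM_PDB0 - level) + block_size;
        case VM_PTB:
                return 0;       /* leaf page tables map pages directly */
        }
        return 0xff;            /* not reached for valid levels */
}

/* mirrors the num_level -> root_level switch added to amdgpu_vm_adjust_size() */
static enum vm_level vm_root_level(unsigned num_level)
{
        switch (num_level) {
        case 3:  return VM_PDB2;
        case 2:  return VM_PDB1;
        default: return VM_PDB0;
        }
}

int main(void)
{
        unsigned block_size = 9;        /* assumed: 512 PTEs per page table */
        enum vm_level root = vm_root_level(3);

        for (int l = root; l <= VM_PTB; l++)
                printf("level %d -> shift %u\n",
                       l, vm_level_shift((enum vm_level)l, block_size));
        return 0;
}
```

With these assumptions the shifts come out to 27/18/9/0 bits for PDB2/PDB1/PDB0/PTB, the same result the removed code computed as `9 * (num_level - level - 1) + block_size`.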