| author | Dave Airlie <airlied@redhat.com> | 2021-06-10 11:03:50 +1000 |
|---|---|---|
| committer | Dave Airlie <airlied@redhat.com> | 2021-06-10 11:28:09 +1000 |
| commit | 09b020bb05a514f560979438fa40406bc63d5353 (patch) | |
| tree | 2568980e133e47bce3509cab92f7adac99b51cc4 /drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | |
| parent | 5745d647d5563d3e9d32013ad4e5c629acff04d7 (diff) | |
| parent | 5b7a2c92b6102447a973f2f1ef19d660ec329881 (diff) | |
| download | linux-09b020bb05a514f560979438fa40406bc63d5353.tar.bz2 | |
Merge tag 'drm-misc-next-2021-06-09' of git://anongit.freedesktop.org/drm/drm-misc into drm-next
drm-misc-next for 5.14:
UAPI Changes:
* drm/panfrost: Export AFBC_FEATURES register to userspace
Cross-subsystem Changes:
* dma-buf: Fix debug printing; Rename dma_resv_*() functions + changes
in callers; Cleanups
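
As a caller-side illustration of that dma_resv_*() rename, here is a minimal sketch; the helper example_sync_to_fences() is hypothetical and not from the tree, while the dma_resv_get_fences()/dma_resv_wait_timeout()/dma_resv_test_signaled() calls and their arguments are taken from the amdgpu hunks in the diff below, where only the _rcu suffix is dropped:

```c
#include <linux/dma-fence.h>	/* dma_fence_put() */
#include <linux/dma-resv.h>	/* dma_resv_get_fences(), dma_resv_wait_timeout(), ... */
#include <linux/sched.h>	/* MAX_SCHEDULE_TIMEOUT */
#include <linux/slab.h>		/* kfree() */

/* Hypothetical caller, not from the tree; only the dma_resv_*() calls and
 * their argument lists mirror the amdgpu hunks below. */
static void example_sync_to_fences(struct dma_resv *resv)
{
	struct dma_fence *excl;
	struct dma_fence **shared;
	unsigned int shared_count, i;
	int r;

	/* was: dma_resv_get_fences_rcu(resv, &excl, &shared_count, &shared) */
	r = dma_resv_get_fences(resv, &excl, &shared_count, &shared);
	if (r) {
		/* was: dma_resv_wait_timeout_rcu(resv, true, false, ...) */
		dma_resv_wait_timeout(resv, true, false, MAX_SCHEDULE_TIMEOUT);
		return;
	}

	/* was: dma_resv_test_signaled_rcu(resv, true) */
	if (!dma_resv_test_signaled(resv, true)) {
		/* ... wait on or collect the individual fences here ... */
	}

	dma_fence_put(excl);
	for (i = 0; i < shared_count; i++)
		dma_fence_put(shared[i]);
	kfree(shared);
}
```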
Core Changes:
* Add prefetching memcpy for WC
* Avoid circular dependency on CONFIG_FB
* Cleanups
* Documentation fixes throughout DRM
* ttm: Make struct ttm_resource the base of all managers + changes
in all users of TTM; Add a generic memcpy for page-based iomem; Remove
use of VM_MIXEDMAP; Cleanups
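
As a rough sketch of the ttm_resource rework from a TTM user's point of view (the helper name and header choices here are assumptions, not from the tree): buffer placement is now reached through the bo->resource pointer rather than the old embedded bo->mem, and that same pointer is what callers pass to helpers such as ttm_bo_move_to_lru_tail(), as the amdgpu hunks below show:

```c
#include <drm/ttm/ttm_bo_api.h>		/* struct ttm_buffer_object (header name assumed for this era) */
#include <drm/ttm/ttm_placement.h>	/* TTM_PL_VRAM */
#include <drm/ttm/ttm_resource.h>	/* struct ttm_resource */

/* Hypothetical helper, not from the tree: placement checks now dereference
 * the bo->resource pointer instead of the old embedded bo->mem struct. */
static bool example_bo_is_vram(struct ttm_buffer_object *bo)
{
	struct ttm_resource *res = bo->resource;	/* was: &bo->mem */

	return res && res->mem_type == TTM_PL_VRAM;
}
```

The NULL check is defensive in this sketch; the amdgpu hunks below simply dereference bo->tbo.resource.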
Driver Changes:
* drm/bridge: Add TI SN65DSI83 and SN65DSI84 + DT bindings
* drm/hyperv: Add DRM driver for HyperV graphics output
* drm/msm: Fix module dependencies
* drm/panel: KD35T133: Support rotation
* drm/pl111: Fix module dependencies
* drm/qxl: Fixes
* drm/stm: Cleanups
* drm/sun4i: Be explicit about format modifiers
* drm/vc4: Use struct gpio_desc; Cleanups
* drm/vgem: Cleanups
* drm/vmwgfx: Use ttm_bo_move_null() if there's nothing to copy
* fbdev/mach64: Cleanups
* fbdev/mb862xx: Use DEVICE_ATTR_RO
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Thomas Zimmermann <tzimmermann@suse.de>
Link: https://patchwork.freedesktop.org/patch/msgid/YMBw3DF2b9udByfT@linux-uq9g
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 24 |
1 file changed, 12 insertions, 12 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 8d218c5cfee8..777e8922ecf3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -342,7 +342,7 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
 	amdgpu_vm_bo_idle(base);
 
 	if (bo->preferred_domains &
-	    amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))
+	    amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type))
 		return;
 
 	/*
@@ -658,10 +658,11 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
 		if (!bo->parent)
 			continue;
 
-		ttm_bo_move_to_lru_tail(&bo->tbo, &bo->tbo.mem,
+		ttm_bo_move_to_lru_tail(&bo->tbo, bo->tbo.resource,
 					&vm->lru_bulk_move);
 		if (shadow)
-			ttm_bo_move_to_lru_tail(&shadow->tbo, &shadow->tbo.mem,
+			ttm_bo_move_to_lru_tail(&shadow->tbo,
+						shadow->tbo.resource,
 						&vm->lru_bulk_move);
 	}
 	spin_unlock(&adev->mman.bdev.lru_lock);
@@ -1858,10 +1859,10 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
 			struct drm_gem_object *gobj = dma_buf->priv;
 			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
 
-			if (abo->tbo.mem.mem_type == TTM_PL_VRAM)
+			if (abo->tbo.resource->mem_type == TTM_PL_VRAM)
 				bo = gem_to_amdgpu_bo(gobj);
 		}
-		mem = &bo->tbo.mem;
+		mem = bo->tbo.resource;
 		if (mem->mem_type == TTM_PL_TT ||
 		    mem->mem_type == AMDGPU_PL_PREEMPT)
 			pages_addr = bo->tbo.ttm->dma_address;
@@ -1922,7 +1923,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
 	 * next command submission.
 	 */
 	if (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) {
-		uint32_t mem_type = bo->tbo.mem.mem_type;
+		uint32_t mem_type = bo->tbo.resource->mem_type;
 
 		if (!(bo->preferred_domains &
 		      amdgpu_mem_type_to_domain(mem_type)))
@@ -2063,13 +2064,12 @@ static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	unsigned i, shared_count;
 	int r;
 
-	r = dma_resv_get_fences_rcu(resv, &excl,
-				    &shared_count, &shared);
+	r = dma_resv_get_fences(resv, &excl, &shared_count, &shared);
 	if (r) {
 		/* Not enough memory to grab the fence list, as last resort
 		 * block for all the fences to complete.
 		 */
-		dma_resv_wait_timeout_rcu(resv, true, false,
+		dma_resv_wait_timeout(resv, true, false,
 				          MAX_SCHEDULE_TIMEOUT);
 		return;
 	}
@@ -2681,7 +2681,7 @@ bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
 		return true;
 
 	/* Don't evict VM page tables while they are busy */
-	if (!dma_resv_test_signaled_rcu(bo->tbo.base.resv, true))
+	if (!dma_resv_test_signaled(bo->tbo.base.resv, true))
 		return false;
 
 	/* Try to block ongoing updates */
@@ -2861,8 +2861,8 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
  */
 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
 {
-	timeout = dma_resv_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv,
-					    true, true, timeout);
+	timeout = dma_resv_wait_timeout(vm->root.base.bo->tbo.base.resv, true,
+					true, timeout);
 	if (timeout <= 0)
 		return timeout;
 