author     Dave Airlie <airlied@redhat.com>	2022-04-22 06:43:52 +1000
committer  Dave Airlie <airlied@redhat.com>	2022-04-22 11:15:30 +1000
commit     19df0cfa258cd42f7f106f6085f1e625f26283db (patch)
tree       be212959c494f95279fd160ba04e9b498d60be8d /drivers/gpu/drm/ttm
parent     c54b39a565227538c52ead2349eb17d54aadd6f7 (diff)
parent     40d8d4bd06720aed6c1125bab7296c57de4f1157 (diff)
download   linux-19df0cfa258cd42f7f106f6085f1e625f26283db.tar.bz2
Merge tag 'drm-misc-next-2022-04-21' of git://anongit.freedesktop.org/drm/drm-misc into drm-next
drm-misc-next for 5.19-rc1
UAPI Changes:
Cross-subsystem Changes:
- of: Create a platform_device for offb
Core Changes:
- edid: block read refactoring
- ttm: Add common debugfs code for resource managers
Driver Changes:
- bridges:
  - adv7611: Enable DRM_BRIDGE_OP_HPD if there's an interrupt
  - anx7625: Fill ELD if no monitor is connected
  - dw_hdmi: Add General Parallel Audio support
  - icn6211: Add data-lanes DT property
  - new driver: Lontium LT9211
- nouveau: make some structures static
- tidss: Reset DISPC on startup
- solomon: SPI Support and DT bindings improvements
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Maxime Ripard <maxime@cerno.tech>
Link: https://patchwork.freedesktop.org/patch/msgid/20220421065948.2pyp3j7acxtl6pz5@houat
Diffstat (limited to 'drivers/gpu/drm/ttm')
 drivers/gpu/drm/ttm/ttm_bo.c           | 37
 drivers/gpu/drm/ttm/ttm_bo_util.c      | 27
 drivers/gpu/drm/ttm/ttm_bo_vm.c        | 34
 drivers/gpu/drm/ttm/ttm_execbuf_util.c |  8
 drivers/gpu/drm/ttm/ttm_resource.c     | 34
 5 files changed, 83 insertions(+), 57 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index c49996cf25d0..75d308ec173d 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -223,7 +223,7 @@ static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
 	struct dma_resv_iter cursor;
 	struct dma_fence *fence;
 
-	dma_resv_iter_begin(&cursor, resv, true);
+	dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP);
 	dma_resv_for_each_fence_unlocked(&cursor, fence) {
 		if (!fence->ops->signaled)
 			dma_fence_enable_sw_signaling(fence);
@@ -252,7 +252,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
 	struct dma_resv *resv = &bo->base._resv;
 	int ret;
 
-	if (dma_resv_test_signaled(resv, true))
+	if (dma_resv_test_signaled(resv, DMA_RESV_USAGE_BOOKKEEP))
 		ret = 0;
 	else
 		ret = -EBUSY;
@@ -264,7 +264,8 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
 		dma_resv_unlock(bo->base.resv);
 		spin_unlock(&bo->bdev->lru_lock);
 
-		lret = dma_resv_wait_timeout(resv, true, interruptible,
+		lret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP,
+					     interruptible,
 					     30 * HZ);
 
 		if (lret < 0)
@@ -367,7 +368,8 @@ static void ttm_bo_release(struct kref *kref)
 			/* Last resort, if we fail to allocate memory for the
 			 * fences block for the BO to become idle
 			 */
-			dma_resv_wait_timeout(bo->base.resv, true, false,
+			dma_resv_wait_timeout(bo->base.resv,
+					      DMA_RESV_USAGE_BOOKKEEP, false,
 					      30 * HZ);
 		}
 
@@ -378,7 +380,7 @@ static void ttm_bo_release(struct kref *kref)
 		ttm_mem_io_free(bdev, bo->resource);
 	}
 
-	if (!dma_resv_test_signaled(bo->base.resv, true) ||
+	if (!dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP) ||
 	    !dma_resv_trylock(bo->base.resv)) {
 		/* The BO is not idle, resurrect it for delayed destroy */
 		ttm_bo_flush_all_fences(bo);
@@ -416,7 +418,6 @@ static void ttm_bo_release(struct kref *kref)
 	dma_resv_unlock(bo->base.resv);
 
 	atomic_dec(&ttm_glob.bo_count);
-	dma_fence_put(bo->moving);
 	bo->destroy(bo);
 }
 
@@ -712,9 +713,8 @@
 EXPORT_SYMBOL(ttm_bo_unpin);
 
 /*
- * Add the last move fence to the BO and reserve a new shared slot. We only use
- * a shared slot to avoid unecessary sync and rely on the subsequent bo move to
- * either stall or use an exclusive fence respectively set bo->moving.
+ * Add the last move fence to the BO as kernel dependency and reserve a new
+ * fence slot.
  */
 static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
 				 struct ttm_resource_manager *man,
@@ -737,17 +737,11 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
 		return ret;
 	}
 
-	dma_resv_add_shared_fence(bo->base.resv, fence);
+	dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL);
 
 	ret = dma_resv_reserve_fences(bo->base.resv, 1);
-	if (unlikely(ret)) {
-		dma_fence_put(fence);
-		return ret;
-	}
-
-	dma_fence_put(bo->moving);
-	bo->moving = fence;
-	return 0;
+	dma_fence_put(fence);
+	return ret;
 }
 
 /*
@@ -949,7 +943,6 @@ int ttm_bo_init_reserved(struct ttm_device *bdev,
 	bo->bdev = bdev;
 	bo->type = type;
 	bo->page_alignment = page_alignment;
-	bo->moving = NULL;
 	bo->pin_count = 0;
 	bo->sg = sg;
 	bo->bulk_move = NULL;
@@ -1044,14 +1037,14 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
 	long timeout = 15 * HZ;
 
 	if (no_wait) {
-		if (dma_resv_test_signaled(bo->base.resv, true))
+		if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP))
 			return 0;
 		else
 			return -EBUSY;
 	}
 
-	timeout = dma_resv_wait_timeout(bo->base.resv, true, interruptible,
-					timeout);
+	timeout = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
+					interruptible, timeout);
 	if (timeout < 0)
 		return timeout;
 
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 1b96b91bf81b..1cbfb00c1d65 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -228,7 +228,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 
 	atomic_inc(&ttm_glob.bo_count);
 	INIT_LIST_HEAD(&fbo->base.ddestroy);
-	fbo->base.moving = NULL;
 	drm_vma_node_reset(&fbo->base.base.vma_node);
 
 	kref_init(&fbo->base.kref);
@@ -500,14 +499,12 @@ static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo,
 	 * operation has completed.
 	 */
 
-	dma_fence_put(bo->moving);
-	bo->moving = dma_fence_get(fence);
-
 	ret = ttm_buffer_object_transfer(bo, &ghost_obj);
 	if (ret)
 		return ret;
 
-	dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);
+	dma_resv_add_fence(&ghost_obj->base._resv, fence,
+			   DMA_RESV_USAGE_KERNEL);
 
 	/**
 	 * If we're not moving to fixed memory, the TTM object
@@ -545,9 +542,6 @@ static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
 	spin_unlock(&from->move_lock);
 
 	ttm_resource_free(bo, &bo->resource);
-
-	dma_fence_put(bo->moving);
-	bo->moving = dma_fence_get(fence);
 }
 
 int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
@@ -561,7 +555,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 	struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
 	int ret = 0;
 
-	dma_resv_add_excl_fence(bo->base.resv, fence);
+	dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL);
 	if (!evict)
 		ret = ttm_bo_move_to_ghost(bo, fence, man->use_tt);
 	else if (!from->use_tt && pipeline)
@@ -578,6 +572,21 @@
 }
 EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
 
+void ttm_bo_move_sync_cleanup(struct ttm_buffer_object *bo,
+			      struct ttm_resource *new_mem)
+{
+	struct ttm_device *bdev = bo->bdev;
+	struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
+	int ret;
+
+	ret = ttm_bo_wait_free_node(bo, man->use_tt);
+	if (WARN_ON(ret))
+		return;
+
+	ttm_bo_assign_mem(bo, new_mem);
+}
+EXPORT_SYMBOL(ttm_bo_move_sync_cleanup);
+
 /**
  * ttm_bo_pipeline_gutting - purge the contents of a bo
  * @bo: The buffer object
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 08ba083a80d2..5b324f245265 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -46,17 +46,13 @@
 static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
 				struct vm_fault *vmf)
 {
-	vm_fault_t ret = 0;
-	int err = 0;
-
-	if (likely(!bo->moving))
-		goto out_unlock;
+	long err = 0;
 
 	/*
 	 * Quick non-stalling check for idle.
 	 */
-	if (dma_fence_is_signaled(bo->moving))
-		goto out_clear;
+	if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_KERNEL))
+		return 0;
 
 	/*
 	 * If possible, avoid waiting for GPU with mmap_lock
@@ -64,34 +60,30 @@ static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
 	 * is the first attempt.
 	 */
 	if (fault_flag_allow_retry_first(vmf->flags)) {
-		ret = VM_FAULT_RETRY;
 		if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
-			goto out_unlock;
+			return VM_FAULT_RETRY;
 
 		ttm_bo_get(bo);
 		mmap_read_unlock(vmf->vma->vm_mm);
-		(void) dma_fence_wait(bo->moving, true);
+		(void)dma_resv_wait_timeout(bo->base.resv,
+					    DMA_RESV_USAGE_KERNEL, true,
+					    MAX_SCHEDULE_TIMEOUT);
 		dma_resv_unlock(bo->base.resv);
 		ttm_bo_put(bo);
-		goto out_unlock;
+		return VM_FAULT_RETRY;
 	}
 
 	/*
 	 * Ordinary wait.
 	 */
-	err = dma_fence_wait(bo->moving, true);
-	if (unlikely(err != 0)) {
-		ret = (err != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
+	err = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_KERNEL, true,
+				    MAX_SCHEDULE_TIMEOUT);
+	if (unlikely(err < 0)) {
+		return (err != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
 			VM_FAULT_NOPAGE;
-		goto out_unlock;
 	}
 
-out_clear:
-	dma_fence_put(bo->moving);
-	bo->moving = NULL;
-
-out_unlock:
-	return ret;
+	return 0;
 }
 
 static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 789c645f004e..dbee34a058df 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -101,7 +101,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
 			continue;
 		}
 
-		num_fences = min(entry->num_shared, 1u);
+		num_fences = max(entry->num_shared, 1u);
 		if (!ret) {
 			ret = dma_resv_reserve_fences(bo->base.resv,
 						      num_fences);
@@ -154,10 +154,8 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
 	list_for_each_entry(entry, list, head) {
 		struct ttm_buffer_object *bo = entry->bo;
 
-		if (entry->num_shared)
-			dma_resv_add_shared_fence(bo->base.resv, fence);
-		else
-			dma_resv_add_excl_fence(bo->base.resv, fence);
+		dma_resv_add_fence(bo->base.resv, fence, entry->num_shared ?
+				   DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE);
 		ttm_bo_move_to_lru_tail_unlocked(bo);
 		dma_resv_unlock(bo->base.resv);
 	}
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index 7d5a438180e9..65889b3caf50 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -644,3 +644,37 @@ ttm_kmap_iter_linear_io_fini(struct ttm_kmap_iter_linear_io *iter_io,
 
 	ttm_mem_io_free(bdev, mem);
 }
+
+#if defined(CONFIG_DEBUG_FS)
+
+static int ttm_resource_manager_show(struct seq_file *m, void *unused)
+{
+	struct ttm_resource_manager *man =
+		(struct ttm_resource_manager *)m->private;
+	struct drm_printer p = drm_seq_file_printer(m);
+	ttm_resource_manager_debug(man, &p);
+	return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(ttm_resource_manager);
+
+#endif
+
+/**
+ * ttm_resource_manager_create_debugfs - Create debugfs entry for specified
+ * resource manager.
+ * @man: The TTM resource manager for which the debugfs stats file be creates
+ * @parent: debugfs directory in which the file will reside
+ * @name: The filename to create.
+ *
+ * This function setups up a debugfs file that can be used to look
+ * at debug statistics of the specified ttm_resource_manager.
+ */
+void ttm_resource_manager_create_debugfs(struct ttm_resource_manager *man,
+					 struct dentry * parent,
+					 const char *name)
+{
+#if defined(CONFIG_DEBUG_FS)
+	debugfs_create_file(name, 0444, parent, man, &ttm_resource_manager_fops);
+#endif
+}
+EXPORT_SYMBOL(ttm_resource_manager_create_debugfs);
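
The two API shifts visible in this diff are what drivers built on top of this tree have to pick up: fences are now added to a reservation object with an explicit usage (dma_resv_add_fence() with DMA_RESV_USAGE_KERNEL/WRITE/READ/BOOKKEEP, replacing dma_resv_add_excl_fence()/dma_resv_add_shared_fence(), and a slot must first be reserved with dma_resv_reserve_fences()), and resource-manager statistics can be exposed through the new ttm_resource_manager_create_debugfs() helper. The sketch below shows how a driver might use these calls; it is illustrative only — the foodrm_* names, the "vram" file name and the choice of debugfs parent are assumptions, while the TTM and dma_resv functions are the ones added or changed in the diff above.

#include <linux/debugfs.h>
#include <linux/dma-resv.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_resource.h>

/* Expose the VRAM manager's ttm_resource_manager_debug() output in debugfs. */
static void foodrm_ttm_debugfs_init(struct ttm_device *bdev,
				    struct dentry *debugfs_root)
{
	struct ttm_resource_manager *man = ttm_manager_type(bdev, TTM_PL_VRAM);

	ttm_resource_manager_create_debugfs(man, debugfs_root, "vram");
}

/* Attach a fence for a GPU read with the new usage-based interface. */
static int foodrm_attach_read_fence(struct ttm_buffer_object *bo,
				    struct dma_fence *fence)
{
	int ret;

	dma_resv_assert_held(bo->base.resv);

	/* Reserving a fence slot is mandatory before adding the fence. */
	ret = dma_resv_reserve_fences(bo->base.resv, 1);
	if (ret)
		return ret;

	/* The usage argument replaces the old shared/exclusive distinction. */
	dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_READ);
	return 0;
}

For comparison, ttm_eu_reserve_buffers() in the diff above reserves max(entry->num_shared, 1u) slots, and ttm_eu_fence_buffer_objects() picks DMA_RESV_USAGE_READ or DMA_RESV_USAGE_WRITE depending on entry->num_shared; the debugfs file created here is read-only (0444) and simply dumps the manager state through ttm_resource_manager_debug().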