From 1b1f42d8fde4fef1ed7873bf5aa91755f8c3de35 Mon Sep 17 00:00:00 2001
From: Lucas Stach
Date: Wed, 6 Dec 2017 17:49:39 +0100
Subject: drm: move amd_gpu_scheduler into common location
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This moves and renames the AMDGPU scheduler to a common location in DRM
in order to facilitate re-use by other drivers. This is mostly a
straightforward rename with no code changes.

One notable exception is the function to_drm_sched_fence(), which is no
longer an inline header function, to avoid the need to export the
drm_sched_fence_ops_scheduled and drm_sched_fence_ops_finished
structures.

Reviewed-by: Chunming Zhou
Tested-by: Dieter Nützel
Acked-by: Alex Deucher
Signed-off-by: Lucas Stach
Signed-off-by: Alex Deucher
---
 include/drm/gpu_scheduler.h       | 176 ++++++++++++++++++++++++++++++++++++++
 include/drm/gpu_scheduler_trace.h |  82 ++++++++++++++++++
 include/drm/spsc_queue.h          | 122 ++++++++++++++++++++++++++
 3 files changed, 380 insertions(+)
 create mode 100644 include/drm/gpu_scheduler.h
 create mode 100644 include/drm/gpu_scheduler_trace.h
 create mode 100644 include/drm/spsc_queue.h

diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
new file mode 100644
index 000000000000..d29da4cbb042
--- /dev/null
+++ b/include/drm/gpu_scheduler.h
@@ -0,0 +1,176 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _DRM_GPU_SCHEDULER_H_
+#define _DRM_GPU_SCHEDULER_H_
+
+#include <drm/spsc_queue.h>
+#include <linux/dma-fence.h>
+
+struct drm_gpu_scheduler;
+struct drm_sched_rq;
+
+enum drm_sched_priority {
+	DRM_SCHED_PRIORITY_MIN,
+	DRM_SCHED_PRIORITY_LOW = DRM_SCHED_PRIORITY_MIN,
+	DRM_SCHED_PRIORITY_NORMAL,
+	DRM_SCHED_PRIORITY_HIGH_SW,
+	DRM_SCHED_PRIORITY_HIGH_HW,
+	DRM_SCHED_PRIORITY_KERNEL,
+	DRM_SCHED_PRIORITY_MAX,
+	DRM_SCHED_PRIORITY_INVALID = -1,
+	DRM_SCHED_PRIORITY_UNSET = -2
+};
+
+/**
+ * A scheduler entity is a wrapper around a job queue or a group of
+ * other entities. Entities take turns emitting jobs from their job
+ * queues to the corresponding hardware ring, based on the scheduling
+ * policy.
+ */
+struct drm_sched_entity {
+	struct list_head list;
+	struct drm_sched_rq *rq;
+	spinlock_t rq_lock;
+	struct drm_gpu_scheduler *sched;
+
+	spinlock_t queue_lock;
+	struct spsc_queue job_queue;
+
+	atomic_t fence_seq;
+	uint64_t fence_context;
+
+	struct dma_fence *dependency;
+	struct dma_fence_cb cb;
+	atomic_t *guilty; /* points to ctx's guilty */
+};
+
+/**
+ * A run queue is a set of entities scheduling command submissions for
+ * one specific ring. It implements the scheduling policy that selects
+ * the next entity to emit commands from.
+ */
+struct drm_sched_rq {
+	spinlock_t lock;
+	struct list_head entities;
+	struct drm_sched_entity *current_entity;
+};
+
+struct drm_sched_fence {
+	struct dma_fence scheduled;
+	struct dma_fence finished;
+	struct dma_fence_cb cb;
+	struct dma_fence *parent;
+	struct drm_gpu_scheduler *sched;
+	spinlock_t lock;
+	void *owner;
+};
+
+struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
+
+struct drm_sched_job {
+	struct spsc_node queue_node;
+	struct drm_gpu_scheduler *sched;
+	struct drm_sched_fence *s_fence;
+	struct dma_fence_cb finish_cb;
+	struct work_struct finish_work;
+	struct list_head node;
+	struct delayed_work work_tdr;
+	uint64_t id;
+	atomic_t karma;
+	enum drm_sched_priority s_priority;
+};
+
+static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
+					    int threshold)
+{
+	return (s_job && atomic_inc_return(&s_job->karma) > threshold);
+}
+
+/**
+ * Define the backend operations called by the scheduler;
+ * these functions should be implemented on the driver side.
+ */
+struct drm_sched_backend_ops {
+	struct dma_fence *(*dependency)(struct drm_sched_job *sched_job,
+					struct drm_sched_entity *s_entity);
+	struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);
+	void (*timedout_job)(struct drm_sched_job *sched_job);
+	void (*free_job)(struct drm_sched_job *sched_job);
+};
+
+/**
+ * One scheduler is implemented for each hardware ring.
+ */
+struct drm_gpu_scheduler {
+	const struct drm_sched_backend_ops *ops;
+	uint32_t hw_submission_limit;
+	long timeout;
+	const char *name;
+	struct drm_sched_rq sched_rq[DRM_SCHED_PRIORITY_MAX];
+	wait_queue_head_t wake_up_worker;
+	wait_queue_head_t job_scheduled;
+	atomic_t hw_rq_count;
+	atomic64_t job_id_count;
+	struct task_struct *thread;
+	struct list_head ring_mirror_list;
+	spinlock_t job_list_lock;
+	int hang_limit;
+};
+
+int drm_sched_init(struct drm_gpu_scheduler *sched,
+		   const struct drm_sched_backend_ops *ops,
+		   uint32_t hw_submission, unsigned hang_limit, long timeout,
+		   const char *name);
+void drm_sched_fini(struct drm_gpu_scheduler *sched);
+
+int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
+			  struct drm_sched_entity *entity,
+			  struct drm_sched_rq *rq,
+			  uint32_t jobs, atomic_t *guilty);
+void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
+			   struct drm_sched_entity *entity);
+void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
+			       struct drm_sched_entity *entity);
+void drm_sched_entity_set_rq(struct drm_sched_entity *entity,
+			     struct drm_sched_rq *rq);
+
+int drm_sched_fence_slab_init(void);
+void drm_sched_fence_slab_fini(void);
+
+struct drm_sched_fence *drm_sched_fence_create(
+	struct drm_sched_entity *s_entity, void *owner);
+void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
+void drm_sched_fence_finished(struct drm_sched_fence *fence);
+int drm_sched_job_init(struct drm_sched_job *job,
+		       struct drm_gpu_scheduler *sched,
+		       struct drm_sched_entity *entity,
+		       void *owner);
+void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched,
+			    struct drm_sched_job *job);
+void drm_sched_job_recovery(struct drm_gpu_scheduler *sched);
+bool drm_sched_dependency_optimized(struct dma_fence *fence,
+				    struct drm_sched_entity *entity);
+void drm_sched_job_kickout(struct drm_sched_job *s_job);
+
+#endif
diff --git a/include/drm/gpu_scheduler_trace.h b/include/drm/gpu_scheduler_trace.h
new file mode 100644
index 000000000000..0789e8d0a0e1
--- /dev/null
+++ b/include/drm/gpu_scheduler_trace.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#if !defined(_GPU_SCHED_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _GPU_SCHED_TRACE_H_
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#include <drm/drmP.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM gpu_scheduler
+#define TRACE_INCLUDE_FILE gpu_scheduler_trace
+
+TRACE_EVENT(drm_sched_job,
+	    TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity),
+	    TP_ARGS(sched_job, entity),
+	    TP_STRUCT__entry(
+			     __field(struct drm_sched_entity *, entity)
+			     __field(struct dma_fence *, fence)
+			     __field(const char *, name)
+			     __field(uint64_t, id)
+			     __field(u32, job_count)
+			     __field(int, hw_job_count)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->entity = entity;
+			   __entry->id = sched_job->id;
+			   __entry->fence = &sched_job->s_fence->finished;
+			   __entry->name = sched_job->sched->name;
+			   __entry->job_count = spsc_queue_count(&entity->job_queue);
+			   __entry->hw_job_count = atomic_read(
+				   &sched_job->sched->hw_rq_count);
+			   ),
+	    TP_printk("entity=%p, id=%llu, fence=%p, ring=%s, job count:%u, hw job count:%d",
+		      __entry->entity, __entry->id,
+		      __entry->fence, __entry->name,
+		      __entry->job_count, __entry->hw_job_count)
+);
+
+TRACE_EVENT(drm_sched_process_job,
+	    TP_PROTO(struct drm_sched_fence *fence),
+	    TP_ARGS(fence),
+	    TP_STRUCT__entry(
+		    __field(struct dma_fence *, fence)
+		    ),
+
+	    TP_fast_assign(
+		    __entry->fence = &fence->finished;
+		    ),
+	    TP_printk("fence=%p signaled", __entry->fence)
+);
+
+#endif
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
diff --git a/include/drm/spsc_queue.h b/include/drm/spsc_queue.h
new file mode 100644
index 000000000000..125f096c88cb
--- /dev/null
+++ b/include/drm/spsc_queue.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef DRM_SCHEDULER_SPSC_QUEUE_H_
+#define DRM_SCHEDULER_SPSC_QUEUE_H_
+
+#include <linux/atomic.h>
+#include <linux/preempt.h>
+
+/** SPSC lockless queue */
+
+struct spsc_node {
+	/* Stores spsc_node* */
+	struct spsc_node *next;
+};
+
+struct spsc_queue {
+	struct spsc_node *head;
+
+	/* atomic pointer to struct spsc_node* */
+	atomic_long_t tail;
+
+	atomic_t job_count;
+};
+
+static inline void spsc_queue_init(struct spsc_queue *queue)
+{
+	queue->head = NULL;
+	atomic_long_set(&queue->tail, (long)&queue->head);
+	atomic_set(&queue->job_count, 0);
+}
+
+static inline struct spsc_node *spsc_queue_peek(struct spsc_queue *queue)
+{
+	return queue->head;
+}
+
+static inline int spsc_queue_count(struct spsc_queue *queue)
+{
+	return atomic_read(&queue->job_count);
+}
+
+static inline bool spsc_queue_push(struct spsc_queue *queue, struct spsc_node *node)
+{
+	struct spsc_node **tail;
+
+	node->next = NULL;
+
+	preempt_disable();
+
+	tail = (struct spsc_node **)atomic_long_xchg(&queue->tail, (long)&node->next);
+	WRITE_ONCE(*tail, node);
+	atomic_inc(&queue->job_count);
+
+	/*
+	 * In case of the first element, make sure the new node is visible
+	 * to the consumer thread before we ping the kernel thread that
+	 * there is new work to do.
+	 */
+	smp_wmb();
+
+	preempt_enable();
+
+	return tail == &queue->head;
+}
+
+static inline struct spsc_node *spsc_queue_pop(struct spsc_queue *queue)
+{
+	struct spsc_node *next, *node;
+
+	/* Make sure we read from memory and not the cache */
+	smp_rmb();
+
+	node = READ_ONCE(queue->head);
+
+	if (!node)
+		return NULL;
+
+	next = READ_ONCE(node->next);
+	WRITE_ONCE(queue->head, next);
+
+	if (unlikely(!next)) {
+		/* slowpath for the last element in the queue */
+
+		if (atomic_long_cmpxchg(&queue->tail,
+					(long)&node->next,
+					(long)&queue->head) != (long)&node->next) {
+			/* Updating tail failed; wait for the new next to appear */
+			do {
+				smp_rmb();
+			} while (unlikely(!(queue->head = READ_ONCE(node->next))));
+		}
+	}
+
+	atomic_dec(&queue->job_count);
+	return node;
+}
+
+#endif /* DRM_SCHEDULER_SPSC_QUEUE_H_ */
-- 
cgit v1.2.3
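Usage note (illustrative, not part of the commits): with the scheduler in its
common location, a driver implements drm_sched_backend_ops and feeds jobs
through an entity. A minimal sketch follows; all my_* names are invented for
the example, only the drm_sched_* calls and signatures come from the header
above:

	#include <linux/jiffies.h>
	#include <linux/slab.h>
	#include <drm/gpu_scheduler.h>

	struct my_job {
		struct drm_sched_job base;	/* must be embedded */
		/* driver-private command stream state goes here */
	};

	/* invented driver hook that starts the job on the hardware ring */
	struct dma_fence *my_hw_submit(struct my_job *job);

	static struct dma_fence *my_dependency(struct drm_sched_job *sched_job,
					       struct drm_sched_entity *s_entity)
	{
		return NULL;	/* no dependencies beyond the job itself */
	}

	static struct dma_fence *my_run_job(struct drm_sched_job *sched_job)
	{
		struct my_job *job = container_of(sched_job, struct my_job, base);

		/* return the HW fence; the scheduler signals s_fence from it */
		return my_hw_submit(job);
	}

	static void my_timedout_job(struct drm_sched_job *sched_job)
	{
		/* reset the hung ring, then re-submit the survivors */
	}

	static void my_free_job(struct drm_sched_job *sched_job)
	{
		kfree(container_of(sched_job, struct my_job, base));
	}

	static const struct drm_sched_backend_ops my_sched_ops = {
		.dependency	= my_dependency,
		.run_job	= my_run_job,
		.timedout_job	= my_timedout_job,
		.free_job	= my_free_job,
	};

	struct my_ring {
		struct drm_gpu_scheduler sched;	/* one scheduler per HW ring */
		struct drm_sched_entity entity;	/* one entity per context */
	};

	int my_ring_init(struct my_ring *ring)
	{
		int ret;

		ret = drm_sched_init(&ring->sched, &my_sched_ops,
				     16 /* hw_submission */, 0 /* hang_limit */,
				     msecs_to_jiffies(500), "my_ring");
		if (ret)
			return ret;

		return drm_sched_entity_init(&ring->sched, &ring->entity,
					     &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL],
					     32 /* job queue depth */, NULL /* guilty */);
	}

	int my_submit(struct my_ring *ring, struct my_job *job, void *owner)
	{
		int ret;

		ret = drm_sched_job_init(&job->base, &ring->sched,
					 &ring->entity, owner);
		if (ret)
			return ret;

		drm_sched_entity_push_job(&job->base, &ring->entity);
		return 0;
	}

drm_sched_entity_push_job() hands the job to the entity's spsc_queue; the
scheduler thread pops it, waits for any dependency fences, and calls run_job
once a hardware submission slot is free.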
From 4983e48c8539282be15f660bdd2c4260467b1190 Mon Sep 17 00:00:00 2001
From: Lucas Stach
Date: Wed, 6 Dec 2017 17:49:40 +0100
Subject: drm/sched: move fence slab handling to module init/exit
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This is the only part of the scheduler that must not be called separately
by each driver. Move it to module init/exit so it is done a single time
when the scheduler module is loaded.

Reviewed-by: Chunming Zhou
Tested-by: Dieter Nützel
Acked-by: Alex Deucher
Signed-off-by: Lucas Stach
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c |  8 --------
 drivers/gpu/drm/scheduler/sched_fence.c | 12 ++++++++----
 include/drm/gpu_scheduler.h             |  3 ---
 3 files changed, 8 insertions(+), 15 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 1d8011bca182..51b76688ab90 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -912,10 +912,6 @@ static int __init amdgpu_init(void)
 	if (r)
 		goto error_fence;
 
-	r = drm_sched_fence_slab_init();
-	if (r)
-		goto error_sched;
-
 	if (vgacon_text_force()) {
 		DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n");
 		return -EINVAL;
@@ -928,9 +924,6 @@ static int __init amdgpu_init(void)
 	/* let modprobe override vga console setting */
 	return pci_register_driver(pdriver);
 
-error_sched:
-	amdgpu_fence_slab_fini();
-
 error_fence:
 	amdgpu_sync_fini();
 
@@ -944,7 +937,6 @@ static void __exit amdgpu_exit(void)
 	pci_unregister_driver(pdriver);
 	amdgpu_unregister_atpx_handler();
 	amdgpu_sync_fini();
-	drm_sched_fence_slab_fini();
 	amdgpu_fence_slab_fini();
 }
 
diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c
index f6f2955890c4..69aab086b913 100644
--- a/drivers/gpu/drm/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/scheduler/sched_fence.c
@@ -29,7 +29,7 @@
 
 static struct kmem_cache *sched_fence_slab;
 
-int drm_sched_fence_slab_init(void)
+static int __init drm_sched_fence_slab_init(void)
 {
 	sched_fence_slab = kmem_cache_create(
 		"drm_sched_fence", sizeof(struct drm_sched_fence), 0,
@@ -39,14 +39,12 @@ int drm_sched_fence_slab_init(void)
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(drm_sched_fence_slab_init);
 
-void drm_sched_fence_slab_fini(void)
+static void __exit drm_sched_fence_slab_fini(void)
 {
 	rcu_barrier();
 	kmem_cache_destroy(sched_fence_slab);
 }
-EXPORT_SYMBOL_GPL(drm_sched_fence_slab_fini);
 
 void drm_sched_fence_scheduled(struct drm_sched_fence *fence)
 {
@@ -185,3 +183,9 @@ struct drm_sched_fence *drm_sched_fence_create(struct drm_sched_entity *entity,
 
 	return fence;
 }
+
+module_init(drm_sched_fence_slab_init);
+module_exit(drm_sched_fence_slab_fini);
+
+MODULE_DESCRIPTION("DRM GPU scheduler");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index d29da4cbb042..dfd54fb94e10 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -155,9 +155,6 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
 void drm_sched_entity_set_rq(struct drm_sched_entity *entity,
 			     struct drm_sched_rq *rq);
 
-int drm_sched_fence_slab_init(void);
-void drm_sched_fence_slab_fini(void);
-
 struct drm_sched_fence *drm_sched_fence_create(
 	struct drm_sched_entity *s_entity, void *owner);
 void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
-- 
cgit v1.2.3
From 8836e4b8d3a04a5097a83c5f9b25662fa6a9ebbc Mon Sep 17 00:00:00 2001
From: Roger He
Date: Fri, 8 Dec 2017 11:36:46 +0800
Subject: drm/ttm: add allow_reserved_eviction and resv into ttm_operation_ctx
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

allow_reserved_eviction: Allow eviction of reserved BOs
resv: Reservation object to allow reserved evictions with

Reviewed-by: Christian König
Signed-off-by: Roger He
Signed-off-by: Alex Deucher
---
 include/drm/ttm/ttm_bo_api.h | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 368eb02b54a9..c1263308145a 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -263,6 +263,8 @@ struct ttm_bo_kmap_obj {
  *
  * @interruptible: Sleep interruptible if sleeping.
  * @no_wait_gpu: Return immediately if the GPU is busy.
+ * @allow_reserved_eviction: Allow eviction of reserved BOs.
+ * @resv: Reservation object to allow reserved evictions with.
  *
  * Context for TTM operations like changing buffer placement or general memory
  * allocation.
@@ -270,6 +272,8 @@ struct ttm_bo_kmap_obj {
 struct ttm_operation_ctx {
 	bool interruptible;
 	bool no_wait_gpu;
+	bool allow_reserved_eviction;
+	struct reservation_object *resv;
 	uint64_t bytes_moved;
 };
-- 
cgit v1.2.3
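Usage note (illustrative, not part of the commit): the two new fields let a
caller opt in to evicting buffers that are already reserved, provided they
are reserved with the given reservation object (e.g. per-VM BOs sharing one
resv). A minimal sketch, assuming an invented my_bo_validate() helper and the
ttm_bo_validate() interface that already takes a ttm_operation_ctx:

	#include <drm/ttm/ttm_bo_api.h>

	/* Validate a BO, allowing eviction of BOs reserved with bo->resv. */
	static int my_bo_validate(struct ttm_buffer_object *bo,
				  struct ttm_placement *placement)
	{
		struct ttm_operation_ctx ctx = {
			.interruptible		 = true,
			.no_wait_gpu		 = false,
			.allow_reserved_eviction = true,	/* new field */
			.resv			 = bo->resv,	/* new field */
		};

		return ttm_bo_validate(bo, placement, &ctx);
	}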
From 3e98d829ad0a59425f816c94447b4ac39a72f632 Mon Sep 17 00:00:00 2001
From: Roger He
Date: Fri, 8 Dec 2017 20:19:32 +0800
Subject: drm/ttm: use a ttm operation ctx for ttm_bo_move_xxx
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This includes ttm_bo_move_memcpy and ttm_bo_move_ttm.

Signed-off-by: Roger He
Reviewed-by: Christian König
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 7 +++----
 drivers/gpu/drm/nouveau/nouveau_bo.c    | 7 +++----
 drivers/gpu/drm/qxl/qxl_ttm.c           | 3 +--
 drivers/gpu/drm/radeon/radeon_ttm.c     | 7 +++----
 drivers/gpu/drm/ttm/ttm_bo.c            | 6 ++----
 drivers/gpu/drm/ttm/ttm_bo_util.c       | 8 ++++----
 include/drm/ttm/ttm_bo_driver.h         | 4 ++--
 7 files changed, 18 insertions(+), 24 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 7db9556b389b..c307a7d2cf16 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -505,7 +505,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
-	r = ttm_bo_move_ttm(bo, ctx->interruptible, ctx->no_wait_gpu, new_mem);
+	r = ttm_bo_move_ttm(bo, ctx, new_mem);
 out_cleanup:
 	ttm_bo_mem_put(bo, &tmp_mem);
 	return r;
@@ -536,7 +536,7 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
 	if (unlikely(r)) {
 		return r;
 	}
-	r = ttm_bo_move_ttm(bo, ctx->interruptible, ctx->no_wait_gpu, &tmp_mem);
+	r = ttm_bo_move_ttm(bo, ctx, &tmp_mem);
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
@@ -597,8 +597,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
 
 	if (r) {
 memcpy:
-		r = ttm_bo_move_memcpy(bo, ctx->interruptible,
-				       ctx->no_wait_gpu, new_mem);
+		r = ttm_bo_move_memcpy(bo, ctx, new_mem);
 		if (r) {
 			return r;
 		}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 949bf6b3feab..6b6fb2080ac3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1226,7 +1226,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
 	if (ret)
 		goto out;
 
-	ret = ttm_bo_move_ttm(bo, intr, no_wait_gpu, new_reg);
+	ret = ttm_bo_move_ttm(bo, &ctx, new_reg);
 out:
 	ttm_bo_mem_put(bo, &tmp_reg);
 	return ret;
@@ -1255,7 +1255,7 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
 	if (ret)
 		return ret;
 
-	ret = ttm_bo_move_ttm(bo, intr, no_wait_gpu, &tmp_reg);
+	ret = ttm_bo_move_ttm(bo, &ctx, &tmp_reg);
 	if (ret)
 		goto out;
 
@@ -1380,8 +1380,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
 	/* Fallback to software copy. */
 	ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
 	if (ret == 0)
-		ret = ttm_bo_move_memcpy(bo, ctx->interruptible,
-					 ctx->no_wait_gpu, new_reg);
+		ret = ttm_bo_move_memcpy(bo, ctx, new_reg);
 
 out:
 	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index d866f329e7d8..78ce118d9157 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -357,8 +357,7 @@ static int qxl_bo_move(struct ttm_buffer_object *bo, bool evict,
 		qxl_move_null(bo, new_mem);
 		return 0;
 	}
-	return ttm_bo_move_memcpy(bo, ctx->interruptible, ctx->no_wait_gpu,
-				  new_mem);
+	return ttm_bo_move_memcpy(bo, ctx, new_mem);
 }
 
 static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 98e30d71d9e0..557fd7915973 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -347,7 +347,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
-	r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, new_mem);
+	r = ttm_bo_move_ttm(bo, &ctx, new_mem);
 out_cleanup:
 	ttm_bo_mem_put(bo, &tmp_mem);
 	return r;
@@ -380,7 +380,7 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
 	if (unlikely(r)) {
 		return r;
 	}
-	r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, &tmp_mem);
+	r = ttm_bo_move_ttm(bo, &ctx, &tmp_mem);
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
@@ -445,8 +445,7 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
 
 	if (r) {
 memcpy:
-		r = ttm_bo_move_memcpy(bo, ctx->interruptible,
-				       ctx->no_wait_gpu, new_mem);
+		r = ttm_bo_move_memcpy(bo, ctx, new_mem);
 		if (r) {
 			return r;
 		}
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 17f64c836bf9..ba5b48617bba 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -324,13 +324,11 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 
 	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
 	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
-		ret = ttm_bo_move_ttm(bo, ctx->interruptible,
-				      ctx->no_wait_gpu, mem);
+		ret = ttm_bo_move_ttm(bo, ctx, mem);
 	else if (bdev->driver->move)
 		ret = bdev->driver->move(bo, evict, ctx, mem);
 	else
-		ret = ttm_bo_move_memcpy(bo, ctx->interruptible,
-					 ctx->no_wait_gpu, mem);
+		ret = ttm_bo_move_memcpy(bo, ctx, mem);
 
 	if (ret) {
 		if (bdev->driver->move_notify) {
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index e7a519f1849b..9237099a7c49 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -45,7 +45,7 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
 }
 
 int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
-		    bool interruptible, bool no_wait_gpu,
+		    struct ttm_operation_ctx *ctx,
 		    struct ttm_mem_reg *new_mem)
 {
 	struct ttm_tt *ttm = bo->ttm;
@@ -53,7 +53,7 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
 	int ret;
 
 	if (old_mem->mem_type != TTM_PL_SYSTEM) {
-		ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
+		ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
 
 		if (unlikely(ret != 0)) {
 			if (ret != -ERESTARTSYS)
@@ -329,7 +329,7 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
 }
 
 int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
-		       bool interruptible, bool no_wait_gpu,
+		       struct ttm_operation_ctx *ctx,
 		       struct ttm_mem_reg *new_mem)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
@@ -345,7 +345,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 	unsigned long add = 0;
 	int dir;
 
-	ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
+	ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
 	if (ret)
 		return ret;
 
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 6996d884c508..5115718ca607 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -976,7 +976,7 @@ void ttm_mem_io_free(struct ttm_bo_device *bdev,
  */
 
 int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
-		    bool interruptible, bool no_wait_gpu,
+		    struct ttm_operation_ctx *ctx,
 		    struct ttm_mem_reg *new_mem);
 
 /**
@@ -998,7 +998,7 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
  */
 
 int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
-		       bool interruptible, bool no_wait_gpu,
+		       struct ttm_operation_ctx *ctx,
 		       struct ttm_mem_reg *new_mem);
 
 /**
-- 
cgit v1.2.3
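Usage note (illustrative, not part of the commit): after this change a
driver's .move callback simply forwards the context instead of unpacking
interruptible/no_wait_gpu itself; the unpacking now happens inside
ttm_bo_move_ttm() and ttm_bo_move_memcpy(). A sketch of the pattern the
converted drivers above follow, with an invented my_try_hw_copy() DMA-engine
helper:

	#include <drm/ttm/ttm_bo_api.h>
	#include <drm/ttm/ttm_bo_driver.h>

	/* invented: accelerated copy via the driver's DMA engine */
	int my_try_hw_copy(struct ttm_buffer_object *bo, bool evict,
			   struct ttm_operation_ctx *ctx,
			   struct ttm_mem_reg *new_mem);

	/* matches the ttm_bo_driver.move signature called from
	 * ttm_bo_handle_move_mem() */
	static int my_bo_move(struct ttm_buffer_object *bo, bool evict,
			      struct ttm_operation_ctx *ctx,
			      struct ttm_mem_reg *new_mem)
	{
		int ret;

		ret = my_try_hw_copy(bo, evict, ctx, new_mem);
		if (ret)	/* fall back to the CPU copy, as amdgpu/radeon do */
			ret = ttm_bo_move_memcpy(bo, ctx, new_mem);

		return ret;
	}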