Diffstat (limited to 'drivers/gpu/drm/i915/i915_request.c')
-rw-r--r-- | drivers/gpu/drm/i915/i915_request.c | 54
1 files changed, 28 insertions, 26 deletions
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index dcaa85a91090..e5a55801f753 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -51,7 +51,6 @@ struct execute_cb {
 static struct i915_global_request {
 	struct i915_global base;
 	struct kmem_cache *slab_requests;
-	struct kmem_cache *slab_dependencies;
 	struct kmem_cache *slab_execute_cbs;
 } global;
 
@@ -203,6 +202,19 @@ static void free_capture_list(struct i915_request *request)
 	}
 }
 
+static void __i915_request_fill(struct i915_request *rq, u8 val)
+{
+	void *vaddr = rq->ring->vaddr;
+	u32 head;
+
+	head = rq->infix;
+	if (rq->postfix < head) {
+		memset(vaddr + head, val, rq->ring->size - head);
+		head = 0;
+	}
+	memset(vaddr + head, val, rq->postfix - head);
+}
+
 static void remove_from_engine(struct i915_request *rq)
 {
 	struct intel_engine_cs *engine, *locked;
@@ -247,6 +259,9 @@ bool i915_request_retire(struct i915_request *rq)
 	 */
 	GEM_BUG_ON(!list_is_first(&rq->link,
 				  &i915_request_timeline(rq)->requests));
+	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+		/* Poison before we release our space in the ring */
+		__i915_request_fill(rq, POISON_FREE);
 	rq->ring->head = rq->postfix;
 
 	/*
@@ -903,6 +918,12 @@ emit_semaphore_wait(struct i915_request *to,
 		    struct i915_request *from,
 		    gfp_t gfp)
 {
+	if (!intel_context_use_semaphores(to->context))
+		goto await_fence;
+
+	if (!rcu_access_pointer(from->hwsp_cacheline))
+		goto await_fence;
+
 	/* Just emit the first semaphore we see as request space is limited. */
 	if (already_busywaiting(to) & from->engine->mask)
 		goto await_fence;
@@ -948,12 +969,8 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from)
 		ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
 						       &from->submit,
 						       I915_FENCE_GFP);
-	else if (intel_context_use_semaphores(to->context))
-		ret = emit_semaphore_wait(to, from, I915_FENCE_GFP);
 	else
-		ret = i915_sw_fence_await_dma_fence(&to->submit,
-						    &from->fence, 0,
-						    I915_FENCE_GFP);
+		ret = emit_semaphore_wait(to, from, I915_FENCE_GFP);
 
 	if (ret < 0)
 		return ret;
@@ -1052,6 +1069,8 @@ __i915_request_await_execution(struct i915_request *to,
 {
 	int err;
 
+	GEM_BUG_ON(intel_context_is_barrier(from->context));
+
 	/* Submit both requests at the same time */
 	err = __await_execution(to, from, hook, I915_FENCE_GFP);
 	if (err)
@@ -1192,9 +1211,6 @@ i915_request_await_object(struct i915_request *to,
 
 void i915_request_skip(struct i915_request *rq, int error)
 {
-	void *vaddr = rq->ring->vaddr;
-	u32 head;
-
 	GEM_BUG_ON(!IS_ERR_VALUE((long)error));
 	dma_fence_set_error(&rq->fence, error);
 
@@ -1206,12 +1222,7 @@ void i915_request_skip(struct i915_request *rq, int error)
 	 * context, clear out all the user operations leaving the
 	 * breadcrumb at the end (so we get the fence notifications).
 	 */
-	head = rq->infix;
-	if (rq->postfix < head) {
-		memset(vaddr + head, 0, rq->ring->size - head);
-		head = 0;
-	}
-	memset(vaddr + head, 0, rq->postfix - head);
+	__i915_request_fill(rq, 0);
 
 	rq->infix = rq->postfix;
 }
@@ -1586,6 +1597,8 @@ long i915_request_wait(struct i915_request *rq,
 			break;
 		}
 
+		intel_engine_flush_submission(rq->engine);
+
 		if (signal_pending_state(state, current)) {
 			timeout = -ERESTARTSYS;
 			break;
@@ -1596,7 +1609,6 @@ long i915_request_wait(struct i915_request *rq,
 			break;
 		}
 
-		intel_engine_flush_submission(rq->engine);
 		timeout = io_schedule_timeout(timeout);
 	}
 	__set_current_state(TASK_RUNNING);
@@ -1616,14 +1628,12 @@ out:
 
 static void i915_global_request_shrink(void)
 {
-	kmem_cache_shrink(global.slab_dependencies);
 	kmem_cache_shrink(global.slab_execute_cbs);
 	kmem_cache_shrink(global.slab_requests);
 }
 
 static void i915_global_request_exit(void)
 {
-	kmem_cache_destroy(global.slab_dependencies);
 	kmem_cache_destroy(global.slab_execute_cbs);
 	kmem_cache_destroy(global.slab_requests);
 }
@@ -1653,17 +1663,9 @@ int __init i915_global_request_init(void)
 	if (!global.slab_execute_cbs)
 		goto err_requests;
 
-	global.slab_dependencies = KMEM_CACHE(i915_dependency,
-					      SLAB_HWCACHE_ALIGN |
-					      SLAB_RECLAIM_ACCOUNT);
-	if (!global.slab_dependencies)
-		goto err_execute_cbs;
-
 	i915_global_register(&global.base);
 	return 0;
 
-err_execute_cbs:
-	kmem_cache_destroy(global.slab_execute_cbs);
 err_requests:
 	kmem_cache_destroy(global.slab_requests);
 	return -ENOMEM;
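
Note: the new __i915_request_fill() helper above centralises the wraparound-aware fill that i915_request_skip() used to open-code; the diff uses it both to poison a retired request's ring space with POISON_FREE (under CONFIG_DRM_I915_DEBUG_GEM) and to zero out a skipped request's payload. The following is a minimal standalone C sketch of that same two-memset wraparound logic, under simplified assumptions: example_ring, example_request and example_request_fill are illustrative stand-ins, not the real i915 types.

#include <stdint.h>
#include <string.h>

struct example_ring {
	void *vaddr;	/* CPU mapping of the ringbuffer */
	uint32_t size;	/* total ring size in bytes */
};

struct example_request {
	struct example_ring *ring;
	uint32_t infix;		/* offset where this request's payload begins */
	uint32_t postfix;	/* offset of the closing breadcrumb (payload end) */
};

/*
 * Fill the request's payload bytes with 'val'. Because the ring is a
 * circular buffer, the [infix, postfix) range may wrap past the end of
 * the mapping, in which case it takes two memset() calls: one from the
 * start offset to the end of the ring, and one from offset 0 to postfix.
 */
static void example_request_fill(struct example_request *rq, uint8_t val)
{
	char *vaddr = rq->ring->vaddr;
	uint32_t head = rq->infix;

	if (rq->postfix < head) {	/* range wraps around the ring */
		memset(vaddr + head, val, rq->ring->size - head);
		head = 0;
	}
	memset(vaddr + head, val, rq->postfix - head);
}

The breadcrumb that follows postfix is deliberately left untouched, so fence notifications still fire for a skipped or poisoned request.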