| author    | Chris Wilson <chris@chris-wilson.co.uk> | 2018-07-06 11:39:42 +0100 |
| ---       | ---                                     | ---                       |
| committer | Chris Wilson <chris@chris-wilson.co.uk> | 2018-07-06 18:22:34 +0100 |
| commit    | da99fe5f858525b0eac5a5296bba5d6bedb81abd (patch) | |
| tree      | 318e800431e703f72a77e855e87245117d7ca8cd /drivers/gpu/drm/i915/i915_gem_execbuffer.c | |
| parent    | 8fdbfd8686329e286465bcef11a7ef20be84d6b6 (diff) | |
drm/i915: Refactor export_fence() after i915_vma_move_to_active()
Currently all callers are responsible for adding the vma to the active
timeline and then exporting its fence. Combine the two operations into
i915_vma_move_to_active() to move all the extra handling from the
callers into a single site.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180706103947.15919-1-chris@chris-wilson.co.uk
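In caller terms, the refactor looks roughly like this (an illustrative sketch, not code from the patch; `vma`, `rq` and `flags` stand in for the real execbuffer state):

```c
/*
 * Before the patch: each call site had to pair the two steps itself,
 * first marking the vma active on the request's timeline, then
 * exporting the request fence into the vma's reservation object.
 */
i915_vma_move_to_active(vma, rq, flags);
eb_export_fence(vma, rq, flags);	/* easy to miss at a new call site */

/*
 * After the patch: one call does both, because export_fence() is now
 * invoked at the end of i915_vma_move_to_active() itself.
 */
i915_vma_move_to_active(vma, rq, flags);
```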
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem_execbuffer.c')
-rw-r--r-- | drivers/gpu/drm/i915/i915_gem_execbuffer.c | 47 |
1 file changed, 21 insertions(+), 26 deletions(-)
```diff
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index c2dd9b4cdace..91f20445147f 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1166,15 +1166,9 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
 	GEM_BUG_ON(!reservation_object_test_signaled_rcu(batch->resv, true));
 	i915_vma_move_to_active(batch, rq, 0);
-	reservation_object_lock(batch->resv, NULL);
-	reservation_object_add_excl_fence(batch->resv, &rq->fence);
-	reservation_object_unlock(batch->resv);
 	i915_vma_unpin(batch);
 
 	i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
-	reservation_object_lock(vma->resv, NULL);
-	reservation_object_add_excl_fence(vma->resv, &rq->fence);
-	reservation_object_unlock(vma->resv);
 
 	rq->batch = batch;
@@ -1771,25 +1765,6 @@ slow:
 	return eb_relocate_slow(eb);
 }
 
-static void eb_export_fence(struct i915_vma *vma,
-			    struct i915_request *rq,
-			    unsigned int flags)
-{
-	struct reservation_object *resv = vma->resv;
-
-	/*
-	 * Ignore errors from failing to allocate the new fence, we can't
-	 * handle an error right now. Worst case should be missed
-	 * synchronisation leading to rendering corruption.
-	 */
-	reservation_object_lock(resv, NULL);
-	if (flags & EXEC_OBJECT_WRITE)
-		reservation_object_add_excl_fence(resv, &rq->fence);
-	else if (reservation_object_reserve_shared(resv) == 0)
-		reservation_object_add_shared_fence(resv, &rq->fence);
-	reservation_object_unlock(resv);
-}
-
 static int eb_move_to_gpu(struct i915_execbuffer *eb)
 {
 	const unsigned int count = eb->buffer_count;
@@ -1844,7 +1819,6 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
 		struct i915_vma *vma = eb->vma[i];
 
 		i915_vma_move_to_active(vma, eb->request, flags);
-		eb_export_fence(vma, eb->request, flags);
 
 		__eb_unreserve_vma(vma, flags);
 		vma->exec_flags = NULL;
@@ -1884,6 +1858,25 @@ static bool i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
 	return true;
 }
 
+static void export_fence(struct i915_vma *vma,
+			 struct i915_request *rq,
+			 unsigned int flags)
+{
+	struct reservation_object *resv = vma->resv;
+
+	/*
+	 * Ignore errors from failing to allocate the new fence, we can't
+	 * handle an error right now. Worst case should be missed
+	 * synchronisation leading to rendering corruption.
+	 */
+	reservation_object_lock(resv, NULL);
+	if (flags & EXEC_OBJECT_WRITE)
+		reservation_object_add_excl_fence(resv, &rq->fence);
+	else if (reservation_object_reserve_shared(resv) == 0)
+		reservation_object_add_shared_fence(resv, &rq->fence);
+	reservation_object_unlock(resv);
+}
+
 void i915_vma_move_to_active(struct i915_vma *vma,
 			     struct i915_request *rq,
 			     unsigned int flags)
@@ -1921,6 +1914,8 @@ void i915_vma_move_to_active(struct i915_vma *vma,
 
 	if (flags & EXEC_OBJECT_NEEDS_FENCE)
 		i915_gem_active_set(&vma->last_fence, rq);
+
+	export_fence(vma, rq, flags);
 }
 
 static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
```
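For context on what the exported fences mean to other users of the buffer, here is a hedged sketch against the 4.18-era reservation_object API (this function is not part of the patch): export_fence() installs rq->fence as the exclusive fence for writes and as a shared fence for reads, and a consumer chooses which set to wait on.

```c
#include <linux/reservation.h>
#include <linux/sched.h>

/*
 * Sketch only: a consumer that must not touch the buffer until all
 * prior GPU work has finished waits on every fence in the object.
 * Passing wait_all = false instead would only order against the last
 * writer (the exclusive fence added for EXEC_OBJECT_WRITE).
 */
static long wait_for_buffer_idle(struct reservation_object *resv)
{
	return reservation_object_wait_timeout_rcu(resv,
						   true, /* wait_all: shared + exclusive */
						   true, /* interruptible */
						   MAX_SCHEDULE_TIMEOUT);
}
```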