From e766fde6511e2be83acbca1d603035e08de23f3b Mon Sep 17 00:00:00 2001
From: Chris Wilson
Date: Wed, 1 May 2019 12:45:36 +0100
Subject: drm/i915: Delay semaphore submission until the start of the signaler

Currently we submit the semaphore busywait as soon as the signaler is
submitted to HW. However, we may submit the signaler as the tail of a
batch of requests, and not even as the first context in the HW list,
i.e. the busywait may start spinning far in advance of the signaler
even starting.

If we wait until the request before the signaler is completed before
submitting the busywait, we prevent the busywait from starting too
early, if the signaler is not first in the submission port.

To handle the case where the signaler is at the start of the second (or
later) submission port, we will need to delay the execution callback
until we know the context is promoted to port0. A challenge for later.

Fixes: e88619646971 ("drm/i915: Use HW semaphores for inter-engine synchronisation on gen8+")
Signed-off-by: Chris Wilson
Cc: Tvrtko Ursulin
Reviewed-by: Tvrtko Ursulin
Link: https://patchwork.freedesktop.org/patch/msgid/20190501114541.10077-9-chris@chris-wilson.co.uk
(cherry picked from commit 0d90ccb70211cbf55140e91bd39db684aa4c16e9)
[Joonas: edited Fixes: tag into single line.]
Signed-off-by: Joonas Lahtinen
---
 drivers/gpu/drm/i915/i915_request.c | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

(limited to 'drivers/gpu/drm/i915/i915_request.c')

diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index b836721d3b13..20c7c77b3768 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -798,6 +798,21 @@ err_unreserve:
         return ERR_PTR(ret);
 }
 
+static int
+i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
+{
+        if (list_is_first(&signal->ring_link, &signal->ring->request_list))
+                return 0;
+
+        signal = list_prev_entry(signal, ring_link);
+        if (i915_timeline_sync_is_later(rq->timeline, &signal->fence))
+                return 0;
+
+        return i915_sw_fence_await_dma_fence(&rq->submit,
+                                             &signal->fence, 0,
+                                             I915_FENCE_GFP);
+}
+
 static int
 emit_semaphore_wait(struct i915_request *to,
                     struct i915_request *from,
@@ -816,6 +831,10 @@ emit_semaphore_wait(struct i915_request *to,
                                                      &from->fence, 0,
                                                      I915_FENCE_GFP);
 
+        err = i915_request_await_start(to, from);
+        if (err < 0)
+                return err;
+
         err = i915_sw_fence_await_dma_fence(&to->semaphore,
                                             &from->fence, 0,
                                             I915_FENCE_GFP);
--
cgit v1.2.3


From 2564fe708b580c1ef12b2b527ab6e8afe11ad444 Mon Sep 17 00:00:00 2001
From: Chris Wilson
Date: Sat, 4 May 2019 08:07:07 +0100
Subject: drm/i915: Disable semaphore busywaits on saturated systems

Asking the GPU to busywait on a memory address, perhaps not unexpectedly
in hindsight for a shared system, leads to bus contention that affects
CPU programs trying to concurrently access memory. This can manifest as
a drop in transcode throughput on highly over-saturated workloads.

The only clue offered by perf is that the bus-cycles (perf stat -e
bus-cycles) jumped by 50% when enabling semaphores. This corresponds
with extra CPU active cycles being attributed to intel_idle's mwait.

This patch introduces a heuristic to try and detect when more than one
client is submitting to the GPU, pushing it into an oversaturated state.

As we already keep track of when the semaphores are signaled, we can
inspect their state on submitting the busywait batch and, if we had
planned to use a semaphore but were too late, conclude that the GPU is
overloaded and not try to use semaphores in future requests. In
practice, this means we optimistically try to use semaphores for the
first frame of a transcode job split over multiple engines, and, if
there are multiple clients active, fail and continue not to use
semaphores for the subsequent frames in the sequence. Periodically, we
try to optimistically switch semaphores back on whenever the client
waits to catch up with the transcode results.

With 1 client, on Broxton J3455, with the relative fps normalized by %cpu:

x no semaphores
+ drm-tip
* patched

[ministat distribution plot of the x/+/* samples]

    N         Min          Max       Median          Avg       Stddev
x 120     2.60475      3.50941      3.31123    3.2143953   0.21117399
+ 120      2.3826      3.57077      3.25101    3.1414161   0.28146407
Difference at 95.0% confidence
        -0.0729792 +/- 0.0629585
        -2.27039% +/- 1.95864%
        (Student's t, pooled s = 0.248814)
* 120     2.35536      3.66713       3.2849    3.2059917   0.24618565
No difference proven at 95.0% confidence

With 10 clients over-saturating the pipeline:

x no semaphores
+ drm-tip
* patched

[ministat distribution plot of the x/+/* samples]

    N         Min          Max       Median          Avg       Stddev
x 120     2.47855       2.8972      2.72376    2.7193402  0.074604933
+ 120     1.17367      1.77459      1.71977    1.6966782  0.085850697
Difference at 95.0% confidence
        -1.02266 +/- 0.0203502
        -37.607% +/- 0.748352%
        (Student's t, pooled s = 0.0804246)
* 120     2.57868      3.00821      2.80142    2.7923878  0.058646477
Difference at 95.0% confidence
        0.0730476 +/- 0.0169791
        2.68622% +/- 0.624383%
        (Student's t, pooled s = 0.0671018)

This indicates that we have recovered the regression from enabling
semaphores on this saturated setup, with a hint towards an overall
improvement.

Very similar, but of smaller magnitude, results are observed on both
Skylake(gt2) and Kabylake(gt4). This may be due to the reduced impact of
bus-cycles: where we see a 50% hit on Broxton, it is only 10% on the big
core, in this particular test.

One observation to make here is that for a greedy client trying to
maximise its own throughput, using semaphores is the right choice. It is
only in the holistic system-wide view, where the semaphores of one
client impact another and reduce the overall throughput, that we would
choose to disable semaphores.
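
To make the bookkeeping behind that heuristic concrete, the following is
a minimal, self-contained C sketch of the idea. All names here are
illustrative only; the actual kernel implementation is the saturation
check in __i915_request_submit() and already_busywaiting() in the diff
below.

/* Simplified model of the saturation heuristic (illustrative names only). */
#include <stdbool.h>
#include <stdint.h>

typedef uint32_t engine_mask_t;

struct toy_context {
        engine_mask_t saturated;        /* engines whose semaphores fired too late */
};

struct toy_request {
        struct toy_context *ctx;
        engine_mask_t semaphores;       /* engines we emitted busywaits for */
        bool semaphores_signaled;       /* were those waits already satisfied? */
};

/* The "are we too late?" check, performed when the request is submitted. */
static void toy_submit(struct toy_request *rq)
{
        if (rq->semaphores && rq->semaphores_signaled)
                rq->ctx->saturated |= rq->semaphores;
}

/* Decide whether a new request may busywait on a given signaling engine. */
static bool toy_may_use_semaphore(const struct toy_request *rq,
                                  engine_mask_t signaler_engine)
{
        engine_mask_t busy = rq->semaphores | rq->ctx->saturated;

        return !(busy & signaler_engine);
}

The design point is that saturation is only ever detected opportunistically
on the submission path, so a lightly loaded system keeps the latency benefit
of early submission while a saturated one stops generating busywait traffic.
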

The most noticeable negative impact this has is on the no-op
microbenchmarks, which are also very notable for having no cpu bus load.
In particular, this increases the runtime and energy consumption of
gem_exec_whisper.

Fixes: e88619646971 ("drm/i915: Use HW semaphores for inter-engine synchronisation on gen8+")
Signed-off-by: Chris Wilson
Cc: Tvrtko Ursulin
Cc: Dmitry Rogozhkin
Cc: Dmitry Ermilov
Cc: Joonas Lahtinen
Reviewed-by: Tvrtko Ursulin
Link: https://patchwork.freedesktop.org/patch/msgid/20190504070707.30902-1-chris@chris-wilson.co.uk
(cherry picked from commit ca6e56f654e7b241256ffba78cd2abb22aa3bc97)
Signed-off-by: Joonas Lahtinen
---
 drivers/gpu/drm/i915/i915_request.c        | 40 +++++++++++++++++++++++++++++-
 drivers/gpu/drm/i915/intel_context.c       |  1 +
 drivers/gpu/drm/i915/intel_context_types.h |  3 +++
 3 files changed, 43 insertions(+), 1 deletion(-)

(limited to 'drivers/gpu/drm/i915/i915_request.c')

diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 20c7c77b3768..ce342f7f7ddb 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -425,6 +425,26 @@ void __i915_request_submit(struct i915_request *request)
         if (i915_gem_context_is_banned(request->gem_context))
                 i915_request_skip(request, -EIO);
 
+        /*
+         * Are we using semaphores when the gpu is already saturated?
+         *
+         * Using semaphores incurs a cost in having the GPU poll a
+         * memory location, busywaiting for it to change. The continual
+         * memory reads can have a noticeable impact on the rest of the
+         * system with the extra bus traffic, stalling the cpu as it too
+         * tries to access memory across the bus (perf stat -e bus-cycles).
+         *
+         * If we installed a semaphore on this request and we only submit
+         * the request after the signaler completed, that indicates the
+         * system is overloaded and using semaphores at this time only
+         * increases the amount of work we are doing. If so, we disable
+         * further use of semaphores until we are idle again, whence we
+         * optimistically try again.
+         */
+        if (request->sched.semaphores &&
+            i915_sw_fence_signaled(&request->semaphore))
+                request->hw_context->saturated |= request->sched.semaphores;
+
         /* We may be recursing from the signal callback of another i915 fence */
         spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
 
@@ -813,6 +833,24 @@ i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
                                              I915_FENCE_GFP);
 }
 
+static intel_engine_mask_t
+already_busywaiting(struct i915_request *rq)
+{
+        /*
+         * Polling a semaphore causes bus traffic, delaying other users of
+         * both the GPU and CPU. We want to limit the impact on others,
+         * while taking advantage of early submission to reduce GPU
+         * latency. Therefore we restrict ourselves to not using more
+         * than one semaphore from each source, and not using a semaphore
+         * if we have detected the engine is saturated (i.e. would not be
+         * submitted early and cause bus traffic reading an already passed
+         * semaphore).
+         *
+         * See the are-we-too-late? check in __i915_request_submit().
+         */
+        return rq->sched.semaphores | rq->hw_context->saturated;
+}
+
 static int
 emit_semaphore_wait(struct i915_request *to,
                     struct i915_request *from,
@@ -826,7 +864,7 @@ emit_semaphore_wait(struct i915_request *to,
         GEM_BUG_ON(INTEL_GEN(to->i915) < 8);
 
         /* Just emit the first semaphore we see as request space is limited. */
-        if (to->sched.semaphores & from->engine->mask)
+        if (already_busywaiting(to) & from->engine->mask)
                 return i915_sw_fence_await_dma_fence(&to->submit,
                                                      &from->fence, 0,
                                                      I915_FENCE_GFP);
diff --git a/drivers/gpu/drm/i915/intel_context.c b/drivers/gpu/drm/i915/intel_context.c
index 8931e0fee873..924cc556223a 100644
--- a/drivers/gpu/drm/i915/intel_context.c
+++ b/drivers/gpu/drm/i915/intel_context.c
@@ -230,6 +230,7 @@ intel_context_init(struct intel_context *ce,
         ce->gem_context = ctx;
         ce->engine = engine;
         ce->ops = engine->cops;
+        ce->saturated = 0;
 
         INIT_LIST_HEAD(&ce->signal_link);
         INIT_LIST_HEAD(&ce->signals);
diff --git a/drivers/gpu/drm/i915/intel_context_types.h b/drivers/gpu/drm/i915/intel_context_types.h
index 68b4ca1611e0..339c7437fe82 100644
--- a/drivers/gpu/drm/i915/intel_context_types.h
+++ b/drivers/gpu/drm/i915/intel_context_types.h
@@ -14,6 +14,7 @@
 #include
 
 #include "i915_active_types.h"
+#include "intel_engine_types.h"
 
 struct i915_gem_context;
 struct i915_vma;
@@ -58,6 +59,8 @@ struct intel_context {
         atomic_t pin_count;
         struct mutex pin_mutex; /* guards pinning and associated on-gpuing */
 
+        intel_engine_mask_t saturated; /* submitting semaphores too late? */
+
         /**
          * active_tracker: Active tracker for the external rq activity
          * on this intel_context object.
--
cgit v1.2.3


From c36beba6b296b3c05a0f29753b04775e5ae23886 Mon Sep 17 00:00:00 2001
From: Chris Wilson
Date: Wed, 8 May 2019 12:24:52 +0100
Subject: drm/i915: Seal races between async GPU cancellation, retirement and signaling

Currently there is an underlying assumption that i915_request_unsubmit()
is synchronous wrt the GPU -- that is, the request is no longer in flight
as we remove it. In the near future that may change, and this may upset
our signaling as we can process an interrupt for that request while it
is no longer in flight.

CPU0                                    CPU1
intel_engine_breadcrumbs_irq
(queue request completion)
                                        i915_request_cancel_signaling
...                                     ...
                                        i915_request_enable_signaling
dma_fence_signal

Hence in the time it took us to drop the lock to signal the request, a
preemption event may have occurred and re-queued the request. In the
process, that request would have seen I915_FENCE_FLAG_SIGNAL clear and
so reused the rq->signal_link that was in use on CPU0, leading to bad
pointer chasing in intel_engine_breadcrumbs_irq.

A related issue was that if someone started listening for a signal on a
completed but no longer in-flight request, we missed the opportunity to
immediately signal that request.

Furthermore, as intel_contexts may be immediately released during
request retirement, in order to be entirely sure that
intel_engine_breadcrumbs_irq may no longer dereference the intel_context
(ce->signals and ce->signal_link), we must wait for the irq spinlock.

In order to prevent the race, we use a bit in the fence.flags to signal
the transfer onto the signal list inside intel_engine_breadcrumbs_irq.
For simplicity, we use the DMA_FENCE_FLAG_SIGNALED_BIT as it then
quickly signals to any outside observer that the fence is indeed
signaled.
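
The core of the fix is that only one path may ever claim that bit for a
given fence. The following is a minimal, standalone C sketch of that
claim step; the names are illustrative only, while the real helper is
__dma_fence_signal() in the diff below, which uses test_and_set_bit()
on fence->flags.

/* Standalone model of "claim the signal exactly once" (illustrative only). */
#include <stdatomic.h>
#include <stdbool.h>

#define TOY_FENCE_SIGNALED_BIT  (1u << 0)       /* stands in for DMA_FENCE_FLAG_SIGNALED_BIT */

struct toy_fence {
        atomic_uint flags;
};

/*
 * A retirement path and the breadcrumbs interrupt handler may race to
 * signal the same fence; whoever sets the bit first returns true and is
 * the only caller allowed to walk the fence's callback list.
 */
static bool toy_fence_claim_signal(struct toy_fence *f)
{
        unsigned int old = atomic_fetch_or(&f->flags, TOY_FENCE_SIGNALED_BIT);

        return !(old & TOY_FENCE_SIGNALED_BIT);
}

Because the claim is a single atomic read-modify-write, the loser of the
race simply skips the callback walk instead of chasing a reused
signal_link.
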
v2: Sketch out potential dma-fence API for manual signaling
v3: And the test_and_set_bit()

Fixes: 52c0fdb25c7c ("drm/i915: Replace global breadcrumbs with per-context interrupt tracking")
Signed-off-by: Chris Wilson
Cc: Tvrtko Ursulin
Reviewed-by: Tvrtko Ursulin
Link: https://patchwork.freedesktop.org/patch/msgid/20190508112452.18942-1-chris@chris-wilson.co.uk
(cherry picked from commit 0152b3b3f49b36b0f1a1bf9f0353dc636f41d8f0)
Signed-off-by: Joonas Lahtinen
---
 drivers/dma-buf/dma-fence.c                 |  1 +
 drivers/gpu/drm/i915/i915_request.c         |  1 +
 drivers/gpu/drm/i915/intel_breadcrumbs.c    | 78 +++++++++++++++++++++--------
 drivers/gpu/drm/i915/intel_guc_submission.c |  1 -
 4 files changed, 59 insertions(+), 22 deletions(-)

(limited to 'drivers/gpu/drm/i915/i915_request.c')

diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 3aa8733f832a..9bf06042619a 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -29,6 +29,7 @@
 EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit);
 EXPORT_TRACEPOINT_SYMBOL(dma_fence_enable_signal);
+EXPORT_TRACEPOINT_SYMBOL(dma_fence_signaled);
 
 static DEFINE_SPINLOCK(dma_fence_stub_lock);
 static struct dma_fence dma_fence_stub;
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index ce342f7f7ddb..f6c78c0fa74b 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -452,6 +452,7 @@ void __i915_request_submit(struct i915_request *request)
         set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
 
         if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) &&
+            !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags) &&
             !i915_request_enable_breadcrumb(request))
                 intel_engine_queue_breadcrumbs(engine);
 
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 3cbffd400b1b..832cb6b1e9bd 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -23,6 +23,7 @@
  */
 
 #include
+#include
 #include
 
 #include "i915_drv.h"
@@ -80,9 +81,39 @@ static inline bool __request_completed(const struct i915_request *rq)
         return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);
 }
 
+static bool
+__dma_fence_signal(struct dma_fence *fence)
+{
+        return !test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags);
+}
+
+static void
+__dma_fence_signal__timestamp(struct dma_fence *fence, ktime_t timestamp)
+{
+        fence->timestamp = timestamp;
+        set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
+        trace_dma_fence_signaled(fence);
+}
+
+static void
+__dma_fence_signal__notify(struct dma_fence *fence)
+{
+        struct dma_fence_cb *cur, *tmp;
+
+        lockdep_assert_held(fence->lock);
+        lockdep_assert_irqs_disabled();
+
+        list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
+                INIT_LIST_HEAD(&cur->node);
+                cur->func(fence, cur);
+        }
+        INIT_LIST_HEAD(&fence->cb_list);
+}
+
 void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
 {
         struct intel_breadcrumbs *b = &engine->breadcrumbs;
+        const ktime_t timestamp = ktime_get();
         struct intel_context *ce, *cn;
         struct list_head *pos, *next;
         LIST_HEAD(signal);
@@ -104,6 +135,10 @@ void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
                         GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_SIGNAL,
                                              &rq->fence.flags));
+                        clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
+
+                        if (!__dma_fence_signal(&rq->fence))
+                                continue;
 
                         /*
                          * Queue for execution after dropping the signaling
                         * lock as the callback chain may end up adding
                         * more signalers to the same context or engine.
                          */
                         i915_request_get(rq);
-
-                        /*
-                         * We may race with direct invocation of
-                         * dma_fence_signal(), e.g. i915_request_retire(),
-                         * so we need to acquire our reference to the request
-                         * before we cancel the breadcrumb.
-                         */
-                        clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
                         list_add_tail(&rq->signal_link, &signal);
                 }
 
@@ -141,7 +168,12 @@ void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
                 struct i915_request *rq =
                         list_entry(pos, typeof(*rq), signal_link);
 
-                dma_fence_signal(&rq->fence);
+                __dma_fence_signal__timestamp(&rq->fence, timestamp);
+
+                spin_lock(&rq->lock);
+                __dma_fence_signal__notify(&rq->fence);
+                spin_unlock(&rq->lock);
+
                 i915_request_put(rq);
         }
 }
@@ -243,19 +275,17 @@ void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
 
 bool i915_request_enable_breadcrumb(struct i915_request *rq)
 {
-        struct intel_breadcrumbs *b = &rq->engine->breadcrumbs;
-
-        GEM_BUG_ON(test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags));
+        lockdep_assert_held(&rq->lock);
+        lockdep_assert_irqs_disabled();
 
-        if (!test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags))
-                return true;
-
-        spin_lock(&b->irq_lock);
-        if (test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags) &&
-            !__request_completed(rq)) {
+        if (test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) {
+                struct intel_breadcrumbs *b = &rq->engine->breadcrumbs;
                 struct intel_context *ce = rq->hw_context;
                 struct list_head *pos;
 
+                spin_lock(&b->irq_lock);
+                GEM_BUG_ON(test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags));
+
                 __intel_breadcrumbs_arm_irq(b);
 
                 /*
@@ -284,8 +314,8 @@ bool i915_request_enable_breadcrumb(struct i915_request *rq)
                 list_move_tail(&ce->signal_link, &b->signalers);
 
                 set_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
+                spin_unlock(&b->irq_lock);
         }
-        spin_unlock(&b->irq_lock);
 
         return !__request_completed(rq);
 }
@@ -294,9 +324,15 @@ void i915_request_cancel_breadcrumb(struct i915_request *rq)
 {
         struct intel_breadcrumbs *b = &rq->engine->breadcrumbs;
 
-        if (!test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags))
-                return;
+        lockdep_assert_held(&rq->lock);
+        lockdep_assert_irqs_disabled();
 
+        /*
+         * We must wait for b->irq_lock so that we know the interrupt handler
+         * has released its reference to the intel_context and has completed
+         * the DMA_FENCE_FLAG_SIGNALED_BIT/I915_FENCE_FLAG_SIGNAL dance (if
+         * required).
+         */
         spin_lock(&b->irq_lock);
         if (test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) {
                 struct intel_context *ce = rq->hw_context;
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index 37f60cb8e9e1..46cd0e70aecb 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -23,7 +23,6 @@
  */
 
 #include
-#include
 
 #include "intel_guc_submission.h"
 #include "intel_lrc_reg.h"
--
cgit v1.2.3