From 572f2a5cd9742c52f6d4d659409180168a169a24 Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Tue, 15 Jun 2021 17:13:01 -0700 Subject: drm/i915/guc: Update firmware to v62.0.0 Most of the changes to the 62.0.0 firmware revolved around the CTB communication channel. Conform to the new (stable) CTB protocol. v2: (Michal) Add values back to kernel DOC for actions (Docs) Add 'CT buffer' back in to fix warning Signed-off-by: John Harrison Signed-off-by: Michal Wajdeczko Signed-off-by: Matthew Brost Reviewed-by: Daniele Ceraolo Spurio [mattrope: Tweaked kerneldoc while pushing as suggested by Daniele/Michal] Signed-off-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/20210616001302.84233-3-matthew.brost@intel.com --- drivers/gpu/drm/i915/gt/uc/intel_guc.c | 107 ++++++++++++++++++++++----------- 1 file changed, 72 insertions(+), 35 deletions(-) (limited to 'drivers/gpu/drm/i915/gt/uc/intel_guc.c') diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c index f147cb389a20..6661dcb02239 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c @@ -219,24 +219,19 @@ static u32 guc_ctl_log_params_flags(struct intel_guc *guc) BUILD_BUG_ON(!CRASH_BUFFER_SIZE); BUILD_BUG_ON(!IS_ALIGNED(CRASH_BUFFER_SIZE, UNIT)); - BUILD_BUG_ON(!DPC_BUFFER_SIZE); - BUILD_BUG_ON(!IS_ALIGNED(DPC_BUFFER_SIZE, UNIT)); - BUILD_BUG_ON(!ISR_BUFFER_SIZE); - BUILD_BUG_ON(!IS_ALIGNED(ISR_BUFFER_SIZE, UNIT)); + BUILD_BUG_ON(!DEBUG_BUFFER_SIZE); + BUILD_BUG_ON(!IS_ALIGNED(DEBUG_BUFFER_SIZE, UNIT)); BUILD_BUG_ON((CRASH_BUFFER_SIZE / UNIT - 1) > (GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT)); - BUILD_BUG_ON((DPC_BUFFER_SIZE / UNIT - 1) > - (GUC_LOG_DPC_MASK >> GUC_LOG_DPC_SHIFT)); - BUILD_BUG_ON((ISR_BUFFER_SIZE / UNIT - 1) > - (GUC_LOG_ISR_MASK >> GUC_LOG_ISR_SHIFT)); + BUILD_BUG_ON((DEBUG_BUFFER_SIZE / UNIT - 1) > + (GUC_LOG_DEBUG_MASK >> GUC_LOG_DEBUG_SHIFT)); flags = GUC_LOG_VALID | GUC_LOG_NOTIFY_ON_HALF_FULL | FLAG | ((CRASH_BUFFER_SIZE / UNIT - 1) << GUC_LOG_CRASH_SHIFT) | - ((DPC_BUFFER_SIZE / UNIT - 1) << GUC_LOG_DPC_SHIFT) | - ((ISR_BUFFER_SIZE / UNIT - 1) << GUC_LOG_ISR_SHIFT) | + ((DEBUG_BUFFER_SIZE / UNIT - 1) << GUC_LOG_DEBUG_SHIFT) | (offset << GUC_LOG_BUF_ADDR_SHIFT); #undef UNIT @@ -376,29 +371,27 @@ void intel_guc_fini(struct intel_guc *guc) /* * This function implements the MMIO based host to GuC interface. 
*/ -int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len, +int intel_guc_send_mmio(struct intel_guc *guc, const u32 *request, u32 len, u32 *response_buf, u32 response_buf_size) { + struct drm_i915_private *i915 = guc_to_gt(guc)->i915; struct intel_uncore *uncore = guc_to_gt(guc)->uncore; - u32 status; + u32 header; int i; int ret; GEM_BUG_ON(!len); GEM_BUG_ON(len > guc->send_regs.count); - /* We expect only action code */ - GEM_BUG_ON(*action & ~INTEL_GUC_MSG_CODE_MASK); - - /* If CT is available, we expect to use MMIO only during init/fini */ - GEM_BUG_ON(*action != INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER && - *action != INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER); + GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, request[0]) != GUC_HXG_ORIGIN_HOST); + GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_TYPE, request[0]) != GUC_HXG_TYPE_REQUEST); mutex_lock(&guc->send_mutex); intel_uncore_forcewake_get(uncore, guc->send_regs.fw_domains); +retry: for (i = 0; i < len; i++) - intel_uncore_write(uncore, guc_send_reg(guc, i), action[i]); + intel_uncore_write(uncore, guc_send_reg(guc, i), request[i]); intel_uncore_posting_read(uncore, guc_send_reg(guc, i - 1)); @@ -410,30 +403,74 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len, */ ret = __intel_wait_for_register_fw(uncore, guc_send_reg(guc, 0), - INTEL_GUC_MSG_TYPE_MASK, - INTEL_GUC_MSG_TYPE_RESPONSE << - INTEL_GUC_MSG_TYPE_SHIFT, - 10, 10, &status); - /* If GuC explicitly returned an error, convert it to -EIO */ - if (!ret && !INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(status)) - ret = -EIO; + GUC_HXG_MSG_0_ORIGIN, + FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, + GUC_HXG_ORIGIN_GUC), + 10, 10, &header); + if (unlikely(ret)) { +timeout: + drm_err(&i915->drm, "mmio request %#x: no reply %x\n", + request[0], header); + goto out; + } - if (ret) { - DRM_ERROR("MMIO: GuC action %#x failed with error %d %#x\n", - action[0], ret, status); + if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) == GUC_HXG_TYPE_NO_RESPONSE_BUSY) { +#define done ({ header = intel_uncore_read(uncore, guc_send_reg(guc, 0)); \ + FIELD_GET(GUC_HXG_MSG_0_ORIGIN, header) != GUC_HXG_ORIGIN_GUC || \ + FIELD_GET(GUC_HXG_MSG_0_TYPE, header) != GUC_HXG_TYPE_NO_RESPONSE_BUSY; }) + + ret = wait_for(done, 1000); + if (unlikely(ret)) + goto timeout; + if (unlikely(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, header) != + GUC_HXG_ORIGIN_GUC)) + goto proto; +#undef done + } + + if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) == GUC_HXG_TYPE_NO_RESPONSE_RETRY) { + u32 reason = FIELD_GET(GUC_HXG_RETRY_MSG_0_REASON, header); + + drm_dbg(&i915->drm, "mmio request %#x: retrying, reason %u\n", + request[0], reason); + goto retry; + } + + if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) == GUC_HXG_TYPE_RESPONSE_FAILURE) { + u32 hint = FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, header); + u32 error = FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, header); + + drm_err(&i915->drm, "mmio request %#x: failure %x/%u\n", + request[0], error, hint); + ret = -ENXIO; + goto out; + } + + if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) != GUC_HXG_TYPE_RESPONSE_SUCCESS) { +proto: + drm_err(&i915->drm, "mmio request %#x: unexpected reply %#x\n", + request[0], header); + ret = -EPROTO; goto out; } if (response_buf) { - int count = min(response_buf_size, guc->send_regs.count - 1); + int count = min(response_buf_size, guc->send_regs.count); - for (i = 0; i < count; i++) + GEM_BUG_ON(!count); + + response_buf[0] = header; + + for (i = 1; i < count; i++) response_buf[i] = intel_uncore_read(uncore, - guc_send_reg(guc, i + 1)); - } + 
guc_send_reg(guc, i)); - /* Use data from the GuC response as our return value */ - ret = INTEL_GUC_MSG_TO_DATA(status); + /* Use number of copied dwords as our return value */ + ret = count; + } else { + /* Use data from the GuC response as our return value */ + ret = FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, header); + } out: intel_uncore_forcewake_put(uncore, guc->send_regs.fw_domains); -- cgit v1.2.3 From eb5e7da736f36f558b9f8992f5e691a5b3b46b84 Mon Sep 17 00:00:00 2001 From: Matthew Brost Date: Mon, 26 Jul 2021 17:23:23 -0700 Subject: drm/i915/guc: Reset implementation for new GuC interface Reset implementation for new GuC interface. This is the legacy reset implementation which is called when the i915 owns the engine hang check. Future patches will offload the engine hang check to GuC but we will continue to maintain this legacy path as a fallback and this code path is also required if the GuC dies. With the new GuC interface it is not possible to reset individual engines - it is only possible to reset the GPU entirely. This patch forces an entire chip reset if any engine hangs. v2: (Michal) - Check for -EPIPE rather than -EIO (CT deadlock/corrupt check) v3: (John H) - Split into a series of smaller patches v4: (John H) - Fix typo - Add braces around if statements in reset code v5: (Checkpatch) - Fix warnings Cc: John Harrison Signed-off-by: Matthew Brost Reviewed-by: John Harrison Signed-off-by: John Harrison Link: https://patchwork.freedesktop.org/patch/msgid/20210727002348.97202-9-matthew.brost@intel.com --- drivers/gpu/drm/i915/gt/intel_gt_pm.c | 6 +- drivers/gpu/drm/i915/gt/intel_reset.c | 18 +- drivers/gpu/drm/i915/gt/uc/intel_guc.c | 13 - drivers/gpu/drm/i915/gt/uc/intel_guc.h | 8 +- drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 562 ++++++++++++++++++---- drivers/gpu/drm/i915/gt/uc/intel_uc.c | 38 +- drivers/gpu/drm/i915/gt/uc/intel_uc.h | 3 + 7 files changed, 516 insertions(+), 132 deletions(-) (limited to 'drivers/gpu/drm/i915/gt/uc/intel_guc.c') diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c index d86825437516..cd7b96005d29 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c @@ -170,8 +170,6 @@ static void gt_sanitize(struct intel_gt *gt, bool force) if (intel_gt_is_wedged(gt)) intel_gt_unset_wedged(gt); - intel_uc_sanitize(&gt->uc); - for_each_engine(engine, gt, id) if (engine->reset.prepare) engine->reset.prepare(engine); @@ -187,6 +185,8 @@ static void gt_sanitize(struct intel_gt *gt, bool force) __intel_engine_reset(engine, false); } + intel_uc_reset(&gt->uc, false); + for_each_engine(engine, gt, id) if (engine->reset.finish) engine->reset.finish(engine); @@ -239,6 +239,8 @@ int intel_gt_resume(struct intel_gt *gt) goto err_wedged; } + intel_uc_reset_finish(&gt->uc); + intel_rps_enable(&gt->rps); intel_llc_enable(&gt->llc); diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index 9586613ee399..721a10e2215e 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -832,6 +832,8 @@ static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask) __intel_engine_reset(engine, stalled_mask & engine->mask); local_bh_enable(); + intel_uc_reset(&gt->uc, true); + intel_ggtt_restore_fences(gt->ggtt); return err; @@ -856,6 +858,8 @@ static void reset_finish(struct intel_gt *gt, intel_engine_mask_t awake) if (awake & engine->mask) intel_engine_pm_put(engine); } + + intel_uc_reset_finish(&gt->uc); } static void 
nop_submit_request(struct i915_request *request) @@ -909,6 +913,7 @@ static void __intel_gt_set_wedged(struct intel_gt *gt) for_each_engine(engine, gt, id) if (engine->reset.cancel) engine->reset.cancel(engine); + intel_uc_cancel_requests(&gt->uc); local_bh_enable(); reset_finish(gt, awake); @@ -1197,6 +1202,9 @@ int __intel_engine_reset_bh(struct intel_engine_cs *engine, const char *msg) ENGINE_TRACE(engine, "flags=%lx\n", gt->reset.flags); GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags)); + if (intel_engine_uses_guc(engine)) + return -ENODEV; + if (!intel_engine_pm_get_if_awake(engine)) return 0; @@ -1207,13 +1215,10 @@ int __intel_engine_reset_bh(struct intel_engine_cs *engine, const char *msg) "Resetting %s for %s\n", engine->name, msg); atomic_inc(&engine->i915->gpu_error.reset_engine_count[engine->uabi_class]); - if (intel_engine_uses_guc(engine)) - ret = intel_guc_reset_engine(&engine->gt->uc.guc, engine); - else - ret = intel_gt_reset_engine(engine); + ret = intel_gt_reset_engine(engine); if (ret) { /* If we fail here, we expect to fallback to a global reset */ - ENGINE_TRACE(engine, "Failed to reset, err: %d\n", ret); + ENGINE_TRACE(engine, "Failed to reset %s, err: %d\n", engine->name, ret); goto out; } @@ -1347,7 +1352,8 @@ void intel_gt_handle_error(struct intel_gt *gt, * Try engine reset when available. We fall back to full reset if * single reset fails. */ - if (intel_has_reset_engine(gt) && !intel_gt_is_wedged(gt)) { + if (!intel_uc_uses_guc_submission(&gt->uc) && + intel_has_reset_engine(gt) && !intel_gt_is_wedged(gt)) { local_bh_disable(); for_each_engine_masked(engine, gt, engine_mask, tmp) { BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c index 6661dcb02239..9b09395b998f 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c @@ -572,19 +572,6 @@ int intel_guc_suspend(struct intel_guc *guc) return 0; } -/** - * intel_guc_reset_engine() - ask GuC to reset an engine - * @guc: intel_guc structure - * @engine: engine to be reset - */ -int intel_guc_reset_engine(struct intel_guc *guc, - struct intel_engine_cs *engine) -{ - /* XXX: to be implemented with submission interface rework */ - - return -ENODEV; -} - /** * intel_guc_resume() - notify GuC resuming from suspend state * @guc: the guc diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h index d4987cd789ea..a068e3262676 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h @@ -249,14 +249,16 @@ static inline void intel_guc_disable_msg(struct intel_guc *guc, u32 mask) int intel_guc_wait_for_idle(struct intel_guc *guc, long timeout); -int intel_guc_reset_engine(struct intel_guc *guc, - struct intel_engine_cs *engine); - int intel_guc_deregister_done_process_msg(struct intel_guc *guc, const u32 *msg, u32 len); int intel_guc_sched_done_process_msg(struct intel_guc *guc, const u32 *msg, u32 len); +void intel_guc_submission_reset_prepare(struct intel_guc *guc); +void intel_guc_submission_reset(struct intel_guc *guc, bool stalled); +void intel_guc_submission_reset_finish(struct intel_guc *guc); +void intel_guc_submission_cancel_requests(struct intel_guc *guc); + void intel_guc_load_status(struct intel_guc *guc, struct drm_printer *p); #endif diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index 2d49fbcfb88b..be13a33ef17e 100644 
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -141,7 +141,7 @@ context_wait_for_deregister_to_register(struct intel_context *ce) static inline void set_context_wait_for_deregister_to_register(struct intel_context *ce) { - /* Only should be called from guc_lrc_desc_pin() */ + /* Should only be called from guc_lrc_desc_pin() without lock */ ce->guc_state.sched_state |= SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER; } @@ -239,15 +239,31 @@ static int guc_lrc_desc_pool_create(struct intel_guc *guc) static void guc_lrc_desc_pool_destroy(struct intel_guc *guc) { + guc->lrc_desc_pool_vaddr = NULL; i915_vma_unpin_and_release(&guc->lrc_desc_pool, I915_VMA_RELEASE_MAP); } +static inline bool guc_submission_initialized(struct intel_guc *guc) +{ + return !!guc->lrc_desc_pool_vaddr; +} + static inline void reset_lrc_desc(struct intel_guc *guc, u32 id) { - struct guc_lrc_desc *desc = __get_lrc_desc(guc, id); + if (likely(guc_submission_initialized(guc))) { + struct guc_lrc_desc *desc = __get_lrc_desc(guc, id); + unsigned long flags; - memset(desc, 0, sizeof(*desc)); - xa_erase_irq(&guc->context_lookup, id); + memset(desc, 0, sizeof(*desc)); + + /* + * xarray API doesn't have xa_erase_irqsave wrapper, so calling + * the lower level functions directly. + */ + xa_lock_irqsave(&guc->context_lookup, flags); + __xa_erase(&guc->context_lookup, id); + xa_unlock_irqrestore(&guc->context_lookup, flags); + } } static inline bool lrc_desc_registered(struct intel_guc *guc, u32 id) @@ -258,7 +274,15 @@ static inline bool lrc_desc_registered(struct intel_guc *guc, u32 id) static inline void set_lrc_desc_registered(struct intel_guc *guc, u32 id, struct intel_context *ce) { - xa_store_irq(&guc->context_lookup, id, ce, GFP_ATOMIC); + unsigned long flags; + + /* + * xarray API doesn't have xa_store_irqsave wrapper, so calling the + * lower level functions directly. + */ + xa_lock_irqsave(&guc->context_lookup, flags); + __xa_store(&guc->context_lookup, id, ce, GFP_ATOMIC); + xa_unlock_irqrestore(&guc->context_lookup, flags); } static int guc_submission_send_busy_loop(struct intel_guc *guc, @@ -327,6 +351,8 @@ int intel_guc_wait_for_idle(struct intel_guc *guc, long timeout) true, timeout); } +static int guc_lrc_desc_pin(struct intel_context *ce, bool loop); + static int guc_add_request(struct intel_guc *guc, struct i915_request *rq) { int err; @@ -334,11 +360,22 @@ static int guc_add_request(struct intel_guc *guc, struct i915_request *rq) u32 action[3]; int len = 0; u32 g2h_len_dw = 0; - bool enabled = context_enabled(ce); + bool enabled; GEM_BUG_ON(!atomic_read(&ce->guc_id_ref)); GEM_BUG_ON(context_guc_id_invalid(ce)); + /* + * Corner case where the GuC firmware was blown away and reloaded while + * this context was pinned. + */ + if (unlikely(!lrc_desc_registered(guc, ce->guc_id))) { + err = guc_lrc_desc_pin(ce, false); + if (unlikely(err)) + goto out; + } + enabled = context_enabled(ce); + if (!enabled) { action[len++] = INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_SET; action[len++] = ce->guc_id; @@ -361,6 +398,7 @@ static int guc_add_request(struct intel_guc *guc, struct i915_request *rq) intel_context_put(ce); } +out: return err; } @@ -415,15 +453,10 @@ done: if (submit) { guc_set_lrc_tail(last); resubmit: - /* - * We only check for -EBUSY here even though it is possible for - * -EDEADLK to be returned. If -EDEADLK is returned, the GuC has - * died and a full GT reset needs to be done. 
The hangcheck will - * eventually detect that the GuC has died and trigger this - * reset so no need to handle -EDEADLK here. - */ ret = guc_add_request(guc, last); - if (ret == -EBUSY) { + if (unlikely(ret == -EPIPE)) + goto deadlk; + else if (ret == -EBUSY) { tasklet_schedule(&sched_engine->tasklet); guc->stalled_request = last; return false; @@ -433,6 +466,11 @@ resubmit: guc->stalled_request = NULL; return submit; + +deadlk: + sched_engine->tasklet.callback = NULL; + tasklet_disable_nosync(&sched_engine->tasklet); + return false; } static void guc_submission_tasklet(struct tasklet_struct *t) @@ -459,27 +497,167 @@ static void cs_irq_handler(struct intel_engine_cs *engine, u16 iir) intel_engine_signal_breadcrumbs(engine); } -static void guc_reset_prepare(struct intel_engine_cs *engine) +static void __guc_context_destroy(struct intel_context *ce); +static void release_guc_id(struct intel_guc *guc, struct intel_context *ce); +static void guc_signal_context_fence(struct intel_context *ce); + +static void scrub_guc_desc_for_outstanding_g2h(struct intel_guc *guc) +{ + struct intel_context *ce; + unsigned long index, flags; + bool pending_disable, pending_enable, deregister, destroyed; + + xa_for_each(&guc->context_lookup, index, ce) { + /* Flush context */ + spin_lock_irqsave(&ce->guc_state.lock, flags); + spin_unlock_irqrestore(&ce->guc_state.lock, flags); + + /* + * Once we are at this point submission_disabled() is guaranteed + * to be visible to all callers who set the below flags (see above + * flush and flushes in reset_prepare). If submission_disabled() + * is set, the caller shouldn't set these flags. + */ + + destroyed = context_destroyed(ce); + pending_enable = context_pending_enable(ce); + pending_disable = context_pending_disable(ce); + deregister = context_wait_for_deregister_to_register(ce); + init_sched_state(ce); + + if (pending_enable || destroyed || deregister) { + atomic_dec(&guc->outstanding_submission_g2h); + if (deregister) + guc_signal_context_fence(ce); + if (destroyed) { + release_guc_id(guc, ce); + __guc_context_destroy(ce); + } + if (pending_enable || deregister) + intel_context_put(ce); + } + + /* Not mutually exclusive with above if statement. */ + if (pending_disable) { + guc_signal_context_fence(ce); + intel_context_sched_disable_unpin(ce); + atomic_dec(&guc->outstanding_submission_g2h); + intel_context_put(ce); + } + } +} + +static inline bool +submission_disabled(struct intel_guc *guc) +{ + struct i915_sched_engine * const sched_engine = guc->sched_engine; + + return unlikely(!sched_engine || + !__tasklet_is_enabled(&sched_engine->tasklet)); } + +static void disable_submission(struct intel_guc *guc) +{ + struct i915_sched_engine * const sched_engine = guc->sched_engine; + + if (__tasklet_is_enabled(&sched_engine->tasklet)) { + GEM_BUG_ON(!guc->ct.enabled); + __tasklet_disable_sync_once(&sched_engine->tasklet); + sched_engine->tasklet.callback = NULL; + } +} + +static void enable_submission(struct intel_guc *guc) { - ENGINE_TRACE(engine, "\n"); + struct i915_sched_engine * const sched_engine = guc->sched_engine; + unsigned long flags; + + spin_lock_irqsave(&guc->sched_engine->lock, flags); + sched_engine->tasklet.callback = guc_submission_tasklet; + wmb(); /* Make sure callback visible */ + if (!__tasklet_is_enabled(&sched_engine->tasklet) && + __tasklet_enable(&sched_engine->tasklet)) { + GEM_BUG_ON(!guc->ct.enabled); + + /* And kick in case we missed a new request submission. 
*/ + tasklet_hi_schedule(&sched_engine->tasklet); + } + spin_unlock_irqrestore(&guc->sched_engine->lock, flags); +} + +static void guc_flush_submissions(struct intel_guc *guc) +{ + struct i915_sched_engine * const sched_engine = guc->sched_engine; + unsigned long flags; + + spin_lock_irqsave(&sched_engine->lock, flags); + spin_unlock_irqrestore(&sched_engine->lock, flags); +} + +void intel_guc_submission_reset_prepare(struct intel_guc *guc) +{ + int i; + + if (unlikely(!guc_submission_initialized(guc))) { + /* Reset called during driver load? GuC not yet initialised! */ + return; + } + + disable_submission(guc); + guc->interrupts.disable(guc); + + /* Flush IRQ handler */ + spin_lock_irq(&guc_to_gt(guc)->irq_lock); + spin_unlock_irq(&guc_to_gt(guc)->irq_lock); + + guc_flush_submissions(guc); /* - * Prevent request submission to the hardware until we have - * completed the reset in i915_gem_reset_finish(). If a request - * is completed by one engine, it may then queue a request - * to a second via its execlists->tasklet *just* as we are - * calling engine->init_hw() and also writing the ELSP. - * Turning off the execlists->tasklet until the reset is over - * prevents the race. + * Handle any outstanding G2Hs before reset. Call IRQ handler directly + * each pass as interrupts have been disabled. We always scrub for + * outstanding G2H as it is possible for outstanding_submission_g2h to + * be incremented after the context state update. */ - __tasklet_disable_sync_once(&engine->sched_engine->tasklet); + for (i = 0; i < 4 && atomic_read(&guc->outstanding_submission_g2h); ++i) { + intel_guc_to_host_event_handler(guc); +#define wait_for_reset(guc, wait_var) \ + guc_wait_for_pending_msg(guc, wait_var, false, (HZ / 20)) + do { + wait_for_reset(guc, &guc->outstanding_submission_g2h); + } while (!list_empty(&guc->ct.requests.incoming)); + } + scrub_guc_desc_for_outstanding_g2h(guc); } -static void guc_reset_state(struct intel_context *ce, - struct intel_engine_cs *engine, - u32 head, - bool scrub) +static struct intel_engine_cs * +guc_virtual_get_sibling(struct intel_engine_cs *ve, unsigned int sibling) { + struct intel_engine_cs *engine; + intel_engine_mask_t tmp, mask = ve->mask; + unsigned int num_siblings = 0; + + for_each_engine_masked(engine, ve->gt, mask, tmp) + if (num_siblings++ == sibling) + return engine; + + return NULL; +} + +static inline struct intel_engine_cs * +__context_to_physical_engine(struct intel_context *ce) +{ + struct intel_engine_cs *engine = ce->engine; + + if (intel_engine_is_virtual(engine)) + engine = guc_virtual_get_sibling(engine, 0); + + return engine; +} + +static void guc_reset_state(struct intel_context *ce, u32 head, bool scrub) +{ + struct intel_engine_cs *engine = __context_to_physical_engine(ce); + GEM_BUG_ON(!intel_context_is_pinned(ce)); /* @@ -497,42 +675,148 @@ static void guc_reset_state(struct intel_context *ce, lrc_update_regs(ce, engine, head); } -static void guc_reset_rewind(struct intel_engine_cs *engine, bool stalled) +static void guc_reset_nop(struct intel_engine_cs *engine) { - struct intel_engine_execlists * const execlists = &engine->execlists; - struct i915_request *rq; +} + +static void guc_rewind_nop(struct intel_engine_cs *engine, bool stalled) +{ +} + +static void +__unwind_incomplete_requests(struct intel_context *ce) +{ + struct i915_request *rq, *rn; + struct list_head *pl; + int prio = I915_PRIORITY_INVALID; + struct i915_sched_engine * const sched_engine = + ce->engine->sched_engine; + unsigned long flags; + + 
spin_lock_irqsave(&sched_engine->lock, flags); + spin_lock(&ce->guc_active.lock); + list_for_each_entry_safe(rq, rn, + &ce->guc_active.requests, + sched.link) { + if (i915_request_completed(rq)) + continue; + + list_del_init(&rq->sched.link); + spin_unlock(&ce->guc_active.lock); + + __i915_request_unsubmit(rq); + + /* Push the request back into the queue for later resubmission. */ + GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID); + if (rq_prio(rq) != prio) { + prio = rq_prio(rq); + pl = i915_sched_lookup_priolist(sched_engine, prio); + } + GEM_BUG_ON(i915_sched_engine_is_empty(sched_engine)); + + list_add_tail(&rq->sched.link, pl); + set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags); + + spin_lock(&ce->guc_active.lock); + } + spin_unlock(&ce->guc_active.lock); + spin_unlock_irqrestore(&sched_engine->lock, flags); +} + +static struct i915_request *context_find_active_request(struct intel_context *ce) +{ + struct i915_request *rq, *active = NULL; unsigned long flags; - spin_lock_irqsave(&engine->sched_engine->lock, flags); + spin_lock_irqsave(&ce->guc_active.lock, flags); + list_for_each_entry_reverse(rq, &ce->guc_active.requests, + sched.link) { + if (i915_request_completed(rq)) + break; + + active = rq; + } + spin_unlock_irqrestore(&ce->guc_active.lock, flags); + + return active; +} + +static void __guc_reset_context(struct intel_context *ce, bool stalled) +{ + struct i915_request *rq; + u32 head; + + /* + * GuC will implicitly mark the context as non-schedulable + * when it sends the reset notification. Make sure our state + * reflects this change. The context will be marked enabled + * on resubmission. + */ + clr_context_enabled(ce); - /* Push back any incomplete requests for replay after the reset. */ - rq = execlists_unwind_incomplete_requests(execlists); - if (!rq) - goto out_unlock; + rq = context_find_active_request(ce); + if (!rq) { + head = ce->ring->tail; + stalled = false; + goto out_replay; + } if (!i915_request_started(rq)) stalled = false; + GEM_BUG_ON(i915_active_is_idle(&ce->active)); + head = intel_ring_wrap(ce->ring, rq->head); __i915_request_reset(rq, stalled); - guc_reset_state(rq->context, engine, rq->head, stalled); -out_unlock: - spin_unlock_irqrestore(&engine->sched_engine->lock, flags); +out_replay: + guc_reset_state(ce, head, stalled); + __unwind_incomplete_requests(ce); +} + +void intel_guc_submission_reset(struct intel_guc *guc, bool stalled) +{ + struct intel_context *ce; + unsigned long index; + + if (unlikely(!guc_submission_initialized(guc))) { + /* Reset called during driver load? GuC not yet initialised! */ + return; + } + + xa_for_each(&guc->context_lookup, index, ce) + if (intel_context_is_pinned(ce)) + __guc_reset_context(ce, stalled); + + /* GuC is blown away, drop all references to contexts */ + xa_destroy(&guc->context_lookup); +} + +static void guc_cancel_context_requests(struct intel_context *ce) +{ + struct i915_sched_engine *sched_engine = ce_to_guc(ce)->sched_engine; + struct i915_request *rq; + unsigned long flags; + + /* Mark all executing requests as skipped. 
*/ + spin_lock_irqsave(&sched_engine->lock, flags); + spin_lock(&ce->guc_active.lock); + list_for_each_entry(rq, &ce->guc_active.requests, sched.link) + i915_request_put(i915_request_mark_eio(rq)); + spin_unlock(&ce->guc_active.lock); + spin_unlock_irqrestore(&sched_engine->lock, flags); } -static void guc_reset_cancel(struct intel_engine_cs *engine) +static void +guc_cancel_sched_engine_requests(struct i915_sched_engine *sched_engine) { - struct i915_sched_engine * const sched_engine = engine->sched_engine; struct i915_request *rq, *rn; struct rb_node *rb; unsigned long flags; /* Can be called during boot if GuC fails to load */ - if (!engine->gt) + if (!sched_engine) return; - ENGINE_TRACE(engine, "\n"); - /* * Before we call engine->cancel_requests(), we should have exclusive * access to the submission state. This is arranged for us by the @@ -549,21 +833,16 @@ static void guc_reset_cancel(struct intel_engine_cs *engine) */ spin_lock_irqsave(&sched_engine->lock, flags); - /* Mark all executing requests as skipped. */ - list_for_each_entry(rq, &sched_engine->requests, sched.link) { - i915_request_set_error_once(rq, -EIO); - i915_request_mark_complete(rq); - } - /* Flush the queued requests to the timeline list (for retiring). */ while ((rb = rb_first_cached(&sched_engine->queue))) { struct i915_priolist *p = to_priolist(rb); priolist_for_each_request_consume(rq, rn, p) { list_del_init(&rq->sched.link); + __i915_request_submit(rq); - dma_fence_set_error(&rq->fence, -EIO); - i915_request_mark_complete(rq); + + i915_request_put(i915_request_mark_eio(rq)); } rb_erase_cached(&p->node, &sched_engine->queue); @@ -578,14 +857,39 @@ static void guc_reset_cancel(struct intel_engine_cs *engine) spin_unlock_irqrestore(&sched_engine->lock, flags); } -static void guc_reset_finish(struct intel_engine_cs *engine) +void intel_guc_submission_cancel_requests(struct intel_guc *guc) { - if (__tasklet_enable(&engine->sched_engine->tasklet)) - /* And kick in case we missed a new request submission. */ - tasklet_hi_schedule(&engine->sched_engine->tasklet); + struct intel_context *ce; + unsigned long index; + + xa_for_each(&guc->context_lookup, index, ce) + if (intel_context_is_pinned(ce)) + guc_cancel_context_requests(ce); + + guc_cancel_sched_engine_requests(guc->sched_engine); + + /* GuC is blown away, drop all references to contexts */ + xa_destroy(&guc->context_lookup); +} + +void intel_guc_submission_reset_finish(struct intel_guc *guc) +{ + /* Reset called during driver load or during wedge? */ + if (unlikely(!guc_submission_initialized(guc) || + test_bit(I915_WEDGED, &guc_to_gt(guc)->reset.flags))) { + return; + } + + /* + * Technically possible for either of these values to be non-zero here, + * but very unlikely + harmless. Regardless let's add a warn so we can + * see in CI if this happens frequently / a precursor to taking down the + * machine. + */ + GEM_WARN_ON(atomic_read(&guc->outstanding_submission_g2h)); + atomic_set(&guc->outstanding_submission_g2h, 0); - ENGINE_TRACE(engine, "depth->%d\n", - atomic_read(&engine->sched_engine->tasklet.count)); + enable_submission(guc); } /* @@ -652,6 +956,9 @@ static int guc_bypass_tasklet_submit(struct intel_guc *guc, else trace_i915_request_guc_submit(rq); + if (unlikely(ret == -EPIPE)) + disable_submission(guc); + return ret; } @@ -664,7 +971,8 @@ static void guc_submit_request(struct i915_request *rq) /* Will be called from irq-context when using foreign fences. 
*/ spin_lock_irqsave(&sched_engine->lock, flags); - if (guc->stalled_request || !i915_sched_engine_is_empty(sched_engine)) + if (submission_disabled(guc) || guc->stalled_request || + !i915_sched_engine_is_empty(sched_engine)) queue_request(sched_engine, rq, rq_prio(rq)); else if (guc_bypass_tasklet_submit(guc, rq) == -EBUSY) tasklet_hi_schedule(&sched_engine->tasklet); @@ -807,7 +1115,8 @@ static void unpin_guc_id(struct intel_guc *guc, struct intel_context *ce) static int __guc_action_register_context(struct intel_guc *guc, u32 guc_id, - u32 offset) + u32 offset, + bool loop) { u32 action[] = { INTEL_GUC_ACTION_REGISTER_CONTEXT, @@ -816,10 +1125,10 @@ static int __guc_action_register_context(struct intel_guc *guc, }; return guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action), - 0, true); + 0, loop); } -static int register_context(struct intel_context *ce) +static int register_context(struct intel_context *ce, bool loop) { struct intel_guc *guc = ce_to_guc(ce); u32 offset = intel_guc_ggtt_offset(guc, guc->lrc_desc_pool) + @@ -827,11 +1136,12 @@ static int register_context(struct intel_context *ce) trace_intel_context_register(ce); - return __guc_action_register_context(guc, ce->guc_id, offset); + return __guc_action_register_context(guc, ce->guc_id, offset, loop); } static int __guc_action_deregister_context(struct intel_guc *guc, - u32 guc_id) + u32 guc_id, + bool loop) { u32 action[] = { INTEL_GUC_ACTION_DEREGISTER_CONTEXT, @@ -840,16 +1150,16 @@ static int __guc_action_deregister_context(struct intel_guc *guc, return guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action), G2H_LEN_DW_DEREGISTER_CONTEXT, - true); + loop); } -static int deregister_context(struct intel_context *ce, u32 guc_id) +static int deregister_context(struct intel_context *ce, u32 guc_id, bool loop) { struct intel_guc *guc = ce_to_guc(ce); trace_intel_context_deregister(ce); - return __guc_action_deregister_context(guc, guc_id); + return __guc_action_deregister_context(guc, guc_id, loop); } static intel_engine_mask_t adjust_engine_mask(u8 class, intel_engine_mask_t mask) @@ -878,7 +1188,7 @@ static void guc_context_policy_init(struct intel_engine_cs *engine, desc->preemption_timeout = CONTEXT_POLICY_DEFAULT_PREEMPTION_TIME_US; } -static int guc_lrc_desc_pin(struct intel_context *ce) +static int guc_lrc_desc_pin(struct intel_context *ce, bool loop) { struct intel_engine_cs *engine = ce->engine; struct intel_runtime_pm *runtime_pm = engine->uncore->rpm; @@ -923,18 +1233,46 @@ static int guc_lrc_desc_pin(struct intel_context *ce) */ if (context_registered) { trace_intel_context_steal_guc_id(ce); - set_context_wait_for_deregister_to_register(ce); - intel_context_get(ce); + if (!loop) { + set_context_wait_for_deregister_to_register(ce); + intel_context_get(ce); + } else { + bool disabled; + unsigned long flags; + + /* Seal race with Reset */ + spin_lock_irqsave(&ce->guc_state.lock, flags); + disabled = submission_disabled(guc); + if (likely(!disabled)) { + set_context_wait_for_deregister_to_register(ce); + intel_context_get(ce); + } + spin_unlock_irqrestore(&ce->guc_state.lock, flags); + if (unlikely(disabled)) { + reset_lrc_desc(guc, desc_idx); + return 0; /* Will get registered later */ + } + } /* * If stealing the guc_id, this ce has the same guc_id as the * context whose guc_id was stolen. 
*/ with_intel_runtime_pm(runtime_pm, wakeref) - ret = deregister_context(ce, ce->guc_id); + ret = deregister_context(ce, ce->guc_id, loop); + if (unlikely(ret == -EBUSY)) { + clr_context_wait_for_deregister_to_register(ce); + intel_context_put(ce); + } else if (unlikely(ret == -ENODEV)) { + ret = 0; /* Will get registered later */ + } } else { with_intel_runtime_pm(runtime_pm, wakeref) - ret = register_context(ce); + ret = register_context(ce, loop); + if (unlikely(ret == -EBUSY)) + reset_lrc_desc(guc, desc_idx); + else if (unlikely(ret == -ENODEV)) + ret = 0; /* Will get registered later */ } return ret; @@ -1002,7 +1340,6 @@ static void __guc_context_sched_disable(struct intel_guc *guc, GEM_BUG_ON(guc_id == GUC_INVALID_LRC_ID); trace_intel_context_sched_disable(ce); - intel_context_get(ce); guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action), G2H_LEN_DW_SCHED_CONTEXT_MODE_SET, true); @@ -1014,6 +1351,7 @@ static u16 prep_context_pending_disable(struct intel_context *ce) set_context_pending_disable(ce); clr_context_enabled(ce); + intel_context_get(ce); return ce->guc_id; } @@ -1026,7 +1364,7 @@ static void guc_context_sched_disable(struct intel_context *ce) u16 guc_id; intel_wakeref_t wakeref; - if (context_guc_id_invalid(ce) || + if (submission_disabled(guc) || context_guc_id_invalid(ce) || !lrc_desc_registered(guc, ce->guc_id)) { clr_context_enabled(ce); goto unpin; @@ -1065,17 +1403,12 @@ unpin: static inline void guc_lrc_desc_unpin(struct intel_context *ce) { struct intel_guc *guc = ce_to_guc(ce); - unsigned long flags; GEM_BUG_ON(!lrc_desc_registered(guc, ce->guc_id)); GEM_BUG_ON(ce != __get_context(guc, ce->guc_id)); GEM_BUG_ON(context_enabled(ce)); - spin_lock_irqsave(&ce->guc_state.lock, flags); - set_context_destroyed(ce); - spin_unlock_irqrestore(&ce->guc_state.lock, flags); - - deregister_context(ce, ce->guc_id); + deregister_context(ce, ce->guc_id, true); } static void __guc_context_destroy(struct intel_context *ce) @@ -1103,16 +1436,18 @@ static void guc_context_destroy(struct kref *kref) struct intel_guc *guc = ce_to_guc(ce); intel_wakeref_t wakeref; unsigned long flags; + bool disabled; /* * If the guc_id is invalid this context has been stolen and we can free * it immediately. Also can be freed immediately if the context is not - * registered with the GuC. + * registered with the GuC or the GuC is in the middle of a reset. */ if (context_guc_id_invalid(ce)) { __guc_context_destroy(ce); return; - } else if (!lrc_desc_registered(guc, ce->guc_id)) { + } else if (submission_disabled(guc) || + !lrc_desc_registered(guc, ce->guc_id)) { release_guc_id(guc, ce); __guc_context_destroy(ce); return; @@ -1137,6 +1472,18 @@ static void guc_context_destroy(struct kref *kref) list_del_init(&ce->guc_id_link); spin_unlock_irqrestore(&guc->contexts_lock, flags); + /* Seal race with Reset */ + spin_lock_irqsave(&ce->guc_state.lock, flags); + disabled = submission_disabled(guc); + if (likely(!disabled)) + set_context_destroyed(ce); + spin_unlock_irqrestore(&ce->guc_state.lock, flags); + if (unlikely(disabled)) { + release_guc_id(guc, ce); + __guc_context_destroy(ce); + return; + } + /* * We defer GuC context deregistration until the context is destroyed * in order to save on CTBs. 
With this optimization ideally we only need @@ -1224,8 +1571,6 @@ static void guc_signal_context_fence(struct intel_context *ce) { unsigned long flags; - GEM_BUG_ON(!context_wait_for_deregister_to_register(ce)); - spin_lock_irqsave(&ce->guc_state.lock, flags); clr_context_wait_for_deregister_to_register(ce); __guc_signal_context_fence(ce); @@ -1234,8 +1579,9 @@ static void guc_signal_context_fence(struct intel_context *ce) static bool context_needs_register(struct intel_context *ce, bool new_guc_id) { - return new_guc_id || test_bit(CONTEXT_LRCA_DIRTY, &ce->flags) || - !lrc_desc_registered(ce_to_guc(ce), ce->guc_id); + return (new_guc_id || test_bit(CONTEXT_LRCA_DIRTY, &ce->flags) || + !lrc_desc_registered(ce_to_guc(ce), ce->guc_id)) && + !submission_disabled(ce_to_guc(ce)); } static int guc_request_alloc(struct i915_request *rq) @@ -1293,8 +1639,12 @@ static int guc_request_alloc(struct i915_request *rq) if (unlikely(ret < 0)) return ret; if (context_needs_register(ce, !!ret)) { - ret = guc_lrc_desc_pin(ce); + ret = guc_lrc_desc_pin(ce, true); if (unlikely(ret)) { /* unwind */ + if (ret == -EPIPE) { + disable_submission(guc); + goto out; /* GPU will be reset */ + } atomic_dec(&ce->guc_id_ref); unpin_guc_id(guc, ce); return ret; @@ -1331,20 +1681,6 @@ out: return 0; } -static struct intel_engine_cs * -guc_virtual_get_sibling(struct intel_engine_cs *ve, unsigned int sibling) -{ - struct intel_engine_cs *engine; - intel_engine_mask_t tmp, mask = ve->mask; - unsigned int num_siblings = 0; - - for_each_engine_masked(engine, ve->gt, mask, tmp) - if (num_siblings++ == sibling) - return engine; - - return NULL; -} - static int guc_virtual_context_pre_pin(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void **vaddr) @@ -1540,7 +1876,7 @@ static inline void guc_kernel_context_pin(struct intel_guc *guc, { if (context_guc_id_invalid(ce)) pin_guc_id(guc, ce); - guc_lrc_desc_pin(ce); + guc_lrc_desc_pin(ce, true); } static inline void guc_init_lrc_mapping(struct intel_guc *guc) @@ -1600,10 +1936,10 @@ static void guc_default_vfuncs(struct intel_engine_cs *engine) engine->sched_engine->schedule = i915_schedule; - engine->reset.prepare = guc_reset_prepare; - engine->reset.rewind = guc_reset_rewind; - engine->reset.cancel = guc_reset_cancel; - engine->reset.finish = guc_reset_finish; + engine->reset.prepare = guc_reset_nop; + engine->reset.rewind = guc_rewind_nop; + engine->reset.cancel = guc_reset_nop; + engine->reset.finish = guc_reset_nop; engine->emit_flush = gen8_emit_flush_xcs; engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb; @@ -1652,6 +1988,17 @@ static inline void guc_default_irqs(struct intel_engine_cs *engine) intel_engine_set_irq_handler(engine, cs_irq_handler); } +static void guc_sched_engine_destroy(struct kref *kref) +{ + struct i915_sched_engine *sched_engine = + container_of(kref, typeof(*sched_engine), ref); + struct intel_guc *guc = sched_engine->private_data; + + guc->sched_engine = NULL; + tasklet_kill(&sched_engine->tasklet); /* flush the callback */ + kfree(sched_engine); +} + int intel_guc_submission_setup(struct intel_engine_cs *engine) { struct drm_i915_private *i915 = engine->i915; @@ -1670,6 +2017,7 @@ int intel_guc_submission_setup(struct intel_engine_cs *engine) guc->sched_engine->schedule = i915_schedule; guc->sched_engine->private_data = guc; + guc->sched_engine->destroy = guc_sched_engine_destroy; tasklet_setup(&guc->sched_engine->tasklet, guc_submission_tasklet); } @@ -1776,7 +2124,7 @@ int intel_guc_deregister_done_process_msg(struct intel_guc *guc, * 
register this context. */ with_intel_runtime_pm(runtime_pm, wakeref) - register_context(ce); + register_context(ce, true); guc_signal_context_fence(ce); intel_context_put(ce); } else if (context_destroyed(ce)) { diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c index 6d8b9233214e..9a5ef1dca022 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c @@ -565,12 +565,48 @@ void intel_uc_reset_prepare(struct intel_uc *uc) { struct intel_guc *guc = &uc->guc; - if (!intel_guc_is_ready(guc)) + /* Nothing to do if GuC isn't supported */ + if (!intel_uc_supports_guc(uc)) return; + /* Firmware expected to be running when this function is called */ + if (!intel_guc_is_ready(guc)) + goto sanitize; + + if (intel_uc_uses_guc_submission(uc)) + intel_guc_submission_reset_prepare(guc); + +sanitize: __uc_sanitize(uc); } +void intel_uc_reset(struct intel_uc *uc, bool stalled) +{ + struct intel_guc *guc = &uc->guc; + + /* Firmware can not be running when this function is called */ + if (intel_uc_uses_guc_submission(uc)) + intel_guc_submission_reset(guc, stalled); +} + +void intel_uc_reset_finish(struct intel_uc *uc) +{ + struct intel_guc *guc = &uc->guc; + + /* Firmware expected to be running when this function is called */ + if (intel_guc_is_fw_running(guc) && intel_uc_uses_guc_submission(uc)) + intel_guc_submission_reset_finish(guc); +} + +void intel_uc_cancel_requests(struct intel_uc *uc) +{ + struct intel_guc *guc = &uc->guc; + + /* Firmware can not be running when this function is called */ + if (intel_uc_uses_guc_submission(uc)) + intel_guc_submission_cancel_requests(guc); +} + void intel_uc_runtime_suspend(struct intel_uc *uc) { struct intel_guc *guc = &uc->guc; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.h b/drivers/gpu/drm/i915/gt/uc/intel_uc.h index c4cef885e984..eaa3202192ac 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.h @@ -37,6 +37,9 @@ void intel_uc_driver_late_release(struct intel_uc *uc); void intel_uc_driver_remove(struct intel_uc *uc); void intel_uc_init_mmio(struct intel_uc *uc); void intel_uc_reset_prepare(struct intel_uc *uc); +void intel_uc_reset(struct intel_uc *uc, bool stalled); +void intel_uc_reset_finish(struct intel_uc *uc); +void intel_uc_cancel_requests(struct intel_uc *uc); void intel_uc_suspend(struct intel_uc *uc); void intel_uc_runtime_suspend(struct intel_uc *uc); int intel_uc_resume(struct intel_uc *uc); -- cgit v1.2.3 From cad46a332f3d0f0ffeb9f9069499fb19a2ab43f4 Mon Sep 17 00:00:00 2001 From: Matthew Brost Date: Mon, 26 Jul 2021 17:23:26 -0700 Subject: drm/i915/guc: Suspend/resume implementation for new interface The new GuC interface introduces an MMIO H2G command, INTEL_GUC_ACTION_RESET_CLIENT, which is used to implement suspend. This MMIO tears down any active contexts, generating a context reset G2H for each. Once that step completes, the GuC tears down the CTB channels. It is safe to suspend once this MMIO H2G command completes and all G2Hs have been processed. In practice the i915 will likely never receive a G2H as suspend should only be called after the GPU is idle. Resume is implemented in the same manner as before - simply reload the GuC firmware and reinitialize everything (e.g. CTB channels, contexts, etc.). 
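In code terms, the suspend flow described above collapses to a single H2G MMIO request followed by a sanitize; the following is a condensed sketch of the intel_guc_suspend() hunk in the diff below (comments and error-path text trimmed, not a separate implementation):

	int intel_guc_suspend(struct intel_guc *guc)
	{
		u32 action[] = { INTEL_GUC_ACTION_RESET_CLIENT };
		int ret;

		if (!intel_guc_is_ready(guc))
			return 0;

		if (intel_guc_submission_is_used(guc)) {
			/* One H2G MMIO: GuC resets all contexts, then tears down the CTBs */
			ret = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
			if (ret)
				DRM_ERROR("GuC suspend: RESET_CLIENT action failed with error %d!\n", ret);
		}

		/* Signal that the GuC isn't running */
		intel_guc_sanitize(guc);

		return 0;
	}
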
v2: (Michal / John H) - INTEL_GUC_ACTION_RESET_CLIENT 0x5B01 -> 0x5507 Cc: John Harrison Signed-off-by: Matthew Brost Signed-off-by: Michal Wajdeczko Reviewed-by: John Harrison Signed-off-by: John Harrison Link: https://patchwork.freedesktop.org/patch/msgid/20210727002348.97202-12-matthew.brost@intel.com --- drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h | 1 + drivers/gpu/drm/i915/gt/uc/intel_guc.c | 64 +++++++++-------------- drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 15 +++--- drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h | 5 ++ drivers/gpu/drm/i915/gt/uc/intel_uc.c | 20 ++++--- 5 files changed, 54 insertions(+), 51 deletions(-) (limited to 'drivers/gpu/drm/i915/gt/uc/intel_guc.c') diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h index 57e18babdf4b..d832c8f11c11 100644 --- a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h +++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h @@ -142,6 +142,7 @@ enum intel_guc_action { INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER = 0x4505, INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER = 0x4506, INTEL_GUC_ACTION_DEREGISTER_CONTEXT_DONE = 0x4600, + INTEL_GUC_ACTION_RESET_CLIENT = 0x5507, INTEL_GUC_ACTION_LIMIT }; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c index 9b09395b998f..68266cbffd1f 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c @@ -524,51 +524,34 @@ int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset) */ int intel_guc_suspend(struct intel_guc *guc) { - struct intel_uncore *uncore = guc_to_gt(guc)->uncore; int ret; - u32 status; u32 action[] = { - INTEL_GUC_ACTION_ENTER_S_STATE, - GUC_POWER_D1, /* any value greater than GUC_POWER_D0 */ + INTEL_GUC_ACTION_RESET_CLIENT, }; - /* - * If GuC communication is enabled but submission is not supported, - * we do not need to suspend the GuC. - */ - if (!intel_guc_submission_is_used(guc) || !intel_guc_is_ready(guc)) + if (!intel_guc_is_ready(guc)) return 0; - /* - * The ENTER_S_STATE action queues the save/restore operation in GuC FW - * and then returns, so waiting on the H2G is not enough to guarantee - * GuC is done. When all the processing is done, GuC writes - * INTEL_GUC_SLEEP_STATE_SUCCESS to scratch register 14, so we can poll - * on that. Note that GuC does not ensure that the value in the register - * is different from INTEL_GUC_SLEEP_STATE_SUCCESS while the action is - * in progress so we need to take care of that ourselves as well. - */ - - intel_uncore_write(uncore, SOFT_SCRATCH(14), - INTEL_GUC_SLEEP_STATE_INVALID_MASK); - - ret = intel_guc_send(guc, action, ARRAY_SIZE(action)); - if (ret) - return ret; - - ret = __intel_wait_for_register(uncore, SOFT_SCRATCH(14), - INTEL_GUC_SLEEP_STATE_INVALID_MASK, - 0, 0, 10, &status); - if (ret) - return ret; - - if (status != INTEL_GUC_SLEEP_STATE_SUCCESS) { - DRM_ERROR("GuC failed to change sleep state. " - "action=0x%x, err=%u\n", - action[0], status); - return -EIO; + if (intel_guc_submission_is_used(guc)) { + /* + * This H2G MMIO command tears down the GuC in two steps. First it will + * generate a G2H CTB for every active context indicating a reset. In + * practice the i915 shouldn't ever get a G2H as suspend should only be + * called when the GPU is idle. Next, it tears down the CTBs and this + * H2G MMIO command completes. + * + * Don't abort on a failure code from the GuC. 
Keep going and do the + * clean up in sanitize() and re-initialisation on resume and hopefully + * the error here won't be problematic. + */ + ret = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0); + if (ret) + DRM_ERROR("GuC suspend: RESET_CLIENT action failed with error %d!\n", ret); } + /* Signal that the GuC isn't running. */ + intel_guc_sanitize(guc); + return 0; } @@ -578,7 +561,12 @@ int intel_guc_suspend(struct intel_guc *guc) */ int intel_guc_resume(struct intel_guc *guc) { - /* XXX: to be implemented with submission interface rework */ + /* + * NB: This function can still be called even if GuC submission is + * disabled, e.g. if GuC is enabled for HuC authentication only. Thus, + * if any code is later added here, it must support doing nothing + * if submission is disabled (as per intel_guc_suspend). + */ return 0; } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index dd4ea1ef5731..d39a312e980a 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -302,10 +302,10 @@ static int guc_submission_send_busy_loop(struct intel_guc *guc, return err; } -static int guc_wait_for_pending_msg(struct intel_guc *guc, - atomic_t *wait_var, - bool interruptible, - long timeout) +int intel_guc_wait_for_pending_msg(struct intel_guc *guc, + atomic_t *wait_var, + bool interruptible, + long timeout) { const int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE; @@ -348,8 +348,9 @@ int intel_guc_wait_for_idle(struct intel_guc *guc, long timeout) if (!intel_uc_uses_guc_submission(&guc_to_gt(guc)->uc)) return 0; - return guc_wait_for_pending_msg(guc, &guc->outstanding_submission_g2h, - true, timeout); + return intel_guc_wait_for_pending_msg(guc, + &guc->outstanding_submission_g2h, + true, timeout); } static int guc_lrc_desc_pin(struct intel_context *ce, bool loop); @@ -623,7 +624,7 @@ void intel_guc_submission_reset_prepare(struct intel_guc *guc) for (i = 0; i < 4 && atomic_read(&guc->outstanding_submission_g2h); ++i) { intel_guc_to_host_event_handler(guc); #define wait_for_reset(guc, wait_var) \ - guc_wait_for_pending_msg(guc, wait_var, false, (HZ / 20)) + intel_guc_wait_for_pending_msg(guc, wait_var, false, (HZ / 20)) do { wait_for_reset(guc, &guc->outstanding_submission_g2h); } while (!list_empty(&guc->ct.requests.incoming)); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h index 5f263ac4f46a..08ff77c5c50e 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h @@ -28,6 +28,11 @@ void intel_guc_submission_print_context_info(struct intel_guc *guc, bool intel_guc_virtual_engine_has_heartbeat(const struct intel_engine_cs *ve); +int intel_guc_wait_for_pending_msg(struct intel_guc *guc, + atomic_t *wait_var, + bool interruptible, + long timeout); + static inline bool intel_guc_submission_is_supported(struct intel_guc *guc) { /* XXX: GuC submission is unavailable for now */ diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c index 05a2001afde9..e07c989968f9 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c @@ -595,14 +595,18 @@ void intel_uc_cancel_requests(struct intel_uc *uc) void intel_uc_runtime_suspend(struct intel_uc *uc) { struct intel_guc *guc = &uc->guc; - int err; if (!intel_guc_is_ready(guc)) return; - err = 
intel_guc_suspend(guc); - if (err) - DRM_DEBUG_DRIVER("Failed to suspend GuC, err=%d", err); + /* + * Wait for any outstanding CTB before tearing down communication with the + * GuC. + */ +#define OUTSTANDING_CTB_TIMEOUT_PERIOD (HZ / 5) + intel_guc_wait_for_pending_msg(guc, &guc->outstanding_submission_g2h, + false, OUTSTANDING_CTB_TIMEOUT_PERIOD); + GEM_WARN_ON(atomic_read(&guc->outstanding_submission_g2h)); guc_disable_communication(guc); } @@ -611,12 +615,16 @@ void intel_uc_suspend(struct intel_uc *uc) { struct intel_guc *guc = &uc->guc; intel_wakeref_t wakeref; + int err; if (!intel_guc_is_ready(guc)) return; - with_intel_runtime_pm(uc_to_gt(uc)->uncore->rpm, wakeref) - intel_uc_runtime_suspend(uc); + with_intel_runtime_pm(&uc_to_gt(uc)->i915->runtime_pm, wakeref) { + err = intel_guc_suspend(guc); + if (err) + DRM_DEBUG_DRIVER("Failed to suspend GuC, err=%d", err); + } } static int __uc_resume(struct intel_uc *uc, bool enable_communication) -- cgit v1.2.3 From 481d458caede241607e8463b9920ff9e29cece38 Mon Sep 17 00:00:00 2001 From: John Harrison Date: Mon, 26 Jul 2021 17:23:38 -0700 Subject: drm/i915/guc: Add golden context to GuC ADS The media watchdog mechanism involves GuC doing a silent reset and continue of the hung context. This requires the i915 driver to provide a golden context to GuC in the ADS. v2: (Matthew Brost): - Fix memory corruption in shmem_read (John H) - Use locals rather than defines for LR_* + SKIP_SIZE Signed-off-by: John Harrison Signed-off-by: Matthew Brost Reviewed-by: Matthew Brost Signed-off-by: John Harrison Link: https://patchwork.freedesktop.org/patch/msgid/20210727002348.97202-24-matthew.brost@intel.com --- drivers/gpu/drm/i915/gt/intel_gt.c | 2 + drivers/gpu/drm/i915/gt/uc/intel_guc.c | 5 + drivers/gpu/drm/i915/gt/uc/intel_guc.h | 2 + drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c | 216 +++++++++++++++++++++++++---- drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h | 1 + drivers/gpu/drm/i915/gt/uc/intel_uc.c | 5 + drivers/gpu/drm/i915/gt/uc/intel_uc.h | 1 + 7 files changed, 202 insertions(+), 30 deletions(-) (limited to 'drivers/gpu/drm/i915/gt/uc/intel_guc.c') diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index 46441607d18b..a64aa43f7cd9 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -654,6 +654,8 @@ int intel_gt_init(struct intel_gt *gt) if (err) goto err_gt; + intel_uc_init_late(&gt->uc); + err = i915_inject_probe_error(gt->i915, -EIO); if (err) goto err_gt; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c index 68266cbffd1f..979128e28372 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c @@ -180,6 +180,11 @@ void intel_guc_init_early(struct intel_guc *guc) } } +void intel_guc_init_late(struct intel_guc *guc) +{ + intel_guc_ads_init_late(guc); +} + static u32 guc_ctl_debug_flags(struct intel_guc *guc) { u32 level = intel_guc_log_get_level(&guc->log); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h index 8c8cf842c29a..1875303c3bca 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h @@ -60,6 +60,7 @@ struct intel_guc { struct i915_vma *ads_vma; struct __guc_ads_blob *ads_blob; u32 ads_regset_size; + u32 ads_golden_ctxt_size; struct i915_vma *lrc_desc_pool; void *lrc_desc_pool_vaddr; @@ -183,6 +184,7 @@ static inline u32 intel_guc_ggtt_offset(struct intel_guc *guc, } void intel_guc_init_early(struct intel_guc 
*guc); +void intel_guc_init_late(struct intel_guc *guc); void intel_guc_init_send_regs(struct intel_guc *guc); void intel_guc_write_params(struct intel_guc *guc); int intel_guc_init(struct intel_guc *guc); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c index 51fc14ee79cd..6926919bcac6 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c @@ -7,6 +7,7 @@ #include "gt/intel_gt.h" #include "gt/intel_lrc.h" +#include "gt/shmem_utils.h" #include "intel_guc_ads.h" #include "intel_guc_fwif.h" #include "intel_uc.h" @@ -33,6 +34,10 @@ * +---------------------------------------+ <== dynamic * | padding | * +---------------------------------------+ <== 4K aligned + * | golden contexts | + * +---------------------------------------+ + * | padding | + * +---------------------------------------+ <== 4K aligned * | private data | * +---------------------------------------+ * | padding | @@ -52,6 +57,11 @@ static u32 guc_ads_regset_size(struct intel_guc *guc) return guc->ads_regset_size; } +static u32 guc_ads_golden_ctxt_size(struct intel_guc *guc) +{ + return PAGE_ALIGN(guc->ads_golden_ctxt_size); +} + static u32 guc_ads_private_data_size(struct intel_guc *guc) { return PAGE_ALIGN(guc->fw.private_data_size); @@ -62,12 +72,23 @@ static u32 guc_ads_regset_offset(struct intel_guc *guc) return offsetof(struct __guc_ads_blob, regset); } -static u32 guc_ads_private_data_offset(struct intel_guc *guc) +static u32 guc_ads_golden_ctxt_offset(struct intel_guc *guc) { u32 offset; offset = guc_ads_regset_offset(guc) + guc_ads_regset_size(guc); + + return PAGE_ALIGN(offset); +} + +static u32 guc_ads_private_data_offset(struct intel_guc *guc) +{ + u32 offset; + + offset = guc_ads_golden_ctxt_offset(guc) + + guc_ads_golden_ctxt_size(guc); + return PAGE_ALIGN(offset); } @@ -319,53 +340,166 @@ static void guc_mmio_reg_state_init(struct intel_guc *guc, GEM_BUG_ON(temp_set.size); } -/* - * The first 80 dwords of the register state context, containing the - * execlists and ppgtt registers. - */ -#define LR_HW_CONTEXT_SIZE (80 * sizeof(u32)) +static void fill_engine_enable_masks(struct intel_gt *gt, + struct guc_gt_system_info *info) +{ + info->engine_enabled_masks[GUC_RENDER_CLASS] = 1; + info->engine_enabled_masks[GUC_BLITTER_CLASS] = 1; + info->engine_enabled_masks[GUC_VIDEO_CLASS] = VDBOX_MASK(gt); + info->engine_enabled_masks[GUC_VIDEOENHANCE_CLASS] = VEBOX_MASK(gt); +} -static void __guc_ads_init(struct intel_guc *guc) +static int guc_prep_golden_context(struct intel_guc *guc, + struct __guc_ads_blob *blob) { struct intel_gt *gt = guc_to_gt(guc); - struct drm_i915_private *i915 = gt->i915; + u32 addr_ggtt, offset; + u32 total_size = 0, alloc_size, real_size; + u8 engine_class, guc_class; + struct guc_gt_system_info *info, local_info; + + /* + * Reserve the memory for the golden contexts and point GuC at it but + * leave it empty for now. The context data will be filled in later + * once there is something available to put there. + * + * Note that the HWSP and ring context are not included. + * + * Note also that the storage must be pinned in the GGTT, so that the + * address won't change after GuC has been told where to find it. The + * GuC will also validate that the LRC base + size fall within the + * allowed GGTT range. 
+ */ + if (blob) { + offset = guc_ads_golden_ctxt_offset(guc); + addr_ggtt = intel_guc_ggtt_offset(guc, guc->ads_vma) + offset; + info = &blob->system_info; + } else { + memset(&local_info, 0, sizeof(local_info)); + info = &local_info; + fill_engine_enable_masks(gt, info); + } + + for (engine_class = 0; engine_class <= MAX_ENGINE_CLASS; ++engine_class) { + if (engine_class == OTHER_CLASS) + continue; + + guc_class = engine_class_to_guc_class(engine_class); + + if (!info->engine_enabled_masks[guc_class]) + continue; + + real_size = intel_engine_context_size(gt, engine_class); + alloc_size = PAGE_ALIGN(real_size); + total_size += alloc_size; + + if (!blob) + continue; + + blob->ads.eng_state_size[guc_class] = real_size; + blob->ads.golden_context_lrca[guc_class] = addr_ggtt; + addr_ggtt += alloc_size; + } + + if (!blob) + return total_size; + + GEM_BUG_ON(guc->ads_golden_ctxt_size != total_size); + return total_size; +} + +static struct intel_engine_cs *find_engine_state(struct intel_gt *gt, u8 engine_class) +{ + struct intel_engine_cs *engine; + enum intel_engine_id id; + + for_each_engine(engine, gt, id) { + if (engine->class != engine_class) + continue; + + if (!engine->default_state) + continue; + + return engine; + } + + return NULL; +} + +static void guc_init_golden_context(struct intel_guc *guc) +{ struct __guc_ads_blob *blob = guc->ads_blob; - const u32 skipped_size = LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SIZE; - u32 base; + struct intel_engine_cs *engine; + struct intel_gt *gt = guc_to_gt(guc); + u32 addr_ggtt, offset; + u32 total_size = 0, alloc_size, real_size; u8 engine_class, guc_class; + u8 *ptr; - /* GuC scheduling policies */ - guc_policies_init(guc, &blob->policies); + /* Skip execlist and PPGTT registers + HWSP */ + const u32 lr_hw_context_size = 80 * sizeof(u32); + const u32 skip_size = LRC_PPHWSP_SZ * PAGE_SIZE + + lr_hw_context_size; + + if (!intel_uc_uses_guc_submission(&gt->uc)) + return; + + GEM_BUG_ON(!blob); /* - * GuC expects a per-engine-class context image and size - * (minus hwsp and ring context). The context image will be - * used to reinitialize engines after a reset. It must exist - * and be pinned in the GGTT, so that the address won't change after - * we have told GuC where to find it. The context size will be used - * to validate that the LRC base + size fall within allowed GGTT. + * Go back and fill in the golden context data now that it is + * available. */ + offset = guc_ads_golden_ctxt_offset(guc); + addr_ggtt = intel_guc_ggtt_offset(guc, guc->ads_vma) + offset; + ptr = ((u8 *)blob) + offset; + for (engine_class = 0; engine_class <= MAX_ENGINE_CLASS; ++engine_class) { if (engine_class == OTHER_CLASS) continue; guc_class = engine_class_to_guc_class(engine_class); - /* - * TODO: Set context pointer to default state to allow - * GuC to re-init guilty contexts after internal reset. 
- */ - blob->ads.golden_context_lrca[guc_class] = 0; - blob->ads.eng_state_size[guc_class] = - intel_engine_context_size(gt, engine_class) - - skipped_size; + if (!blob->system_info.engine_enabled_masks[guc_class]) + continue; + + real_size = intel_engine_context_size(gt, engine_class); + alloc_size = PAGE_ALIGN(real_size); + total_size += alloc_size; + + engine = find_engine_state(gt, engine_class); + if (!engine) { + drm_err(&gt->i915->drm, "No engine state recorded for class %d!\n", + engine_class); + blob->ads.eng_state_size[guc_class] = 0; + blob->ads.golden_context_lrca[guc_class] = 0; + continue; + } + + GEM_BUG_ON(blob->ads.eng_state_size[guc_class] != real_size); + GEM_BUG_ON(blob->ads.golden_context_lrca[guc_class] != addr_ggtt); + addr_ggtt += alloc_size; + + shmem_read(engine->default_state, skip_size, ptr + skip_size, + real_size - skip_size); + ptr += alloc_size; } + GEM_BUG_ON(guc->ads_golden_ctxt_size != total_size); +} + +static void __guc_ads_init(struct intel_guc *guc) +{ + struct intel_gt *gt = guc_to_gt(guc); + struct drm_i915_private *i915 = gt->i915; + struct __guc_ads_blob *blob = guc->ads_blob; + u32 base; + + /* GuC scheduling policies */ + guc_policies_init(guc, &blob->policies); + /* System info */ - blob->system_info.engine_enabled_masks[GUC_RENDER_CLASS] = 1; - blob->system_info.engine_enabled_masks[GUC_BLITTER_CLASS] = 1; - blob->system_info.engine_enabled_masks[GUC_VIDEO_CLASS] = VDBOX_MASK(gt); - blob->system_info.engine_enabled_masks[GUC_VIDEOENHANCE_CLASS] = VEBOX_MASK(gt); + fill_engine_enable_masks(gt, &blob->system_info); blob->system_info.generic_gt_sysinfo[GUC_GENERIC_GT_SYSINFO_SLICE_ENABLED] = hweight8(gt->info.sseu.slice_mask); @@ -380,6 +514,9 @@ static void __guc_ads_init(struct intel_guc *guc) GEN12_DOORBELLS_PER_SQIDI) + 1; } + /* Golden contexts for re-initialising after a watchdog reset */ + guc_prep_golden_context(guc, blob); + guc_mapping_table_init(guc_to_gt(guc), &blob->system_info); base = intel_guc_ggtt_offset(guc, guc->ads_vma); @@ -417,6 +554,13 @@ int intel_guc_ads_create(struct intel_guc *guc) return ret; guc->ads_regset_size = ret; + /* Likewise the golden contexts: */ + ret = guc_prep_golden_context(guc, NULL); + if (ret < 0) + return ret; + guc->ads_golden_ctxt_size = ret; + + /* Now the total size can be determined: */ size = guc_ads_blob_size(guc); ret = intel_guc_allocate_and_map_vma(guc, size, &guc->ads_vma, @@ -429,6 +573,18 @@ int intel_guc_ads_create(struct intel_guc *guc) return 0; } +void intel_guc_ads_init_late(struct intel_guc *guc) +{ + /* + * The golden context setup requires the saved engine state from + * __engines_record_defaults(). However, that requires engines to be + * operational which means the ADS must already have been configured. + * Fortunately, the golden context state is not needed until a hang + * occurs, so it can be filled in during this late init phase. 
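+ *
+ * For reference, the resulting call chain at driver load, as wired up
+ * by this patch, is:
+ *
+ *	intel_gt_init()
+ *	  -> intel_uc_init_late()
+ *	    -> intel_guc_init_late()
+ *	      -> intel_guc_ads_init_late()
+ *	        -> guc_init_golden_context()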
+ */ + guc_init_golden_context(guc); +} + void intel_guc_ads_destroy(struct intel_guc *guc) { i915_vma_unpin_and_release(&guc->ads_vma, I915_VMA_RELEASE_MAP); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h index bdcb339a5321..3d85051d57e4 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h @@ -11,6 +11,7 @@ struct drm_printer; int intel_guc_ads_create(struct intel_guc *guc); void intel_guc_ads_destroy(struct intel_guc *guc); +void intel_guc_ads_init_late(struct intel_guc *guc); void intel_guc_ads_reset(struct intel_guc *guc); void intel_guc_ads_print_policy_info(struct intel_guc *guc, struct drm_printer *p); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c index 77c1fe2ed883..7a69c3c027e9 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c @@ -120,6 +120,11 @@ void intel_uc_init_early(struct intel_uc *uc) uc->ops = &uc_ops_off; } +void intel_uc_init_late(struct intel_uc *uc) +{ + intel_guc_init_late(&uc->guc); +} + void intel_uc_driver_late_release(struct intel_uc *uc) { } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.h b/drivers/gpu/drm/i915/gt/uc/intel_uc.h index 91315e3f1c58..e2da2b6e76e1 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.h @@ -35,6 +35,7 @@ struct intel_uc { }; void intel_uc_init_early(struct intel_uc *uc); +void intel_uc_init_late(struct intel_uc *uc); void intel_uc_driver_late_release(struct intel_uc *uc); void intel_uc_driver_remove(struct intel_uc *uc); void intel_uc_init_mmio(struct intel_uc *uc); -- cgit v1.2.3 From dff0fc4990929858eccab824bd310e7fb4bb20ee Mon Sep 17 00:00:00 2001 From: Vinay Belgaumkar Date: Fri, 30 Jul 2021 13:21:06 -0700 Subject: drm/i915/guc/slpc: Initial definitions for SLPC Add macros to check for SLPC support. This feature is currently supported for Gen12+ and enabled whenever GuC submission is enabled/selected. Include templates for SLPC init/fini and enable. v2: Move SLPC helper functions to intel_guc_slpc.c/.h. Define basic template for SLPC structure in intel_guc_slpc_types.h. 
Fix copyright (Michal W) v3: Review comments (Michal W) v4: Include supported/selected inside slpc struct (Michal W) Reviewed-by: Michal Wajdeczko Signed-off-by: Vinay Belgaumkar Signed-off-by: Sundaresan Sujaritha Signed-off-by: Daniele Ceraolo Spurio Signed-off-by: John Harrison Link: https://patchwork.freedesktop.org/patch/msgid/20210730202119.23810-2-vinay.belgaumkar@intel.com --- drivers/gpu/drm/i915/Makefile | 1 + drivers/gpu/drm/i915/gt/uc/intel_guc.c | 2 + drivers/gpu/drm/i915/gt/uc/intel_guc.h | 2 + drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c | 45 +++++++++++++++++++++++ drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h | 33 +++++++++++++++++ drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h | 16 ++++++++ drivers/gpu/drm/i915/gt/uc/intel_uc.c | 6 ++- drivers/gpu/drm/i915/gt/uc/intel_uc.h | 2 + 8 files changed, 105 insertions(+), 2 deletions(-) create mode 100644 drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c create mode 100644 drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h create mode 100644 drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h (limited to 'drivers/gpu/drm/i915/gt/uc/intel_guc.c') diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index a985c92c8c19..134b0edf4a2e 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -186,6 +186,7 @@ i915-y += gt/uc/intel_uc.o \ gt/uc/intel_guc_fw.o \ gt/uc/intel_guc_log.o \ gt/uc/intel_guc_log_debugfs.o \ + gt/uc/intel_guc_slpc.o \ gt/uc/intel_guc_submission.o \ gt/uc/intel_huc.o \ gt/uc/intel_huc_debugfs.o \ diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c index 979128e28372..39bc3c16057b 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c @@ -7,6 +7,7 @@ #include "gt/intel_gt_irq.h" #include "gt/intel_gt_pm_irq.h" #include "intel_guc.h" +#include "intel_guc_slpc.h" #include "intel_guc_ads.h" #include "intel_guc_submission.h" #include "i915_drv.h" @@ -157,6 +158,7 @@ void intel_guc_init_early(struct intel_guc *guc) intel_guc_ct_init_early(&guc->ct); intel_guc_log_init_early(&guc->log); intel_guc_submission_init_early(guc); + intel_guc_slpc_init_early(&guc->slpc); mutex_init(&guc->send_mutex); spin_lock_init(&guc->irq_lock); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h index a9547069ee7e..7da11a0b6059 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h @@ -15,6 +15,7 @@ #include "intel_guc_ct.h" #include "intel_guc_log.h" #include "intel_guc_reg.h" +#include "intel_guc_slpc_types.h" #include "intel_uc_fw.h" #include "i915_utils.h" #include "i915_vma.h" @@ -30,6 +31,7 @@ struct intel_guc { struct intel_uc_fw fw; struct intel_guc_log log; struct intel_guc_ct ct; + struct intel_guc_slpc slpc; /* Global engine used to submit requests to GuC */ struct i915_sched_engine *sched_engine; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c new file mode 100644 index 000000000000..40950f1bf05c --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2021 Intel Corporation + */ + +#include "i915_drv.h" +#include "intel_guc_slpc.h" +#include "gt/intel_gt.h" + +static inline struct intel_guc *slpc_to_guc(struct intel_guc_slpc *slpc) +{ + return container_of(slpc, struct intel_guc, slpc); +} + +static bool __detect_slpc_supported(struct intel_guc *guc) +{ + /* GuC SLPC is unavailable for pre-Gen12 */ + 
return guc->submission_supported && + GRAPHICS_VER(guc_to_gt(guc)->i915) >= 12; +} + +static bool __guc_slpc_selected(struct intel_guc *guc) +{ + if (!intel_guc_slpc_is_supported(guc)) + return false; + + return guc->submission_selected; +} + +void intel_guc_slpc_init_early(struct intel_guc_slpc *slpc) +{ + struct intel_guc *guc = slpc_to_guc(slpc); + + slpc->supported = __detect_slpc_supported(guc); + slpc->selected = __guc_slpc_selected(guc); +} + +int intel_guc_slpc_init(struct intel_guc_slpc *slpc) +{ + return 0; +} + +void intel_guc_slpc_fini(struct intel_guc_slpc *slpc) +{ +} diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h new file mode 100644 index 000000000000..bc139682ad0f --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ + +#ifndef _INTEL_GUC_SLPC_H_ +#define _INTEL_GUC_SLPC_H_ + +#include "intel_guc_submission.h" +#include "intel_guc_slpc_types.h" + +static inline bool intel_guc_slpc_is_supported(struct intel_guc *guc) +{ + return guc->slpc.supported; +} + +static inline bool intel_guc_slpc_is_wanted(struct intel_guc *guc) +{ + return guc->slpc.selected; +} + +static inline bool intel_guc_slpc_is_used(struct intel_guc *guc) +{ + return intel_guc_submission_is_used(guc) && intel_guc_slpc_is_wanted(guc); +} + +void intel_guc_slpc_init_early(struct intel_guc_slpc *slpc); + +int intel_guc_slpc_init(struct intel_guc_slpc *slpc); +int intel_guc_slpc_enable(struct intel_guc_slpc *slpc); +void intel_guc_slpc_fini(struct intel_guc_slpc *slpc); + +#endif diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h new file mode 100644 index 000000000000..769c162305a0 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ + +#ifndef _INTEL_GUC_SLPC_TYPES_H_ +#define _INTEL_GUC_SLPC_TYPES_H_ + +#include + +struct intel_guc_slpc { + bool supported; + bool selected; +}; + +#endif diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c index da57d18d9f6b..e6bd9406c7b2 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c @@ -75,16 +75,18 @@ static void __confirm_options(struct intel_uc *uc) struct drm_i915_private *i915 = uc_to_gt(uc)->i915; drm_dbg(&i915->drm, - "enable_guc=%d (guc:%s submission:%s huc:%s)\n", + "enable_guc=%d (guc:%s submission:%s huc:%s slpc:%s)\n", i915->params.enable_guc, yesno(intel_uc_wants_guc(uc)), yesno(intel_uc_wants_guc_submission(uc)), - yesno(intel_uc_wants_huc(uc))); + yesno(intel_uc_wants_huc(uc)), + yesno(intel_uc_wants_guc_slpc(uc))); if (i915->params.enable_guc == 0) { GEM_BUG_ON(intel_uc_wants_guc(uc)); GEM_BUG_ON(intel_uc_wants_guc_submission(uc)); GEM_BUG_ON(intel_uc_wants_huc(uc)); + GEM_BUG_ON(intel_uc_wants_guc_slpc(uc)); return; } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.h b/drivers/gpu/drm/i915/gt/uc/intel_uc.h index e2da2b6e76e1..925a58ca6b94 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.h @@ -8,6 +8,7 @@ #include "intel_guc.h" #include "intel_guc_submission.h" +#include "intel_guc_slpc.h" #include "intel_huc.h" #include "i915_params.h" @@ -83,6 +84,7 @@ __uc_state_checker(x, func, uses, used) uc_state_checkers(guc, guc); uc_state_checkers(huc, huc); uc_state_checkers(guc, 
guc_submission); +uc_state_checkers(guc, guc_slpc); #undef uc_state_checkers #undef __uc_state_checker -- cgit v1.2.3 From 7695d08f1e30f361502e9c19b6d330234d4ca755 Mon Sep 17 00:00:00 2001 From: Vinay Belgaumkar Date: Fri, 30 Jul 2021 13:21:08 -0700 Subject: drm/i915/guc/slpc: Adding SLPC communication interfaces Add constants and params that are needed to configure SLPC. v2: Add a new abi header for SLPC. Replace bitfields with genmasks. Address other comments from Michal W. v3: Add slpc H2G format in abi, other review commments (Michal W) v4: Update status bits according to latest spec v5: checkpatch() Reviewed-by: Michal Wajdeczko Signed-off-by: Vinay Belgaumkar Signed-off-by: Sundaresan Sujaritha Signed-off-by: John Harrison Link: https://patchwork.freedesktop.org/patch/msgid/20210730202119.23810-4-vinay.belgaumkar@intel.com --- drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h | 1 - .../gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h | 235 +++++++++++++++++++++ drivers/gpu/drm/i915/gt/uc/intel_guc.c | 3 + drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h | 7 + 4 files changed, 245 insertions(+), 1 deletion(-) create mode 100644 drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h (limited to 'drivers/gpu/drm/i915/gt/uc/intel_guc.c') diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h index d832c8f11c11..ca538e5de940 100644 --- a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h +++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h @@ -135,7 +135,6 @@ enum intel_guc_action { INTEL_GUC_ACTION_SET_CONTEXT_PREEMPTION_TIMEOUT = 0x1007, INTEL_GUC_ACTION_CONTEXT_RESET_NOTIFICATION = 0x1008, INTEL_GUC_ACTION_ENGINE_FAILURE_NOTIFICATION = 0x1009, - INTEL_GUC_ACTION_SLPC_REQUEST = 0x3003, INTEL_GUC_ACTION_AUTHENTICATE_HUC = 0x4000, INTEL_GUC_ACTION_REGISTER_CONTEXT = 0x4502, INTEL_GUC_ACTION_DEREGISTER_CONTEXT = 0x4503, diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h new file mode 100644 index 000000000000..7a8d4bfc5f6a --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h @@ -0,0 +1,235 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ + +#ifndef _GUC_ACTIONS_SLPC_ABI_H_ +#define _GUC_ACTIONS_SLPC_ABI_H_ + +#include +#include "i915_reg.h" + +/** + * DOC: SLPC SHARED DATA STRUCTURE + * + * +----+------+--------------------------------------------------------------+ + * | CL | Bytes| Description | + * +====+======+==============================================================+ + * | 1 | 0-3 | SHARED DATA SIZE | + * | +------+--------------------------------------------------------------+ + * | | 4-7 | GLOBAL STATE | + * | +------+--------------------------------------------------------------+ + * | | 8-11 | DISPLAY DATA ADDRESS | + * | +------+--------------------------------------------------------------+ + * | | 12:63| PADDING | + * +----+------+--------------------------------------------------------------+ + * | | 0:63 | PADDING(PLATFORM INFO) | + * +----+------+--------------------------------------------------------------+ + * | 3 | 0-3 | TASK STATE DATA | + * + +------+--------------------------------------------------------------+ + * | | 4:63 | PADDING | + * +----+------+--------------------------------------------------------------+ + * |4-21|0:1087| OVERRIDE PARAMS AND BIT FIELDS | + * +----+------+--------------------------------------------------------------+ + * | | | PADDING + EXTRA RESERVED PAGE | 
+ * +----+------+--------------------------------------------------------------+ + */ + +/* + * SLPC exposes certain parameters for global configuration by the host. + * These are referred to as override parameters, because in most cases + * the host will not need to modify the default values used by SLPC. + * SLPC remembers the default values which allows the host to easily restore + * them by simply unsetting the override. The host can set or unset override + * parameters during SLPC (re-)initialization using the SLPC Reset event. + * The host can also set or unset override parameters on the fly using the + * Parameter Set and Parameter Unset events + */ + +#define SLPC_MAX_OVERRIDE_PARAMETERS 256 +#define SLPC_OVERRIDE_BITFIELD_SIZE \ + (SLPC_MAX_OVERRIDE_PARAMETERS / 32) + +#define SLPC_PAGE_SIZE_BYTES 4096 +#define SLPC_CACHELINE_SIZE_BYTES 64 +#define SLPC_SHARED_DATA_SIZE_BYTE_HEADER SLPC_CACHELINE_SIZE_BYTES +#define SLPC_SHARED_DATA_SIZE_BYTE_PLATFORM_INFO SLPC_CACHELINE_SIZE_BYTES +#define SLPC_SHARED_DATA_SIZE_BYTE_TASK_STATE SLPC_CACHELINE_SIZE_BYTES +#define SLPC_SHARED_DATA_MODE_DEFN_TABLE_SIZE SLPC_PAGE_SIZE_BYTES +#define SLPC_SHARED_DATA_SIZE_BYTE_MAX (2 * SLPC_PAGE_SIZE_BYTES) + +/* + * Cacheline size aligned (Total size needed for + * SLPM_KMD_MAX_OVERRIDE_PARAMETERS=256 is 1088 bytes) + */ +#define SLPC_OVERRIDE_PARAMS_TOTAL_BYTES (((((SLPC_MAX_OVERRIDE_PARAMETERS * 4) \ + + ((SLPC_MAX_OVERRIDE_PARAMETERS / 32) * 4)) \ + + (SLPC_CACHELINE_SIZE_BYTES - 1)) / SLPC_CACHELINE_SIZE_BYTES) * \ + SLPC_CACHELINE_SIZE_BYTES) + +#define SLPC_SHARED_DATA_SIZE_BYTE_OTHER (SLPC_SHARED_DATA_SIZE_BYTE_MAX - \ + (SLPC_SHARED_DATA_SIZE_BYTE_HEADER \ + + SLPC_SHARED_DATA_SIZE_BYTE_PLATFORM_INFO \ + + SLPC_SHARED_DATA_SIZE_BYTE_TASK_STATE \ + + SLPC_OVERRIDE_PARAMS_TOTAL_BYTES \ + + SLPC_SHARED_DATA_MODE_DEFN_TABLE_SIZE)) + +enum slpc_task_enable { + SLPC_PARAM_TASK_DEFAULT = 0, + SLPC_PARAM_TASK_ENABLED, + SLPC_PARAM_TASK_DISABLED, + SLPC_PARAM_TASK_UNKNOWN +}; + +enum slpc_global_state { + SLPC_GLOBAL_STATE_NOT_RUNNING = 0, + SLPC_GLOBAL_STATE_INITIALIZING = 1, + SLPC_GLOBAL_STATE_RESETTING = 2, + SLPC_GLOBAL_STATE_RUNNING = 3, + SLPC_GLOBAL_STATE_SHUTTING_DOWN = 4, + SLPC_GLOBAL_STATE_ERROR = 5 +}; + +enum slpc_param_id { + SLPC_PARAM_TASK_ENABLE_GTPERF = 0, + SLPC_PARAM_TASK_DISABLE_GTPERF = 1, + SLPC_PARAM_TASK_ENABLE_BALANCER = 2, + SLPC_PARAM_TASK_DISABLE_BALANCER = 3, + SLPC_PARAM_TASK_ENABLE_DCC = 4, + SLPC_PARAM_TASK_DISABLE_DCC = 5, + SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ = 6, + SLPC_PARAM_GLOBAL_MAX_GT_UNSLICE_FREQ_MHZ = 7, + SLPC_PARAM_GLOBAL_MIN_GT_SLICE_FREQ_MHZ = 8, + SLPC_PARAM_GLOBAL_MAX_GT_SLICE_FREQ_MHZ = 9, + SLPC_PARAM_GTPERF_THRESHOLD_MAX_FPS = 10, + SLPC_PARAM_GLOBAL_DISABLE_GT_FREQ_MANAGEMENT = 11, + SLPC_PARAM_GTPERF_ENABLE_FRAMERATE_STALLING = 12, + SLPC_PARAM_GLOBAL_DISABLE_RC6_MODE_CHANGE = 13, + SLPC_PARAM_GLOBAL_OC_UNSLICE_FREQ_MHZ = 14, + SLPC_PARAM_GLOBAL_OC_SLICE_FREQ_MHZ = 15, + SLPC_PARAM_GLOBAL_ENABLE_IA_GT_BALANCING = 16, + SLPC_PARAM_GLOBAL_ENABLE_ADAPTIVE_BURST_TURBO = 17, + SLPC_PARAM_GLOBAL_ENABLE_EVAL_MODE = 18, + SLPC_PARAM_GLOBAL_ENABLE_BALANCER_IN_NON_GAMING_MODE = 19, + SLPC_PARAM_GLOBAL_RT_MODE_TURBO_FREQ_DELTA_MHZ = 20, + SLPC_PARAM_PWRGATE_RC_MODE = 21, + SLPC_PARAM_EDR_MODE_COMPUTE_TIMEOUT_MS = 22, + SLPC_PARAM_EDR_QOS_FREQ_MHZ = 23, + SLPC_PARAM_MEDIA_FF_RATIO_MODE = 24, + SLPC_PARAM_ENABLE_IA_FREQ_LIMITING = 25, + SLPC_PARAM_STRATEGIES = 26, + SLPC_PARAM_POWER_PROFILE = 27, + SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY = 28, + 
SLPC_MAX_PARAM = 32, +}; + +enum slpc_event_id { + SLPC_EVENT_RESET = 0, + SLPC_EVENT_SHUTDOWN = 1, + SLPC_EVENT_PLATFORM_INFO_CHANGE = 2, + SLPC_EVENT_DISPLAY_MODE_CHANGE = 3, + SLPC_EVENT_FLIP_COMPLETE = 4, + SLPC_EVENT_QUERY_TASK_STATE = 5, + SLPC_EVENT_PARAMETER_SET = 6, + SLPC_EVENT_PARAMETER_UNSET = 7, +}; + +struct slpc_task_state_data { + union { + u32 task_status_padding; + struct { + u32 status; +#define SLPC_GTPERF_TASK_ENABLED REG_BIT(0) +#define SLPC_DCC_TASK_ENABLED REG_BIT(11) +#define SLPC_IN_DCC REG_BIT(12) +#define SLPC_BALANCER_ENABLED REG_BIT(15) +#define SLPC_IBC_TASK_ENABLED REG_BIT(16) +#define SLPC_BALANCER_IA_LMT_ENABLED REG_BIT(17) +#define SLPC_BALANCER_IA_LMT_ACTIVE REG_BIT(18) + }; + }; + union { + u32 freq_padding; + struct { +#define SLPC_MAX_UNSLICE_FREQ_MASK REG_GENMASK(7, 0) +#define SLPC_MIN_UNSLICE_FREQ_MASK REG_GENMASK(15, 8) +#define SLPC_MAX_SLICE_FREQ_MASK REG_GENMASK(23, 16) +#define SLPC_MIN_SLICE_FREQ_MASK REG_GENMASK(31, 24) + u32 freq; + }; + }; +} __packed; + +struct slpc_shared_data_header { + /* Total size in bytes of this shared buffer. */ + u32 size; + u32 global_state; + u32 display_data_addr; +} __packed; + +struct slpc_override_params { + u32 bits[SLPC_OVERRIDE_BITFIELD_SIZE]; + u32 values[SLPC_MAX_OVERRIDE_PARAMETERS]; +} __packed; + +struct slpc_shared_data { + struct slpc_shared_data_header header; + u8 shared_data_header_pad[SLPC_SHARED_DATA_SIZE_BYTE_HEADER - + sizeof(struct slpc_shared_data_header)]; + + u8 platform_info_pad[SLPC_SHARED_DATA_SIZE_BYTE_PLATFORM_INFO]; + + struct slpc_task_state_data task_state_data; + u8 task_state_data_pad[SLPC_SHARED_DATA_SIZE_BYTE_TASK_STATE - + sizeof(struct slpc_task_state_data)]; + + struct slpc_override_params override_params; + u8 override_params_pad[SLPC_OVERRIDE_PARAMS_TOTAL_BYTES - + sizeof(struct slpc_override_params)]; + + u8 shared_data_pad[SLPC_SHARED_DATA_SIZE_BYTE_OTHER]; + + /* PAGE 2 (4096 bytes), mode based parameter will be removed soon */ + u8 reserved_mode_definition[4096]; +} __packed; + +/** + * DOC: SLPC H2G MESSAGE FORMAT + * + * +---+-------+--------------------------------------------------------------+ + * | | Bits | Description | + * +===+=======+==============================================================+ + * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_HOST_ | + * | +-------+--------------------------------------------------------------+ + * | | 30:28 | TYPE = GUC_HXG_TYPE_REQUEST_ | + * | +-------+--------------------------------------------------------------+ + * | | 27:16 | DATA0 = MBZ | + * | +-------+--------------------------------------------------------------+ + * | | 15:0 | ACTION = _`GUC_ACTION_HOST2GUC_PC_SLPM_REQUEST` = 0x3003 | + * +---+-------+--------------------------------------------------------------+ + * | 1 | 31:8 | **EVENT_ID** | + * + +-------+--------------------------------------------------------------+ + * | | 7:0 | **EVENT_ARGC** - number of data arguments | + * +---+-------+--------------------------------------------------------------+ + * | 2 | 31:0 | **EVENT_DATA1** | + * +---+-------+--------------------------------------------------------------+ + * |...| 31:0 | ... 
| + * +---+-------+--------------------------------------------------------------+ + * |2+n| 31:0 | **EVENT_DATAn** | + * +---+-------+--------------------------------------------------------------+ + */ + +#define GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST 0x3003 + +#define HOST2GUC_PC_SLPC_REQUEST_MSG_MIN_LEN \ + (GUC_HXG_REQUEST_MSG_MIN_LEN + 1u) +#define HOST2GUC_PC_SLPC_EVENT_MAX_INPUT_ARGS 9 +#define HOST2GUC_PC_SLPC_REQUEST_MSG_MAX_LEN \ + (HOST2GUC_PC_SLPC_REQUEST_REQUEST_MSG_MIN_LEN + \ + HOST2GUC_PC_SLPC_EVENT_MAX_INPUT_ARGS) +#define HOST2GUC_PC_SLPC_REQUEST_MSG_0_MBZ GUC_HXG_REQUEST_MSG_0_DATA0 +#define HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ID (0xff << 8) +#define HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC (0xff << 0) +#define HOST2GUC_PC_SLPC_REQUEST_MSG_N_EVENT_DATA_N GUC_HXG_REQUEST_MSG_n_DATAn + +#endif diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c index 39bc3c16057b..5b0f8c541b69 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c @@ -208,6 +208,9 @@ static u32 guc_ctl_feature_flags(struct intel_guc *guc) if (!intel_guc_submission_is_used(guc)) flags |= GUC_CTL_DISABLE_SCHEDULER; + if (intel_guc_slpc_is_used(guc)) + flags |= GUC_CTL_ENABLE_SLPC; + return flags; } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h index 82534259b7ad..fa4be13c8854 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h @@ -12,6 +12,7 @@ #include "gt/intel_engine_types.h" #include "abi/guc_actions_abi.h" +#include "abi/guc_actions_slpc_abi.h" #include "abi/guc_errors_abi.h" #include "abi/guc_communication_mmio_abi.h" #include "abi/guc_communication_ctb_abi.h" @@ -95,6 +96,7 @@ #define GUC_CTL_WA 1 #define GUC_CTL_FEATURE 2 #define GUC_CTL_DISABLE_SCHEDULER (1 << 14) +#define GUC_CTL_ENABLE_SLPC BIT(2) #define GUC_CTL_DEBUG 3 #define GUC_LOG_VERBOSITY_SHIFT 0 @@ -141,6 +143,11 @@ #define GUC_ID_TO_ENGINE_INSTANCE(guc_id) \ (((guc_id) & GUC_ENGINE_INSTANCE_MASK) >> GUC_ENGINE_INSTANCE_SHIFT) +#define SLPC_EVENT(id, c) (\ +FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ID, id) | \ +FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC, c) \ +) + static inline u8 engine_class_to_guc_class(u8 class) { BUILD_BUG_ON(GUC_RENDER_CLASS != RENDER_CLASS); -- cgit v1.2.3 From 869cd27ece296be710d2039ba930abea3185268f Mon Sep 17 00:00:00 2001 From: Vinay Belgaumkar Date: Fri, 30 Jul 2021 13:21:09 -0700 Subject: drm/i915/guc/slpc: Allocate, initialize and release SLPC Allocate data structures for SLPC and functions for initializing on host side. 
v2: Address review comments (Michal W) v3: Remove unnecessary header includes (Michal W) v4: Rebase v5: Move allocation of shared data into slpc_init() (Michal W) Reviewed-by: Michal Wajdeczko Signed-off-by: Vinay Belgaumkar Signed-off-by: Sundaresan Sujaritha Signed-off-by: John Harrison Link: https://patchwork.freedesktop.org/patch/msgid/20210730202119.23810-5-vinay.belgaumkar@intel.com --- drivers/gpu/drm/i915/gt/uc/intel_guc.c | 11 ++++++++ drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c | 31 ++++++++++++++++++++++- drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h | 2 ++ 3 files changed, 43 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/i915/gt/uc/intel_guc.c') diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c index 5b0f8c541b69..13d162353b1a 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c @@ -336,6 +336,12 @@ int intel_guc_init(struct intel_guc *guc) goto err_ct; } + if (intel_guc_slpc_is_used(guc)) { + ret = intel_guc_slpc_init(&guc->slpc); + if (ret) + goto err_submission; + } + /* now that everything is perma-pinned, initialize the parameters */ guc_init_params(guc); @@ -346,6 +352,8 @@ int intel_guc_init(struct intel_guc *guc) return 0; +err_submission: + intel_guc_submission_fini(guc); err_ct: intel_guc_ct_fini(&guc->ct); err_ads: @@ -368,6 +376,9 @@ void intel_guc_fini(struct intel_guc *guc) i915_ggtt_disable_guc(gt->ggtt); + if (intel_guc_slpc_is_used(guc)) + intel_guc_slpc_fini(&guc->slpc); + if (intel_guc_submission_is_used(guc)) intel_guc_submission_fini(guc); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c index 40950f1bf05c..377e09187e9f 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c @@ -12,6 +12,16 @@ static inline struct intel_guc *slpc_to_guc(struct intel_guc_slpc *slpc) return container_of(slpc, struct intel_guc, slpc); } +static inline struct intel_gt *slpc_to_gt(struct intel_guc_slpc *slpc) +{ + return guc_to_gt(slpc_to_guc(slpc)); +} + +static inline struct drm_i915_private *slpc_to_i915(struct intel_guc_slpc *slpc) +{ + return slpc_to_gt(slpc)->i915; +} + static bool __detect_slpc_supported(struct intel_guc *guc) { /* GuC SLPC is unavailable for pre-Gen12 */ @@ -37,9 +47,28 @@ void intel_guc_slpc_init_early(struct intel_guc_slpc *slpc) int intel_guc_slpc_init(struct intel_guc_slpc *slpc) { - return 0; + struct intel_guc *guc = slpc_to_guc(slpc); + struct drm_i915_private *i915 = slpc_to_i915(slpc); + u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data)); + int err; + + GEM_BUG_ON(slpc->vma); + + err = intel_guc_allocate_and_map_vma(guc, size, &slpc->vma, (void **)&slpc->vaddr); + if (unlikely(err)) { + drm_err(&i915->drm, + "Failed to allocate SLPC struct (err=%pe)\n", + ERR_PTR(err)); + return err; + } + + return err; } void intel_guc_slpc_fini(struct intel_guc_slpc *slpc) { + if (!slpc->vma) + return; + + i915_vma_unpin_and_release(&slpc->vma, I915_VMA_RELEASE_MAP); } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h index 769c162305a0..8bd753167234 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h @@ -9,6 +9,8 @@ #include struct intel_guc_slpc { + struct i915_vma *vma; + struct slpc_shared_data *vaddr; bool supported; bool selected; }; -- cgit v1.2.3 From 216d56c5da5c93cb2ae91b911ad6ccfc33e03bee Mon Sep 17 00:00:00 
2001 From: Vinay Belgaumkar Date: Fri, 30 Jul 2021 13:21:19 -0700 Subject: drm/i915/guc/rc: Setup and enable GuCRC feature This feature hands over the control of HW RC6 to the GuC. GuC decides when to put HW into RC6 based on its internal busyness algorithms. GuCRC needs GuC submission to be enabled, and is only supported on Gen12+ for now. When GuCRC is enabled, do not set HW RC6. Use an H2G message to tell GuC to enable GuCRC. When disabling RC6, tell GuC to revert RC6 control back to KMD. KMD is still responsible for enabling everything related to Coarse Power Gating though. v2: Address comments (Michal W) v3: Don't set hysteresis values when GuCRC is used (Matt Roper) v4: checkpatch() Reviewed-by: Michal Wajdeczko Signed-off-by: Vinay Belgaumkar Signed-off-by: John Harrison Link: https://patchwork.freedesktop.org/patch/msgid/20210730202119.23810-15-vinay.belgaumkar@intel.com --- drivers/gpu/drm/i915/Makefile | 1 + drivers/gpu/drm/i915/gt/intel_rc6.c | 47 +++++++++----- drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h | 6 ++ drivers/gpu/drm/i915/gt/uc/intel_guc.c | 1 + drivers/gpu/drm/i915/gt/uc/intel_guc.h | 2 + drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c | 80 ++++++++++++++++++++++++ drivers/gpu/drm/i915/gt/uc/intel_guc_rc.h | 31 +++++++++ drivers/gpu/drm/i915/gt/uc/intel_uc.h | 2 + 8 files changed, 155 insertions(+), 15 deletions(-) create mode 100644 drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c create mode 100644 drivers/gpu/drm/i915/gt/uc/intel_guc_rc.h (limited to 'drivers/gpu/drm/i915/gt/uc/intel_guc.c') diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 134b0edf4a2e..949719ba048e 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -186,6 +186,7 @@ i915-y += gt/uc/intel_uc.o \ gt/uc/intel_guc_fw.o \ gt/uc/intel_guc_log.o \ gt/uc/intel_guc_log_debugfs.o \ + gt/uc/intel_guc_rc.o \ gt/uc/intel_guc_slpc.o \ gt/uc/intel_guc_submission.o \ gt/uc/intel_huc.o \ diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c index a7d13fe35b2e..799d382eea79 100644 --- a/drivers/gpu/drm/i915/gt/intel_rc6.c +++ b/drivers/gpu/drm/i915/gt/intel_rc6.c @@ -62,20 +62,25 @@ static void gen11_rc6_enable(struct intel_rc6 *rc6) u32 pg_enable; int i; - /* 2b: Program RC6 thresholds.*/ - set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85); - set(uncore, GEN10_MEDIA_WAKE_RATE_LIMIT, 150); + /* + * With GuCRC, these parameters are set by GuC + */ + if (!intel_uc_uses_guc_rc(&gt->uc)) { + /* 2b: Program RC6 thresholds.*/ + set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85); + set(uncore, GEN10_MEDIA_WAKE_RATE_LIMIT, 150); - set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */ - set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */ - for_each_engine(engine, rc6_to_gt(rc6), id) - set(uncore, RING_MAX_IDLE(engine->mmio_base), 10); + set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */ + set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */ + for_each_engine(engine, rc6_to_gt(rc6), id) + set(uncore, RING_MAX_IDLE(engine->mmio_base), 10); - set(uncore, GUC_MAX_IDLE_COUNT, 0xA); + set(uncore, GUC_MAX_IDLE_COUNT, 0xA); - set(uncore, GEN6_RC_SLEEP, 0); + set(uncore, GEN6_RC_SLEEP, 0); - set(uncore, GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */ + set(uncore, GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */ + } /* * 2c: Program Coarse Power Gating Policies. 
@@ -98,11 +103,19 @@ static void gen11_rc6_enable(struct intel_rc6 *rc6) set(uncore, GEN9_MEDIA_PG_IDLE_HYSTERESIS, 60); set(uncore, GEN9_RENDER_PG_IDLE_HYSTERESIS, 60); - /* 3a: Enable RC6 */ - rc6->ctl_enable = - GEN6_RC_CTL_HW_ENABLE | - GEN6_RC_CTL_RC6_ENABLE | - GEN6_RC_CTL_EI_MODE(1); + /* 3a: Enable RC6 + * + * With GuCRC, we do not enable bit 31 of RC_CTL, + * thus allowing GuC to control RC6 entry/exit fully instead. + * We will not set the HW ENABLE and EI bits + */ + if (!intel_guc_rc_enable(&gt->uc.guc)) + rc6->ctl_enable = GEN6_RC_CTL_RC6_ENABLE; + else + rc6->ctl_enable = + GEN6_RC_CTL_HW_ENABLE | + GEN6_RC_CTL_RC6_ENABLE | + GEN6_RC_CTL_EI_MODE(1); pg_enable = GEN9_RENDER_PG_ENABLE | @@ -513,6 +526,10 @@ static void __intel_rc6_disable(struct intel_rc6 *rc6) { struct drm_i915_private *i915 = rc6_to_i915(rc6); struct intel_uncore *uncore = rc6_to_uncore(rc6); + struct intel_gt *gt = rc6_to_gt(rc6); + + /* Take control of RC6 back from GuC */ + intel_guc_rc_disable(&gt->uc.guc); intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); if (GRAPHICS_VER(i915) >= 9) diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h index ca538e5de940..8ff582222aff 100644 --- a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h +++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h @@ -135,6 +135,7 @@ enum intel_guc_action { INTEL_GUC_ACTION_SET_CONTEXT_PREEMPTION_TIMEOUT = 0x1007, INTEL_GUC_ACTION_CONTEXT_RESET_NOTIFICATION = 0x1008, INTEL_GUC_ACTION_ENGINE_FAILURE_NOTIFICATION = 0x1009, + INTEL_GUC_ACTION_SETUP_PC_GUCRC = 0x3004, INTEL_GUC_ACTION_AUTHENTICATE_HUC = 0x4000, INTEL_GUC_ACTION_REGISTER_CONTEXT = 0x4502, INTEL_GUC_ACTION_DEREGISTER_CONTEXT = 0x4503, @@ -145,6 +146,11 @@ enum intel_guc_action { INTEL_GUC_ACTION_LIMIT }; +enum intel_guc_rc_options { + INTEL_GUCRC_HOST_CONTROL, + INTEL_GUCRC_FIRMWARE_CONTROL, +}; + enum intel_guc_preempt_options { INTEL_GUC_PREEMPT_OPTION_DROP_WORK_Q = 0x4, INTEL_GUC_PREEMPT_OPTION_DROP_SUBMIT_Q = 0x8, diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c index 13d162353b1a..fbfcae727d7f 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c @@ -159,6 +159,7 @@ void intel_guc_init_early(struct intel_guc *guc) intel_guc_log_init_early(&guc->log); intel_guc_submission_init_early(guc); intel_guc_slpc_init_early(&guc->slpc); + intel_guc_rc_init_early(guc); mutex_init(&guc->send_mutex); spin_lock_init(&guc->irq_lock); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h index 7da11a0b6059..2e27fe59786b 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h @@ -59,6 +59,8 @@ struct intel_guc { bool submission_supported; bool submission_selected; + bool rc_supported; + bool rc_selected; struct i915_vma *ads_vma; struct __guc_ads_blob *ads_blob; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c new file mode 100644 index 000000000000..fc805d466d99 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c @@ -0,0 +1,80 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2021 Intel Corporation + */ + +#include "intel_guc_rc.h" +#include "gt/intel_gt.h" +#include "i915_drv.h" + +static bool __guc_rc_supported(struct intel_guc *guc) +{ + /* GuC RC is unavailable for pre-Gen12 */ + return guc->submission_supported && + GRAPHICS_VER(guc_to_gt(guc)->i915) >= 12; +} + +static bool 
__guc_rc_selected(struct intel_guc *guc) +{ + if (!intel_guc_rc_is_supported(guc)) + return false; + + return guc->submission_selected; +} + +void intel_guc_rc_init_early(struct intel_guc *guc) +{ + guc->rc_supported = __guc_rc_supported(guc); + guc->rc_selected = __guc_rc_selected(guc); +} + +static int guc_action_control_gucrc(struct intel_guc *guc, bool enable) +{ + u32 rc_mode = enable ? INTEL_GUCRC_FIRMWARE_CONTROL : + INTEL_GUCRC_HOST_CONTROL; + u32 action[] = { + INTEL_GUC_ACTION_SETUP_PC_GUCRC, + rc_mode + }; + int ret; + + ret = intel_guc_send(guc, action, ARRAY_SIZE(action)); + ret = ret > 0 ? -EPROTO : ret; + + return ret; +} + +static int __guc_rc_control(struct intel_guc *guc, bool enable) +{ + struct intel_gt *gt = guc_to_gt(guc); + struct drm_device *drm = &guc_to_gt(guc)->i915->drm; + int ret; + + if (!intel_uc_uses_guc_rc(&gt->uc)) + return -EOPNOTSUPP; + + if (!intel_guc_is_ready(guc)) + return -EINVAL; + + ret = guc_action_control_gucrc(guc, enable); + if (ret) { + drm_err(drm, "Failed to %s GuC RC (%pe)\n", + enabledisable(enable), ERR_PTR(ret)); + return ret; + } + + drm_info(&gt->i915->drm, "GuC RC: %s\n", + enableddisabled(enable)); + + return 0; +} + +int intel_guc_rc_enable(struct intel_guc *guc) +{ + return __guc_rc_control(guc, true); +} + +int intel_guc_rc_disable(struct intel_guc *guc) +{ + return __guc_rc_control(guc, false); +} diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.h new file mode 100644 index 000000000000..57e86c337838 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ + +#ifndef _INTEL_GUC_RC_H_ +#define _INTEL_GUC_RC_H_ + +#include "intel_guc_submission.h" + +void intel_guc_rc_init_early(struct intel_guc *guc); + +static inline bool intel_guc_rc_is_supported(struct intel_guc *guc) +{ + return guc->rc_supported; +} + +static inline bool intel_guc_rc_is_wanted(struct intel_guc *guc) +{ + return guc->submission_selected && intel_guc_rc_is_supported(guc); +} + +static inline bool intel_guc_rc_is_used(struct intel_guc *guc) +{ + return intel_guc_submission_is_used(guc) && intel_guc_rc_is_wanted(guc); +} + +int intel_guc_rc_enable(struct intel_guc *guc); +int intel_guc_rc_disable(struct intel_guc *guc); + +#endif diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.h b/drivers/gpu/drm/i915/gt/uc/intel_uc.h index 925a58ca6b94..866b462821c0 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.h @@ -7,6 +7,7 @@ #define _INTEL_UC_H_ #include "intel_guc.h" +#include "intel_guc_rc.h" #include "intel_guc_submission.h" #include "intel_guc_slpc.h" #include "intel_huc.h" @@ -85,6 +86,7 @@ uc_state_checkers(guc, guc); uc_state_checkers(huc, huc); uc_state_checkers(guc, guc_submission); uc_state_checkers(guc, guc_slpc); +uc_state_checkers(guc, guc_rc); #undef uc_state_checkers #undef __uc_state_checker -- cgit v1.2.3
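For reference, assuming the __uc_state_checker() helper defined earlier in intel_uc.h (not shown in this excerpt), the new uc_state_checkers(guc, guc_rc) line expands to roughly the following inline wrappers, which is what lets intel_rc6.c above test intel_uc_uses_guc_rc() without reaching into struct intel_guc directly:

static inline bool intel_uc_supports_guc_rc(struct intel_uc *uc)
{
	/* Sketch of the generated wrapper; exact names come from the macro */
	return intel_guc_rc_is_supported(&uc->guc);
}

static inline bool intel_uc_wants_guc_rc(struct intel_uc *uc)
{
	return intel_guc_rc_is_wanted(&uc->guc);
}

static inline bool intel_uc_uses_guc_rc(struct intel_uc *uc)
{
	return intel_guc_rc_is_used(&uc->guc);
}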