Diffstat (limited to 'drivers/gpu/drm/i915/selftests'): 29 files changed, 509 insertions(+), 4837 deletions(-)
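
Note on the recurring conversion throughout this diff: the selftests stop calling i915_request_alloc(engine, ctx). Requests against a selftest-owned GEM context now go through the new igt_request_alloc(ctx, engine) wrapper (implemented in igt_gem_utils.c, added below), while requests against an engine's kernel context use i915_request_create(engine->kernel_context) directly, reflecting the move of the kernel context from i915->kernel_context to a per-engine intel_context. A condensed sketch of the call-site pattern, with error handling elided as it appears unchanged in the hunks below:

	/* before: allocate a request for a GEM context via the engine */
	rq = i915_request_alloc(engine, ctx);

	/* after: selftest helper resolves the (ctx, engine) intel_context */
	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* after: kernel-context requests bypass the GEM context entirely */
	rq = i915_request_create(engine->kernel_context);
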
diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c index 90721b54e7ae..1e1f83326a96 100644 --- a/drivers/gpu/drm/i915/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/selftests/huge_pages.c @@ -26,6 +26,7 @@ #include <linux/prime_numbers.h> +#include "igt_gem_utils.h" #include "mock_drm.h" #include "i915_random.h" @@ -980,7 +981,7 @@ static int gpu_write(struct i915_vma *vma, if (IS_ERR(batch)) return PTR_ERR(batch); - rq = i915_request_alloc(engine, ctx); + rq = igt_request_alloc(ctx, engine); if (IS_ERR(rq)) { err = PTR_ERR(rq); goto err_batch; diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c index 27d8f853111b..eee838dc0634 100644 --- a/drivers/gpu/drm/i915/selftests/i915_active.c +++ b/drivers/gpu/drm/i915/selftests/i915_active.c @@ -46,7 +46,7 @@ static int __live_active_setup(struct drm_i915_private *i915, for_each_engine(engine, i915, id) { struct i915_request *rq; - rq = i915_request_alloc(engine, i915->kernel_context); + rq = i915_request_create(engine->kernel_context); if (IS_ERR(rq)) { err = PTR_ERR(rq); break; diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c index 6fd70d326468..c6a9bff85311 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem.c @@ -8,34 +8,27 @@ #include "../i915_selftest.h" -#include "mock_context.h" +#include "igt_gem_utils.h" #include "igt_flush_test.h" +#include "mock_context.h" static int switch_to_context(struct drm_i915_private *i915, struct i915_gem_context *ctx) { struct intel_engine_cs *engine; enum intel_engine_id id; - intel_wakeref_t wakeref; - int err = 0; - - wakeref = intel_runtime_pm_get(i915); for_each_engine(engine, i915, id) { struct i915_request *rq; - rq = i915_request_alloc(engine, ctx); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - break; - } + rq = igt_request_alloc(ctx, engine); + if (IS_ERR(rq)) + return PTR_ERR(rq); i915_request_add(rq); } - intel_runtime_pm_put(i915, wakeref); - - return err; + return 0; } static void trash_stolen(struct drm_i915_private *i915) @@ -120,7 +113,7 @@ static void pm_resume(struct drm_i915_private *i915) * that runtime-pm just works. 
*/ with_intel_runtime_pm(i915, wakeref) { - intel_engines_sanitize(i915, false); + intel_gt_sanitize(i915, false); i915_gem_sanitize(i915); i915_gem_resume(i915); } diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c index e43630b40fce..046a38743152 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c @@ -202,7 +202,7 @@ static int gpu_set(struct drm_i915_gem_object *obj, if (IS_ERR(vma)) return PTR_ERR(vma); - rq = i915_request_alloc(i915->engine[RCS0], i915->kernel_context); + rq = i915_request_create(i915->engine[RCS0]->kernel_context); if (IS_ERR(rq)) { i915_vma_unpin(vma); return PTR_ERR(rq); diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c index 4e1b6efc6b22..34ac5cc6d59f 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c @@ -24,10 +24,12 @@ #include <linux/prime_numbers.h> -#include "../i915_reset.h" -#include "../i915_selftest.h" +#include "gt/intel_reset.h" +#include "i915_selftest.h" + #include "i915_random.h" #include "igt_flush_test.h" +#include "igt_gem_utils.h" #include "igt_live_test.h" #include "igt_reset.h" #include "igt_spinner.h" @@ -90,7 +92,7 @@ static int live_nop_switch(void *arg) times[0] = ktime_get_raw(); for (n = 0; n < nctx; n++) { - rq = i915_request_alloc(engine, ctx[n]); + rq = igt_request_alloc(ctx[n], engine); if (IS_ERR(rq)) { err = PTR_ERR(rq); goto out_unlock; @@ -120,7 +122,7 @@ static int live_nop_switch(void *arg) times[1] = ktime_get_raw(); for (n = 0; n < prime; n++) { - rq = i915_request_alloc(engine, ctx[n % nctx]); + rq = igt_request_alloc(ctx[n % nctx], engine); if (IS_ERR(rq)) { err = PTR_ERR(rq); goto out_unlock; @@ -300,7 +302,7 @@ static int gpu_fill(struct drm_i915_gem_object *obj, goto err_vma; } - rq = i915_request_alloc(engine, ctx); + rq = igt_request_alloc(ctx, engine); if (IS_ERR(rq)) { err = PTR_ERR(rq); goto err_batch; @@ -754,8 +756,7 @@ err: static int emit_rpcs_query(struct drm_i915_gem_object *obj, - struct i915_gem_context *ctx, - struct intel_engine_cs *engine, + struct intel_context *ce, struct i915_request **rq_out) { struct i915_request *rq; @@ -763,9 +764,9 @@ emit_rpcs_query(struct drm_i915_gem_object *obj, struct i915_vma *vma; int err; - GEM_BUG_ON(!intel_engine_can_store_dword(engine)); + GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine)); - vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL); + vma = i915_vma_instance(obj, &ce->gem_context->ppgtt->vm, NULL); if (IS_ERR(vma)) return PTR_ERR(vma); @@ -783,13 +784,15 @@ emit_rpcs_query(struct drm_i915_gem_object *obj, goto err_vma; } - rq = i915_request_alloc(engine, ctx); + rq = i915_request_create(ce); if (IS_ERR(rq)) { err = PTR_ERR(rq); goto err_batch; } - err = engine->emit_bb_start(rq, batch->node.start, batch->node.size, 0); + err = rq->engine->emit_bb_start(rq, + batch->node.start, batch->node.size, + 0); if (err) goto err_request; @@ -833,8 +836,7 @@ static int __sseu_prepare(struct drm_i915_private *i915, const char *name, unsigned int flags, - struct i915_gem_context *ctx, - struct intel_engine_cs *engine, + struct intel_context *ce, struct igt_spinner **spin) { struct i915_request *rq; @@ -852,7 +854,10 @@ __sseu_prepare(struct drm_i915_private *i915, if (ret) goto err_free; - rq = igt_spinner_create_request(*spin, ctx, engine, MI_NOOP); + rq = igt_spinner_create_request(*spin, + 
ce->gem_context, + ce->engine, + MI_NOOP); if (IS_ERR(rq)) { ret = PTR_ERR(rq); goto err_fini; @@ -879,8 +884,7 @@ err_free: static int __read_slice_count(struct drm_i915_private *i915, - struct i915_gem_context *ctx, - struct intel_engine_cs *engine, + struct intel_context *ce, struct drm_i915_gem_object *obj, struct igt_spinner *spin, u32 *rpcs) @@ -891,7 +895,7 @@ __read_slice_count(struct drm_i915_private *i915, u32 *buf, val; long ret; - ret = emit_rpcs_query(obj, ctx, engine, &rq); + ret = emit_rpcs_query(obj, ce, &rq); if (ret) return ret; @@ -955,31 +959,29 @@ static int __sseu_finish(struct drm_i915_private *i915, const char *name, unsigned int flags, - struct i915_gem_context *ctx, - struct i915_gem_context *kctx, - struct intel_engine_cs *engine, + struct intel_context *ce, struct drm_i915_gem_object *obj, unsigned int expected, struct igt_spinner *spin) { - unsigned int slices = - hweight32(intel_device_default_sseu(i915).slice_mask); + unsigned int slices = hweight32(ce->engine->sseu.slice_mask); u32 rpcs = 0; int ret = 0; if (flags & TEST_RESET) { - ret = i915_reset_engine(engine, "sseu"); + ret = i915_reset_engine(ce->engine, "sseu"); if (ret) goto out; } - ret = __read_slice_count(i915, ctx, engine, obj, + ret = __read_slice_count(i915, ce, obj, flags & TEST_RESET ? NULL : spin, &rpcs); ret = __check_rpcs(name, rpcs, ret, expected, "Context", "!"); if (ret) goto out; - ret = __read_slice_count(i915, kctx, engine, obj, NULL, &rpcs); + ret = __read_slice_count(i915, ce->engine->kernel_context, obj, + NULL, &rpcs); ret = __check_rpcs(name, rpcs, ret, slices, "Kernel context", "!"); out: @@ -993,7 +995,7 @@ out: if (ret) return ret; - ret = __read_slice_count(i915, ctx, engine, obj, NULL, &rpcs); + ret = __read_slice_count(i915, ce, obj, NULL, &rpcs); ret = __check_rpcs(name, rpcs, ret, expected, "Context", " after idle!"); } @@ -1005,28 +1007,22 @@ static int __sseu_test(struct drm_i915_private *i915, const char *name, unsigned int flags, - struct i915_gem_context *ctx, - struct intel_engine_cs *engine, + struct intel_context *ce, struct drm_i915_gem_object *obj, struct intel_sseu sseu) { struct igt_spinner *spin = NULL; - struct i915_gem_context *kctx; int ret; - kctx = kernel_context(i915); - if (IS_ERR(kctx)) - return PTR_ERR(kctx); - - ret = __sseu_prepare(i915, name, flags, ctx, engine, &spin); + ret = __sseu_prepare(i915, name, flags, ce, &spin); if (ret) - goto out_context; + return ret; - ret = __i915_gem_context_reconfigure_sseu(ctx, engine, sseu); + ret = __intel_context_reconfigure_sseu(ce, sseu); if (ret) goto out_spin; - ret = __sseu_finish(i915, name, flags, ctx, kctx, engine, obj, + ret = __sseu_finish(i915, name, flags, ce, obj, hweight32(sseu.slice_mask), spin); out_spin: @@ -1035,10 +1031,6 @@ out_spin: igt_spinner_fini(spin); kfree(spin); } - -out_context: - kernel_context_close(kctx); - return ret; } @@ -1047,10 +1039,11 @@ __igt_ctx_sseu(struct drm_i915_private *i915, const char *name, unsigned int flags) { - struct intel_sseu default_sseu = intel_device_default_sseu(i915); struct intel_engine_cs *engine = i915->engine[RCS0]; + struct intel_sseu default_sseu = engine->sseu; struct drm_i915_gem_object *obj; struct i915_gem_context *ctx; + struct intel_context *ce; struct intel_sseu pg_sseu; intel_wakeref_t wakeref; struct drm_file *file; @@ -1102,23 +1095,33 @@ __igt_ctx_sseu(struct drm_i915_private *i915, wakeref = intel_runtime_pm_get(i915); + ce = i915_gem_context_get_engine(ctx, RCS0); + if (IS_ERR(ce)) { + ret = PTR_ERR(ce); + goto out_rpm; + } + + ret 
= intel_context_pin(ce); + if (ret) + goto out_context; + /* First set the default mask. */ - ret = __sseu_test(i915, name, flags, ctx, engine, obj, default_sseu); + ret = __sseu_test(i915, name, flags, ce, obj, default_sseu); if (ret) goto out_fail; /* Then set a power-gated configuration. */ - ret = __sseu_test(i915, name, flags, ctx, engine, obj, pg_sseu); + ret = __sseu_test(i915, name, flags, ce, obj, pg_sseu); if (ret) goto out_fail; /* Back to defaults. */ - ret = __sseu_test(i915, name, flags, ctx, engine, obj, default_sseu); + ret = __sseu_test(i915, name, flags, ce, obj, default_sseu); if (ret) goto out_fail; /* One last power-gated configuration for the road. */ - ret = __sseu_test(i915, name, flags, ctx, engine, obj, pg_sseu); + ret = __sseu_test(i915, name, flags, ce, obj, pg_sseu); if (ret) goto out_fail; @@ -1126,9 +1129,12 @@ out_fail: if (igt_flush_test(i915, I915_WAIT_LOCKED)) ret = -EIO; - i915_gem_object_put(obj); - + intel_context_unpin(ce); +out_context: + intel_context_put(ce); +out_rpm: intel_runtime_pm_put(i915, wakeref); + i915_gem_object_put(obj); out_unlock: mutex_unlock(&i915->drm.struct_mutex); @@ -1345,7 +1351,7 @@ static int write_to_scratch(struct i915_gem_context *ctx, if (err) goto err_unpin; - rq = i915_request_alloc(engine, ctx); + rq = igt_request_alloc(ctx, engine); if (IS_ERR(rq)) { err = PTR_ERR(rq); goto err_unpin; @@ -1440,7 +1446,7 @@ static int read_from_scratch(struct i915_gem_context *ctx, if (err) goto err_unpin; - rq = i915_request_alloc(engine, ctx); + rq = igt_request_alloc(ctx, engine); if (IS_ERR(rq)) { err = PTR_ERR(rq); goto err_unpin; @@ -1608,113 +1614,6 @@ __engine_name(struct drm_i915_private *i915, intel_engine_mask_t engines) return "none"; } -static int __igt_switch_to_kernel_context(struct drm_i915_private *i915, - struct i915_gem_context *ctx, - intel_engine_mask_t engines) -{ - struct intel_engine_cs *engine; - intel_engine_mask_t tmp; - int pass; - - GEM_TRACE("Testing %s\n", __engine_name(i915, engines)); - for (pass = 0; pass < 4; pass++) { /* Once busy; once idle; repeat */ - bool from_idle = pass & 1; - int err; - - if (!from_idle) { - for_each_engine_masked(engine, i915, engines, tmp) { - struct i915_request *rq; - - rq = i915_request_alloc(engine, ctx); - if (IS_ERR(rq)) - return PTR_ERR(rq); - - i915_request_add(rq); - } - } - - err = i915_gem_switch_to_kernel_context(i915, - i915->gt.active_engines); - if (err) - return err; - - if (!from_idle) { - err = i915_gem_wait_for_idle(i915, - I915_WAIT_LOCKED, - MAX_SCHEDULE_TIMEOUT); - if (err) - return err; - } - - if (i915->gt.active_requests) { - pr_err("%d active requests remain after switching to kernel context, pass %d (%s) on %s engine%s\n", - i915->gt.active_requests, - pass, from_idle ? "idle" : "busy", - __engine_name(i915, engines), - is_power_of_2(engines) ? "" : "s"); - return -EINVAL; - } - - /* XXX Bonus points for proving we are the kernel context! */ - - mutex_unlock(&i915->drm.struct_mutex); - drain_delayed_work(&i915->gt.idle_work); - mutex_lock(&i915->drm.struct_mutex); - } - - if (igt_flush_test(i915, I915_WAIT_LOCKED)) - return -EIO; - - return 0; -} - -static int igt_switch_to_kernel_context(void *arg) -{ - struct drm_i915_private *i915 = arg; - struct intel_engine_cs *engine; - struct i915_gem_context *ctx; - enum intel_engine_id id; - intel_wakeref_t wakeref; - int err; - - /* - * A core premise of switching to the kernel context is that - * if an engine is already idling in the kernel context, we - * do not emit another request and wake it up. 
The other being - * that we do indeed end up idling in the kernel context. - */ - - mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); - - ctx = kernel_context(i915); - if (IS_ERR(ctx)) { - mutex_unlock(&i915->drm.struct_mutex); - return PTR_ERR(ctx); - } - - /* First check idling each individual engine */ - for_each_engine(engine, i915, id) { - err = __igt_switch_to_kernel_context(i915, ctx, BIT(id)); - if (err) - goto out_unlock; - } - - /* Now en masse */ - err = __igt_switch_to_kernel_context(i915, ctx, ALL_ENGINES); - if (err) - goto out_unlock; - -out_unlock: - GEM_TRACE_DUMP_ON(err); - - intel_runtime_pm_put(i915, wakeref); - mutex_unlock(&i915->drm.struct_mutex); - - kernel_context_close(ctx); - return err; -} - static void mock_barrier_task(void *data) { unsigned int *counter = data; @@ -1729,7 +1628,6 @@ static int mock_context_barrier(void *arg) struct drm_i915_private *i915 = arg; struct i915_gem_context *ctx; struct i915_request *rq; - intel_wakeref_t wakeref; unsigned int counter; int err; @@ -1767,20 +1665,17 @@ static int mock_context_barrier(void *arg) goto out; } if (counter == 0) { - pr_err("Did not retire immediately for all inactive engines\n"); + pr_err("Did not retire immediately for all unused engines\n"); err = -EINVAL; goto out; } - rq = ERR_PTR(-ENODEV); - with_intel_runtime_pm(i915, wakeref) - rq = i915_request_alloc(i915->engine[RCS0], ctx); + rq = igt_request_alloc(ctx, i915->engine[RCS0]); if (IS_ERR(rq)) { pr_err("Request allocation failed!\n"); goto out; } i915_request_add(rq); - GEM_BUG_ON(list_empty(&ctx->active_engines)); counter = 0; context_barrier_inject_fault = BIT(RCS0); @@ -1824,7 +1719,6 @@ unlock: int i915_gem_context_mock_selftests(void) { static const struct i915_subtest tests[] = { - SUBTEST(igt_switch_to_kernel_context), SUBTEST(mock_context_barrier), }; struct drm_i915_private *i915; @@ -1843,7 +1737,6 @@ int i915_gem_context_mock_selftests(void) int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv) { static const struct i915_subtest tests[] = { - SUBTEST(igt_switch_to_kernel_context), SUBTEST(live_nop_switch), SUBTEST(igt_ctx_exec), SUBTEST(igt_ctx_readonly), diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c index 89766688e420..4fc6e5445dd1 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c @@ -24,6 +24,7 @@ #include "../i915_selftest.h" +#include "igt_gem_utils.h" #include "lib_sw_fence.h" #include "mock_context.h" #include "mock_drm.h" @@ -460,7 +461,7 @@ static int igt_evict_contexts(void *arg) /* We will need some GGTT space for the rq's context */ igt_evict_ctl.fail_if_busy = true; - rq = i915_request_alloc(engine, ctx); + rq = igt_request_alloc(ctx, engine); igt_evict_ctl.fail_if_busy = false; if (IS_ERR(rq)) { diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/selftests/i915_gem_object.c index 971148fbe6f5..b926d1cd165d 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_object.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_object.c @@ -24,6 +24,7 @@ #include "../i915_selftest.h" +#include "igt_flush_test.h" #include "mock_gem_device.h" #include "huge_gem_object.h" @@ -468,7 +469,7 @@ static int make_obj_busy(struct drm_i915_gem_object *obj) if (err) return err; - rq = i915_request_alloc(i915->engine[RCS0], i915->kernel_context); + rq = i915_request_create(i915->engine[RCS0]->kernel_context); if (IS_ERR(rq)) { i915_vma_unpin(vma); 
return PTR_ERR(rq); @@ -505,17 +506,21 @@ static void disable_retire_worker(struct drm_i915_private *i915) { i915_gem_shrinker_unregister(i915); - mutex_lock(&i915->drm.struct_mutex); - if (!i915->gt.active_requests++) { - intel_wakeref_t wakeref; + intel_gt_pm_get(i915); - with_intel_runtime_pm(i915, wakeref) - i915_gem_unpark(i915); - } + cancel_delayed_work_sync(&i915->gem.retire_work); + flush_work(&i915->gem.idle_work); +} + +static void restore_retire_worker(struct drm_i915_private *i915) +{ + intel_gt_pm_put(i915); + + mutex_lock(&i915->drm.struct_mutex); + igt_flush_test(i915, I915_WAIT_LOCKED); mutex_unlock(&i915->drm.struct_mutex); - cancel_delayed_work_sync(&i915->gt.retire_work); - cancel_delayed_work_sync(&i915->gt.idle_work); + i915_gem_shrinker_register(i915); } static int igt_mmap_offset_exhaustion(void *arg) @@ -615,13 +620,7 @@ static int igt_mmap_offset_exhaustion(void *arg) out: drm_mm_remove_node(&resv); out_park: - mutex_lock(&i915->drm.struct_mutex); - if (--i915->gt.active_requests) - queue_delayed_work(i915->wq, &i915->gt.retire_work, 0); - else - queue_delayed_work(i915->wq, &i915->gt.idle_work, 0); - mutex_unlock(&i915->drm.struct_mutex); - i915_gem_shrinker_register(i915); + restore_retire_worker(i915); return err; err_obj: i915_gem_object_put(obj); diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h index 6d766925ad04..a953125b14c4 100644 --- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h +++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h @@ -17,12 +17,14 @@ selftest(requests, i915_request_live_selftests) selftest(active, i915_active_live_selftests) selftest(objects, i915_gem_object_live_selftests) selftest(dmabuf, i915_gem_dmabuf_live_selftests) +selftest(vma, i915_vma_live_selftests) selftest(coherency, i915_gem_coherency_live_selftests) selftest(gtt, i915_gem_gtt_live_selftests) selftest(gem, i915_gem_live_selftests) selftest(evict, i915_gem_evict_live_selftests) selftest(hugepages, i915_gem_huge_page_live_selftests) selftest(contexts, i915_gem_context_live_selftests) +selftest(reset, intel_reset_live_selftests) selftest(hangcheck, intel_hangcheck_live_selftests) selftest(execlists, intel_execlists_live_selftests) selftest(guc, intel_guc_live_selftest) diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index e6ffe2240126..b60591531e4a 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -267,7 +267,7 @@ static struct i915_request * __live_request_alloc(struct i915_gem_context *ctx, struct intel_engine_cs *engine) { - return i915_request_alloc(engine, ctx); + return igt_request_alloc(ctx, engine); } static int __igt_breadcrumbs_smoketest(void *arg) @@ -551,8 +551,7 @@ static int live_nop_request(void *arg) times[1] = ktime_get_raw(); for (n = 0; n < prime; n++) { - request = i915_request_alloc(engine, - i915->kernel_context); + request = i915_request_create(engine->kernel_context); if (IS_ERR(request)) { err = PTR_ERR(request); goto out_unlock; @@ -649,7 +648,7 @@ empty_request(struct intel_engine_cs *engine, struct i915_request *request; int err; - request = i915_request_alloc(engine, engine->i915->kernel_context); + request = i915_request_create(engine->kernel_context); if (IS_ERR(request)) return request; @@ -853,7 +852,7 @@ static int live_all_engines(void *arg) } for_each_engine(engine, i915, id) { - request[id] = i915_request_alloc(engine, 
i915->kernel_context); + request[id] = i915_request_create(engine->kernel_context); if (IS_ERR(request[id])) { err = PTR_ERR(request[id]); pr_err("%s: Request allocation failed with err=%d\n", @@ -962,7 +961,7 @@ static int live_sequential_engines(void *arg) goto out_unlock; } - request[id] = i915_request_alloc(engine, i915->kernel_context); + request[id] = i915_request_create(engine->kernel_context); if (IS_ERR(request[id])) { err = PTR_ERR(request[id]); pr_err("%s: Request allocation failed for %s with err=%d\n", @@ -1075,7 +1074,7 @@ max_batches(struct i915_gem_context *ctx, struct intel_engine_cs *engine) if (HAS_EXECLISTS(ctx->i915)) return INT_MAX; - rq = i915_request_alloc(engine, ctx); + rq = igt_request_alloc(ctx, engine); if (IS_ERR(rq)) { ret = PTR_ERR(rq); } else { diff --git a/drivers/gpu/drm/i915/selftests/i915_timeline.c b/drivers/gpu/drm/i915/selftests/i915_timeline.c index bd96afcadfe7..ff9ebe50fae8 100644 --- a/drivers/gpu/drm/i915/selftests/i915_timeline.c +++ b/drivers/gpu/drm/i915/selftests/i915_timeline.c @@ -454,7 +454,7 @@ tl_write(struct i915_timeline *tl, struct intel_engine_cs *engine, u32 value) goto out; } - rq = i915_request_alloc(engine, engine->i915->kernel_context); + rq = i915_request_create(engine->kernel_context); if (IS_ERR(rq)) goto out_unpin; @@ -678,7 +678,7 @@ static int live_hwsp_wrap(void *arg) if (!intel_engine_can_store_dword(engine)) continue; - rq = i915_request_alloc(engine, i915->kernel_context); + rq = i915_request_create(engine->kernel_context); if (IS_ERR(rq)) { err = PTR_ERR(rq); goto out; diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c index fc594b030f5a..0027c1fac336 100644 --- a/drivers/gpu/drm/i915/selftests/i915_vma.c +++ b/drivers/gpu/drm/i915/selftests/i915_vma.c @@ -59,7 +59,7 @@ static bool assert_vma(struct i915_vma *vma, static struct i915_vma * checked_vma_instance(struct drm_i915_gem_object *obj, struct i915_address_space *vm, - struct i915_ggtt_view *view) + const struct i915_ggtt_view *view) { struct i915_vma *vma; bool ok = true; @@ -397,18 +397,79 @@ assert_rotated(struct drm_i915_gem_object *obj, return sg; } -static unsigned int rotated_size(const struct intel_rotation_plane_info *a, - const struct intel_rotation_plane_info *b) +static unsigned long remapped_index(const struct intel_remapped_info *r, + unsigned int n, + unsigned int x, + unsigned int y) +{ + return (r->plane[n].stride * y + + r->plane[n].offset + x); +} + +static struct scatterlist * +assert_remapped(struct drm_i915_gem_object *obj, + const struct intel_remapped_info *r, unsigned int n, + struct scatterlist *sg) +{ + unsigned int x, y; + unsigned int left = 0; + unsigned int offset; + + for (y = 0; y < r->plane[n].height; y++) { + for (x = 0; x < r->plane[n].width; x++) { + unsigned long src_idx; + dma_addr_t src; + + if (!sg) { + pr_err("Invalid sg table: too short at plane %d, (%d, %d)!\n", + n, x, y); + return ERR_PTR(-EINVAL); + } + if (!left) { + offset = 0; + left = sg_dma_len(sg); + } + + src_idx = remapped_index(r, n, x, y); + src = i915_gem_object_get_dma_address(obj, src_idx); + + if (left < PAGE_SIZE || left & (PAGE_SIZE-1)) { + pr_err("Invalid sg.length, found %d, expected %lu for remapped page (%d, %d) [src index %lu]\n", + sg_dma_len(sg), PAGE_SIZE, + x, y, src_idx); + return ERR_PTR(-EINVAL); + } + + if (sg_dma_address(sg) + offset != src) { + pr_err("Invalid address for remapped page (%d, %d) [src index %lu]\n", + x, y, src_idx); + return ERR_PTR(-EINVAL); + } + + left -= PAGE_SIZE; + 
offset += PAGE_SIZE; + + + if (!left) + sg = sg_next(sg); + } + } + + return sg; +} + +static unsigned int rotated_size(const struct intel_remapped_plane_info *a, + const struct intel_remapped_plane_info *b) { return a->width * a->height + b->width * b->height; } -static int igt_vma_rotate(void *arg) +static int igt_vma_rotate_remap(void *arg) { struct i915_ggtt *ggtt = arg; struct i915_address_space *vm = &ggtt->vm; struct drm_i915_gem_object *obj; - const struct intel_rotation_plane_info planes[] = { + const struct intel_remapped_plane_info planes[] = { { .width = 1, .height = 1, .stride = 1 }, { .width = 2, .height = 2, .stride = 2 }, { .width = 4, .height = 4, .stride = 4 }, @@ -426,6 +487,11 @@ static int igt_vma_rotate(void *arg) { .width = 6, .height = 4, .stride = 6 }, { } }, *a, *b; + enum i915_ggtt_view_type types[] = { + I915_GGTT_VIEW_ROTATED, + I915_GGTT_VIEW_REMAPPED, + 0, + }, *t; const unsigned int max_pages = 64; int err = -ENOMEM; @@ -437,6 +503,7 @@ static int igt_vma_rotate(void *arg) if (IS_ERR(obj)) goto out; + for (t = types; *t; t++) { for (a = planes; a->width; a++) { for (b = planes + ARRAY_SIZE(planes); b-- != planes; ) { struct i915_ggtt_view view; @@ -447,7 +514,7 @@ static int igt_vma_rotate(void *arg) GEM_BUG_ON(max_offset > max_pages); max_offset = max_pages - max_offset; - view.type = I915_GGTT_VIEW_ROTATED; + view.type = *t; view.rotated.plane[0] = *a; view.rotated.plane[1] = *b; @@ -468,14 +535,23 @@ static int igt_vma_rotate(void *arg) goto out_object; } - if (vma->size != rotated_size(a, b) * PAGE_SIZE) { + if (view.type == I915_GGTT_VIEW_ROTATED && + vma->size != rotated_size(a, b) * PAGE_SIZE) { pr_err("VMA is wrong size, expected %lu, found %llu\n", PAGE_SIZE * rotated_size(a, b), vma->size); err = -EINVAL; goto out_object; } - if (vma->pages->nents != rotated_size(a, b)) { + if (view.type == I915_GGTT_VIEW_REMAPPED && + vma->size > rotated_size(a, b) * PAGE_SIZE) { + pr_err("VMA is wrong size, expected %lu, found %llu\n", + PAGE_SIZE * rotated_size(a, b), vma->size); + err = -EINVAL; + goto out_object; + } + + if (vma->pages->nents > rotated_size(a, b)) { pr_err("sg table is wrong sizeo, expected %u, found %u nents\n", rotated_size(a, b), vma->pages->nents); err = -EINVAL; @@ -497,9 +573,14 @@ static int igt_vma_rotate(void *arg) sg = vma->pages->sgl; for (n = 0; n < ARRAY_SIZE(view.rotated.plane); n++) { - sg = assert_rotated(obj, &view.rotated, n, sg); + if (view.type == I915_GGTT_VIEW_ROTATED) + sg = assert_rotated(obj, &view.rotated, n, sg); + else + sg = assert_remapped(obj, &view.remapped, n, sg); if (IS_ERR(sg)) { - pr_err("Inconsistent VMA pages for plane %d: [(%d, %d, %d, %d), (%d, %d, %d, %d)]\n", n, + pr_err("Inconsistent %s VMA pages for plane %d: [(%d, %d, %d, %d), (%d, %d, %d, %d)]\n", + view.type == I915_GGTT_VIEW_ROTATED ? 
+ "rotated" : "remapped", n, view.rotated.plane[0].width, view.rotated.plane[0].height, view.rotated.plane[0].stride, @@ -518,6 +599,7 @@ static int igt_vma_rotate(void *arg) } } } + } out_object: i915_gem_object_put(obj); @@ -721,7 +803,7 @@ int i915_vma_mock_selftests(void) static const struct i915_subtest tests[] = { SUBTEST(igt_vma_create), SUBTEST(igt_vma_pin1), - SUBTEST(igt_vma_rotate), + SUBTEST(igt_vma_rotate_remap), SUBTEST(igt_vma_partial), }; struct drm_i915_private *i915; @@ -752,3 +834,145 @@ out_put: drm_dev_put(&i915->drm); return err; } + +static int igt_vma_remapped_gtt(void *arg) +{ + struct drm_i915_private *i915 = arg; + const struct intel_remapped_plane_info planes[] = { + { .width = 1, .height = 1, .stride = 1 }, + { .width = 2, .height = 2, .stride = 2 }, + { .width = 4, .height = 4, .stride = 4 }, + { .width = 8, .height = 8, .stride = 8 }, + + { .width = 3, .height = 5, .stride = 3 }, + { .width = 3, .height = 5, .stride = 4 }, + { .width = 3, .height = 5, .stride = 5 }, + + { .width = 5, .height = 3, .stride = 5 }, + { .width = 5, .height = 3, .stride = 7 }, + { .width = 5, .height = 3, .stride = 9 }, + + { .width = 4, .height = 6, .stride = 6 }, + { .width = 6, .height = 4, .stride = 6 }, + { } + }, *p; + enum i915_ggtt_view_type types[] = { + I915_GGTT_VIEW_ROTATED, + I915_GGTT_VIEW_REMAPPED, + 0, + }, *t; + struct drm_i915_gem_object *obj; + intel_wakeref_t wakeref; + int err = 0; + + obj = i915_gem_object_create_internal(i915, 10 * 10 * PAGE_SIZE); + if (IS_ERR(obj)) + return PTR_ERR(obj); + + mutex_lock(&i915->drm.struct_mutex); + + wakeref = intel_runtime_pm_get(i915); + + for (t = types; *t; t++) { + for (p = planes; p->width; p++) { + struct i915_ggtt_view view = { + .type = *t, + .rotated.plane[0] = *p, + }; + struct i915_vma *vma; + u32 __iomem *map; + unsigned int x, y; + int err; + + err = i915_gem_object_set_to_gtt_domain(obj, true); + if (err) + goto out; + + vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto out; + } + + GEM_BUG_ON(vma->ggtt_view.type != *t); + + map = i915_vma_pin_iomap(vma); + i915_vma_unpin(vma); + if (IS_ERR(map)) { + err = PTR_ERR(map); + goto out; + } + + for (y = 0 ; y < p->height; y++) { + for (x = 0 ; x < p->width; x++) { + unsigned int offset; + u32 val = y << 16 | x; + + if (*t == I915_GGTT_VIEW_ROTATED) + offset = (x * p->height + y) * PAGE_SIZE; + else + offset = (y * p->width + x) * PAGE_SIZE; + + iowrite32(val, &map[offset / sizeof(*map)]); + } + } + + i915_vma_unpin_iomap(vma); + + vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto out; + } + + GEM_BUG_ON(vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL); + + map = i915_vma_pin_iomap(vma); + i915_vma_unpin(vma); + if (IS_ERR(map)) { + err = PTR_ERR(map); + goto out; + } + + for (y = 0 ; y < p->height; y++) { + for (x = 0 ; x < p->width; x++) { + unsigned int offset, src_idx; + u32 exp = y << 16 | x; + u32 val; + + if (*t == I915_GGTT_VIEW_ROTATED) + src_idx = rotated_index(&view.rotated, 0, x, y); + else + src_idx = remapped_index(&view.remapped, 0, x, y); + offset = src_idx * PAGE_SIZE; + + val = ioread32(&map[offset / sizeof(*map)]); + if (val != exp) { + pr_err("%s VMA write test failed, expected 0x%x, found 0x%x\n", + *t == I915_GGTT_VIEW_ROTATED ? 
"Rotated" : "Remapped", + val, exp); + i915_vma_unpin_iomap(vma); + goto out; + } + } + } + i915_vma_unpin_iomap(vma); + } + } + +out: + intel_runtime_pm_put(i915, wakeref); + mutex_unlock(&i915->drm.struct_mutex); + i915_gem_object_put(obj); + + return err; +} + +int i915_vma_live_selftests(struct drm_i915_private *i915) +{ + static const struct i915_subtest tests[] = { + SUBTEST(igt_vma_remapped_gtt), + }; + + return i915_subtests(tests, i915); +} diff --git a/drivers/gpu/drm/i915/selftests/igt_atomic.h b/drivers/gpu/drm/i915/selftests/igt_atomic.h new file mode 100644 index 000000000000..93ec89f487ec --- /dev/null +++ b/drivers/gpu/drm/i915/selftests/igt_atomic.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2018 Intel Corporation + */ + +#ifndef IGT_ATOMIC_H +#define IGT_ATOMIC_H + +#include <linux/preempt.h> +#include <linux/bottom_half.h> +#include <linux/irqflags.h> + +static void __preempt_begin(void) +{ + preempt_disable(); +} + +static void __preempt_end(void) +{ + preempt_enable(); +} + +static void __softirq_begin(void) +{ + local_bh_disable(); +} + +static void __softirq_end(void) +{ + local_bh_enable(); +} + +static void __hardirq_begin(void) +{ + local_irq_disable(); +} + +static void __hardirq_end(void) +{ + local_irq_enable(); +} + +struct igt_atomic_section { + const char *name; + void (*critical_section_begin)(void); + void (*critical_section_end)(void); +}; + +static const struct igt_atomic_section igt_atomic_phases[] = { + { "preempt", __preempt_begin, __preempt_end }, + { "softirq", __softirq_begin, __softirq_end }, + { "hardirq", __hardirq_begin, __hardirq_end }, + { } +}; + +#endif /* IGT_ATOMIC_H */ diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.c b/drivers/gpu/drm/i915/selftests/igt_flush_test.c index 94aee4071a66..e42f3c58536a 100644 --- a/drivers/gpu/drm/i915/selftests/igt_flush_test.c +++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.c @@ -11,23 +11,29 @@ int igt_flush_test(struct drm_i915_private *i915, unsigned int flags) { + int ret = i915_terminally_wedged(i915) ? -EIO : 0; + int repeat = !!(flags & I915_WAIT_LOCKED); + cond_resched(); - if (flags & I915_WAIT_LOCKED && - i915_gem_switch_to_kernel_context(i915, i915->gt.active_engines)) { - pr_err("Failed to switch back to kernel context; declaring wedged\n"); - i915_gem_set_wedged(i915); - } + do { + if (i915_gem_wait_for_idle(i915, flags, HZ / 5) == -ETIME) { + pr_err("%pS timed out, cancelling all further testing.\n", + __builtin_return_address(0)); - if (i915_gem_wait_for_idle(i915, flags, HZ / 5) == -ETIME) { - pr_err("%pS timed out, cancelling all further testing.\n", - __builtin_return_address(0)); + GEM_TRACE("%pS timed out.\n", + __builtin_return_address(0)); + GEM_TRACE_DUMP(); - GEM_TRACE("%pS timed out.\n", __builtin_return_address(0)); - GEM_TRACE_DUMP(); + i915_gem_set_wedged(i915); + repeat = 0; + ret = -EIO; + } - i915_gem_set_wedged(i915); - } + /* Ensure we also flush after wedging. 
*/ + if (flags & I915_WAIT_LOCKED) + i915_retire_requests(i915); + } while (repeat--); - return i915_terminally_wedged(i915); + return ret; } diff --git a/drivers/gpu/drm/i915/selftests/igt_gem_utils.c b/drivers/gpu/drm/i915/selftests/igt_gem_utils.c new file mode 100644 index 000000000000..16891b1a3e50 --- /dev/null +++ b/drivers/gpu/drm/i915/selftests/igt_gem_utils.c @@ -0,0 +1,34 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2018 Intel Corporation + */ + +#include "igt_gem_utils.h" + +#include "gt/intel_context.h" + +#include "../i915_gem_context.h" +#include "../i915_gem_pm.h" +#include "../i915_request.h" + +struct i915_request * +igt_request_alloc(struct i915_gem_context *ctx, struct intel_engine_cs *engine) +{ + struct intel_context *ce; + struct i915_request *rq; + + /* + * Pinning the contexts may generate requests in order to acquire + * GGTT space, so do this first before we reserve a seqno for + * ourselves. + */ + ce = i915_gem_context_get_engine(ctx, engine->id); + if (IS_ERR(ce)) + return ERR_CAST(ce); + + rq = intel_context_create_request(ce); + intel_context_put(ce); + + return rq; +} diff --git a/drivers/gpu/drm/i915/selftests/igt_gem_utils.h b/drivers/gpu/drm/i915/selftests/igt_gem_utils.h new file mode 100644 index 000000000000..0f17251cf75d --- /dev/null +++ b/drivers/gpu/drm/i915/selftests/igt_gem_utils.h @@ -0,0 +1,17 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2018 Intel Corporation + */ + +#ifndef __IGT_GEM_UTILS_H__ +#define __IGT_GEM_UTILS_H__ + +struct i915_request; +struct i915_gem_context; +struct intel_engine_cs; + +struct i915_request * +igt_request_alloc(struct i915_gem_context *ctx, struct intel_engine_cs *engine); + +#endif /* __IGT_GEM_UTILS_H__ */ diff --git a/drivers/gpu/drm/i915/selftests/igt_reset.c b/drivers/gpu/drm/i915/selftests/igt_reset.c index 208a966da8ca..587df6fd4ffe 100644 --- a/drivers/gpu/drm/i915/selftests/igt_reset.c +++ b/drivers/gpu/drm/i915/selftests/igt_reset.c @@ -6,8 +6,9 @@ #include "igt_reset.h" +#include "gt/intel_engine.h" + #include "../i915_drv.h" -#include "../intel_ringbuffer.h" void igt_global_reset_lock(struct drm_i915_private *i915) { @@ -42,3 +43,11 @@ void igt_global_reset_unlock(struct drm_i915_private *i915) clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags); wake_up_all(&i915->gpu_error.reset_queue); } + +bool igt_force_reset(struct drm_i915_private *i915) +{ + i915_gem_set_wedged(i915); + i915_reset(i915, 0, NULL); + + return !i915_reset_failed(i915); +} diff --git a/drivers/gpu/drm/i915/selftests/igt_reset.h b/drivers/gpu/drm/i915/selftests/igt_reset.h index 5f0234d045d5..363bd853e50f 100644 --- a/drivers/gpu/drm/i915/selftests/igt_reset.h +++ b/drivers/gpu/drm/i915/selftests/igt_reset.h @@ -11,5 +11,6 @@ void igt_global_reset_lock(struct drm_i915_private *i915); void igt_global_reset_unlock(struct drm_i915_private *i915); +bool igt_force_reset(struct drm_i915_private *i915); #endif diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c index 16890dfe74c0..ece8a8a0d3b0 100644 --- a/drivers/gpu/drm/i915/selftests/igt_spinner.c +++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c @@ -4,6 +4,7 @@ * Copyright © 2018 Intel Corporation */ +#include "igt_gem_utils.h" #include "igt_spinner.h" int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915) @@ -114,7 +115,7 @@ igt_spinner_create_request(struct igt_spinner *spin, if (err) goto unpin_vma; - rq = i915_request_alloc(engine, ctx); + rq = igt_request_alloc(ctx, 
engine); if (IS_ERR(rq)) { err = PTR_ERR(rq); goto unpin_hws; diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.h b/drivers/gpu/drm/i915/selftests/igt_spinner.h index 391777c76dc7..d312e7cdab68 100644 --- a/drivers/gpu/drm/i915/selftests/igt_spinner.h +++ b/drivers/gpu/drm/i915/selftests/igt_spinner.h @@ -9,9 +9,10 @@ #include "../i915_selftest.h" +#include "gt/intel_engine.h" + #include "../i915_drv.h" #include "../i915_request.h" -#include "../intel_ringbuffer.h" #include "../i915_gem_context.h" struct igt_spinner { diff --git a/drivers/gpu/drm/i915/selftests/intel_engine_cs.c b/drivers/gpu/drm/i915/selftests/intel_engine_cs.c deleted file mode 100644 index cfaa6b296835..000000000000 --- a/drivers/gpu/drm/i915/selftests/intel_engine_cs.c +++ /dev/null @@ -1,58 +0,0 @@ -/* - * SPDX-License-Identifier: GPL-2.0 - * - * Copyright © 2018 Intel Corporation - */ - -#include "../i915_selftest.h" - -static int intel_mmio_bases_check(void *arg) -{ - int i, j; - - for (i = 0; i < ARRAY_SIZE(intel_engines); i++) { - const struct engine_info *info = &intel_engines[i]; - char name[INTEL_ENGINE_CS_MAX_NAME]; - u8 prev = U8_MAX; - - __sprint_engine_name(name, info); - - for (j = 0; j < MAX_MMIO_BASES; j++) { - u8 gen = info->mmio_bases[j].gen; - u32 base = info->mmio_bases[j].base; - - if (gen >= prev) { - pr_err("%s: %s: mmio base for gen %x " - "is before the one for gen %x\n", - __func__, name, prev, gen); - return -EINVAL; - } - - if (gen == 0) - break; - - if (!base) { - pr_err("%s: %s: invalid mmio base (%x) " - "for gen %x at entry %u\n", - __func__, name, base, gen, j); - return -EINVAL; - } - - prev = gen; - } - - pr_info("%s: min gen supported for %s = %d\n", - __func__, name, prev); - } - - return 0; -} - -int intel_engine_cs_mock_selftests(void) -{ - static const struct i915_subtest tests[] = { - SUBTEST(intel_mmio_bases_check), - }; - - return i915_subtests(tests, NULL); -} diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c deleted file mode 100644 index 050bd1e19e02..000000000000 --- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c +++ /dev/null @@ -1,1919 +0,0 @@ -/* - * Copyright © 2016 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. 
- * - */ - -#include <linux/kthread.h> - -#include "../i915_selftest.h" -#include "i915_random.h" -#include "igt_flush_test.h" -#include "igt_reset.h" -#include "igt_wedge_me.h" - -#include "mock_context.h" -#include "mock_drm.h" - -#define IGT_IDLE_TIMEOUT 50 /* ms; time to wait after flushing between tests */ - -struct hang { - struct drm_i915_private *i915; - struct drm_i915_gem_object *hws; - struct drm_i915_gem_object *obj; - struct i915_gem_context *ctx; - u32 *seqno; - u32 *batch; -}; - -static int hang_init(struct hang *h, struct drm_i915_private *i915) -{ - void *vaddr; - int err; - - memset(h, 0, sizeof(*h)); - h->i915 = i915; - - h->ctx = kernel_context(i915); - if (IS_ERR(h->ctx)) - return PTR_ERR(h->ctx); - - GEM_BUG_ON(i915_gem_context_is_bannable(h->ctx)); - - h->hws = i915_gem_object_create_internal(i915, PAGE_SIZE); - if (IS_ERR(h->hws)) { - err = PTR_ERR(h->hws); - goto err_ctx; - } - - h->obj = i915_gem_object_create_internal(i915, PAGE_SIZE); - if (IS_ERR(h->obj)) { - err = PTR_ERR(h->obj); - goto err_hws; - } - - i915_gem_object_set_cache_coherency(h->hws, I915_CACHE_LLC); - vaddr = i915_gem_object_pin_map(h->hws, I915_MAP_WB); - if (IS_ERR(vaddr)) { - err = PTR_ERR(vaddr); - goto err_obj; - } - h->seqno = memset(vaddr, 0xff, PAGE_SIZE); - - vaddr = i915_gem_object_pin_map(h->obj, - i915_coherent_map_type(i915)); - if (IS_ERR(vaddr)) { - err = PTR_ERR(vaddr); - goto err_unpin_hws; - } - h->batch = vaddr; - - return 0; - -err_unpin_hws: - i915_gem_object_unpin_map(h->hws); -err_obj: - i915_gem_object_put(h->obj); -err_hws: - i915_gem_object_put(h->hws); -err_ctx: - kernel_context_close(h->ctx); - return err; -} - -static u64 hws_address(const struct i915_vma *hws, - const struct i915_request *rq) -{ - return hws->node.start + offset_in_page(sizeof(u32)*rq->fence.context); -} - -static int move_to_active(struct i915_vma *vma, - struct i915_request *rq, - unsigned int flags) -{ - int err; - - err = i915_vma_move_to_active(vma, rq, flags); - if (err) - return err; - - if (!i915_gem_object_has_active_reference(vma->obj)) { - i915_gem_object_get(vma->obj); - i915_gem_object_set_active_reference(vma->obj); - } - - return 0; -} - -static struct i915_request * -hang_create_request(struct hang *h, struct intel_engine_cs *engine) -{ - struct drm_i915_private *i915 = h->i915; - struct i915_address_space *vm = - h->ctx->ppgtt ? 
&h->ctx->ppgtt->vm : &i915->ggtt.vm; - struct i915_request *rq = NULL; - struct i915_vma *hws, *vma; - unsigned int flags; - u32 *batch; - int err; - - if (i915_gem_object_is_active(h->obj)) { - struct drm_i915_gem_object *obj; - void *vaddr; - - obj = i915_gem_object_create_internal(h->i915, PAGE_SIZE); - if (IS_ERR(obj)) - return ERR_CAST(obj); - - vaddr = i915_gem_object_pin_map(obj, - i915_coherent_map_type(h->i915)); - if (IS_ERR(vaddr)) { - i915_gem_object_put(obj); - return ERR_CAST(vaddr); - } - - i915_gem_object_unpin_map(h->obj); - i915_gem_object_put(h->obj); - - h->obj = obj; - h->batch = vaddr; - } - - vma = i915_vma_instance(h->obj, vm, NULL); - if (IS_ERR(vma)) - return ERR_CAST(vma); - - hws = i915_vma_instance(h->hws, vm, NULL); - if (IS_ERR(hws)) - return ERR_CAST(hws); - - err = i915_vma_pin(vma, 0, 0, PIN_USER); - if (err) - return ERR_PTR(err); - - err = i915_vma_pin(hws, 0, 0, PIN_USER); - if (err) - goto unpin_vma; - - rq = i915_request_alloc(engine, h->ctx); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto unpin_hws; - } - - err = move_to_active(vma, rq, 0); - if (err) - goto cancel_rq; - - err = move_to_active(hws, rq, 0); - if (err) - goto cancel_rq; - - batch = h->batch; - if (INTEL_GEN(i915) >= 8) { - *batch++ = MI_STORE_DWORD_IMM_GEN4; - *batch++ = lower_32_bits(hws_address(hws, rq)); - *batch++ = upper_32_bits(hws_address(hws, rq)); - *batch++ = rq->fence.seqno; - *batch++ = MI_ARB_CHECK; - - memset(batch, 0, 1024); - batch += 1024 / sizeof(*batch); - - *batch++ = MI_ARB_CHECK; - *batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1; - *batch++ = lower_32_bits(vma->node.start); - *batch++ = upper_32_bits(vma->node.start); - } else if (INTEL_GEN(i915) >= 6) { - *batch++ = MI_STORE_DWORD_IMM_GEN4; - *batch++ = 0; - *batch++ = lower_32_bits(hws_address(hws, rq)); - *batch++ = rq->fence.seqno; - *batch++ = MI_ARB_CHECK; - - memset(batch, 0, 1024); - batch += 1024 / sizeof(*batch); - - *batch++ = MI_ARB_CHECK; - *batch++ = MI_BATCH_BUFFER_START | 1 << 8; - *batch++ = lower_32_bits(vma->node.start); - } else if (INTEL_GEN(i915) >= 4) { - *batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; - *batch++ = 0; - *batch++ = lower_32_bits(hws_address(hws, rq)); - *batch++ = rq->fence.seqno; - *batch++ = MI_ARB_CHECK; - - memset(batch, 0, 1024); - batch += 1024 / sizeof(*batch); - - *batch++ = MI_ARB_CHECK; - *batch++ = MI_BATCH_BUFFER_START | 2 << 6; - *batch++ = lower_32_bits(vma->node.start); - } else { - *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL; - *batch++ = lower_32_bits(hws_address(hws, rq)); - *batch++ = rq->fence.seqno; - *batch++ = MI_ARB_CHECK; - - memset(batch, 0, 1024); - batch += 1024 / sizeof(*batch); - - *batch++ = MI_ARB_CHECK; - *batch++ = MI_BATCH_BUFFER_START | 2 << 6; - *batch++ = lower_32_bits(vma->node.start); - } - *batch++ = MI_BATCH_BUFFER_END; /* not reached */ - i915_gem_chipset_flush(h->i915); - - if (rq->engine->emit_init_breadcrumb) { - err = rq->engine->emit_init_breadcrumb(rq); - if (err) - goto cancel_rq; - } - - flags = 0; - if (INTEL_GEN(vm->i915) <= 5) - flags |= I915_DISPATCH_SECURE; - - err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags); - -cancel_rq: - if (err) { - i915_request_skip(rq, err); - i915_request_add(rq); - } -unpin_hws: - i915_vma_unpin(hws); -unpin_vma: - i915_vma_unpin(vma); - return err ? 
ERR_PTR(err) : rq; -} - -static u32 hws_seqno(const struct hang *h, const struct i915_request *rq) -{ - return READ_ONCE(h->seqno[rq->fence.context % (PAGE_SIZE/sizeof(u32))]); -} - -static void hang_fini(struct hang *h) -{ - *h->batch = MI_BATCH_BUFFER_END; - i915_gem_chipset_flush(h->i915); - - i915_gem_object_unpin_map(h->obj); - i915_gem_object_put(h->obj); - - i915_gem_object_unpin_map(h->hws); - i915_gem_object_put(h->hws); - - kernel_context_close(h->ctx); - - igt_flush_test(h->i915, I915_WAIT_LOCKED); -} - -static bool wait_until_running(struct hang *h, struct i915_request *rq) -{ - return !(wait_for_us(i915_seqno_passed(hws_seqno(h, rq), - rq->fence.seqno), - 10) && - wait_for(i915_seqno_passed(hws_seqno(h, rq), - rq->fence.seqno), - 1000)); -} - -static int igt_hang_sanitycheck(void *arg) -{ - struct drm_i915_private *i915 = arg; - struct i915_request *rq; - struct intel_engine_cs *engine; - enum intel_engine_id id; - struct hang h; - int err; - - /* Basic check that we can execute our hanging batch */ - - mutex_lock(&i915->drm.struct_mutex); - err = hang_init(&h, i915); - if (err) - goto unlock; - - for_each_engine(engine, i915, id) { - struct igt_wedge_me w; - long timeout; - - if (!intel_engine_can_store_dword(engine)) - continue; - - rq = hang_create_request(&h, engine); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - pr_err("Failed to create request for %s, err=%d\n", - engine->name, err); - goto fini; - } - - i915_request_get(rq); - - *h.batch = MI_BATCH_BUFFER_END; - i915_gem_chipset_flush(i915); - - i915_request_add(rq); - - timeout = 0; - igt_wedge_on_timeout(&w, i915, HZ / 10 /* 100ms timeout*/) - timeout = i915_request_wait(rq, - I915_WAIT_LOCKED, - MAX_SCHEDULE_TIMEOUT); - if (i915_reset_failed(i915)) - timeout = -EIO; - - i915_request_put(rq); - - if (timeout < 0) { - err = timeout; - pr_err("Wait for request failed on %s, err=%d\n", - engine->name, err); - goto fini; - } - } - -fini: - hang_fini(&h); -unlock: - mutex_unlock(&i915->drm.struct_mutex); - return err; -} - -static int igt_global_reset(void *arg) -{ - struct drm_i915_private *i915 = arg; - unsigned int reset_count; - int err = 0; - - /* Check that we can issue a global GPU reset */ - - igt_global_reset_lock(i915); - - reset_count = i915_reset_count(&i915->gpu_error); - - i915_reset(i915, ALL_ENGINES, NULL); - - if (i915_reset_count(&i915->gpu_error) == reset_count) { - pr_err("No GPU reset recorded!\n"); - err = -EINVAL; - } - - igt_global_reset_unlock(i915); - - if (i915_reset_failed(i915)) - err = -EIO; - - return err; -} - -static int igt_wedged_reset(void *arg) -{ - struct drm_i915_private *i915 = arg; - intel_wakeref_t wakeref; - - /* Check that we can recover a wedged device with a GPU reset */ - - igt_global_reset_lock(i915); - wakeref = intel_runtime_pm_get(i915); - - i915_gem_set_wedged(i915); - - GEM_BUG_ON(!i915_reset_failed(i915)); - i915_reset(i915, ALL_ENGINES, NULL); - - intel_runtime_pm_put(i915, wakeref); - igt_global_reset_unlock(i915); - - return i915_reset_failed(i915) ? 
-EIO : 0; -} - -static bool wait_for_idle(struct intel_engine_cs *engine) -{ - return wait_for(intel_engine_is_idle(engine), IGT_IDLE_TIMEOUT) == 0; -} - -static int igt_reset_nop(void *arg) -{ - struct drm_i915_private *i915 = arg; - struct intel_engine_cs *engine; - struct i915_gem_context *ctx; - unsigned int reset_count, count; - enum intel_engine_id id; - intel_wakeref_t wakeref; - struct drm_file *file; - IGT_TIMEOUT(end_time); - int err = 0; - - /* Check that we can reset during non-user portions of requests */ - - file = mock_file(i915); - if (IS_ERR(file)) - return PTR_ERR(file); - - mutex_lock(&i915->drm.struct_mutex); - ctx = live_context(i915, file); - mutex_unlock(&i915->drm.struct_mutex); - if (IS_ERR(ctx)) { - err = PTR_ERR(ctx); - goto out; - } - - i915_gem_context_clear_bannable(ctx); - wakeref = intel_runtime_pm_get(i915); - reset_count = i915_reset_count(&i915->gpu_error); - count = 0; - do { - mutex_lock(&i915->drm.struct_mutex); - for_each_engine(engine, i915, id) { - int i; - - for (i = 0; i < 16; i++) { - struct i915_request *rq; - - rq = i915_request_alloc(engine, ctx); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - break; - } - - i915_request_add(rq); - } - } - mutex_unlock(&i915->drm.struct_mutex); - - igt_global_reset_lock(i915); - i915_reset(i915, ALL_ENGINES, NULL); - igt_global_reset_unlock(i915); - if (i915_reset_failed(i915)) { - err = -EIO; - break; - } - - if (i915_reset_count(&i915->gpu_error) != - reset_count + ++count) { - pr_err("Full GPU reset not recorded!\n"); - err = -EINVAL; - break; - } - - if (!i915_reset_flush(i915)) { - struct drm_printer p = - drm_info_printer(i915->drm.dev); - - pr_err("%s failed to idle after reset\n", - engine->name); - intel_engine_dump(engine, &p, - "%s\n", engine->name); - - err = -EIO; - break; - } - - err = igt_flush_test(i915, 0); - if (err) - break; - } while (time_before(jiffies, end_time)); - pr_info("%s: %d resets\n", __func__, count); - - mutex_lock(&i915->drm.struct_mutex); - err = igt_flush_test(i915, I915_WAIT_LOCKED); - mutex_unlock(&i915->drm.struct_mutex); - - intel_runtime_pm_put(i915, wakeref); - -out: - mock_file_free(i915, file); - if (i915_reset_failed(i915)) - err = -EIO; - return err; -} - -static int igt_reset_nop_engine(void *arg) -{ - struct drm_i915_private *i915 = arg; - struct intel_engine_cs *engine; - struct i915_gem_context *ctx; - enum intel_engine_id id; - intel_wakeref_t wakeref; - struct drm_file *file; - int err = 0; - - /* Check that we can engine-reset during non-user portions */ - - if (!intel_has_reset_engine(i915)) - return 0; - - file = mock_file(i915); - if (IS_ERR(file)) - return PTR_ERR(file); - - mutex_lock(&i915->drm.struct_mutex); - ctx = live_context(i915, file); - mutex_unlock(&i915->drm.struct_mutex); - if (IS_ERR(ctx)) { - err = PTR_ERR(ctx); - goto out; - } - - i915_gem_context_clear_bannable(ctx); - wakeref = intel_runtime_pm_get(i915); - for_each_engine(engine, i915, id) { - unsigned int reset_count, reset_engine_count; - unsigned int count; - IGT_TIMEOUT(end_time); - - reset_count = i915_reset_count(&i915->gpu_error); - reset_engine_count = i915_reset_engine_count(&i915->gpu_error, - engine); - count = 0; - - set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags); - do { - int i; - - if (!wait_for_idle(engine)) { - pr_err("%s failed to idle before reset\n", - engine->name); - err = -EIO; - break; - } - - mutex_lock(&i915->drm.struct_mutex); - for (i = 0; i < 16; i++) { - struct i915_request *rq; - - rq = i915_request_alloc(engine, ctx); - if (IS_ERR(rq)) { - err = 
PTR_ERR(rq); - break; - } - - i915_request_add(rq); - } - mutex_unlock(&i915->drm.struct_mutex); - - err = i915_reset_engine(engine, NULL); - if (err) { - pr_err("i915_reset_engine failed\n"); - break; - } - - if (i915_reset_count(&i915->gpu_error) != reset_count) { - pr_err("Full GPU reset recorded! (engine reset expected)\n"); - err = -EINVAL; - break; - } - - if (i915_reset_engine_count(&i915->gpu_error, engine) != - reset_engine_count + ++count) { - pr_err("%s engine reset not recorded!\n", - engine->name); - err = -EINVAL; - break; - } - - if (!i915_reset_flush(i915)) { - struct drm_printer p = - drm_info_printer(i915->drm.dev); - - pr_err("%s failed to idle after reset\n", - engine->name); - intel_engine_dump(engine, &p, - "%s\n", engine->name); - - err = -EIO; - break; - } - } while (time_before(jiffies, end_time)); - clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags); - pr_info("%s(%s): %d resets\n", __func__, engine->name, count); - - if (err) - break; - - err = igt_flush_test(i915, 0); - if (err) - break; - } - - mutex_lock(&i915->drm.struct_mutex); - err = igt_flush_test(i915, I915_WAIT_LOCKED); - mutex_unlock(&i915->drm.struct_mutex); - - intel_runtime_pm_put(i915, wakeref); -out: - mock_file_free(i915, file); - if (i915_reset_failed(i915)) - err = -EIO; - return err; -} - -static int __igt_reset_engine(struct drm_i915_private *i915, bool active) -{ - struct intel_engine_cs *engine; - enum intel_engine_id id; - struct hang h; - int err = 0; - - /* Check that we can issue an engine reset on an idle engine (no-op) */ - - if (!intel_has_reset_engine(i915)) - return 0; - - if (active) { - mutex_lock(&i915->drm.struct_mutex); - err = hang_init(&h, i915); - mutex_unlock(&i915->drm.struct_mutex); - if (err) - return err; - } - - for_each_engine(engine, i915, id) { - unsigned int reset_count, reset_engine_count; - IGT_TIMEOUT(end_time); - - if (active && !intel_engine_can_store_dword(engine)) - continue; - - if (!wait_for_idle(engine)) { - pr_err("%s failed to idle before reset\n", - engine->name); - err = -EIO; - break; - } - - reset_count = i915_reset_count(&i915->gpu_error); - reset_engine_count = i915_reset_engine_count(&i915->gpu_error, - engine); - - set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags); - do { - if (active) { - struct i915_request *rq; - - mutex_lock(&i915->drm.struct_mutex); - rq = hang_create_request(&h, engine); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - mutex_unlock(&i915->drm.struct_mutex); - break; - } - - i915_request_get(rq); - i915_request_add(rq); - mutex_unlock(&i915->drm.struct_mutex); - - if (!wait_until_running(&h, rq)) { - struct drm_printer p = drm_info_printer(i915->drm.dev); - - pr_err("%s: Failed to start request %llx, at %x\n", - __func__, rq->fence.seqno, hws_seqno(&h, rq)); - intel_engine_dump(engine, &p, - "%s\n", engine->name); - - i915_request_put(rq); - err = -EIO; - break; - } - - i915_request_put(rq); - } - - err = i915_reset_engine(engine, NULL); - if (err) { - pr_err("i915_reset_engine failed\n"); - break; - } - - if (i915_reset_count(&i915->gpu_error) != reset_count) { - pr_err("Full GPU reset recorded! 
(engine reset expected)\n"); - err = -EINVAL; - break; - } - - if (i915_reset_engine_count(&i915->gpu_error, engine) != - ++reset_engine_count) { - pr_err("%s engine reset not recorded!\n", - engine->name); - err = -EINVAL; - break; - } - - if (!i915_reset_flush(i915)) { - struct drm_printer p = - drm_info_printer(i915->drm.dev); - - pr_err("%s failed to idle after reset\n", - engine->name); - intel_engine_dump(engine, &p, - "%s\n", engine->name); - - err = -EIO; - break; - } - } while (time_before(jiffies, end_time)); - clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags); - - if (err) - break; - - err = igt_flush_test(i915, 0); - if (err) - break; - } - - if (i915_reset_failed(i915)) - err = -EIO; - - if (active) { - mutex_lock(&i915->drm.struct_mutex); - hang_fini(&h); - mutex_unlock(&i915->drm.struct_mutex); - } - - return err; -} - -static int igt_reset_idle_engine(void *arg) -{ - return __igt_reset_engine(arg, false); -} - -static int igt_reset_active_engine(void *arg) -{ - return __igt_reset_engine(arg, true); -} - -struct active_engine { - struct task_struct *task; - struct intel_engine_cs *engine; - unsigned long resets; - unsigned int flags; -}; - -#define TEST_ACTIVE BIT(0) -#define TEST_OTHERS BIT(1) -#define TEST_SELF BIT(2) -#define TEST_PRIORITY BIT(3) - -static int active_request_put(struct i915_request *rq) -{ - int err = 0; - - if (!rq) - return 0; - - if (i915_request_wait(rq, 0, 5 * HZ) < 0) { - GEM_TRACE("%s timed out waiting for completion of fence %llx:%lld\n", - rq->engine->name, - rq->fence.context, - rq->fence.seqno); - GEM_TRACE_DUMP(); - - i915_gem_set_wedged(rq->i915); - err = -EIO; - } - - i915_request_put(rq); - - return err; -} - -static int active_engine(void *data) -{ - I915_RND_STATE(prng); - struct active_engine *arg = data; - struct intel_engine_cs *engine = arg->engine; - struct i915_request *rq[8] = {}; - struct i915_gem_context *ctx[ARRAY_SIZE(rq)]; - struct drm_file *file; - unsigned long count = 0; - int err = 0; - - file = mock_file(engine->i915); - if (IS_ERR(file)) - return PTR_ERR(file); - - for (count = 0; count < ARRAY_SIZE(ctx); count++) { - mutex_lock(&engine->i915->drm.struct_mutex); - ctx[count] = live_context(engine->i915, file); - mutex_unlock(&engine->i915->drm.struct_mutex); - if (IS_ERR(ctx[count])) { - err = PTR_ERR(ctx[count]); - while (--count) - i915_gem_context_put(ctx[count]); - goto err_file; - } - } - - while (!kthread_should_stop()) { - unsigned int idx = count++ & (ARRAY_SIZE(rq) - 1); - struct i915_request *old = rq[idx]; - struct i915_request *new; - - mutex_lock(&engine->i915->drm.struct_mutex); - new = i915_request_alloc(engine, ctx[idx]); - if (IS_ERR(new)) { - mutex_unlock(&engine->i915->drm.struct_mutex); - err = PTR_ERR(new); - break; - } - - if (arg->flags & TEST_PRIORITY) - ctx[idx]->sched.priority = - i915_prandom_u32_max_state(512, &prng); - - rq[idx] = i915_request_get(new); - i915_request_add(new); - mutex_unlock(&engine->i915->drm.struct_mutex); - - err = active_request_put(old); - if (err) - break; - - cond_resched(); - } - - for (count = 0; count < ARRAY_SIZE(rq); count++) { - int err__ = active_request_put(rq[count]); - - /* Keep the first error */ - if (!err) - err = err__; - } - -err_file: - mock_file_free(engine->i915, file); - return err; -} - -static int __igt_reset_engines(struct drm_i915_private *i915, - const char *test_name, - unsigned int flags) -{ - struct intel_engine_cs *engine, *other; - enum intel_engine_id id, tmp; - struct hang h; - int err = 0; - - /* Check that issuing a reset on 
one engine does not interfere - * with any other engine. - */ - - if (!intel_has_reset_engine(i915)) - return 0; - - if (flags & TEST_ACTIVE) { - mutex_lock(&i915->drm.struct_mutex); - err = hang_init(&h, i915); - mutex_unlock(&i915->drm.struct_mutex); - if (err) - return err; - - if (flags & TEST_PRIORITY) - h.ctx->sched.priority = 1024; - } - - for_each_engine(engine, i915, id) { - struct active_engine threads[I915_NUM_ENGINES] = {}; - unsigned long global = i915_reset_count(&i915->gpu_error); - unsigned long count = 0, reported; - IGT_TIMEOUT(end_time); - - if (flags & TEST_ACTIVE && - !intel_engine_can_store_dword(engine)) - continue; - - if (!wait_for_idle(engine)) { - pr_err("i915_reset_engine(%s:%s): failed to idle before reset\n", - engine->name, test_name); - err = -EIO; - break; - } - - memset(threads, 0, sizeof(threads)); - for_each_engine(other, i915, tmp) { - struct task_struct *tsk; - - threads[tmp].resets = - i915_reset_engine_count(&i915->gpu_error, - other); - - if (!(flags & TEST_OTHERS)) - continue; - - if (other == engine && !(flags & TEST_SELF)) - continue; - - threads[tmp].engine = other; - threads[tmp].flags = flags; - - tsk = kthread_run(active_engine, &threads[tmp], - "igt/%s", other->name); - if (IS_ERR(tsk)) { - err = PTR_ERR(tsk); - goto unwind; - } - - threads[tmp].task = tsk; - get_task_struct(tsk); - } - - set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags); - do { - struct i915_request *rq = NULL; - - if (flags & TEST_ACTIVE) { - mutex_lock(&i915->drm.struct_mutex); - rq = hang_create_request(&h, engine); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - mutex_unlock(&i915->drm.struct_mutex); - break; - } - - i915_request_get(rq); - i915_request_add(rq); - mutex_unlock(&i915->drm.struct_mutex); - - if (!wait_until_running(&h, rq)) { - struct drm_printer p = drm_info_printer(i915->drm.dev); - - pr_err("%s: Failed to start request %llx, at %x\n", - __func__, rq->fence.seqno, hws_seqno(&h, rq)); - intel_engine_dump(engine, &p, - "%s\n", engine->name); - - i915_request_put(rq); - err = -EIO; - break; - } - } - - err = i915_reset_engine(engine, NULL); - if (err) { - pr_err("i915_reset_engine(%s:%s): failed, err=%d\n", - engine->name, test_name, err); - break; - } - - count++; - - if (rq) { - if (i915_request_wait(rq, 0, HZ / 5) < 0) { - struct drm_printer p = - drm_info_printer(i915->drm.dev); - - pr_err("i915_reset_engine(%s:%s):" - " failed to complete request after reset\n", - engine->name, test_name); - intel_engine_dump(engine, &p, - "%s\n", engine->name); - i915_request_put(rq); - - GEM_TRACE_DUMP(); - i915_gem_set_wedged(i915); - err = -EIO; - break; - } - - i915_request_put(rq); - } - - if (!(flags & TEST_SELF) && !wait_for_idle(engine)) { - struct drm_printer p = - drm_info_printer(i915->drm.dev); - - pr_err("i915_reset_engine(%s:%s):" - " failed to idle after reset\n", - engine->name, test_name); - intel_engine_dump(engine, &p, - "%s\n", engine->name); - - err = -EIO; - break; - } - } while (time_before(jiffies, end_time)); - clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags); - pr_info("i915_reset_engine(%s:%s): %lu resets\n", - engine->name, test_name, count); - - reported = i915_reset_engine_count(&i915->gpu_error, engine); - reported -= threads[engine->id].resets; - if (reported != count) { - pr_err("i915_reset_engine(%s:%s): reset %lu times, but reported %lu\n", - engine->name, test_name, count, reported); - if (!err) - err = -EINVAL; - } - -unwind: - for_each_engine(other, i915, tmp) { - int ret; - - if (!threads[tmp].task) - continue; - - ret 
= kthread_stop(threads[tmp].task); - if (ret) { - pr_err("kthread for other engine %s failed, err=%d\n", - other->name, ret); - if (!err) - err = ret; - } - put_task_struct(threads[tmp].task); - - if (other != engine && - threads[tmp].resets != - i915_reset_engine_count(&i915->gpu_error, other)) { - pr_err("Innocent engine %s was reset (count=%ld)\n", - other->name, - i915_reset_engine_count(&i915->gpu_error, - other) - - threads[tmp].resets); - if (!err) - err = -EINVAL; - } - } - - if (global != i915_reset_count(&i915->gpu_error)) { - pr_err("Global reset (count=%ld)!\n", - i915_reset_count(&i915->gpu_error) - global); - if (!err) - err = -EINVAL; - } - - if (err) - break; - - err = igt_flush_test(i915, 0); - if (err) - break; - } - - if (i915_reset_failed(i915)) - err = -EIO; - - if (flags & TEST_ACTIVE) { - mutex_lock(&i915->drm.struct_mutex); - hang_fini(&h); - mutex_unlock(&i915->drm.struct_mutex); - } - - return err; -} - -static int igt_reset_engines(void *arg) -{ - static const struct { - const char *name; - unsigned int flags; - } phases[] = { - { "idle", 0 }, - { "active", TEST_ACTIVE }, - { "others-idle", TEST_OTHERS }, - { "others-active", TEST_OTHERS | TEST_ACTIVE }, - { - "others-priority", - TEST_OTHERS | TEST_ACTIVE | TEST_PRIORITY - }, - { - "self-priority", - TEST_OTHERS | TEST_ACTIVE | TEST_PRIORITY | TEST_SELF, - }, - { } - }; - struct drm_i915_private *i915 = arg; - typeof(*phases) *p; - int err; - - for (p = phases; p->name; p++) { - if (p->flags & TEST_PRIORITY) { - if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY)) - continue; - } - - err = __igt_reset_engines(arg, p->name, p->flags); - if (err) - return err; - } - - return 0; -} - -static u32 fake_hangcheck(struct drm_i915_private *i915, - intel_engine_mask_t mask) -{ - u32 count = i915_reset_count(&i915->gpu_error); - - i915_reset(i915, mask, NULL); - - return count; -} - -static int igt_reset_wait(void *arg) -{ - struct drm_i915_private *i915 = arg; - struct i915_request *rq; - unsigned int reset_count; - struct hang h; - long timeout; - int err; - - if (!intel_engine_can_store_dword(i915->engine[RCS0])) - return 0; - - /* Check that we detect a stuck waiter and issue a reset */ - - igt_global_reset_lock(i915); - - mutex_lock(&i915->drm.struct_mutex); - err = hang_init(&h, i915); - if (err) - goto unlock; - - rq = hang_create_request(&h, i915->engine[RCS0]); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto fini; - } - - i915_request_get(rq); - i915_request_add(rq); - - if (!wait_until_running(&h, rq)) { - struct drm_printer p = drm_info_printer(i915->drm.dev); - - pr_err("%s: Failed to start request %llx, at %x\n", - __func__, rq->fence.seqno, hws_seqno(&h, rq)); - intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name); - - i915_gem_set_wedged(i915); - - err = -EIO; - goto out_rq; - } - - reset_count = fake_hangcheck(i915, ALL_ENGINES); - - timeout = i915_request_wait(rq, I915_WAIT_LOCKED, 10); - if (timeout < 0) { - pr_err("i915_request_wait failed on a stuck request: err=%ld\n", - timeout); - err = timeout; - goto out_rq; - } - - if (i915_reset_count(&i915->gpu_error) == reset_count) { - pr_err("No GPU reset recorded!\n"); - err = -EINVAL; - goto out_rq; - } - -out_rq: - i915_request_put(rq); -fini: - hang_fini(&h); -unlock: - mutex_unlock(&i915->drm.struct_mutex); - igt_global_reset_unlock(i915); - - if (i915_reset_failed(i915)) - return -EIO; - - return err; -} - -struct evict_vma { - struct completion completion; - struct i915_vma *vma; -}; - -static int evict_vma(void *data) -{ - struct 
evict_vma *arg = data; - struct i915_address_space *vm = arg->vma->vm; - struct drm_i915_private *i915 = vm->i915; - struct drm_mm_node evict = arg->vma->node; - int err; - - complete(&arg->completion); - - mutex_lock(&i915->drm.struct_mutex); - err = i915_gem_evict_for_node(vm, &evict, 0); - mutex_unlock(&i915->drm.struct_mutex); - - return err; -} - -static int evict_fence(void *data) -{ - struct evict_vma *arg = data; - struct drm_i915_private *i915 = arg->vma->vm->i915; - int err; - - complete(&arg->completion); - - mutex_lock(&i915->drm.struct_mutex); - - /* Mark the fence register as dirty to force the mmio update. */ - err = i915_gem_object_set_tiling(arg->vma->obj, I915_TILING_Y, 512); - if (err) { - pr_err("Invalid Y-tiling settings; err:%d\n", err); - goto out_unlock; - } - - err = i915_vma_pin_fence(arg->vma); - if (err) { - pr_err("Unable to pin Y-tiled fence; err:%d\n", err); - goto out_unlock; - } - - i915_vma_unpin_fence(arg->vma); - -out_unlock: - mutex_unlock(&i915->drm.struct_mutex); - - return err; -} - -static int __igt_reset_evict_vma(struct drm_i915_private *i915, - struct i915_address_space *vm, - int (*fn)(void *), - unsigned int flags) -{ - struct drm_i915_gem_object *obj; - struct task_struct *tsk = NULL; - struct i915_request *rq; - struct evict_vma arg; - struct hang h; - int err; - - if (!intel_engine_can_store_dword(i915->engine[RCS0])) - return 0; - - /* Check that we can recover an unbind stuck on a hanging request */ - - mutex_lock(&i915->drm.struct_mutex); - err = hang_init(&h, i915); - if (err) - goto unlock; - - obj = i915_gem_object_create_internal(i915, SZ_1M); - if (IS_ERR(obj)) { - err = PTR_ERR(obj); - goto fini; - } - - if (flags & EXEC_OBJECT_NEEDS_FENCE) { - err = i915_gem_object_set_tiling(obj, I915_TILING_X, 512); - if (err) { - pr_err("Invalid X-tiling settings; err:%d\n", err); - goto out_obj; - } - } - - arg.vma = i915_vma_instance(obj, vm, NULL); - if (IS_ERR(arg.vma)) { - err = PTR_ERR(arg.vma); - goto out_obj; - } - - rq = hang_create_request(&h, i915->engine[RCS0]); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto out_obj; - } - - err = i915_vma_pin(arg.vma, 0, 0, - i915_vma_is_ggtt(arg.vma) ? 
- PIN_GLOBAL | PIN_MAPPABLE : - PIN_USER); - if (err) { - i915_request_add(rq); - goto out_obj; - } - - if (flags & EXEC_OBJECT_NEEDS_FENCE) { - err = i915_vma_pin_fence(arg.vma); - if (err) { - pr_err("Unable to pin X-tiled fence; err:%d\n", err); - i915_vma_unpin(arg.vma); - i915_request_add(rq); - goto out_obj; - } - } - - err = i915_vma_move_to_active(arg.vma, rq, flags); - - if (flags & EXEC_OBJECT_NEEDS_FENCE) - i915_vma_unpin_fence(arg.vma); - i915_vma_unpin(arg.vma); - - i915_request_get(rq); - i915_request_add(rq); - if (err) - goto out_rq; - - mutex_unlock(&i915->drm.struct_mutex); - - if (!wait_until_running(&h, rq)) { - struct drm_printer p = drm_info_printer(i915->drm.dev); - - pr_err("%s: Failed to start request %llx, at %x\n", - __func__, rq->fence.seqno, hws_seqno(&h, rq)); - intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name); - - i915_gem_set_wedged(i915); - goto out_reset; - } - - init_completion(&arg.completion); - - tsk = kthread_run(fn, &arg, "igt/evict_vma"); - if (IS_ERR(tsk)) { - err = PTR_ERR(tsk); - tsk = NULL; - goto out_reset; - } - get_task_struct(tsk); - - wait_for_completion(&arg.completion); - - if (wait_for(!list_empty(&rq->fence.cb_list), 10)) { - struct drm_printer p = drm_info_printer(i915->drm.dev); - - pr_err("igt/evict_vma kthread did not wait\n"); - intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name); - - i915_gem_set_wedged(i915); - goto out_reset; - } - -out_reset: - igt_global_reset_lock(i915); - fake_hangcheck(rq->i915, rq->engine->mask); - igt_global_reset_unlock(i915); - - if (tsk) { - struct igt_wedge_me w; - - /* The reset, even indirectly, should take less than 10ms. */ - igt_wedge_on_timeout(&w, i915, HZ / 10 /* 100ms timeout*/) - err = kthread_stop(tsk); - - put_task_struct(tsk); - } - - mutex_lock(&i915->drm.struct_mutex); -out_rq: - i915_request_put(rq); -out_obj: - i915_gem_object_put(obj); -fini: - hang_fini(&h); -unlock: - mutex_unlock(&i915->drm.struct_mutex); - - if (i915_reset_failed(i915)) - return -EIO; - - return err; -} - -static int igt_reset_evict_ggtt(void *arg) -{ - struct drm_i915_private *i915 = arg; - - return __igt_reset_evict_vma(i915, &i915->ggtt.vm, - evict_vma, EXEC_OBJECT_WRITE); -} - -static int igt_reset_evict_ppgtt(void *arg) -{ - struct drm_i915_private *i915 = arg; - struct i915_gem_context *ctx; - struct drm_file *file; - int err; - - file = mock_file(i915); - if (IS_ERR(file)) - return PTR_ERR(file); - - mutex_lock(&i915->drm.struct_mutex); - ctx = live_context(i915, file); - mutex_unlock(&i915->drm.struct_mutex); - if (IS_ERR(ctx)) { - err = PTR_ERR(ctx); - goto out; - } - - err = 0; - if (ctx->ppgtt) /* aliasing == global gtt locking, covered above */ - err = __igt_reset_evict_vma(i915, &ctx->ppgtt->vm, - evict_vma, EXEC_OBJECT_WRITE); - -out: - mock_file_free(i915, file); - return err; -} - -static int igt_reset_evict_fence(void *arg) -{ - struct drm_i915_private *i915 = arg; - - return __igt_reset_evict_vma(i915, &i915->ggtt.vm, - evict_fence, EXEC_OBJECT_NEEDS_FENCE); -} - -static int wait_for_others(struct drm_i915_private *i915, - struct intel_engine_cs *exclude) -{ - struct intel_engine_cs *engine; - enum intel_engine_id id; - - for_each_engine(engine, i915, id) { - if (engine == exclude) - continue; - - if (!wait_for_idle(engine)) - return -EIO; - } - - return 0; -} - -static int igt_reset_queue(void *arg) -{ - struct drm_i915_private *i915 = arg; - struct intel_engine_cs *engine; - enum intel_engine_id id; - struct hang h; - int err; - - /* Check that we replay pending requests 
following a hang */ - - igt_global_reset_lock(i915); - - mutex_lock(&i915->drm.struct_mutex); - err = hang_init(&h, i915); - if (err) - goto unlock; - - for_each_engine(engine, i915, id) { - struct i915_request *prev; - IGT_TIMEOUT(end_time); - unsigned int count; - - if (!intel_engine_can_store_dword(engine)) - continue; - - prev = hang_create_request(&h, engine); - if (IS_ERR(prev)) { - err = PTR_ERR(prev); - goto fini; - } - - i915_request_get(prev); - i915_request_add(prev); - - count = 0; - do { - struct i915_request *rq; - unsigned int reset_count; - - rq = hang_create_request(&h, engine); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto fini; - } - - i915_request_get(rq); - i915_request_add(rq); - - /* - * XXX We don't handle resetting the kernel context - * very well. If we trigger a device reset twice in - * quick succession while the kernel context is - * executing, we may end up skipping the breadcrumb. - * This is really only a problem for the selftest as - * normally there is a large interlude between resets - * (hangcheck), or we focus on resetting just one - * engine and so avoid repeatedly resetting innocents. - */ - err = wait_for_others(i915, engine); - if (err) { - pr_err("%s(%s): Failed to idle other inactive engines after device reset\n", - __func__, engine->name); - i915_request_put(rq); - i915_request_put(prev); - - GEM_TRACE_DUMP(); - i915_gem_set_wedged(i915); - goto fini; - } - - if (!wait_until_running(&h, prev)) { - struct drm_printer p = drm_info_printer(i915->drm.dev); - - pr_err("%s(%s): Failed to start request %llx, at %x\n", - __func__, engine->name, - prev->fence.seqno, hws_seqno(&h, prev)); - intel_engine_dump(engine, &p, - "%s\n", engine->name); - - i915_request_put(rq); - i915_request_put(prev); - - i915_gem_set_wedged(i915); - - err = -EIO; - goto fini; - } - - reset_count = fake_hangcheck(i915, BIT(id)); - - if (prev->fence.error != -EIO) { - pr_err("GPU reset not recorded on hanging request [fence.error=%d]!\n", - prev->fence.error); - i915_request_put(rq); - i915_request_put(prev); - err = -EINVAL; - goto fini; - } - - if (rq->fence.error) { - pr_err("Fence error status not zero [%d] after unrelated reset\n", - rq->fence.error); - i915_request_put(rq); - i915_request_put(prev); - err = -EINVAL; - goto fini; - } - - if (i915_reset_count(&i915->gpu_error) == reset_count) { - pr_err("No GPU reset recorded!\n"); - i915_request_put(rq); - i915_request_put(prev); - err = -EINVAL; - goto fini; - } - - i915_request_put(prev); - prev = rq; - count++; - } while (time_before(jiffies, end_time)); - pr_info("%s: Completed %d resets\n", engine->name, count); - - *h.batch = MI_BATCH_BUFFER_END; - i915_gem_chipset_flush(i915); - - i915_request_put(prev); - - err = igt_flush_test(i915, I915_WAIT_LOCKED); - if (err) - break; - } - -fini: - hang_fini(&h); -unlock: - mutex_unlock(&i915->drm.struct_mutex); - igt_global_reset_unlock(i915); - - if (i915_reset_failed(i915)) - return -EIO; - - return err; -} - -static int igt_handle_error(void *arg) -{ - struct drm_i915_private *i915 = arg; - struct intel_engine_cs *engine = i915->engine[RCS0]; - struct hang h; - struct i915_request *rq; - struct i915_gpu_state *error; - int err; - - /* Check that we can issue a global GPU and engine reset */ - - if (!intel_has_reset_engine(i915)) - return 0; - - if (!engine || !intel_engine_can_store_dword(engine)) - return 0; - - mutex_lock(&i915->drm.struct_mutex); - - err = hang_init(&h, i915); - if (err) - goto err_unlock; - - rq = hang_create_request(&h, engine); - if (IS_ERR(rq)) { - 
err = PTR_ERR(rq); - goto err_fini; - } - - i915_request_get(rq); - i915_request_add(rq); - - if (!wait_until_running(&h, rq)) { - struct drm_printer p = drm_info_printer(i915->drm.dev); - - pr_err("%s: Failed to start request %llx, at %x\n", - __func__, rq->fence.seqno, hws_seqno(&h, rq)); - intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name); - - i915_gem_set_wedged(i915); - - err = -EIO; - goto err_request; - } - - mutex_unlock(&i915->drm.struct_mutex); - - /* Temporarily disable error capture */ - error = xchg(&i915->gpu_error.first_error, (void *)-1); - - i915_handle_error(i915, engine->mask, 0, NULL); - - xchg(&i915->gpu_error.first_error, error); - - mutex_lock(&i915->drm.struct_mutex); - - if (rq->fence.error != -EIO) { - pr_err("Guilty request not identified!\n"); - err = -EINVAL; - goto err_request; - } - -err_request: - i915_request_put(rq); -err_fini: - hang_fini(&h); -err_unlock: - mutex_unlock(&i915->drm.struct_mutex); - return err; -} - -static void __preempt_begin(void) -{ - preempt_disable(); -} - -static void __preempt_end(void) -{ - preempt_enable(); -} - -static void __softirq_begin(void) -{ - local_bh_disable(); -} - -static void __softirq_end(void) -{ - local_bh_enable(); -} - -static void __hardirq_begin(void) -{ - local_irq_disable(); -} - -static void __hardirq_end(void) -{ - local_irq_enable(); -} - -struct atomic_section { - const char *name; - void (*critical_section_begin)(void); - void (*critical_section_end)(void); -}; - -static int __igt_atomic_reset_engine(struct intel_engine_cs *engine, - const struct atomic_section *p, - const char *mode) -{ - struct tasklet_struct * const t = &engine->execlists.tasklet; - int err; - - GEM_TRACE("i915_reset_engine(%s:%s) under %s\n", - engine->name, mode, p->name); - - tasklet_disable_nosync(t); - p->critical_section_begin(); - - err = i915_reset_engine(engine, NULL); - - p->critical_section_end(); - tasklet_enable(t); - - if (err) - pr_err("i915_reset_engine(%s:%s) failed under %s\n", - engine->name, mode, p->name); - - return err; -} - -static int igt_atomic_reset_engine(struct intel_engine_cs *engine, - const struct atomic_section *p) -{ - struct drm_i915_private *i915 = engine->i915; - struct i915_request *rq; - struct hang h; - int err; - - err = __igt_atomic_reset_engine(engine, p, "idle"); - if (err) - return err; - - err = hang_init(&h, i915); - if (err) - return err; - - rq = hang_create_request(&h, engine); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto out; - } - - i915_request_get(rq); - i915_request_add(rq); - - if (wait_until_running(&h, rq)) { - err = __igt_atomic_reset_engine(engine, p, "active"); - } else { - pr_err("%s(%s): Failed to start request %llx, at %x\n", - __func__, engine->name, - rq->fence.seqno, hws_seqno(&h, rq)); - i915_gem_set_wedged(i915); - err = -EIO; - } - - if (err == 0) { - struct igt_wedge_me w; - - igt_wedge_on_timeout(&w, i915, HZ / 20 /* 50ms timeout*/) - i915_request_wait(rq, - I915_WAIT_LOCKED, - MAX_SCHEDULE_TIMEOUT); - if (i915_reset_failed(i915)) - err = -EIO; - } - - i915_request_put(rq); -out: - hang_fini(&h); - return err; -} - -static void force_reset(struct drm_i915_private *i915) -{ - i915_gem_set_wedged(i915); - i915_reset(i915, 0, NULL); -} - -static int igt_atomic_reset(void *arg) -{ - static const struct atomic_section phases[] = { - { "preempt", __preempt_begin, __preempt_end }, - { "softirq", __softirq_begin, __softirq_end }, - { "hardirq", __hardirq_begin, __hardirq_end }, - { } - }; - struct drm_i915_private *i915 = arg; - intel_wakeref_t wakeref; - 
int err = 0; - - /* Check that the resets are usable from atomic context */ - - if (USES_GUC_SUBMISSION(i915)) - return 0; /* guc is dead; long live the guc */ - - igt_global_reset_lock(i915); - mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); - - /* Flush any requests before we get started and check basics */ - force_reset(i915); - if (i915_reset_failed(i915)) - goto unlock; - - if (intel_has_gpu_reset(i915)) { - const typeof(*phases) *p; - - for (p = phases; p->name; p++) { - GEM_TRACE("intel_gpu_reset under %s\n", p->name); - - p->critical_section_begin(); - err = intel_gpu_reset(i915, ALL_ENGINES); - p->critical_section_end(); - - if (err) { - pr_err("intel_gpu_reset failed under %s\n", - p->name); - goto out; - } - } - - force_reset(i915); - } - - if (intel_has_reset_engine(i915)) { - struct intel_engine_cs *engine; - enum intel_engine_id id; - - for_each_engine(engine, i915, id) { - const typeof(*phases) *p; - - for (p = phases; p->name; p++) { - err = igt_atomic_reset_engine(engine, p); - if (err) - goto out; - } - } - } - -out: - /* As we poke around the guts, do a full reset before continuing. */ - force_reset(i915); - -unlock: - intel_runtime_pm_put(i915, wakeref); - mutex_unlock(&i915->drm.struct_mutex); - igt_global_reset_unlock(i915); - - return err; -} - -int intel_hangcheck_live_selftests(struct drm_i915_private *i915) -{ - static const struct i915_subtest tests[] = { - SUBTEST(igt_global_reset), /* attempt to recover GPU first */ - SUBTEST(igt_wedged_reset), - SUBTEST(igt_hang_sanitycheck), - SUBTEST(igt_reset_nop), - SUBTEST(igt_reset_nop_engine), - SUBTEST(igt_reset_idle_engine), - SUBTEST(igt_reset_active_engine), - SUBTEST(igt_reset_engines), - SUBTEST(igt_reset_queue), - SUBTEST(igt_reset_wait), - SUBTEST(igt_reset_evict_ggtt), - SUBTEST(igt_reset_evict_ppgtt), - SUBTEST(igt_reset_evict_fence), - SUBTEST(igt_handle_error), - SUBTEST(igt_atomic_reset), - }; - intel_wakeref_t wakeref; - bool saved_hangcheck; - int err; - - if (!intel_has_gpu_reset(i915)) - return 0; - - if (i915_terminally_wedged(i915)) - return -EIO; /* we're long past hope of a successful reset */ - - wakeref = intel_runtime_pm_get(i915); - saved_hangcheck = fetch_and_zero(&i915_modparams.enable_hangcheck); - drain_delayed_work(&i915->gpu_error.hangcheck_work); /* flush param */ - - err = i915_subtests(tests, i915); - - mutex_lock(&i915->drm.struct_mutex); - igt_flush_test(i915, I915_WAIT_LOCKED); - mutex_unlock(&i915->drm.struct_mutex); - - i915_modparams.enable_hangcheck = saved_hangcheck; - intel_runtime_pm_put(i915, wakeref); - - return err; -} diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c deleted file mode 100644 index e8b0b5dbcb2c..000000000000 --- a/drivers/gpu/drm/i915/selftests/intel_lrc.c +++ /dev/null @@ -1,1330 +0,0 @@ -/* - * SPDX-License-Identifier: MIT - * - * Copyright © 2018 Intel Corporation - */ - -#include <linux/prime_numbers.h> - -#include "../i915_reset.h" - -#include "../i915_selftest.h" -#include "igt_flush_test.h" -#include "igt_live_test.h" -#include "igt_spinner.h" -#include "i915_random.h" - -#include "mock_context.h" - -static int live_sanitycheck(void *arg) -{ - struct drm_i915_private *i915 = arg; - struct intel_engine_cs *engine; - struct i915_gem_context *ctx; - enum intel_engine_id id; - struct igt_spinner spin; - intel_wakeref_t wakeref; - int err = -ENOMEM; - - if (!HAS_LOGICAL_RING_CONTEXTS(i915)) - return 0; - - mutex_lock(&i915->drm.struct_mutex); - wakeref = 
intel_runtime_pm_get(i915); - - if (igt_spinner_init(&spin, i915)) - goto err_unlock; - - ctx = kernel_context(i915); - if (!ctx) - goto err_spin; - - for_each_engine(engine, i915, id) { - struct i915_request *rq; - - rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto err_ctx; - } - - i915_request_add(rq); - if (!igt_wait_for_spinner(&spin, rq)) { - GEM_TRACE("spinner failed to start\n"); - GEM_TRACE_DUMP(); - i915_gem_set_wedged(i915); - err = -EIO; - goto err_ctx; - } - - igt_spinner_end(&spin); - if (igt_flush_test(i915, I915_WAIT_LOCKED)) { - err = -EIO; - goto err_ctx; - } - } - - err = 0; -err_ctx: - kernel_context_close(ctx); -err_spin: - igt_spinner_fini(&spin); -err_unlock: - igt_flush_test(i915, I915_WAIT_LOCKED); - intel_runtime_pm_put(i915, wakeref); - mutex_unlock(&i915->drm.struct_mutex); - return err; -} - -static int live_busywait_preempt(void *arg) -{ - struct drm_i915_private *i915 = arg; - struct i915_gem_context *ctx_hi, *ctx_lo; - struct intel_engine_cs *engine; - struct drm_i915_gem_object *obj; - struct i915_vma *vma; - enum intel_engine_id id; - intel_wakeref_t wakeref; - int err = -ENOMEM; - u32 *map; - - /* - * Verify that even without HAS_LOGICAL_RING_PREEMPTION, we can - * preempt the busywaits used to synchronise between rings. - */ - - mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); - - ctx_hi = kernel_context(i915); - if (!ctx_hi) - goto err_unlock; - ctx_hi->sched.priority = - I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY); - - ctx_lo = kernel_context(i915); - if (!ctx_lo) - goto err_ctx_hi; - ctx_lo->sched.priority = - I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY); - - obj = i915_gem_object_create_internal(i915, PAGE_SIZE); - if (IS_ERR(obj)) { - err = PTR_ERR(obj); - goto err_ctx_lo; - } - - map = i915_gem_object_pin_map(obj, I915_MAP_WC); - if (IS_ERR(map)) { - err = PTR_ERR(map); - goto err_obj; - } - - vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); - if (IS_ERR(vma)) { - err = PTR_ERR(vma); - goto err_map; - } - - err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL); - if (err) - goto err_map; - - for_each_engine(engine, i915, id) { - struct i915_request *lo, *hi; - struct igt_live_test t; - u32 *cs; - - if (!intel_engine_can_store_dword(engine)) - continue; - - if (igt_live_test_begin(&t, i915, __func__, engine->name)) { - err = -EIO; - goto err_vma; - } - - /* - * We create two requests. The low priority request - * busywaits on a semaphore (inside the ringbuffer where - * it should be preemptible) and the high priority request - * uses a MI_STORE_DWORD_IMM to update the semaphore value - * allowing the first request to complete. If preemption - * fails, we hang instead. - */ - - lo = i915_request_alloc(engine, ctx_lo); - if (IS_ERR(lo)) { - err = PTR_ERR(lo); - goto err_vma; - } - - cs = intel_ring_begin(lo, 8); - if (IS_ERR(cs)) { - err = PTR_ERR(cs); - i915_request_add(lo); - goto err_vma; - } - - *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; - *cs++ = i915_ggtt_offset(vma); - *cs++ = 0; - *cs++ = 1; - - /* XXX Do we need a flush + invalidate here? 
*/ - - *cs++ = MI_SEMAPHORE_WAIT | - MI_SEMAPHORE_GLOBAL_GTT | - MI_SEMAPHORE_POLL | - MI_SEMAPHORE_SAD_EQ_SDD; - *cs++ = 0; - *cs++ = i915_ggtt_offset(vma); - *cs++ = 0; - - intel_ring_advance(lo, cs); - i915_request_add(lo); - - if (wait_for(READ_ONCE(*map), 10)) { - err = -ETIMEDOUT; - goto err_vma; - } - - /* Low priority request should be busywaiting now */ - if (i915_request_wait(lo, I915_WAIT_LOCKED, 1) != -ETIME) { - pr_err("%s: Busywaiting request did not!\n", - engine->name); - err = -EIO; - goto err_vma; - } - - hi = i915_request_alloc(engine, ctx_hi); - if (IS_ERR(hi)) { - err = PTR_ERR(hi); - goto err_vma; - } - - cs = intel_ring_begin(hi, 4); - if (IS_ERR(cs)) { - err = PTR_ERR(cs); - i915_request_add(hi); - goto err_vma; - } - - *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; - *cs++ = i915_ggtt_offset(vma); - *cs++ = 0; - *cs++ = 0; - - intel_ring_advance(hi, cs); - i915_request_add(hi); - - if (i915_request_wait(lo, I915_WAIT_LOCKED, HZ / 5) < 0) { - struct drm_printer p = drm_info_printer(i915->drm.dev); - - pr_err("%s: Failed to preempt semaphore busywait!\n", - engine->name); - - intel_engine_dump(engine, &p, "%s\n", engine->name); - GEM_TRACE_DUMP(); - - i915_gem_set_wedged(i915); - err = -EIO; - goto err_vma; - } - GEM_BUG_ON(READ_ONCE(*map)); - - if (igt_live_test_end(&t)) { - err = -EIO; - goto err_vma; - } - } - - err = 0; -err_vma: - i915_vma_unpin(vma); -err_map: - i915_gem_object_unpin_map(obj); -err_obj: - i915_gem_object_put(obj); -err_ctx_lo: - kernel_context_close(ctx_lo); -err_ctx_hi: - kernel_context_close(ctx_hi); -err_unlock: - if (igt_flush_test(i915, I915_WAIT_LOCKED)) - err = -EIO; - intel_runtime_pm_put(i915, wakeref); - mutex_unlock(&i915->drm.struct_mutex); - return err; -} - -static int live_preempt(void *arg) -{ - struct drm_i915_private *i915 = arg; - struct i915_gem_context *ctx_hi, *ctx_lo; - struct igt_spinner spin_hi, spin_lo; - struct intel_engine_cs *engine; - enum intel_engine_id id; - intel_wakeref_t wakeref; - int err = -ENOMEM; - - if (!HAS_LOGICAL_RING_PREEMPTION(i915)) - return 0; - - if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION)) - pr_err("Logical preemption supported, but not exposed\n"); - - mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); - - if (igt_spinner_init(&spin_hi, i915)) - goto err_unlock; - - if (igt_spinner_init(&spin_lo, i915)) - goto err_spin_hi; - - ctx_hi = kernel_context(i915); - if (!ctx_hi) - goto err_spin_lo; - ctx_hi->sched.priority = - I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY); - - ctx_lo = kernel_context(i915); - if (!ctx_lo) - goto err_ctx_hi; - ctx_lo->sched.priority = - I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY); - - for_each_engine(engine, i915, id) { - struct igt_live_test t; - struct i915_request *rq; - - if (!intel_engine_has_preemption(engine)) - continue; - - if (igt_live_test_begin(&t, i915, __func__, engine->name)) { - err = -EIO; - goto err_ctx_lo; - } - - rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine, - MI_ARB_CHECK); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto err_ctx_lo; - } - - i915_request_add(rq); - if (!igt_wait_for_spinner(&spin_lo, rq)) { - GEM_TRACE("lo spinner failed to start\n"); - GEM_TRACE_DUMP(); - i915_gem_set_wedged(i915); - err = -EIO; - goto err_ctx_lo; - } - - rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine, - MI_ARB_CHECK); - if (IS_ERR(rq)) { - igt_spinner_end(&spin_lo); - err = PTR_ERR(rq); - goto err_ctx_lo; - } - - i915_request_add(rq); - if (!igt_wait_for_spinner(&spin_hi, rq)) { - 
GEM_TRACE("hi spinner failed to start\n"); - GEM_TRACE_DUMP(); - i915_gem_set_wedged(i915); - err = -EIO; - goto err_ctx_lo; - } - - igt_spinner_end(&spin_hi); - igt_spinner_end(&spin_lo); - - if (igt_live_test_end(&t)) { - err = -EIO; - goto err_ctx_lo; - } - } - - err = 0; -err_ctx_lo: - kernel_context_close(ctx_lo); -err_ctx_hi: - kernel_context_close(ctx_hi); -err_spin_lo: - igt_spinner_fini(&spin_lo); -err_spin_hi: - igt_spinner_fini(&spin_hi); -err_unlock: - igt_flush_test(i915, I915_WAIT_LOCKED); - intel_runtime_pm_put(i915, wakeref); - mutex_unlock(&i915->drm.struct_mutex); - return err; -} - -static int live_late_preempt(void *arg) -{ - struct drm_i915_private *i915 = arg; - struct i915_gem_context *ctx_hi, *ctx_lo; - struct igt_spinner spin_hi, spin_lo; - struct intel_engine_cs *engine; - struct i915_sched_attr attr = {}; - enum intel_engine_id id; - intel_wakeref_t wakeref; - int err = -ENOMEM; - - if (!HAS_LOGICAL_RING_PREEMPTION(i915)) - return 0; - - mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); - - if (igt_spinner_init(&spin_hi, i915)) - goto err_unlock; - - if (igt_spinner_init(&spin_lo, i915)) - goto err_spin_hi; - - ctx_hi = kernel_context(i915); - if (!ctx_hi) - goto err_spin_lo; - - ctx_lo = kernel_context(i915); - if (!ctx_lo) - goto err_ctx_hi; - - for_each_engine(engine, i915, id) { - struct igt_live_test t; - struct i915_request *rq; - - if (!intel_engine_has_preemption(engine)) - continue; - - if (igt_live_test_begin(&t, i915, __func__, engine->name)) { - err = -EIO; - goto err_ctx_lo; - } - - rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine, - MI_ARB_CHECK); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto err_ctx_lo; - } - - i915_request_add(rq); - if (!igt_wait_for_spinner(&spin_lo, rq)) { - pr_err("First context failed to start\n"); - goto err_wedged; - } - - rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine, - MI_NOOP); - if (IS_ERR(rq)) { - igt_spinner_end(&spin_lo); - err = PTR_ERR(rq); - goto err_ctx_lo; - } - - i915_request_add(rq); - if (igt_wait_for_spinner(&spin_hi, rq)) { - pr_err("Second context overtook first?\n"); - goto err_wedged; - } - - attr.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX); - engine->schedule(rq, &attr); - - if (!igt_wait_for_spinner(&spin_hi, rq)) { - pr_err("High priority context failed to preempt the low priority context\n"); - GEM_TRACE_DUMP(); - goto err_wedged; - } - - igt_spinner_end(&spin_hi); - igt_spinner_end(&spin_lo); - - if (igt_live_test_end(&t)) { - err = -EIO; - goto err_ctx_lo; - } - } - - err = 0; -err_ctx_lo: - kernel_context_close(ctx_lo); -err_ctx_hi: - kernel_context_close(ctx_hi); -err_spin_lo: - igt_spinner_fini(&spin_lo); -err_spin_hi: - igt_spinner_fini(&spin_hi); -err_unlock: - igt_flush_test(i915, I915_WAIT_LOCKED); - intel_runtime_pm_put(i915, wakeref); - mutex_unlock(&i915->drm.struct_mutex); - return err; - -err_wedged: - igt_spinner_end(&spin_hi); - igt_spinner_end(&spin_lo); - i915_gem_set_wedged(i915); - err = -EIO; - goto err_ctx_lo; -} - -struct preempt_client { - struct igt_spinner spin; - struct i915_gem_context *ctx; -}; - -static int preempt_client_init(struct drm_i915_private *i915, - struct preempt_client *c) -{ - c->ctx = kernel_context(i915); - if (!c->ctx) - return -ENOMEM; - - if (igt_spinner_init(&c->spin, i915)) - goto err_ctx; - - return 0; - -err_ctx: - kernel_context_close(c->ctx); - return -ENOMEM; -} - -static void preempt_client_fini(struct preempt_client *c) -{ - igt_spinner_fini(&c->spin); - kernel_context_close(c->ctx); -} - 
-static int live_suppress_self_preempt(void *arg) -{ - struct drm_i915_private *i915 = arg; - struct intel_engine_cs *engine; - struct i915_sched_attr attr = { - .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX) - }; - struct preempt_client a, b; - enum intel_engine_id id; - intel_wakeref_t wakeref; - int err = -ENOMEM; - - /* - * Verify that if a preemption request does not cause a change in - * the current execution order, the preempt-to-idle injection is - * skipped and that we do not accidentally apply it after the CS - * completion event. - */ - - if (!HAS_LOGICAL_RING_PREEMPTION(i915)) - return 0; - - if (USES_GUC_SUBMISSION(i915)) - return 0; /* presume black box */ - - mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); - - if (preempt_client_init(i915, &a)) - goto err_unlock; - if (preempt_client_init(i915, &b)) - goto err_client_a; - - for_each_engine(engine, i915, id) { - struct i915_request *rq_a, *rq_b; - int depth; - - if (!intel_engine_has_preemption(engine)) - continue; - - engine->execlists.preempt_hang.count = 0; - - rq_a = igt_spinner_create_request(&a.spin, - a.ctx, engine, - MI_NOOP); - if (IS_ERR(rq_a)) { - err = PTR_ERR(rq_a); - goto err_client_b; - } - - i915_request_add(rq_a); - if (!igt_wait_for_spinner(&a.spin, rq_a)) { - pr_err("First client failed to start\n"); - goto err_wedged; - } - - for (depth = 0; depth < 8; depth++) { - rq_b = igt_spinner_create_request(&b.spin, - b.ctx, engine, - MI_NOOP); - if (IS_ERR(rq_b)) { - err = PTR_ERR(rq_b); - goto err_client_b; - } - i915_request_add(rq_b); - - GEM_BUG_ON(i915_request_completed(rq_a)); - engine->schedule(rq_a, &attr); - igt_spinner_end(&a.spin); - - if (!igt_wait_for_spinner(&b.spin, rq_b)) { - pr_err("Second client failed to start\n"); - goto err_wedged; - } - - swap(a, b); - rq_a = rq_b; - } - igt_spinner_end(&a.spin); - - if (engine->execlists.preempt_hang.count) { - pr_err("Preemption recorded x%d, depth %d; should have been suppressed!\n", - engine->execlists.preempt_hang.count, - depth); - err = -EINVAL; - goto err_client_b; - } - - if (igt_flush_test(i915, I915_WAIT_LOCKED)) - goto err_wedged; - } - - err = 0; -err_client_b: - preempt_client_fini(&b); -err_client_a: - preempt_client_fini(&a); -err_unlock: - if (igt_flush_test(i915, I915_WAIT_LOCKED)) - err = -EIO; - intel_runtime_pm_put(i915, wakeref); - mutex_unlock(&i915->drm.struct_mutex); - return err; - -err_wedged: - igt_spinner_end(&b.spin); - igt_spinner_end(&a.spin); - i915_gem_set_wedged(i915); - err = -EIO; - goto err_client_b; -} - -static int __i915_sw_fence_call -dummy_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) -{ - return NOTIFY_DONE; -} - -static struct i915_request *dummy_request(struct intel_engine_cs *engine) -{ - struct i915_request *rq; - - rq = kzalloc(sizeof(*rq), GFP_KERNEL); - if (!rq) - return NULL; - - INIT_LIST_HEAD(&rq->active_list); - rq->engine = engine; - - i915_sched_node_init(&rq->sched); - - /* mark this request as permanently incomplete */ - rq->fence.seqno = 1; - BUILD_BUG_ON(sizeof(rq->fence.seqno) != 8); /* upper 32b == 0 */ - rq->hwsp_seqno = (u32 *)&rq->fence.seqno + 1; - GEM_BUG_ON(i915_request_completed(rq)); - - i915_sw_fence_init(&rq->submit, dummy_notify); - i915_sw_fence_commit(&rq->submit); - - return rq; -} - -static void dummy_request_free(struct i915_request *dummy) -{ - i915_request_mark_complete(dummy); - i915_sched_node_fini(&dummy->sched); - i915_sw_fence_fini(&dummy->submit); - - dma_fence_free(&dummy->fence); -} - -static int 
live_suppress_wait_preempt(void *arg) -{ - struct drm_i915_private *i915 = arg; - struct preempt_client client[4]; - struct intel_engine_cs *engine; - enum intel_engine_id id; - intel_wakeref_t wakeref; - int err = -ENOMEM; - int i; - - /* - * Waiters are given a little priority nudge, but not enough - * to actually cause any preemption. Double check that we do - * not needlessly generate preempt-to-idle cycles. - */ - - if (!HAS_LOGICAL_RING_PREEMPTION(i915)) - return 0; - - mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); - - if (preempt_client_init(i915, &client[0])) /* ELSP[0] */ - goto err_unlock; - if (preempt_client_init(i915, &client[1])) /* ELSP[1] */ - goto err_client_0; - if (preempt_client_init(i915, &client[2])) /* head of queue */ - goto err_client_1; - if (preempt_client_init(i915, &client[3])) /* bystander */ - goto err_client_2; - - for_each_engine(engine, i915, id) { - int depth; - - if (!intel_engine_has_preemption(engine)) - continue; - - if (!engine->emit_init_breadcrumb) - continue; - - for (depth = 0; depth < ARRAY_SIZE(client); depth++) { - struct i915_request *rq[ARRAY_SIZE(client)]; - struct i915_request *dummy; - - engine->execlists.preempt_hang.count = 0; - - dummy = dummy_request(engine); - if (!dummy) - goto err_client_3; - - for (i = 0; i < ARRAY_SIZE(client); i++) { - rq[i] = igt_spinner_create_request(&client[i].spin, - client[i].ctx, engine, - MI_NOOP); - if (IS_ERR(rq[i])) { - err = PTR_ERR(rq[i]); - goto err_wedged; - } - - /* Disable NEWCLIENT promotion */ - __i915_active_request_set(&rq[i]->timeline->last_request, - dummy); - i915_request_add(rq[i]); - } - - dummy_request_free(dummy); - - GEM_BUG_ON(i915_request_completed(rq[0])); - if (!igt_wait_for_spinner(&client[0].spin, rq[0])) { - pr_err("%s: First client failed to start\n", - engine->name); - goto err_wedged; - } - GEM_BUG_ON(!i915_request_started(rq[0])); - - if (i915_request_wait(rq[depth], - I915_WAIT_LOCKED | - I915_WAIT_PRIORITY, - 1) != -ETIME) { - pr_err("%s: Waiter depth:%d completed!\n", - engine->name, depth); - goto err_wedged; - } - - for (i = 0; i < ARRAY_SIZE(client); i++) - igt_spinner_end(&client[i].spin); - - if (igt_flush_test(i915, I915_WAIT_LOCKED)) - goto err_wedged; - - if (engine->execlists.preempt_hang.count) { - pr_err("%s: Preemption recorded x%d, depth %d; should have been suppressed!\n", - engine->name, - engine->execlists.preempt_hang.count, - depth); - err = -EINVAL; - goto err_client_3; - } - } - } - - err = 0; -err_client_3: - preempt_client_fini(&client[3]); -err_client_2: - preempt_client_fini(&client[2]); -err_client_1: - preempt_client_fini(&client[1]); -err_client_0: - preempt_client_fini(&client[0]); -err_unlock: - if (igt_flush_test(i915, I915_WAIT_LOCKED)) - err = -EIO; - intel_runtime_pm_put(i915, wakeref); - mutex_unlock(&i915->drm.struct_mutex); - return err; - -err_wedged: - for (i = 0; i < ARRAY_SIZE(client); i++) - igt_spinner_end(&client[i].spin); - i915_gem_set_wedged(i915); - err = -EIO; - goto err_client_3; -} - -static int live_chain_preempt(void *arg) -{ - struct drm_i915_private *i915 = arg; - struct intel_engine_cs *engine; - struct preempt_client hi, lo; - enum intel_engine_id id; - intel_wakeref_t wakeref; - int err = -ENOMEM; - - /* - * Build a chain AB...BA between two contexts (A, B) and request - * preemption of the last request. It should then complete before - * the previously submitted spinner in B. 
- */ - - if (!HAS_LOGICAL_RING_PREEMPTION(i915)) - return 0; - - mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); - - if (preempt_client_init(i915, &hi)) - goto err_unlock; - - if (preempt_client_init(i915, &lo)) - goto err_client_hi; - - for_each_engine(engine, i915, id) { - struct i915_sched_attr attr = { - .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX), - }; - struct igt_live_test t; - struct i915_request *rq; - int ring_size, count, i; - - if (!intel_engine_has_preemption(engine)) - continue; - - rq = igt_spinner_create_request(&lo.spin, - lo.ctx, engine, - MI_ARB_CHECK); - if (IS_ERR(rq)) - goto err_wedged; - i915_request_add(rq); - - ring_size = rq->wa_tail - rq->head; - if (ring_size < 0) - ring_size += rq->ring->size; - ring_size = rq->ring->size / ring_size; - pr_debug("%s(%s): Using maximum of %d requests\n", - __func__, engine->name, ring_size); - - igt_spinner_end(&lo.spin); - if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 2) < 0) { - pr_err("Timed out waiting to flush %s\n", engine->name); - goto err_wedged; - } - - if (igt_live_test_begin(&t, i915, __func__, engine->name)) { - err = -EIO; - goto err_wedged; - } - - for_each_prime_number_from(count, 1, ring_size) { - rq = igt_spinner_create_request(&hi.spin, - hi.ctx, engine, - MI_ARB_CHECK); - if (IS_ERR(rq)) - goto err_wedged; - i915_request_add(rq); - if (!igt_wait_for_spinner(&hi.spin, rq)) - goto err_wedged; - - rq = igt_spinner_create_request(&lo.spin, - lo.ctx, engine, - MI_ARB_CHECK); - if (IS_ERR(rq)) - goto err_wedged; - i915_request_add(rq); - - for (i = 0; i < count; i++) { - rq = i915_request_alloc(engine, lo.ctx); - if (IS_ERR(rq)) - goto err_wedged; - i915_request_add(rq); - } - - rq = i915_request_alloc(engine, hi.ctx); - if (IS_ERR(rq)) - goto err_wedged; - i915_request_add(rq); - engine->schedule(rq, &attr); - - igt_spinner_end(&hi.spin); - if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) { - struct drm_printer p = - drm_info_printer(i915->drm.dev); - - pr_err("Failed to preempt over chain of %d\n", - count); - intel_engine_dump(engine, &p, - "%s\n", engine->name); - goto err_wedged; - } - igt_spinner_end(&lo.spin); - - rq = i915_request_alloc(engine, lo.ctx); - if (IS_ERR(rq)) - goto err_wedged; - i915_request_add(rq); - if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) { - struct drm_printer p = - drm_info_printer(i915->drm.dev); - - pr_err("Failed to flush low priority chain of %d requests\n", - count); - intel_engine_dump(engine, &p, - "%s\n", engine->name); - goto err_wedged; - } - } - - if (igt_live_test_end(&t)) { - err = -EIO; - goto err_wedged; - } - } - - err = 0; -err_client_lo: - preempt_client_fini(&lo); -err_client_hi: - preempt_client_fini(&hi); -err_unlock: - if (igt_flush_test(i915, I915_WAIT_LOCKED)) - err = -EIO; - intel_runtime_pm_put(i915, wakeref); - mutex_unlock(&i915->drm.struct_mutex); - return err; - -err_wedged: - igt_spinner_end(&hi.spin); - igt_spinner_end(&lo.spin); - i915_gem_set_wedged(i915); - err = -EIO; - goto err_client_lo; -} - -static int live_preempt_hang(void *arg) -{ - struct drm_i915_private *i915 = arg; - struct i915_gem_context *ctx_hi, *ctx_lo; - struct igt_spinner spin_hi, spin_lo; - struct intel_engine_cs *engine; - enum intel_engine_id id; - intel_wakeref_t wakeref; - int err = -ENOMEM; - - if (!HAS_LOGICAL_RING_PREEMPTION(i915)) - return 0; - - if (!intel_has_reset_engine(i915)) - return 0; - - mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); - - if (igt_spinner_init(&spin_hi, i915)) - 
goto err_unlock; - - if (igt_spinner_init(&spin_lo, i915)) - goto err_spin_hi; - - ctx_hi = kernel_context(i915); - if (!ctx_hi) - goto err_spin_lo; - ctx_hi->sched.priority = - I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY); - - ctx_lo = kernel_context(i915); - if (!ctx_lo) - goto err_ctx_hi; - ctx_lo->sched.priority = - I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY); - - for_each_engine(engine, i915, id) { - struct i915_request *rq; - - if (!intel_engine_has_preemption(engine)) - continue; - - rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine, - MI_ARB_CHECK); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto err_ctx_lo; - } - - i915_request_add(rq); - if (!igt_wait_for_spinner(&spin_lo, rq)) { - GEM_TRACE("lo spinner failed to start\n"); - GEM_TRACE_DUMP(); - i915_gem_set_wedged(i915); - err = -EIO; - goto err_ctx_lo; - } - - rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine, - MI_ARB_CHECK); - if (IS_ERR(rq)) { - igt_spinner_end(&spin_lo); - err = PTR_ERR(rq); - goto err_ctx_lo; - } - - init_completion(&engine->execlists.preempt_hang.completion); - engine->execlists.preempt_hang.inject_hang = true; - - i915_request_add(rq); - - if (!wait_for_completion_timeout(&engine->execlists.preempt_hang.completion, - HZ / 10)) { - pr_err("Preemption did not occur within timeout!\n"); - GEM_TRACE_DUMP(); - i915_gem_set_wedged(i915); - err = -EIO; - goto err_ctx_lo; - } - - set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags); - i915_reset_engine(engine, NULL); - clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags); - - engine->execlists.preempt_hang.inject_hang = false; - - if (!igt_wait_for_spinner(&spin_hi, rq)) { - GEM_TRACE("hi spinner failed to start\n"); - GEM_TRACE_DUMP(); - i915_gem_set_wedged(i915); - err = -EIO; - goto err_ctx_lo; - } - - igt_spinner_end(&spin_hi); - igt_spinner_end(&spin_lo); - if (igt_flush_test(i915, I915_WAIT_LOCKED)) { - err = -EIO; - goto err_ctx_lo; - } - } - - err = 0; -err_ctx_lo: - kernel_context_close(ctx_lo); -err_ctx_hi: - kernel_context_close(ctx_hi); -err_spin_lo: - igt_spinner_fini(&spin_lo); -err_spin_hi: - igt_spinner_fini(&spin_hi); -err_unlock: - igt_flush_test(i915, I915_WAIT_LOCKED); - intel_runtime_pm_put(i915, wakeref); - mutex_unlock(&i915->drm.struct_mutex); - return err; -} - -static int random_range(struct rnd_state *rnd, int min, int max) -{ - return i915_prandom_u32_max_state(max - min, rnd) + min; -} - -static int random_priority(struct rnd_state *rnd) -{ - return random_range(rnd, I915_PRIORITY_MIN, I915_PRIORITY_MAX); -} - -struct preempt_smoke { - struct drm_i915_private *i915; - struct i915_gem_context **contexts; - struct intel_engine_cs *engine; - struct drm_i915_gem_object *batch; - unsigned int ncontext; - struct rnd_state prng; - unsigned long count; -}; - -static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke) -{ - return smoke->contexts[i915_prandom_u32_max_state(smoke->ncontext, - &smoke->prng)]; -} - -static int smoke_submit(struct preempt_smoke *smoke, - struct i915_gem_context *ctx, int prio, - struct drm_i915_gem_object *batch) -{ - struct i915_request *rq; - struct i915_vma *vma = NULL; - int err = 0; - - if (batch) { - vma = i915_vma_instance(batch, &ctx->ppgtt->vm, NULL); - if (IS_ERR(vma)) - return PTR_ERR(vma); - - err = i915_vma_pin(vma, 0, 0, PIN_USER); - if (err) - return err; - } - - ctx->sched.priority = prio; - - rq = i915_request_alloc(smoke->engine, ctx); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto unpin; - } - - if (vma) { - err = rq->engine->emit_bb_start(rq, - 
vma->node.start, - PAGE_SIZE, 0); - if (!err) - err = i915_vma_move_to_active(vma, rq, 0); - } - - i915_request_add(rq); - -unpin: - if (vma) - i915_vma_unpin(vma); - - return err; -} - -static int smoke_crescendo_thread(void *arg) -{ - struct preempt_smoke *smoke = arg; - IGT_TIMEOUT(end_time); - unsigned long count; - - count = 0; - do { - struct i915_gem_context *ctx = smoke_context(smoke); - int err; - - mutex_lock(&smoke->i915->drm.struct_mutex); - err = smoke_submit(smoke, - ctx, count % I915_PRIORITY_MAX, - smoke->batch); - mutex_unlock(&smoke->i915->drm.struct_mutex); - if (err) - return err; - - count++; - } while (!__igt_timeout(end_time, NULL)); - - smoke->count = count; - return 0; -} - -static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags) -#define BATCH BIT(0) -{ - struct task_struct *tsk[I915_NUM_ENGINES] = {}; - struct preempt_smoke arg[I915_NUM_ENGINES]; - struct intel_engine_cs *engine; - enum intel_engine_id id; - unsigned long count; - int err = 0; - - mutex_unlock(&smoke->i915->drm.struct_mutex); - - for_each_engine(engine, smoke->i915, id) { - arg[id] = *smoke; - arg[id].engine = engine; - if (!(flags & BATCH)) - arg[id].batch = NULL; - arg[id].count = 0; - - tsk[id] = kthread_run(smoke_crescendo_thread, &arg[id], - "igt/smoke:%d", id); - if (IS_ERR(tsk[id])) { - err = PTR_ERR(tsk[id]); - break; - } - get_task_struct(tsk[id]); - } - - count = 0; - for_each_engine(engine, smoke->i915, id) { - int status; - - if (IS_ERR_OR_NULL(tsk[id])) - continue; - - status = kthread_stop(tsk[id]); - if (status && !err) - err = status; - - count += arg[id].count; - - put_task_struct(tsk[id]); - } - - mutex_lock(&smoke->i915->drm.struct_mutex); - - pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n", - count, flags, - RUNTIME_INFO(smoke->i915)->num_engines, smoke->ncontext); - return err; -} - -static int smoke_random(struct preempt_smoke *smoke, unsigned int flags) -{ - enum intel_engine_id id; - IGT_TIMEOUT(end_time); - unsigned long count; - - count = 0; - do { - for_each_engine(smoke->engine, smoke->i915, id) { - struct i915_gem_context *ctx = smoke_context(smoke); - int err; - - err = smoke_submit(smoke, - ctx, random_priority(&smoke->prng), - flags & BATCH ? 
smoke->batch : NULL); - if (err) - return err; - - count++; - } - } while (!__igt_timeout(end_time, NULL)); - - pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n", - count, flags, - RUNTIME_INFO(smoke->i915)->num_engines, smoke->ncontext); - return 0; -} - -static int live_preempt_smoke(void *arg) -{ - struct preempt_smoke smoke = { - .i915 = arg, - .prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed), - .ncontext = 1024, - }; - const unsigned int phase[] = { 0, BATCH }; - intel_wakeref_t wakeref; - struct igt_live_test t; - int err = -ENOMEM; - u32 *cs; - int n; - - if (!HAS_LOGICAL_RING_PREEMPTION(smoke.i915)) - return 0; - - smoke.contexts = kmalloc_array(smoke.ncontext, - sizeof(*smoke.contexts), - GFP_KERNEL); - if (!smoke.contexts) - return -ENOMEM; - - mutex_lock(&smoke.i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(smoke.i915); - - smoke.batch = i915_gem_object_create_internal(smoke.i915, PAGE_SIZE); - if (IS_ERR(smoke.batch)) { - err = PTR_ERR(smoke.batch); - goto err_unlock; - } - - cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB); - if (IS_ERR(cs)) { - err = PTR_ERR(cs); - goto err_batch; - } - for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++) - cs[n] = MI_ARB_CHECK; - cs[n] = MI_BATCH_BUFFER_END; - i915_gem_object_flush_map(smoke.batch); - i915_gem_object_unpin_map(smoke.batch); - - if (igt_live_test_begin(&t, smoke.i915, __func__, "all")) { - err = -EIO; - goto err_batch; - } - - for (n = 0; n < smoke.ncontext; n++) { - smoke.contexts[n] = kernel_context(smoke.i915); - if (!smoke.contexts[n]) - goto err_ctx; - } - - for (n = 0; n < ARRAY_SIZE(phase); n++) { - err = smoke_crescendo(&smoke, phase[n]); - if (err) - goto err_ctx; - - err = smoke_random(&smoke, phase[n]); - if (err) - goto err_ctx; - } - -err_ctx: - if (igt_live_test_end(&t)) - err = -EIO; - - for (n = 0; n < smoke.ncontext; n++) { - if (!smoke.contexts[n]) - break; - kernel_context_close(smoke.contexts[n]); - } - -err_batch: - i915_gem_object_put(smoke.batch); -err_unlock: - intel_runtime_pm_put(smoke.i915, wakeref); - mutex_unlock(&smoke.i915->drm.struct_mutex); - kfree(smoke.contexts); - - return err; -} - -int intel_execlists_live_selftests(struct drm_i915_private *i915) -{ - static const struct i915_subtest tests[] = { - SUBTEST(live_sanitycheck), - SUBTEST(live_busywait_preempt), - SUBTEST(live_preempt), - SUBTEST(live_late_preempt), - SUBTEST(live_suppress_self_preempt), - SUBTEST(live_suppress_wait_preempt), - SUBTEST(live_chain_preempt), - SUBTEST(live_preempt_hang), - SUBTEST(live_preempt_smoke), - }; - - if (!HAS_EXECLISTS(i915)) - return 0; - - if (i915_terminally_wedged(i915)) - return 0; - - return i915_subtests(tests, i915); -} diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c deleted file mode 100644 index 567b6f8dae86..000000000000 --- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c +++ /dev/null @@ -1,901 +0,0 @@ -/* - * SPDX-License-Identifier: MIT - * - * Copyright © 2018 Intel Corporation - */ - -#include "../i915_selftest.h" -#include "../i915_reset.h" - -#include "igt_flush_test.h" -#include "igt_reset.h" -#include "igt_spinner.h" -#include "igt_wedge_me.h" -#include "mock_context.h" -#include "mock_drm.h" - -static const struct wo_register { - enum intel_platform platform; - u32 reg; -} wo_registers[] = { - { INTEL_GEMINILAKE, 0x731c } -}; - -#define REF_NAME_MAX (INTEL_ENGINE_CS_MAX_NAME + 4) -struct wa_lists { - struct i915_wa_list gt_wa_list; - struct { - char 
name[REF_NAME_MAX]; - struct i915_wa_list wa_list; - } engine[I915_NUM_ENGINES]; -}; - -static void -reference_lists_init(struct drm_i915_private *i915, struct wa_lists *lists) -{ - struct intel_engine_cs *engine; - enum intel_engine_id id; - - memset(lists, 0, sizeof(*lists)); - - wa_init_start(&lists->gt_wa_list, "GT_REF"); - gt_init_workarounds(i915, &lists->gt_wa_list); - wa_init_finish(&lists->gt_wa_list); - - for_each_engine(engine, i915, id) { - struct i915_wa_list *wal = &lists->engine[id].wa_list; - char *name = lists->engine[id].name; - - snprintf(name, REF_NAME_MAX, "%s_REF", engine->name); - - wa_init_start(wal, name); - engine_init_workarounds(engine, wal); - wa_init_finish(wal); - } -} - -static void -reference_lists_fini(struct drm_i915_private *i915, struct wa_lists *lists) -{ - struct intel_engine_cs *engine; - enum intel_engine_id id; - - for_each_engine(engine, i915, id) - intel_wa_list_free(&lists->engine[id].wa_list); - - intel_wa_list_free(&lists->gt_wa_list); -} - -static struct drm_i915_gem_object * -read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine) -{ - const u32 base = engine->mmio_base; - struct drm_i915_gem_object *result; - intel_wakeref_t wakeref; - struct i915_request *rq; - struct i915_vma *vma; - u32 srm, *cs; - int err; - int i; - - result = i915_gem_object_create_internal(engine->i915, PAGE_SIZE); - if (IS_ERR(result)) - return result; - - i915_gem_object_set_cache_coherency(result, I915_CACHE_LLC); - - cs = i915_gem_object_pin_map(result, I915_MAP_WB); - if (IS_ERR(cs)) { - err = PTR_ERR(cs); - goto err_obj; - } - memset(cs, 0xc5, PAGE_SIZE); - i915_gem_object_flush_map(result); - i915_gem_object_unpin_map(result); - - vma = i915_vma_instance(result, &engine->i915->ggtt.vm, NULL); - if (IS_ERR(vma)) { - err = PTR_ERR(vma); - goto err_obj; - } - - err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL); - if (err) - goto err_obj; - - rq = ERR_PTR(-ENODEV); - with_intel_runtime_pm(engine->i915, wakeref) - rq = i915_request_alloc(engine, ctx); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto err_pin; - } - - err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); - if (err) - goto err_req; - - srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT; - if (INTEL_GEN(ctx->i915) >= 8) - srm++; - - cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS); - if (IS_ERR(cs)) { - err = PTR_ERR(cs); - goto err_req; - } - - for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) { - *cs++ = srm; - *cs++ = i915_mmio_reg_offset(RING_FORCE_TO_NONPRIV(base, i)); - *cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i; - *cs++ = 0; - } - intel_ring_advance(rq, cs); - - i915_gem_object_get(result); - i915_gem_object_set_active_reference(result); - - i915_request_add(rq); - i915_vma_unpin(vma); - - return result; - -err_req: - i915_request_add(rq); -err_pin: - i915_vma_unpin(vma); -err_obj: - i915_gem_object_put(result); - return ERR_PTR(err); -} - -static u32 -get_whitelist_reg(const struct intel_engine_cs *engine, unsigned int i) -{ - i915_reg_t reg = i < engine->whitelist.count ? 
- engine->whitelist.list[i].reg : - RING_NOPID(engine->mmio_base); - - return i915_mmio_reg_offset(reg); -} - -static void -print_results(const struct intel_engine_cs *engine, const u32 *results) -{ - unsigned int i; - - for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) { - u32 expected = get_whitelist_reg(engine, i); - u32 actual = results[i]; - - pr_info("RING_NONPRIV[%d]: expected 0x%08x, found 0x%08x\n", - i, expected, actual); - } -} - -static int check_whitelist(struct i915_gem_context *ctx, - struct intel_engine_cs *engine) -{ - struct drm_i915_gem_object *results; - struct igt_wedge_me wedge; - u32 *vaddr; - int err; - int i; - - results = read_nonprivs(ctx, engine); - if (IS_ERR(results)) - return PTR_ERR(results); - - err = 0; - igt_wedge_on_timeout(&wedge, ctx->i915, HZ / 5) /* a safety net! */ - err = i915_gem_object_set_to_cpu_domain(results, false); - if (i915_terminally_wedged(ctx->i915)) - err = -EIO; - if (err) - goto out_put; - - vaddr = i915_gem_object_pin_map(results, I915_MAP_WB); - if (IS_ERR(vaddr)) { - err = PTR_ERR(vaddr); - goto out_put; - } - - for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) { - u32 expected = get_whitelist_reg(engine, i); - u32 actual = vaddr[i]; - - if (expected != actual) { - print_results(engine, vaddr); - pr_err("Invalid RING_NONPRIV[%d], expected 0x%08x, found 0x%08x\n", - i, expected, actual); - - err = -EINVAL; - break; - } - } - - i915_gem_object_unpin_map(results); -out_put: - i915_gem_object_put(results); - return err; -} - -static int do_device_reset(struct intel_engine_cs *engine) -{ - i915_reset(engine->i915, engine->mask, "live_workarounds"); - return 0; -} - -static int do_engine_reset(struct intel_engine_cs *engine) -{ - return i915_reset_engine(engine, "live_workarounds"); -} - -static int -switch_to_scratch_context(struct intel_engine_cs *engine, - struct igt_spinner *spin) -{ - struct i915_gem_context *ctx; - struct i915_request *rq; - intel_wakeref_t wakeref; - int err = 0; - - ctx = kernel_context(engine->i915); - if (IS_ERR(ctx)) - return PTR_ERR(ctx); - - GEM_BUG_ON(i915_gem_context_is_bannable(ctx)); - - rq = ERR_PTR(-ENODEV); - with_intel_runtime_pm(engine->i915, wakeref) - rq = igt_spinner_create_request(spin, ctx, engine, MI_NOOP); - - kernel_context_close(ctx); - - if (IS_ERR(rq)) { - spin = NULL; - err = PTR_ERR(rq); - goto err; - } - - i915_request_add(rq); - - if (spin && !igt_wait_for_spinner(spin, rq)) { - pr_err("Spinner failed to start\n"); - err = -ETIMEDOUT; - } - -err: - if (err && spin) - igt_spinner_end(spin); - - return err; -} - -static int check_whitelist_across_reset(struct intel_engine_cs *engine, - int (*reset)(struct intel_engine_cs *), - const char *name) -{ - struct drm_i915_private *i915 = engine->i915; - struct i915_gem_context *ctx; - struct igt_spinner spin; - intel_wakeref_t wakeref; - int err; - - pr_info("Checking %d whitelisted registers (RING_NONPRIV) [%s]\n", - engine->whitelist.count, name); - - err = igt_spinner_init(&spin, i915); - if (err) - return err; - - ctx = kernel_context(i915); - if (IS_ERR(ctx)) - return PTR_ERR(ctx); - - err = check_whitelist(ctx, engine); - if (err) { - pr_err("Invalid whitelist *before* %s reset!\n", name); - goto out; - } - - err = switch_to_scratch_context(engine, &spin); - if (err) - goto out; - - with_intel_runtime_pm(i915, wakeref) - err = reset(engine); - - igt_spinner_end(&spin); - igt_spinner_fini(&spin); - - if (err) { - pr_err("%s reset failed\n", name); - goto out; - } - - err = check_whitelist(ctx, engine); - if (err) { - pr_err("Whitelist not 
preserved in context across %s reset!\n", - name); - goto out; - } - - kernel_context_close(ctx); - - ctx = kernel_context(i915); - if (IS_ERR(ctx)) - return PTR_ERR(ctx); - - err = check_whitelist(ctx, engine); - if (err) { - pr_err("Invalid whitelist *after* %s reset in fresh context!\n", - name); - goto out; - } - -out: - kernel_context_close(ctx); - return err; -} - -static struct i915_vma *create_scratch(struct i915_gem_context *ctx) -{ - struct drm_i915_gem_object *obj; - struct i915_vma *vma; - void *ptr; - int err; - - obj = i915_gem_object_create_internal(ctx->i915, PAGE_SIZE); - if (IS_ERR(obj)) - return ERR_CAST(obj); - - i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC); - - ptr = i915_gem_object_pin_map(obj, I915_MAP_WB); - if (IS_ERR(ptr)) { - err = PTR_ERR(ptr); - goto err_obj; - } - memset(ptr, 0xc5, PAGE_SIZE); - i915_gem_object_flush_map(obj); - i915_gem_object_unpin_map(obj); - - vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL); - if (IS_ERR(vma)) { - err = PTR_ERR(vma); - goto err_obj; - } - - err = i915_vma_pin(vma, 0, 0, PIN_USER); - if (err) - goto err_obj; - - err = i915_gem_object_set_to_cpu_domain(obj, false); - if (err) - goto err_obj; - - return vma; - -err_obj: - i915_gem_object_put(obj); - return ERR_PTR(err); -} - -static struct i915_vma *create_batch(struct i915_gem_context *ctx) -{ - struct drm_i915_gem_object *obj; - struct i915_vma *vma; - int err; - - obj = i915_gem_object_create_internal(ctx->i915, 16 * PAGE_SIZE); - if (IS_ERR(obj)) - return ERR_CAST(obj); - - vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL); - if (IS_ERR(vma)) { - err = PTR_ERR(vma); - goto err_obj; - } - - err = i915_vma_pin(vma, 0, 0, PIN_USER); - if (err) - goto err_obj; - - err = i915_gem_object_set_to_wc_domain(obj, true); - if (err) - goto err_obj; - - return vma; - -err_obj: - i915_gem_object_put(obj); - return ERR_PTR(err); -} - -static u32 reg_write(u32 old, u32 new, u32 rsvd) -{ - if (rsvd == 0x0000ffff) { - old &= ~(new >> 16); - old |= new & (new >> 16); - } else { - old &= ~rsvd; - old |= new & rsvd; - } - - return old; -} - -static bool wo_register(struct intel_engine_cs *engine, u32 reg) -{ - enum intel_platform platform = INTEL_INFO(engine->i915)->platform; - int i; - - for (i = 0; i < ARRAY_SIZE(wo_registers); i++) { - if (wo_registers[i].platform == platform && - wo_registers[i].reg == reg) - return true; - } - - return false; -} - -static int check_dirty_whitelist(struct i915_gem_context *ctx, - struct intel_engine_cs *engine) -{ - const u32 values[] = { - 0x00000000, - 0x01010101, - 0x10100101, - 0x03030303, - 0x30300303, - 0x05050505, - 0x50500505, - 0x0f0f0f0f, - 0xf00ff00f, - 0x10101010, - 0xf0f01010, - 0x30303030, - 0xa0a03030, - 0x50505050, - 0xc0c05050, - 0xf0f0f0f0, - 0x11111111, - 0x33333333, - 0x55555555, - 0x0000ffff, - 0x00ff00ff, - 0xff0000ff, - 0xffff00ff, - 0xffffffff, - }; - struct i915_vma *scratch; - struct i915_vma *batch; - int err = 0, i, v; - u32 *cs, *results; - - scratch = create_scratch(ctx); - if (IS_ERR(scratch)) - return PTR_ERR(scratch); - - batch = create_batch(ctx); - if (IS_ERR(batch)) { - err = PTR_ERR(batch); - goto out_scratch; - } - - for (i = 0; i < engine->whitelist.count; i++) { - u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg); - u64 addr = scratch->node.start; - struct i915_request *rq; - u32 srm, lrm, rsvd; - u32 expect; - int idx; - - if (wo_register(engine, reg)) - continue; - - srm = MI_STORE_REGISTER_MEM; - lrm = MI_LOAD_REGISTER_MEM; - if (INTEL_GEN(ctx->i915) >= 8) - lrm++, srm++; - - 
pr_debug("%s: Writing garbage to %x\n", - engine->name, reg); - - cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC); - if (IS_ERR(cs)) { - err = PTR_ERR(cs); - goto out_batch; - } - - /* SRM original */ - *cs++ = srm; - *cs++ = reg; - *cs++ = lower_32_bits(addr); - *cs++ = upper_32_bits(addr); - - idx = 1; - for (v = 0; v < ARRAY_SIZE(values); v++) { - /* LRI garbage */ - *cs++ = MI_LOAD_REGISTER_IMM(1); - *cs++ = reg; - *cs++ = values[v]; - - /* SRM result */ - *cs++ = srm; - *cs++ = reg; - *cs++ = lower_32_bits(addr + sizeof(u32) * idx); - *cs++ = upper_32_bits(addr + sizeof(u32) * idx); - idx++; - } - for (v = 0; v < ARRAY_SIZE(values); v++) { - /* LRI garbage */ - *cs++ = MI_LOAD_REGISTER_IMM(1); - *cs++ = reg; - *cs++ = ~values[v]; - - /* SRM result */ - *cs++ = srm; - *cs++ = reg; - *cs++ = lower_32_bits(addr + sizeof(u32) * idx); - *cs++ = upper_32_bits(addr + sizeof(u32) * idx); - idx++; - } - GEM_BUG_ON(idx * sizeof(u32) > scratch->size); - - /* LRM original -- don't leave garbage in the context! */ - *cs++ = lrm; - *cs++ = reg; - *cs++ = lower_32_bits(addr); - *cs++ = upper_32_bits(addr); - - *cs++ = MI_BATCH_BUFFER_END; - - i915_gem_object_flush_map(batch->obj); - i915_gem_object_unpin_map(batch->obj); - i915_gem_chipset_flush(ctx->i915); - - rq = i915_request_alloc(engine, ctx); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto out_batch; - } - - if (engine->emit_init_breadcrumb) { /* Be nice if we hang */ - err = engine->emit_init_breadcrumb(rq); - if (err) - goto err_request; - } - - err = engine->emit_bb_start(rq, - batch->node.start, PAGE_SIZE, - 0); - if (err) - goto err_request; - -err_request: - i915_request_add(rq); - if (err) - goto out_batch; - - if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) { - pr_err("%s: Futzing %x timedout; cancelling test\n", - engine->name, reg); - i915_gem_set_wedged(ctx->i915); - err = -EIO; - goto out_batch; - } - - results = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB); - if (IS_ERR(results)) { - err = PTR_ERR(results); - goto out_batch; - } - - GEM_BUG_ON(values[ARRAY_SIZE(values) - 1] != 0xffffffff); - rsvd = results[ARRAY_SIZE(values)]; /* detect write masking */ - if (!rsvd) { - pr_err("%s: Unable to write to whitelisted register %x\n", - engine->name, reg); - err = -EINVAL; - goto out_unpin; - } - - expect = results[0]; - idx = 1; - for (v = 0; v < ARRAY_SIZE(values); v++) { - expect = reg_write(expect, values[v], rsvd); - if (results[idx] != expect) - err++; - idx++; - } - for (v = 0; v < ARRAY_SIZE(values); v++) { - expect = reg_write(expect, ~values[v], rsvd); - if (results[idx] != expect) - err++; - idx++; - } - if (err) { - pr_err("%s: %d mismatch between values written to whitelisted register [%x], and values read back!\n", - engine->name, err, reg); - - pr_info("%s: Whitelisted register: %x, original value %08x, rsvd %08x\n", - engine->name, reg, results[0], rsvd); - - expect = results[0]; - idx = 1; - for (v = 0; v < ARRAY_SIZE(values); v++) { - u32 w = values[v]; - - expect = reg_write(expect, w, rsvd); - pr_info("Wrote %08x, read %08x, expect %08x\n", - w, results[idx], expect); - idx++; - } - for (v = 0; v < ARRAY_SIZE(values); v++) { - u32 w = ~values[v]; - - expect = reg_write(expect, w, rsvd); - pr_info("Wrote %08x, read %08x, expect %08x\n", - w, results[idx], expect); - idx++; - } - - err = -EINVAL; - } -out_unpin: - i915_gem_object_unpin_map(scratch->obj); - if (err) - break; - } - - if (igt_flush_test(ctx->i915, I915_WAIT_LOCKED)) - err = -EIO; -out_batch: - i915_vma_unpin_and_release(&batch, 0); 
-out_scratch: - i915_vma_unpin_and_release(&scratch, 0); - return err; -} - -static int live_dirty_whitelist(void *arg) -{ - struct drm_i915_private *i915 = arg; - struct intel_engine_cs *engine; - struct i915_gem_context *ctx; - enum intel_engine_id id; - intel_wakeref_t wakeref; - struct drm_file *file; - int err = 0; - - /* Can the user write to the whitelisted registers? */ - - if (INTEL_GEN(i915) < 7) /* minimum requirement for LRI, SRM, LRM */ - return 0; - - wakeref = intel_runtime_pm_get(i915); - - mutex_unlock(&i915->drm.struct_mutex); - file = mock_file(i915); - mutex_lock(&i915->drm.struct_mutex); - if (IS_ERR(file)) { - err = PTR_ERR(file); - goto out_rpm; - } - - ctx = live_context(i915, file); - if (IS_ERR(ctx)) { - err = PTR_ERR(ctx); - goto out_file; - } - - for_each_engine(engine, i915, id) { - if (engine->whitelist.count == 0) - continue; - - err = check_dirty_whitelist(ctx, engine); - if (err) - goto out_file; - } - -out_file: - mutex_unlock(&i915->drm.struct_mutex); - mock_file_free(i915, file); - mutex_lock(&i915->drm.struct_mutex); -out_rpm: - intel_runtime_pm_put(i915, wakeref); - return err; -} - -static int live_reset_whitelist(void *arg) -{ - struct drm_i915_private *i915 = arg; - struct intel_engine_cs *engine = i915->engine[RCS0]; - int err = 0; - - /* If we reset the gpu, we should not lose the RING_NONPRIV */ - - if (!engine || engine->whitelist.count == 0) - return 0; - - igt_global_reset_lock(i915); - - if (intel_has_reset_engine(i915)) { - err = check_whitelist_across_reset(engine, - do_engine_reset, - "engine"); - if (err) - goto out; - } - - if (intel_has_gpu_reset(i915)) { - err = check_whitelist_across_reset(engine, - do_device_reset, - "device"); - if (err) - goto out; - } - -out: - igt_global_reset_unlock(i915); - return err; -} - -static bool verify_gt_engine_wa(struct drm_i915_private *i915, - struct wa_lists *lists, const char *str) -{ - struct intel_engine_cs *engine; - enum intel_engine_id id; - bool ok = true; - - ok &= wa_list_verify(&i915->uncore, &lists->gt_wa_list, str); - - for_each_engine(engine, i915, id) - ok &= wa_list_verify(engine->uncore, - &lists->engine[id].wa_list, str); - - return ok; -} - -static int -live_gpu_reset_gt_engine_workarounds(void *arg) -{ - struct drm_i915_private *i915 = arg; - intel_wakeref_t wakeref; - struct wa_lists lists; - bool ok; - - if (!intel_has_gpu_reset(i915)) - return 0; - - pr_info("Verifying after GPU reset...\n"); - - igt_global_reset_lock(i915); - wakeref = intel_runtime_pm_get(i915); - - reference_lists_init(i915, &lists); - - ok = verify_gt_engine_wa(i915, &lists, "before reset"); - if (!ok) - goto out; - - i915_reset(i915, ALL_ENGINES, "live_workarounds"); - - ok = verify_gt_engine_wa(i915, &lists, "after reset"); - -out: - reference_lists_fini(i915, &lists); - intel_runtime_pm_put(i915, wakeref); - igt_global_reset_unlock(i915); - - return ok ? 
0 : -ESRCH; -} - -static int -live_engine_reset_gt_engine_workarounds(void *arg) -{ - struct drm_i915_private *i915 = arg; - struct intel_engine_cs *engine; - struct i915_gem_context *ctx; - struct igt_spinner spin; - enum intel_engine_id id; - struct i915_request *rq; - intel_wakeref_t wakeref; - struct wa_lists lists; - int ret = 0; - - if (!intel_has_reset_engine(i915)) - return 0; - - ctx = kernel_context(i915); - if (IS_ERR(ctx)) - return PTR_ERR(ctx); - - igt_global_reset_lock(i915); - wakeref = intel_runtime_pm_get(i915); - - reference_lists_init(i915, &lists); - - for_each_engine(engine, i915, id) { - bool ok; - - pr_info("Verifying after %s reset...\n", engine->name); - - ok = verify_gt_engine_wa(i915, &lists, "before reset"); - if (!ok) { - ret = -ESRCH; - goto err; - } - - i915_reset_engine(engine, "live_workarounds"); - - ok = verify_gt_engine_wa(i915, &lists, "after idle reset"); - if (!ok) { - ret = -ESRCH; - goto err; - } - - ret = igt_spinner_init(&spin, i915); - if (ret) - goto err; - - rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP); - if (IS_ERR(rq)) { - ret = PTR_ERR(rq); - igt_spinner_fini(&spin); - goto err; - } - - i915_request_add(rq); - - if (!igt_wait_for_spinner(&spin, rq)) { - pr_err("Spinner failed to start\n"); - igt_spinner_fini(&spin); - ret = -ETIMEDOUT; - goto err; - } - - i915_reset_engine(engine, "live_workarounds"); - - igt_spinner_end(&spin); - igt_spinner_fini(&spin); - - ok = verify_gt_engine_wa(i915, &lists, "after busy reset"); - if (!ok) { - ret = -ESRCH; - goto err; - } - } - -err: - reference_lists_fini(i915, &lists); - intel_runtime_pm_put(i915, wakeref); - igt_global_reset_unlock(i915); - kernel_context_close(ctx); - - igt_flush_test(i915, I915_WAIT_LOCKED); - - return ret; -} - -int intel_workarounds_live_selftests(struct drm_i915_private *i915) -{ - static const struct i915_subtest tests[] = { - SUBTEST(live_dirty_whitelist), - SUBTEST(live_reset_whitelist), - SUBTEST(live_gpu_reset_gt_engine_workarounds), - SUBTEST(live_engine_reset_gt_engine_workarounds), - }; - int err; - - if (i915_terminally_wedged(i915)) - return 0; - - mutex_lock(&i915->drm.struct_mutex); - err = i915_subtests(tests, i915); - mutex_unlock(&i915->drm.struct_mutex); - - return err; -} diff --git a/drivers/gpu/drm/i915/selftests/lib_sw_fence.c b/drivers/gpu/drm/i915/selftests/lib_sw_fence.c index 2bfa72c1654b..b976c12817c5 100644 --- a/drivers/gpu/drm/i915/selftests/lib_sw_fence.c +++ b/drivers/gpu/drm/i915/selftests/lib_sw_fence.c @@ -45,6 +45,9 @@ void __onstack_fence_init(struct i915_sw_fence *fence, void onstack_fence_fini(struct i915_sw_fence *fence) { + if (!fence->flags) + return; + i915_sw_fence_commit(fence); i915_sw_fence_fini(fence); } diff --git a/drivers/gpu/drm/i915/selftests/mock_context.c b/drivers/gpu/drm/i915/selftests/mock_context.c index 0426093bf1d9..10e67c931ed1 100644 --- a/drivers/gpu/drm/i915/selftests/mock_context.c +++ b/drivers/gpu/drm/i915/selftests/mock_context.c @@ -30,6 +30,7 @@ mock_context(struct drm_i915_private *i915, const char *name) { struct i915_gem_context *ctx; + struct i915_gem_engines *e; int ret; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); @@ -40,18 +41,20 @@ mock_context(struct drm_i915_private *i915, INIT_LIST_HEAD(&ctx->link); ctx->i915 = i915; - ctx->hw_contexts = RB_ROOT; - spin_lock_init(&ctx->hw_contexts_lock); + mutex_init(&ctx->engines_mutex); + e = default_engines(ctx); + if (IS_ERR(e)) + goto err_free; + RCU_INIT_POINTER(ctx->engines, e); INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL); 
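/*
 * [Editor's note -- illustrative sketch, not part of the patch. All
 * names below (struct context, context_create, publish_engines) are
 * hypothetical; only the RCU_INIT_POINTER()/rcu_assign_pointer()
 * distinction is taken from the kernel API.]
 *
 * mock_context() above builds the default engines map and publishes it
 * with RCU_INIT_POINTER(). A plain store suffices there because the
 * half-constructed context is not yet visible to any reader; once the
 * context is live, a replacement map would have to be published with
 * rcu_assign_pointer(), whose release ordering pairs with the readers'
 * rcu_dereference(). Modelled with C11 atomics:
 */
#include <stdatomic.h>
#include <stdlib.h>

struct engines { int count; };

struct context {
	_Atomic(struct engines *) engines; /* stands in for ctx->engines */
};

static struct context *context_create(void)
{
	struct context *ctx = calloc(1, sizeof(*ctx));
	struct engines *e = calloc(1, sizeof(*e));

	if (!ctx || !e) {
		free(ctx);
		free(e);
		return NULL;
	}

	e->count = 1;
	/* Context unpublished: relaxed store models RCU_INIT_POINTER(). */
	atomic_store_explicit(&ctx->engines, e, memory_order_relaxed);
	return ctx;
}

static void publish_engines(struct context *ctx, struct engines *e)
{
	/* Context now visible: release store models rcu_assign_pointer(). */
	atomic_store_explicit(&ctx->engines, e, memory_order_release);
}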
INIT_LIST_HEAD(&ctx->handles_list); INIT_LIST_HEAD(&ctx->hw_id_link); - INIT_LIST_HEAD(&ctx->active_engines); mutex_init(&ctx->mutex); ret = i915_gem_context_pin_hw_id(ctx); if (ret < 0) - goto err_handles; + goto err_engines; if (name) { struct i915_hw_ppgtt *ppgtt; @@ -69,7 +72,9 @@ mock_context(struct drm_i915_private *i915, return ctx; -err_handles: +err_engines: + free_engines(rcu_access_pointer(ctx->engines)); +err_free: kfree(ctx); return NULL; diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c deleted file mode 100644 index 61a8206ed677..000000000000 --- a/drivers/gpu/drm/i915/selftests/mock_engine.c +++ /dev/null @@ -1,321 +0,0 @@ -/* - * Copyright © 2016 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. 
- * - */ - -#include "mock_engine.h" -#include "mock_request.h" - -struct mock_ring { - struct intel_ring base; - struct i915_timeline timeline; -}; - -static void mock_timeline_pin(struct i915_timeline *tl) -{ - tl->pin_count++; -} - -static void mock_timeline_unpin(struct i915_timeline *tl) -{ - GEM_BUG_ON(!tl->pin_count); - tl->pin_count--; -} - -static struct intel_ring *mock_ring(struct intel_engine_cs *engine) -{ - const unsigned long sz = PAGE_SIZE / 2; - struct mock_ring *ring; - - ring = kzalloc(sizeof(*ring) + sz, GFP_KERNEL); - if (!ring) - return NULL; - - if (i915_timeline_init(engine->i915, &ring->timeline, NULL)) { - kfree(ring); - return NULL; - } - - kref_init(&ring->base.ref); - ring->base.size = sz; - ring->base.effective_size = sz; - ring->base.vaddr = (void *)(ring + 1); - ring->base.timeline = &ring->timeline; - - INIT_LIST_HEAD(&ring->base.request_list); - intel_ring_update_space(&ring->base); - - return &ring->base; -} - -static void mock_ring_free(struct intel_ring *base) -{ - struct mock_ring *ring = container_of(base, typeof(*ring), base); - - i915_timeline_fini(&ring->timeline); - kfree(ring); -} - -static struct i915_request *first_request(struct mock_engine *engine) -{ - return list_first_entry_or_null(&engine->hw_queue, - struct i915_request, - mock.link); -} - -static void advance(struct i915_request *request) -{ - list_del_init(&request->mock.link); - i915_request_mark_complete(request); - GEM_BUG_ON(!i915_request_completed(request)); - - intel_engine_queue_breadcrumbs(request->engine); -} - -static void hw_delay_complete(struct timer_list *t) -{ - struct mock_engine *engine = from_timer(engine, t, hw_delay); - struct i915_request *request; - unsigned long flags; - - spin_lock_irqsave(&engine->hw_lock, flags); - - /* Timer fired, first request is complete */ - request = first_request(engine); - if (request) - advance(request); - - /* - * Also immediately signal any subsequent 0-delay requests, but - * requeue the timer for the next delayed request. 
- */ - while ((request = first_request(engine))) { - if (request->mock.delay) { - mod_timer(&engine->hw_delay, - jiffies + request->mock.delay); - break; - } - - advance(request); - } - - spin_unlock_irqrestore(&engine->hw_lock, flags); -} - -static void mock_context_unpin(struct intel_context *ce) -{ - mock_timeline_unpin(ce->ring->timeline); -} - -static void mock_context_destroy(struct kref *ref) -{ - struct intel_context *ce = container_of(ref, typeof(*ce), ref); - - GEM_BUG_ON(intel_context_is_pinned(ce)); - - if (ce->ring) - mock_ring_free(ce->ring); - - intel_context_free(ce); -} - -static int mock_context_pin(struct intel_context *ce) -{ - if (!ce->ring) { - ce->ring = mock_ring(ce->engine); - if (!ce->ring) - return -ENOMEM; - } - - mock_timeline_pin(ce->ring->timeline); - return 0; -} - -static const struct intel_context_ops mock_context_ops = { - .pin = mock_context_pin, - .unpin = mock_context_unpin, - - .destroy = mock_context_destroy, -}; - -static int mock_request_alloc(struct i915_request *request) -{ - INIT_LIST_HEAD(&request->mock.link); - request->mock.delay = 0; - - return 0; -} - -static int mock_emit_flush(struct i915_request *request, - unsigned int flags) -{ - return 0; -} - -static u32 *mock_emit_breadcrumb(struct i915_request *request, u32 *cs) -{ - return cs; -} - -static void mock_submit_request(struct i915_request *request) -{ - struct mock_engine *engine = - container_of(request->engine, typeof(*engine), base); - unsigned long flags; - - i915_request_submit(request); - - spin_lock_irqsave(&engine->hw_lock, flags); - list_add_tail(&request->mock.link, &engine->hw_queue); - if (list_is_first(&request->mock.link, &engine->hw_queue)) { - if (request->mock.delay) - mod_timer(&engine->hw_delay, - jiffies + request->mock.delay); - else - advance(request); - } - spin_unlock_irqrestore(&engine->hw_lock, flags); -} - -static void mock_reset_prepare(struct intel_engine_cs *engine) -{ -} - -static void mock_reset(struct intel_engine_cs *engine, bool stalled) -{ - GEM_BUG_ON(stalled); -} - -static void mock_reset_finish(struct intel_engine_cs *engine) -{ -} - -static void mock_cancel_requests(struct intel_engine_cs *engine) -{ - struct i915_request *request; - unsigned long flags; - - spin_lock_irqsave(&engine->timeline.lock, flags); - - /* Mark all submitted requests as skipped. 
*/ - list_for_each_entry(request, &engine->timeline.requests, sched.link) { - if (!i915_request_signaled(request)) - dma_fence_set_error(&request->fence, -EIO); - - i915_request_mark_complete(request); - } - - spin_unlock_irqrestore(&engine->timeline.lock, flags); -} - -struct intel_engine_cs *mock_engine(struct drm_i915_private *i915, - const char *name, - int id) -{ - struct mock_engine *engine; - - GEM_BUG_ON(id >= I915_NUM_ENGINES); - - engine = kzalloc(sizeof(*engine) + PAGE_SIZE, GFP_KERNEL); - if (!engine) - return NULL; - - /* minimal engine setup for requests */ - engine->base.i915 = i915; - snprintf(engine->base.name, sizeof(engine->base.name), "%s", name); - engine->base.id = id; - engine->base.mask = BIT(id); - engine->base.status_page.addr = (void *)(engine + 1); - - engine->base.cops = &mock_context_ops; - engine->base.request_alloc = mock_request_alloc; - engine->base.emit_flush = mock_emit_flush; - engine->base.emit_fini_breadcrumb = mock_emit_breadcrumb; - engine->base.submit_request = mock_submit_request; - - engine->base.reset.prepare = mock_reset_prepare; - engine->base.reset.reset = mock_reset; - engine->base.reset.finish = mock_reset_finish; - engine->base.cancel_requests = mock_cancel_requests; - - if (i915_timeline_init(i915, &engine->base.timeline, NULL)) - goto err_free; - i915_timeline_set_subclass(&engine->base.timeline, TIMELINE_ENGINE); - - intel_engine_init_breadcrumbs(&engine->base); - - /* fake hw queue */ - spin_lock_init(&engine->hw_lock); - timer_setup(&engine->hw_delay, hw_delay_complete, 0); - INIT_LIST_HEAD(&engine->hw_queue); - - if (pin_context(i915->kernel_context, &engine->base, - &engine->base.kernel_context)) - goto err_breadcrumbs; - - return &engine->base; - -err_breadcrumbs: - intel_engine_fini_breadcrumbs(&engine->base); - i915_timeline_fini(&engine->base.timeline); -err_free: - kfree(engine); - return NULL; -} - -void mock_engine_flush(struct intel_engine_cs *engine) -{ - struct mock_engine *mock = - container_of(engine, typeof(*mock), base); - struct i915_request *request, *rn; - - del_timer_sync(&mock->hw_delay); - - spin_lock_irq(&mock->hw_lock); - list_for_each_entry_safe(request, rn, &mock->hw_queue, mock.link) - advance(request); - spin_unlock_irq(&mock->hw_lock); -} - -void mock_engine_reset(struct intel_engine_cs *engine) -{ -} - -void mock_engine_free(struct intel_engine_cs *engine) -{ - struct mock_engine *mock = - container_of(engine, typeof(*mock), base); - struct intel_context *ce; - - GEM_BUG_ON(timer_pending(&mock->hw_delay)); - - ce = fetch_and_zero(&engine->last_retired_context); - if (ce) - intel_context_unpin(ce); - - intel_context_unpin(engine->kernel_context); - - intel_engine_fini_breadcrumbs(engine); - i915_timeline_fini(&engine->timeline); - - kfree(engine); -} diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.h b/drivers/gpu/drm/i915/selftests/mock_engine.h deleted file mode 100644 index b9cc3a245f16..000000000000 --- a/drivers/gpu/drm/i915/selftests/mock_engine.h +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright © 2016 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above 
copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - * - */ - -#ifndef __MOCK_ENGINE_H__ -#define __MOCK_ENGINE_H__ - -#include <linux/list.h> -#include <linux/spinlock.h> -#include <linux/timer.h> - -#include "../intel_ringbuffer.h" - -struct mock_engine { - struct intel_engine_cs base; - - spinlock_t hw_lock; - struct list_head hw_queue; - struct timer_list hw_delay; -}; - -struct intel_engine_cs *mock_engine(struct drm_i915_private *i915, - const char *name, - int id); -void mock_engine_flush(struct intel_engine_cs *engine); -void mock_engine_reset(struct intel_engine_cs *engine); -void mock_engine_free(struct intel_engine_cs *engine); - -#endif /* !__MOCK_ENGINE_H__ */ diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index 60bbf8b4df40..9fd02025d382 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -25,7 +25,8 @@ #include <linux/pm_domain.h> #include <linux/pm_runtime.h> -#include "mock_engine.h" +#include "gt/mock_engine.h" + #include "mock_context.h" #include "mock_request.h" #include "mock_gem_device.h" @@ -40,11 +41,10 @@ void mock_device_flush(struct drm_i915_private *i915) lockdep_assert_held(&i915->drm.struct_mutex); - for_each_engine(engine, i915, id) - mock_engine_flush(engine); - - i915_retire_requests(i915); - GEM_BUG_ON(i915->gt.active_requests); + do { + for_each_engine(engine, i915, id) + mock_engine_flush(engine); + } while (i915_retire_requests(i915)); } static void mock_device_release(struct drm_device *dev) @@ -58,8 +58,7 @@ static void mock_device_release(struct drm_device *dev) i915_gem_contexts_lost(i915); mutex_unlock(&i915->drm.struct_mutex); - drain_delayed_work(&i915->gt.retire_work); - drain_delayed_work(&i915->gt.idle_work); + flush_work(&i915->gem.idle_work); i915_gem_drain_workqueue(i915); mutex_lock(&i915->drm.struct_mutex); @@ -109,10 +108,6 @@ static void mock_retire_work_handler(struct work_struct *work) static void mock_idle_work_handler(struct work_struct *work) { - struct drm_i915_private *i915 = - container_of(work, typeof(*i915), gt.idle_work.work); - - i915->gt.active_engines = 0; } static int pm_domain_resume(struct device *dev) @@ -184,6 +179,8 @@ struct drm_i915_private *mock_gem_device(void) mock_uncore_init(&i915->uncore); i915_gem_init__mm(i915); + intel_gt_pm_init(i915); + atomic_inc(&i915->gt.wakeref.count); /* disable; no hw support */ init_waitqueue_head(&i915->gpu_error.wait_queue); init_waitqueue_head(&i915->gpu_error.reset_queue); @@ -196,8 +193,8 @@ struct drm_i915_private *mock_gem_device(void) mock_init_contexts(i915); - INIT_DELAYED_WORK(&i915->gt.retire_work, mock_retire_work_handler); - INIT_DELAYED_WORK(&i915->gt.idle_work, mock_idle_work_handler); + INIT_DELAYED_WORK(&i915->gem.retire_work, mock_retire_work_handler); + INIT_WORK(&i915->gem.idle_work, mock_idle_work_handler); i915->gt.awake = true; @@ -211,12 
+208,16 @@ struct drm_i915_private *mock_gem_device(void) mock_init_ggtt(i915, &i915->ggtt); mkwrite_device_info(i915)->engine_mask = BIT(0); - i915->kernel_context = mock_context(i915, NULL); - if (!i915->kernel_context) - goto err_unlock; i915->engine[RCS0] = mock_engine(i915, "mock", RCS0); if (!i915->engine[RCS0]) + goto err_unlock; + + i915->kernel_context = mock_context(i915, NULL); + if (!i915->kernel_context) + goto err_engine; + + if (mock_engine_init(i915->engine[RCS0])) goto err_context; mutex_unlock(&i915->drm.struct_mutex); @@ -227,6 +228,8 @@ struct drm_i915_private *mock_gem_device(void) err_context: i915_gem_contexts_fini(i915); +err_engine: + mock_engine_free(i915->engine[RCS0]); err_unlock: mutex_unlock(&i915->drm.struct_mutex); i915_timelines_fini(i915); diff --git a/drivers/gpu/drm/i915/selftests/mock_request.c b/drivers/gpu/drm/i915/selftests/mock_request.c index d1a7c9608712..b99f7576153c 100644 --- a/drivers/gpu/drm/i915/selftests/mock_request.c +++ b/drivers/gpu/drm/i915/selftests/mock_request.c @@ -22,7 +22,9 @@ * */ -#include "mock_engine.h" +#include "gt/mock_engine.h" + +#include "igt_gem_utils.h" #include "mock_request.h" struct i915_request * @@ -33,7 +35,7 @@ mock_request(struct intel_engine_cs *engine, struct i915_request *request; /* NB the i915->requests slab cache is enlarged to fit mock_request */ - request = i915_request_alloc(engine, context); + request = igt_request_alloc(context, engine); if (IS_ERR(request)) return NULL;
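/*
 * [Editor's note -- inferred sketch, not part of the patch.]
 *
 * Nearly every hunk in these selftest files performs the same mechanical
 * conversion: i915_request_alloc(engine, ctx) becomes
 * igt_request_alloc(ctx, engine). The helper's definition lives in
 * igt_gem_utils.c and is not shown in this diff; judging by the call
 * sites it presumably looks up the context's intel_context for the
 * engine, builds a request on it, and drops the local reference, along
 * these lines (the use of i915_gem_context_get_engine() and
 * intel_context_create_request() is an assumption, not quoted source):
 */
struct i915_request *
igt_request_alloc(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
{
	struct intel_context *ce;
	struct i915_request *rq;

	ce = i915_gem_context_get_engine(ctx, engine->id);
	if (IS_ERR(ce))
		return ERR_CAST(ce);

	rq = intel_context_create_request(ce);
	intel_context_put(ce);

	return rq;
}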