author	Chris Wilson <chris@chris-wilson.co.uk>	2019-11-01 10:15:28 +0000
committer	Chris Wilson <chris@chris-wilson.co.uk>	2019-11-01 13:06:35 +0000
commit	f05816cbbcd0aa0af1efdd888ea6964644197e13 (patch)
tree	f0b3dca9aa8c1ca4f458b6afad2f60d976c71d7f /drivers/gpu/drm/i915/selftests/igt_spinner.c
parent	1883e2999f045e1fd6f76a7f30288a5312085289 (diff)
download	linux-f05816cbbcd0aa0af1efdd888ea6964644197e13.tar.bz2
drm/i915/selftests: Spin on all engines simultaneously
Vanshidhar Konda asked for the simplest test "to verify that the kernel can submit and hardware can execute batch buffers on all the command streamers in parallel." We have a number of tests in userspace that submit load to each engine and verify that it is present, but strictly we have no selftest to prove that the kernel can _simultaneously_ execute on all known engines. (We have tests to demonstrate that we can submit to HW in parallel, but we don't insist that they execute in parallel.)

v2: Improve the igt_spinner support for older gen.

Suggested-by: Vanshidhar Konda <vanshidhar.r.konda@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Vanshidhar Konda <vanshidhar.r.konda@intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Vanshidhar Konda <vanshidhar.r.konda@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191101101528.10553-1-chris@chris-wilson.co.uk
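For a sense of how the spinner is meant to be driven, the sketch below shows the shape of a "spin on all engines at once" loop: one spinner and one request per engine, everything submitted before anything is checked, and only then do we insist that each engine is actually executing. This is illustrative only, not the exact test added by this series; the for_each_engine() signature and the get_spinner_context() helper are assumptions standing in for the real engine iteration and context setup.

static int spin_all_engines(struct intel_gt *gt)
{
	struct i915_request *rq[I915_NUM_ENGINES] = {};
	struct igt_spinner spin[I915_NUM_ENGINES];
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	/* One spinner per engine; each owns its own batch and HWS slot. */
	for_each_engine(engine, gt, id) {
		struct intel_context *ce = get_spinner_context(engine); /* hypothetical helper */

		err = igt_spinner_init(&spin[id], gt);
		if (err)
			break;

		rq[id] = igt_spinner_create_request(&spin[id], ce, MI_NOOP);
		if (IS_ERR(rq[id])) {
			err = PTR_ERR(rq[id]);
			rq[id] = NULL;
			igt_spinner_fini(&spin[id]);
			break;
		}

		/* Submit, but do not wait yet: every engine gets work first. */
		i915_request_get(rq[id]);
		i915_request_add(rq[id]);
	}

	/* Only now check that each engine is busy running its spinner. */
	for_each_engine(engine, gt, id) {
		if (!rq[id])
			continue;

		if (!igt_wait_for_spinner(&spin[id], rq[id]))
			err = -ETIME;

		igt_spinner_end(&spin[id]);
		i915_request_put(rq[id]);
		igt_spinner_fini(&spin[id]);
	}

	return err;
}

The ordering is the point: igt_wait_for_spinner() is not called until every engine has a request queued, so passing requires the engines to execute in parallel rather than one after another.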
Diffstat (limited to 'drivers/gpu/drm/i915/selftests/igt_spinner.c')
-rw-r--r--	drivers/gpu/drm/i915/selftests/igt_spinner.c	40
1 file changed, 33 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index ee8450b871da..e8a58fe49c39 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -15,8 +15,6 @@ int igt_spinner_init(struct igt_spinner *spin, struct intel_gt *gt)
 	void *vaddr;
 	int err;
 
-	GEM_BUG_ON(INTEL_GEN(gt->i915) < 8);
-
 	memset(spin, 0, sizeof(*spin));
 	spin->gt = gt;
 
@@ -95,11 +93,15 @@ igt_spinner_create_request(struct igt_spinner *spin,
 	struct intel_engine_cs *engine = ce->engine;
 	struct i915_request *rq = NULL;
 	struct i915_vma *hws, *vma;
+	unsigned int flags;
 	u32 *batch;
 	int err;
 
 	GEM_BUG_ON(spin->gt != ce->vm->gt);
 
+	if (!intel_engine_can_store_dword(ce->engine))
+		return ERR_PTR(-ENODEV);
+
 	vma = i915_vma_instance(spin->obj, ce->vm, NULL);
 	if (IS_ERR(vma))
 		return ERR_CAST(vma);
@@ -132,16 +134,37 @@ igt_spinner_create_request(struct igt_spinner *spin,
 
 	batch = spin->batch;
 
-	*batch++ = MI_STORE_DWORD_IMM_GEN4;
-	*batch++ = lower_32_bits(hws_address(hws, rq));
-	*batch++ = upper_32_bits(hws_address(hws, rq));
+	if (INTEL_GEN(rq->i915) >= 8) {
+		*batch++ = MI_STORE_DWORD_IMM_GEN4;
+		*batch++ = lower_32_bits(hws_address(hws, rq));
+		*batch++ = upper_32_bits(hws_address(hws, rq));
+	} else if (INTEL_GEN(rq->i915) >= 6) {
+		*batch++ = MI_STORE_DWORD_IMM_GEN4;
+		*batch++ = 0;
+		*batch++ = hws_address(hws, rq);
+	} else if (INTEL_GEN(rq->i915) >= 4) {
+		*batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+		*batch++ = 0;
+		*batch++ = hws_address(hws, rq);
+	} else {
+		*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
+		*batch++ = hws_address(hws, rq);
+	}
 	*batch++ = rq->fence.seqno;
 
 	*batch++ = arbitration_command;
 
-	*batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
+	if (INTEL_GEN(rq->i915) >= 8)
+		*batch++ = MI_BATCH_BUFFER_START | BIT(8) | 1;
+	else if (IS_HASWELL(rq->i915))
+		*batch++ = MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW;
+	else if (INTEL_GEN(rq->i915) >= 6)
+		*batch++ = MI_BATCH_BUFFER_START;
+	else
+		*batch++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
 	*batch++ = lower_32_bits(vma->node.start);
 	*batch++ = upper_32_bits(vma->node.start);
+
 	*batch++ = MI_BATCH_BUFFER_END; /* not reached */
 
 	intel_gt_chipset_flush(engine->gt);
@@ -153,7 +176,10 @@ igt_spinner_create_request(struct igt_spinner *spin,
 			goto cancel_rq;
 	}
 
-	err = engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);
+	flags = 0;
+	if (INTEL_GEN(rq->i915) <= 5)
+		flags |= I915_DISPATCH_SECURE;
+	err = engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);
 
 cancel_rq:
 	if (err) {
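One consequence of replacing the GEM_BUG_ON() with the intel_engine_can_store_dword() check is that igt_spinner_create_request() can now return -ENODEV for engines that cannot spin at all. A caller-side sketch (illustrative, not code from this patch) of treating that as "skip this engine" rather than a test failure:

static int try_spin(struct igt_spinner *spin, struct intel_context *ce)
{
	struct i915_request *rq;
	int err = 0;

	rq = igt_spinner_create_request(spin, ce, MI_NOOP);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		/* No MI_STORE_DWORD support means this engine cannot spin. */
		return err == -ENODEV ? 0 : err;
	}

	i915_request_get(rq);
	i915_request_add(rq);

	if (!igt_wait_for_spinner(spin, rq))
		err = -ETIME;

	igt_spinner_end(spin);
	i915_request_put(rq);
	return err;
}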