Diffstat (limited to 'drivers/gpu/drm/i915/selftests')
17 files changed, 237 insertions(+), 139 deletions(-)
diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c
index fbe4324116d7..d9f439f6219f 100644
--- a/drivers/gpu/drm/i915/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/selftests/huge_pages.c
@@ -919,12 +919,12 @@ gpu_write_dw(struct i915_vma *vma, u64 offset, u32 val)
 		*cmd++ = val;
 	} else if (gen >= 4) {
 		*cmd++ = MI_STORE_DWORD_IMM_GEN4 |
-			(gen < 6 ? 1 << 22 : 0);
+			(gen < 6 ? MI_USE_GGTT : 0);
 		*cmd++ = 0;
 		*cmd++ = offset;
 		*cmd++ = val;
 	} else {
-		*cmd++ = MI_STORE_DWORD_IMM | 1 << 22;
+		*cmd++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
 		*cmd++ = offset;
 		*cmd++ = val;
 	}
@@ -985,7 +985,10 @@ static int gpu_write(struct i915_vma *vma,
 		goto err_request;
 	}
 
-	i915_vma_move_to_active(batch, rq, 0);
+	err = i915_vma_move_to_active(batch, rq, 0);
+	if (err)
+		goto err_request;
+
 	i915_gem_object_set_active_reference(batch->obj);
 	i915_vma_unpin(batch);
 	i915_vma_close(batch);
@@ -996,11 +999,9 @@ static int gpu_write(struct i915_vma *vma,
 	if (err)
 		goto err_request;
 
-	i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
-
-	reservation_object_lock(vma->resv, NULL);
-	reservation_object_add_excl_fence(vma->resv, &rq->fence);
-	reservation_object_unlock(vma->resv);
+	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+	if (err)
+		i915_request_skip(rq, err);
 
 err_request:
 	i915_request_add(rq);
@@ -1694,7 +1695,7 @@ int i915_gem_huge_page_mock_selftests(void)
 	dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(39));
 
 	mutex_lock(&dev_priv->drm.struct_mutex);
-	ppgtt = i915_ppgtt_create(dev_priv, ERR_PTR(-ENODEV), "mock");
+	ppgtt = i915_ppgtt_create(dev_priv, ERR_PTR(-ENODEV));
 	if (IS_ERR(ppgtt)) {
 		err = PTR_ERR(ppgtt);
 		goto out_unlock;
@@ -1724,7 +1725,7 @@ out_unlock:
 
 	i915_modparams.enable_ppgtt = saved_ppgtt;
 
-	drm_dev_unref(&dev_priv->drm);
+	drm_dev_put(&dev_priv->drm);
 
 	return err;
 }
@@ -1748,6 +1749,9 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
 		return 0;
 	}
 
+	if (i915_terminally_wedged(&dev_priv->gpu_error))
+		return 0;
+
 	file = mock_file(dev_priv);
 	if (IS_ERR(file))
 		return PTR_ERR(file);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
index a4900091ae3d..3a095c37c120 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
@@ -42,11 +42,21 @@ static int cpu_set(struct drm_i915_gem_object *obj,
 	page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
 	map = kmap_atomic(page);
-	if (needs_clflush & CLFLUSH_BEFORE)
+
+	if (needs_clflush & CLFLUSH_BEFORE) {
+		mb();
 		clflush(map+offset_in_page(offset) / sizeof(*map));
+		mb();
+	}
+
 	map[offset_in_page(offset) / sizeof(*map)] = v;
-	if (needs_clflush & CLFLUSH_AFTER)
+
+	if (needs_clflush & CLFLUSH_AFTER) {
+		mb();
 		clflush(map+offset_in_page(offset) / sizeof(*map));
+		mb();
+	}
+
 	kunmap_atomic(map);
 
 	i915_gem_obj_finish_shmem_access(obj);
@@ -68,8 +78,13 @@ static int cpu_get(struct drm_i915_gem_object *obj,
 	page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
 	map = kmap_atomic(page);
-	if (needs_clflush & CLFLUSH_BEFORE)
+
+	if (needs_clflush & CLFLUSH_BEFORE) {
+		mb();
 		clflush(map+offset_in_page(offset) / sizeof(*map));
+		mb();
+	}
+
 	*v = map[offset_in_page(offset) / sizeof(*map)];
 	kunmap_atomic(map);
@@ -210,28 +225,24 @@ static int gpu_set(struct drm_i915_gem_object *obj,
 		*cs++ = upper_32_bits(i915_ggtt_offset(vma) + offset);
 		*cs++ = v;
 	} else if (INTEL_GEN(i915) >= 4) {
-		*cs++ = MI_STORE_DWORD_IMM_GEN4 | 1 << 22;
+		*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
 		*cs++ = 0;
 		*cs++ = i915_ggtt_offset(vma) + offset;
 		*cs++ = v;
 	} else {
-		*cs++ = MI_STORE_DWORD_IMM | 1 << 22;
+		*cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
 		*cs++ = i915_ggtt_offset(vma) + offset;
 		*cs++ = v;
 		*cs++ = MI_NOOP;
 	}
 	intel_ring_advance(rq, cs);
 
-	i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
 	i915_vma_unpin(vma);
 
-	reservation_object_lock(obj->resv, NULL);
-	reservation_object_add_excl_fence(obj->resv, &rq->fence);
-	reservation_object_unlock(obj->resv);
-
 	i915_request_add(rq);
 
-	return 0;
+	return err;
 }
@@ -239,8 +250,16 @@ static bool always_valid(struct drm_i915_private *i915)
 	return true;
 }
 
+static bool needs_fence_registers(struct drm_i915_private *i915)
+{
+	return !i915_terminally_wedged(&i915->gpu_error);
+}
+
 static bool needs_mi_store_dword(struct drm_i915_private *i915)
 {
+	if (i915_terminally_wedged(&i915->gpu_error))
+		return false;
+
 	return intel_engine_can_store_dword(i915->engine[RCS]);
 }
 
@@ -251,7 +270,7 @@ static const struct igt_coherency_mode {
 	bool (*valid)(struct drm_i915_private *i915);
 } igt_coherency_mode[] = {
 	{ "cpu", cpu_set, cpu_get, always_valid },
-	{ "gtt", gtt_set, gtt_get, always_valid },
+	{ "gtt", gtt_set, gtt_get, needs_fence_registers },
 	{ "wc", wc_set, wc_get, always_valid },
 	{ "gpu", gpu_set, NULL, needs_mi_store_dword },
 	{ },
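Reviewer note: the "1 << 22" literals being replaced are the address-space select bit of MI_STORE_DWORD_IMM; i915_reg.h names that same bit MI_MEM_VIRTUAL for 945/g33 and MI_USE_GGTT for g4x onwards. A minimal sketch of the per-generation emission these selftests share; the helper name emit_store_dw is hypothetical, not part of the patch:

	/*
	 * Sketch only: condenses gpu_write_dw()/gpu_set() above. The named
	 * flag documents that bit 22 selects global-GTT (or virtual)
	 * addressing, which a bare "1 << 22" did not.
	 */
	static u32 *emit_store_dw(u32 *cs, int gen, u64 addr, u32 val)
	{
		if (gen >= 8) {
			*cs++ = MI_STORE_DWORD_IMM_GEN4;
			*cs++ = lower_32_bits(addr);
			*cs++ = upper_32_bits(addr);
		} else if (gen >= 4) {
			*cs++ = MI_STORE_DWORD_IMM_GEN4 |
				(gen < 6 ? MI_USE_GGTT : 0);
			*cs++ = 0;
			*cs++ = lower_32_bits(addr);
		} else {
			*cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
			*cs++ = lower_32_bits(addr);
		}
		*cs++ = val;

		return cs;
	}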
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index 90c3c36173ba..ab2590242033 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -63,12 +63,12 @@ gpu_fill_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 value)
 		*cmd++ = value;
 	} else if (gen >= 4) {
 		*cmd++ = MI_STORE_DWORD_IMM_GEN4 |
-			(gen < 6 ? 1 << 22 : 0);
+			(gen < 6 ? MI_USE_GGTT : 0);
 		*cmd++ = 0;
 		*cmd++ = offset;
 		*cmd++ = value;
 	} else {
-		*cmd++ = MI_STORE_DWORD_IMM | 1 << 22;
+		*cmd++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
 		*cmd++ = offset;
 		*cmd++ = value;
 	}
@@ -170,22 +170,26 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
 	if (err)
 		goto err_request;
 
-	i915_vma_move_to_active(batch, rq, 0);
+	err = i915_vma_move_to_active(batch, rq, 0);
+	if (err)
+		goto skip_request;
+
+	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+	if (err)
+		goto skip_request;
+
 	i915_gem_object_set_active_reference(batch->obj);
 	i915_vma_unpin(batch);
 	i915_vma_close(batch);
 
-	i915_vma_move_to_active(vma, rq, 0);
 	i915_vma_unpin(vma);
 
-	reservation_object_lock(obj->resv, NULL);
-	reservation_object_add_excl_fence(obj->resv, &rq->fence);
-	reservation_object_unlock(obj->resv);
-
 	i915_request_add(rq);
 
 	return 0;
 
+skip_request:
+	i915_request_skip(rq, err);
 err_request:
 	i915_request_add(rq);
 err_batch:
@@ -336,11 +340,15 @@ static int igt_ctx_exec(void *arg)
 	bool first_shared_gtt = true;
 	int err = -ENODEV;
 
-	/* Create a few different contexts (with different mm) and write
+	/*
+	 * Create a few different contexts (with different mm) and write
 	 * through each ctx/mm using the GPU making sure those writes end
 	 * up in the expected pages of our obj.
 	 */
 
+	if (!DRIVER_CAPS(i915)->has_logical_contexts)
+		return 0;
+
 	file = mock_file(i915);
 	if (IS_ERR(file))
 		return PTR_ERR(file);
@@ -367,6 +375,9 @@ static int igt_ctx_exec(void *arg)
 		}
 
 		for_each_engine(engine, i915, id) {
+			if (!engine->context_size)
+				continue; /* No logical context support in HW */
+
 			if (!intel_engine_can_store_dword(engine))
 				continue;
@@ -467,7 +478,9 @@ static int __igt_switch_to_kernel_context(struct drm_i915_private *i915,
 		}
 	}
 
-	err = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
+	err = i915_gem_wait_for_idle(i915,
+				     I915_WAIT_LOCKED,
+				     MAX_SCHEDULE_TIMEOUT);
 	if (err)
 		return err;
@@ -586,7 +599,7 @@ int i915_gem_context_mock_selftests(void)
 
 	err = i915_subtests(tests, i915);
 
-	drm_dev_unref(&i915->drm);
+	drm_dev_put(&i915->drm);
 	return err;
 }
 
@@ -599,6 +612,9 @@ int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv)
 	bool fake_alias = false;
 	int err;
 
+	if (i915_terminally_wedged(&dev_priv->gpu_error))
+		return 0;
+
 	/* Install a fake aliasing gtt for exercise */
 	if (USES_PPGTT(dev_priv) && !dev_priv->mm.aliasing_ppgtt) {
 		mutex_lock(&dev_priv->drm.struct_mutex);
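Reviewer note: gpu_fill() above is the template for the new i915_vma_move_to_active() error handling: a request, once allocated, must still be submitted, so late failures are routed through i915_request_skip() rather than leaking the request. A condensed sketch of the pattern; the helper name submit_vma_write is hypothetical:

	static int submit_vma_write(struct i915_vma *vma, struct i915_request *rq)
	{
		int err;

		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
		if (err)
			i915_request_skip(rq, err); /* cancel the payload... */

		i915_request_add(rq); /* ...but always submit the request */

		return err;
	}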
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c
index 89dc25a5a53b..a7055b12e53c 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c
@@ -389,7 +389,7 @@ int i915_gem_dmabuf_mock_selftests(void)
 
 	err = i915_subtests(tests, i915);
 
-	drm_dev_unref(&i915->drm);
+	drm_dev_put(&i915->drm);
 	return err;
 }
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index 2dc72a984d45..128ad1cf0647 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -490,7 +490,7 @@ int i915_gem_evict_mock_selftests(void)
 	err = i915_subtests(tests, i915);
 	mutex_unlock(&i915->drm.struct_mutex);
 
-	drm_dev_unref(&i915->drm);
+	drm_dev_put(&i915->drm);
 	return err;
 }
@@ -500,5 +500,8 @@ int i915_gem_evict_live_selftests(struct drm_i915_private *i915)
 		SUBTEST(igt_evict_contexts),
 	};
 
+	if (i915_terminally_wedged(&i915->gpu_error))
+		return 0;
+
 	return i915_subtests(tests, i915);
 }
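Reviewer note: drm_dev_unref() is the deprecated name for drm_dev_put(), so those hunks are mechanical. The early return added to i915_gem_evict_live_selftests() is the guard nearly every live entry point gains in this series: a terminally wedged GPU cannot execute requests, so the live tests skip rather than report false failures. The shared shape — the function name and the `tests` table stand in for each file's own:

	static int some_live_selftests(struct drm_i915_private *i915)
	{
		if (i915_terminally_wedged(&i915->gpu_error))
			return 0; /* skip: nothing can be submitted to a dead GPU */

		return i915_subtests(tests, i915);
	}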
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index a4060238bef0..600a3bcbd3d6 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -32,6 +32,20 @@
 #include "mock_drm.h"
 #include "mock_gem_device.h"
 
+static void cleanup_freed_objects(struct drm_i915_private *i915)
+{
+	/*
+	 * As we may hold onto the struct_mutex for inordinate lengths of
+	 * time, the NMI khungtaskd detector may fire for the free objects
+	 * worker.
+	 */
+	mutex_unlock(&i915->drm.struct_mutex);
+
+	i915_gem_drain_freed_objects(i915);
+
+	mutex_lock(&i915->drm.struct_mutex);
+}
+
 static void fake_free_pages(struct drm_i915_gem_object *obj,
 			    struct sg_table *pages)
 {
@@ -134,7 +148,7 @@ static int igt_ppgtt_alloc(void *arg)
 {
 	struct drm_i915_private *dev_priv = arg;
 	struct i915_hw_ppgtt *ppgtt;
-	u64 size, last;
+	u64 size, last, limit;
 	int err = 0;
 
 	/* Allocate a ppggt and try to fill the entire range */
@@ -142,20 +156,25 @@ static int igt_ppgtt_alloc(void *arg)
 	if (!USES_PPGTT(dev_priv))
 		return 0;
 
-	mutex_lock(&dev_priv->drm.struct_mutex);
 	ppgtt = __hw_ppgtt_create(dev_priv);
-	if (IS_ERR(ppgtt)) {
-		err = PTR_ERR(ppgtt);
-		goto err_unlock;
-	}
+	if (IS_ERR(ppgtt))
+		return PTR_ERR(ppgtt);
 
 	if (!ppgtt->vm.allocate_va_range)
 		goto err_ppgtt_cleanup;
 
+	/*
+	 * While we only allocate the page tables here and so we could
+	 * address a much larger GTT than we could actually fit into
+	 * RAM, a practical limit is the amount of physical pages in the system.
+	 * This should ensure that we do not run into the oomkiller during
+	 * the test and take down the machine wilfully.
+	 */
+	limit = totalram_pages << PAGE_SHIFT;
+	limit = min(ppgtt->vm.total, limit);
+
 	/* Check we can allocate the entire range */
-	for (size = 4096;
-	     size <= ppgtt->vm.total;
-	     size <<= 2) {
+	for (size = 4096; size <= limit; size <<= 2) {
 		err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, size);
 		if (err) {
 			if (err == -ENOMEM) {
@@ -166,13 +185,13 @@ static int igt_ppgtt_alloc(void *arg)
 			goto err_ppgtt_cleanup;
 		}
 
+		cond_resched();
+
 		ppgtt->vm.clear_range(&ppgtt->vm, 0, size);
 	}
 
 	/* Check we can incrementally allocate the entire range */
-	for (last = 0, size = 4096;
-	     size <= ppgtt->vm.total;
-	     last = size, size <<= 2) {
+	for (last = 0, size = 4096; size <= limit; last = size, size <<= 2) {
 		err = ppgtt->vm.allocate_va_range(&ppgtt->vm,
 						  last, size - last);
 		if (err) {
@@ -183,12 +202,13 @@ static int igt_ppgtt_alloc(void *arg)
 			}
 			goto err_ppgtt_cleanup;
 		}
+
+		cond_resched();
 	}
 
 err_ppgtt_cleanup:
-	ppgtt->vm.cleanup(&ppgtt->vm);
-	kfree(ppgtt);
-err_unlock:
+	mutex_lock(&dev_priv->drm.struct_mutex);
+	i915_ppgtt_put(ppgtt);
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 	return err;
 }
@@ -291,6 +311,8 @@ static int lowlevel_hole(struct drm_i915_private *i915,
 
 		i915_gem_object_put(obj);
 		kfree(order);
+
+		cleanup_freed_objects(i915);
 	}
 
 	return 0;
@@ -519,6 +541,7 @@ static int fill_hole(struct drm_i915_private *i915,
 		}
 
 		close_object_list(&objects, vm);
+		cleanup_freed_objects(i915);
 	}
 
 	return 0;
@@ -605,6 +628,8 @@ err_put:
 		i915_gem_object_put(obj);
 		if (err)
 			return err;
+
+		cleanup_freed_objects(i915);
 	}
 
 	return 0;
@@ -789,6 +814,8 @@ err_obj:
 		kfree(order);
 		if (err)
 			return err;
+
+		cleanup_freed_objects(i915);
 	}
 
 	return 0;
@@ -857,6 +884,7 @@ static int __shrink_hole(struct drm_i915_private *i915,
 	}
 
 	close_object_list(&objects, vm);
+	cleanup_freed_objects(i915);
 	return err;
 }
@@ -949,6 +977,7 @@ static int shrink_boom(struct drm_i915_private *i915,
 		i915_gem_object_put(explode);
 
 		memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
+		cleanup_freed_objects(i915);
 	}
 
 	return 0;
@@ -980,7 +1009,7 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv,
 		return PTR_ERR(file);
 
 	mutex_lock(&dev_priv->drm.struct_mutex);
-	ppgtt = i915_ppgtt_create(dev_priv, file->driver_priv, "mock");
+	ppgtt = i915_ppgtt_create(dev_priv, file->driver_priv);
 	if (IS_ERR(ppgtt)) {
 		err = PTR_ERR(ppgtt);
 		goto out_unlock;
@@ -1644,7 +1673,7 @@ int i915_gem_gtt_mock_selftests(void)
 
 	err = i915_subtests(tests, i915);
 	mutex_unlock(&i915->drm.struct_mutex);
 
-	drm_dev_unref(&i915->drm);
+	drm_dev_put(&i915->drm);
 	return err;
 }
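Reviewer note: igt_ppgtt_alloc() used to sweep sizes up to ppgtt->vm.total, which for a 48-bit ppGTT means allocating page tables toward 256TiB and inviting the oomkiller. The new clamp bounds the sweep by physical memory, and the cond_resched() calls keep the long loop from tripping the hung-task watchdog, the same khungtaskd concern that motivates cleanup_freed_objects(). A sketch of the clamp with a worked bound:

	u64 limit, size;

	/*
	 * Sketch: on a 16GiB machine with a 48-bit ppGTT, limit becomes
	 * 16GiB, so the walk covers 4KiB..16GiB instead of 4KiB..256TiB.
	 */
	limit = totalram_pages << PAGE_SHIFT;
	limit = min(ppgtt->vm.total, limit);

	for (size = 4096; size <= limit; size <<= 2) {
		/* ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, size) as above */
		cond_resched(); /* long loop; placate the hung-task watchdog */
	}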
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
index 2b2dde94526f..d77acf4cc439 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
@@ -169,9 +169,16 @@ static u64 tiled_offset(const struct tile *tile, u64 v)
 		v += y * tile->width;
 		v += div64_u64_rem(x, tile->width, &x) << tile->size;
 		v += x;
-	} else {
+	} else if (tile->width == 128) {
 		const unsigned int ytile_span = 16;
-		const unsigned int ytile_height = 32 * ytile_span;
+		const unsigned int ytile_height = 512;
+
+		v += y * ytile_span;
+		v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
+		v += x;
+	} else {
+		const unsigned int ytile_span = 32;
+		const unsigned int ytile_height = 256;
 
 		v += y * ytile_span;
 		v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
@@ -288,6 +295,8 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
 		kunmap(p);
 		if (err)
 			return err;
+
+		i915_vma_destroy(vma);
 	}
 
 	return 0;
@@ -347,6 +356,14 @@ static int igt_partial_tiling(void *arg)
 		unsigned int pitch;
 		struct tile tile;
 
+		if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
+			/*
+			 * The swizzling pattern is actually unknown as it
+			 * varies based on physical address of each page.
+			 * See i915_gem_detect_bit_6_swizzle().
+			 */
+			break;
+
 		tile.tiling = tiling;
 		switch (tiling) {
 		case I915_TILING_X:
@@ -357,8 +374,8 @@ static int igt_partial_tiling(void *arg)
 			break;
 		}
 
-		if (tile.swizzle == I915_BIT_6_SWIZZLE_UNKNOWN ||
-		    tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
+		GEM_BUG_ON(tile.swizzle == I915_BIT_6_SWIZZLE_UNKNOWN);
+		if (tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
 			continue;
 
 		if (INTEL_GEN(i915) <= 2) {
@@ -454,12 +471,14 @@ static int make_obj_busy(struct drm_i915_gem_object *obj)
 		return PTR_ERR(rq);
 	}
 
-	i915_vma_move_to_active(vma, rq, 0);
+	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+
 	i915_request_add(rq);
 
-	i915_gem_object_set_active_reference(obj);
+	__i915_gem_object_release_unless_active(obj);
 	i915_vma_unpin(vma);
-	return 0;
+
+	return err;
 }
@@ -488,6 +507,15 @@ static int igt_mmap_offset_exhaustion(void *arg)
 	u64 hole_start, hole_end;
 	int loop, err;
 
+	/* Disable background reaper */
+	mutex_lock(&i915->drm.struct_mutex);
+	if (!i915->gt.active_requests++)
+		i915_gem_unpark(i915);
+	mutex_unlock(&i915->drm.struct_mutex);
+	cancel_delayed_work_sync(&i915->gt.retire_work);
+	cancel_delayed_work_sync(&i915->gt.idle_work);
+	GEM_BUG_ON(!i915->gt.awake);
+
 	/* Trim the device mmap space to only a page */
 	memset(&resv, 0, sizeof(resv));
 	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
@@ -496,7 +524,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
 		err = drm_mm_reserve_node(mm, &resv);
 		if (err) {
 			pr_err("Failed to trim VMA manager, err=%d\n", err);
-			return err;
+			goto out_park;
 		}
 		break;
 	}
@@ -538,6 +566,9 @@ static int igt_mmap_offset_exhaustion(void *arg)
 
 	/* Now fill with busy dead objects that we expect to reap */
 	for (loop = 0; loop < 3; loop++) {
+		if (i915_terminally_wedged(&i915->gpu_error))
+			break;
+
 		obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
 		if (IS_ERR(obj)) {
 			err = PTR_ERR(obj);
@@ -554,6 +585,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
 			goto err_obj;
 		}
 
+		/* NB we rely on the _active_ reference to access obj now */
 		GEM_BUG_ON(!i915_gem_object_is_active(obj));
 		err = i915_gem_object_create_mmap_offset(obj);
 		if (err) {
@@ -565,6 +597,13 @@ static int igt_mmap_offset_exhaustion(void *arg)
 
 out:
 	drm_mm_remove_node(&resv);
+out_park:
+	mutex_lock(&i915->drm.struct_mutex);
+	if (--i915->gt.active_requests)
+		queue_delayed_work(i915->wq, &i915->gt.retire_work, 0);
+	else
+		queue_delayed_work(i915->wq, &i915->gt.idle_work, 0);
+	mutex_unlock(&i915->drm.struct_mutex);
 	return err;
 err_obj:
 	i915_gem_object_put(obj);
@@ -586,7 +625,7 @@ int i915_gem_object_mock_selftests(void)
 
 	err = i915_subtests(tests, i915);
 
-	drm_dev_unref(&i915->drm);
+	drm_dev_put(&i915->drm);
 	return err;
 }
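Reviewer note: in tiled_offset(), the old single Y-tile branch computed ytile_height as 32 * ytile_span = 512; the rewrite keeps that geometry, with an explicit 512, for tile->width == 128 and adds a 32-byte-span geometry stepping 256 bytes per column. A worked restatement of the 128-wide branch; the helper name is hypothetical:

	/*
	 * Sketch: linear offset of byte (x, y) in a Y-major layout whose
	 * columns are 16 bytes wide; 512 / 16 = 32 rows per column step,
	 * matching the old 32 * ytile_span.
	 */
	static u64 ytile128_offset(u64 x, u64 y)
	{
		const u64 span = 16, column = 512;

		return y * span + div64_u64(x, span) * column + (x & (span - 1));
	}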
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index 63cd9486cc13..c4aac6141e04 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -262,7 +262,7 @@ int i915_request_mock_selftests(void)
 		return -ENOMEM;
 
 	err = i915_subtests(tests, i915);
-	drm_dev_unref(&i915->drm);
+	drm_dev_put(&i915->drm);
 
 	return err;
 }
@@ -286,7 +286,9 @@ static int begin_live_test(struct live_test *t,
 	t->func = func;
 	t->name = name;
 
-	err = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
+	err = i915_gem_wait_for_idle(i915,
+				     I915_WAIT_LOCKED,
+				     MAX_SCHEDULE_TIMEOUT);
 	if (err) {
 		pr_err("%s(%s): failed to idle before, with err=%d!",
		       func, name, err);
@@ -594,11 +596,8 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
 	} else if (gen >= 6) {
 		*cmd++ = MI_BATCH_BUFFER_START | 1 << 8;
 		*cmd++ = lower_32_bits(vma->node.start);
-	} else if (gen >= 4) {
-		*cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
-		*cmd++ = lower_32_bits(vma->node.start);
 	} else {
-		*cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | 1;
+		*cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
 		*cmd++ = lower_32_bits(vma->node.start);
 	}
 	*cmd++ = MI_BATCH_BUFFER_END; /* terminate early in case of error */
@@ -678,7 +677,9 @@ static int live_all_engines(void *arg)
 			i915_gem_object_set_active_reference(batch->obj);
 		}
 
-		i915_vma_move_to_active(batch, request[id], 0);
+		err = i915_vma_move_to_active(batch, request[id], 0);
+		GEM_BUG_ON(err);
+
 		i915_request_get(request[id]);
 		i915_request_add(request[id]);
 	}
@@ -788,7 +789,9 @@ static int live_sequential_engines(void *arg)
 		GEM_BUG_ON(err);
 		request[id]->batch = batch;
 
-		i915_vma_move_to_active(batch, request[id], 0);
+		err = i915_vma_move_to_active(batch, request[id], 0);
+		GEM_BUG_ON(err);
+
 		i915_gem_object_set_active_reference(batch->obj);
 		i915_vma_get(batch);
 
@@ -862,5 +865,9 @@ int i915_request_live_selftests(struct drm_i915_private *i915)
 		SUBTEST(live_sequential_engines),
 		SUBTEST(live_empty_request),
 	};
 
+	if (i915_terminally_wedged(&i915->gpu_error))
+		return 0;
+
 	return i915_subtests(tests, i915);
 }
diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
index 8400a8cc5cf2..ffa74290e054 100644
--- a/drivers/gpu/drm/i915/selftests/i915_vma.c
+++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
@@ -733,7 +733,7 @@ int i915_vma_mock_selftests(void)
 	err = i915_subtests(tests, i915);
 	mutex_unlock(&i915->drm.struct_mutex);
 
-	drm_dev_unref(&i915->drm);
+	drm_dev_put(&i915->drm);
 	return err;
 }
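Reviewer note: i915_gem_wait_for_idle() now takes an explicit timeout as a third argument. Callers that must wait out all work, such as begin_live_test() above, spell the old behaviour as MAX_SCHEDULE_TIMEOUT, while igt_flush_test() below passes a short budget and treats -ETIME as a dead GPU. The two call shapes, as used in this series (i915, flags and err from the surrounding functions):

	/* wait indefinitely: the previous implicit behaviour, now explicit */
	err = i915_gem_wait_for_idle(i915,
				     I915_WAIT_LOCKED,
				     MAX_SCHEDULE_TIMEOUT);

	/* bounded wait: -ETIME means the GPU failed to idle in time */
	err = i915_gem_wait_for_idle(i915, flags, HZ / 5);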
diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.c b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
index 0d06f559243f..af66e3d4e23a 100644
--- a/drivers/gpu/drm/i915/selftests/igt_flush_test.c
+++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
@@ -9,52 +9,8 @@
 #include "../i915_selftest.h"
 #include "igt_flush_test.h"
 
-struct wedge_me {
-	struct delayed_work work;
-	struct drm_i915_private *i915;
-	const void *symbol;
-};
-
-static void wedge_me(struct work_struct *work)
-{
-	struct wedge_me *w = container_of(work, typeof(*w), work.work);
-
-	pr_err("%pS timed out, cancelling all further testing.\n", w->symbol);
-
-	GEM_TRACE("%pS timed out.\n", w->symbol);
-	GEM_TRACE_DUMP();
-
-	i915_gem_set_wedged(w->i915);
-}
-
-static void __init_wedge(struct wedge_me *w,
-			 struct drm_i915_private *i915,
-			 long timeout,
-			 const void *symbol)
-{
-	w->i915 = i915;
-	w->symbol = symbol;
-
-	INIT_DELAYED_WORK_ONSTACK(&w->work, wedge_me);
-	schedule_delayed_work(&w->work, timeout);
-}
-
-static void __fini_wedge(struct wedge_me *w)
-{
-	cancel_delayed_work_sync(&w->work);
-	destroy_delayed_work_on_stack(&w->work);
-	w->i915 = NULL;
-}
-
-#define wedge_on_timeout(W, DEV, TIMEOUT)				\
-	for (__init_wedge((W), (DEV), (TIMEOUT),			\
-			  __builtin_return_address(0));			\
-	     (W)->i915;							\
-	     __fini_wedge((W)))
-
 int igt_flush_test(struct drm_i915_private *i915, unsigned int flags)
 {
-	struct wedge_me w;
-
 	cond_resched();
 
 	if (flags & I915_WAIT_LOCKED &&
@@ -63,8 +19,15 @@ int igt_flush_test(struct drm_i915_private *i915, unsigned int flags)
 		i915_gem_set_wedged(i915);
 	}
 
-	wedge_on_timeout(&w, i915, HZ)
-		i915_gem_wait_for_idle(i915, flags);
+	if (i915_gem_wait_for_idle(i915, flags, HZ / 5) == -ETIME) {
+		pr_err("%pS timed out, cancelling all further testing.\n",
+		       __builtin_return_address(0));
+
+		GEM_TRACE("%pS timed out.\n", __builtin_return_address(0));
+		GEM_TRACE_DUMP();
+
+		i915_gem_set_wedged(i915);
+	}
 
 	return i915_terminally_wedged(&i915->gpu_error) ? -EIO : 0;
 }
diff --git a/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c b/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
index d6926e7820e5..f03b407fdbe2 100644
--- a/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
@@ -464,7 +464,7 @@ int intel_breadcrumbs_mock_selftests(void)
 		return -ENOMEM;
 
 	err = i915_subtests(tests, i915->engine[RCS]);
-	drm_dev_unref(&i915->drm);
+	drm_dev_put(&i915->drm);
 
 	return err;
 }
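Reviewer note: the deleted wedge_me machinery armed an on-stack delayed work item purely to wedge the GPU if the idle wait overran. With the timeout now a parameter of the wait itself, igt_flush_test() handles the overrun synchronously; a condensed sketch of the replacement:

	/* Sketch: a bounded wait replaces the on-stack watchdog work item. */
	if (i915_gem_wait_for_idle(i915, flags, HZ / 5) == -ETIME) {
		GEM_TRACE_DUMP(); /* capture GPU state before killing it */
		i915_gem_set_wedged(i915); /* cancels all further testing */
	}

	return i915_terminally_wedged(&i915->gpu_error) ? -EIO : 0;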
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index fe7d3190ebfe..73462a65a330 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -130,13 +130,19 @@ static int emit_recurse_batch(struct hang *h,
 	if (err)
 		goto unpin_vma;
 
-	i915_vma_move_to_active(vma, rq, 0);
+	err = i915_vma_move_to_active(vma, rq, 0);
+	if (err)
+		goto unpin_hws;
+
 	if (!i915_gem_object_has_active_reference(vma->obj)) {
 		i915_gem_object_get(vma->obj);
 		i915_gem_object_set_active_reference(vma->obj);
 	}
 
-	i915_vma_move_to_active(hws, rq, 0);
+	err = i915_vma_move_to_active(hws, rq, 0);
+	if (err)
+		goto unpin_hws;
+
 	if (!i915_gem_object_has_active_reference(hws->obj)) {
 		i915_gem_object_get(hws->obj);
 		i915_gem_object_set_active_reference(hws->obj);
@@ -171,7 +177,7 @@ static int emit_recurse_batch(struct hang *h,
 		*batch++ = MI_BATCH_BUFFER_START | 1 << 8;
 		*batch++ = lower_32_bits(vma->node.start);
 	} else if (INTEL_GEN(i915) >= 4) {
-		*batch++ = MI_STORE_DWORD_IMM_GEN4 | 1 << 22;
+		*batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
 		*batch++ = 0;
 		*batch++ = lower_32_bits(hws_address(hws, rq));
 		*batch++ = rq->fence.seqno;
@@ -184,7 +190,7 @@ static int emit_recurse_batch(struct hang *h,
 		*batch++ = MI_BATCH_BUFFER_START | 2 << 6;
 		*batch++ = lower_32_bits(vma->node.start);
 	} else {
-		*batch++ = MI_STORE_DWORD_IMM;
+		*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
 		*batch++ = lower_32_bits(hws_address(hws, rq));
 		*batch++ = rq->fence.seqno;
 		*batch++ = MI_ARB_CHECK;
@@ -193,7 +199,7 @@ static int emit_recurse_batch(struct hang *h,
 		batch += 1024 / sizeof(*batch);
 
 		*batch++ = MI_ARB_CHECK;
-		*batch++ = MI_BATCH_BUFFER_START | 2 << 6 | 1;
+		*batch++ = MI_BATCH_BUFFER_START | 2 << 6;
 		*batch++ = lower_32_bits(vma->node.start);
 	}
 	*batch++ = MI_BATCH_BUFFER_END; /* not reached */
@@ -205,6 +211,7 @@ static int emit_recurse_batch(struct hang *h,
 
 	err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);
 
+unpin_hws:
 	i915_vma_unpin(hws);
 unpin_vma:
 	i915_vma_unpin(vma);
@@ -1243,6 +1250,9 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
 	if (!intel_has_gpu_reset(i915))
 		return 0;
 
+	if (i915_terminally_wedged(&i915->gpu_error))
+		return -EIO; /* we're long past hope of a successful reset */
+
 	intel_runtime_pm_get(i915);
 	saved_hangcheck = fetch_and_zero(&i915_modparams.enable_hangcheck);
diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
index ea27c7cfbf96..636cb68191e3 100644
--- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
@@ -104,13 +104,19 @@ static int emit_recurse_batch(struct spinner *spin,
 	if (err)
 		goto unpin_vma;
 
-	i915_vma_move_to_active(vma, rq, 0);
+	err = i915_vma_move_to_active(vma, rq, 0);
+	if (err)
+		goto unpin_hws;
+
 	if (!i915_gem_object_has_active_reference(vma->obj)) {
 		i915_gem_object_get(vma->obj);
 		i915_gem_object_set_active_reference(vma->obj);
 	}
 
-	i915_vma_move_to_active(hws, rq, 0);
+	err = i915_vma_move_to_active(hws, rq, 0);
+	if (err)
+		goto unpin_hws;
+
 	if (!i915_gem_object_has_active_reference(hws->obj)) {
 		i915_gem_object_get(hws->obj);
 		i915_gem_object_set_active_reference(hws->obj);
@@ -134,6 +140,7 @@ static int emit_recurse_batch(struct spinner *spin,
 
 	err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);
 
+unpin_hws:
 	i915_vma_unpin(hws);
 unpin_vma:
 	i915_vma_unpin(vma);
@@ -455,5 +462,8 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
 	if (!HAS_EXECLISTS(i915))
 		return 0;
 
+	if (i915_terminally_wedged(&i915->gpu_error))
+		return 0;
+
 	return i915_subtests(tests, i915);
 }
diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
index e1ea2d2bedd2..fafdec3fe83e 100644
--- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
@@ -49,6 +49,10 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
 		goto err_pin;
 	}
 
+	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+	if (err)
+		goto err_req;
+
 	srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
 	if (INTEL_GEN(ctx->i915) >= 8)
 		srm++;
@@ -67,11 +71,6 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
 	}
 	intel_ring_advance(rq, cs);
 
-	i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
-	reservation_object_lock(vma->resv, NULL);
-	reservation_object_add_excl_fence(vma->resv, &rq->fence);
-	reservation_object_unlock(vma->resv);
-
 	i915_gem_object_get(result);
 	i915_gem_object_set_active_reference(result);
 
@@ -283,6 +282,9 @@ int intel_workarounds_live_selftests(struct drm_i915_private *i915)
 	};
 	int err;
 
+	if (i915_terminally_wedged(&i915->gpu_error))
+		return 0;
+
 	mutex_lock(&i915->drm.struct_mutex);
 	err = i915_subtests(tests, i915);
 	mutex_unlock(&i915->drm.struct_mutex);
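Reviewer note: both emit_recurse_batch() implementations gain an unpin_hws label so the now-checked i915_vma_move_to_active() calls unwind both pins; note also that intel_hangcheck_live_selftests() returns -EIO, not 0, on a wedged device, since an unrecovered GPU is exactly the failure the reset tests exist to catch. The tail of emit_recurse_batch(), condensed as a sketch:

	/* Sketch: the pins are dropped on every path, success or error. */
	err = i915_vma_move_to_active(vma, rq, 0);
	if (err)
		goto unpin_hws;

	err = i915_vma_move_to_active(hws, rq, 0);
	if (err)
		goto unpin_hws;

	err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);

unpin_hws:
	i915_vma_unpin(hws);
	i915_vma_unpin(vma);

	return err;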
diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c
index c2a0451336cf..22a73da45ad5 100644
--- a/drivers/gpu/drm/i915/selftests/mock_engine.c
+++ b/drivers/gpu/drm/i915/selftests/mock_engine.c
@@ -200,6 +200,8 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
 	engine->base.submit_request = mock_submit_request;
 
 	i915_timeline_init(i915, &engine->base.timeline, engine->base.name);
+	lockdep_set_subclass(&engine->base.timeline.lock, TIMELINE_ENGINE);
+
 	intel_engine_init_breadcrumbs(&engine->base);
 	engine->base.breadcrumbs.mock = true; /* prevent touching HW for irqs */
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index c97075c5ccaf..43ed8b28aeaa 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -157,7 +157,8 @@ struct drm_i915_private *mock_gem_device(void)
 	dev_pm_domain_set(&pdev->dev, &pm_domain);
 	pm_runtime_enable(&pdev->dev);
 	pm_runtime_dont_use_autosuspend(&pdev->dev);
-	WARN_ON(pm_runtime_get_sync(&pdev->dev));
+	if (pm_runtime_enabled(&pdev->dev))
+		WARN_ON(pm_runtime_get_sync(&pdev->dev));
 
 	i915 = (struct drm_i915_private *)(pdev + 1);
 	pci_set_drvdata(pdev, i915);
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
index 6a7f4da7b523..a140ea5c3a7c 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
@@ -70,12 +70,7 @@ mock_ppgtt(struct drm_i915_private *i915,
 	ppgtt->vm.total = round_down(U64_MAX, PAGE_SIZE);
 	ppgtt->vm.file = ERR_PTR(-ENODEV);
 
-	INIT_LIST_HEAD(&ppgtt->vm.active_list);
-	INIT_LIST_HEAD(&ppgtt->vm.inactive_list);
-	INIT_LIST_HEAD(&ppgtt->vm.unbound_list);
-
-	INIT_LIST_HEAD(&ppgtt->vm.global_link);
-	drm_mm_init(&ppgtt->vm.mm, 0, ppgtt->vm.total);
+	i915_address_space_init(&ppgtt->vm, i915);
 
 	ppgtt->vm.clear_range = nop_clear_range;
 	ppgtt->vm.insert_page = mock_insert_page;
@@ -106,8 +101,6 @@ void mock_init_ggtt(struct drm_i915_private *i915)
 {
 	struct i915_ggtt *ggtt = &i915->ggtt;
 
-	INIT_LIST_HEAD(&i915->vm_list);
-
 	ggtt->vm.i915 = i915;
 
 	ggtt->gmadr = (struct resource) DEFINE_RES_MEM(0, 2048 * PAGE_SIZE);
@@ -124,7 +117,7 @@ void mock_init_ggtt(struct drm_i915_private *i915)
 	ggtt->vm.vma_ops.set_pages   = ggtt_set_pages;
 	ggtt->vm.vma_ops.clear_pages = clear_pages;
 
-	i915_address_space_init(&ggtt->vm, i915, "global");
+	i915_address_space_init(&ggtt->vm, i915);
 }
 
 void mock_fini_ggtt(struct drm_i915_private *i915)
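Reviewer note: on CONFIG_PM=n builds the runtime-PM stubs report success with a positive "already active" return, so the unconditional WARN_ON(pm_runtime_get_sync()) in mock_gem_device() fired spuriously; that reading of the stubs is an inference on my part, the patch itself only adds the guard. The guarded form:

	pm_runtime_enable(&pdev->dev);
	pm_runtime_dont_use_autosuspend(&pdev->dev);

	/* only trust the return value when runtime PM is really enabled */
	if (pm_runtime_enabled(&pdev->dev))
		WARN_ON(pm_runtime_get_sync(&pdev->dev));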