Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 40 +++++++++++++++++++++++++++++-----------
1 file changed, 29 insertions(+), 11 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 89834ce19acd..aa3969d52773 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1127,11 +1127,7 @@ i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
 	offset = offset_in_page(args->offset);
 	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
 		struct page *page = i915_gem_object_get_page(obj, idx);
-		int length;
-
-		length = remain;
-		if (offset + length > PAGE_SIZE)
-			length = PAGE_SIZE - offset;
+		unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);
 
 		ret = shmem_pread(page, offset, length, user_data,
 				  page_to_phys(page) & obj_do_bit17_swizzling,
@@ -1575,11 +1571,7 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
 	offset = offset_in_page(args->offset);
 	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
 		struct page *page = i915_gem_object_get_page(obj, idx);
-		int length;
-
-		length = remain;
-		if (offset + length > PAGE_SIZE)
-			length = PAGE_SIZE - offset;
+		unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);
 
 		ret = shmem_pwrite(page, offset, length, user_data,
 				   page_to_phys(page) & obj_do_bit17_swizzling,
@@ -2506,7 +2498,9 @@ static bool i915_sg_trim(struct sg_table *orig_st)
 	new_sg = new_st.sgl;
 	for_each_sg(orig_st->sgl, sg, orig_st->nents, i) {
 		sg_set_page(new_sg, sg_page(sg), sg->length, 0);
-		/* called before being DMA mapped, no need to copy sg->dma_* */
+		sg_dma_address(new_sg) = sg_dma_address(sg);
+		sg_dma_len(new_sg) = sg_dma_len(sg);
+
 		new_sg = sg_next(new_sg);
 	}
 	GEM_BUG_ON(new_sg); /* Should walk exactly nents and hit the end */
@@ -3438,6 +3432,9 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
 	i915_retire_requests(i915);
 	GEM_BUG_ON(i915->gt.active_requests);
 
+	if (!intel_gpu_reset(i915, ALL_ENGINES))
+		intel_engines_sanitize(i915);
+
 	/*
 	 * Undo nop_submit_request. We prevent all new i915 requests from
 	 * being queued (by disallowing execbuf whilst wedged) so having
@@ -5414,8 +5411,19 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
 
 	assert_kernel_context_is_current(i915);
 
+	/*
+	 * Immediately park the GPU so that we enable powersaving and
+	 * treat it as idle. The next time we issue a request, we will
+	 * unpark and start using the engine->pinned_default_state, otherwise
+	 * it is in limbo and an early reset may fail.
+	 */
+	__i915_gem_park(i915);
+
 	for_each_engine(engine, i915, id) {
 		struct i915_vma *state;
+		void *vaddr;
+
+		GEM_BUG_ON(to_intel_context(ctx, engine)->pin_count);
 
 		state = to_intel_context(ctx, engine)->state;
 		if (!state)
@@ -5438,6 +5446,16 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
 			goto err_active;
 
 		engine->default_state = i915_gem_object_get(state->obj);
+
+		/* Check we can acquire the image of the context state */
+		vaddr = i915_gem_object_pin_map(engine->default_state,
+						I915_MAP_FORCE_WB);
+		if (IS_ERR(vaddr)) {
+			err = PTR_ERR(vaddr);
+			goto err_active;
+		}
+
+		i915_gem_object_unpin_map(engine->default_state);
 	}
 
 	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
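
The two shmem hunks above replace a four-line int-based clamp with a single min_t(u64, remain, PAGE_SIZE - offset): the comparison now happens at 64-bit width before the result (always at most PAGE_SIZE) is narrowed into the per-page length. Below is a minimal stand-alone sketch of that per-page clamping loop, not the driver code itself; the 4K PAGE_SIZE/PAGE_SHIFT values and the min_u64 helper (a simplified stand-in for the kernel's min_t) are assumptions for illustration.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12			/* assumption: 4K pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Simplified stand-in for min_t(u64, a, b); evaluates its arguments twice. */
#define min_u64(a, b) ((uint64_t)(a) < (uint64_t)(b) ? (uint64_t)(a) : (uint64_t)(b))

static void walk_pages(uint64_t offset, uint64_t remain)
{
	unsigned int off = offset & (PAGE_SIZE - 1);	/* offset_in_page() */
	uint64_t idx = offset >> PAGE_SHIFT;

	for (; remain; idx++) {
		/* Clamp at u64 width first, as the patched code does */
		unsigned int length = min_u64(remain, PAGE_SIZE - off);

		printf("page %llu: copy %u bytes at offset %u\n",
		       (unsigned long long)idx, length, off);

		remain -= length;
		off = 0;	/* later pages are consumed from their start */
	}
}

int main(void)
{
	/* Start 3 bytes into page 0 and span just past page 2 */
	walk_pages(3, 2 * PAGE_SIZE + 10);
	return 0;
}

Run as-is, this prints copies of 4093, 4096 and 13 bytes for pages 0, 1 and 2: the same partial-first-page, whole-middle-page, partial-last-page split the driver loop performs.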
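
The last hunk pins the newly recorded default state with i915_gem_object_pin_map() and immediately unpins it, purely to prove the context image can be mapped, so a failure is reported at record time instead of surfacing later at first use. A loose userspace analogue of that fail-fast check, using POSIX mmap rather than anything from i915 (the fallback path in main() is an arbitrary assumption):

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

/* Map the file once at start-up to prove it is mappable, then release it. */
static int check_mappable(const char *path)
{
	struct stat st;
	void *vaddr;
	int fd;

	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;
	if (fstat(fd, &st) < 0 || st.st_size == 0) {
		close(fd);
		return -1;
	}

	vaddr = mmap(NULL, (size_t)st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
	close(fd);		/* the mapping outlives the descriptor */
	if (vaddr == MAP_FAILED)
		return -1;	/* fail early, as the patch does via PTR_ERR() */

	munmap(vaddr, (size_t)st.st_size);	/* only needed proof of mappability */
	return 0;
}

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "/etc/hostname";

	if (check_mappable(path) < 0) {
		fprintf(stderr, "%s: cannot be mapped\n", path);
		return 1;
	}
	printf("%s: mappable\n", path);
	return 0;
}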