Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r-- | drivers/gpu/drm/i915/intel_ringbuffer.c | 123
1 file changed, 85 insertions(+), 38 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 8da1bde442dd..12e734b29463 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -28,9 +28,12 @@
  */
 
 #include <linux/log2.h>
+
 #include <drm/drmP.h>
-#include "i915_drv.h"
 #include <drm/i915_drm.h>
+
+#include "i915_drv.h"
+#include "i915_gem_render_state.h"
 #include "i915_trace.h"
 #include "intel_drv.h"
 
@@ -480,10 +483,14 @@ static bool stop_ring(struct intel_engine_cs *engine)
 		}
 	}
 
-	I915_WRITE_CTL(engine, 0);
+	I915_WRITE_HEAD(engine, I915_READ_TAIL(engine));
+
 	I915_WRITE_HEAD(engine, 0);
 	I915_WRITE_TAIL(engine, 0);
 
+	/* The ring must be empty before it is disabled */
+	I915_WRITE_CTL(engine, 0);
+
 	return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0;
 }
 
@@ -1359,12 +1366,13 @@ static int context_pin(struct i915_gem_context *ctx)
 	struct i915_vma *vma = ctx->engine[RCS].state;
 	int ret;
 
-	/* Clear this page out of any CPU caches for coherent swap-in/out.
+	/*
+	 * Clear this page out of any CPU caches for coherent swap-in/out.
 	 * We only want to do this on the first bind so that we do not stall
 	 * on an active context (which by nature is already on the GPU).
 	 */
 	if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
-		ret = i915_gem_object_set_to_gtt_domain(vma->obj, false);
+		ret = i915_gem_object_set_to_gtt_domain(vma->obj, true);
 		if (ret)
 			return ret;
 	}
@@ -1379,11 +1387,34 @@ alloc_context_vma(struct intel_engine_cs *engine)
 	struct drm_i915_private *i915 = engine->i915;
 	struct drm_i915_gem_object *obj;
 	struct i915_vma *vma;
+	int err;
 
 	obj = i915_gem_object_create(i915, engine->context_size);
 	if (IS_ERR(obj))
 		return ERR_CAST(obj);
 
+	if (engine->default_state) {
+		void *defaults, *vaddr;
+
+		vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+		if (IS_ERR(vaddr)) {
+			err = PTR_ERR(vaddr);
+			goto err_obj;
+		}
+
+		defaults = i915_gem_object_pin_map(engine->default_state,
+						   I915_MAP_WB);
+		if (IS_ERR(defaults)) {
+			err = PTR_ERR(defaults);
+			goto err_map;
+		}
+
+		memcpy(vaddr, defaults, engine->context_size);
+
+		i915_gem_object_unpin_map(engine->default_state);
+		i915_gem_object_unpin_map(obj);
+	}
+
 	/*
 	 * Try to make the context utilize L3 as well as LLC.
 	 *
@@ -1405,10 +1436,18 @@ alloc_context_vma(struct intel_engine_cs *engine)
 	}
 
 	vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
-	if (IS_ERR(vma))
-		i915_gem_object_put(obj);
+	if (IS_ERR(vma)) {
+		err = PTR_ERR(vma);
+		goto err_obj;
+	}
 
 	return vma;
+
+err_map:
+	i915_gem_object_unpin_map(obj);
+err_obj:
+	i915_gem_object_put(obj);
+	return ERR_PTR(err);
 }
 
 static struct intel_ring *
@@ -1441,20 +1480,9 @@ intel_ring_context_pin(struct intel_engine_cs *engine,
 		if (ret)
 			goto err;
 
-		ce->state->obj->mm.dirty = true;
 		ce->state->obj->pin_global++;
 	}
 
-	/* The kernel context is only used as a placeholder for flushing the
-	 * active context. It is never used for submitting user rendering and
-	 * as such never requires the golden render context, and so we can skip
-	 * emitting it when we switch to the kernel context. This is required
-	 * as during eviction we cannot allocate and pin the renderstate in
-	 * order to initialise the context.
-	 */
-	if (i915_gem_context_is_kernel(ctx))
-		ce->initialised = true;
-
 	i915_gem_context_get(ctx);
 
 out:
@@ -1550,7 +1578,7 @@ void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
 
 static int ring_request_alloc(struct drm_i915_gem_request *request)
 {
-	u32 *cs;
+	int ret;
 
 	GEM_BUG_ON(!request->ctx->engine[request->engine->id].pin_count);
 
@@ -1560,37 +1588,24 @@ static int ring_request_alloc(struct drm_i915_gem_request *request)
 	 */
 	request->reserved_space += LEGACY_REQUEST_SIZE;
 
-	cs = intel_ring_begin(request, 0);
-	if (IS_ERR(cs))
-		return PTR_ERR(cs);
+	ret = intel_ring_wait_for_space(request->ring, request->reserved_space);
+	if (ret)
+		return ret;
 
 	request->reserved_space -= LEGACY_REQUEST_SIZE;
 	return 0;
 }
 
-static noinline int wait_for_space(struct drm_i915_gem_request *req,
-				   unsigned int bytes)
+static noinline int wait_for_space(struct intel_ring *ring, unsigned int bytes)
 {
-	struct intel_ring *ring = req->ring;
 	struct drm_i915_gem_request *target;
 	long timeout;
 
-	lockdep_assert_held(&req->i915->drm.struct_mutex);
+	lockdep_assert_held(&ring->vma->vm->i915->drm.struct_mutex);
 
 	if (intel_ring_update_space(ring) >= bytes)
 		return 0;
 
-	/*
-	 * Space is reserved in the ringbuffer for finalising the request,
-	 * as that cannot be allowed to fail. During request finalisation,
-	 * reserved_space is set to 0 to stop the overallocation and the
-	 * assumption is that then we never need to wait (which has the
-	 * risk of failing with EINTR).
-	 *
-	 * See also i915_gem_request_alloc() and i915_add_request().
-	 */
-	GEM_BUG_ON(!req->reserved_space);
-
 	list_for_each_entry(target, &ring->request_list, ring_link) {
 		/* Would completion of this request free enough space? */
 		if (bytes <= __intel_ring_space(target->postfix,
@@ -1614,6 +1629,22 @@ static noinline int wait_for_space(struct drm_i915_gem_request *req,
 	return 0;
 }
 
+int intel_ring_wait_for_space(struct intel_ring *ring, unsigned int bytes)
+{
+	GEM_BUG_ON(bytes > ring->effective_size);
+	if (unlikely(bytes > ring->effective_size - ring->emit))
+		bytes += ring->size - ring->emit;
+
+	if (unlikely(bytes > ring->space)) {
+		int ret = wait_for_space(ring, bytes);
+		if (unlikely(ret))
+			return ret;
+	}
+
+	GEM_BUG_ON(ring->space < bytes);
+	return 0;
+}
+
 u32 *intel_ring_begin(struct drm_i915_gem_request *req,
 		      unsigned int num_dwords)
 {
@@ -1653,7 +1684,20 @@ u32 *intel_ring_begin(struct drm_i915_gem_request *req,
 	}
 
 	if (unlikely(total_bytes > ring->space)) {
-		int ret = wait_for_space(req, total_bytes);
+		int ret;
+
+		/*
+		 * Space is reserved in the ringbuffer for finalising the
+		 * request, as that cannot be allowed to fail. During request
+		 * finalisation, reserved_space is set to 0 to stop the
+		 * overallocation and the assumption is that then we never need
+		 * to wait (which has the risk of failing with EINTR).
+		 *
+		 * See also i915_gem_request_alloc() and i915_add_request().
+		 */
+		GEM_BUG_ON(!req->reserved_space);
+
+		ret = wait_for_space(ring, total_bytes);
 		if (unlikely(ret))
 			return ERR_PTR(ret);
 	}
@@ -2028,12 +2072,15 @@ static void i9xx_set_default_submission(struct intel_engine_cs *engine)
 {
 	engine->submit_request = i9xx_submit_request;
 	engine->cancel_requests = cancel_requests;
+
+	engine->park = NULL;
+	engine->unpark = NULL;
 }
 
 static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine)
 {
+	i9xx_set_default_submission(engine);
 	engine->submit_request = gen6_bsd_submit_request;
-	engine->cancel_requests = cancel_requests;
 }
 
 static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
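A note on the space accounting introduced by intel_ring_wait_for_space(): a request must occupy a contiguous span of the ring, so if it does not fit between the current emit offset and the end of the buffer, the writer has to wrap back to the start, and the bytes skipped at the tail (ring->size - ring->emit) must be charged against the free space too. A minimal userspace sketch of that arithmetic, with a toy struct standing in for struct intel_ring (the field names mirror the kernel's, but nothing here is the kernel API):

#include <assert.h>
#include <stdio.h>

/* Toy ring geometry; effective_size is slightly smaller than size
 * because the hardware cannot use the very end of the buffer. */
struct toy_ring {
	unsigned int size;		/* bytes in the backing store */
	unsigned int effective_size;	/* usable bytes */
	unsigned int emit;		/* current write offset */
};

/* Same arithmetic as the new intel_ring_wait_for_space(): if the
 * request cannot fit contiguously before the end of the buffer, the
 * writer wraps, and the wasted tail bytes count against free space. */
static unsigned int bytes_needed(const struct toy_ring *ring,
				 unsigned int bytes)
{
	assert(bytes <= ring->effective_size);
	if (bytes > ring->effective_size - ring->emit)
		bytes += ring->size - ring->emit;
	return bytes;
}

int main(void)
{
	const struct toy_ring ring = {
		.size = 4096,
		.effective_size = 4096 - 64,
		.emit = 4000,
	};

	/* 128 bytes cannot fit in the 32 bytes left before the wrap
	 * point, so the request really costs 128 + 96 = 224 bytes. */
	printf("need %u bytes\n", bytes_needed(&ring, 128));
	return 0;
}

With emit at 4000 in a 4096-byte ring whose effective size is 4032, a 128-byte request really costs 224 bytes: the 96-byte tail is unusable once the writer wraps.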
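The new default_state path in alloc_context_vma() also illustrates the standard kernel unwind idiom: each acquired mapping gets a matching goto label, so any failure releases exactly what has been taken so far, in reverse order. A standalone sketch of the same shape, with malloc()/free() standing in for i915_gem_object_pin_map()/i915_gem_object_unpin_map() (the helper names below are illustrative, not the i915 API):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* malloc()/free() stand in for pinning and unpinning GEM mappings;
 * only the unwind shape is the point here. */
static void *pin_map(size_t size) { return malloc(size); }
static void unpin_map(void *vaddr) { free(vaddr); }

/* Mirrors the error handling in the patched alloc_context_vma():
 * a failure jumps to the label that undoes what was acquired so far. */
static int clone_default_state(void **out, const char *defaults, size_t size)
{
	void *vaddr, *src;
	int err;

	vaddr = pin_map(size);		/* destination mapping */
	if (!vaddr) {
		err = -ENOMEM;
		goto err_out;
	}

	src = pin_map(size);		/* source mapping */
	if (!src) {
		err = -ENOMEM;
		goto err_map;		/* undo the destination mapping only */
	}

	memcpy(src, defaults, size);	/* pretend src was already populated */
	memcpy(vaddr, src, size);	/* the default-state copy itself */

	unpin_map(src);			/* release in reverse order */
	*out = vaddr;
	return 0;

err_map:
	unpin_map(vaddr);
err_out:
	return err;
}

int main(void)
{
	static const char golden[32] = "golden context image";
	void *state;

	if (clone_default_state(&state, golden, sizeof(golden)))
		return 1;

	printf("%s\n", (const char *)state);
	unpin_map(state);
	return 0;
}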
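Finally, the reserved_space handling that ring_request_alloc() keeps, and whose GEM_BUG_ON(!req->reserved_space) moved into intel_ring_begin(): space for finalising a request is reserved up front, so that emitting the closing commands can never fail, and released again once the wait has succeeded so that ordinary emission does not count it twice. A toy model of that reserve/wait/release sequence (the constants and names are made up for illustration):

#include <stdio.h>

/* In the kernel, LEGACY_REQUEST_SIZE is the worst-case number of
 * bytes needed to finalise a legacy ring request; 200 is arbitrary. */
#define TOY_LEGACY_REQUEST_SIZE 200

struct toy_request {
	unsigned int reserved_space;
};

/* Stand-in for intel_ring_wait_for_space(); the real one may block
 * and can fail, which is exactly why the reservation happens before
 * any part of the request has been emitted. */
static int toy_wait_for_space(unsigned int bytes)
{
	printf("ensuring %u bytes are free\n", bytes);
	return 0;
}

/* Mirrors ring_request_alloc(): grow the reservation, make sure the
 * whole of it fits now, then shrink it back so ordinary emission does
 * not double-count the finalisation bytes. */
static int toy_request_alloc(struct toy_request *rq)
{
	int ret;

	rq->reserved_space += TOY_LEGACY_REQUEST_SIZE;

	ret = toy_wait_for_space(rq->reserved_space);
	if (ret)
		return ret;

	rq->reserved_space -= TOY_LEGACY_REQUEST_SIZE;
	return 0;
}

int main(void)
{
	struct toy_request rq = { .reserved_space = 160 };

	return toy_request_alloc(&rq);
}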