author    Matthew Auld <matthew.auld@intel.com>    2019-08-10 10:29:45 +0100
committer Chris Wilson <chris@chris-wilson.co.uk>  2019-08-10 11:19:00 +0100
commit    554e330ceb9f00204bb692974c490ad50fc104cc (patch)
tree      f0ad45a514fd85f188f3ffa26d938e5b3334717e /drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
parent    963ad1285b7c7c5e4c75c2aa108ace866010e9bc (diff)
download  linux-554e330ceb9f00204bb692974c490ad50fc104cc.tar.bz2
drm/i915/blt: bump the size restriction
As pointed out by Chris, with our current approach we are actually
limited to S16_MAX * PAGE_SIZE for our size when using the blt to clear
pages. Keeping things simple, try to fix this by reducing the copy to a
sequence of S16_MAX * PAGE_SIZE blocks.

Reported-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
[ickle: hide the details of the engine pool inside emit_vma]
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20190810092945.2762-1-chris@chris-wilson.co.uk
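For illustration, the chunking idea boils down to a loop like the one
below. This is a minimal sketch, not the kernel source: emit_one_block()
is a hypothetical stand-in for emitting a single fill-blt command, while
S16_MAX and PAGE_SIZE are the usual kernel constants.

/*
 * Illustrative sketch only: split a large fill into blocks of at most
 * S16_MAX * PAGE_SIZE bytes, the most a single blt can address.
 */
static void emit_one_block(u64 offset, u64 len, u32 value); /* hypothetical */

static void emit_chunked_fill(u64 size, u32 value)
{
	const u64 block = (u64)S16_MAX * PAGE_SIZE;
	u64 offset = 0;

	while (size) {
		u64 len = min(size, block);

		emit_one_block(offset, len, value);

		offset += len;
		size -= len;
	}
}

In the patch itself this chunking lives inside intel_emit_vma_fill_blt(),
which now builds a standalone batch that the worker executes via
emit_bb_start(), as the diff below shows.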
Diffstat (limited to 'drivers/gpu/drm/i915/gem/i915_gem_client_blt.c')
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_client_blt.c | 21 +++++++++++++++++++--
1 file changed, 19 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
index 08a84c940d8d..ac14677dd537 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
@@ -5,6 +5,8 @@
#include "i915_drv.h"
#include "gt/intel_context.h"
+#include "gt/intel_engine_pm.h"
+#include "gt/intel_engine_pool.h"
#include "i915_gem_client_blt.h"
#include "i915_gem_object_blt.h"
@@ -157,6 +159,7 @@ static void clear_pages_worker(struct work_struct *work)
struct drm_i915_gem_object *obj = w->sleeve->vma->obj;
struct i915_vma *vma = w->sleeve->vma;
struct i915_request *rq;
+ struct i915_vma *batch;
int err = w->dma.error;
if (unlikely(err))
@@ -176,10 +179,16 @@ static void clear_pages_worker(struct work_struct *work)
if (unlikely(err))
goto out_unlock;
+ batch = intel_emit_vma_fill_blt(w->ce, vma, w->value);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ goto out_unpin;
+ }
+
rq = intel_context_create_request(w->ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
- goto out_unpin;
+ goto out_batch;
}
/* There's no way the fence has signalled */
@@ -187,6 +196,10 @@ static void clear_pages_worker(struct work_struct *work)
clear_pages_dma_fence_cb))
GEM_BUG_ON(1);
+ err = intel_emit_vma_mark_active(batch, rq);
+ if (unlikely(err))
+ goto out_request;
+
if (w->ce->engine->emit_init_breadcrumb) {
err = w->ce->engine->emit_init_breadcrumb(rq);
if (unlikely(err))
@@ -202,7 +215,9 @@ static void clear_pages_worker(struct work_struct *work)
if (err)
goto out_request;
- err = intel_emit_vma_fill_blt(rq, vma, w->value);
+ err = w->ce->engine->emit_bb_start(rq,
+ batch->node.start, batch->node.size,
+ 0);
out_request:
if (unlikely(err)) {
i915_request_skip(rq, err);
@@ -210,6 +225,8 @@ out_request:
}
i915_request_add(rq);
+out_batch:
+ intel_emit_vma_release(w->ce, batch);
out_unpin:
i915_vma_unpin(vma);
out_unlock:
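
Read together, the hunks reshape clear_pages_worker() so the batch is
built before the request and released on every exit path. Condensed from
the diff above (fence-callback and breadcrumb handling elided):

	batch = intel_emit_vma_fill_blt(w->ce, vma, w->value); /* build batch first */
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto out_unpin;
	}

	rq = intel_context_create_request(w->ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto out_batch;	/* new unwind target */
	}

	err = intel_emit_vma_mark_active(batch, rq); /* keep batch alive until rq retires */
	if (unlikely(err))
		goto out_request;

	/* ... emit_init_breadcrumb, request await ... */

	err = w->ce->engine->emit_bb_start(rq, batch->node.start,
					   batch->node.size, 0);
out_request:
	if (unlikely(err))
		i915_request_skip(rq, err);
	i915_request_add(rq);
out_batch:
	intel_emit_vma_release(w->ce, batch); /* paired with intel_emit_vma_fill_blt() */
out_unpin:
	i915_vma_unpin(vma);

Note the unwind order: the batch is released after the request is added
but before the vma is unpinned, with intel_emit_vma_mark_active() tying
the batch's lifetime to the request in the success path.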