author     fred gao <fred.gao@intel.com>          2018-03-15 13:21:10 +0800
committer  Zhenyu Wang <zhenyuw@linux.intel.com>  2018-03-15 15:06:26 +0800
commit     ef75c685869ea2059f85855a7dc00148a704c36c (patch)
tree       a06433a00f8ce0f55fa9ed90c6a0f2f7c949f171 /drivers/gpu
parent     fa3dd623e559e8e7004179f9594b090318df0d05 (diff)
download   linux-ef75c685869ea2059f85855a7dc00148a704c36c.tar.bz2
drm/i915/gvt: Correct the privilege shadow batch buffer address
Once the ring buffer is copied to ring_scan_buffer and scanned, the
shadow batch buffer start address is only updated into
ring_scan_buffer, not the real ring buffer allocated through
intel_ring_begin in the later copy_workload_to_ring_buffer.

This patch only sets the right shadow batch buffer address from the
ring buffer; it does not cover shadow_wa_ctx.

v2:
- refine some comments. (Zhenyu)
v3:
- fix typo in title. (Zhenyu)
v4:
- remove the unnecessary comments. (Zhenyu)
- add comments for the bb_start_cmd_va update. (Zhenyu)

Fixes: 0a53bc07f044 ("drm/i915/gvt: Separate cmd scan from request allocation")
Cc: stable@vger.kernel.org # v4.15
Cc: Zhenyu Wang <zhenyuw@linux.intel.com>
Cc: Yulei Zhang <yulei.zhang@intel.com>
Signed-off-by: fred gao <fred.gao@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
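The failure mode is easy to model outside the driver. Below is a
minimal, self-contained userspace sketch of the pattern, not the
driver's code: all buffers, indices and values are hypothetical, and
only the bb_offset idea mirrors the field this patch adds. It shows
why a pointer captured while scanning a temporary copy goes stale once
the contents are copied to their real destination, while an offset
survives the copy.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint32_t scan_copy[64] = {0};  /* stands in for ring_scan_buffer   */
	uint32_t real_ring[64];        /* stands in for the ring obtained
	                                * through intel_ring_begin         */
	uint32_t *cmd_va;
	size_t bb_offset;

	scan_copy[9] = 0x18800001;     /* pretend batch-buffer-start cmd   */

	/* Scan phase: the command is located inside the temporary copy. */
	cmd_va = &scan_copy[9];
	bb_offset = (uint8_t *)cmd_va - (uint8_t *)scan_copy;

	/* Copy phase: the scanned contents land in the real ring. */
	memcpy(real_ring, scan_copy, sizeof(real_ring));

	/* Bug: patching through the stale pointer edits only the scan
	 * copy, so the real ring never sees the relocated address. */
	cmd_va[1] = 0xdeadbeef;
	printf("real ring patched via stale pointer:   %s\n",
	       real_ring[10] == 0xdeadbeef ? "yes" : "no");   /* no  */

	/* Fix: rebase the pointer onto the real ring first. */
	cmd_va = (uint32_t *)((uint8_t *)real_ring + bb_offset);
	cmd_va[1] = 0xdeadbeef;
	printf("real ring patched via rebased pointer: %s\n",
	       real_ring[10] == 0xdeadbeef ? "yes" : "no");   /* yes */
	return 0;
}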
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.c   8
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c   11
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.h    1
3 files changed, 20 insertions, 0 deletions
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index c8454ac43fae..db6b94dda5df 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -471,6 +471,7 @@ struct parser_exec_state {
 	 * used when ret from 2nd level batch buffer
 	 */
 	int saved_buf_addr_type;
+	bool is_ctx_wa;
 
 	struct cmd_info *info;
@@ -1715,6 +1716,11 @@ static int perform_bb_shadow(struct parser_exec_state *s)
 	bb->accessing = true;
 	bb->bb_start_cmd_va = s->ip_va;
 
+	if ((s->buf_type == BATCH_BUFFER_INSTRUCTION) && (!s->is_ctx_wa))
+		bb->bb_offset = s->ip_va - s->rb_va;
+	else
+		bb->bb_offset = 0;
+
 	/*
 	 * ip_va saves the virtual address of the shadow batch buffer, while
 	 * ip_gma saves the graphics address of the original batch buffer.
@@ -2571,6 +2577,7 @@ static int scan_workload(struct intel_vgpu_workload *workload)
 	s.ring_tail = gma_tail;
 	s.rb_va = workload->shadow_ring_buffer_va;
 	s.workload = workload;
+	s.is_ctx_wa = false;
 
 	if ((bypass_scan_mask & (1 << workload->ring_id)) ||
 	    gma_head == gma_tail)
@@ -2624,6 +2631,7 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 	s.ring_tail = gma_tail;
 	s.rb_va = wa_ctx->indirect_ctx.shadow_va;
 	s.workload = workload;
+	s.is_ctx_wa = true;
 
 	if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) {
 		ret = -EINVAL;
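For concreteness, here is a hypothetical walk-through of the offset
arithmetic recorded above (all addresses are invented, not taken from
the driver):

/* Hypothetical addresses, for illustration only. */
unsigned long rb_va = 0x7f2a10001000UL;  /* s->rb_va: scan-buffer start    */
unsigned long ip_va = 0x7f2a10001230UL;  /* s->ip_va: cmd found at +0x230  */
unsigned long bb_offset = ip_va - rb_va; /* 0x230, stored in the shadow bb */

/* After copy_workload_to_ring_buffer, the same command sits at the
 * same offset inside the real ring mapping: */
unsigned long ring_va = 0x7f2a10040000UL;    /* shadow_ring_buffer_va */
unsigned long cmd_va  = ring_va + bb_offset; /* 0x7f2a10040230        */

Note why is_ctx_wa gates the offset: only the ring-buffer contents are
copied into the real ring, so only batch-buffer-start commands found
there need rebasing; the wa_ctx indirect context buffer keeps bb_offset
at zero.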
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 8caf72c1e794..fdf1c0bf0d55 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -426,6 +426,17 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 			goto err;
 		}
 
+		/* For a privileged batch buffer that is not a wa_ctx, the
+		 * bb_start_cmd_va is only updated into ring_scan_buffer,
+		 * not the real ring address allocated in the later
+		 * copy_workload_to_ring_buffer. Please note that
+		 * shadow_ring_buffer_va points to the real ring buffer va
+		 * in copy_workload_to_ring_buffer.
+		 */
+
+		if (bb->bb_offset)
+			bb->bb_start_cmd_va = workload->shadow_ring_buffer_va
+				+ bb->bb_offset;
+
 		/* relocate shadow batch buffer */
 		bb->bb_start_cmd_va[1] = i915_ggtt_offset(bb->vma);
 		if (gmadr_bytes == 8)
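The context lines closing this hunk perform the actual relocation:
they write the GGTT offset of the pinned shadow batch buffer into the
address operand of the batch-buffer-start command, through the now
correctly rebased bb_start_cmd_va. A hedged sketch of that step,
assuming a gen8-style three-dword command layout (the helper name and
the layout are assumptions for illustration, not the driver's code):

#include <stdint.h>

/* Assumed dword layout of the command being patched:
 *   cmd[0]: MI_BATCH_BUFFER_START header
 *   cmd[1]: low 32 bits of the batch buffer graphics address
 *   cmd[2]: high bits, present only when addresses are 8 bytes wide
 */
static void relocate_bb_start(uint32_t *cmd, uint64_t gaddr, int gmadr_bytes)
{
	cmd[1] = (uint32_t)gaddr;                 /* low dword          */
	if (gmadr_bytes == 8)
		cmd[2] = (uint32_t)(gaddr >> 32); /* high dword, if any */
}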
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 2603336b7c6d..a79a4f60637e 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -124,6 +124,7 @@ struct intel_vgpu_shadow_bb {
 	u32 *bb_start_cmd_va;
 	unsigned int clflush;
 	bool accessing;
+	unsigned long bb_offset;
 };
 
 #define workload_q_head(vgpu, ring_id) \