From 1c6ccad8a4b110bac3f9ff6db954ab5a74c2e861 Mon Sep 17 00:00:00 2001 From: Tina Zhang Date: Mon, 14 May 2018 13:59:18 +0800 Subject: drm/i915/gvt: Deliver guest cursor hotspot info Guest OS driver uses PV info registers to deliver cursor hotspot info to host. This patch is used to get cursor hotspot info from virtual registers and deliver it to host userspace. v4->v5: - remove CI warning. v3->v4: - return UINT_MAX when x_hot/y_hot is invalid. (Zhenyu) - correct version. v2->v3: - add validate_hotspot(). (Zhenyu) v1->v2: - name as cursor_x_hot/cursor_y_hot. (Zhenyu) - use i915_reg_t definition instead of magic numbers. (Zhenyu) Signed-off-by: Tina Zhang Cc: Zhenyu Wang Cc: Zhi Wang Cc: Gerd Hoffmann Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/dmabuf.c | 22 ++++++++++++++++------ drivers/gpu/drm/i915/gvt/fb_decoder.c | 3 +++ drivers/gpu/drm/i915/gvt/handlers.c | 4 ++-- drivers/gpu/drm/i915/gvt/vgpu.c | 3 +++ 4 files changed, 24 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm/i915/gvt') diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c index 6f4f8e941fc2..d2eb2f7754b9 100644 --- a/drivers/gpu/drm/i915/gvt/dmabuf.c +++ b/drivers/gpu/drm/i915/gvt/dmabuf.c @@ -192,6 +192,14 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev, return obj; } +static bool validate_hotspot(struct intel_vgpu_cursor_plane_format *c) +{ + if (c && c->x_hot <= c->width && c->y_hot <= c->height) + return true; + else + return false; +} + static int vgpu_get_plane_info(struct drm_device *dev, struct intel_vgpu *vgpu, struct intel_vgpu_fb_info *info, @@ -229,12 +237,14 @@ static int vgpu_get_plane_info(struct drm_device *dev, info->x_pos = c.x_pos; info->y_pos = c.y_pos; - /* The invalid cursor hotspot value is delivered to host - * until we find a way to get the cursor hotspot info of - * guest OS. 
- */ - info->x_hot = UINT_MAX; - info->y_hot = UINT_MAX; + if (validate_hotspot(&c)) { + info->x_hot = c.x_hot; + info->y_hot = c.y_hot; + } else { + info->x_hot = UINT_MAX; + info->y_hot = UINT_MAX; + } + info->size = (((info->stride * c.height * c.bpp) / 8) + (PAGE_SIZE - 1)) >> PAGE_SHIFT; } else { diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c index 1c120683e958..5e7468bd1b36 100644 --- a/drivers/gpu/drm/i915/gvt/fb_decoder.c +++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c @@ -36,6 +36,7 @@ #include #include "i915_drv.h" #include "gvt.h" +#include "i915_pvinfo.h" #define PRIMARY_FORMAT_NUM 16 struct pixel_format { @@ -384,6 +385,8 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu, plane->y_pos = (val & _CURSOR_POS_Y_MASK) >> _CURSOR_POS_Y_SHIFT; plane->y_sign = (val & _CURSOR_SIGN_Y_MASK) >> _CURSOR_SIGN_Y_SHIFT; + plane->x_hot = vgpu_vreg_t(vgpu, vgtif_reg(cursor_x_hot)); + plane->y_hot = vgpu_vreg_t(vgpu, vgtif_reg(cursor_y_hot)); return 0; } diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 4b6532fb789a..d5e206661048 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -1204,8 +1204,8 @@ static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, ret = handle_g2v_notification(vgpu, data); break; /* add xhot and yhot to handled list to avoid error log */ - case 0x78830: - case 0x78834: + case _vgtif_reg(cursor_x_hot): + case _vgtif_reg(cursor_y_hot): case _vgtif_reg(pdp[0].lo): case _vgtif_reg(pdp[0].hi): case _vgtif_reg(pdp[1].lo): diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index 2e0a02a80fe4..bf75300c1ec1 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -58,6 +58,9 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu) vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.fence_num)) = vgpu_fence_sz(vgpu); + vgpu_vreg_t(vgpu, vgtif_reg(cursor_x_hot)) = UINT_MAX; + vgpu_vreg_t(vgpu, vgtif_reg(cursor_y_hot)) = UINT_MAX; + gvt_dbg_core("Populate PVINFO PAGE for vGPU %d\n", vgpu->id); gvt_dbg_core("aperture base [GMADR] 0x%llx size 0x%llx\n", vgpu_aperture_gmadr_base(vgpu), vgpu_aperture_sz(vgpu)); -- cgit v1.2.3 From 579e2f6d999991d2d2dd39c7185cba0a97137cee Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 15 Mar 2017 17:39:50 +0000 Subject: drm/i915/gvt: Use offsetofend() rather than offsetof + sizeof Compute the offset of the end of the crc32 field using offsetofend() rather than open-coding. 
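For reference, the kernel's offsetofend() helper is defined roughly as below, so for a u32 crc32 member the result equals the open-coded "offsetof(...) + 4", just without the magic number (a sketch for illustration, not part of this diff):

	#define offsetofend(TYPE, MEMBER) \
		(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

	/* crc32 is a u32, so these two expressions name the same offset:
	 *   offsetof(struct gvt_firmware_header, crc32) + 4
	 *   offsetofend(struct gvt_firmware_header, crc32)
	 */
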
Signed-off-by: Chris Wilson Cc: Zhenyu Wang Cc: Zhi Wang Reviewed-by: Mika Kuoppala Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/firmware.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/i915/gvt') diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c index a73e1d418c22..4ac18b447247 100644 --- a/drivers/gpu/drm/i915/gvt/firmware.c +++ b/drivers/gpu/drm/i915/gvt/firmware.c @@ -162,7 +162,7 @@ static int verify_firmware(struct intel_gvt *gvt, h = (struct gvt_firmware_header *)fw->data; - crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4; + crc32_start = offsetofend(struct gvt_firmware_header, crc32); mem = fw->data + crc32_start; #define VERIFY(s, a, b) do { \ -- cgit v1.2.3 From f25a49ab8ab9c1b5587837c8a386b276403f315c Mon Sep 17 00:00:00 2001 From: Colin Xu Date: Sat, 19 May 2018 12:28:54 +0800 Subject: drm/i915/gvt: Use vgpu_lock to protect per vgpu access The patch set splits out 2 small locks from the original big gvt lock: - vgpu_lock protects per-vGPU data and logic, especially the vGPU trap emulation path. - sched_lock protects the gvt scheduler structure, context schedule logic and vGPU's schedule data. Use vgpu_lock to replace the gvt big lock. By doing this, the mmio read/write trap path, vgpu virtual event emulation and other vgpu related processing are protected under the per-vGPU vgpu_lock. v9: - Change commit author since the patches are improved a lot compared with original version. Original author: Pei Zhang - Rebase to latest gvt-staging. v8: - Correct coding and comment style. - Rebase to latest gvt-staging. v7: - Remove gtt_lock since already protected by gvt_lock and vgpu_lock. - Fix a typo in intel_gvt_deactivate_vgpu, unlock the wrong lock. v6: - Rebase to latest gvt-staging. v5: - Rebase to latest gvt-staging. - intel_vgpu_page_track_handler should use vgpu_lock. v4: - Rebase to latest gvt-staging. - Protect vgpu->active access with vgpu_lock. - Do not wait gpu idle in vgpu_lock. v3: update to latest code base v2: add gvt->lock in function gvt_check_vblank_emulation Performance comparison on Kabylake platform. - Configuration: Host: Ubuntu 16.04. Guest 1 & 2: Ubuntu 16.04. glmark2 score comparison: - Configuration: Host: glxgears. Guests: glmark2. +--------------------------------+-----------------+ | Setup | glmark2 score | +--------------------------------+-----------------+ | unified lock, iommu=on | 58~62 (avg. 60) | +--------------------------------+-----------------+ | unified lock, iommu=igfx_off | 57~61 (avg. 59) | +--------------------------------+-----------------+ | per-logic lock, iommu=on | 60~68 (avg. 64) | +--------------------------------+-----------------+ | per-logic lock, iommu=igfx_off | 61~67 (avg. 64) | +--------------------------------+-----------------+ lock_stat comparison: - Configuration: Stop lock stat immediately after boot up. Boot 2 VM Guests. Run glmark2 in guests. Start perf lock_stat for 20 seconds and stop again. 
- Legend: c - contentions; w - waittime-avg +------------+-----------------+-----------+---------------+------------+ | | gvt_lock |sched_lock | vgpu_lock | gtt_lock | + lock type; +-----------------+-----------+---------------+------------+ | iommu set | c | w | c | w | c | w | c | w | +------------+-------+---------+----+------+------+--------+-----+------+ | unified; | 20697 | 839 |N/A | N/A | N/A | N/A | N/A | N/A | | on | | | | | | | | | +------------+-------+---------+----+------+------+--------+-----+------+ | unified; | 21838 | 658.15 |N/A | N/A | N/A | N/A | N/A | N/A | | igfx_off | | | | | | | | | +------------+-------+---------+----+------+------+--------+-----+------+ | per-logic; | 1553 | 1599.96 |9458|429.97| 5846 | 274.33 | 0 | 0.00 | | on | | | | | | | | | +------------+-------+---------+----+------+------+--------+-----+------+ | per-logic; | 1911 | 1678.32 |8335|445.16| 5451 | 244.80 | 0 | 0.00 | | igfx_off | | | | | | | | | +------------+-------+---------+----+------+------+--------+-----+------+ Signed-off-by: Pei Zhang Signed-off-by: Colin Xu Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/display.c | 35 ++++++++++++----------- drivers/gpu/drm/i915/gvt/gvt.c | 5 +--- drivers/gpu/drm/i915/gvt/gvt.h | 8 ++++++ drivers/gpu/drm/i915/gvt/handlers.c | 4 +++ drivers/gpu/drm/i915/gvt/mmio.c | 12 ++++---- drivers/gpu/drm/i915/gvt/page_track.c | 5 ++-- drivers/gpu/drm/i915/gvt/scheduler.c | 9 +++--- drivers/gpu/drm/i915/gvt/vgpu.c | 53 ++++++++++++++++++----------------- 8 files changed, 72 insertions(+), 59 deletions(-) (limited to 'drivers/gpu/drm/i915/gvt') diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c index 6d8180e8d1e2..8e4a63c5b7d1 100644 --- a/drivers/gpu/drm/i915/gvt/display.c +++ b/drivers/gpu/drm/i915/gvt/display.c @@ -337,26 +337,28 @@ void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt) struct intel_gvt_irq *irq = &gvt->irq; struct intel_vgpu *vgpu; int pipe, id; + int found = false; - if (WARN_ON(!mutex_is_locked(&gvt->lock))) - return; - + mutex_lock(&gvt->lock); for_each_active_vgpu(gvt, vgpu, id) { for (pipe = 0; pipe < I915_MAX_PIPES; pipe++) { - if (pipe_is_enabled(vgpu, pipe)) - goto out; + if (pipe_is_enabled(vgpu, pipe)) { + found = true; + break; + } } + if (found) + break; } /* all the pipes are disabled */ - hrtimer_cancel(&irq->vblank_timer.timer); - return; - -out: - hrtimer_start(&irq->vblank_timer.timer, - ktime_add_ns(ktime_get(), irq->vblank_timer.period), - HRTIMER_MODE_ABS); - + if (!found) + hrtimer_cancel(&irq->vblank_timer.timer); + else + hrtimer_start(&irq->vblank_timer.timer, + ktime_add_ns(ktime_get(), irq->vblank_timer.period), + HRTIMER_MODE_ABS); + mutex_unlock(&gvt->lock); } static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe) @@ -393,8 +395,10 @@ static void emulate_vblank(struct intel_vgpu *vgpu) { int pipe; + mutex_lock(&vgpu->vgpu_lock); for_each_pipe(vgpu->gvt->dev_priv, pipe) emulate_vblank_on_pipe(vgpu, pipe); + mutex_unlock(&vgpu->vgpu_lock); } /** @@ -409,11 +413,10 @@ void intel_gvt_emulate_vblank(struct intel_gvt *gvt) struct intel_vgpu *vgpu; int id; - if (WARN_ON(!mutex_is_locked(&gvt->lock))) - return; - + mutex_lock(&gvt->lock); for_each_active_vgpu(gvt, vgpu, id) emulate_vblank(vgpu); + mutex_unlock(&gvt->lock); } /** diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c index 61bd14fcb649..769e06c3ae23 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.c +++ b/drivers/gpu/drm/i915/gvt/gvt.c @@ -271,11 +271,8 @@ static int 
gvt_service_thread(void *data) continue; if (test_and_clear_bit(INTEL_GVT_REQUEST_EMULATE_VBLANK, - (void *)&gvt->service_request)) { - mutex_lock(&gvt->lock); + (void *)&gvt->service_request)) intel_gvt_emulate_vblank(gvt); - mutex_unlock(&gvt->lock); - } if (test_bit(INTEL_GVT_REQUEST_SCHED, (void *)&gvt->service_request) || diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 05d15a095310..3600553c8b56 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -170,6 +170,7 @@ struct intel_vgpu_submission { struct intel_vgpu { struct intel_gvt *gvt; + struct mutex vgpu_lock; int id; unsigned long handle; /* vGPU handle used by hypervisor MPT modules */ bool active; @@ -294,6 +295,9 @@ struct intel_vgpu_type { }; struct intel_gvt { + /* GVT scope lock, protect GVT itself, and all resources currently + * not yet protected by special locks (vgpu and scheduler lock). + */ struct mutex lock; struct drm_i915_private *dev_priv; struct idr vgpu_idr; /* vGPU IDR pool */ @@ -314,6 +318,10 @@ struct intel_gvt { struct task_struct *service_thread; wait_queue_head_t service_thread_wq; + + /* service_request is always used in bit operations, so always access + * it with atomic bit ops and there is no need to take the gvt big lock. + */ unsigned long service_request; struct { diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index d5e206661048..d60c2bee00fb 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -316,6 +316,7 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, } } + /* vgpu_lock already held by emulate mmio r/w */ intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask); /* sw will wait for the device to ack the reset request */ @@ -420,7 +421,10 @@ static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, vgpu_vreg(vgpu, offset) |= I965_PIPECONF_ACTIVE; else vgpu_vreg(vgpu, offset) &= ~I965_PIPECONF_ACTIVE; + /* vgpu_lock already held by emulate mmio r/w */ + mutex_unlock(&vgpu->vgpu_lock); intel_gvt_check_vblank_emulation(vgpu->gvt); + mutex_lock(&vgpu->vgpu_lock); return 0; } diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c index e4960aff68bd..2be1be2cf49a 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.c +++ b/drivers/gpu/drm/i915/gvt/mmio.c @@ -67,7 +67,7 @@ static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa, return; gvt = vgpu->gvt; - mutex_lock(&gvt->lock); + mutex_lock(&vgpu->vgpu_lock); offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa); if (reg_is_mmio(gvt, offset)) { if (read) @@ -85,7 +85,7 @@ static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa, memcpy(pt, p_data, bytes); } - mutex_unlock(&gvt->lock); + mutex_unlock(&vgpu->vgpu_lock); } /** @@ -109,7 +109,7 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa, failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, true); return 0; } - mutex_lock(&gvt->lock); + mutex_lock(&vgpu->vgpu_lock); offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa); @@ -156,7 +156,7 @@ err: gvt_vgpu_err("fail to emulate MMIO read %08x len %d\n", offset, bytes); out: - mutex_unlock(&gvt->lock); + mutex_unlock(&vgpu->vgpu_lock); return ret; } @@ -182,7 +182,7 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa, return 0; } - mutex_lock(&gvt->lock); + mutex_lock(&vgpu->vgpu_lock); offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa); @@ -220,7 +220,7 @@ err: gvt_vgpu_err("fail to emulate MMIO 
write %08x len %d\n", offset, bytes); out: - mutex_unlock(&gvt->lock); + mutex_unlock(&vgpu->vgpu_lock); return ret; } diff --git a/drivers/gpu/drm/i915/gvt/page_track.c b/drivers/gpu/drm/i915/gvt/page_track.c index 53e2bd79c97d..256d0db8bbb1 100644 --- a/drivers/gpu/drm/i915/gvt/page_track.c +++ b/drivers/gpu/drm/i915/gvt/page_track.c @@ -157,11 +157,10 @@ int intel_vgpu_disable_page_track(struct intel_vgpu *vgpu, unsigned long gfn) int intel_vgpu_page_track_handler(struct intel_vgpu *vgpu, u64 gpa, void *data, unsigned int bytes) { - struct intel_gvt *gvt = vgpu->gvt; struct intel_vgpu_page_track *page_track; int ret = 0; - mutex_lock(&gvt->lock); + mutex_lock(&vgpu->vgpu_lock); page_track = intel_vgpu_find_page_track(vgpu, gpa >> PAGE_SHIFT); if (!page_track) { @@ -179,6 +178,6 @@ int intel_vgpu_page_track_handler(struct intel_vgpu *vgpu, u64 gpa, } out: - mutex_unlock(&gvt->lock); + mutex_unlock(&vgpu->vgpu_lock); return ret; } diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index c2d183b91500..c0848dcf79c7 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -680,6 +680,7 @@ static int dispatch_workload(struct intel_vgpu_workload *workload) gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n", ring_id, workload); + mutex_lock(&vgpu->vgpu_lock); mutex_lock(&dev_priv->drm.struct_mutex); ret = intel_gvt_scan_and_shadow_workload(workload); @@ -704,6 +705,7 @@ out: } mutex_unlock(&dev_priv->drm.struct_mutex); + mutex_unlock(&vgpu->vgpu_lock); return ret; } @@ -861,14 +863,14 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) int event; mutex_lock(&gvt->lock); + mutex_lock(&vgpu->vgpu_lock); /* For the workload w/ request, needs to wait for the context * switch to make sure request is completed. * For the workload w/o request, directly complete the workload. 
*/ if (workload->req) { - struct drm_i915_private *dev_priv = - workload->vgpu->gvt->dev_priv; + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id]; wait_event(workload->shadow_ctx_status_wq, @@ -939,6 +941,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) if (gvt->scheduler.need_reschedule) intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED); + mutex_unlock(&vgpu->vgpu_lock); mutex_unlock(&gvt->lock); } @@ -991,9 +994,7 @@ static int workload_thread(void *priv) intel_uncore_forcewake_get(gvt->dev_priv, FORCEWAKE_ALL); - mutex_lock(&gvt->lock); ret = dispatch_workload(workload); - mutex_unlock(&gvt->lock); if (ret) { vgpu = workload->vgpu; diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index bf75300c1ec1..889d10f8ee96 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -226,22 +226,20 @@ void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu) */ void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu) { - struct intel_gvt *gvt = vgpu->gvt; - - mutex_lock(&gvt->lock); + mutex_lock(&vgpu->vgpu_lock); vgpu->active = false; if (atomic_read(&vgpu->submission.running_workload_num)) { - mutex_unlock(&gvt->lock); + mutex_unlock(&vgpu->vgpu_lock); intel_gvt_wait_vgpu_idle(vgpu); - mutex_lock(&gvt->lock); + mutex_lock(&vgpu->vgpu_lock); } intel_vgpu_stop_schedule(vgpu); intel_vgpu_dmabuf_cleanup(vgpu); - mutex_unlock(&gvt->lock); + mutex_unlock(&vgpu->vgpu_lock); } /** @@ -255,14 +253,11 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu) { struct intel_gvt *gvt = vgpu->gvt; - mutex_lock(&gvt->lock); + mutex_lock(&vgpu->vgpu_lock); WARN(vgpu->active, "vGPU is still active!\n"); intel_gvt_debugfs_remove_vgpu(vgpu); - idr_remove(&gvt->vgpu_idr, vgpu->id); - if (idr_is_empty(&gvt->vgpu_idr)) - intel_gvt_clean_irq(gvt); intel_vgpu_clean_sched_policy(vgpu); intel_vgpu_clean_submission(vgpu); intel_vgpu_clean_display(vgpu); @@ -272,10 +267,16 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu) intel_vgpu_free_resource(vgpu); intel_vgpu_clean_mmio(vgpu); intel_vgpu_dmabuf_cleanup(vgpu); - vfree(vgpu); + mutex_unlock(&vgpu->vgpu_lock); + mutex_lock(&gvt->lock); + idr_remove(&gvt->vgpu_idr, vgpu->id); + if (idr_is_empty(&gvt->vgpu_idr)) + intel_gvt_clean_irq(gvt); intel_gvt_update_vgpu_types(gvt); mutex_unlock(&gvt->lock); + + vfree(vgpu); } #define IDLE_VGPU_IDR 0 @@ -301,6 +302,7 @@ struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt) vgpu->id = IDLE_VGPU_IDR; vgpu->gvt = gvt; + mutex_init(&vgpu->vgpu_lock); for (i = 0; i < I915_NUM_ENGINES; i++) INIT_LIST_HEAD(&vgpu->submission.workload_q_head[i]); @@ -327,7 +329,10 @@ out_free_vgpu: */ void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu) { + mutex_lock(&vgpu->vgpu_lock); intel_vgpu_clean_sched_policy(vgpu); + mutex_unlock(&vgpu->vgpu_lock); + vfree(vgpu); } @@ -345,8 +350,6 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt, if (!vgpu) return ERR_PTR(-ENOMEM); - mutex_lock(&gvt->lock); - ret = idr_alloc(&gvt->vgpu_idr, vgpu, IDLE_VGPU_IDR + 1, GVT_MAX_VGPU, GFP_KERNEL); if (ret < 0) @@ -356,6 +359,7 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt, vgpu->handle = param->handle; vgpu->gvt = gvt; vgpu->sched_ctl.weight = param->weight; + mutex_init(&vgpu->vgpu_lock); INIT_LIST_HEAD(&vgpu->dmabuf_obj_list_head); INIT_RADIX_TREE(&vgpu->page_track_tree, GFP_KERNEL); idr_init(&vgpu->object_idr); @@ -403,8 
+407,6 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt, if (ret) goto out_clean_sched_policy; - mutex_unlock(&gvt->lock); - return vgpu; out_clean_sched_policy: @@ -427,7 +429,6 @@ out_clean_idr: idr_remove(&gvt->vgpu_idr, vgpu->id); out_free_vgpu: vfree(vgpu); - mutex_unlock(&gvt->lock); return ERR_PTR(ret); } @@ -459,12 +460,12 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt, param.low_gm_sz = BYTES_TO_MB(param.low_gm_sz); param.high_gm_sz = BYTES_TO_MB(param.high_gm_sz); + mutex_lock(&gvt->lock); vgpu = __intel_gvt_create_vgpu(gvt, &param); - if (IS_ERR(vgpu)) - return vgpu; - - /* calculate left instance change for types */ - intel_gvt_update_vgpu_types(gvt); + if (!IS_ERR(vgpu)) + /* calculate left instance change for types */ + intel_gvt_update_vgpu_types(gvt); + mutex_unlock(&gvt->lock); return vgpu; } @@ -476,7 +477,7 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt, * @engine_mask: engines to reset for GT reset * * This function is called when user wants to reset a virtual GPU through - * device model reset or GT reset. The caller should hold the gvt lock. + * device model reset or GT reset. The caller should hold the vgpu lock. * * vGPU Device Model Level Reset (DMLR) simulates the PCI level reset to reset * the whole vGPU to default state as when it is created. This vGPU function @@ -516,9 +517,9 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, * scheduler when the reset is triggered by current vgpu. */ if (scheduler->current_vgpu == NULL) { - mutex_unlock(&gvt->lock); + mutex_unlock(&vgpu->vgpu_lock); intel_gvt_wait_vgpu_idle(vgpu); - mutex_lock(&gvt->lock); + mutex_lock(&vgpu->vgpu_lock); } intel_vgpu_reset_submission(vgpu, resetting_eng); @@ -558,7 +559,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, */ void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu) { - mutex_lock(&vgpu->gvt->lock); + mutex_lock(&vgpu->vgpu_lock); intel_gvt_reset_vgpu_locked(vgpu, true, 0); - mutex_unlock(&vgpu->gvt->lock); + mutex_unlock(&vgpu->vgpu_lock); } -- cgit v1.2.3 From 9a512e23f173a3598709b74d6ccf9a6616403967 Mon Sep 17 00:00:00 2001 From: Colin Xu Date: Sat, 19 May 2018 12:28:55 +0800 Subject: drm/i915/gvt: Use sched_lock to protect gvt scheduler logic. The scheduler lock (gvt->sched_lock) is used to protect gvt scheduler logic, including the gvt scheduler structure (gvt->scheduler) and per vgpu schedule data (vgpu->sched_data, vgpu->sched_ctl). v9: - Change commit author since the patches are improved a lot compared with original version. Original author: Pei Zhang - Rebase to latest gvt-staging. v8: - Correct coding style. - Rebase to latest gvt-staging. v7: - Remove gtt_lock since already protected by gvt_lock and vgpu_lock. v6: - Rebase to latest gvt-staging. v5: - Rebase to latest gvt-staging. v4: - Rebase to latest gvt-staging. 
v3: update to latest code base Signed-off-by: Pei Zhang Signed-off-by: Colin Xu Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/gvt.c | 1 + drivers/gpu/drm/i915/gvt/gvt.h | 8 ++++++++ drivers/gpu/drm/i915/gvt/sched_policy.c | 36 +++++++++++++++++++++++++++++---- drivers/gpu/drm/i915/gvt/scheduler.c | 8 ++++---- 4 files changed, 45 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm/i915/gvt') diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c index 769e06c3ae23..22a3ddff38a3 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.c +++ b/drivers/gpu/drm/i915/gvt/gvt.c @@ -376,6 +376,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv) idr_init(&gvt->vgpu_idr); spin_lock_init(&gvt->scheduler.mmio_context_lock); mutex_init(&gvt->lock); + mutex_init(&gvt->sched_lock); gvt->dev_priv = dev_priv; init_device_info(gvt); diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 3600553c8b56..362e3d506d2d 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -177,6 +177,11 @@ struct intel_vgpu { bool pv_notified; bool failsafe; unsigned int resetting_eng; + + /* Both sched_data and sched_ctl can be seen as part of the global gvt + * scheduler structure. So below 2 vgpu data are protected + * by sched_lock, not vgpu_lock. + */ void *sched_data; struct vgpu_sched_ctl sched_ctl; @@ -299,6 +304,9 @@ struct intel_gvt { * not yet protected by special locks (vgpu and scheduler lock). */ struct mutex lock; + /* scheduler scope lock, protect gvt and vgpu schedule related data */ + struct mutex sched_lock; + struct drm_i915_private *dev_priv; struct idr vgpu_idr; /* vGPU IDR pool */ diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c index d053cbe1dc94..09d7bb72b4ff 100644 --- a/drivers/gpu/drm/i915/gvt/sched_policy.c +++ b/drivers/gpu/drm/i915/gvt/sched_policy.c @@ -228,7 +228,7 @@ void intel_gvt_schedule(struct intel_gvt *gvt) struct gvt_sched_data *sched_data = gvt->scheduler.sched_data; ktime_t cur_time; - mutex_lock(&gvt->lock); + mutex_lock(&gvt->sched_lock); cur_time = ktime_get(); if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED, @@ -244,7 +244,7 @@ void intel_gvt_schedule(struct intel_gvt *gvt) vgpu_update_timeslice(gvt->scheduler.current_vgpu, cur_time); tbs_sched_func(sched_data); - mutex_unlock(&gvt->lock); + mutex_unlock(&gvt->sched_lock); } static enum hrtimer_restart tbs_timer_fn(struct hrtimer *timer_data) @@ -359,39 +359,65 @@ static struct intel_gvt_sched_policy_ops tbs_schedule_ops = { int intel_gvt_init_sched_policy(struct intel_gvt *gvt) { + int ret; + + mutex_lock(&gvt->sched_lock); gvt->scheduler.sched_ops = &tbs_schedule_ops; + ret = gvt->scheduler.sched_ops->init(gvt); + mutex_unlock(&gvt->sched_lock); - return gvt->scheduler.sched_ops->init(gvt); + return ret; } void intel_gvt_clean_sched_policy(struct intel_gvt *gvt) { + mutex_lock(&gvt->sched_lock); gvt->scheduler.sched_ops->clean(gvt); + mutex_unlock(&gvt->sched_lock); } +/* for per-vgpu scheduler policy, there are 2 per-vgpu data: + * sched_data, and sched_ctl. We see these 2 data as part of + * the global scheduler which are protected by gvt->sched_lock. + * Caller should make their decision if the vgpu_lock should + * be held outside. 
+ */ + int intel_vgpu_init_sched_policy(struct intel_vgpu *vgpu) { - return vgpu->gvt->scheduler.sched_ops->init_vgpu(vgpu); + int ret; + + mutex_lock(&vgpu->gvt->sched_lock); + ret = vgpu->gvt->scheduler.sched_ops->init_vgpu(vgpu); + mutex_unlock(&vgpu->gvt->sched_lock); + + return ret; } void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu) { + mutex_lock(&vgpu->gvt->sched_lock); vgpu->gvt->scheduler.sched_ops->clean_vgpu(vgpu); + mutex_unlock(&vgpu->gvt->sched_lock); } void intel_vgpu_start_schedule(struct intel_vgpu *vgpu) { struct vgpu_sched_data *vgpu_data = vgpu->sched_data; + mutex_lock(&vgpu->gvt->sched_lock); if (!vgpu_data->active) { gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id); vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu); } + mutex_unlock(&vgpu->gvt->sched_lock); } void intel_gvt_kick_schedule(struct intel_gvt *gvt) { + mutex_lock(&gvt->sched_lock); intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED); + mutex_unlock(&gvt->sched_lock); } void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu) @@ -406,6 +432,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu) gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id); + mutex_lock(&vgpu->gvt->sched_lock); scheduler->sched_ops->stop_schedule(vgpu); if (scheduler->next_vgpu == vgpu) @@ -425,4 +452,5 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu) } } spin_unlock_bh(&scheduler->mmio_context_lock); + mutex_unlock(&vgpu->gvt->sched_lock); } diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index c0848dcf79c7..847f295e6755 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -715,7 +715,7 @@ static struct intel_vgpu_workload *pick_next_workload( struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; struct intel_vgpu_workload *workload = NULL; - mutex_lock(&gvt->lock); + mutex_lock(&gvt->sched_lock); /* * no current vgpu / will be scheduled out / no workload @@ -761,7 +761,7 @@ static struct intel_vgpu_workload *pick_next_workload( atomic_inc(&workload->vgpu->submission.running_workload_num); out: - mutex_unlock(&gvt->lock); + mutex_unlock(&gvt->sched_lock); return workload; } @@ -862,8 +862,8 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) struct intel_vgpu_submission *s = &vgpu->submission; int event; - mutex_lock(&gvt->lock); mutex_lock(&vgpu->vgpu_lock); + mutex_lock(&gvt->sched_lock); /* For the workload w/ request, needs to wait for the context * switch to make sure request is completed. @@ -941,8 +941,8 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) if (gvt->scheduler.need_reschedule) intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED); + mutex_unlock(&gvt->sched_lock); mutex_unlock(&vgpu->vgpu_lock); - mutex_unlock(&gvt->lock); } struct workload_thread_param { -- cgit v1.2.3 From 1f1c60d5b577377d07863f3e0891e63a0a56a3ad Mon Sep 17 00:00:00 2001 From: Xinyun Liu Date: Thu, 7 Jun 2018 22:48:40 +0800 Subject: drm/i915/gvt: Avoid dereference a potential null pointer Add sanity check for up_irq_info. 
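The fix below leans on WARN_ON() evaluating to its condition, so the sanity check and the bail-out fold into a single statement. A minimal sketch of the idiom (pointer and helper names hypothetical):

	if (WARN_ON(!info))	/* prints a warning + stack trace, then bails */
		return;
	use(info);		/* info is known non-NULL from here on */
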
Signed-off-by: Xinyun Liu Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/interrupt.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/i915/gvt') diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c index 7a041b368f68..1d79596da510 100644 --- a/drivers/gpu/drm/i915/gvt/interrupt.c +++ b/drivers/gpu/drm/i915/gvt/interrupt.c @@ -350,7 +350,8 @@ static void update_upstream_irq(struct intel_vgpu *vgpu, clear_bits |= (1 << bit); } - WARN_ON(!up_irq_info); + if (WARN_ON(!up_irq_info)) + return; if (up_irq_info->group == INTEL_GVT_IRQ_INFO_MASTER) { u32 isr = i915_mmio_reg_offset(up_irq_info->reg_base); -- cgit v1.2.3 From 659571953d315b36204acac508ef8d477044d260 Mon Sep 17 00:00:00 2001 From: Xinyun Liu Date: Thu, 7 Jun 2018 22:48:41 +0800 Subject: drm/i915/gvt: removed unnecessary boundary check type is already checked in the function entry. So it is unnecessary to check it again. Signed-off-by: Xinyun Liu Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/gtt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/i915/gvt') diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 78e55aafc8bc..49400ab129ff 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -1972,7 +1972,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu, * GTT_TYPE_PPGTT_PDE_PT level pt, that means this scratch_pt it self * is GTT_TYPE_PPGTT_PTE_PT, and full filled by scratch page mfn. */ - if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) { + if (type > GTT_TYPE_PPGTT_PTE_PT) { struct intel_gvt_gtt_entry se; memset(&se, 0, sizeof(struct intel_gvt_gtt_entry)); -- cgit v1.2.3 From 1417fad75cb4eddc8d50604be88ca9a8a8de4c71 Mon Sep 17 00:00:00 2001 From: Xinyun Liu Date: Thu, 7 Jun 2018 22:48:42 +0800 Subject: drm/i915/gvt: use array to avoid potential buffer overflow MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Array 'pdp_pair' of size 1 may use index value(s) 1..7. Changed to pdps[8] to avoid confusion. 
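In short, the old code took the address of a single member and indexed it as the base of an 8-element array, which is out of bounds as far as the compiler and static analyzers are concerned; the explicit array keeps the identical layout but makes the bound real:

	/* old: one member used as the base of an 8-entry array */
	struct execlist_mmio_pair *pdp_pair = &ring_context->pdp3_UDW;
	pdp_pair[i].val = pdp[7 - i];		/* i = 1..7 indexes past pdp3_UDW */

	/* new: same layout, explicit bound */
	ring_context->pdps[i].val = pdp[7 - i];	/* i = 0..7 is in bounds */
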
Signed-off-by: Xinyun Liu Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/execlist.h | 13 +++++-------- drivers/gpu/drm/i915/gvt/scheduler.c | 5 ++--- 2 files changed, 7 insertions(+), 11 deletions(-) (limited to 'drivers/gpu/drm/i915/gvt') diff --git a/drivers/gpu/drm/i915/gvt/execlist.h b/drivers/gpu/drm/i915/gvt/execlist.h index 427e40e64d41..714d709829a2 100644 --- a/drivers/gpu/drm/i915/gvt/execlist.h +++ b/drivers/gpu/drm/i915/gvt/execlist.h @@ -146,14 +146,11 @@ struct execlist_ring_context { u32 nop4; u32 lri_cmd_2; struct execlist_mmio_pair ctx_timestamp; - struct execlist_mmio_pair pdp3_UDW; - struct execlist_mmio_pair pdp3_LDW; - struct execlist_mmio_pair pdp2_UDW; - struct execlist_mmio_pair pdp2_LDW; - struct execlist_mmio_pair pdp1_UDW; - struct execlist_mmio_pair pdp1_LDW; - struct execlist_mmio_pair pdp0_UDW; - struct execlist_mmio_pair pdp0_LDW; + /* + * pdps[8]={ pdp3_UDW, pdp3_LDW, pdp2_UDW, pdp2_LDW, + * pdp1_UDW, pdp1_LDW, pdp0_UDW, pdp0_LDW} + */ + struct execlist_mmio_pair pdps[8]; }; struct intel_vgpu_elsp_dwords { diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index cf5a22cb6e06..462cf560492e 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -45,11 +45,10 @@ static void set_context_pdp_root_pointer( struct execlist_ring_context *ring_context, u32 pdp[8]) { - struct execlist_mmio_pair *pdp_pair = &ring_context->pdp3_UDW; int i; for (i = 0; i < 8; i++) - pdp_pair[i].val = pdp[7 - i]; + ring_context->pdps[i].val = pdp[7 - i]; } static void update_shadow_pdps(struct intel_vgpu_workload *workload) @@ -1230,7 +1229,7 @@ static void read_guest_pdps(struct intel_vgpu *vgpu, u64 gpa; int i; - gpa = ring_context_gpa + RING_CTX_OFF(pdp3_UDW.val); + gpa = ring_context_gpa + RING_CTX_OFF(pdps[0].val); for (i = 0; i < 8; i++) intel_gvt_hypervisor_read_gpa(vgpu, -- cgit v1.2.3 From 2939db9e2879a1f80bcf863ccbd4919b3eef0d91 Mon Sep 17 00:00:00 2001 From: Colin Xu Date: Mon, 11 Jun 2018 15:39:29 +0800 Subject: drm/i915/gvt: Add D_BXT device type define for BXT. Broxton belongs to GEN9 family so add to SKL and GEN9 plus. 
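The effect is that any register registered with a mask containing D_BXT now matches on Broxton. A sketch of the matching, assuming the usual bitwise test (helper name illustrative only):

	/* illustrative: device masks are matched bitwise against the
	 * current device type, so D_SKL_PLUS entries now cover BXT too */
	static bool gvt_device_matches(struct intel_gvt *gvt, unsigned long device)
	{
		return intel_gvt_get_device_type(gvt) & device;
	}
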
Signed-off-by: Colin Xu Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/handlers.c | 2 ++ drivers/gpu/drm/i915/gvt/mmio.h | 11 ++++++----- 2 files changed, 8 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm/i915/gvt') diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index d60c2bee00fb..4dfe8ba55849 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -55,6 +55,8 @@ unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt) return D_SKL; else if (IS_KABYLAKE(gvt->dev_priv)) return D_KBL; + else if (IS_BROXTON(gvt->dev_priv)) + return D_BXT; return 0; } diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h index 71b620875943..e474188b46d2 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.h +++ b/drivers/gpu/drm/i915/gvt/mmio.h @@ -42,15 +42,16 @@ struct intel_vgpu; #define D_BDW (1 << 0) #define D_SKL (1 << 1) #define D_KBL (1 << 2) +#define D_BXT (1 << 3) -#define D_GEN9PLUS (D_SKL | D_KBL) -#define D_GEN8PLUS (D_BDW | D_SKL | D_KBL) +#define D_GEN9PLUS (D_SKL | D_KBL | D_BXT) +#define D_GEN8PLUS (D_BDW | D_SKL | D_KBL | D_BXT) -#define D_SKL_PLUS (D_SKL | D_KBL) -#define D_BDW_PLUS (D_BDW | D_SKL | D_KBL) +#define D_SKL_PLUS (D_SKL | D_KBL | D_BXT) +#define D_BDW_PLUS (D_BDW | D_SKL | D_KBL | D_BXT) #define D_PRE_SKL (D_BDW) -#define D_ALL (D_BDW | D_SKL | D_KBL) +#define D_ALL (D_BDW | D_SKL | D_KBL | D_BXT) typedef int (*gvt_mmio_func)(struct intel_vgpu *, unsigned int, void *, unsigned int); -- cgit v1.2.3 From 02b966c12b49ba6dae627c2135b7c1a258e32303 Mon Sep 17 00:00:00 2001 From: Colin Xu Date: Mon, 11 Jun 2018 15:39:30 +0800 Subject: drm/i915/gvt: Add MEDIA_POOL_STATE for BXT. As described in the PRM for Broxton Graphics on 01.org. Signed-off-by: Colin Xu Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/cmd_parser.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/gpu/drm/i915/gvt') diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index 718ca08f9575..2b8e6e15e936 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -172,6 +172,7 @@ struct decode_info { #define OP_MEDIA_INTERFACE_DESCRIPTOR_LOAD OP_3D_MEDIA(0x2, 0x0, 0x2) #define OP_MEDIA_GATEWAY_STATE OP_3D_MEDIA(0x2, 0x0, 0x3) #define OP_MEDIA_STATE_FLUSH OP_3D_MEDIA(0x2, 0x0, 0x4) +#define OP_MEDIA_POOL_STATE OP_3D_MEDIA(0x2, 0x0, 0x5) #define OP_MEDIA_OBJECT OP_3D_MEDIA(0x2, 0x1, 0x0) #define OP_MEDIA_OBJECT_PRT OP_3D_MEDIA(0x2, 0x1, 0x2) @@ -2349,6 +2350,9 @@ static struct cmd_info cmd_info[] = { {"MEDIA_STATE_FLUSH", OP_MEDIA_STATE_FLUSH, F_LEN_VAR, R_RCS, D_ALL, 0, 16, NULL}, + {"MEDIA_POOL_STATE", OP_MEDIA_POOL_STATE, F_LEN_VAR, R_RCS, D_ALL, + 0, 16, NULL}, + {"MEDIA_OBJECT", OP_MEDIA_OBJECT, F_LEN_VAR, R_RCS, D_ALL, 0, 16, NULL}, {"MEDIA_CURBE_LOAD", OP_MEDIA_CURBE_LOAD, F_LEN_VAR, R_RCS, D_ALL, -- cgit v1.2.3 From 6fa6616b8f7adaf8380d72e53733bc2472b0d157 Mon Sep 17 00:00:00 2001 From: Colin Xu Date: Mon, 11 Jun 2018 15:39:31 +0800 Subject: drm/i915/gvt: Enable device info initialization for BXT. Initialize BXT device info as SKL/KBL. v2: All supported platforms share the same device configuration. Remove the platform check for now and let is_supported_device() be the gate keeper. 
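A sketch of the gate keeper idea, assuming is_supported_device() simply whitelists platforms; its real body lives outside this diff:

	/* assumed shape, for illustration only */
	static bool is_supported_device(struct drm_i915_private *dev_priv)
	{
		return IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv) ||
		       IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv);
	}
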
Signed-off-by: Colin Xu Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/gvt.c | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) (limited to 'drivers/gpu/drm/i915/gvt') diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c index 22a3ddff38a3..4e65266e7b95 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.c +++ b/drivers/gpu/drm/i915/gvt/gvt.c @@ -238,18 +238,15 @@ static void init_device_info(struct intel_gvt *gvt) struct intel_gvt_device_info *info = &gvt->device_info; struct pci_dev *pdev = gvt->dev_priv->drm.pdev; - if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv) - || IS_KABYLAKE(gvt->dev_priv)) { - info->max_support_vgpus = 8; - info->cfg_space_size = PCI_CFG_SPACE_EXP_SIZE; - info->mmio_size = 2 * 1024 * 1024; - info->mmio_bar = 0; - info->gtt_start_offset = 8 * 1024 * 1024; - info->gtt_entry_size = 8; - info->gtt_entry_size_shift = 3; - info->gmadr_bytes_in_cmd = 8; - info->max_surface_size = 36 * 1024 * 1024; - } + info->max_support_vgpus = 8; + info->cfg_space_size = PCI_CFG_SPACE_EXP_SIZE; + info->mmio_size = 2 * 1024 * 1024; + info->mmio_bar = 0; + info->gtt_start_offset = 8 * 1024 * 1024; + info->gtt_entry_size = 8; + info->gtt_entry_size_shift = 3; + info->gmadr_bytes_in_cmd = 8; + info->max_surface_size = 36 * 1024 * 1024; info->msi_cap_offset = pdev->msi_cap; } -- cgit v1.2.3 From 665004b8f50612d1e354eb9abd6d97e6eb85a619 Mon Sep 17 00:00:00 2001 From: Colin Xu Date: Mon, 11 Jun 2018 15:39:32 +0800 Subject: drm/i915/gvt: Enable gtt initialization for BXT. Initialize BXT gtt as SKL/KBL. v2: All supported platforms share the same gtt ops. Remove the platform check for now and let is_supported_device() be the gate keeper. Signed-off-by: Colin Xu Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/gtt.c | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm/i915/gvt') diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 49400ab129ff..6a8a9868bcfb 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -2256,13 +2256,8 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt) gvt_dbg_core("init gtt\n"); - if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv) - || IS_KABYLAKE(gvt->dev_priv)) { - gvt->gtt.pte_ops = &gen8_gtt_pte_ops; - gvt->gtt.gma_ops = &gen8_gtt_gma_ops; - } else { - return -ENODEV; - } + gvt->gtt.pte_ops = &gen8_gtt_pte_ops; + gvt->gtt.gma_ops = &gen8_gtt_gma_ops; page = (void *)get_zeroed_page(GFP_KERNEL); if (!page) { -- cgit v1.2.3 From d0f827bb18fe2ceb39707718334b636b89be4af0 Mon Sep 17 00:00:00 2001 From: Colin Xu Date: Mon, 11 Jun 2018 15:39:33 +0800 Subject: drm/i915/gvt: Enable irq initialization for BXT. Initialize BXT irq handler as SKL/KBL. v2: All supported platforms share the same irq ops and map. Remove the platform check for now and let is_supported_device() be the gate keeper. 
Signed-off-by: Colin Xu Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/interrupt.c | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) (limited to 'drivers/gpu/drm/i915/gvt') diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c index 1d79596da510..5daa23ae566b 100644 --- a/drivers/gpu/drm/i915/gvt/interrupt.c +++ b/drivers/gpu/drm/i915/gvt/interrupt.c @@ -581,7 +581,9 @@ static void gen8_init_irq( SET_BIT_INFO(irq, 4, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C); SET_BIT_INFO(irq, 5, SPRITE_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C); - } else if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv)) { + } else if (IS_SKYLAKE(gvt->dev_priv) + || IS_KABYLAKE(gvt->dev_priv) + || IS_BROXTON(gvt->dev_priv)) { SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_DE_PORT); SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_DE_PORT); SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_DE_PORT); @@ -691,14 +693,8 @@ int intel_gvt_init_irq(struct intel_gvt *gvt) gvt_dbg_core("init irq framework\n"); - if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv) - || IS_KABYLAKE(gvt->dev_priv)) { - irq->ops = &gen8_irq_ops; - irq->irq_map = gen8_irq_map; - } else { - WARN_ON(1); - return -ENODEV; - } + irq->ops = &gen8_irq_ops; + irq->irq_map = gen8_irq_map; /* common event initialization */ init_events(irq); -- cgit v1.2.3 From a94cf2e0ef7fef5cb42ed96f73c18c3ad4f0d170 Mon Sep 17 00:00:00 2001 From: Colin Xu Date: Mon, 11 Jun 2018 15:39:34 +0800 Subject: drm/i915/gvt: Enable mmio context init and switch for BXT. Handle pending tlb flush, mocs/mmio switch and context as KBL. Signed-off-by: Colin Xu Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/mmio_context.c | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm/i915/gvt') diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c index 708170e61625..20be9a92600f 100644 --- a/drivers/gpu/drm/i915/gvt/mmio_context.c +++ b/drivers/gpu/drm/i915/gvt/mmio_context.c @@ -364,7 +364,8 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id) */ fw = intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ | FW_REG_WRITE); - if (ring_id == RCS && (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))) + if (ring_id == RCS && (IS_SKYLAKE(dev_priv) || + IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv))) fw |= FORCEWAKE_RENDER; intel_uncore_forcewake_get(dev_priv, fw); @@ -401,7 +402,7 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next, if (WARN_ON(ring_id >= ARRAY_SIZE(regs))) return; - if (IS_KABYLAKE(dev_priv) && ring_id == RCS) + if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv)) && ring_id == RCS) return; if (!pre && !gen9_render_mocs.initialized) @@ -467,7 +468,9 @@ static void switch_mmio(struct intel_vgpu *pre, u32 old_v, new_v; dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv; - if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) + if (IS_SKYLAKE(dev_priv) + || IS_KABYLAKE(dev_priv) + || IS_BROXTON(dev_priv)) switch_mocs(pre, next, ring_id); for (mmio = dev_priv->gvt->engine_mmio_list.mmio; @@ -479,7 +482,8 @@ static void switch_mmio(struct intel_vgpu *pre, * state image on kabylake, it's initialized by lri command and * save or restore with context together. 
 */ - if (IS_KABYLAKE(dev_priv) && mmio->in_context) + if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv)) + && mmio->in_context) continue; // save @@ -574,7 +578,9 @@ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt) { struct engine_mmio *mmio; - if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv)) + if (IS_SKYLAKE(gvt->dev_priv) || + IS_KABYLAKE(gvt->dev_priv) || + IS_BROXTON(gvt->dev_priv)) gvt->engine_mmio_list.mmio = gen9_engine_mmio_list; else gvt->engine_mmio_list.mmio = gen8_engine_mmio_list; -- cgit v1.2.3 From f093f182ace9ce3c618c110e316cc1e22d0c9d3b Mon Sep 17 00:00:00 2001 From: Colin Xu Date: Mon, 11 Jun 2018 15:39:35 +0800 Subject: drm/i915/gvt: Enable cmd_parser support for BXT. Handle BXT cmd_parser as SKL/KBL. v2: All supported platforms share the same routines. Remove the platform check for now and let is_supported_device() be the gate keeper. Signed-off-by: Colin Xu Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/cmd_parser.c | 39 +++++++++++++---------------------- 1 file changed, 14 insertions(+), 25 deletions(-) (limited to 'drivers/gpu/drm/i915/gvt') diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index 2b8e6e15e936..0dd88fe2e39a 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -1257,7 +1257,9 @@ static int gen8_check_mi_display_flip(struct parser_exec_state *s, if (!info->async_flip) return 0; - if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { + if (IS_SKYLAKE(dev_priv) + || IS_KABYLAKE(dev_priv) + || IS_BROXTON(dev_priv)) { stride = vgpu_vreg_t(s->vgpu, info->stride_reg) & GENMASK(9, 0); tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) & GENMASK(12, 10)) >> 10; @@ -1285,7 +1287,9 @@ static int gen8_update_plane_mmio_from_mi_display_flip( set_mask_bits(&vgpu_vreg_t(vgpu, info->surf_reg), GENMASK(31, 12), info->surf_val << 12); - if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { + if (IS_SKYLAKE(dev_priv) + || IS_KABYLAKE(dev_priv) + || IS_BROXTON(dev_priv)) { set_mask_bits(&vgpu_vreg_t(vgpu, info->stride_reg), GENMASK(9, 0), info->stride_val); set_mask_bits(&vgpu_vreg_t(vgpu, info->ctrl_reg), GENMASK(12, 10), @@ -1309,7 +1313,9 @@ static int decode_mi_display_flip(struct parser_exec_state *s, if (IS_BROADWELL(dev_priv)) return gen8_decode_mi_display_flip(s, info); - if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) + if (IS_SKYLAKE(dev_priv) + || IS_KABYLAKE(dev_priv) + || IS_BROXTON(dev_priv)) return skl_decode_mi_display_flip(s, info); return -ENODEV; @@ -1318,26 +1324,14 @@ static int decode_mi_display_flip(struct parser_exec_state *s, static int check_mi_display_flip(struct parser_exec_state *s, struct mi_display_flip_command_info *info) { - struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv; - - if (IS_BROADWELL(dev_priv) - || IS_SKYLAKE(dev_priv) - || IS_KABYLAKE(dev_priv)) - return gen8_check_mi_display_flip(s, info); - return -ENODEV; + return gen8_check_mi_display_flip(s, info); } static int update_plane_mmio_from_mi_display_flip( struct parser_exec_state *s, struct mi_display_flip_command_info *info) { - struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv; - - if (IS_BROADWELL(dev_priv) - || IS_SKYLAKE(dev_priv) - || IS_KABYLAKE(dev_priv)) - return gen8_update_plane_mmio_from_mi_display_flip(s, info); - return -ENODEV; + return gen8_update_plane_mmio_from_mi_display_flip(s, info); } static int cmd_handler_mi_display_flip(struct parser_exec_state *s) @@ -1616,15 +1610,10 @@ static int copy_gma_to_hva(struct intel_vgpu 
*vgpu, struct intel_vgpu_mm *mm, */ static int batch_buffer_needs_scan(struct parser_exec_state *s) { - struct intel_gvt *gvt = s->vgpu->gvt; - - if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv) - || IS_KABYLAKE(gvt->dev_priv)) { - /* BDW decides privilege based on address space */ - if (cmd_val(s, 0) & (1 << 8) && + /* Decide privilege based on address space */ + if (cmd_val(s, 0) & (1 << 8) && !(s->vgpu->scan_nonprivbb & (1 << s->ring_id))) - return 0; - } + return 0; return 1; } -- cgit v1.2.3 From 47d9d3be5925688dfcc7407b0ae70013ba2dce2e Mon Sep 17 00:00:00 2001 From: Colin Xu Date: Mon, 11 Jun 2018 15:39:36 +0800 Subject: drm/i915/gvt: Enable force wake support for BXT. BXT forcewake is handled in the same way as SKL/KBL. v2: Add missing inhibit_context restore for BXT. Signed-off-by: Colin Xu Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/scheduler.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/i915/gvt') diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 462cf560492e..928818f218f7 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -297,7 +297,8 @@ static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload) void *shadow_ring_buffer_va; u32 *cs; - if (IS_KABYLAKE(req->i915) && is_inhibit_context(req->hw_context)) + if ((IS_KABYLAKE(req->i915) || IS_BROXTON(req->i915)) + && is_inhibit_context(req->hw_context)) intel_vgpu_restore_inhibit_context(vgpu, req); /* allocate shadow ring buffer */ @@ -904,7 +905,8 @@ static int workload_thread(void *priv) struct intel_vgpu *vgpu = NULL; int ret; bool need_force_wake = IS_SKYLAKE(gvt->dev_priv) - || IS_KABYLAKE(gvt->dev_priv); + || IS_KABYLAKE(gvt->dev_priv) + || IS_BROXTON(gvt->dev_priv); DEFINE_WAIT_FUNC(wait, woken_wake_function); kfree(p); -- cgit v1.2.3 From 72bad9972876933b3c428fcee122bb1dcbf8cf5a Mon Sep 17 00:00:00 2001 From: Colin Xu Date: Mon, 11 Jun 2018 15:39:37 +0800 Subject: drm/i915/gvt: Enable virtual display support for BXT. Virtual monitor on BXT starts from port B. Unlike SKL/KBL, digital display port connectivity is detected via GEN8_DE_PORT_ISR, so emulate monitor state change by setting it. 
Signed-off-by: Colin Xu Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/display.c | 23 +++++++++++++++++++++++ drivers/gpu/drm/i915/gvt/edid.c | 20 +++++++++++++++++++- 2 files changed, 42 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/i915/gvt') diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c index 38521fa81bf9..6ee50cb328f8 100644 --- a/drivers/gpu/drm/i915/gvt/display.c +++ b/drivers/gpu/drm/i915/gvt/display.c @@ -171,6 +171,29 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; int pipe; + if (IS_BROXTON(dev_priv)) { + vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &= ~(BXT_DE_PORT_HP_DDIA | + BXT_DE_PORT_HP_DDIB | + BXT_DE_PORT_HP_DDIC); + + if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) { + vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |= + BXT_DE_PORT_HP_DDIA; + } + + if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) { + vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |= + BXT_DE_PORT_HP_DDIB; + } + + if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) { + vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |= + BXT_DE_PORT_HP_DDIC; + } + + return; + } + vgpu_vreg_t(vgpu, SDEISR) &= ~(SDE_PORTB_HOTPLUG_CPT | SDE_PORTC_HOTPLUG_CPT | SDE_PORTD_HOTPLUG_CPT); diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c index f61337632969..4b98539025c5 100644 --- a/drivers/gpu/drm/i915/gvt/edid.c +++ b/drivers/gpu/drm/i915/gvt/edid.c @@ -77,6 +77,20 @@ static unsigned char edid_get_byte(struct intel_vgpu *vgpu) return chr; } +static inline int bxt_get_port_from_gmbus0(u32 gmbus0) +{ + int port_select = gmbus0 & _GMBUS_PIN_SEL_MASK; + int port = -EINVAL; + + if (port_select == 1) + port = PORT_B; + else if (port_select == 2) + port = PORT_C; + else if (port_select == 3) + port = PORT_D; + return port; +} + static inline int get_port_from_gmbus0(u32 gmbus0) { int port_select = gmbus0 & _GMBUS_PIN_SEL_MASK; @@ -105,6 +119,7 @@ static void reset_gmbus_controller(struct intel_vgpu *vgpu) static int gmbus0_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; int port, pin_select; memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes); @@ -116,7 +131,10 @@ static int gmbus0_mmio_write(struct intel_vgpu *vgpu, if (pin_select == 0) return 0; - port = get_port_from_gmbus0(pin_select); + if (IS_BROXTON(dev_priv)) + port = bxt_get_port_from_gmbus0(pin_select); + else + port = get_port_from_gmbus0(pin_select); if (WARN_ON(port < 0)) return 0; -- cgit v1.2.3 From 84eb04f6d77a824b33da11a08118ed2766038f13 Mon Sep 17 00:00:00 2001 From: Colin Xu Date: Mon, 11 Jun 2018 15:39:38 +0800 Subject: drm/i915/gvt: Enable dma_buf support for BXT. Handle dma_buf on BXT as SKL and KBL. 
Signed-off-by: Colin Xu Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/dmabuf.c | 4 +++- drivers/gpu/drm/i915/gvt/fb_decoder.c | 12 +++++++++--- 2 files changed, 12 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/i915/gvt') diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c index d2eb2f7754b9..6e3f56684f4e 100644 --- a/drivers/gpu/drm/i915/gvt/dmabuf.c +++ b/drivers/gpu/drm/i915/gvt/dmabuf.c @@ -164,7 +164,9 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev, obj->read_domains = I915_GEM_DOMAIN_GTT; obj->write_domain = 0; - if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { + if (IS_SKYLAKE(dev_priv) + || IS_KABYLAKE(dev_priv) + || IS_BROXTON(dev_priv)) { unsigned int tiling_mode = 0; unsigned int stride = 0; diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c index 20b502c44eae..face664be3e8 100644 --- a/drivers/gpu/drm/i915/gvt/fb_decoder.c +++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c @@ -151,7 +151,9 @@ static u32 intel_vgpu_get_stride(struct intel_vgpu *vgpu, int pipe, u32 stride_reg = vgpu_vreg_t(vgpu, DSPSTRIDE(pipe)) & stride_mask; u32 stride = stride_reg; - if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { + if (IS_SKYLAKE(dev_priv) + || IS_KABYLAKE(dev_priv) + || IS_BROXTON(dev_priv)) { switch (tiled) { case PLANE_CTL_TILED_LINEAR: stride = stride_reg * 64; @@ -215,7 +217,9 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu, if (!plane->enabled) return -ENODEV; - if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { + if (IS_SKYLAKE(dev_priv) + || IS_KABYLAKE(dev_priv) + || IS_BROXTON(dev_priv)) { plane->tiled = (val & PLANE_CTL_TILED_MASK) >> _PLANE_CTL_TILED_SHIFT; fmt = skl_format_to_drm( @@ -257,7 +261,9 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu, } plane->stride = intel_vgpu_get_stride(vgpu, pipe, (plane->tiled << 10), - (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) ? + (IS_SKYLAKE(dev_priv) + || IS_KABYLAKE(dev_priv) + || IS_BROXTON(dev_priv)) ? (_PRI_PLANE_STRIDE_MASK >> 6) : _PRI_PLANE_STRIDE_MASK, plane->bpp); -- cgit v1.2.3 From d71cb7129e7cf038ed1444781a50766e486bb2bd Mon Sep 17 00:00:00 2001 From: Colin Xu Date: Mon, 11 Jun 2018 15:39:39 +0800 Subject: drm/i915/gvt: Add mmio handler for BXT. Leverage most SKL/KBL mmio init info and add different mmio to BXT specific function init_bxt_mmio_info(). 
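A sketch of how the platform-specific table might be chained in during setup, assuming BXT layers its entries on top of the shared GEN9 ones (call sites illustrative, init_generic_mmio_info() hypothetical):

	ret = init_generic_mmio_info(gvt);		/* hypothetical common step */
	if (!ret && IS_BROXTON(gvt->dev_priv)) {
		ret = init_skl_mmio_info(gvt);		/* shared SKL/KBL/BXT entries */
		if (!ret)
			ret = init_bxt_mmio_info(gvt);	/* BXT-only entries */
	}
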
Signed-off-by: Colin Xu Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/handlers.c | 389 +++++++++++++++++++++++++++++++----- 1 file changed, 344 insertions(+), 45 deletions(-) (limited to 'drivers/gpu/drm/i915/gvt') diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 4dfe8ba55849..0bc0c5418adb 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -257,7 +257,8 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu, new = CALC_MODE_MASK_REG(old, *(u32 *)p_data); if (IS_SKYLAKE(vgpu->gvt->dev_priv) - || IS_KABYLAKE(vgpu->gvt->dev_priv)) { + || IS_KABYLAKE(vgpu->gvt->dev_priv) + || IS_BROXTON(vgpu->gvt->dev_priv)) { switch (offset) { case FORCEWAKE_RENDER_GEN9_REG: ack_reg_offset = FORCEWAKE_ACK_RENDER_GEN9_REG; @@ -863,7 +864,8 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu, data = vgpu_vreg(vgpu, offset); if ((IS_SKYLAKE(vgpu->gvt->dev_priv) - || IS_KABYLAKE(vgpu->gvt->dev_priv)) + || IS_KABYLAKE(vgpu->gvt->dev_priv) + || IS_BROXTON(vgpu->gvt->dev_priv)) && offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) { /* SKL DPB/C/D aux ctl register changed */ return 0; @@ -1370,6 +1372,16 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset, *data0 = 0x1e1a1100; else *data0 = 0x61514b3d; + } else if (IS_BROXTON(vgpu->gvt->dev_priv)) { + /** + * "Read memory latency" command on gen9. + * Below memory latency values are read + * from Broxton MRB. + */ + if (!*data0) + *data0 = 0x16080707; + else + *data0 = 0x16161616; } break; case SKL_PCODE_CDCLK_CONTROL: @@ -1427,8 +1439,11 @@ static int skl_power_well_ctl_write(struct intel_vgpu *vgpu, { u32 v = *(u32 *)p_data; - v &= (1 << 31) | (1 << 29) | (1 << 9) | - (1 << 7) | (1 << 5) | (1 << 3) | (1 << 1); + if (IS_BROXTON(vgpu->gvt->dev_priv)) + v &= (1 << 31) | (1 << 29); + else + v &= (1 << 31) | (1 << 29) | (1 << 9) | + (1 << 7) | (1 << 5) | (1 << 3) | (1 << 1); v |= (v >> 1); return intel_vgpu_default_mmio_write(vgpu, offset, &v, bytes); @@ -1448,6 +1463,102 @@ static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset, return 0; } +static int bxt_de_pll_enable_write(struct intel_vgpu *vgpu, + unsigned int offset, void *p_data, unsigned int bytes) +{ + u32 v = *(u32 *)p_data; + + if (v & BXT_DE_PLL_PLL_ENABLE) + v |= BXT_DE_PLL_LOCK; + + vgpu_vreg(vgpu, offset) = v; + + return 0; +} + +static int bxt_port_pll_enable_write(struct intel_vgpu *vgpu, + unsigned int offset, void *p_data, unsigned int bytes) +{ + u32 v = *(u32 *)p_data; + + if (v & PORT_PLL_ENABLE) + v |= PORT_PLL_LOCK; + + vgpu_vreg(vgpu, offset) = v; + + return 0; +} + +static int bxt_phy_ctl_family_write(struct intel_vgpu *vgpu, + unsigned int offset, void *p_data, unsigned int bytes) +{ + u32 v = *(u32 *)p_data; + u32 data = v & COMMON_RESET_DIS ? 
BXT_PHY_LANE_ENABLED : 0; + + vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_A) = data; + vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_B) = data; + vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_C) = data; + + vgpu_vreg(vgpu, offset) = v; + + return 0; +} + +static int bxt_port_tx_dw3_read(struct intel_vgpu *vgpu, + unsigned int offset, void *p_data, unsigned int bytes) +{ + u32 v = vgpu_vreg(vgpu, offset); + + v &= ~UNIQUE_TRANGE_EN_METHOD; + + vgpu_vreg(vgpu, offset) = v; + + return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes); +} + +static int bxt_pcs_dw12_grp_write(struct intel_vgpu *vgpu, + unsigned int offset, void *p_data, unsigned int bytes) +{ + u32 v = *(u32 *)p_data; + + if (offset == _PORT_PCS_DW12_GRP_A || offset == _PORT_PCS_DW12_GRP_B) { + vgpu_vreg(vgpu, offset - 0x600) = v; + vgpu_vreg(vgpu, offset - 0x800) = v; + } else { + vgpu_vreg(vgpu, offset - 0x400) = v; + vgpu_vreg(vgpu, offset - 0x600) = v; + } + + vgpu_vreg(vgpu, offset) = v; + + return 0; +} + +static int bxt_gt_disp_pwron_write(struct intel_vgpu *vgpu, + unsigned int offset, void *p_data, unsigned int bytes) +{ + u32 v = *(u32 *)p_data; + + if (v & BIT(0)) { + vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &= + ~PHY_RESERVED; + vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) |= + PHY_POWER_GOOD; + } + + if (v & BIT(1)) { + vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) &= + ~PHY_RESERVED; + vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) |= + PHY_POWER_GOOD; + } + + + vgpu_vreg(vgpu, offset) = v; + + return 0; +} + static int mmio_read_from_hw(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { @@ -2671,17 +2782,17 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_D(_MMIO(0x45504), D_SKL_PLUS); MMIO_D(_MMIO(0x45520), D_SKL_PLUS); MMIO_D(_MMIO(0x46000), D_SKL_PLUS); - MMIO_DH(_MMIO(0x46010), D_SKL | D_KBL, NULL, skl_lcpll_write); - MMIO_DH(_MMIO(0x46014), D_SKL | D_KBL, NULL, skl_lcpll_write); - MMIO_D(_MMIO(0x6C040), D_SKL | D_KBL); - MMIO_D(_MMIO(0x6C048), D_SKL | D_KBL); - MMIO_D(_MMIO(0x6C050), D_SKL | D_KBL); - MMIO_D(_MMIO(0x6C044), D_SKL | D_KBL); - MMIO_D(_MMIO(0x6C04C), D_SKL | D_KBL); - MMIO_D(_MMIO(0x6C054), D_SKL | D_KBL); - MMIO_D(_MMIO(0x6c058), D_SKL | D_KBL); - MMIO_D(_MMIO(0x6c05c), D_SKL | D_KBL); - MMIO_DH(_MMIO(0x6c060), D_SKL | D_KBL, dpll_status_read, NULL); + MMIO_DH(_MMIO(0x46010), D_SKL_PLUS, NULL, skl_lcpll_write); + MMIO_DH(_MMIO(0x46014), D_SKL_PLUS, NULL, skl_lcpll_write); + MMIO_D(_MMIO(0x6C040), D_SKL_PLUS); + MMIO_D(_MMIO(0x6C048), D_SKL_PLUS); + MMIO_D(_MMIO(0x6C050), D_SKL_PLUS); + MMIO_D(_MMIO(0x6C044), D_SKL_PLUS); + MMIO_D(_MMIO(0x6C04C), D_SKL_PLUS); + MMIO_D(_MMIO(0x6C054), D_SKL_PLUS); + MMIO_D(_MMIO(0x6c058), D_SKL_PLUS); + MMIO_D(_MMIO(0x6c05c), D_SKL_PLUS); + MMIO_DH(_MMIO(0x6c060), D_SKL_PLUS, dpll_status_read, NULL); MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write); MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write); @@ -2806,53 +2917,57 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_D(_MMIO(0x7239c), D_SKL_PLUS); MMIO_D(_MMIO(0x7039c), D_SKL_PLUS); - MMIO_D(_MMIO(0x8f074), D_SKL | D_KBL); - MMIO_D(_MMIO(0x8f004), D_SKL | D_KBL); - MMIO_D(_MMIO(0x8f034), D_SKL | D_KBL); + MMIO_D(_MMIO(0x8f074), D_SKL_PLUS); + MMIO_D(_MMIO(0x8f004), D_SKL_PLUS); + MMIO_D(_MMIO(0x8f034), D_SKL_PLUS); - MMIO_D(_MMIO(0xb11c), D_SKL | D_KBL); + MMIO_D(_MMIO(0xb11c), D_SKL_PLUS); - MMIO_D(_MMIO(0x51000), D_SKL | D_KBL); + MMIO_D(_MMIO(0x51000), D_SKL_PLUS); MMIO_D(_MMIO(0x6c00c), D_SKL_PLUS); - MMIO_F(_MMIO(0xc800), 0x7f8, 
F_CMD_ACCESS, 0, 0, D_SKL | D_KBL, NULL, NULL); - MMIO_F(_MMIO(0xb020), 0x80, F_CMD_ACCESS, 0, 0, D_SKL | D_KBL, NULL, NULL); + MMIO_F(_MMIO(0xc800), 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL_PLUS, + NULL, NULL); + MMIO_F(_MMIO(0xb020), 0x80, F_CMD_ACCESS, 0, 0, D_SKL_PLUS, + NULL, NULL); MMIO_D(RPM_CONFIG0, D_SKL_PLUS); MMIO_D(_MMIO(0xd08), D_SKL_PLUS); MMIO_D(RC6_LOCATION, D_SKL_PLUS); MMIO_DFH(_MMIO(0x20e0), D_SKL_PLUS, F_MODE_MASK, NULL, NULL); - MMIO_DFH(_MMIO(0x20ec), D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(_MMIO(0x20ec), D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, + NULL, NULL); /* TRTT */ - MMIO_DFH(_MMIO(0x4de0), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL); - MMIO_DFH(_MMIO(0x4de4), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL); - MMIO_DFH(_MMIO(0x4de8), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL); - MMIO_DFH(_MMIO(0x4dec), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL); - MMIO_DFH(_MMIO(0x4df0), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL); - MMIO_DFH(_MMIO(0x4df4), D_SKL | D_KBL, F_CMD_ACCESS, NULL, gen9_trtte_write); - MMIO_DH(_MMIO(0x4dfc), D_SKL | D_KBL, NULL, gen9_trtt_chicken_write); + MMIO_DFH(_MMIO(0x4de0), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(_MMIO(0x4de4), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(_MMIO(0x4de8), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(_MMIO(0x4dec), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(_MMIO(0x4df0), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(_MMIO(0x4df4), D_SKL_PLUS, F_CMD_ACCESS, + NULL, gen9_trtte_write); + MMIO_DH(_MMIO(0x4dfc), D_SKL_PLUS, NULL, gen9_trtt_chicken_write); - MMIO_D(_MMIO(0x45008), D_SKL | D_KBL); + MMIO_D(_MMIO(0x45008), D_SKL_PLUS); - MMIO_D(_MMIO(0x46430), D_SKL | D_KBL); + MMIO_D(_MMIO(0x46430), D_SKL_PLUS); - MMIO_D(_MMIO(0x46520), D_SKL | D_KBL); + MMIO_D(_MMIO(0x46520), D_SKL_PLUS); - MMIO_D(_MMIO(0xc403c), D_SKL | D_KBL); + MMIO_D(_MMIO(0xc403c), D_SKL_PLUS); MMIO_D(_MMIO(0xb004), D_SKL_PLUS); MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write); MMIO_D(_MMIO(0x65900), D_SKL_PLUS); - MMIO_D(_MMIO(0x1082c0), D_SKL | D_KBL); - MMIO_D(_MMIO(0x4068), D_SKL | D_KBL); - MMIO_D(_MMIO(0x67054), D_SKL | D_KBL); - MMIO_D(_MMIO(0x6e560), D_SKL | D_KBL); - MMIO_D(_MMIO(0x6e554), D_SKL | D_KBL); - MMIO_D(_MMIO(0x2b20), D_SKL | D_KBL); - MMIO_D(_MMIO(0x65f00), D_SKL | D_KBL); - MMIO_D(_MMIO(0x65f08), D_SKL | D_KBL); - MMIO_D(_MMIO(0x320f0), D_SKL | D_KBL); + MMIO_D(_MMIO(0x1082c0), D_SKL_PLUS); + MMIO_D(_MMIO(0x4068), D_SKL_PLUS); + MMIO_D(_MMIO(0x67054), D_SKL_PLUS); + MMIO_D(_MMIO(0x6e560), D_SKL_PLUS); + MMIO_D(_MMIO(0x6e554), D_SKL_PLUS); + MMIO_D(_MMIO(0x2b20), D_SKL_PLUS); + MMIO_D(_MMIO(0x65f00), D_SKL_PLUS); + MMIO_D(_MMIO(0x65f08), D_SKL_PLUS); + MMIO_D(_MMIO(0x320f0), D_SKL_PLUS); MMIO_D(_MMIO(0x70034), D_SKL_PLUS); MMIO_D(_MMIO(0x71034), D_SKL_PLUS); @@ -2870,11 +2985,185 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_D(_MMIO(0x44500), D_SKL_PLUS); MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); - MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL | D_KBL, F_MODE_MASK | F_CMD_ACCESS, + MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); MMIO_D(_MMIO(0x4ab8), D_KBL); - MMIO_D(_MMIO(0x2248), D_SKL_PLUS | D_KBL); + MMIO_D(_MMIO(0x2248), D_KBL | D_SKL); + + return 0; +} + +static int init_bxt_mmio_info(struct intel_gvt *gvt) +{ + struct drm_i915_private *dev_priv = gvt->dev_priv; + int ret; + + MMIO_F(_MMIO(0x80000), 0x3000, 0, 0, 0, D_BXT, NULL, NULL); + + MMIO_D(GEN7_SAMPLER_INSTDONE, D_BXT); + MMIO_D(GEN7_ROW_INSTDONE, D_BXT); + 
MMIO_D(GEN8_FAULT_TLB_DATA0, D_BXT); + MMIO_D(GEN8_FAULT_TLB_DATA1, D_BXT); + MMIO_D(ERROR_GEN6, D_BXT); + MMIO_D(DONE_REG, D_BXT); + MMIO_D(EIR, D_BXT); + MMIO_D(PGTBL_ER, D_BXT); + MMIO_D(_MMIO(0x4194), D_BXT); + MMIO_D(_MMIO(0x4294), D_BXT); + MMIO_D(_MMIO(0x4494), D_BXT); + + MMIO_RING_D(RING_PSMI_CTL, D_BXT); + MMIO_RING_D(RING_DMA_FADD, D_BXT); + MMIO_RING_D(RING_DMA_FADD_UDW, D_BXT); + MMIO_RING_D(RING_IPEHR, D_BXT); + MMIO_RING_D(RING_INSTPS, D_BXT); + MMIO_RING_D(RING_BBADDR_UDW, D_BXT); + MMIO_RING_D(RING_BBSTATE, D_BXT); + MMIO_RING_D(RING_IPEIR, D_BXT); + + MMIO_F(SOFT_SCRATCH(0), 16 * 4, 0, 0, 0, D_BXT, NULL, NULL); + + MMIO_DH(BXT_P_CR_GT_DISP_PWRON, D_BXT, NULL, bxt_gt_disp_pwron_write); + MMIO_D(BXT_RP_STATE_CAP, D_BXT); + MMIO_DH(BXT_PHY_CTL_FAMILY(DPIO_PHY0), D_BXT, + NULL, bxt_phy_ctl_family_write); + MMIO_DH(BXT_PHY_CTL_FAMILY(DPIO_PHY1), D_BXT, + NULL, bxt_phy_ctl_family_write); + MMIO_D(BXT_PHY_CTL(PORT_A), D_BXT); + MMIO_D(BXT_PHY_CTL(PORT_B), D_BXT); + MMIO_D(BXT_PHY_CTL(PORT_C), D_BXT); + MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_A), D_BXT, + NULL, bxt_port_pll_enable_write); + MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_B), D_BXT, + NULL, bxt_port_pll_enable_write); + MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_C), D_BXT, NULL, + bxt_port_pll_enable_write); + + MMIO_D(BXT_PORT_CL1CM_DW0(DPIO_PHY0), D_BXT); + MMIO_D(BXT_PORT_CL1CM_DW9(DPIO_PHY0), D_BXT); + MMIO_D(BXT_PORT_CL1CM_DW10(DPIO_PHY0), D_BXT); + MMIO_D(BXT_PORT_CL1CM_DW28(DPIO_PHY0), D_BXT); + MMIO_D(BXT_PORT_CL1CM_DW30(DPIO_PHY0), D_BXT); + MMIO_D(BXT_PORT_CL2CM_DW6(DPIO_PHY0), D_BXT); + MMIO_D(BXT_PORT_REF_DW3(DPIO_PHY0), D_BXT); + MMIO_D(BXT_PORT_REF_DW6(DPIO_PHY0), D_BXT); + MMIO_D(BXT_PORT_REF_DW8(DPIO_PHY0), D_BXT); + + MMIO_D(BXT_PORT_CL1CM_DW0(DPIO_PHY1), D_BXT); + MMIO_D(BXT_PORT_CL1CM_DW9(DPIO_PHY1), D_BXT); + MMIO_D(BXT_PORT_CL1CM_DW10(DPIO_PHY1), D_BXT); + MMIO_D(BXT_PORT_CL1CM_DW28(DPIO_PHY1), D_BXT); + MMIO_D(BXT_PORT_CL1CM_DW30(DPIO_PHY1), D_BXT); + MMIO_D(BXT_PORT_CL2CM_DW6(DPIO_PHY1), D_BXT); + MMIO_D(BXT_PORT_REF_DW3(DPIO_PHY1), D_BXT); + MMIO_D(BXT_PORT_REF_DW6(DPIO_PHY1), D_BXT); + MMIO_D(BXT_PORT_REF_DW8(DPIO_PHY1), D_BXT); + + MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY0, DPIO_CH0), D_BXT); + MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY0, DPIO_CH0), D_BXT); + MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY0, DPIO_CH0), D_BXT); + MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY0, DPIO_CH0), D_BXT); + MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY0, DPIO_CH0), D_BXT); + MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY0, DPIO_CH0), D_BXT); + MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH0), D_BXT, + NULL, bxt_pcs_dw12_grp_write); + MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY0, DPIO_CH0), D_BXT); + MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY0, DPIO_CH0), D_BXT); + MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH0), D_BXT, + bxt_port_tx_dw3_read, NULL); + MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY0, DPIO_CH0), D_BXT); + MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY0, DPIO_CH0), D_BXT); + MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY0, DPIO_CH0), D_BXT); + MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 0), D_BXT); + MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 1), D_BXT); + MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 2), D_BXT); + MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 3), D_BXT); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 0), D_BXT); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 1), D_BXT); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 2), D_BXT); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 3), D_BXT); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 6), D_BXT); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 8), 
D_BXT); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 9), D_BXT); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 10), D_BXT); + + MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY0, DPIO_CH1), D_BXT); + MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY0, DPIO_CH1), D_BXT); + MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY0, DPIO_CH1), D_BXT); + MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY0, DPIO_CH1), D_BXT); + MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY0, DPIO_CH1), D_BXT); + MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY0, DPIO_CH1), D_BXT); + MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH1), D_BXT, + NULL, bxt_pcs_dw12_grp_write); + MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY0, DPIO_CH1), D_BXT); + MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY0, DPIO_CH1), D_BXT); + MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH1), D_BXT, + bxt_port_tx_dw3_read, NULL); + MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY0, DPIO_CH1), D_BXT); + MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY0, DPIO_CH1), D_BXT); + MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY0, DPIO_CH1), D_BXT); + MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 0), D_BXT); + MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 1), D_BXT); + MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 2), D_BXT); + MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 3), D_BXT); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 0), D_BXT); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 1), D_BXT); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 2), D_BXT); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 3), D_BXT); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 6), D_BXT); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 8), D_BXT); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 9), D_BXT); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 10), D_BXT); + + MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY1, DPIO_CH0), D_BXT); + MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY1, DPIO_CH0), D_BXT); + MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY1, DPIO_CH0), D_BXT); + MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY1, DPIO_CH0), D_BXT); + MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY1, DPIO_CH0), D_BXT); + MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY1, DPIO_CH0), D_BXT); + MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY1, DPIO_CH0), D_BXT, + NULL, bxt_pcs_dw12_grp_write); + MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY1, DPIO_CH0), D_BXT); + MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY1, DPIO_CH0), D_BXT); + MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY1, DPIO_CH0), D_BXT, + bxt_port_tx_dw3_read, NULL); + MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY1, DPIO_CH0), D_BXT); + MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY1, DPIO_CH0), D_BXT); + MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY1, DPIO_CH0), D_BXT); + MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 0), D_BXT); + MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 1), D_BXT); + MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 2), D_BXT); + MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 3), D_BXT); + MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 0), D_BXT); + MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 1), D_BXT); + MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 2), D_BXT); + MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 3), D_BXT); + MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 6), D_BXT); + MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 8), D_BXT); + MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 9), D_BXT); + MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 10), D_BXT); + + MMIO_D(BXT_DE_PLL_CTL, D_BXT); + MMIO_DH(BXT_DE_PLL_ENABLE, D_BXT, NULL, bxt_de_pll_enable_write); + MMIO_D(BXT_DSI_PLL_CTL, D_BXT); + MMIO_D(BXT_DSI_PLL_ENABLE, D_BXT); + + MMIO_D(GEN9_CLKGATE_DIS_0, D_BXT); + + MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_A), D_BXT); + MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B), D_BXT); + MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_C), D_BXT); + 
+ MMIO_D(RC6_CTX_BASE, D_BXT); + + MMIO_D(GEN8_PUSHBUS_CONTROL, D_BXT); + MMIO_D(GEN8_PUSHBUS_ENABLE, D_BXT); + MMIO_D(GEN8_PUSHBUS_SHIFT, D_BXT); + MMIO_D(GEN6_GFXPAUSE, D_BXT); + MMIO_D(GEN8_L3SQCREG1, D_BXT); + + MMIO_DFH(GEN9_CTX_PREEMPT_REG, D_BXT, F_CMD_ACCESS, NULL, NULL); return 0; } @@ -2966,6 +3255,16 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt) ret = init_skl_mmio_info(gvt); if (ret) goto err; + } else if (IS_BROXTON(dev_priv)) { + ret = init_broadwell_mmio_info(gvt); + if (ret) + goto err; + ret = init_skl_mmio_info(gvt); + if (ret) + goto err; + ret = init_bxt_mmio_info(gvt); + if (ret) + goto err; } gvt->mmio.mmio_block = mmio_blocks; -- cgit v1.2.3
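A note on the new BXT write handlers: a vGPU has no physical PLLs, so each handler acknowledges the guest's enable request immediately by setting the matching lock bit before latching the value, which lets guest poll loops terminate. An annotated restatement of bxt_port_pll_enable_write() from the hunk above, with explanatory comments added:

	static int bxt_port_pll_enable_write(struct intel_vgpu *vgpu,
			unsigned int offset, void *p_data, unsigned int bytes)
	{
		u32 v = *(u32 *)p_data;

		/* The guest asked for the port PLL to be enabled; report
		 * it as locked right away, since there is no real PLL to
		 * spin up. */
		if (v & PORT_PLL_ENABLE)
			v |= PORT_PLL_LOCK;

		/* Latch the written value into the virtual register file. */
		vgpu_vreg(vgpu, offset) = v;

		return 0;
	}

The same idiom appears in bxt_de_pll_enable_write() (BXT_DE_PLL_PLL_ENABLE/BXT_DE_PLL_LOCK) and bxt_phy_ctl_family_write() (COMMON_RESET_DIS/BXT_PHY_LANE_ENABLED).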