author    Tvrtko Ursulin <tvrtko.ursulin@intel.com>    2022-05-23 09:34:47 +0100
committer Tvrtko Ursulin <tvrtko.ursulin@intel.com>    2022-05-23 09:34:47 +0100
commit    8ec5c0006c50cb1909c0de0fad137909c1218990 (patch)
tree      b5df17cb06943792b3a2c9f3fab8a2c357113b34 /drivers
parent    39921e5f00f5a98ff9cb3229937ca339e8d9c9c6 (diff)
parent    5f38c3fb55ce3814b4353320d7a205068a420e48 (diff)
download  linux-8ec5c0006c50cb1909c0de0fad137909c1218990.tar.bz2
Merge tag 'drm-intel-next-2022-05-20' of git://anongit.freedesktop.org/drm/drm-intel into drm-intel-gt-next
drm/i915 drm-intel-next -> drm-intel-gt-next cross-merge sync

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

# Conflicts:
#	drivers/gpu/drm/i915/gt/intel_rps.c
#	drivers/gpu/drm/i915/i915_vma.c

From: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/87y1ywbh5y.fsf@intel.com
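Much of the dma-buf and dma-resv churn merged below converts callers from the old boolean shared/exclusive selector to the explicit enum dma_resv_usage. As a minimal, hedged sketch of the calling convention the hunks apply (dma_resv_wait_timeout() and dma_resv_usage_rw() are taken from the patch itself; the wrapper function is purely illustrative):

/*
 * Hedged sketch, not part of the patch: the conversion pattern applied
 * to callers below. dma_resv_usage_rw() maps a read/write intent to the
 * new usage enum; example_wait_idle() is an illustrative wrapper only.
 */
#include <linux/dma-resv.h>
#include <linux/sched.h>

static long example_wait_idle(struct dma_resv *resv, bool write)
{
	/* Old calling convention: a bool picked exclusive vs. all fences. */
	/* return dma_resv_wait_timeout(resv, write, true, MAX_SCHEDULE_TIMEOUT); */

	/* New calling convention: an explicit usage class. */
	return dma_resv_wait_timeout(resv, dma_resv_usage_rw(write),
				     true, MAX_SCHEDULE_TIMEOUT);
}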
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/dma-buf/dma-buf.c | 24
-rw-r--r--  drivers/dma-buf/dma-resv.c | 403
-rw-r--r--  drivers/dma-buf/st-dma-resv.c | 111
-rw-r--r--  drivers/gpu/drm/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 55
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 21
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h | 97
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 89
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 359
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h | 89
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 3
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_plane.c | 10
-rw-r--r--  drivers/gpu/drm/arm/malidp_crtc.c | 5
-rw-r--r--  drivers/gpu/drm/arm/malidp_planes.c | 14
-rw-r--r--  drivers/gpu/drm/bridge/Kconfig | 14
-rw-r--r--  drivers/gpu/drm/bridge/Makefile | 1
-rw-r--r--  drivers/gpu/drm/bridge/adv7511/adv7511_drv.c | 6
-rw-r--r--  drivers/gpu/drm/bridge/analogix/anx7625.c | 24
-rw-r--r--  drivers/gpu/drm/bridge/analogix/anx7625.h | 4
-rw-r--r--  drivers/gpu/drm/bridge/chipone-icn6211.c | 119
-rw-r--r--  drivers/gpu/drm/bridge/display-connector.c | 15
-rw-r--r--  drivers/gpu/drm/bridge/lontium-lt9211.c | 802
-rw-r--r--  drivers/gpu/drm/bridge/panel.c | 7
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/Kconfig | 10
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/Makefile | 1
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/dw-hdmi-gp-audio.c | 199
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/dw-hdmi.c | 186
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/dw-hdmi.h | 16
-rw-r--r--  drivers/gpu/drm/dp/drm_dp.c | 33
-rw-r--r--  drivers/gpu/drm/drm_buddy.c | 3
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 12
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 358
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 3
-rw-r--r--  drivers/gpu/drm/drm_gem_atomic_helper.c | 2
-rw-r--r--  drivers/gpu/drm/drm_modes.c | 4
-rw-r--r--  drivers/gpu/drm/drm_vblank.c | 2
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem.c | 6
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 10
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.c | 5
-rw-r--r--  drivers/gpu/drm/i915/Kconfig | 36
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 9
-rw-r--r--  drivers/gpu/drm/i915/display/g4x_dp.c | 3
-rw-r--r--  drivers/gpu/drm/i915/display/g4x_hdmi.c | 3
-rw-r--r--  drivers/gpu/drm/i915/display/hsw_ips.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/icl_dsi.c | 8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic_plane.c | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_audio.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bios.c | 345
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bw.c | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cdclk.c | 16
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi.c | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.c | 176
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.h | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power.c | 4471
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power.h | 126
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power_map.c | 1501
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power_map.h | 14
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power_well.c | 1813
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power_well.h | 132
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dmc.c | 54
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dmc_regs.h | 18
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c | 34
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_link_training.c | 33
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpio_phy.c | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll.c | 196
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll.h | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 268
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll_mgr.h | 9
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbc.c | 19
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdcp.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_panel.c | 10
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pps.c | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_psr.c | 78
-rw-r--r--  drivers/gpu/drm/i915/display/intel_tc.c | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vbt_defs.h | 24
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_busy.c | 10
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_clflush.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_lmem.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.c | 42
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.h | 8
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c | 20
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_wait.c | 6
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_llc.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_rc6.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ring.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_rps.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_llc.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_rps.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/Makefile | 30
-rw-r--r--  drivers/gpu/drm/i915/gvt/cfg_space.c | 89
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gvt/dmabuf.c | 36
-rw-r--r--  drivers/gpu/drm/i915/gvt/execlist.c | 12
-rw-r--r--  drivers/gpu/drm/i915/gvt/firmware.c | 25
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.c | 55
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.c | 340
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.h | 128
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c | 1035
-rw-r--r--  drivers/gpu/drm/i915/gvt/hypercall.h | 82
-rw-r--r--  drivers/gpu/drm/i915/gvt/interrupt.c | 40
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c | 1097
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio.h | 1
-rw-r--r--  drivers/gpu/drm/i915/gvt/mpt.h | 400
-rw-r--r--  drivers/gpu/drm/i915/gvt/opregion.c | 148
-rw-r--r--  drivers/gpu/drm/i915/gvt/page_track.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gvt/reg.h | 9
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c | 37
-rw-r--r--  drivers/gpu/drm/i915/gvt/trace.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/vgpu.c | 22
-rw-r--r--  drivers/gpu/drm/i915/i915_deps.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_driver.c | 19
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 10
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.h | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_pci.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 42
-rw-r--r--  drivers/gpu/drm/i915/i915_request.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_sw_fence.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.c | 13
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.c | 30
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.h | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_dram.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_gvt.c | 252
-rw-r--r--  drivers/gpu/drm/i915/intel_gvt.h | 32
-rw-r--r--  drivers/gpu/drm/i915/intel_gvt_mmio_table.c | 1292
-rw-r--r--  drivers/gpu/drm/i915/intel_pcode.c | 93
-rw-r--r--  drivers/gpu/drm/i915/intel_pcode.h | 20
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 29
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_memory_region.c | 3
-rw-r--r--  drivers/gpu/drm/ingenic/Kconfig | 9
-rw-r--r--  drivers/gpu/drm/ingenic/Makefile | 1
-rw-r--r--  drivers/gpu/drm/ingenic/ingenic-drm-drv.c | 28
-rw-r--r--  drivers/gpu/drm/ingenic/ingenic-dw-hdmi.c | 103
-rw-r--r--  drivers/gpu/drm/lima/lima_gem.c | 7
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 3
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_submit.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/base917c.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/wndw.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 17
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_prime.c | 17
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c | 2
-rw-r--r--  drivers/gpu/drm/panel/panel-lvds.c | 31
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_drv.c | 3
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_job.c | 2
-rw-r--r--  drivers/gpu/drm/qxl/qxl_debugfs.c | 3
-rw-r--r--  drivers/gpu/drm/qxl/qxl_release.c | 3
-rw-r--r--  drivers/gpu/drm/qxl/qxl_ttm.c | 39
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mn.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/radeon_prime.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/radeon_sync.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 36
-rw-r--r--  drivers/gpu/drm/radeon/radeon_uvd.c | 12
-rw-r--r--  drivers/gpu/drm/scheduler/sched_main.c | 3
-rw-r--r--  drivers/gpu/drm/solomon/Kconfig | 18
-rw-r--r--  drivers/gpu/drm/solomon/Makefile | 1
-rw-r--r--  drivers/gpu/drm/solomon/ssd130x-i2c.c | 56
-rw-r--r--  drivers/gpu/drm/solomon/ssd130x-spi.c | 178
-rw-r--r--  drivers/gpu/drm/solomon/ssd130x.c | 108
-rw-r--r--  drivers/gpu/drm/solomon/ssd130x.h | 16
-rw-r--r--  drivers/gpu/drm/sti/sti_gdp.c | 2
-rw-r--r--  drivers/gpu/drm/sti/sti_hdmi.c | 22
-rw-r--r--  drivers/gpu/drm/stm/ltdc.c | 16
-rw-r--r--  drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c | 2
-rw-r--r--  drivers/gpu/drm/tidss/tidss_dispc.c | 18
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 37
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 27
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c | 34
-rw-r--r--  drivers/gpu/drm/ttm/ttm_execbuf_util.c | 8
-rw-r--r--  drivers/gpu/drm/ttm/ttm_resource.c | 34
-rw-r--r--  drivers/gpu/drm/v3d/v3d_gem.c | 4
-rw-r--r--  drivers/gpu/drm/vc4/vc4_gem.c | 6
-rw-r--r--  drivers/gpu/drm/vc4/vc4_hvs.c | 26
-rw-r--r--  drivers/gpu/drm/vc4/vc4_kms.c | 39
-rw-r--r--  drivers/gpu/drm/vc4/vc4_txp.c | 8
-rw-r--r--  drivers/gpu/drm/vgem/vgem_fence.c | 13
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_gem.c | 3
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_ioctl.c | 5
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_bo.c | 7
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 18
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 5
-rw-r--r--  drivers/infiniband/core/umem_dmabuf.c | 3
-rw-r--r--  drivers/of/platform.c | 83
-rw-r--r--  drivers/s390/cio/vfio_ccw_ops.c | 7
-rw-r--r--  drivers/s390/crypto/vfio_ap_ops.c | 9
-rw-r--r--  drivers/vfio/mdev/Makefile | 2
-rw-r--r--  drivers/vfio/mdev/mdev_core.c | 52
-rw-r--r--  drivers/vfio/mdev/mdev_driver.c | 10
-rw-r--r--  drivers/vfio/mdev/mdev_private.h | 6
-rw-r--r--  drivers/vfio/mdev/mdev_sysfs.c | 37
-rw-r--r--  drivers/vfio/mdev/vfio_mdev.c | 152
-rw-r--r--  drivers/video/fbdev/core/fbcon.c | 708
-rw-r--r--  drivers/video/fbdev/core/fbcon.h | 8
-rw-r--r--  drivers/video/fbdev/core/fbmem.c | 37
-rw-r--r--  drivers/video/fbdev/core/fbsysfs.c | 2
-rw-r--r--  drivers/video/fbdev/offb.c | 98
219 files changed, 11023 insertions(+), 9548 deletions(-)
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 775d3afb4169..79795857be3e 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -216,7 +216,8 @@ static bool dma_buf_poll_add_cb(struct dma_resv *resv, bool write,
struct dma_fence *fence;
int r;
- dma_resv_for_each_fence(&cursor, resv, write, fence) {
+ dma_resv_for_each_fence(&cursor, resv, dma_resv_usage_rw(write),
+ fence) {
dma_fence_get(fence);
r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
if (!r)
@@ -660,12 +661,24 @@ static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach,
enum dma_data_direction direction)
{
struct sg_table *sg_table;
+ signed long ret;
sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
+ if (IS_ERR_OR_NULL(sg_table))
+ return sg_table;
+
+ if (!dma_buf_attachment_is_dynamic(attach)) {
+ ret = dma_resv_wait_timeout(attach->dmabuf->resv,
+ DMA_RESV_USAGE_KERNEL, true,
+ MAX_SCHEDULE_TIMEOUT);
+ if (ret < 0) {
+ attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
+ direction);
+ return ERR_PTR(ret);
+ }
+ }
- if (!IS_ERR_OR_NULL(sg_table))
- mangle_sg_table(sg_table);
-
+ mangle_sg_table(sg_table);
return sg_table;
}
@@ -1124,7 +1137,8 @@ static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
long ret;
/* Wait on any implicit rendering fences */
- ret = dma_resv_wait_timeout(resv, write, true, MAX_SCHEDULE_TIMEOUT);
+ ret = dma_resv_wait_timeout(resv, dma_resv_usage_rw(write),
+ true, MAX_SCHEDULE_TIMEOUT);
if (ret < 0)
return ret;
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index 8c650b96357a..0cce6e4ec946 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -44,12 +44,12 @@
/**
* DOC: Reservation Object Overview
*
- * The reservation object provides a mechanism to manage shared and
- * exclusive fences associated with a buffer. A reservation object
- * can have attached one exclusive fence (normally associated with
- * write operations) or N shared fences (read operations). The RCU
- * mechanism is used to protect read access to fences from locked
- * write-side updates.
+ * The reservation object provides a mechanism to manage a container of
+ * dma_fence objects associated with a resource. A reservation object
+ * can have any number of fences attached to it. Each fence carries a usage
+ * parameter determining how the operation represented by the fence is using
+ * the resource. The RCU mechanism is used to protect read access to fences
+ * from locked write-side updates.
*
* See struct dma_resv for more details.
*/
@@ -57,39 +57,59 @@
DEFINE_WD_CLASS(reservation_ww_class);
EXPORT_SYMBOL(reservation_ww_class);
+/* Mask for the lower fence pointer bits */
+#define DMA_RESV_LIST_MASK 0x3
+
struct dma_resv_list {
struct rcu_head rcu;
- u32 shared_count, shared_max;
- struct dma_fence __rcu *shared[];
+ u32 num_fences, max_fences;
+ struct dma_fence __rcu *table[];
};
-/**
- * dma_resv_list_alloc - allocate fence list
- * @shared_max: number of fences we need space for
- *
+/* Extract the fence and usage flags from an RCU protected entry in the list. */
+static void dma_resv_list_entry(struct dma_resv_list *list, unsigned int index,
+ struct dma_resv *resv, struct dma_fence **fence,
+ enum dma_resv_usage *usage)
+{
+ long tmp;
+
+ tmp = (long)rcu_dereference_check(list->table[index],
+ resv ? dma_resv_held(resv) : true);
+ *fence = (struct dma_fence *)(tmp & ~DMA_RESV_LIST_MASK);
+ if (usage)
+ *usage = tmp & DMA_RESV_LIST_MASK;
+}
+
+/* Set the fence and usage flags at the specific index in the list. */
+static void dma_resv_list_set(struct dma_resv_list *list,
+ unsigned int index,
+ struct dma_fence *fence,
+ enum dma_resv_usage usage)
+{
+ long tmp = ((long)fence) | usage;
+
+ RCU_INIT_POINTER(list->table[index], (struct dma_fence *)tmp);
+}
+
+/*
* Allocate a new dma_resv_list and make sure to correctly initialize
- * shared_max.
+ * max_fences.
*/
-static struct dma_resv_list *dma_resv_list_alloc(unsigned int shared_max)
+static struct dma_resv_list *dma_resv_list_alloc(unsigned int max_fences)
{
struct dma_resv_list *list;
- list = kmalloc(struct_size(list, shared, shared_max), GFP_KERNEL);
+ list = kmalloc(struct_size(list, table, max_fences), GFP_KERNEL);
if (!list)
return NULL;
- list->shared_max = (ksize(list) - offsetof(typeof(*list), shared)) /
- sizeof(*list->shared);
+ list->max_fences = (ksize(list) - offsetof(typeof(*list), table)) /
+ sizeof(*list->table);
return list;
}
-/**
- * dma_resv_list_free - free fence list
- * @list: list to free
- *
- * Free a dma_resv_list and make sure to drop all references.
- */
+/* Free a dma_resv_list and make sure to drop all references. */
static void dma_resv_list_free(struct dma_resv_list *list)
{
unsigned int i;
@@ -97,9 +117,12 @@ static void dma_resv_list_free(struct dma_resv_list *list)
if (!list)
return;
- for (i = 0; i < list->shared_count; ++i)
- dma_fence_put(rcu_dereference_protected(list->shared[i], true));
+ for (i = 0; i < list->num_fences; ++i) {
+ struct dma_fence *fence;
+ dma_resv_list_entry(list, i, NULL, &fence, NULL);
+ dma_fence_put(fence);
+ }
kfree_rcu(list, rcu);
}
@@ -110,10 +133,8 @@ static void dma_resv_list_free(struct dma_resv_list *list)
void dma_resv_init(struct dma_resv *obj)
{
ww_mutex_init(&obj->lock, &reservation_ww_class);
- seqcount_ww_mutex_init(&obj->seq, &obj->lock);
- RCU_INIT_POINTER(obj->fence, NULL);
- RCU_INIT_POINTER(obj->fence_excl, NULL);
+ RCU_INIT_POINTER(obj->fences, NULL);
}
EXPORT_SYMBOL(dma_resv_init);
@@ -123,46 +144,32 @@ EXPORT_SYMBOL(dma_resv_init);
*/
void dma_resv_fini(struct dma_resv *obj)
{
- struct dma_resv_list *fobj;
- struct dma_fence *excl;
-
/*
* This object should be dead and all references must have
* been released to it, so no need to be protected with rcu.
*/
- excl = rcu_dereference_protected(obj->fence_excl, 1);
- if (excl)
- dma_fence_put(excl);
-
- fobj = rcu_dereference_protected(obj->fence, 1);
- dma_resv_list_free(fobj);
+ dma_resv_list_free(rcu_dereference_protected(obj->fences, true));
ww_mutex_destroy(&obj->lock);
}
EXPORT_SYMBOL(dma_resv_fini);
-static inline struct dma_fence *
-dma_resv_excl_fence(struct dma_resv *obj)
+/* Dereference the fences while ensuring RCU rules */
+static inline struct dma_resv_list *dma_resv_fences_list(struct dma_resv *obj)
{
- return rcu_dereference_check(obj->fence_excl, dma_resv_held(obj));
-}
-
-static inline struct dma_resv_list *dma_resv_shared_list(struct dma_resv *obj)
-{
- return rcu_dereference_check(obj->fence, dma_resv_held(obj));
+ return rcu_dereference_check(obj->fences, dma_resv_held(obj));
}
/**
- * dma_resv_reserve_fences - Reserve space to add shared fences to
- * a dma_resv.
+ * dma_resv_reserve_fences - Reserve space to add fences to a dma_resv object.
* @obj: reservation object
* @num_fences: number of fences we want to add
*
- * Should be called before dma_resv_add_shared_fence(). Must
- * be called with @obj locked through dma_resv_lock().
+ * Should be called before dma_resv_add_fence(). Must be called with @obj
+ * locked through dma_resv_lock().
*
* Note that the preallocated slots need to be re-reserved if @obj is unlocked
- * at any time before calling dma_resv_add_shared_fence(). This is validated
- * when CONFIG_DEBUG_MUTEXES is enabled.
+ * at any time before calling dma_resv_add_fence(). This is validated when
+ * CONFIG_DEBUG_MUTEXES is enabled.
*
* RETURNS
* Zero for success, or -errno
@@ -174,11 +181,11 @@ int dma_resv_reserve_fences(struct dma_resv *obj, unsigned int num_fences)
dma_resv_assert_held(obj);
- old = dma_resv_shared_list(obj);
- if (old && old->shared_max) {
- if ((old->shared_count + num_fences) <= old->shared_max)
+ old = dma_resv_fences_list(obj);
+ if (old && old->max_fences) {
+ if ((old->num_fences + num_fences) <= old->max_fences)
return 0;
- max = max(old->shared_count + num_fences, old->shared_max * 2);
+ max = max(old->num_fences + num_fences, old->max_fences * 2);
} else {
max = max(4ul, roundup_pow_of_two(num_fences));
}
@@ -193,27 +200,27 @@ int dma_resv_reserve_fences(struct dma_resv *obj, unsigned int num_fences)
* references from the old struct are carried over to
* the new.
*/
- for (i = 0, j = 0, k = max; i < (old ? old->shared_count : 0); ++i) {
+ for (i = 0, j = 0, k = max; i < (old ? old->num_fences : 0); ++i) {
+ enum dma_resv_usage usage;
struct dma_fence *fence;
- fence = rcu_dereference_protected(old->shared[i],
- dma_resv_held(obj));
+ dma_resv_list_entry(old, i, obj, &fence, &usage);
if (dma_fence_is_signaled(fence))
- RCU_INIT_POINTER(new->shared[--k], fence);
+ RCU_INIT_POINTER(new->table[--k], fence);
else
- RCU_INIT_POINTER(new->shared[j++], fence);
+ dma_resv_list_set(new, j++, fence, usage);
}
- new->shared_count = j;
+ new->num_fences = j;
/*
* We are not changing the effective set of fences here so can
* merely update the pointer to the new array; both existing
* readers and new readers will see exactly the same set of
- * active (unsignaled) shared fences. Individual fences and the
+ * active (unsignaled) fences. Individual fences and the
* old array are protected by RCU and so will not vanish under
* the gaze of the rcu_read_lock() readers.
*/
- rcu_assign_pointer(obj->fence, new);
+ rcu_assign_pointer(obj->fences, new);
if (!old)
return 0;
@@ -222,7 +229,7 @@ int dma_resv_reserve_fences(struct dma_resv *obj, unsigned int num_fences)
for (i = k; i < max; ++i) {
struct dma_fence *fence;
- fence = rcu_dereference_protected(new->shared[i],
+ fence = rcu_dereference_protected(new->table[i],
dma_resv_held(obj));
dma_fence_put(fence);
}
@@ -234,37 +241,39 @@ EXPORT_SYMBOL(dma_resv_reserve_fences);
#ifdef CONFIG_DEBUG_MUTEXES
/**
- * dma_resv_reset_shared_max - reset shared fences for debugging
+ * dma_resv_reset_max_fences - reset fences for debugging
* @obj: the dma_resv object to reset
*
- * Reset the number of pre-reserved shared slots to test that drivers do
+ * Reset the number of pre-reserved fence slots to test that drivers do
* correct slot allocation using dma_resv_reserve_fences(). See also
- * &dma_resv_list.shared_max.
+ * &dma_resv_list.max_fences.
*/
-void dma_resv_reset_shared_max(struct dma_resv *obj)
+void dma_resv_reset_max_fences(struct dma_resv *obj)
{
- struct dma_resv_list *fences = dma_resv_shared_list(obj);
+ struct dma_resv_list *fences = dma_resv_fences_list(obj);
dma_resv_assert_held(obj);
- /* Test shared fence slot reservation */
+ /* Test fence slot reservation */
if (fences)
- fences->shared_max = fences->shared_count;
+ fences->max_fences = fences->num_fences;
}
-EXPORT_SYMBOL(dma_resv_reset_shared_max);
+EXPORT_SYMBOL(dma_resv_reset_max_fences);
#endif
/**
- * dma_resv_add_shared_fence - Add a fence to a shared slot
+ * dma_resv_add_fence - Add a fence to the dma_resv obj
* @obj: the reservation object
- * @fence: the shared fence to add
+ * @fence: the fence to add
+ * @usage: how the fence is used, see enum dma_resv_usage
*
- * Add a fence to a shared slot, @obj must be locked with dma_resv_lock(), and
+ * Add a fence to a slot, @obj must be locked with dma_resv_lock(), and
* dma_resv_reserve_fences() has been called.
*
* See also &dma_resv.fence for a discussion of the semantics.
*/
-void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
+void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
+ enum dma_resv_usage usage)
{
struct dma_resv_list *fobj;
struct dma_fence *old;
@@ -279,39 +288,36 @@ void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
*/
WARN_ON(dma_fence_is_container(fence));
- fobj = dma_resv_shared_list(obj);
- count = fobj->shared_count;
-
- write_seqcount_begin(&obj->seq);
+ fobj = dma_resv_fences_list(obj);
+ count = fobj->num_fences;
for (i = 0; i < count; ++i) {
-
- old = rcu_dereference_protected(fobj->shared[i],
- dma_resv_held(obj));
- if (old->context == fence->context ||
- dma_fence_is_signaled(old))
- goto replace;
+ enum dma_resv_usage old_usage;
+
+ dma_resv_list_entry(fobj, i, obj, &old, &old_usage);
+ if ((old->context == fence->context && old_usage >= usage) ||
+ dma_fence_is_signaled(old)) {
+ dma_resv_list_set(fobj, i, fence, usage);
+ dma_fence_put(old);
+ return;
+ }
}
- BUG_ON(fobj->shared_count >= fobj->shared_max);
- old = NULL;
+ BUG_ON(fobj->num_fences >= fobj->max_fences);
count++;
-replace:
- RCU_INIT_POINTER(fobj->shared[i], fence);
- /* pointer update must be visible before we extend the shared_count */
- smp_store_mb(fobj->shared_count, count);
-
- write_seqcount_end(&obj->seq);
- dma_fence_put(old);
+ dma_resv_list_set(fobj, i, fence, usage);
+ /* pointer update must be visible before we extend the num_fences */
+ smp_store_mb(fobj->num_fences, count);
}
-EXPORT_SYMBOL(dma_resv_add_shared_fence);
+EXPORT_SYMBOL(dma_resv_add_fence);
/**
* dma_resv_replace_fences - replace fences in the dma_resv obj
* @obj: the reservation object
* @context: the context of the fences to replace
* @replacement: the new fence to use instead
+ * @usage: how the new fence is used, see enum dma_resv_usage
*
* Replace fences with a specified context with a new fence. Only valid if the
* operation represented by the original fence no longer has access to the
@@ -321,107 +327,66 @@ EXPORT_SYMBOL(dma_resv_add_shared_fence);
* update fence which makes the resource inaccessible.
*/
void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
- struct dma_fence *replacement)
+ struct dma_fence *replacement,
+ enum dma_resv_usage usage)
{
struct dma_resv_list *list;
- struct dma_fence *old;
unsigned int i;
dma_resv_assert_held(obj);
- write_seqcount_begin(&obj->seq);
+ list = dma_resv_fences_list(obj);
+ for (i = 0; list && i < list->num_fences; ++i) {
+ struct dma_fence *old;
- old = dma_resv_excl_fence(obj);
- if (old->context == context) {
- RCU_INIT_POINTER(obj->fence_excl, dma_fence_get(replacement));
- dma_fence_put(old);
- }
-
- list = dma_resv_shared_list(obj);
- for (i = 0; list && i < list->shared_count; ++i) {
- old = rcu_dereference_protected(list->shared[i],
- dma_resv_held(obj));
+ dma_resv_list_entry(list, i, obj, &old, NULL);
if (old->context != context)
continue;
- rcu_assign_pointer(list->shared[i], dma_fence_get(replacement));
+ dma_resv_list_set(list, i, replacement, usage);
dma_fence_put(old);
}
-
- write_seqcount_end(&obj->seq);
}
EXPORT_SYMBOL(dma_resv_replace_fences);
-/**
- * dma_resv_add_excl_fence - Add an exclusive fence.
- * @obj: the reservation object
- * @fence: the exclusive fence to add
- *
- * Add a fence to the exclusive slot. @obj must be locked with dma_resv_lock().
- * See also &dma_resv.fence_excl for a discussion of the semantics.
- */
-void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
-{
- struct dma_fence *old_fence = dma_resv_excl_fence(obj);
-
- dma_resv_assert_held(obj);
-
- dma_fence_get(fence);
-
- write_seqcount_begin(&obj->seq);
- /* write_seqcount_begin provides the necessary memory barrier */
- RCU_INIT_POINTER(obj->fence_excl, fence);
- write_seqcount_end(&obj->seq);
-
- dma_fence_put(old_fence);
-}
-EXPORT_SYMBOL(dma_resv_add_excl_fence);
-
-/* Restart the iterator by initializing all the necessary fields, but not the
- * relation to the dma_resv object. */
+/* Restart the unlocked iteration by initializing the cursor object. */
static void dma_resv_iter_restart_unlocked(struct dma_resv_iter *cursor)
{
- cursor->seq = read_seqcount_begin(&cursor->obj->seq);
- cursor->index = -1;
- cursor->shared_count = 0;
- if (cursor->all_fences) {
- cursor->fences = dma_resv_shared_list(cursor->obj);
- if (cursor->fences)
- cursor->shared_count = cursor->fences->shared_count;
- } else {
- cursor->fences = NULL;
- }
+ cursor->index = 0;
+ cursor->num_fences = 0;
+ cursor->fences = dma_resv_fences_list(cursor->obj);
+ if (cursor->fences)
+ cursor->num_fences = cursor->fences->num_fences;
cursor->is_restarted = true;
}
/* Walk to the next not signaled fence and grab a reference to it */
static void dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor)
{
- struct dma_resv *obj = cursor->obj;
+ if (!cursor->fences)
+ return;
do {
/* Drop the reference from the previous round */
dma_fence_put(cursor->fence);
- if (cursor->index == -1) {
- cursor->fence = dma_resv_excl_fence(obj);
- cursor->index++;
- if (!cursor->fence)
- continue;
-
- } else if (!cursor->fences ||
- cursor->index >= cursor->shared_count) {
+ if (cursor->index >= cursor->num_fences) {
cursor->fence = NULL;
break;
- } else {
- struct dma_resv_list *fences = cursor->fences;
- unsigned int idx = cursor->index++;
-
- cursor->fence = rcu_dereference(fences->shared[idx]);
}
+
+ dma_resv_list_entry(cursor->fences, cursor->index++,
+ cursor->obj, &cursor->fence,
+ &cursor->fence_usage);
cursor->fence = dma_fence_get_rcu(cursor->fence);
- if (!cursor->fence || !dma_fence_is_signaled(cursor->fence))
+ if (!cursor->fence) {
+ dma_resv_iter_restart_unlocked(cursor);
+ continue;
+ }
+
+ if (!dma_fence_is_signaled(cursor->fence) &&
+ cursor->usage >= cursor->fence_usage)
break;
} while (true);
}
@@ -444,7 +409,7 @@ struct dma_fence *dma_resv_iter_first_unlocked(struct dma_resv_iter *cursor)
do {
dma_resv_iter_restart_unlocked(cursor);
dma_resv_iter_walk_unlocked(cursor);
- } while (read_seqcount_retry(&cursor->obj->seq, cursor->seq));
+ } while (dma_resv_fences_list(cursor->obj) != cursor->fences);
rcu_read_unlock();
return cursor->fence;
@@ -467,13 +432,13 @@ struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor)
rcu_read_lock();
cursor->is_restarted = false;
- restart = read_seqcount_retry(&cursor->obj->seq, cursor->seq);
+ restart = dma_resv_fences_list(cursor->obj) != cursor->fences;
do {
if (restart)
dma_resv_iter_restart_unlocked(cursor);
dma_resv_iter_walk_unlocked(cursor);
restart = true;
- } while (read_seqcount_retry(&cursor->obj->seq, cursor->seq));
+ } while (dma_resv_fences_list(cursor->obj) != cursor->fences);
rcu_read_unlock();
return cursor->fence;
@@ -496,15 +461,9 @@ struct dma_fence *dma_resv_iter_first(struct dma_resv_iter *cursor)
dma_resv_assert_held(cursor->obj);
cursor->index = 0;
- if (cursor->all_fences)
- cursor->fences = dma_resv_shared_list(cursor->obj);
- else
- cursor->fences = NULL;
-
- fence = dma_resv_excl_fence(cursor->obj);
- if (!fence)
- fence = dma_resv_iter_next(cursor);
+ cursor->fences = dma_resv_fences_list(cursor->obj);
+ fence = dma_resv_iter_next(cursor);
cursor->is_restarted = true;
return fence;
}
@@ -519,17 +478,22 @@ EXPORT_SYMBOL_GPL(dma_resv_iter_first);
*/
struct dma_fence *dma_resv_iter_next(struct dma_resv_iter *cursor)
{
- unsigned int idx;
+ struct dma_fence *fence;
dma_resv_assert_held(cursor->obj);
cursor->is_restarted = false;
- if (!cursor->fences || cursor->index >= cursor->fences->shared_count)
- return NULL;
- idx = cursor->index++;
- return rcu_dereference_protected(cursor->fences->shared[idx],
- dma_resv_held(cursor->obj));
+ do {
+ if (!cursor->fences ||
+ cursor->index >= cursor->fences->num_fences)
+ return NULL;
+
+ dma_resv_list_entry(cursor->fences, cursor->index++,
+ cursor->obj, &fence, &cursor->fence_usage);
+ } while (cursor->fence_usage > cursor->usage);
+
+ return fence;
}
EXPORT_SYMBOL_GPL(dma_resv_iter_next);
@@ -544,60 +508,43 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
{
struct dma_resv_iter cursor;
struct dma_resv_list *list;
- struct dma_fence *f, *excl;
+ struct dma_fence *f;
dma_resv_assert_held(dst);
list = NULL;
- excl = NULL;
- dma_resv_iter_begin(&cursor, src, true);
+ dma_resv_iter_begin(&cursor, src, DMA_RESV_USAGE_BOOKKEEP);
dma_resv_for_each_fence_unlocked(&cursor, f) {
if (dma_resv_iter_is_restarted(&cursor)) {
dma_resv_list_free(list);
- dma_fence_put(excl);
-
- if (cursor.shared_count) {
- list = dma_resv_list_alloc(cursor.shared_count);
- if (!list) {
- dma_resv_iter_end(&cursor);
- return -ENOMEM;
- }
- list->shared_count = 0;
-
- } else {
- list = NULL;
+ list = dma_resv_list_alloc(cursor.num_fences);
+ if (!list) {
+ dma_resv_iter_end(&cursor);
+ return -ENOMEM;
}
- excl = NULL;
+ list->num_fences = 0;
}
dma_fence_get(f);
- if (dma_resv_iter_is_exclusive(&cursor))
- excl = f;
- else
- RCU_INIT_POINTER(list->shared[list->shared_count++], f);
+ dma_resv_list_set(list, list->num_fences++, f,
+ dma_resv_iter_usage(&cursor));
}
dma_resv_iter_end(&cursor);
- write_seqcount_begin(&dst->seq);
- excl = rcu_replace_pointer(dst->fence_excl, excl, dma_resv_held(dst));
- list = rcu_replace_pointer(dst->fence, list, dma_resv_held(dst));
- write_seqcount_end(&dst->seq);
-
+ list = rcu_replace_pointer(dst->fences, list, dma_resv_held(dst));
dma_resv_list_free(list);
- dma_fence_put(excl);
-
return 0;
}
EXPORT_SYMBOL(dma_resv_copy_fences);
/**
- * dma_resv_get_fences - Get an object's shared and exclusive
+ * dma_resv_get_fences - Get an object's fences
* fences without update side lock held
* @obj: the reservation object
- * @write: true if we should return all fences
+ * @usage: controls which fences to include, see enum dma_resv_usage.
* @num_fences: the number of fences returned
* @fences: the array of fence ptrs returned (array is krealloc'd to the
* required size, and must be freed by caller)
@@ -605,7 +552,7 @@ EXPORT_SYMBOL(dma_resv_copy_fences);
* Retrieve all fences from the reservation object.
* Returns either zero or -ENOMEM.
*/
-int dma_resv_get_fences(struct dma_resv *obj, bool write,
+int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage,
unsigned int *num_fences, struct dma_fence ***fences)
{
struct dma_resv_iter cursor;
@@ -614,7 +561,7 @@ int dma_resv_get_fences(struct dma_resv *obj, bool write,
*num_fences = 0;
*fences = NULL;
- dma_resv_iter_begin(&cursor, obj, write);
+ dma_resv_iter_begin(&cursor, obj, usage);
dma_resv_for_each_fence_unlocked(&cursor, fence) {
if (dma_resv_iter_is_restarted(&cursor)) {
@@ -623,7 +570,7 @@ int dma_resv_get_fences(struct dma_resv *obj, bool write,
while (*num_fences)
dma_fence_put((*fences)[--(*num_fences)]);
- count = cursor.shared_count + 1;
+ count = cursor.num_fences + 1;
/* Eventually re-allocate the array */
*fences = krealloc_array(*fences, count,
@@ -646,7 +593,7 @@ EXPORT_SYMBOL_GPL(dma_resv_get_fences);
/**
* dma_resv_get_singleton - Get a single fence for all the fences
* @obj: the reservation object
- * @write: true if we should return all fences
+ * @usage: controls which fences to include, see enum dma_resv_usage.
* @fence: the resulting fence
*
* Get a single fence representing all the fences inside the resv object.
@@ -658,7 +605,7 @@ EXPORT_SYMBOL_GPL(dma_resv_get_fences);
*
* Returns 0 on success and negative error values on failure.
*/
-int dma_resv_get_singleton(struct dma_resv *obj, bool write,
+int dma_resv_get_singleton(struct dma_resv *obj, enum dma_resv_usage usage,
struct dma_fence **fence)
{
struct dma_fence_array *array;
@@ -666,7 +613,7 @@ int dma_resv_get_singleton(struct dma_resv *obj, bool write,
unsigned count;
int r;
- r = dma_resv_get_fences(obj, write, &count, &fences);
+ r = dma_resv_get_fences(obj, usage, &count, &fences);
if (r)
return r;
@@ -697,10 +644,9 @@ int dma_resv_get_singleton(struct dma_resv *obj, bool write,
EXPORT_SYMBOL_GPL(dma_resv_get_singleton);
/**
- * dma_resv_wait_timeout - Wait on reservation's objects
- * shared and/or exclusive fences.
+ * dma_resv_wait_timeout - Wait on a reservation object's fences
* @obj: the reservation object
- * @wait_all: if true, wait on all fences, else wait on just exclusive fence
+ * @usage: controls which fences to include, see enum dma_resv_usage.
* @intr: if true, do interruptible wait
* @timeout: timeout value in jiffies or zero to return immediately
*
@@ -710,14 +656,14 @@ EXPORT_SYMBOL_GPL(dma_resv_get_singleton);
* Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
* greater than zero on success.
*/
-long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
- unsigned long timeout)
+long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage,
+ bool intr, unsigned long timeout)
{
long ret = timeout ? timeout : 1;
struct dma_resv_iter cursor;
struct dma_fence *fence;
- dma_resv_iter_begin(&cursor, obj, wait_all);
+ dma_resv_iter_begin(&cursor, obj, usage);
dma_resv_for_each_fence_unlocked(&cursor, fence) {
ret = dma_fence_wait_timeout(fence, intr, ret);
@@ -737,8 +683,7 @@ EXPORT_SYMBOL_GPL(dma_resv_wait_timeout);
* dma_resv_test_signaled - Test if a reservation object's fences have been
* signaled.
* @obj: the reservation object
- * @test_all: if true, test all fences, otherwise only test the exclusive
- * fence
+ * @usage: controls which fences to include, see enum dma_resv_usage.
*
* Callers are not required to hold specific locks, but may hold
* dma_resv_lock() already.
@@ -747,12 +692,12 @@ EXPORT_SYMBOL_GPL(dma_resv_wait_timeout);
*
* True if all fences signaled, else false.
*/
-bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all)
+bool dma_resv_test_signaled(struct dma_resv *obj, enum dma_resv_usage usage)
{
struct dma_resv_iter cursor;
struct dma_fence *fence;
- dma_resv_iter_begin(&cursor, obj, test_all);
+ dma_resv_iter_begin(&cursor, obj, usage);
dma_resv_for_each_fence_unlocked(&cursor, fence) {
dma_resv_iter_end(&cursor);
return false;
@@ -772,13 +717,13 @@ EXPORT_SYMBOL_GPL(dma_resv_test_signaled);
*/
void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq)
{
+ static const char *usage[] = { "kernel", "write", "read", "bookkeep" };
struct dma_resv_iter cursor;
struct dma_fence *fence;
- dma_resv_for_each_fence(&cursor, obj, true, fence) {
+ dma_resv_for_each_fence(&cursor, obj, DMA_RESV_USAGE_READ, fence) {
seq_printf(seq, "\t%s fence:",
- dma_resv_iter_is_exclusive(&cursor) ?
- "Exclusive" : "Shared");
+ usage[dma_resv_iter_usage(&cursor)]);
dma_fence_describe(fence, seq);
}
}
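One consequence of the rework above worth noting: enum dma_resv_usage is ordered (kernel < write < read < bookkeep, matching the usage[] table in dma_resv_describe()), and the iterators return every fence whose usage is less than or equal to the requested one. A hedged sketch of a locked walk built only from helpers visible in the hunks above; the wrapper function is illustrative, and resv is assumed to be held via dma_resv_lock():

/*
 * Hedged sketch, not from the patch: wait for every fence a CPU read
 * must respect. Iterating with DMA_RESV_USAGE_READ also returns KERNEL
 * and WRITE fences because of the enum ordering described above.
 */
static void example_wait_for_cpu_read(struct dma_resv *resv)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_READ, fence)
		dma_fence_wait(fence, false);
}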
diff --git a/drivers/dma-buf/st-dma-resv.c b/drivers/dma-buf/st-dma-resv.c
index d2e61f6ae989..813779e3c9be 100644
--- a/drivers/dma-buf/st-dma-resv.c
+++ b/drivers/dma-buf/st-dma-resv.c
@@ -58,8 +58,9 @@ static int sanitycheck(void *arg)
return r;
}
-static int test_signaling(void *arg, bool shared)
+static int test_signaling(void *arg)
{
+ enum dma_resv_usage usage = (unsigned long)arg;
struct dma_resv resv;
struct dma_fence *f;
int r;
@@ -81,18 +82,14 @@ static int test_signaling(void *arg, bool shared)
goto err_unlock;
}
- if (shared)
- dma_resv_add_shared_fence(&resv, f);
- else
- dma_resv_add_excl_fence(&resv, f);
-
- if (dma_resv_test_signaled(&resv, shared)) {
+ dma_resv_add_fence(&resv, f, usage);
+ if (dma_resv_test_signaled(&resv, usage)) {
pr_err("Resv unexpectedly signaled\n");
r = -EINVAL;
goto err_unlock;
}
dma_fence_signal(f);
- if (!dma_resv_test_signaled(&resv, shared)) {
+ if (!dma_resv_test_signaled(&resv, usage)) {
pr_err("Resv not reporting signaled\n");
r = -EINVAL;
goto err_unlock;
@@ -105,18 +102,9 @@ err_free:
return r;
}
-static int test_excl_signaling(void *arg)
-{
- return test_signaling(arg, false);
-}
-
-static int test_shared_signaling(void *arg)
-{
- return test_signaling(arg, true);
-}
-
-static int test_for_each(void *arg, bool shared)
+static int test_for_each(void *arg)
{
+ enum dma_resv_usage usage = (unsigned long)arg;
struct dma_resv_iter cursor;
struct dma_fence *f, *fence;
struct dma_resv resv;
@@ -139,13 +127,10 @@ static int test_for_each(void *arg, bool shared)
goto err_unlock;
}
- if (shared)
- dma_resv_add_shared_fence(&resv, f);
- else
- dma_resv_add_excl_fence(&resv, f);
+ dma_resv_add_fence(&resv, f, usage);
r = -ENOENT;
- dma_resv_for_each_fence(&cursor, &resv, shared, fence) {
+ dma_resv_for_each_fence(&cursor, &resv, usage, fence) {
if (!r) {
pr_err("More than one fence found\n");
r = -EINVAL;
@@ -156,7 +141,7 @@ static int test_for_each(void *arg, bool shared)
r = -EINVAL;
goto err_unlock;
}
- if (dma_resv_iter_is_exclusive(&cursor) != !shared) {
+ if (dma_resv_iter_usage(&cursor) != usage) {
pr_err("Unexpected fence usage\n");
r = -EINVAL;
goto err_unlock;
@@ -176,18 +161,9 @@ err_free:
return r;
}
-static int test_excl_for_each(void *arg)
-{
- return test_for_each(arg, false);
-}
-
-static int test_shared_for_each(void *arg)
-{
- return test_for_each(arg, true);
-}
-
-static int test_for_each_unlocked(void *arg, bool shared)
+static int test_for_each_unlocked(void *arg)
{
+ enum dma_resv_usage usage = (unsigned long)arg;
struct dma_resv_iter cursor;
struct dma_fence *f, *fence;
struct dma_resv resv;
@@ -211,14 +187,11 @@ static int test_for_each_unlocked(void *arg, bool shared)
goto err_free;
}
- if (shared)
- dma_resv_add_shared_fence(&resv, f);
- else
- dma_resv_add_excl_fence(&resv, f);
+ dma_resv_add_fence(&resv, f, usage);
dma_resv_unlock(&resv);
r = -ENOENT;
- dma_resv_iter_begin(&cursor, &resv, shared);
+ dma_resv_iter_begin(&cursor, &resv, usage);
dma_resv_for_each_fence_unlocked(&cursor, fence) {
if (!r) {
pr_err("More than one fence found\n");
@@ -234,7 +207,7 @@ static int test_for_each_unlocked(void *arg, bool shared)
r = -EINVAL;
goto err_iter_end;
}
- if (dma_resv_iter_is_exclusive(&cursor) != !shared) {
+ if (dma_resv_iter_usage(&cursor) != usage) {
pr_err("Unexpected fence usage\n");
r = -EINVAL;
goto err_iter_end;
@@ -244,7 +217,7 @@ static int test_for_each_unlocked(void *arg, bool shared)
if (r == -ENOENT) {
r = -EINVAL;
/* That should trigger a restart */
- cursor.seq--;
+ cursor.fences = (void*)~0;
} else if (r == -EINVAL) {
r = 0;
}
@@ -260,18 +233,9 @@ err_free:
return r;
}
-static int test_excl_for_each_unlocked(void *arg)
-{
- return test_for_each_unlocked(arg, false);
-}
-
-static int test_shared_for_each_unlocked(void *arg)
-{
- return test_for_each_unlocked(arg, true);
-}
-
-static int test_get_fences(void *arg, bool shared)
+static int test_get_fences(void *arg)
{
+ enum dma_resv_usage usage = (unsigned long)arg;
struct dma_fence *f, **fences = NULL;
struct dma_resv resv;
int r, i;
@@ -294,13 +258,10 @@ static int test_get_fences(void *arg, bool shared)
goto err_resv;
}
- if (shared)
- dma_resv_add_shared_fence(&resv, f);
- else
- dma_resv_add_excl_fence(&resv, f);
+ dma_resv_add_fence(&resv, f, usage);
dma_resv_unlock(&resv);
- r = dma_resv_get_fences(&resv, shared, &i, &fences);
+ r = dma_resv_get_fences(&resv, usage, &i, &fences);
if (r) {
pr_err("get_fences failed\n");
goto err_free;
@@ -322,30 +283,24 @@ err_resv:
return r;
}
-static int test_excl_get_fences(void *arg)
-{
- return test_get_fences(arg, false);
-}
-
-static int test_shared_get_fences(void *arg)
-{
- return test_get_fences(arg, true);
-}
-
int dma_resv(void)
{
static const struct subtest tests[] = {
SUBTEST(sanitycheck),
- SUBTEST(test_excl_signaling),
- SUBTEST(test_shared_signaling),
- SUBTEST(test_excl_for_each),
- SUBTEST(test_shared_for_each),
- SUBTEST(test_excl_for_each_unlocked),
- SUBTEST(test_shared_for_each_unlocked),
- SUBTEST(test_excl_get_fences),
- SUBTEST(test_shared_get_fences),
+ SUBTEST(test_signaling),
+ SUBTEST(test_for_each),
+ SUBTEST(test_for_each_unlocked),
+ SUBTEST(test_get_fences),
};
+ enum dma_resv_usage usage;
+ int r;
spin_lock_init(&fence_lock);
- return subtests(tests, NULL);
+ for (usage = DMA_RESV_USAGE_KERNEL; usage <= DMA_RESV_USAGE_BOOKKEEP;
+ ++usage) {
+ r = subtests(tests, (void *)(unsigned long)usage);
+ if (r)
+ return r;
+ }
+ return 0;
}
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index f1422bee3dcc..5133c3f028ab 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -280,6 +280,7 @@ config DRM_AMDGPU
select HWMON
select BACKLIGHT_CLASS_DEVICE
select INTERVAL_TREE
+ select DRM_BUDDY
help
Choose this option if you have a recent AMD Radeon graphics card.
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 98b1736bb221..3dc5ab2764ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -263,7 +263,7 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
*/
replacement = dma_fence_get_stub();
dma_resv_replace_fences(bo->tbo.base.resv, ef->base.context,
- replacement);
+ replacement, DMA_RESV_USAGE_READ);
dma_fence_put(replacement);
return 0;
}
@@ -2447,6 +2447,8 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
struct amdgpu_bo *bo = mem->bo;
uint32_t domain = mem->domain;
struct kfd_mem_attachment *attachment;
+ struct dma_resv_iter cursor;
+ struct dma_fence *fence;
total_size += amdgpu_bo_size(bo);
@@ -2461,10 +2463,13 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
goto validate_map_fail;
}
}
- ret = amdgpu_sync_fence(&sync_obj, bo->tbo.moving);
- if (ret) {
- pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
- goto validate_map_fail;
+ dma_resv_for_each_fence(&cursor, bo->tbo.base.resv,
+ DMA_RESV_USAGE_KERNEL, fence) {
+ ret = amdgpu_sync_fence(&sync_obj, fence);
+ if (ret) {
+ pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
+ goto validate_map_fail;
+ }
}
list_for_each_entry(attachment, &mem->attachments, list) {
if (!attachment->is_mapped)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
index 044b41f0bfd9..529d52a204cf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
@@ -34,7 +34,6 @@ struct amdgpu_fpriv;
struct amdgpu_bo_list_entry {
struct ttm_validate_buffer tv;
struct amdgpu_bo_va *bo_va;
- struct dma_fence_chain *chain;
uint32_t priority;
struct page **user_pages;
bool user_invalidated;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index e85e347eb670..8de283997769 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -55,8 +55,8 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
p->uf_entry.priority = 0;
p->uf_entry.tv.bo = &bo->tbo;
- /* One for TTM and one for the CS job */
- p->uf_entry.tv.num_shared = 2;
+ /* One for TTM and two for the CS job */
+ p->uf_entry.tv.num_shared = 3;
drm_gem_object_put(gobj);
@@ -574,14 +574,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
e->bo_va = amdgpu_vm_bo_find(vm, bo);
-
- if (bo->tbo.base.dma_buf && !amdgpu_bo_explicit_sync(bo)) {
- e->chain = dma_fence_chain_alloc();
- if (!e->chain) {
- r = -ENOMEM;
- goto error_validate;
- }
- }
}
/* Move fence waiting after getting reservation lock of
@@ -642,13 +634,8 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
}
error_validate:
- if (r) {
- amdgpu_bo_list_for_each_entry(e, p->bo_list) {
- dma_fence_chain_free(e->chain);
- e->chain = NULL;
- }
+ if (r)
ttm_eu_backoff_reservation(&p->ticket, &p->validated);
- }
out:
return r;
}
@@ -688,17 +675,9 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
{
unsigned i;
- if (error && backoff) {
- struct amdgpu_bo_list_entry *e;
-
- amdgpu_bo_list_for_each_entry(e, parser->bo_list) {
- dma_fence_chain_free(e->chain);
- e->chain = NULL;
- }
-
+ if (error && backoff)
ttm_eu_backoff_reservation(&parser->ticket,
&parser->validated);
- }
for (i = 0; i < parser->num_post_deps; i++) {
drm_syncobj_put(parser->post_deps[i].syncobj);
@@ -1272,29 +1251,9 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
- amdgpu_bo_list_for_each_entry(e, p->bo_list) {
- struct dma_resv *resv = e->tv.bo->base.resv;
- struct dma_fence_chain *chain = e->chain;
- struct dma_resv_iter cursor;
- struct dma_fence *fence;
-
- if (!chain)
- continue;
-
- /*
- * Temporary workaround dma_resv shortcommings by wrapping up
- * the submission in a dma_fence_chain and add it as exclusive
- * fence.
- *
- * TODO: Remove together with dma_resv rework.
- */
- dma_resv_for_each_fence(&cursor, resv, false, fence) {
- break;
- }
- dma_fence_chain_init(chain, fence, dma_fence_get(p->fence), 1);
- rcu_assign_pointer(resv->fence_excl, &chain->base);
- e->chain = NULL;
- }
+ /* Make sure all BOs are remembered as writers */
+ amdgpu_bo_list_for_each_entry(e, p->bo_list)
+ e->tv.num_shared = 0;
ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
mutex_unlock(&p->adev->notifier_lock);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index fae5c1debfad..7a6908d71820 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -200,8 +200,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
goto unpin;
}
- /* TODO: Unify this with other drivers */
- r = dma_resv_get_fences(new_abo->tbo.base.resv, true,
+ r = dma_resv_get_fences(new_abo->tbo.base.resv, DMA_RESV_USAGE_WRITE,
&work->shared_count,
&work->shared);
if (unlikely(r != 0)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 579adfafe4d0..782cbca37538 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -102,21 +102,9 @@ static int amdgpu_dma_buf_pin(struct dma_buf_attachment *attach)
{
struct drm_gem_object *obj = attach->dmabuf->priv;
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
- int r;
/* pin buffer into GTT */
- r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
- if (r)
- return r;
-
- if (bo->tbo.moving) {
- r = dma_fence_wait(bo->tbo.moving, true);
- if (r) {
- amdgpu_bo_unpin(bo);
- return r;
- }
- }
- return 0;
+ return amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 57b74d35052f..84a53758e18e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -526,7 +526,8 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
}
robj = gem_to_amdgpu_bo(gobj);
- ret = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, timeout);
+ ret = dma_resv_wait_timeout(robj->tbo.base.resv, DMA_RESV_USAGE_READ,
+ true, timeout);
/* ret == 0 means not signaled,
* ret > 0 means signaled
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 81207737c716..4ba4b54092f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -111,7 +111,7 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv,
struct dma_fence *fence;
int r;
- r = dma_resv_get_singleton(resv, true, &fence);
+ r = dma_resv_get_singleton(resv, DMA_RESV_USAGE_BOOKKEEP, &fence);
if (r)
goto fallback;
@@ -139,7 +139,8 @@ fallback:
/* Not enough memory for the delayed delete, as last resort
* block for all the fences to complete.
*/
- dma_resv_wait_timeout(resv, true, false, MAX_SCHEDULE_TIMEOUT);
+ dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP,
+ false, MAX_SCHEDULE_TIMEOUT);
amdgpu_pasid_free(pasid);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index 4b153daf283d..b86c0b8252a5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -75,8 +75,8 @@ static bool amdgpu_mn_invalidate_gfx(struct mmu_interval_notifier *mni,
mmu_interval_set_seq(mni, cur_seq);
- r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false,
- MAX_SCHEDULE_TIMEOUT);
+ r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP,
+ false, MAX_SCHEDULE_TIMEOUT);
mutex_unlock(&adev->notifier_lock);
if (r <= 0)
DRM_ERROR("(%ld) failed to wait for user bo\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index dd6693fff280..e92ecabfa7bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -612,9 +612,8 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
if (unlikely(r))
goto fail_unreserve;
- amdgpu_bo_fence(bo, fence, false);
- dma_fence_put(bo->tbo.moving);
- bo->tbo.moving = dma_fence_get(fence);
+ dma_resv_add_fence(bo->tbo.base.resv, fence,
+ DMA_RESV_USAGE_KERNEL);
dma_fence_put(fence);
}
if (!bp->resv)
@@ -761,6 +760,11 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
return -EPERM;
+ r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_KERNEL,
+ false, MAX_SCHEDULE_TIMEOUT);
+ if (r < 0)
+ return r;
+
kptr = amdgpu_bo_kptr(bo);
if (kptr) {
if (ptr)
@@ -768,11 +772,6 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
return 0;
}
- r = dma_resv_wait_timeout(bo->tbo.base.resv, false, false,
- MAX_SCHEDULE_TIMEOUT);
- if (r < 0)
- return r;
-
r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.resource->num_pages, &bo->kmap);
if (r)
return r;
@@ -1399,10 +1398,8 @@ void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
return;
}
- if (shared)
- dma_resv_add_shared_fence(resv, fence);
- else
- dma_resv_add_excl_fence(resv, fence);
+ dma_resv_add_fence(resv, fence, shared ? DMA_RESV_USAGE_READ :
+ DMA_RESV_USAGE_WRITE);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
index acfa207cf970..6546552e596c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
@@ -30,12 +30,15 @@
#include <drm/ttm/ttm_resource.h>
#include <drm/ttm/ttm_range_manager.h>
+#include "amdgpu_vram_mgr.h"
+
/* state back for walking over vram_mgr and gtt_mgr allocations */
struct amdgpu_res_cursor {
uint64_t start;
uint64_t size;
uint64_t remaining;
- struct drm_mm_node *node;
+ void *node;
+ uint32_t mem_type;
};
/**
@@ -52,27 +55,63 @@ static inline void amdgpu_res_first(struct ttm_resource *res,
uint64_t start, uint64_t size,
struct amdgpu_res_cursor *cur)
{
+ struct drm_buddy_block *block;
+ struct list_head *head, *next;
struct drm_mm_node *node;
- if (!res || res->mem_type == TTM_PL_SYSTEM) {
- cur->start = start;
- cur->size = size;
- cur->remaining = size;
- cur->node = NULL;
- WARN_ON(res && start + size > res->num_pages << PAGE_SHIFT);
- return;
- }
+ if (!res)
+ goto fallback;
BUG_ON(start + size > res->num_pages << PAGE_SHIFT);
- node = to_ttm_range_mgr_node(res)->mm_nodes;
- while (start >= node->size << PAGE_SHIFT)
- start -= node++->size << PAGE_SHIFT;
+ cur->mem_type = res->mem_type;
+
+ switch (cur->mem_type) {
+ case TTM_PL_VRAM:
+ head = &to_amdgpu_vram_mgr_resource(res)->blocks;
+
+ block = list_first_entry_or_null(head,
+ struct drm_buddy_block,
+ link);
+ if (!block)
+ goto fallback;
+
+ while (start >= amdgpu_vram_mgr_block_size(block)) {
+ start -= amdgpu_vram_mgr_block_size(block);
+
+ next = block->link.next;
+ if (next != head)
+ block = list_entry(next, struct drm_buddy_block, link);
+ }
+
+ cur->start = amdgpu_vram_mgr_block_start(block) + start;
+ cur->size = min(amdgpu_vram_mgr_block_size(block) - start, size);
+ cur->remaining = size;
+ cur->node = block;
+ break;
+ case TTM_PL_TT:
+ node = to_ttm_range_mgr_node(res)->mm_nodes;
+ while (start >= node->size << PAGE_SHIFT)
+ start -= node++->size << PAGE_SHIFT;
+
+ cur->start = (node->start << PAGE_SHIFT) + start;
+ cur->size = min((node->size << PAGE_SHIFT) - start, size);
+ cur->remaining = size;
+ cur->node = node;
+ break;
+ default:
+ goto fallback;
+ }
- cur->start = (node->start << PAGE_SHIFT) + start;
- cur->size = min((node->size << PAGE_SHIFT) - start, size);
+ return;
+
+fallback:
+ cur->start = start;
+ cur->size = size;
cur->remaining = size;
- cur->node = node;
+ cur->node = NULL;
+ WARN_ON(res && start + size > res->num_pages << PAGE_SHIFT);
+ return;
}
/**
@@ -85,7 +124,9 @@ static inline void amdgpu_res_first(struct ttm_resource *res,
*/
static inline void amdgpu_res_next(struct amdgpu_res_cursor *cur, uint64_t size)
{
- struct drm_mm_node *node = cur->node;
+ struct drm_buddy_block *block;
+ struct drm_mm_node *node;
+ struct list_head *next;
BUG_ON(size > cur->remaining);
@@ -99,9 +140,27 @@ static inline void amdgpu_res_next(struct amdgpu_res_cursor *cur, uint64_t size)
return;
}
- cur->node = ++node;
- cur->start = node->start << PAGE_SHIFT;
- cur->size = min(node->size << PAGE_SHIFT, cur->remaining);
+ switch (cur->mem_type) {
+ case TTM_PL_VRAM:
+ block = cur->node;
+
+ next = block->link.next;
+ block = list_entry(next, struct drm_buddy_block, link);
+
+ cur->node = block;
+ cur->start = amdgpu_vram_mgr_block_start(block);
+ cur->size = min(amdgpu_vram_mgr_block_size(block), cur->remaining);
+ break;
+ case TTM_PL_TT:
+ node = cur->node;
+
+ cur->node = ++node;
+ cur->start = node->start << PAGE_SHIFT;
+ cur->size = min(node->size << PAGE_SHIFT, cur->remaining);
+ break;
+ default:
+ return;
+ }
}
#endif
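The rewritten cursor keeps its external contract: callers seed it with amdgpu_res_first() and advance with amdgpu_res_next() until nothing remains, regardless of whether the backing store is buddy blocks (VRAM) or drm_mm nodes (GTT). A minimal hedged sketch of that loop; only amdgpu_res_first() and amdgpu_res_next() come from the header above, and process_chunk() is a hypothetical stand-in for per-span work:

/*
 * Hedged sketch of the cursor contract. process_chunk() is hypothetical;
 * cursor.start/cursor.size describe one contiguous span per iteration.
 */
static void example_walk(struct ttm_resource *res, uint64_t size)
{
	struct amdgpu_res_cursor cursor;

	amdgpu_res_first(res, 0, size, &cursor);
	while (cursor.remaining) {
		process_chunk(cursor.start, cursor.size);
		amdgpu_res_next(&cursor, cursor.size);
	}
}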
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 40e06745fae9..11c46b3e4c60 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -259,7 +259,8 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
if (resv == NULL)
return -EINVAL;
- dma_resv_for_each_fence(&cursor, resv, true, f) {
+ /* TODO: Use DMA_RESV_USAGE_READ here */
+ dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, f) {
dma_fence_chain_for_each(f, f) {
struct dma_fence *tmp = dma_fence_chain_contained(f);
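The dma_resv conversions throughout this series replace the old boolean filter ("all fences" vs. exclusive only) with an explicit enum dma_resv_usage; the iterator then returns every fence tagged with the requested usage or a stricter one, ordered KERNEL < WRITE < READ < BOOKKEEP. A small C model of that ordered filter (illustrative names, not the kernel structures):

#include <stdio.h>

/* Ordered usage classes, mirroring enum dma_resv_usage. */
enum usage { USAGE_KERNEL, USAGE_WRITE, USAGE_READ, USAGE_BOOKKEEP };

struct fence {
	const char *name;
	enum usage usage;
};

/* A query for usage X yields every fence of class <= X. */
static void for_each_fence(const struct fence *f, int n, enum usage filter)
{
	for (int i = 0; i < n; i++)
		if (f[i].usage <= filter)
			printf("  wait on %s\n", f[i].name);
}

int main(void)
{
	const struct fence fences[] = {
		{ "kernel-move",  USAGE_KERNEL },
		{ "gfx-write",    USAGE_WRITE },
		{ "sampler-read", USAGE_READ },
		{ "vm-update",    USAGE_BOOKKEEP },
	};

	printf("USAGE_WRITE (e.g. wait before scanout):\n");
	for_each_fence(fences, 4, USAGE_WRITE);
	printf("USAGE_BOOKKEEP (e.g. eviction checks):\n");
	for_each_fence(fences, 4, USAGE_BOOKKEEP);
	return 0;
}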
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index f7f149588432..ec26edd4f4d8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1344,7 +1344,8 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
* If true, then return false as any KFD process needs all its BOs to
* be resident to run successfully
*/
- dma_resv_for_each_fence(&resv_cursor, bo->base.resv, true, f) {
+ dma_resv_for_each_fence(&resv_cursor, bo->base.resv,
+ DMA_RESV_USAGE_BOOKKEEP, f) {
if (amdkfd_fence_check_mm(f, current->mm))
return false;
}
@@ -2160,17 +2161,6 @@ int amdgpu_ttm_evict_resources(struct amdgpu_device *adev, int mem_type)
#if defined(CONFIG_DEBUG_FS)
-static int amdgpu_mm_vram_table_show(struct seq_file *m, void *unused)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
- struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev,
- TTM_PL_VRAM);
- struct drm_printer p = drm_seq_file_printer(m);
-
- ttm_resource_manager_debug(man, &p);
- return 0;
-}
-
static int amdgpu_ttm_page_pool_show(struct seq_file *m, void *unused)
{
struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
@@ -2178,55 +2168,6 @@ static int amdgpu_ttm_page_pool_show(struct seq_file *m, void *unused)
return ttm_pool_debugfs(&adev->mman.bdev.pool, m);
}
-static int amdgpu_mm_tt_table_show(struct seq_file *m, void *unused)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
- struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev,
- TTM_PL_TT);
- struct drm_printer p = drm_seq_file_printer(m);
-
- ttm_resource_manager_debug(man, &p);
- return 0;
-}
-
-static int amdgpu_mm_gds_table_show(struct seq_file *m, void *unused)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
- struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev,
- AMDGPU_PL_GDS);
- struct drm_printer p = drm_seq_file_printer(m);
-
- ttm_resource_manager_debug(man, &p);
- return 0;
-}
-
-static int amdgpu_mm_gws_table_show(struct seq_file *m, void *unused)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
- struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev,
- AMDGPU_PL_GWS);
- struct drm_printer p = drm_seq_file_printer(m);
-
- ttm_resource_manager_debug(man, &p);
- return 0;
-}
-
-static int amdgpu_mm_oa_table_show(struct seq_file *m, void *unused)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
- struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev,
- AMDGPU_PL_OA);
- struct drm_printer p = drm_seq_file_printer(m);
-
- ttm_resource_manager_debug(man, &p);
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_vram_table);
-DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_tt_table);
-DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_gds_table);
-DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_gws_table);
-DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_oa_table);
DEFINE_SHOW_ATTRIBUTE(amdgpu_ttm_page_pool);
/*
@@ -2436,17 +2377,23 @@ void amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
&amdgpu_ttm_vram_fops, adev->gmc.mc_vram_size);
debugfs_create_file("amdgpu_iomem", 0444, root, adev,
&amdgpu_ttm_iomem_fops);
- debugfs_create_file("amdgpu_vram_mm", 0444, root, adev,
- &amdgpu_mm_vram_table_fops);
- debugfs_create_file("amdgpu_gtt_mm", 0444, root, adev,
- &amdgpu_mm_tt_table_fops);
- debugfs_create_file("amdgpu_gds_mm", 0444, root, adev,
- &amdgpu_mm_gds_table_fops);
- debugfs_create_file("amdgpu_gws_mm", 0444, root, adev,
- &amdgpu_mm_gws_table_fops);
- debugfs_create_file("amdgpu_oa_mm", 0444, root, adev,
- &amdgpu_mm_oa_table_fops);
debugfs_create_file("ttm_page_pool", 0444, root, adev,
&amdgpu_ttm_page_pool_fops);
+ ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
+ TTM_PL_VRAM),
+ root, "amdgpu_vram_mm");
+ ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
+ TTM_PL_TT),
+ root, "amdgpu_gtt_mm");
+ ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
+ AMDGPU_PL_GDS),
+ root, "amdgpu_gds_mm");
+ ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
+ AMDGPU_PL_GWS),
+ root, "amdgpu_gws_mm");
+ ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
+ AMDGPU_PL_OA),
+ root, "amdgpu_oa_mm");
+
#endif
}
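The five copy-pasted *_table_show() functions and their DEFINE_SHOW_ATTRIBUTE() boilerplate collapse into calls to the common ttm_resource_manager_create_debugfs() helper, one per placement. The refactor is the classic "replace N clones with one parameterized call"; a generic C sketch of the shape (illustrative names):

#include <stdio.h>

struct manager { const char *state; };

/* One shared dump routine instead of one function per manager. */
static void manager_dump(const struct manager *man, const char *name)
{
	printf("%s: %s\n", name, man->state);
}

int main(void)
{
	struct manager vram = { "used 512 MiB" };
	struct manager gtt  = { "used 64 MiB" };
	const struct { const char *name; const struct manager *man; } table[] = {
		{ "amdgpu_vram_mm", &vram },
		{ "amdgpu_gtt_mm",  &gtt },
	};

	for (unsigned int i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		manager_dump(table[i].man, table[i].name);
	return 0;
}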
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 9120ae80ef52..6a70818039dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -26,6 +26,7 @@
#include <linux/dma-direction.h>
#include <drm/gpu_scheduler.h>
+#include "amdgpu_vram_mgr.h"
#include "amdgpu.h"
#define AMDGPU_PL_GDS (TTM_PL_PRIV + 0)
@@ -38,15 +39,6 @@
#define AMDGPU_POISON 0xd0bed0be
-struct amdgpu_vram_mgr {
- struct ttm_resource_manager manager;
- struct drm_mm mm;
- spinlock_t lock;
- struct list_head reservations_pending;
- struct list_head reserved_pages;
- atomic64_t vis_usage;
-};
-
struct amdgpu_gtt_mgr {
struct ttm_resource_manager manager;
struct drm_mm mm;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 39c74d9fa7cc..6eac649499d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -1163,7 +1163,8 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
ib->length_dw = 16;
if (direct) {
- r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false,
+ r = dma_resv_wait_timeout(bo->tbo.base.resv,
+ DMA_RESV_USAGE_KERNEL, false,
msecs_to_jiffies(10));
if (r == 0)
r = -ETIMEDOUT;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index b13451255e8b..5277c10d901d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2059,7 +2059,7 @@ static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
struct dma_resv_iter cursor;
struct dma_fence *fence;
- dma_resv_for_each_fence(&cursor, resv, true, fence) {
+ dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, fence) {
/* Add a callback for each fence in the reservation object */
amdgpu_vm_prt_get(adev);
amdgpu_vm_add_prt_cb(adev, fence);
@@ -2665,7 +2665,7 @@ bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
return true;
/* Don't evict VM page tables while they are busy */
- if (!dma_resv_test_signaled(bo->tbo.base.resv, true))
+ if (!dma_resv_test_signaled(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP))
return false;
/* Try to block ongoing updates */
@@ -2845,7 +2845,8 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
*/
long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
{
- timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv, true,
+ timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv,
+ DMA_RESV_USAGE_BOOKKEEP,
true, timeout);
if (timeout <= 0)
return timeout;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
index e3fbf0f10add..31913ae86de6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
@@ -74,13 +74,12 @@ static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p,
{
unsigned int i;
uint64_t value;
- int r;
+ long r;
- if (vmbo->bo.tbo.moving) {
- r = dma_fence_wait(vmbo->bo.tbo.moving, true);
- if (r)
- return r;
- }
+ r = dma_resv_wait_timeout(vmbo->bo.tbo.base.resv, DMA_RESV_USAGE_KERNEL,
+ true, MAX_SCHEDULE_TIMEOUT);
+ if (r < 0)
+ return r;
pe += (unsigned long)amdgpu_bo_kptr(&vmbo->bo);
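Note the type change above: dma_fence_wait() returns an int (0 or -errno), while dma_resv_wait_timeout() returns a long carrying the remaining timeout, 0 on timeout, or a negative error, so 'r' becomes long and only r < 0 is treated as failure (with MAX_SCHEDULE_TIMEOUT the call does not legitimately return 0). A tiny C sketch of handling such a tri-state return (simulated wait, illustrative only):

#include <stdio.h>

/*
 * Models the dma_resv_wait_timeout() return convention:
 *  > 0  remaining timeout (signaled in time)
 * == 0  timed out
 *  < 0  error (-errno)
 */
static long wait_timeout(int outcome, long timeout)
{
	if (outcome < 0)
		return outcome;		/* error */
	if (outcome == 0)
		return 0;		/* timed out */
	return timeout / 2;		/* signaled with budget to spare */
}

int main(void)
{
	long r = wait_timeout(1, 1000);

	if (r < 0)
		printf("error %ld\n", r);
	else if (r == 0)
		printf("timed out\n");
	else
		printf("signaled, %ld left\n", r);
	return 0;
}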
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
index dbb551762805..bdb44cee19d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -204,14 +204,19 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
struct amdgpu_bo *bo = &vmbo->bo;
enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE
: AMDGPU_IB_POOL_DELAYED;
+ struct dma_resv_iter cursor;
unsigned int i, ndw, nptes;
+ struct dma_fence *fence;
uint64_t *pte;
int r;
/* Wait for PD/PT moves to be completed */
- r = amdgpu_sync_fence(&p->job->sync, bo->tbo.moving);
- if (r)
- return r;
+ dma_resv_for_each_fence(&cursor, bo->tbo.base.resv,
+ DMA_RESV_USAGE_KERNEL, fence) {
+ r = amdgpu_sync_fence(&p->job->sync, fence);
+ if (r)
+ return r;
+ }
do {
ndw = p->num_dw_left;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 0a7611648573..49e4092f447f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -32,8 +32,10 @@
#include "atom.h"
struct amdgpu_vram_reservation {
- struct list_head node;
- struct drm_mm_node mm_node;
+ u64 start;
+ u64 size;
+ struct list_head allocated;
+ struct list_head blocks;
};
static inline struct amdgpu_vram_mgr *
@@ -186,18 +188,18 @@ const struct attribute_group amdgpu_vram_mgr_attr_group = {
};
/**
- * amdgpu_vram_mgr_vis_size - Calculate visible node size
+ * amdgpu_vram_mgr_vis_size - Calculate visible block size
*
* @adev: amdgpu_device pointer
- * @node: MM node structure
+ * @block: DRM BUDDY block structure
*
- * Calculate how many bytes of the MM node are inside visible VRAM
+ * Calculate how many bytes of the DRM BUDDY block are inside visible VRAM
*/
static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
- struct drm_mm_node *node)
+ struct drm_buddy_block *block)
{
- uint64_t start = node->start << PAGE_SHIFT;
- uint64_t end = (node->size + node->start) << PAGE_SHIFT;
+ u64 start = amdgpu_vram_mgr_block_start(block);
+ u64 end = start + amdgpu_vram_mgr_block_size(block);
if (start >= adev->gmc.visible_vram_size)
return 0;
@@ -218,9 +220,9 @@ u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct ttm_resource *res = bo->tbo.resource;
- unsigned pages = res->num_pages;
- struct drm_mm_node *mm;
- u64 usage;
+ struct amdgpu_vram_mgr_resource *vres = to_amdgpu_vram_mgr_resource(res);
+ struct drm_buddy_block *block;
+ u64 usage = 0;
if (amdgpu_gmc_vram_full_visible(&adev->gmc))
return amdgpu_bo_size(bo);
@@ -228,9 +230,8 @@ u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
if (res->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
return 0;
- mm = &container_of(res, struct ttm_range_mgr_node, base)->mm_nodes[0];
- for (usage = 0; pages; pages -= mm->size, mm++)
- usage += amdgpu_vram_mgr_vis_size(adev, mm);
+ list_for_each_entry(block, &vres->blocks, link)
+ usage += amdgpu_vram_mgr_vis_size(adev, block);
return usage;
}
@@ -240,23 +241,30 @@ static void amdgpu_vram_mgr_do_reserve(struct ttm_resource_manager *man)
{
struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
struct amdgpu_device *adev = to_amdgpu_device(mgr);
- struct drm_mm *mm = &mgr->mm;
+ struct drm_buddy *mm = &mgr->mm;
struct amdgpu_vram_reservation *rsv, *temp;
+ struct drm_buddy_block *block;
uint64_t vis_usage;
- list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, node) {
- if (drm_mm_reserve_node(mm, &rsv->mm_node))
+ list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, blocks) {
+ if (drm_buddy_alloc_blocks(mm, rsv->start, rsv->start + rsv->size,
+ rsv->size, mm->chunk_size, &rsv->allocated,
+ DRM_BUDDY_RANGE_ALLOCATION))
+ continue;
+
+ block = amdgpu_vram_mgr_first_block(&rsv->allocated);
+ if (!block)
continue;
dev_dbg(adev->dev, "Reservation 0x%llx - %lld, Succeeded\n",
- rsv->mm_node.start, rsv->mm_node.size);
+ rsv->start, rsv->size);
- vis_usage = amdgpu_vram_mgr_vis_size(adev, &rsv->mm_node);
+ vis_usage = amdgpu_vram_mgr_vis_size(adev, block);
atomic64_add(vis_usage, &mgr->vis_usage);
spin_lock(&man->bdev->lru_lock);
- man->usage += rsv->mm_node.size << PAGE_SHIFT;
+ man->usage += rsv->size;
spin_unlock(&man->bdev->lru_lock);
- list_move(&rsv->node, &mgr->reserved_pages);
+ list_move(&rsv->blocks, &mgr->reserved_pages);
}
}
@@ -278,14 +286,16 @@ int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
if (!rsv)
return -ENOMEM;
- INIT_LIST_HEAD(&rsv->node);
- rsv->mm_node.start = start >> PAGE_SHIFT;
- rsv->mm_node.size = size >> PAGE_SHIFT;
+ INIT_LIST_HEAD(&rsv->allocated);
+ INIT_LIST_HEAD(&rsv->blocks);
- spin_lock(&mgr->lock);
- list_add_tail(&rsv->node, &mgr->reservations_pending);
+ rsv->start = start;
+ rsv->size = size;
+
+ mutex_lock(&mgr->lock);
+ list_add_tail(&rsv->blocks, &mgr->reservations_pending);
amdgpu_vram_mgr_do_reserve(&mgr->manager);
- spin_unlock(&mgr->lock);
+ mutex_unlock(&mgr->lock);
return 0;
}
@@ -307,19 +317,19 @@ int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
struct amdgpu_vram_reservation *rsv;
int ret;
- spin_lock(&mgr->lock);
+ mutex_lock(&mgr->lock);
- list_for_each_entry(rsv, &mgr->reservations_pending, node) {
- if ((rsv->mm_node.start <= start) &&
- (start < (rsv->mm_node.start + rsv->mm_node.size))) {
+ list_for_each_entry(rsv, &mgr->reservations_pending, blocks) {
+ if (rsv->start <= start &&
+ (start < (rsv->start + rsv->size))) {
ret = -EBUSY;
goto out;
}
}
- list_for_each_entry(rsv, &mgr->reserved_pages, node) {
- if ((rsv->mm_node.start <= start) &&
- (start < (rsv->mm_node.start + rsv->mm_node.size))) {
+ list_for_each_entry(rsv, &mgr->reserved_pages, blocks) {
+ if (rsv->start <= start &&
+ (start < (rsv->start + rsv->size))) {
ret = 0;
goto out;
}
@@ -327,33 +337,11 @@ int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
ret = -ENOENT;
out:
- spin_unlock(&mgr->lock);
+ mutex_unlock(&mgr->lock);
return ret;
}
/**
- * amdgpu_vram_mgr_virt_start - update virtual start address
- *
- * @mem: ttm_resource to update
- * @node: just allocated node
- *
- * Calculate a virtual BO start address to easily check if everything is CPU
- * accessible.
- */
-static void amdgpu_vram_mgr_virt_start(struct ttm_resource *mem,
- struct drm_mm_node *node)
-{
- unsigned long start;
-
- start = node->start + node->size;
- if (start > mem->num_pages)
- start -= mem->num_pages;
- else
- start = 0;
- mem->start = max(mem->start, start);
-}
-
-/**
* amdgpu_vram_mgr_new - allocate new ranges
*
* @man: TTM memory type manager
@@ -368,46 +356,44 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
const struct ttm_place *place,
struct ttm_resource **res)
{
- unsigned long lpfn, num_nodes, pages_per_node, pages_left, pages;
+ u64 vis_usage = 0, max_bytes, cur_size, min_block_size;
struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
struct amdgpu_device *adev = to_amdgpu_device(mgr);
- uint64_t vis_usage = 0, mem_bytes, max_bytes;
- struct ttm_range_mgr_node *node;
- struct drm_mm *mm = &mgr->mm;
- enum drm_mm_insert_mode mode;
- unsigned i;
+ struct amdgpu_vram_mgr_resource *vres;
+ u64 size, remaining_size, lpfn, fpfn;
+ struct drm_buddy *mm = &mgr->mm;
+ struct drm_buddy_block *block;
+ unsigned long pages_per_block;
int r;
- lpfn = place->lpfn;
+ lpfn = place->lpfn << PAGE_SHIFT;
if (!lpfn)
- lpfn = man->size >> PAGE_SHIFT;
+ lpfn = man->size;
+
+ fpfn = place->fpfn << PAGE_SHIFT;
max_bytes = adev->gmc.mc_vram_size;
if (tbo->type != ttm_bo_type_kernel)
max_bytes -= AMDGPU_VM_RESERVED_VRAM;
- mem_bytes = tbo->base.size;
if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
- pages_per_node = ~0ul;
- num_nodes = 1;
+ pages_per_block = ~0ul;
} else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- pages_per_node = HPAGE_PMD_NR;
+ pages_per_block = HPAGE_PMD_NR;
#else
/* default to 2MB */
- pages_per_node = 2UL << (20UL - PAGE_SHIFT);
+ pages_per_block = 2UL << (20UL - PAGE_SHIFT);
#endif
- pages_per_node = max_t(uint32_t, pages_per_node,
- tbo->page_alignment);
- num_nodes = DIV_ROUND_UP_ULL(PFN_UP(mem_bytes), pages_per_node);
+ pages_per_block = max_t(uint32_t, pages_per_block,
+ tbo->page_alignment);
}
- node = kvmalloc(struct_size(node, mm_nodes, num_nodes),
- GFP_KERNEL | __GFP_ZERO);
- if (!node)
+ vres = kzalloc(sizeof(*vres), GFP_KERNEL);
+ if (!vres)
return -ENOMEM;
- ttm_resource_init(tbo, place, &node->base);
+ ttm_resource_init(tbo, place, &vres->base);
/* bail out quickly if there's likely not enough VRAM for this BO */
if (ttm_resource_manager_usage(man) > max_bytes) {
@@ -415,66 +401,130 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
goto error_fini;
}
- mode = DRM_MM_INSERT_BEST;
+ INIT_LIST_HEAD(&vres->blocks);
+
if (place->flags & TTM_PL_FLAG_TOPDOWN)
- mode = DRM_MM_INSERT_HIGH;
-
- pages_left = node->base.num_pages;
-
- /* Limit maximum size to 2GB due to SG table limitations */
- pages = min(pages_left, 2UL << (30 - PAGE_SHIFT));
-
- i = 0;
- spin_lock(&mgr->lock);
- while (pages_left) {
- uint32_t alignment = tbo->page_alignment;
-
- if (pages >= pages_per_node)
- alignment = pages_per_node;
-
- r = drm_mm_insert_node_in_range(mm, &node->mm_nodes[i], pages,
- alignment, 0, place->fpfn,
- lpfn, mode);
- if (unlikely(r)) {
- if (pages > pages_per_node) {
- if (is_power_of_2(pages))
- pages = pages / 2;
- else
- pages = rounddown_pow_of_two(pages);
- continue;
+ vres->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION;
+
+ if (fpfn || lpfn != man->size)
+ /* Allocate blocks in desired range */
+ vres->flags |= DRM_BUDDY_RANGE_ALLOCATION;
+
+ remaining_size = vres->base.num_pages << PAGE_SHIFT;
+
+ mutex_lock(&mgr->lock);
+ while (remaining_size) {
+ if (tbo->page_alignment)
+ min_block_size = tbo->page_alignment << PAGE_SHIFT;
+ else
+ min_block_size = mgr->default_page_size;
+
+ BUG_ON(min_block_size < mm->chunk_size);
+
+ /* Limit maximum size to 2GiB due to SG table limitations */
+ size = min(remaining_size, 2ULL << 30);
+
+ if (size >= pages_per_block << PAGE_SHIFT)
+ min_block_size = pages_per_block << PAGE_SHIFT;
+
+ cur_size = size;
+
+ if (fpfn + size != place->lpfn << PAGE_SHIFT) {
+ /*
+ * Unless this is an actual range allocation, adjust size and
+ * min_block_size to conform to the contiguous flag
+ */
+ if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
+ size = roundup_pow_of_two(size);
+ min_block_size = size;
+ /*
+ * Round size up if it is not
+ * aligned to min_block_size
+ */
+ } else if (!IS_ALIGNED(size, min_block_size)) {
+ size = round_up(size, min_block_size);
}
- goto error_free;
}
- vis_usage += amdgpu_vram_mgr_vis_size(adev, &node->mm_nodes[i]);
- amdgpu_vram_mgr_virt_start(&node->base, &node->mm_nodes[i]);
- pages_left -= pages;
- ++i;
+ r = drm_buddy_alloc_blocks(mm, fpfn,
+ lpfn,
+ size,
+ min_block_size,
+ &vres->blocks,
+ vres->flags);
+ if (unlikely(r))
+ goto error_free_blocks;
+
+ if (size > remaining_size)
+ remaining_size = 0;
+ else
+ remaining_size -= size;
+ }
+ mutex_unlock(&mgr->lock);
+
+ if (cur_size != size) {
+ struct drm_buddy_block *block;
+ struct list_head *trim_list;
+ u64 original_size;
+ LIST_HEAD(temp);
+
+ trim_list = &vres->blocks;
+ original_size = vres->base.num_pages << PAGE_SHIFT;
+
+ /*
+ * If the size was rounded up to min_block_size, trim the last
+ * block down to the required size
+ */
+ if (!list_is_singular(&vres->blocks)) {
+ block = list_last_entry(&vres->blocks, typeof(*block), link);
+ list_move_tail(&block->link, &temp);
+ trim_list = &temp;
+ /*
+ * Compute original_size by subtracting the overshoot
+ * (aligned size - original size) from the last block size
+ */
+ original_size = amdgpu_vram_mgr_block_size(block) - (size - cur_size);
+ }
- if (pages > pages_left)
- pages = pages_left;
+ mutex_lock(&mgr->lock);
+ drm_buddy_block_trim(mm,
+ original_size,
+ trim_list);
+ mutex_unlock(&mgr->lock);
+
+ if (!list_empty(&temp))
+ list_splice_tail(trim_list, &vres->blocks);
+ }
+
+ list_for_each_entry(block, &vres->blocks, link)
+ vis_usage += amdgpu_vram_mgr_vis_size(adev, block);
+
+ block = amdgpu_vram_mgr_first_block(&vres->blocks);
+ if (!block) {
+ r = -EINVAL;
+ goto error_fini;
}
- spin_unlock(&mgr->lock);
- if (i == 1)
- node->base.placement |= TTM_PL_FLAG_CONTIGUOUS;
+ vres->base.start = amdgpu_vram_mgr_block_start(block) >> PAGE_SHIFT;
+
+ if (amdgpu_is_vram_mgr_blocks_contiguous(&vres->blocks))
+ vres->base.placement |= TTM_PL_FLAG_CONTIGUOUS;
if (adev->gmc.xgmi.connected_to_cpu)
- node->base.bus.caching = ttm_cached;
+ vres->base.bus.caching = ttm_cached;
else
- node->base.bus.caching = ttm_write_combined;
+ vres->base.bus.caching = ttm_write_combined;
atomic64_add(vis_usage, &mgr->vis_usage);
- *res = &node->base;
+ *res = &vres->base;
return 0;
-error_free:
- while (i--)
- drm_mm_remove_node(&node->mm_nodes[i]);
- spin_unlock(&mgr->lock);
+error_free_blocks:
+ drm_buddy_free_list(mm, &vres->blocks);
+ mutex_unlock(&mgr->lock);
error_fini:
- ttm_resource_fini(man, &node->base);
- kvfree(node);
+ ttm_resource_fini(man, &vres->base);
+ kfree(vres);
return r;
}
@@ -490,27 +540,26 @@ error_fini:
static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
struct ttm_resource *res)
{
- struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
+ struct amdgpu_vram_mgr_resource *vres = to_amdgpu_vram_mgr_resource(res);
struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
struct amdgpu_device *adev = to_amdgpu_device(mgr);
+ struct drm_buddy *mm = &mgr->mm;
+ struct drm_buddy_block *block;
uint64_t vis_usage = 0;
- unsigned i, pages;
- spin_lock(&mgr->lock);
- for (i = 0, pages = res->num_pages; pages;
- pages -= node->mm_nodes[i].size, ++i) {
- struct drm_mm_node *mm = &node->mm_nodes[i];
+ mutex_lock(&mgr->lock);
+ list_for_each_entry(block, &vres->blocks, link)
+ vis_usage += amdgpu_vram_mgr_vis_size(adev, block);
- drm_mm_remove_node(mm);
- vis_usage += amdgpu_vram_mgr_vis_size(adev, mm);
- }
amdgpu_vram_mgr_do_reserve(man);
- spin_unlock(&mgr->lock);
+
+ drm_buddy_free_list(mm, &vres->blocks);
+ mutex_unlock(&mgr->lock);
atomic64_sub(vis_usage, &mgr->vis_usage);
ttm_resource_fini(man, res);
- kvfree(node);
+ kfree(vres);
}
/**
@@ -542,7 +591,7 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
if (!*sgt)
return -ENOMEM;
- /* Determine the number of DRM_MM nodes to export */
+ /* Determine the number of DRM_BUDDY blocks to export */
amdgpu_res_first(res, offset, length, &cursor);
while (cursor.remaining) {
num_entries++;
@@ -558,10 +607,10 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
sg->length = 0;
/*
- * Walk down DRM_MM nodes to populate scatterlist nodes
- * @note: Use iterator api to get first the DRM_MM node
+ * Walk down DRM_BUDDY blocks to populate scatterlist nodes
+ * @note: Use the iterator API to get the first DRM_BUDDY block
* and the number of bytes from it. Access the following
- * DRM_MM node(s) if more buffer needs to exported
+ * DRM_BUDDY block(s) if more of the buffer needs to be exported
*/
amdgpu_res_first(res, offset, length, &cursor);
for_each_sgtable_sg((*sgt), sg, i) {
@@ -648,13 +697,22 @@ static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man,
struct drm_printer *printer)
{
struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
+ struct drm_buddy *mm = &mgr->mm;
+ struct drm_buddy_block *block;
drm_printf(printer, " vis usage:%llu\n",
amdgpu_vram_mgr_vis_usage(mgr));
- spin_lock(&mgr->lock);
- drm_mm_print(&mgr->mm, printer);
- spin_unlock(&mgr->lock);
+ mutex_lock(&mgr->lock);
+ drm_printf(printer, "default_page_size: %lluKiB\n",
+ mgr->default_page_size >> 10);
+
+ drm_buddy_print(mm, printer);
+
+ drm_printf(printer, "reserved:\n");
+ list_for_each_entry(block, &mgr->reserved_pages, link)
+ drm_buddy_block_print(mm, block, printer);
+ mutex_unlock(&mgr->lock);
}
static const struct ttm_resource_manager_func amdgpu_vram_mgr_func = {
@@ -674,16 +732,21 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev)
{
struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
struct ttm_resource_manager *man = &mgr->manager;
+ int err;
ttm_resource_manager_init(man, &adev->mman.bdev,
adev->gmc.real_vram_size);
man->func = &amdgpu_vram_mgr_func;
- drm_mm_init(&mgr->mm, 0, man->size >> PAGE_SHIFT);
- spin_lock_init(&mgr->lock);
+ err = drm_buddy_init(&mgr->mm, man->size, PAGE_SIZE);
+ if (err)
+ return err;
+
+ mutex_init(&mgr->lock);
INIT_LIST_HEAD(&mgr->reservations_pending);
INIT_LIST_HEAD(&mgr->reserved_pages);
+ mgr->default_page_size = PAGE_SIZE;
ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, &mgr->manager);
ttm_resource_manager_set_used(man, true);
@@ -711,16 +774,16 @@ void amdgpu_vram_mgr_fini(struct amdgpu_device *adev)
if (ret)
return;
- spin_lock(&mgr->lock);
- list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, node)
+ mutex_lock(&mgr->lock);
+ list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, blocks)
kfree(rsv);
- list_for_each_entry_safe(rsv, temp, &mgr->reserved_pages, node) {
- drm_mm_remove_node(&rsv->mm_node);
+ list_for_each_entry_safe(rsv, temp, &mgr->reserved_pages, blocks) {
+ drm_buddy_free_list(&mgr->mm, &rsv->blocks);
kfree(rsv);
}
- drm_mm_takedown(&mgr->mm);
- spin_unlock(&mgr->lock);
+ drm_buddy_fini(&mgr->mm);
+ mutex_unlock(&mgr->lock);
ttm_resource_manager_cleanup(man);
ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, NULL);
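The heart of the rework is the sizing loop in amdgpu_vram_mgr_new(): each pass is capped at 2 GiB, contiguous buffers are rounded up to a power of two (a buddy allocator only hands out power-of-two blocks) and trimmed back afterwards, and everything else is rounded up to min_block_size. A userspace sketch of just that arithmetic, assuming 2 MiB minimum blocks (illustrative, not the kernel code):

#include <stdint.h>
#include <stdio.h>

static uint64_t roundup_pow2(uint64_t x)
{
	uint64_t p = 1;

	while (p < x)
		p <<= 1;
	return p;
}

static uint64_t round_up_to(uint64_t x, uint64_t align)
{
	return (x + align - 1) / align * align;
}

int main(void)
{
	const uint64_t two_gib = 2ULL << 30;
	uint64_t remaining = 3ULL << 30;	/* a 3 GiB buffer */
	uint64_t min_block_size = 2ULL << 20;	/* 2 MiB blocks */
	int contiguous = 0;

	while (remaining) {
		/* Cap each pass at 2 GiB (SG table limitation). */
		uint64_t size = remaining < two_gib ? remaining : two_gib;
		uint64_t asked = size;

		if (contiguous) {
			/* Buddy blocks are powers of two: round up, trim later. */
			size = roundup_pow2(size);
			min_block_size = size;
		} else if (size % min_block_size) {
			size = round_up_to(size, min_block_size);
		}

		printf("alloc %llu MiB (asked %llu MiB, min block %llu MiB)\n",
		       (unsigned long long)(size >> 20),
		       (unsigned long long)(asked >> 20),
		       (unsigned long long)(min_block_size >> 20));

		remaining = size > remaining ? 0 : remaining - size;
	}
	return 0;
}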
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h
new file mode 100644
index 000000000000..9a2db87186c7
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: MIT
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_VRAM_MGR_H__
+#define __AMDGPU_VRAM_MGR_H__
+
+#include <drm/drm_buddy.h>
+
+struct amdgpu_vram_mgr {
+ struct ttm_resource_manager manager;
+ struct drm_buddy mm;
+ /* protects access to buffer objects */
+ struct mutex lock;
+ struct list_head reservations_pending;
+ struct list_head reserved_pages;
+ atomic64_t vis_usage;
+ u64 default_page_size;
+};
+
+struct amdgpu_vram_mgr_resource {
+ struct ttm_resource base;
+ struct list_head blocks;
+ unsigned long flags;
+};
+
+static inline u64 amdgpu_vram_mgr_block_start(struct drm_buddy_block *block)
+{
+ return drm_buddy_block_offset(block);
+}
+
+static inline u64 amdgpu_vram_mgr_block_size(struct drm_buddy_block *block)
+{
+ return PAGE_SIZE << drm_buddy_block_order(block);
+}
+
+static inline struct drm_buddy_block *
+amdgpu_vram_mgr_first_block(struct list_head *list)
+{
+ return list_first_entry_or_null(list, struct drm_buddy_block, link);
+}
+
+static inline bool amdgpu_is_vram_mgr_blocks_contiguous(struct list_head *head)
+{
+ struct drm_buddy_block *block;
+ u64 start, size;
+
+ block = amdgpu_vram_mgr_first_block(head);
+ if (!block)
+ return false;
+
+ while (head != block->link.next) {
+ start = amdgpu_vram_mgr_block_start(block);
+ size = amdgpu_vram_mgr_block_size(block);
+
+ block = list_entry(block->link.next, struct drm_buddy_block, link);
+ if (start + size != amdgpu_vram_mgr_block_start(block))
+ return false;
+ }
+
+ return true;
+}
+
+static inline struct amdgpu_vram_mgr_resource *
+to_amdgpu_vram_mgr_resource(struct ttm_resource *res)
+{
+ return container_of(res, struct amdgpu_vram_mgr_resource, base);
+}
+
+#endif
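With drm_mm a single node was contiguous by construction; with the buddy allocator the driver must check explicitly, which is what amdgpu_is_vram_mgr_blocks_contiguous() above does: each block must start exactly where the previous one ends. The same check over a plain array, as a standalone C sketch:

#include <stdint.h>
#include <stdio.h>

struct block { uint64_t start, size; };

/* True if every block begins exactly where the previous one ends. */
static int blocks_contiguous(const struct block *b, int n)
{
	if (n <= 0)
		return 0;
	for (int i = 1; i < n; i++)
		if (b[i - 1].start + b[i - 1].size != b[i].start)
			return 0;
	return 1;
}

int main(void)
{
	const struct block ok[]  = { { 0x0000, 0x1000 }, { 0x1000, 0x2000 } };
	const struct block bad[] = { { 0x0000, 0x1000 }, { 0x4000, 0x1000 } };

	printf("ok:  %d\n", blocks_contiguous(ok, 2));
	printf("bad: %d\n", blocks_contiguous(bad, 2));
	return 0;
}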
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 62139ff35476..73423b805b54 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -9238,7 +9238,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
* deadlock during GPU reset when this fence will not signal
* but we hold reservation lock for the BO.
*/
- r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
+ r = dma_resv_wait_timeout(abo->tbo.base.resv,
+ DMA_RESV_USAGE_WRITE, false,
msecs_to_jiffies(5000));
if (unlikely(r <= 0))
DRM_ERROR("Waiting for fences timed out!");
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_plane.c b/drivers/gpu/drm/arm/display/komeda/komeda_plane.c
index 541949f2d44a..e0b9f7063b20 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_plane.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_plane.c
@@ -256,6 +256,10 @@ static int komeda_plane_add(struct komeda_kms_dev *kms,
formats = komeda_get_layer_fourcc_list(&mdev->fmt_tbl,
layer->layer_type, &n_formats);
+ if (!formats) {
+ kfree(kplane);
+ return -ENOMEM;
+ }
err = drm_universal_plane_init(&kms->base, plane,
get_possible_crtcs(kms, c->pipeline),
@@ -266,8 +270,10 @@ static int komeda_plane_add(struct komeda_kms_dev *kms,
komeda_put_fourcc_list(formats);
- if (err)
- goto cleanup;
+ if (err) {
+ kfree(kplane);
+ return err;
+ }
drm_plane_helper_add(plane, &komeda_plane_helper_funcs);
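The komeda change above is an error-path fix: at the two early failure points the plane has not been registered yet, so the shared cleanup label is wrong and a plain kfree() of the allocation is all that may be undone. A generic C sketch of the pattern (illustrative names):

#include <stdio.h>
#include <stdlib.h>

struct plane { int id; };

static int init_plane(struct plane *p, int fail)
{
	p->id = 1;
	return fail ? -1 : 0;
}

static int plane_add(int fail_formats, int fail_init)
{
	struct plane *p = malloc(sizeof(*p));

	if (!p)
		return -1;

	if (fail_formats) {
		/* Nothing registered yet: undo only the allocation. */
		free(p);
		return -1;
	}

	if (init_plane(p, fail_init)) {
		/* Init failed, the plane never became visible: just free. */
		free(p);
		return -1;
	}

	printf("plane %d added\n", p->id);
	free(p);	/* kept leak-free for the sketch; a driver would keep it */
	return 0;
}

int main(void)
{
	return plane_add(0, 0);
}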
diff --git a/drivers/gpu/drm/arm/malidp_crtc.c b/drivers/gpu/drm/arm/malidp_crtc.c
index 494075ddbef6..b5928b52e279 100644
--- a/drivers/gpu/drm/arm/malidp_crtc.c
+++ b/drivers/gpu/drm/arm/malidp_crtc.c
@@ -487,7 +487,10 @@ static void malidp_crtc_reset(struct drm_crtc *crtc)
if (crtc->state)
malidp_crtc_destroy_state(crtc, crtc->state);
- __drm_atomic_helper_crtc_reset(crtc, &state->base);
+ if (state)
+ __drm_atomic_helper_crtc_reset(crtc, &state->base);
+ else
+ __drm_atomic_helper_crtc_reset(crtc, NULL);
}
static int malidp_crtc_enable_vblank(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index 0562bdaac00c..81d9f5004025 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -310,17 +310,13 @@ static int malidp_se_check_scaling(struct malidp_plane *mp,
static u32 malidp_get_pgsize_bitmap(struct malidp_plane *mp)
{
- u32 pgsize_bitmap = 0;
+ struct iommu_domain *mmu_dom;
- if (iommu_present(&platform_bus_type)) {
- struct iommu_domain *mmu_dom =
- iommu_get_domain_for_dev(mp->base.dev->dev);
+ mmu_dom = iommu_get_domain_for_dev(mp->base.dev->dev);
+ if (mmu_dom)
+ return mmu_dom->pgsize_bitmap;
- if (mmu_dom)
- pgsize_bitmap = mmu_dom->pgsize_bitmap;
- }
-
- return pgsize_bitmap;
+ return 0;
}
/*
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 658837caaf39..c08ccb4b332b 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -32,6 +32,7 @@ config DRM_CHIPONE_ICN6211
select DRM_KMS_HELPER
select DRM_MIPI_DSI
select DRM_PANEL_BRIDGE
+ select REGMAP_I2C
help
ICN6211 is MIPI-DSI/RGB Converter bridge from chipone.
@@ -99,6 +100,19 @@ config DRM_LONTIUM_LT8912B
Say M here if you want to support this hardware as a module.
The module will be named "lontium-lt8912b".
+config DRM_LONTIUM_LT9211
+ tristate "Lontium LT9211 DSI/LVDS/DPI bridge"
+ depends on OF
+ select DRM_PANEL_BRIDGE
+ select DRM_KMS_HELPER
+ select DRM_MIPI_DSI
+ select REGMAP_I2C
+ help
+ Driver for the Lontium LT9211 bridge chip, which converts
+ single/dual-link DSI/LVDS or single DPI input to single/dual-link
+ DSI/LVDS or single DPI output.
+ Please say Y if you have such hardware.
+
config DRM_LONTIUM_LT9611
tristate "Lontium LT9611 DSI/HDMI bridge"
select SND_SOC_HDMI_CODEC if SND_SOC
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index 425844c30495..b0edf2022fa0 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_DRM_CROS_EC_ANX7688) += cros-ec-anx7688.o
obj-$(CONFIG_DRM_DISPLAY_CONNECTOR) += display-connector.o
obj-$(CONFIG_DRM_ITE_IT6505) += ite-it6505.o
obj-$(CONFIG_DRM_LONTIUM_LT8912B) += lontium-lt8912b.o
+obj-$(CONFIG_DRM_LONTIUM_LT9211) += lontium-lt9211.o
obj-$(CONFIG_DRM_LONTIUM_LT9611) += lontium-lt9611.o
obj-$(CONFIG_DRM_LONTIUM_LT9611UXC) += lontium-lt9611uxc.o
obj-$(CONFIG_DRM_LVDS_CODEC) += lvds-codec.o
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index 668dcefbae17..b3f10c54e064 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -1292,8 +1292,10 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
goto err_unregister_cec;
adv7511->bridge.funcs = &adv7511_bridge_funcs;
- adv7511->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID
- | DRM_BRIDGE_OP_HPD;
+ adv7511->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID;
+ if (adv7511->i2c_main->irq)
+ adv7511->bridge.ops |= DRM_BRIDGE_OP_HPD;
+
adv7511->bridge.of_node = dev->of_node;
adv7511->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
index 6516f9570b86..376da01243a3 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
@@ -1486,12 +1486,12 @@ static void anx7625_dp_adjust_swing(struct anx7625_data *ctx)
for (i = 0; i < ctx->pdata.dp_lane0_swing_reg_cnt; i++)
anx7625_reg_write(ctx, ctx->i2c.tx_p1_client,
DP_TX_LANE0_SWING_REG0 + i,
- ctx->pdata.lane0_reg_data[i] & 0xFF);
+ ctx->pdata.lane0_reg_data[i]);
for (i = 0; i < ctx->pdata.dp_lane1_swing_reg_cnt; i++)
anx7625_reg_write(ctx, ctx->i2c.tx_p1_client,
DP_TX_LANE1_SWING_REG0 + i,
- ctx->pdata.lane1_reg_data[i] & 0xFF);
+ ctx->pdata.lane1_reg_data[i]);
}
static void dp_hpd_change_handler(struct anx7625_data *ctx, bool on)
@@ -1598,8 +1598,8 @@ static int anx7625_get_swing_setting(struct device *dev,
num_regs = DP_TX_SWING_REG_CNT;
pdata->dp_lane0_swing_reg_cnt = num_regs;
- of_property_read_u32_array(dev->of_node, "analogix,lane0-swing",
- pdata->lane0_reg_data, num_regs);
+ of_property_read_u8_array(dev->of_node, "analogix,lane0-swing",
+ pdata->lane0_reg_data, num_regs);
}
if (of_get_property(dev->of_node,
@@ -1608,8 +1608,8 @@ static int anx7625_get_swing_setting(struct device *dev,
num_regs = DP_TX_SWING_REG_CNT;
pdata->dp_lane1_swing_reg_cnt = num_regs;
- of_property_read_u32_array(dev->of_node, "analogix,lane1-swing",
- pdata->lane1_reg_data, num_regs);
+ of_property_read_u8_array(dev->of_node, "analogix,lane1-swing",
+ pdata->lane1_reg_data, num_regs);
}
return 0;
@@ -1932,14 +1932,14 @@ static int anx7625_audio_get_eld(struct device *dev, void *data,
struct anx7625_data *ctx = dev_get_drvdata(dev);
if (!ctx->connector) {
- dev_err(dev, "connector not initial\n");
- return -EINVAL;
+ /* Pass an empty ELD if the connector is not available */
+ memset(buf, 0, len);
+ } else {
+ dev_dbg(dev, "audio copy eld\n");
+ memcpy(buf, ctx->connector->eld,
+ min(sizeof(ctx->connector->eld), len));
}
- dev_dbg(dev, "audio copy eld\n");
- memcpy(buf, ctx->connector->eld,
- min(sizeof(ctx->connector->eld), len));
-
return 0;
}
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.h b/drivers/gpu/drm/bridge/analogix/anx7625.h
index edbbfe410a56..e257a84db962 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.h
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.h
@@ -426,9 +426,9 @@ struct anx7625_platform_data {
int mipi_lanes;
int audio_en;
int dp_lane0_swing_reg_cnt;
- int lane0_reg_data[DP_TX_SWING_REG_CNT];
+ u8 lane0_reg_data[DP_TX_SWING_REG_CNT];
int dp_lane1_swing_reg_cnt;
- int lane1_reg_data[DP_TX_SWING_REG_CNT];
+ u8 lane1_reg_data[DP_TX_SWING_REG_CNT];
u32 low_power_mode;
struct device_node *mipi_host_node;
};
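The anx7625 fix stores the per-lane swing values as u8 rather than int: the hardware registers are 8-bit, of_property_read_u8_array() parses the DT bytes directly into them, and the '& 0xFF' masking on every write becomes unnecessary. A small sketch of the storage difference (plain C, illustrative values):

#include <stdint.h>
#include <stdio.h>

#define REG_CNT 4

int main(void)
{
	/* Before: 4 bytes per entry, masked down on every register write. */
	int wide[REG_CNT] = { 0x1FF, 0x22, 0x33, 0x44 };
	/* After: one byte per entry, already register-sized. */
	uint8_t narrow[REG_CNT] = { 0xFF, 0x22, 0x33, 0x44 };

	for (int i = 0; i < REG_CNT; i++)
		printf("reg%d: wide=0x%02x narrow=0x%02x\n",
		       i, wide[i] & 0xFF, narrow[i]);

	printf("storage: %zu vs %zu bytes\n", sizeof(wide), sizeof(narrow));
	return 0;
}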
diff --git a/drivers/gpu/drm/bridge/chipone-icn6211.c b/drivers/gpu/drm/bridge/chipone-icn6211.c
index b72aa3f3b5c2..47dea657a752 100644
--- a/drivers/gpu/drm/bridge/chipone-icn6211.c
+++ b/drivers/gpu/drm/bridge/chipone-icn6211.c
@@ -14,6 +14,7 @@
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#define VENDOR_ID 0x00
@@ -134,6 +135,7 @@
struct chipone {
struct device *dev;
+ struct regmap *regmap;
struct i2c_client *client;
struct drm_bridge bridge;
struct drm_display_mode mode;
@@ -146,6 +148,77 @@ struct chipone {
bool interface_i2c;
};
+static const struct regmap_range chipone_dsi_readable_ranges[] = {
+ regmap_reg_range(VENDOR_ID, VERSION_ID),
+ regmap_reg_range(FIRMWARE_VERSION, PLL_SSC_OFFSET(3)),
+ regmap_reg_range(GPIO_OEN, MIPI_ULPS_CTRL),
+ regmap_reg_range(MIPI_CLK_CHK_VAR, MIPI_T_TA_SURE_PRE),
+ regmap_reg_range(MIPI_T_LPX_SET, MIPI_INIT_TIME_H),
+ regmap_reg_range(MIPI_T_CLK_TERM_EN, MIPI_T_CLK_SETTLE),
+ regmap_reg_range(MIPI_TO_HS_RX_L, MIPI_PHY_(5)),
+ regmap_reg_range(MIPI_PD_RX, MIPI_RST_NUM),
+ regmap_reg_range(MIPI_DBG_SET_(0), MIPI_DBG_SET_(9)),
+ regmap_reg_range(MIPI_DBG_SEL, MIPI_ATE_STATUS_(1)),
+};
+
+static const struct regmap_access_table chipone_dsi_readable_table = {
+ .yes_ranges = chipone_dsi_readable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(chipone_dsi_readable_ranges),
+};
+
+static const struct regmap_range chipone_dsi_writeable_ranges[] = {
+ regmap_reg_range(CONFIG_FINISH, PLL_SSC_OFFSET(3)),
+ regmap_reg_range(GPIO_OEN, MIPI_ULPS_CTRL),
+ regmap_reg_range(MIPI_CLK_CHK_VAR, MIPI_T_TA_SURE_PRE),
+ regmap_reg_range(MIPI_T_LPX_SET, MIPI_INIT_TIME_H),
+ regmap_reg_range(MIPI_T_CLK_TERM_EN, MIPI_T_CLK_SETTLE),
+ regmap_reg_range(MIPI_TO_HS_RX_L, MIPI_PHY_(5)),
+ regmap_reg_range(MIPI_PD_RX, MIPI_RST_NUM),
+ regmap_reg_range(MIPI_DBG_SET_(0), MIPI_DBG_SET_(9)),
+ regmap_reg_range(MIPI_DBG_SEL, MIPI_ATE_STATUS_(1)),
+};
+
+static const struct regmap_access_table chipone_dsi_writeable_table = {
+ .yes_ranges = chipone_dsi_writeable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(chipone_dsi_writeable_ranges),
+};
+
+static const struct regmap_config chipone_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .rd_table = &chipone_dsi_readable_table,
+ .wr_table = &chipone_dsi_writeable_table,
+ .cache_type = REGCACHE_RBTREE,
+ .max_register = MIPI_ATE_STATUS_(1),
+};
+
+static int chipone_dsi_read(void *context,
+ const void *reg, size_t reg_size,
+ void *val, size_t val_size)
+{
+ struct mipi_dsi_device *dsi = context;
+ const u16 reg16 = (val_size << 8) | *(u8 *)reg;
+ int ret;
+
+ ret = mipi_dsi_generic_read(dsi, &reg16, 2, val, val_size);
+
+ return ret == val_size ? 0 : -EINVAL;
+}
+
+static int chipone_dsi_write(void *context, const void *data, size_t count)
+{
+ struct mipi_dsi_device *dsi = context;
+
+ return mipi_dsi_generic_write(dsi, data, 2);
+}
+
+static const struct regmap_bus chipone_dsi_regmap_bus = {
+ .read = chipone_dsi_read,
+ .write = chipone_dsi_write,
+ .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
+ .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
+};
+
static inline struct chipone *bridge_to_chipone(struct drm_bridge *bridge)
{
return container_of(bridge, struct chipone, bridge);
@@ -153,18 +226,16 @@ static inline struct chipone *bridge_to_chipone(struct drm_bridge *bridge)
static void chipone_readb(struct chipone *icn, u8 reg, u8 *val)
{
- if (icn->interface_i2c)
- *val = i2c_smbus_read_byte_data(icn->client, reg);
- else
- mipi_dsi_generic_read(icn->dsi, (u8[]){reg, 1}, 2, val, 1);
+ int ret, pval;
+
+ ret = regmap_read(icn->regmap, reg, &pval);
+
+ *val = ret ? 0 : pval & 0xff;
}
static int chipone_writeb(struct chipone *icn, u8 reg, u8 val)
{
- if (icn->interface_i2c)
- return i2c_smbus_write_byte_data(icn->client, reg, val);
- else
- return mipi_dsi_generic_write(icn->dsi, (u8[]){reg, val}, 2);
+ return regmap_write(icn->regmap, reg, val);
}
static void chipone_configure_pll(struct chipone *icn,
@@ -324,6 +395,11 @@ static void chipone_atomic_enable(struct drm_bridge *bridge,
/* dsi specific sequence */
chipone_writeb(icn, SYNC_EVENT_DLY, 0x80);
chipone_writeb(icn, HFP_MIN, hfp & 0xff);
+
+ /* DSI data lane count */
+ chipone_writeb(icn, DSI_CTRL,
+ DSI_CTRL_UNKNOWN | DSI_CTRL_DSI_LANES(icn->dsi->lanes - 1));
+
chipone_writeb(icn, MIPI_PD_CK_LANE, 0xa0);
chipone_writeb(icn, PLL_CTRL(12), 0xff);
chipone_writeb(icn, MIPI_PN_SWAP, 0x00);
@@ -409,9 +485,23 @@ static void chipone_mode_set(struct drm_bridge *bridge,
static int chipone_dsi_attach(struct chipone *icn)
{
struct mipi_dsi_device *dsi = icn->dsi;
- int ret;
+ struct device *dev = icn->dev;
+ struct device_node *endpoint;
+ int dsi_lanes, ret;
+
+ endpoint = of_graph_get_endpoint_by_regs(dev->of_node, 0, 0);
+ dsi_lanes = of_property_count_u32_elems(endpoint, "data-lanes");
+ of_node_put(endpoint);
+
+ /*
+ * If the 'data-lanes' property does not exist in DT or is invalid,
+ * default to previously hard-coded behavior, which was 4 data lanes.
+ */
+ if (dsi_lanes >= 1 && dsi_lanes <= 4)
+ icn->dsi->lanes = dsi_lanes;
+ else
+ icn->dsi->lanes = 4;
- dsi->lanes = 4;
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET;
@@ -591,6 +681,11 @@ static int chipone_dsi_probe(struct mipi_dsi_device *dsi)
if (ret)
return ret;
+ icn->regmap = devm_regmap_init(dev, &chipone_dsi_regmap_bus,
+ dsi, &chipone_regmap_config);
+ if (IS_ERR(icn->regmap))
+ return PTR_ERR(icn->regmap);
+
icn->interface_i2c = false;
icn->dsi = dsi;
@@ -616,6 +711,10 @@ static int chipone_i2c_probe(struct i2c_client *client,
if (ret)
return ret;
+ icn->regmap = devm_regmap_init_i2c(client, &chipone_regmap_config);
+ if (IS_ERR(icn->regmap))
+ return PTR_ERR(icn->regmap);
+
icn->interface_i2c = true;
icn->client = client;
dev_set_drvdata(dev, icn);
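The chipone conversion hides the I2C-vs-DSI transport difference behind regmap: both probe paths construct a regmap (devm_regmap_init_i2c() or devm_regmap_init() with the custom chipone_dsi_regmap_bus), and chipone_readb()/chipone_writeb() no longer branch on the interface. A userspace model of the same "bus behind function pointers" idea (illustrative names, not the regmap API):

#include <stdint.h>
#include <stdio.h>

/* A bus is just read/write callbacks plus an opaque context. */
struct bus {
	int (*read)(void *ctx, uint8_t reg, uint8_t *val);
	int (*write)(void *ctx, uint8_t reg, uint8_t val);
	void *ctx;
};

static int fake_i2c_read(void *ctx, uint8_t reg, uint8_t *val)
{
	(void)ctx;
	*val = reg ^ 0xff;	/* pretend register content */
	return 0;
}

static int fake_i2c_write(void *ctx, uint8_t reg, uint8_t val)
{
	(void)ctx;
	printf("i2c write reg 0x%02x = 0x%02x\n", reg, val);
	return 0;
}

/* Callers go through the bus and never branch on the transport. */
static int bus_readb(struct bus *b, uint8_t reg, uint8_t *val)
{
	return b->read(b->ctx, reg, val);
}

int main(void)
{
	struct bus b = { fake_i2c_read, fake_i2c_write, NULL };
	uint8_t v;

	bus_readb(&b, 0x10, &v);
	printf("read 0x%02x\n", v);
	b.write(b.ctx, 0x10, v);
	return 0;
}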
diff --git a/drivers/gpu/drm/bridge/display-connector.c b/drivers/gpu/drm/bridge/display-connector.c
index d24f5b90feab..e4d52a7e31b7 100644
--- a/drivers/gpu/drm/bridge/display-connector.c
+++ b/drivers/gpu/drm/bridge/display-connector.c
@@ -24,6 +24,7 @@ struct display_connector {
int hpd_irq;
struct regulator *dp_pwr;
+ struct gpio_desc *ddc_en;
};
static inline struct display_connector *
@@ -345,6 +346,17 @@ static int display_connector_probe(struct platform_device *pdev)
}
}
+ /* enable DDC */
+ if (type == DRM_MODE_CONNECTOR_HDMIA) {
+ conn->ddc_en = devm_gpiod_get_optional(&pdev->dev, "ddc-en",
+ GPIOD_OUT_HIGH);
+
+ if (IS_ERR(conn->ddc_en)) {
+ dev_err(&pdev->dev, "Couldn't get ddc-en gpio\n");
+ return PTR_ERR(conn->ddc_en);
+ }
+ }
+
conn->bridge.funcs = &display_connector_bridge_funcs;
conn->bridge.of_node = pdev->dev.of_node;
@@ -373,6 +385,9 @@ static int display_connector_remove(struct platform_device *pdev)
{
struct display_connector *conn = platform_get_drvdata(pdev);
+ if (conn->ddc_en)
+ gpiod_set_value(conn->ddc_en, 0);
+
if (conn->dp_pwr)
regulator_disable(conn->dp_pwr);
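The LT9211 driver below maps a 16-bit register space onto an 8-bit I2C device through a regmap_range_cfg: the high byte of the virtual address is written to the page-select register 0xff, and the low byte is the in-page offset. A userspace model of that paging scheme (illustrative, not the regmap implementation):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SELECT 0xff

static uint8_t current_page;
static uint8_t regs[0xdb][0x100];	/* pages 0x00..0xda */

/* 8-bit bus primitive: writing PAGE_SELECT switches the page. */
static void bus_write(uint8_t reg, uint8_t val)
{
	if (reg == PAGE_SELECT)
		current_page = val;
	else
		regs[current_page][reg] = val;
}

static uint8_t bus_read(uint8_t reg)
{
	return regs[current_page][reg];
}

/* 16-bit virtual access: page in the high byte, offset in the low byte. */
static void paged_write(uint16_t vreg, uint8_t val)
{
	bus_write(PAGE_SELECT, vreg >> 8);
	bus_write(vreg & 0xff, val);
}

static uint8_t paged_read(uint16_t vreg)
{
	bus_write(PAGE_SELECT, vreg >> 8);
	return bus_read(vreg & 0xff);
}

int main(void)
{
	paged_write(0x8100, 0x18);	/* e.g. REG_CHIPID0 */
	printf("0x8100 = 0x%02x\n", paged_read(0x8100));
	return 0;
}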
diff --git a/drivers/gpu/drm/bridge/lontium-lt9211.c b/drivers/gpu/drm/bridge/lontium-lt9211.c
new file mode 100644
index 000000000000..e92821fbc639
--- /dev/null
+++ b/drivers/gpu/drm/bridge/lontium-lt9211.c
@@ -0,0 +1,802 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Lontium LT9211 bridge driver
+ *
+ * LT9211 is capable of converting:
+ * 2xDSI/2xLVDS/1xDPI -> 2xDSI/2xLVDS/1xDPI
+ * Currently supported is:
+ * 1xDSI -> 1xLVDS
+ *
+ * Copyright (C) 2022 Marek Vasut <marex@denx.de>
+ */
+
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+
+#define REG_PAGE_CONTROL 0xff
+#define REG_CHIPID0 0x8100
+#define REG_CHIPID0_VALUE 0x18
+#define REG_CHIPID1 0x8101
+#define REG_CHIPID1_VALUE 0x01
+#define REG_CHIPID2 0x8102
+#define REG_CHIPID2_VALUE 0xe3
+
+#define REG_DSI_LANE 0xd000
+/* DSI lane count - 0 means 4 lanes; 1, 2, 3 mean 1, 2, 3 lanes. */
+#define REG_DSI_LANE_COUNT(n) ((n) & 3)
+
+struct lt9211 {
+ struct drm_bridge bridge;
+ struct device *dev;
+ struct regmap *regmap;
+ struct mipi_dsi_device *dsi;
+ struct drm_bridge *panel_bridge;
+ struct gpio_desc *reset_gpio;
+ struct regulator *vccio;
+ bool lvds_dual_link;
+ bool lvds_dual_link_even_odd_swap;
+};
+
+static const struct regmap_range lt9211_rw_ranges[] = {
+ regmap_reg_range(0xff, 0xff),
+ regmap_reg_range(0x8100, 0x816b),
+ regmap_reg_range(0x8200, 0x82aa),
+ regmap_reg_range(0x8500, 0x85ff),
+ regmap_reg_range(0x8600, 0x86a0),
+ regmap_reg_range(0x8700, 0x8746),
+ regmap_reg_range(0xd000, 0xd0a7),
+ regmap_reg_range(0xd400, 0xd42c),
+ regmap_reg_range(0xd800, 0xd838),
+ regmap_reg_range(0xd9c0, 0xd9d5),
+};
+
+static const struct regmap_access_table lt9211_rw_table = {
+ .yes_ranges = lt9211_rw_ranges,
+ .n_yes_ranges = ARRAY_SIZE(lt9211_rw_ranges),
+};
+
+static const struct regmap_range_cfg lt9211_range = {
+ .name = "lt9211",
+ .range_min = 0x0000,
+ .range_max = 0xda00,
+ .selector_reg = REG_PAGE_CONTROL,
+ .selector_mask = 0xff,
+ .selector_shift = 0,
+ .window_start = 0,
+ .window_len = 0x100,
+};
+
+static const struct regmap_config lt9211_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .rd_table = &lt9211_rw_table,
+ .wr_table = &lt9211_rw_table,
+ .volatile_table = &lt9211_rw_table,
+ .ranges = &lt9211_range,
+ .num_ranges = 1,
+ .cache_type = REGCACHE_RBTREE,
+ .max_register = 0xda00,
+};
+
+static struct lt9211 *bridge_to_lt9211(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct lt9211, bridge);
+}
+
+static int lt9211_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct lt9211 *ctx = bridge_to_lt9211(bridge);
+
+ return drm_bridge_attach(bridge->encoder, ctx->panel_bridge,
+ &ctx->bridge, flags);
+}
+
+static int lt9211_read_chipid(struct lt9211 *ctx)
+{
+ u8 chipid[3];
+ int ret;
+
+ /* Read Chip ID registers and verify the chip can communicate. */
+ ret = regmap_bulk_read(ctx->regmap, REG_CHIPID0, chipid, 3);
+ if (ret < 0) {
+ dev_err(ctx->dev, "Failed to read Chip ID: %d\n", ret);
+ return ret;
+ }
+
+ /* Test for known Chip ID. */
+ if (chipid[0] != REG_CHIPID0_VALUE || chipid[1] != REG_CHIPID1_VALUE ||
+ chipid[2] != REG_CHIPID2_VALUE) {
+ dev_err(ctx->dev, "Unknown Chip ID: 0x%02x 0x%02x 0x%02x\n",
+ chipid[0], chipid[1], chipid[2]);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int lt9211_system_init(struct lt9211 *ctx)
+{
+ const struct reg_sequence lt9211_system_init_seq[] = {
+ { 0x8201, 0x18 },
+ { 0x8606, 0x61 },
+ { 0x8607, 0xa8 },
+ { 0x8714, 0x08 },
+ { 0x8715, 0x00 },
+ { 0x8718, 0x0f },
+ { 0x8722, 0x08 },
+ { 0x8723, 0x00 },
+ { 0x8726, 0x0f },
+ { 0x810b, 0xfe },
+ };
+
+ return regmap_multi_reg_write(ctx->regmap, lt9211_system_init_seq,
+ ARRAY_SIZE(lt9211_system_init_seq));
+}
+
+static int lt9211_configure_rx(struct lt9211 *ctx)
+{
+ const struct reg_sequence lt9211_rx_phy_seq[] = {
+ { 0x8202, 0x44 },
+ { 0x8204, 0xa0 },
+ { 0x8205, 0x22 },
+ { 0x8207, 0x9f },
+ { 0x8208, 0xfc },
+ /* OR with 0xf8 here to enable DSI DN/DP swap. */
+ { 0x8209, 0x01 },
+ { 0x8217, 0x0c },
+ { 0x8633, 0x1b },
+ };
+
+ const struct reg_sequence lt9211_rx_cal_reset_seq[] = {
+ { 0x8120, 0x7f },
+ { 0x8120, 0xff },
+ };
+
+ const struct reg_sequence lt9211_rx_dig_seq[] = {
+ { 0x8630, 0x85 },
+ /* 0x8588: BIT 6 set = MIPI-RX, BIT 4 unset = LVDS-TX */
+ { 0x8588, 0x40 },
+ { 0x85ff, 0xd0 },
+ { REG_DSI_LANE, REG_DSI_LANE_COUNT(ctx->dsi->lanes) },
+ { 0xd002, 0x05 },
+ };
+
+ const struct reg_sequence lt9211_rx_div_reset_seq[] = {
+ { 0x810a, 0xc0 },
+ { 0x8120, 0xbf },
+ };
+
+ const struct reg_sequence lt9211_rx_div_clear_seq[] = {
+ { 0x810a, 0xc1 },
+ { 0x8120, 0xff },
+ };
+
+ int ret;
+
+ ret = regmap_multi_reg_write(ctx->regmap, lt9211_rx_phy_seq,
+ ARRAY_SIZE(lt9211_rx_phy_seq));
+ if (ret)
+ return ret;
+
+ ret = regmap_multi_reg_write(ctx->regmap, lt9211_rx_cal_reset_seq,
+ ARRAY_SIZE(lt9211_rx_cal_reset_seq));
+ if (ret)
+ return ret;
+
+ ret = regmap_multi_reg_write(ctx->regmap, lt9211_rx_dig_seq,
+ ARRAY_SIZE(lt9211_rx_dig_seq));
+ if (ret)
+ return ret;
+
+ ret = regmap_multi_reg_write(ctx->regmap, lt9211_rx_div_reset_seq,
+ ARRAY_SIZE(lt9211_rx_div_reset_seq));
+ if (ret)
+ return ret;
+
+ usleep_range(10000, 15000);
+
+ return regmap_multi_reg_write(ctx->regmap, lt9211_rx_div_clear_seq,
+ ARRAY_SIZE(lt9211_rx_div_clear_seq));
+}
+
+static int lt9211_autodetect_rx(struct lt9211 *ctx,
+ const struct drm_display_mode *mode)
+{
+ u16 width, height;
+ u32 byteclk;
+ u8 buf[5];
+ u8 format;
+ u8 bc[3];
+ int ret;
+
+ /* Measure ByteClock frequency. */
+ ret = regmap_write(ctx->regmap, 0x8600, 0x01);
+ if (ret)
+ return ret;
+
+ /* Give the chip time to lock onto RX stream. */
+ msleep(100);
+
+ /* Read the ByteClock frequency from the chip. */
+ ret = regmap_bulk_read(ctx->regmap, 0x8608, bc, sizeof(bc));
+ if (ret)
+ return ret;
+
+ /* RX ByteClock in kHz */
+ byteclk = ((bc[0] & 0xf) << 16) | (bc[1] << 8) | bc[2];
+
+ /* Width/Height/Format Auto-detection */
+ ret = regmap_bulk_read(ctx->regmap, 0xd082, buf, sizeof(buf));
+ if (ret)
+ return ret;
+
+ width = (buf[0] << 8) | buf[1];
+ height = (buf[3] << 8) | buf[4];
+ format = buf[2] & 0xf;
+
+ if (format == 0x3) { /* YUV422 16bit */
+ width /= 2;
+ } else if (format == 0xa) { /* RGB888 24bit */
+ width /= 3;
+ } else {
+ dev_err(ctx->dev, "Unsupported DSI pixel format 0x%01x\n",
+ format);
+ return -EINVAL;
+ }
+
+ if (width != mode->hdisplay) {
+ dev_err(ctx->dev,
+ "RX: Detected DSI width (%d) does not match mode hdisplay (%d)\n",
+ width, mode->hdisplay);
+ return -EINVAL;
+ }
+
+ if (height != mode->vdisplay) {
+ dev_err(ctx->dev,
+ "RX: Detected DSI height (%d) does not match mode vdisplay (%d)\n",
+ height, mode->vdisplay);
+ return -EINVAL;
+ }
+
+ dev_dbg(ctx->dev, "RX: %dx%d format=0x%01x byteclock=%d kHz\n",
+ width, height, format, byteclk);
+
+ return 0;
+}
+
+static int lt9211_configure_timing(struct lt9211 *ctx,
+ const struct drm_display_mode *mode)
+{
+ const struct reg_sequence lt9211_timing[] = {
+ { 0xd00d, (mode->vtotal >> 8) & 0xff },
+ { 0xd00e, mode->vtotal & 0xff },
+ { 0xd00f, (mode->vdisplay >> 8) & 0xff },
+ { 0xd010, mode->vdisplay & 0xff },
+ { 0xd011, (mode->htotal >> 8) & 0xff },
+ { 0xd012, mode->htotal & 0xff },
+ { 0xd013, (mode->hdisplay >> 8) & 0xff },
+ { 0xd014, mode->hdisplay & 0xff },
+ { 0xd015, (mode->vsync_end - mode->vsync_start) & 0xff },
+ { 0xd016, (mode->hsync_end - mode->hsync_start) & 0xff },
+ { 0xd017, ((mode->vsync_start - mode->vdisplay) >> 8) & 0xff },
+ { 0xd018, (mode->vsync_start - mode->vdisplay) & 0xff },
+ { 0xd019, ((mode->hsync_start - mode->hdisplay) >> 8) & 0xff },
+ { 0xd01a, (mode->hsync_start - mode->hdisplay) & 0xff },
+ };
+
+ return regmap_multi_reg_write(ctx->regmap, lt9211_timing,
+ ARRAY_SIZE(lt9211_timing));
+}
+
+static int lt9211_configure_plls(struct lt9211 *ctx,
+ const struct drm_display_mode *mode)
+{
+ const struct reg_sequence lt9211_pcr_seq[] = {
+ { 0xd026, 0x17 },
+ { 0xd027, 0xc3 },
+ { 0xd02d, 0x30 },
+ { 0xd031, 0x10 },
+ { 0xd023, 0x20 },
+ { 0xd038, 0x02 },
+ { 0xd039, 0x10 },
+ { 0xd03a, 0x20 },
+ { 0xd03b, 0x60 },
+ { 0xd03f, 0x04 },
+ { 0xd040, 0x08 },
+ { 0xd041, 0x10 },
+ { 0x810b, 0xee },
+ { 0x810b, 0xfe },
+ };
+
+ unsigned int pval;
+ int ret;
+
+ /* DeSSC PLL reference clock is 25 MHz XTal. */
+ ret = regmap_write(ctx->regmap, 0x822d, 0x48);
+ if (ret)
+ return ret;
+
+ if (mode->clock < 44000) {
+ ret = regmap_write(ctx->regmap, 0x8235, 0x83);
+ } else if (mode->clock < 88000) {
+ ret = regmap_write(ctx->regmap, 0x8235, 0x82);
+ } else if (mode->clock < 176000) {
+ ret = regmap_write(ctx->regmap, 0x8235, 0x81);
+ } else {
+ dev_err(ctx->dev,
+ "Unsupported mode clock (%d kHz) above 176 MHz.\n",
+ mode->clock);
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ /* Wait for the DeSSC PLL to stabilize. */
+ msleep(100);
+
+ ret = regmap_multi_reg_write(ctx->regmap, lt9211_pcr_seq,
+ ARRAY_SIZE(lt9211_pcr_seq));
+ if (ret)
+ return ret;
+
+ /* PCR stability test takes seconds. */
+ ret = regmap_read_poll_timeout(ctx->regmap, 0xd087, pval, pval & 0x8,
+ 20000, 10000000);
+ if (ret)
+ dev_err(ctx->dev, "PCR unstable, ret=%i\n", ret);
+
+ return ret;
+}
+
+static int lt9211_configure_tx(struct lt9211 *ctx, bool jeida,
+ bool bpp24, bool de)
+{
+ const struct reg_sequence system_lt9211_tx_phy_seq[] = {
+ /* DPI output disable */
+ { 0x8262, 0x00 },
+ /* BIT(7) is LVDS dual-port */
+ { 0x823b, 0x38 | (ctx->lvds_dual_link ? BIT(7) : 0) },
+ { 0x823e, 0x92 },
+ { 0x823f, 0x48 },
+ { 0x8240, 0x31 },
+ { 0x8243, 0x80 },
+ { 0x8244, 0x00 },
+ { 0x8245, 0x00 },
+ { 0x8249, 0x00 },
+ { 0x824a, 0x01 },
+ { 0x824e, 0x00 },
+ { 0x824f, 0x00 },
+ { 0x8250, 0x00 },
+ { 0x8253, 0x00 },
+ { 0x8254, 0x01 },
+ /* LVDS channel order, Odd:Even 0x10..A:B, 0x40..B:A */
+ { 0x8646, ctx->lvds_dual_link_even_odd_swap ? 0x40 : 0x10 },
+ { 0x8120, 0x7b },
+ { 0x816b, 0xff },
+ };
+
+ const struct reg_sequence system_lt9211_tx_dig_seq[] = {
+ { 0x8559, 0x40 | (jeida ? BIT(7) : 0) |
+ (de ? BIT(5) : 0) | (bpp24 ? BIT(4) : 0) },
+ { 0x855a, 0xaa },
+ { 0x855b, 0xaa },
+ { 0x855c, ctx->lvds_dual_link ? BIT(0) : 0 },
+ { 0x85a1, 0x77 },
+ { 0x8640, 0x40 },
+ { 0x8641, 0x34 },
+ { 0x8642, 0x10 },
+ { 0x8643, 0x23 },
+ { 0x8644, 0x41 },
+ { 0x8645, 0x02 },
+ };
+
+ const struct reg_sequence system_lt9211_tx_pll_seq[] = {
+ /* TX PLL power down */
+ { 0x8236, 0x01 },
+ { 0x8237, ctx->lvds_dual_link ? 0x2a : 0x29 },
+ { 0x8238, 0x06 },
+ { 0x8239, 0x30 },
+ { 0x823a, 0x8e },
+ { 0x8737, 0x14 },
+ { 0x8713, 0x00 },
+ { 0x8713, 0x80 },
+ };
+
+ unsigned int pval;
+ int ret;
+
+ ret = regmap_multi_reg_write(ctx->regmap, system_lt9211_tx_phy_seq,
+ ARRAY_SIZE(system_lt9211_tx_phy_seq));
+ if (ret)
+ return ret;
+
+ ret = regmap_multi_reg_write(ctx->regmap, system_lt9211_tx_dig_seq,
+ ARRAY_SIZE(system_lt9211_tx_dig_seq));
+ if (ret)
+ return ret;
+
+ ret = regmap_multi_reg_write(ctx->regmap, system_lt9211_tx_pll_seq,
+ ARRAY_SIZE(system_lt9211_tx_pll_seq));
+ if (ret)
+ return ret;
+
+ ret = regmap_read_poll_timeout(ctx->regmap, 0x871f, pval, pval & 0x80,
+ 10000, 1000000);
+ if (ret) {
+ dev_err(ctx->dev, "TX PLL unstable, ret=%i\n", ret);
+ return ret;
+ }
+
+ ret = regmap_read_poll_timeout(ctx->regmap, 0x8720, pval, pval & 0x80,
+ 10000, 1000000);
+ if (ret) {
+ dev_err(ctx->dev, "TX PLL unstable, ret=%i\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void lt9211_atomic_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
+{
+ struct lt9211 *ctx = bridge_to_lt9211(bridge);
+ struct drm_atomic_state *state = old_bridge_state->base.state;
+ const struct drm_bridge_state *bridge_state;
+ const struct drm_crtc_state *crtc_state;
+ const struct drm_display_mode *mode;
+ struct drm_connector *connector;
+ struct drm_crtc *crtc;
+ bool lvds_format_24bpp;
+ bool lvds_format_jeida;
+ u32 bus_flags;
+ int ret;
+
+ ret = regulator_enable(ctx->vccio);
+ if (ret) {
+ dev_err(ctx->dev, "Failed to enable vccio: %d\n", ret);
+ return;
+ }
+
+ /* Deassert reset */
+ gpiod_set_value(ctx->reset_gpio, 1);
+ usleep_range(20000, 21000); /* Very long post-reset delay. */
+
+ /* Get the LVDS format from the bridge state. */
+ bridge_state = drm_atomic_get_new_bridge_state(state, bridge);
+ bus_flags = bridge_state->output_bus_cfg.flags;
+
+ switch (bridge_state->output_bus_cfg.format) {
+ case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
+ lvds_format_24bpp = false;
+ lvds_format_jeida = true;
+ break;
+ case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA:
+ lvds_format_24bpp = true;
+ lvds_format_jeida = true;
+ break;
+ case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG:
+ lvds_format_24bpp = true;
+ lvds_format_jeida = false;
+ break;
+ default:
+ /*
+ * Some bridges still don't set the correct LVDS bus
+ * pixel format; fall back to the SPWG 24bpp default
+ * format until those are fixed.
+ */
+ lvds_format_24bpp = true;
+ lvds_format_jeida = false;
+ dev_warn(ctx->dev,
+ "Unsupported LVDS bus format 0x%04x, please check output bridge driver. Falling back to SPWG24.\n",
+ bridge_state->output_bus_cfg.format);
+ break;
+ }
+
+ /*
+ * Retrieve the CRTC adjusted mode. This requires a little dance to go
+ * from the bridge to the encoder, to the connector and to the CRTC.
+ */
+ connector = drm_atomic_get_new_connector_for_encoder(state,
+ bridge->encoder);
+ crtc = drm_atomic_get_new_connector_state(state, connector)->crtc;
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ mode = &crtc_state->adjusted_mode;
+
+ ret = lt9211_read_chipid(ctx);
+ if (ret)
+ return;
+
+ ret = lt9211_system_init(ctx);
+ if (ret)
+ return;
+
+ ret = lt9211_configure_rx(ctx);
+ if (ret)
+ return;
+
+ ret = lt9211_autodetect_rx(ctx, mode);
+ if (ret)
+ return;
+
+ ret = lt9211_configure_timing(ctx, mode);
+ if (ret)
+ return;
+
+ ret = lt9211_configure_plls(ctx, mode);
+ if (ret)
+ return;
+
+ ret = lt9211_configure_tx(ctx, lvds_format_jeida, lvds_format_24bpp,
+ bus_flags & DRM_BUS_FLAG_DE_HIGH);
+ if (ret)
+ return;
+
+ dev_dbg(ctx->dev, "LT9211 enabled.\n");
+}
+
+static void lt9211_atomic_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
+{
+ struct lt9211 *ctx = bridge_to_lt9211(bridge);
+ int ret;
+
+ /*
+ * Put the chip in reset: pull the nRST line low
+ * and ensure the lengthy 10 ms reset-low timing.
+ */
+ gpiod_set_value(ctx->reset_gpio, 0);
+ usleep_range(10000, 11000); /* Very long reset duration. */
+
+ ret = regulator_disable(ctx->vccio);
+ if (ret)
+ dev_err(ctx->dev, "Failed to disable vccio: %d\n", ret);
+
+ regcache_mark_dirty(ctx->regmap);
+}
+
+static enum drm_mode_status
+lt9211_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode)
+{
+ /* LVDS output clock range 25..176 MHz */
+ if (mode->clock < 25000)
+ return MODE_CLOCK_LOW;
+ if (mode->clock > 176000)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+#define MAX_INPUT_SEL_FORMATS 1
+
+static u32 *
+lt9211_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts)
+{
+ u32 *input_fmts;
+
+ *num_input_fmts = 0;
+
+ input_fmts = kcalloc(MAX_INPUT_SEL_FORMATS, sizeof(*input_fmts),
+ GFP_KERNEL);
+ if (!input_fmts)
+ return NULL;
+
+ /* This is the DSI-end bus format */
+ input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X24;
+ *num_input_fmts = 1;
+
+ return input_fmts;
+}
+
+static const struct drm_bridge_funcs lt9211_funcs = {
+ .attach = lt9211_attach,
+ .mode_valid = lt9211_mode_valid,
+ .atomic_enable = lt9211_atomic_enable,
+ .atomic_disable = lt9211_atomic_disable,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_get_input_bus_fmts = lt9211_atomic_get_input_bus_fmts,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+};
+
+static int lt9211_parse_dt(struct lt9211 *ctx)
+{
+ struct device_node *port2, *port3;
+ struct drm_bridge *panel_bridge;
+ struct device *dev = ctx->dev;
+ struct drm_panel *panel;
+ int dual_link;
+ int ret;
+
+ ctx->vccio = devm_regulator_get(dev, "vccio");
+ if (IS_ERR(ctx->vccio))
+ return dev_err_probe(dev, PTR_ERR(ctx->vccio),
+ "Failed to get supply 'vccio'\n");
+
+ ctx->lvds_dual_link = false;
+ ctx->lvds_dual_link_even_odd_swap = false;
+
+ port2 = of_graph_get_port_by_id(dev->of_node, 2);
+ port3 = of_graph_get_port_by_id(dev->of_node, 3);
+ dual_link = drm_of_lvds_get_dual_link_pixel_order(port2, port3);
+ of_node_put(port2);
+ of_node_put(port3);
+
+ if (dual_link == DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS) {
+ ctx->lvds_dual_link = true;
+ /* Odd pixels to LVDS Channel A, even pixels to B */
+ ctx->lvds_dual_link_even_odd_swap = false;
+ } else if (dual_link == DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS) {
+ ctx->lvds_dual_link = true;
+ /* Even pixels to LVDS Channel A, odd pixels to B */
+ ctx->lvds_dual_link_even_odd_swap = true;
+ }
+
+ ret = drm_of_find_panel_or_bridge(dev->of_node, 2, 0, &panel, &panel_bridge);
+ if (ret < 0)
+ return ret;
+ if (panel) {
+ panel_bridge = devm_drm_panel_bridge_add(dev, panel);
+ if (IS_ERR(panel_bridge))
+ return PTR_ERR(panel_bridge);
+ }
+
+ ctx->panel_bridge = panel_bridge;
+
+ return 0;
+}
+
+static int lt9211_host_attach(struct lt9211 *ctx)
+{
+ const struct mipi_dsi_device_info info = {
+ .type = "lt9211",
+ .channel = 0,
+ .node = NULL,
+ };
+ struct device *dev = ctx->dev;
+ struct device_node *host_node;
+ struct device_node *endpoint;
+ struct mipi_dsi_device *dsi;
+ struct mipi_dsi_host *host;
+ int dsi_lanes;
+ int ret;
+
+ endpoint = of_graph_get_endpoint_by_regs(dev->of_node, 0, -1);
+ dsi_lanes = of_property_count_u32_elems(endpoint, "data-lanes");
+ host_node = of_graph_get_remote_port_parent(endpoint);
+ host = of_find_mipi_dsi_host_by_node(host_node);
+ of_node_put(host_node);
+ of_node_put(endpoint);
+
+ if (!host)
+ return -EPROBE_DEFER;
+
+ if (dsi_lanes < 0 || dsi_lanes > 4)
+ return -EINVAL;
+
+ dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
+ if (IS_ERR(dsi))
+ return dev_err_probe(dev, PTR_ERR(dsi),
+ "failed to create dsi device\n");
+
+ ctx->dsi = dsi;
+
+ dsi->lanes = dsi_lanes;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_MODE_VIDEO_HSE;
+
+ ret = devm_mipi_dsi_attach(dev, dsi);
+ if (ret < 0) {
+ dev_err(dev, "failed to attach dsi to host: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int lt9211_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct lt9211 *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->dev = dev;
+
+ /*
+ * Put the chip in reset: pull the nRST line low
+ * and ensure the lengthy 10 ms reset-low timing.
+ */
+ ctx->reset_gpio = devm_gpiod_get_optional(ctx->dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset_gpio))
+ return PTR_ERR(ctx->reset_gpio);
+
+ usleep_range(10000, 11000); /* Very long reset duration. */
+
+ ret = lt9211_parse_dt(ctx);
+ if (ret)
+ return ret;
+
+ ctx->regmap = devm_regmap_init_i2c(client, &lt9211_regmap_config);
+ if (IS_ERR(ctx->regmap))
+ return PTR_ERR(ctx->regmap);
+
+ dev_set_drvdata(dev, ctx);
+ i2c_set_clientdata(client, ctx);
+
+ ctx->bridge.funcs = &lt9211_funcs;
+ ctx->bridge.of_node = dev->of_node;
+ drm_bridge_add(&ctx->bridge);
+
+ ret = lt9211_host_attach(ctx);
+ if (ret)
+ drm_bridge_remove(&ctx->bridge);
+
+ return ret;
+}
+
+static int lt9211_remove(struct i2c_client *client)
+{
+ struct lt9211 *ctx = i2c_get_clientdata(client);
+
+ drm_bridge_remove(&ctx->bridge);
+
+ return 0;
+}
+
+static struct i2c_device_id lt9211_id[] = {
+ { "lontium,lt9211" },
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, lt9211_id);
+
+static const struct of_device_id lt9211_match_table[] = {
+ { .compatible = "lontium,lt9211" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, lt9211_match_table);
+
+static struct i2c_driver lt9211_driver = {
+ .probe = lt9211_probe,
+ .remove = lt9211_remove,
+ .id_table = lt9211_id,
+ .driver = {
+ .name = "lt9211",
+ .of_match_table = lt9211_match_table,
+ },
+};
+module_i2c_driver(lt9211_driver);
+
+MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
+MODULE_DESCRIPTION("Lontium LT9211 DSI/LVDS/DPI bridge driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
index ff1c37b2e6e5..0ee563eb2b6f 100644
--- a/drivers/gpu/drm/bridge/panel.c
+++ b/drivers/gpu/drm/bridge/panel.c
@@ -83,8 +83,11 @@ static int panel_bridge_attach(struct drm_bridge *bridge,
drm_connector_attach_encoder(&panel_bridge->connector,
bridge->encoder);
- if (connector->funcs->reset)
- connector->funcs->reset(connector);
+ if (bridge->dev->registered) {
+ if (connector->funcs->reset)
+ connector->funcs->reset(connector);
+ drm_connector_register(connector);
+ }
return 0;
}
diff --git a/drivers/gpu/drm/bridge/synopsys/Kconfig b/drivers/gpu/drm/bridge/synopsys/Kconfig
index 21a1be3ced0f..a4a31b669b65 100644
--- a/drivers/gpu/drm/bridge/synopsys/Kconfig
+++ b/drivers/gpu/drm/bridge/synopsys/Kconfig
@@ -25,6 +25,16 @@ config DRM_DW_HDMI_I2S_AUDIO
Support the I2S Audio interface which is part of the Synopsys
Designware HDMI block.
+config DRM_DW_HDMI_GP_AUDIO
+ tristate "Synopsys Designware GP Audio interface"
+ depends on DRM_DW_HDMI && SND
+ select SND_PCM
+ select SND_PCM_ELD
+ select SND_PCM_IEC958
+ help
+ Support the GP Audio interface which is part of the Synopsys
+ Designware HDMI block.
+
config DRM_DW_HDMI_CEC
tristate "Synopsis Designware CEC interface"
depends on DRM_DW_HDMI
diff --git a/drivers/gpu/drm/bridge/synopsys/Makefile b/drivers/gpu/drm/bridge/synopsys/Makefile
index 91d746ad5de1..ce715562e9e5 100644
--- a/drivers/gpu/drm/bridge/synopsys/Makefile
+++ b/drivers/gpu/drm/bridge/synopsys/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_DRM_DW_HDMI) += dw-hdmi.o
obj-$(CONFIG_DRM_DW_HDMI_AHB_AUDIO) += dw-hdmi-ahb-audio.o
+obj-$(CONFIG_DRM_DW_HDMI_GP_AUDIO) += dw-hdmi-gp-audio.o
obj-$(CONFIG_DRM_DW_HDMI_I2S_AUDIO) += dw-hdmi-i2s-audio.o
obj-$(CONFIG_DRM_DW_HDMI_CEC) += dw-hdmi-cec.o
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-gp-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-gp-audio.c
new file mode 100644
index 000000000000..11ea1c84eb35
--- /dev/null
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-gp-audio.c
@@ -0,0 +1,199 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * dw-hdmi-gp-audio.c
+ *
+ * Copyright 2020-2022 NXP
+ */
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <drm/bridge/dw_hdmi.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_connector.h>
+
+#include <sound/hdmi-codec.h>
+#include <sound/asoundef.h>
+#include <sound/core.h>
+#include <sound/initval.h>
+#include <sound/pcm.h>
+#include <sound/pcm_drm_eld.h>
+#include <sound/pcm_iec958.h>
+#include <sound/dmaengine_pcm.h>
+
+#include "dw-hdmi-audio.h"
+
+#define DRIVER_NAME "dw-hdmi-gp-audio"
+#define DRV_NAME "hdmi-gp-audio"
+
+struct snd_dw_hdmi {
+ struct dw_hdmi_audio_data data;
+ struct platform_device *audio_pdev;
+ unsigned int pos;
+};
+
+struct dw_hdmi_channel_conf {
+ u8 conf1;
+ u8 ca;
+};
+
+/*
+ * The default mapping of ALSA channels to HDMI channels and speaker
+ * allocation bits. Note that we can't do channel remapping here -
+ * channels must be in the same order.
+ *
+ * Mappings for alsa-lib pcm/surround*.conf files:
+ *
+ *                Front  Sur4.0  Sur4.1  Sur5.0  Sur5.1  Sur7.1
+ * Channels         2      4       6       6       6       8
+ *
+ * Our mapping from ALSA channel to CEA-861-D speaker name and HDMI channel:
+ *
+ *                       Number of ALSA channels
+ * ALSA Channel     2      3      4      5      6      7      8
+ * 0              FL:0     =      =      =      =      =      =
+ * 1              FR:1     =      =      =      =      =      =
+ * 2                     FC:3   RL:4   LFE:2    =      =      =
+ * 3                            RR:5   RL:4   FC:3     =      =
+ * 4                                   RR:5   RL:4     =      =
+ * 5                                          RR:5     =      =
+ * 6                                                 RC:6     =
+ * 7                                               RLC/FRC RLC/FRC
+ */
+static struct dw_hdmi_channel_conf default_hdmi_channel_config[7] = {
+ { 0x03, 0x00 }, /* FL,FR */
+ { 0x0b, 0x02 }, /* FL,FR,FC */
+ { 0x33, 0x08 }, /* FL,FR,RL,RR */
+ { 0x37, 0x09 }, /* FL,FR,LFE,RL,RR */
+ { 0x3f, 0x0b }, /* FL,FR,LFE,FC,RL,RR */
+ { 0x7f, 0x0f }, /* FL,FR,LFE,FC,RL,RR,RC */
+ { 0xff, 0x13 }, /* FL,FR,LFE,FC,RL,RR,[FR]RC,[FR]LC */
+};
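
As a worked example of the table above: a 5.1 stream has six ALSA channels, so
audio_hw_params() below indexes entry channels - 2 = 4, i.e. conf1 0x3f (six
channel-enable bits) and CEA CA code 0x0b (FL,FR,LFE,FC,RL,RR). A hedged sketch
of the same lookup with bounds checking (example_ca_for_channels() is
hypothetical):

    static u8 example_ca_for_channels(unsigned int channels)
    {
            /* The table covers 2..8 channels; fall back to stereo. */
            if (channels < 2 || channels > 8)
                    return 0x00;

            return default_hdmi_channel_config[channels - 2].ca;
    }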
+
+static int audio_hw_params(struct device *dev, void *data,
+ struct hdmi_codec_daifmt *daifmt,
+ struct hdmi_codec_params *params)
+{
+ struct snd_dw_hdmi *dw = dev_get_drvdata(dev);
+ int ret = 0;
+ u8 ca;
+
+ dw_hdmi_set_sample_rate(dw->data.hdmi, params->sample_rate);
+
+ ca = default_hdmi_channel_config[params->channels - 2].ca;
+
+ dw_hdmi_set_channel_count(dw->data.hdmi, params->channels);
+ dw_hdmi_set_channel_allocation(dw->data.hdmi, ca);
+
+ dw_hdmi_set_sample_non_pcm(dw->data.hdmi,
+ params->iec.status[0] & IEC958_AES0_NONAUDIO);
+ dw_hdmi_set_sample_width(dw->data.hdmi, params->sample_width);
+
+ return ret;
+}
+
+static void audio_shutdown(struct device *dev, void *data)
+{
+}
+
+static int audio_mute_stream(struct device *dev, void *data,
+ bool enable, int direction)
+{
+ struct snd_dw_hdmi *dw = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (!enable)
+ dw_hdmi_audio_enable(dw->data.hdmi);
+ else
+ dw_hdmi_audio_disable(dw->data.hdmi);
+
+ return ret;
+}
+
+static int audio_get_eld(struct device *dev, void *data,
+ u8 *buf, size_t len)
+{
+ struct dw_hdmi_audio_data *audio = data;
+ u8 *eld;
+
+ eld = audio->get_eld(audio->hdmi);
+ if (eld)
+ memcpy(buf, eld, min_t(size_t, MAX_ELD_BYTES, len));
+ else
+ /* Pass an empty ELD if the connector is not available */
+ memset(buf, 0, len);
+
+ return 0;
+}
+
+static int audio_hook_plugged_cb(struct device *dev, void *data,
+ hdmi_codec_plugged_cb fn,
+ struct device *codec_dev)
+{
+ struct snd_dw_hdmi *dw = dev_get_drvdata(dev);
+
+ return dw_hdmi_set_plugged_cb(dw->data.hdmi, fn, codec_dev);
+}
+
+static const struct hdmi_codec_ops audio_codec_ops = {
+ .hw_params = audio_hw_params,
+ .audio_shutdown = audio_shutdown,
+ .mute_stream = audio_mute_stream,
+ .get_eld = audio_get_eld,
+ .hook_plugged_cb = audio_hook_plugged_cb,
+};
+
+static int snd_dw_hdmi_probe(struct platform_device *pdev)
+{
+ struct dw_hdmi_audio_data *data = pdev->dev.platform_data;
+ struct snd_dw_hdmi *dw;
+
+ const struct hdmi_codec_pdata codec_data = {
+ .i2s = 1,
+ .spdif = 0,
+ .ops = &audio_codec_ops,
+ .max_i2s_channels = 8,
+ .data = data,
+ };
+
+ dw = devm_kzalloc(&pdev->dev, sizeof(*dw), GFP_KERNEL);
+ if (!dw)
+ return -ENOMEM;
+
+ dw->data = *data;
+
+ platform_set_drvdata(pdev, dw);
+
+ dw->audio_pdev = platform_device_register_data(&pdev->dev,
+ HDMI_CODEC_DRV_NAME, 1,
+ &codec_data,
+ sizeof(codec_data));
+
+ return PTR_ERR_OR_ZERO(dw->audio_pdev);
+}
+
+static int snd_dw_hdmi_remove(struct platform_device *pdev)
+{
+ struct snd_dw_hdmi *dw = platform_get_drvdata(pdev);
+
+ platform_device_unregister(dw->audio_pdev);
+
+ return 0;
+}
+
+static struct platform_driver snd_dw_hdmi_driver = {
+ .probe = snd_dw_hdmi_probe,
+ .remove = snd_dw_hdmi_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+};
+
+module_platform_driver(snd_dw_hdmi_driver);
+
+MODULE_AUTHOR("Shengjiu Wang <shengjiu.wang@nxp.com>");
+MODULE_DESCRIPTION("Synopsys Designware HDMI GPA ALSA interface");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index a563460f8d20..f3f82a04c252 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -191,7 +191,10 @@ struct dw_hdmi {
spinlock_t audio_lock;
struct mutex audio_mutex;
+ unsigned int sample_non_pcm;
+ unsigned int sample_width;
unsigned int sample_rate;
+ unsigned int channels;
unsigned int audio_cts;
unsigned int audio_n;
bool audio_enable;
@@ -589,6 +592,8 @@ static unsigned int hdmi_compute_n(unsigned int freq, unsigned long pixel_clk)
n = 4096;
else if (pixel_clk == 74176000 || pixel_clk == 148352000)
n = 11648;
+ else if (pixel_clk == 297000000)
+ n = 3072;
else
n = 4096;
n *= mult;
@@ -601,6 +606,8 @@ static unsigned int hdmi_compute_n(unsigned int freq, unsigned long pixel_clk)
n = 17836;
else if (pixel_clk == 148352000)
n = 8918;
+ else if (pixel_clk == 297000000)
+ n = 4704;
else
n = 6272;
n *= mult;
@@ -615,6 +622,8 @@ static unsigned int hdmi_compute_n(unsigned int freq, unsigned long pixel_clk)
n = 11648;
else if (pixel_clk == 148352000)
n = 5824;
+ else if (pixel_clk == 297000000)
+ n = 5120;
else
n = 6144;
n *= mult;
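
For context, HDMI audio clock regeneration relates N and CTS to the audio and
pixel clocks by 128 * f_audio = f_pixel * N / CTS, i.e. CTS = f_pixel * N /
(128 * f_audio). The 297 MHz entries added above keep CTS integral: for 48 kHz,
N = 5120 gives CTS = 297000000 * 5120 / (128 * 48000) = 247500 exactly.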
@@ -659,8 +668,8 @@ static void hdmi_set_clk_regenerator(struct dw_hdmi *hdmi,
config3 = hdmi_readb(hdmi, HDMI_CONFIG3_ID);
- /* Only compute CTS when using internal AHB audio */
- if (config3 & HDMI_CONFIG3_AHBAUDDMA) {
+ /* Compute CTS when using internal AHB audio or General Parallel audio */
+ if ((config3 & HDMI_CONFIG3_AHBAUDDMA) || (config3 & HDMI_CONFIG3_GPAUD)) {
/*
* Compute the CTS value from the N value. Note that CTS and N
* can be up to 20 bits in total, so we need 64-bit math. Also
@@ -702,6 +711,22 @@ static void hdmi_clk_regenerator_update_pixel_clock(struct dw_hdmi *hdmi)
mutex_unlock(&hdmi->audio_mutex);
}
+void dw_hdmi_set_sample_width(struct dw_hdmi *hdmi, unsigned int width)
+{
+ mutex_lock(&hdmi->audio_mutex);
+ hdmi->sample_width = width;
+ mutex_unlock(&hdmi->audio_mutex);
+}
+EXPORT_SYMBOL_GPL(dw_hdmi_set_sample_width);
+
+void dw_hdmi_set_sample_non_pcm(struct dw_hdmi *hdmi, unsigned int non_pcm)
+{
+ mutex_lock(&hdmi->audio_mutex);
+ hdmi->sample_non_pcm = non_pcm;
+ mutex_unlock(&hdmi->audio_mutex);
+}
+EXPORT_SYMBOL_GPL(dw_hdmi_set_sample_non_pcm);
+
void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate)
{
mutex_lock(&hdmi->audio_mutex);
@@ -717,6 +742,7 @@ void dw_hdmi_set_channel_count(struct dw_hdmi *hdmi, unsigned int cnt)
u8 layout;
mutex_lock(&hdmi->audio_mutex);
+ hdmi->channels = cnt;
/*
* For >2 channel PCM audio, we need to select layout 1
@@ -765,6 +791,89 @@ static u8 *hdmi_audio_get_eld(struct dw_hdmi *hdmi)
return hdmi->curr_conn->eld;
}
+static void dw_hdmi_gp_audio_enable(struct dw_hdmi *hdmi)
+{
+ const struct dw_hdmi_plat_data *pdata = hdmi->plat_data;
+ int sample_freq = 0x2, org_sample_freq = 0xD;
+ int ch_mask = BIT(hdmi->channels) - 1;
+
+ switch (hdmi->sample_rate) {
+ case 32000:
+ sample_freq = 0x03;
+ org_sample_freq = 0x0C;
+ break;
+ case 44100:
+ sample_freq = 0x00;
+ org_sample_freq = 0x0F;
+ break;
+ case 48000:
+ sample_freq = 0x02;
+ org_sample_freq = 0x0D;
+ break;
+ case 88200:
+ sample_freq = 0x08;
+ org_sample_freq = 0x07;
+ break;
+ case 96000:
+ sample_freq = 0x0A;
+ org_sample_freq = 0x05;
+ break;
+ case 176400:
+ sample_freq = 0x0C;
+ org_sample_freq = 0x03;
+ break;
+ case 192000:
+ sample_freq = 0x0E;
+ org_sample_freq = 0x01;
+ break;
+ default:
+ break;
+ }
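
The sample_freq and org_sample_freq values selected above appear to be the IEC
60958 channel-status sampling-frequency code (byte 3) and its bit-inverted
original-sampling-frequency counterpart (byte 4): e.g. 48 kHz is coded 0x2 with
original-frequency code 0xD, matching the defaults at the top of the function.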
+
+ hdmi_set_cts_n(hdmi, hdmi->audio_cts, hdmi->audio_n);
+ hdmi_enable_audio_clk(hdmi, true);
+
+ hdmi_writeb(hdmi, 0x1, HDMI_FC_AUDSCHNLS0);
+ hdmi_writeb(hdmi, hdmi->channels, HDMI_FC_AUDSCHNLS2);
+ hdmi_writeb(hdmi, 0x22, HDMI_FC_AUDSCHNLS3);
+ hdmi_writeb(hdmi, 0x22, HDMI_FC_AUDSCHNLS4);
+ hdmi_writeb(hdmi, 0x11, HDMI_FC_AUDSCHNLS5);
+ hdmi_writeb(hdmi, 0x11, HDMI_FC_AUDSCHNLS6);
+ hdmi_writeb(hdmi, (0x3 << 4) | sample_freq, HDMI_FC_AUDSCHNLS7);
+ hdmi_writeb(hdmi, (org_sample_freq << 4) | 0xb, HDMI_FC_AUDSCHNLS8);
+
+ hdmi_writeb(hdmi, ch_mask, HDMI_GP_CONF1);
+ hdmi_writeb(hdmi, 0x02, HDMI_GP_CONF2);
+ hdmi_writeb(hdmi, 0x01, HDMI_GP_CONF0);
+
+ hdmi_modb(hdmi, 0x3, 0x3, HDMI_FC_DATAUTO3);
+
+ /* HBR (High Bit Rate) audio passthrough */
+ if (hdmi->sample_rate == 192000 && hdmi->channels == 8 &&
+ hdmi->sample_width == 32 && hdmi->sample_non_pcm)
+ hdmi_modb(hdmi, 0x01, 0x01, HDMI_GP_CONF2);
+
+ if (pdata->enable_audio)
+ pdata->enable_audio(hdmi,
+ hdmi->channels,
+ hdmi->sample_width,
+ hdmi->sample_rate,
+ hdmi->sample_non_pcm);
+}
+
+static void dw_hdmi_gp_audio_disable(struct dw_hdmi *hdmi)
+{
+ const struct dw_hdmi_plat_data *pdata = hdmi->plat_data;
+
+ hdmi_set_cts_n(hdmi, hdmi->audio_cts, 0);
+
+ hdmi_modb(hdmi, 0, 0x3, HDMI_FC_DATAUTO3);
+ if (pdata->disable_audio)
+ pdata->disable_audio(hdmi);
+
+ hdmi_enable_audio_clk(hdmi, false);
+}
+
static void dw_hdmi_ahb_audio_enable(struct dw_hdmi *hdmi)
{
hdmi_set_cts_n(hdmi, hdmi->audio_cts, hdmi->audio_n);
@@ -1108,6 +1217,8 @@ static void hdmi_video_packetize(struct dw_hdmi *hdmi)
unsigned int output_select = HDMI_VP_CONF_OUTPUT_SELECTOR_PP;
struct hdmi_data_info *hdmi_data = &hdmi->hdmi_data;
u8 val, vp_conf;
+ u8 clear_gcp_auto = 0;
+
if (hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_out_bus_format) ||
hdmi_bus_fmt_is_yuv444(hdmi->hdmi_data.enc_out_bus_format) ||
@@ -1117,6 +1228,7 @@ static void hdmi_video_packetize(struct dw_hdmi *hdmi)
case 8:
color_depth = 4;
output_select = HDMI_VP_CONF_OUTPUT_SELECTOR_BYPASS;
+ clear_gcp_auto = 1;
break;
case 10:
color_depth = 5;
@@ -1136,6 +1248,7 @@ static void hdmi_video_packetize(struct dw_hdmi *hdmi)
case 0:
case 8:
remap_size = HDMI_VP_REMAP_YCC422_16bit;
+ clear_gcp_auto = 1;
break;
case 10:
remap_size = HDMI_VP_REMAP_YCC422_20bit;
@@ -1160,6 +1273,19 @@ static void hdmi_video_packetize(struct dw_hdmi *hdmi)
HDMI_VP_PR_CD_DESIRED_PR_FACTOR_MASK);
hdmi_writeb(hdmi, val, HDMI_VP_PR_CD);
+ /*
+ * HDMI 1.4b specification section 6.5.3:
+ * Source shall only send GCPs with non-zero CD to sinks
+ * that indicate support for Deep Color.
+ * GCPs here only transmit CD and do not handle AVMUTE, PP nor
+ * Default_Phase (yet).
+ * Disable Auto GCP for 24-bit color on sinks that do not support
+ * Deep Color.
+ */
+ val = hdmi_readb(hdmi, HDMI_FC_DATAUTO3);
+ if (clear_gcp_auto == 1)
+ val &= ~HDMI_FC_DATAUTO3_GCP_AUTO;
+ else
+ val |= HDMI_FC_DATAUTO3_GCP_AUTO;
+ hdmi_writeb(hdmi, val, HDMI_FC_DATAUTO3);
+
hdmi_modb(hdmi, HDMI_VP_STUFF_PR_STUFFING_STUFFING_MODE,
HDMI_VP_STUFF_PR_STUFFING_MASK, HDMI_VP_STUFF);
@@ -1357,13 +1483,21 @@ static void dw_hdmi_phy_sel_interface_control(struct dw_hdmi *hdmi, u8 enable)
HDMI_PHY_CONF0_SELDIPIF_MASK);
}
-void dw_hdmi_phy_reset(struct dw_hdmi *hdmi)
+void dw_hdmi_phy_gen1_reset(struct dw_hdmi *hdmi)
+{
+ /* PHY reset. The reset signal is active low on Gen1 PHYs. */
+ hdmi_writeb(hdmi, 0, HDMI_MC_PHYRSTZ);
+ hdmi_writeb(hdmi, HDMI_MC_PHYRSTZ_PHYRSTZ, HDMI_MC_PHYRSTZ);
+}
+EXPORT_SYMBOL_GPL(dw_hdmi_phy_gen1_reset);
+
+void dw_hdmi_phy_gen2_reset(struct dw_hdmi *hdmi)
{
/* PHY reset. The reset signal is active high on Gen2 PHYs. */
hdmi_writeb(hdmi, HDMI_MC_PHYRSTZ_PHYRSTZ, HDMI_MC_PHYRSTZ);
hdmi_writeb(hdmi, 0, HDMI_MC_PHYRSTZ);
}
-EXPORT_SYMBOL_GPL(dw_hdmi_phy_reset);
+EXPORT_SYMBOL_GPL(dw_hdmi_phy_gen2_reset);
void dw_hdmi_phy_i2c_set_addr(struct dw_hdmi *hdmi, u8 address)
{
@@ -1517,7 +1651,7 @@ static int hdmi_phy_configure(struct dw_hdmi *hdmi,
if (phy->has_svsret)
dw_hdmi_phy_enable_svsret(hdmi, 1);
- dw_hdmi_phy_reset(hdmi);
+ dw_hdmi_phy_gen2_reset(hdmi);
hdmi_writeb(hdmi, HDMI_MC_HEACPHY_RST_ASSERT, HDMI_MC_HEACPHY_RST);
@@ -2086,30 +2220,21 @@ static void dw_hdmi_clear_overflow(struct dw_hdmi *hdmi)
* then write one of the FC registers several times.
*
* The number of iterations matters and depends on the HDMI TX revision
- * (and possibly on the platform). So far i.MX6Q (v1.30a), i.MX6DL
- * (v1.31a) and multiple Allwinner SoCs (v1.32a) have been identified
- * as needing the workaround, with 4 iterations for v1.30a and 1
- * iteration for others.
- * The Amlogic Meson GX SoCs (v2.01a) have been identified as needing
- * the workaround with a single iteration.
- * The Rockchip RK3288 SoC (v2.00a) and RK3328/RK3399 SoCs (v2.11a) have
- * been identified as needing the workaround with a single iteration.
+ * (and possibly on the platform).
+ * Four iterations are needed for i.MX6Q (v1.30a); a single iteration
+ * suffices for the others: i.MX6DL (v1.31a), Allwinner SoCs (v1.32a),
+ * Rockchip RK3288 SoC (v2.00a), Amlogic Meson GX SoCs (v2.01a),
+ * RK3328/RK3399 SoCs (v2.11a) and i.MX8MPlus (v2.13a) have all been
+ * identified as needing the workaround with a single iteration.
*/
switch (hdmi->version) {
case 0x130a:
count = 4;
break;
- case 0x131a:
- case 0x132a:
- case 0x200a:
- case 0x201a:
- case 0x211a:
- case 0x212a:
+ default:
count = 1;
break;
- default:
- return;
}
/* TMDS software reset */
@@ -3242,6 +3367,7 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
hdmi->plat_data = plat_data;
hdmi->dev = dev;
hdmi->sample_rate = 48000;
+ hdmi->channels = 2;
hdmi->disabled = true;
hdmi->rxsense = true;
hdmi->phy_mask = (u8)~(HDMI_PHY_HPD | HDMI_PHY_RX_SENSE);
@@ -3465,6 +3591,24 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
pdevinfo.size_data = sizeof(audio);
pdevinfo.dma_mask = DMA_BIT_MASK(32);
hdmi->audio = platform_device_register_full(&pdevinfo);
+ } else if (iores && config3 & HDMI_CONFIG3_GPAUD) {
+ struct dw_hdmi_audio_data audio;
+
+ audio.phys = iores->start;
+ audio.base = hdmi->regs;
+ audio.irq = irq;
+ audio.hdmi = hdmi;
+ audio.get_eld = hdmi_audio_get_eld;
+
+ hdmi->enable_audio = dw_hdmi_gp_audio_enable;
+ hdmi->disable_audio = dw_hdmi_gp_audio_disable;
+
+ pdevinfo.name = "dw-hdmi-gp-audio";
+ pdevinfo.id = PLATFORM_DEVID_NONE;
+ pdevinfo.data = &audio;
+ pdevinfo.size_data = sizeof(audio);
+ pdevinfo.dma_mask = DMA_BIT_MASK(32);
+ hdmi->audio = platform_device_register_full(&pdevinfo);
}
if (!plat_data->disable_cec && (config0 & HDMI_CONFIG0_CEC)) {
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h
index 1999db05bc3b..af43a0414b78 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h
@@ -158,8 +158,17 @@
#define HDMI_FC_SPDDEVICEINF 0x1062
#define HDMI_FC_AUDSCONF 0x1063
#define HDMI_FC_AUDSSTAT 0x1064
-#define HDMI_FC_AUDSCHNLS7 0x106e
-#define HDMI_FC_AUDSCHNLS8 0x106f
+#define HDMI_FC_AUDSV 0x1065
+#define HDMI_FC_AUDSU 0x1066
+#define HDMI_FC_AUDSCHNLS0 0x1067
+#define HDMI_FC_AUDSCHNLS1 0x1068
+#define HDMI_FC_AUDSCHNLS2 0x1069
+#define HDMI_FC_AUDSCHNLS3 0x106A
+#define HDMI_FC_AUDSCHNLS4 0x106B
+#define HDMI_FC_AUDSCHNLS5 0x106C
+#define HDMI_FC_AUDSCHNLS6 0x106D
+#define HDMI_FC_AUDSCHNLS7 0x106E
+#define HDMI_FC_AUDSCHNLS8 0x106F
#define HDMI_FC_DATACH0FILL 0x1070
#define HDMI_FC_DATACH1FILL 0x1071
#define HDMI_FC_DATACH2FILL 0x1072
@@ -850,6 +859,9 @@ enum {
HDMI_FC_DATAUTO0_VSD_MASK = 0x08,
HDMI_FC_DATAUTO0_VSD_OFFSET = 3,
+/* FC_DATAUTO3 field values */
+ HDMI_FC_DATAUTO3_GCP_AUTO = 0x04,
+
/* PHY_CONF0 field values */
HDMI_PHY_CONF0_PDZ_MASK = 0x80,
HDMI_PHY_CONF0_PDZ_OFFSET = 7,
diff --git a/drivers/gpu/drm/dp/drm_dp.c b/drivers/gpu/drm/dp/drm_dp.c
index 580016a1b9eb..2a1f5ff6633c 100644
--- a/drivers/gpu/drm/dp/drm_dp.c
+++ b/drivers/gpu/drm/dp/drm_dp.c
@@ -528,6 +528,31 @@ unlock:
}
/**
+ * drm_dp_dpcd_probe() - probe a given DPCD address with a 1-byte read access
+ * @aux: DisplayPort AUX channel (SST)
+ * @offset: address of the register to probe
+ *
+ * Probe the provided DPCD address by reading 1 byte from it. The function can
+ * be used to trigger a side effect of the read access, such as waking up the
+ * sink, without needing the read-out value itself.
+ *
+ * Returns 0 if the read access succeeded, or a negative error code on failure.
+ */
+int drm_dp_dpcd_probe(struct drm_dp_aux *aux, unsigned int offset)
+{
+ u8 buffer;
+ int ret;
+
+ ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, offset, &buffer, 1);
+ WARN_ON(ret == 0);
+
+ drm_dp_dump_access(aux, DP_AUX_NATIVE_READ, offset, &buffer, ret);
+
+ return ret < 0 ? ret : 0;
+}
+EXPORT_SYMBOL(drm_dp_dpcd_probe);
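
A hedged usage sketch, mirroring what drm_dp_dpcd_read() does below to wake the
sink before the real transfer:

    /* Wake the sink with a throwaway 1-byte read; the value is discarded. */
    int err = drm_dp_dpcd_probe(aux, DP_DPCD_REV);

    if (err < 0)
            return err; /* AUX transaction failed */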
+
+/**
* drm_dp_dpcd_read() - read a series of bytes from the DPCD
* @aux: DisplayPort AUX channel (SST or MST)
* @offset: address of the (first) register to read
@@ -559,10 +584,9 @@ ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
* monitor doesn't power down exactly after the throw away read.
*/
if (!aux->is_remote) {
- ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, DP_DPCD_REV,
- buffer, 1);
- if (ret != 1)
- goto out;
+ ret = drm_dp_dpcd_probe(aux, DP_DPCD_REV);
+ if (ret < 0)
+ return ret;
}
if (aux->is_remote)
@@ -571,7 +595,6 @@ ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, offset,
buffer, size);
-out:
drm_dp_dump_access(aux, DP_AUX_NATIVE_READ, offset, buffer, ret);
return ret;
}
diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c
index 72f52f293249..11bb59399471 100644
--- a/drivers/gpu/drm/drm_buddy.c
+++ b/drivers/gpu/drm/drm_buddy.c
@@ -665,6 +665,9 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
if (start + size == end)
return __drm_buddy_alloc_range(mm, start, size, blocks);
+ if (!IS_ALIGNED(size, min_page_size))
+ return -EINVAL;
+
pages = size >> ilog2(mm->chunk_size);
order = fls(pages) - 1;
min_order = ilog2(min_page_size) - ilog2(mm->chunk_size);
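
For example, with min_page_size = 2 MiB a request of size = 3 MiB is now
rejected with -EINVAL up front rather than reaching the power-of-two order
computation below.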
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index bff917531f33..b632825654a9 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -297,15 +297,15 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
return false;
}
- saved_mode = crtc->mode;
- saved_hwmode = crtc->hwmode;
+ drm_mode_init(&saved_mode, &crtc->mode);
+ drm_mode_init(&saved_hwmode, &crtc->hwmode);
saved_x = crtc->x;
saved_y = crtc->y;
/* Update crtc values up front so the driver can rely on them for mode
* setting.
*/
- crtc->mode = *mode;
+ drm_mode_copy(&crtc->mode, mode);
crtc->x = x;
crtc->y = y;
@@ -341,7 +341,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
}
DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
- crtc->hwmode = *adjusted_mode;
+ drm_mode_copy(&crtc->hwmode, adjusted_mode);
/* Prepare the encoders and CRTCs before setting the mode. */
drm_for_each_encoder(encoder, dev) {
@@ -411,8 +411,8 @@ done:
drm_mode_destroy(dev, adjusted_mode);
if (!ret) {
crtc->enabled = saved_enabled;
- crtc->mode = saved_mode;
- crtc->hwmode = saved_hwmode;
+ drm_mode_copy(&crtc->mode, &saved_mode);
+ drm_mode_copy(&crtc->hwmode, &saved_hwmode);
crtc->x = saved_x;
crtc->y = saved_y;
}
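
The drm_mode_copy()/drm_mode_init() conversions above (and in drm_modes.c and
drm_vblank.c below) matter because struct drm_display_mode embeds a list_head:
a plain struct assignment would clobber the destination's list linkage. A
minimal sketch of the intended pattern, assuming the helpers' documented
semantics:

    struct drm_display_mode saved_mode;

    /* Initialize a stack mode: zero it, then copy the payload, not the list. */
    drm_mode_init(&saved_mode, &crtc->mode);

    /* Copy the payload into a live mode while preserving its list head. */
    drm_mode_copy(&crtc->mode, &saved_mode);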
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 04e818ecd662..324ce8467915 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -1568,6 +1568,38 @@ static const struct drm_display_mode edid_4k_modes[] = {
/*** DDC fetch and block validation ***/
+static int edid_extension_block_count(const struct edid *edid)
+{
+ return edid->extensions;
+}
+
+static int edid_block_count(const struct edid *edid)
+{
+ return edid_extension_block_count(edid) + 1;
+}
+
+static int edid_size_by_blocks(int num_blocks)
+{
+ return num_blocks * EDID_LENGTH;
+}
+
+static int edid_size(const struct edid *edid)
+{
+ return edid_size_by_blocks(edid_block_count(edid));
+}
+
+static const void *edid_block_data(const struct edid *edid, int index)
+{
+ BUILD_BUG_ON(sizeof(*edid) != EDID_LENGTH);
+
+ return edid + index;
+}
+
+static const void *edid_extension_block_data(const struct edid *edid, int index)
+{
+ return edid_block_data(edid, index + 1);
+}
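
With these helpers, an EDID whose base block advertises two extensions has
edid_block_count() == 3 and edid_size() == 3 * EDID_LENGTH == 384 bytes, and
edid_extension_block_data(edid, 0) points at the first 128-byte extension
(edid + 1 in struct edid units, as the BUILD_BUG_ON above guarantees).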
+
static const u8 edid_header[] = {
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
};
@@ -1632,9 +1664,9 @@ static int edid_block_tag(const void *_block)
return block[0];
}
-static bool edid_is_zero(const void *edid, int length)
+static bool edid_block_is_zero(const void *edid)
{
- return !memchr_inv(edid, 0, length);
+ return !memchr_inv(edid, 0, EDID_LENGTH);
}
/**
@@ -1654,8 +1686,8 @@ bool drm_edid_are_equal(const struct edid *edid1, const struct edid *edid2)
return false;
if (edid1) {
- edid1_len = EDID_LENGTH * (1 + edid1->extensions);
- edid2_len = EDID_LENGTH * (1 + edid2->extensions);
+ edid1_len = edid_size(edid1);
+ edid2_len = edid_size(edid2);
if (edid1_len != edid2_len)
return false;
@@ -1670,7 +1702,9 @@ EXPORT_SYMBOL(drm_edid_are_equal);
enum edid_block_status {
EDID_BLOCK_OK = 0,
+ EDID_BLOCK_READ_FAIL,
EDID_BLOCK_NULL,
+ EDID_BLOCK_ZERO,
EDID_BLOCK_HEADER_CORRUPT,
EDID_BLOCK_HEADER_REPAIR,
EDID_BLOCK_HEADER_FIXED,
@@ -1689,15 +1723,23 @@ static enum edid_block_status edid_block_check(const void *_block,
if (is_base_block) {
int score = drm_edid_header_is_valid(block);
- if (score < clamp(edid_fixup, 0, 8))
- return EDID_BLOCK_HEADER_CORRUPT;
+ if (score < clamp(edid_fixup, 0, 8)) {
+ if (edid_block_is_zero(block))
+ return EDID_BLOCK_ZERO;
+ else
+ return EDID_BLOCK_HEADER_CORRUPT;
+ }
if (score < 8)
return EDID_BLOCK_HEADER_REPAIR;
}
- if (edid_block_compute_checksum(block) != edid_block_get_checksum(block))
- return EDID_BLOCK_CHECKSUM;
+ if (edid_block_compute_checksum(block) != edid_block_get_checksum(block)) {
+ if (edid_block_is_zero(block))
+ return EDID_BLOCK_ZERO;
+ else
+ return EDID_BLOCK_CHECKSUM;
+ }
if (is_base_block) {
if (block->version != 1)
@@ -1720,6 +1762,70 @@ static bool edid_block_valid(const void *block, bool base)
edid_block_tag(block));
}
+static void edid_block_status_print(enum edid_block_status status,
+ const struct edid *block,
+ int block_num)
+{
+ switch (status) {
+ case EDID_BLOCK_OK:
+ break;
+ case EDID_BLOCK_READ_FAIL:
+ pr_debug("EDID block %d read failed\n", block_num);
+ break;
+ case EDID_BLOCK_NULL:
+ pr_debug("EDID block %d pointer is NULL\n", block_num);
+ break;
+ case EDID_BLOCK_ZERO:
+ pr_notice("EDID block %d is all zeroes\n", block_num);
+ break;
+ case EDID_BLOCK_HEADER_CORRUPT:
+ pr_notice("EDID has corrupt header\n");
+ break;
+ case EDID_BLOCK_HEADER_REPAIR:
+ pr_debug("EDID corrupt header needs repair\n");
+ break;
+ case EDID_BLOCK_HEADER_FIXED:
+ pr_debug("EDID corrupt header fixed\n");
+ break;
+ case EDID_BLOCK_CHECKSUM:
+ if (edid_block_status_valid(status, edid_block_tag(block))) {
+ pr_debug("EDID block %d (tag 0x%02x) checksum is invalid, remainder is %d, ignoring\n",
+ block_num, edid_block_tag(block),
+ edid_block_compute_checksum(block));
+ } else {
+ pr_notice("EDID block %d (tag 0x%02x) checksum is invalid, remainder is %d\n",
+ block_num, edid_block_tag(block),
+ edid_block_compute_checksum(block));
+ }
+ break;
+ case EDID_BLOCK_VERSION:
+ pr_notice("EDID has major version %d, instead of 1\n",
+ block->version);
+ break;
+ default:
+ WARN(1, "EDID block %d unknown edid block status code %d\n",
+ block_num, status);
+ break;
+ }
+}
+
+static void edid_block_dump(const char *level, const void *block, int block_num)
+{
+ enum edid_block_status status;
+ char prefix[20];
+
+ status = edid_block_check(block, block_num == 0);
+ if (status == EDID_BLOCK_ZERO)
+ sprintf(prefix, "\t[%02x] ZERO ", block_num);
+ else if (!edid_block_status_valid(status, edid_block_tag(block)))
+ sprintf(prefix, "\t[%02x] BAD ", block_num);
+ else
+ sprintf(prefix, "\t[%02x] GOOD ", block_num);
+
+ print_hex_dump(level, prefix, DUMP_PREFIX_NONE, 16, 1,
+ block, EDID_LENGTH, false);
+}
+
/**
* drm_edid_block_valid - Sanity check the EDID block (base or extension)
* @raw_edid: pointer to raw EDID block
@@ -1766,33 +1872,14 @@ bool drm_edid_block_valid(u8 *_block, int block_num, bool print_bad_edid,
*edid_corrupt = true;
}
+ edid_block_status_print(status, block, block_num);
+
/* Determine whether we can use this block with this status. */
valid = edid_block_status_valid(status, edid_block_tag(block));
- /* Some fairly random status printouts. */
- if (status == EDID_BLOCK_CHECKSUM) {
- if (valid) {
- DRM_DEBUG("EDID block checksum is invalid, remainder is %d\n",
- edid_block_compute_checksum(block));
- DRM_DEBUG("Assuming a KVM switch modified the block but left the original checksum\n");
- } else if (print_bad_edid) {
- DRM_NOTE("EDID block checksum is invalid, remainder is %d\n",
- edid_block_compute_checksum(block));
- }
- } else if (status == EDID_BLOCK_VERSION) {
- DRM_NOTE("EDID has major version %d, instead of 1\n",
- block->version);
- }
-
- if (!valid && print_bad_edid) {
- if (edid_is_zero(block, EDID_LENGTH)) {
- pr_notice("EDID block is all zeroes\n");
- } else {
- pr_notice("Raw EDID:\n");
- print_hex_dump(KERN_NOTICE,
- " \t", DUMP_PREFIX_NONE, 16, 1,
- block, EDID_LENGTH, false);
- }
+ if (!valid && print_bad_edid && status != EDID_BLOCK_ZERO) {
+ pr_notice("Raw EDID:\n");
+ edid_block_dump(KERN_NOTICE, block, block_num);
}
return valid;
@@ -1810,14 +1897,16 @@ EXPORT_SYMBOL(drm_edid_block_valid);
bool drm_edid_is_valid(struct edid *edid)
{
int i;
- u8 *raw = (u8 *)edid;
if (!edid)
return false;
- for (i = 0; i <= edid->extensions; i++)
- if (!drm_edid_block_valid(raw + i * EDID_LENGTH, i, true, NULL))
+ for (i = 0; i < edid_block_count(edid); i++) {
+ void *block = (void *)edid_block_data(edid, i);
+
+ if (!drm_edid_block_valid(block, i, true, NULL))
return false;
+ }
return true;
}
@@ -1830,13 +1919,13 @@ static struct edid *edid_filter_invalid_blocks(const struct edid *edid,
int valid_extensions = edid->extensions - invalid_blocks;
int i;
- new = kmalloc_array(valid_extensions + 1, EDID_LENGTH, GFP_KERNEL);
+ new = kmalloc(edid_size_by_blocks(valid_extensions + 1), GFP_KERNEL);
if (!new)
goto out;
dest_block = new;
- for (i = 0; i <= edid->extensions; i++) {
- const void *block = edid + i;
+ for (i = 0; i < edid_block_count(edid); i++) {
+ const void *block = edid_block_data(edid, i);
if (edid_block_valid(block, i == 0))
memcpy(dest_block++, block, EDID_LENGTH);
@@ -1916,7 +2005,7 @@ drm_do_probe_ddc_edid(void *data, u8 *buf, unsigned int block, size_t len)
}
static void connector_bad_edid(struct drm_connector *connector,
- u8 *edid, int num_blocks)
+ const struct edid *edid, int num_blocks)
{
int i;
u8 last_block;
@@ -1927,32 +2016,19 @@ static void connector_bad_edid(struct drm_connector *connector,
* of 0x7e in the EDID of the _index_ of the last block in the
* combined chunk of memory.
*/
- last_block = edid[0x7e];
+ last_block = edid->extensions;
/* Calculate real checksum for the last edid extension block data */
if (last_block < num_blocks)
connector->real_edid_checksum =
- edid_block_compute_checksum(edid + last_block * EDID_LENGTH);
+ edid_block_compute_checksum(edid + last_block);
if (connector->bad_edid_counter++ && !drm_debug_enabled(DRM_UT_KMS))
return;
drm_dbg_kms(connector->dev, "%s: EDID is invalid:\n", connector->name);
- for (i = 0; i < num_blocks; i++) {
- u8 *block = edid + i * EDID_LENGTH;
- char prefix[20];
-
- if (edid_is_zero(block, EDID_LENGTH))
- sprintf(prefix, "\t[%02x] ZERO ", i);
- else if (!drm_edid_block_valid(block, i, false, NULL))
- sprintf(prefix, "\t[%02x] BAD ", i);
- else
- sprintf(prefix, "\t[%02x] GOOD ", i);
-
- print_hex_dump(KERN_DEBUG,
- prefix, DUMP_PREFIX_NONE, 16, 1,
- block, EDID_LENGTH, false);
- }
+ for (i = 0; i < num_blocks; i++)
+ edid_block_dump(KERN_DEBUG, edid + i, i);
}
/* Get override or firmware EDID */
@@ -1999,43 +2075,39 @@ int drm_add_override_edid_modes(struct drm_connector *connector)
}
EXPORT_SYMBOL(drm_add_override_edid_modes);
-static struct edid *drm_do_get_edid_base_block(struct drm_connector *connector,
- int (*get_edid_block)(void *data, u8 *buf, unsigned int block,
- size_t len),
- void *data)
+typedef int read_block_fn(void *context, u8 *buf, unsigned int block, size_t len);
+
+static enum edid_block_status edid_block_read(void *block, unsigned int block_num,
+ read_block_fn read_block,
+ void *context)
{
- int *null_edid_counter = connector ? &connector->null_edid_counter : NULL;
- bool *edid_corrupt = connector ? &connector->edid_corrupt : NULL;
- void *edid;
+ enum edid_block_status status;
+ bool is_base_block = block_num == 0;
int try;
- edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
- if (edid == NULL)
- return NULL;
-
- /* base block fetch */
for (try = 0; try < 4; try++) {
- if (get_edid_block(data, edid, 0, EDID_LENGTH))
- goto out;
- if (drm_edid_block_valid(edid, 0, false, edid_corrupt))
- break;
- if (try == 0 && edid_is_zero(edid, EDID_LENGTH)) {
- if (null_edid_counter)
- (*null_edid_counter)++;
- goto carp;
+ if (read_block(context, block, block_num, EDID_LENGTH))
+ return EDID_BLOCK_READ_FAIL;
+
+ status = edid_block_check(block, is_base_block);
+ if (status == EDID_BLOCK_HEADER_REPAIR) {
+ edid_header_fix(block);
+
+ /* Retry with fixed header, update status if that worked. */
+ status = edid_block_check(block, is_base_block);
+ if (status == EDID_BLOCK_OK)
+ status = EDID_BLOCK_HEADER_FIXED;
}
- }
- if (try == 4)
- goto carp;
- return edid;
+ if (edid_block_status_valid(status, edid_block_tag(block)))
+ break;
-carp:
- if (connector)
- connector_bad_edid(connector, edid, 1);
-out:
- kfree(edid);
- return NULL;
+ /* Fail early on an unrepairable, all-zero base block. */
+ if (try == 0 && is_base_block && status == EDID_BLOCK_ZERO)
+ break;
+ }
+
+ return status;
}
/**
@@ -2059,53 +2131,74 @@ out:
* Return: Pointer to valid EDID or NULL if we couldn't find any.
*/
struct edid *drm_do_get_edid(struct drm_connector *connector,
- int (*get_edid_block)(void *data, u8 *buf, unsigned int block,
- size_t len),
- void *data)
+ read_block_fn read_block,
+ void *context)
{
- int j, invalid_blocks = 0;
- struct edid *edid, *new, *override;
+ enum edid_block_status status;
+ int i, invalid_blocks = 0;
+ struct edid *edid, *new;
- override = drm_get_override_edid(connector);
- if (override)
- return override;
+ edid = drm_get_override_edid(connector);
+ if (edid)
+ goto ok;
- edid = drm_do_get_edid_base_block(connector, get_edid_block, data);
+ edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
if (!edid)
return NULL;
- if (edid->extensions == 0)
- return edid;
+ status = edid_block_read(edid, 0, read_block, context);
+
+ edid_block_status_print(status, edid, 0);
+
+ if (status == EDID_BLOCK_READ_FAIL)
+ goto fail;
+
+ /* FIXME: Clarify what a corrupt EDID actually means. */
+ if (status == EDID_BLOCK_OK || status == EDID_BLOCK_VERSION)
+ connector->edid_corrupt = false;
+ else
+ connector->edid_corrupt = true;
+
+ if (!edid_block_status_valid(status, edid_block_tag(edid))) {
+ if (status == EDID_BLOCK_ZERO)
+ connector->null_edid_counter++;
+
+ connector_bad_edid(connector, edid, 1);
+ goto fail;
+ }
- new = krealloc(edid, (edid->extensions + 1) * EDID_LENGTH, GFP_KERNEL);
+ if (!edid_extension_block_count(edid))
+ goto ok;
+
+ new = krealloc(edid, edid_size(edid), GFP_KERNEL);
if (!new)
- goto out;
+ goto fail;
edid = new;
- for (j = 1; j <= edid->extensions; j++) {
- void *block = edid + j;
- int try;
+ for (i = 1; i < edid_block_count(edid); i++) {
+ void *block = (void *)edid_block_data(edid, i);
- for (try = 0; try < 4; try++) {
- if (get_edid_block(data, block, j, EDID_LENGTH))
- goto out;
- if (drm_edid_block_valid(block, j, false, NULL))
- break;
- }
+ status = edid_block_read(block, i, read_block, context);
+
+ edid_block_status_print(status, block, i);
- if (try == 4)
+ if (!edid_block_status_valid(status, edid_block_tag(block))) {
+ if (status == EDID_BLOCK_READ_FAIL)
+ goto fail;
invalid_blocks++;
+ }
}
if (invalid_blocks) {
- connector_bad_edid(connector, (u8 *)edid, edid->extensions + 1);
+ connector_bad_edid(connector, edid, edid_block_count(edid));
edid = edid_filter_invalid_blocks(edid, invalid_blocks);
}
+ok:
return edid;
-out:
+fail:
kfree(edid);
return NULL;
}
@@ -2199,20 +2292,27 @@ static u32 edid_extract_panel_id(const struct edid *edid)
u32 drm_edid_get_panel_id(struct i2c_adapter *adapter)
{
- const struct edid *edid;
- u32 panel_id;
-
- edid = drm_do_get_edid_base_block(NULL, drm_do_probe_ddc_edid, adapter);
+ enum edid_block_status status;
+ void *base_block;
+ u32 panel_id = 0;
/*
* There are no manufacturer IDs of 0, so if there is a problem reading
* the EDID then we'll just return 0.
*/
- if (!edid)
+
+ base_block = kmalloc(EDID_LENGTH, GFP_KERNEL);
+ if (!base_block)
return 0;
- panel_id = edid_extract_panel_id(edid);
- kfree(edid);
+ status = edid_block_read(base_block, 0, drm_do_probe_ddc_edid, adapter);
+
+ edid_block_status_print(status, base_block, 0);
+
+ if (edid_block_status_valid(status, edid_block_tag(base_block)))
+ panel_id = edid_extract_panel_id(base_block);
+
+ kfree(base_block);
return panel_id;
}
@@ -2255,7 +2355,7 @@ EXPORT_SYMBOL(drm_get_edid_switcheroo);
*/
struct edid *drm_edid_duplicate(const struct edid *edid)
{
- return kmemdup(edid, (edid->extensions + 1) * EDID_LENGTH, GFP_KERNEL);
+ return kmemdup(edid, edid_size(edid), GFP_KERNEL);
}
EXPORT_SYMBOL(drm_edid_duplicate);
@@ -2439,8 +2539,8 @@ drm_for_each_detailed_block(const struct edid *edid, detailed_cb *cb, void *clos
for (i = 0; i < EDID_DETAILED_TIMINGS; i++)
cb(&(edid->detailed_timings[i]), closure);
- for (i = 1; i <= edid->extensions; i++) {
- const u8 *ext = (const u8 *)edid + (i * EDID_LENGTH);
+ for (i = 0; i < edid_extension_block_count(edid); i++) {
+ const u8 *ext = edid_extension_block_data(edid, i);
switch (*ext) {
case CEA_EXT:
@@ -3410,17 +3510,17 @@ const u8 *drm_find_edid_extension(const struct edid *edid,
int i;
/* No EDID or EDID extensions */
- if (edid == NULL || edid->extensions == 0)
+ if (!edid || !edid_extension_block_count(edid))
return NULL;
/* Find CEA extension */
- for (i = *ext_index; i < edid->extensions; i++) {
- edid_ext = (const u8 *)edid + EDID_LENGTH * (i + 1);
+ for (i = *ext_index; i < edid_extension_block_count(edid); i++) {
+ edid_ext = edid_extension_block_data(edid, i);
if (edid_block_tag(edid_ext) == ext_id)
break;
}
- if (i >= edid->extensions)
+ if (i >= edid_extension_block_count(edid))
return NULL;
*ext_index = i + 1;
@@ -3551,9 +3651,11 @@ static u8 drm_match_cea_mode_clock_tolerance(const struct drm_display_mode *to_m
match_flags |= DRM_MODE_MATCH_ASPECT_RATIO;
for (vic = 1; vic < cea_num_vics(); vic = cea_next_vic(vic)) {
- struct drm_display_mode cea_mode = *cea_mode_for_vic(vic);
+ struct drm_display_mode cea_mode;
unsigned int clock1, clock2;
+ drm_mode_init(&cea_mode, cea_mode_for_vic(vic));
+
/* Check both 60Hz and 59.94Hz */
clock1 = cea_mode.clock;
clock2 = cea_mode_alternate_clock(&cea_mode);
@@ -3590,9 +3692,11 @@ u8 drm_match_cea_mode(const struct drm_display_mode *to_match)
match_flags |= DRM_MODE_MATCH_ASPECT_RATIO;
for (vic = 1; vic < cea_num_vics(); vic = cea_next_vic(vic)) {
- struct drm_display_mode cea_mode = *cea_mode_for_vic(vic);
+ struct drm_display_mode cea_mode;
unsigned int clock1, clock2;
+ drm_mode_init(&cea_mode, cea_mode_for_vic(vic));
+
/* Check both 60Hz and 59.94Hz */
clock1 = cea_mode.clock;
clock2 = cea_mode_alternate_clock(&cea_mode);
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 133dfae06fab..eb0c2d041f13 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -771,7 +771,8 @@ long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
return -EINVAL;
}
- ret = dma_resv_wait_timeout(obj->resv, wait_all, true, timeout);
+ ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(wait_all),
+ true, timeout);
if (ret == 0)
ret = -ETIME;
else if (ret > 0)
diff --git a/drivers/gpu/drm/drm_gem_atomic_helper.c b/drivers/gpu/drm/drm_gem_atomic_helper.c
index 9338ddb7edff..a6d89aed0bda 100644
--- a/drivers/gpu/drm/drm_gem_atomic_helper.c
+++ b/drivers/gpu/drm/drm_gem_atomic_helper.c
@@ -151,7 +151,7 @@ int drm_gem_plane_helper_prepare_fb(struct drm_plane *plane, struct drm_plane_st
return 0;
obj = drm_gem_fb_get_obj(state->fb, 0);
- ret = dma_resv_get_singleton(obj->resv, false, &fence);
+ ret = dma_resv_get_singleton(obj->resv, DMA_RESV_USAGE_WRITE, &fence);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 6e7e10f16ec0..e699950cc34d 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -837,7 +837,9 @@ EXPORT_SYMBOL(drm_mode_vrefresh);
void drm_mode_get_hv_timing(const struct drm_display_mode *mode,
int *hdisplay, int *vdisplay)
{
- struct drm_display_mode adjusted = *mode;
+ struct drm_display_mode adjusted;
+
+ drm_mode_init(&adjusted, mode);
drm_mode_set_crtcinfo(&adjusted, CRTC_STEREO_DOUBLE_ONLY);
*hdisplay = adjusted.crtc_hdisplay;
diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
index b701cda86d0c..2ff31717a3de 100644
--- a/drivers/gpu/drm/drm_vblank.c
+++ b/drivers/gpu/drm/drm_vblank.c
@@ -644,7 +644,7 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc,
vblank->linedur_ns = linedur_ns;
vblank->framedur_ns = framedur_ns;
- vblank->hwmode = *mode;
+ drm_mode_copy(&vblank->hwmode, mode);
drm_dbg_core(dev,
"crtc %u: hwmode: htotal %d, vtotal %d, vdisplay %d\n",
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index d5314aa28ff7..507172e2780b 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -380,12 +380,14 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
}
if (op & ETNA_PREP_NOSYNC) {
- if (!dma_resv_test_signaled(obj->resv, write))
+ if (!dma_resv_test_signaled(obj->resv,
+ dma_resv_usage_rw(write)))
return -EBUSY;
} else {
unsigned long remain = etnaviv_timeout_to_jiffies(timeout);
- ret = dma_resv_wait_timeout(obj->resv, write, true, remain);
+ ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(write),
+ true, remain);
if (ret <= 0)
return ret == 0 ? -ETIMEDOUT : ret;
}
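
The dma_resv_usage_rw() helper used in these conversions maps an implicit-sync
direction to a fence-usage level, with a deliberate inversion: a new writer
must wait for existing readers and writers, while a new reader only waits for
existing writers. Its upstream definition is essentially:

    static inline enum dma_resv_usage dma_resv_usage_rw(bool write)
    {
            /* Writers wait for READ and above; readers for WRITE and above. */
            return write ? DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE;
    }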
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 53f7c78628a4..98bb5c9239de 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -202,14 +202,10 @@ static void submit_attach_object_fences(struct etnaviv_gem_submit *submit)
for (i = 0; i < submit->nr_bos; i++) {
struct drm_gem_object *obj = &submit->bos[i].obj->base;
+ bool write = submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE;
- if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE)
- dma_resv_add_excl_fence(obj->resv,
- submit->out_fence);
- else
- dma_resv_add_shared_fence(obj->resv,
- submit->out_fence);
-
+ dma_resv_add_fence(obj->resv, submit->out_fence, write ?
+ DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ);
submit_unlock_object(submit, i);
}
}
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index 2aff54d505e2..1d8744f3e702 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -396,9 +396,8 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
drm_for_each_connector_iter(connector, &conn_iter) {
gma_encoder = gma_attached_encoder(connector);
- switch (gma_encoder->type) {
- case INTEL_OUTPUT_LVDS:
- case INTEL_OUTPUT_MIPI:
+ if (gma_encoder->type == INTEL_OUTPUT_LVDS ||
+ gma_encoder->type == INTEL_OUTPUT_MIPI) {
ret = gma_backlight_init(dev);
break;
}
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index aa1e7f0b1fe4..a0e86d29bf54 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -103,40 +103,30 @@ config DRM_I915_USERPTR
If in doubt, say "Y".
config DRM_I915_GVT
- bool "Enable Intel GVT-g graphics virtualization host support"
+ bool
+
+config DRM_I915_GVT_KVMGT
+ tristate "Enable KVM host support for Intel GVT-g graphics virtualization"
depends on DRM_I915
depends on X86
depends on 64BIT
- default n
+ depends on KVM
+ depends on VFIO_MDEV
+ select DRM_I915_GVT
+ select KVM_EXTERNAL_WRITE_TRACKING
+
help
Choose this option if you want to enable Intel GVT-g graphics
virtualization technology host support with integrated graphics.
With GVT-g, it's possible to have one integrated graphics
- device shared by multiple VMs under different hypervisors.
-
- Note that at least one hypervisor like Xen or KVM is required for
- this driver to work, and it only supports newer device from
- Broadwell+. For further information and setup guide, you can
- visit: http://01.org/igvt-g.
+ device shared by multiple VMs under KVM.
- Now it's just a stub to support the modifications of i915 for
- GVT device model. It requires at least one MPT modules for Xen/KVM
- and other components of GVT device model to work. Use it under
- you own risk.
+ Note that this driver only supports newer devices, from Broadwell onwards.
+ For further information and a setup guide, you can visit:
+ http://01.org/igvt-g.
If in doubt, say "N".
-config DRM_I915_GVT_KVMGT
- tristate "Enable KVM/VFIO support for Intel GVT-g"
- depends on DRM_I915_GVT
- depends on KVM
- depends on VFIO_MDEV
- select KVM_EXTERNAL_WRITE_TRACKING
- default n
- help
- Choose this option if you want to enable KVMGT support for
- Intel GVT-g.
-
config DRM_I915_PXP
bool "Enable Intel PXP support"
depends on DRM_I915
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index cd0bf6806228..d2b18f03a33c 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -223,6 +223,7 @@ i915-y += \
display/intel_cursor.o \
display/intel_display.o \
display/intel_display_power.o \
+ display/intel_display_power_map.o \
display/intel_display_power_well.o \
display/intel_dmc.o \
display/intel_dpio_phy.o \
@@ -331,13 +332,13 @@ i915-$(CONFIG_DRM_I915_SELFTEST) += \
# virtual gpu code
i915-y += i915_vgpu.o
-ifeq ($(CONFIG_DRM_I915_GVT),y)
-i915-y += intel_gvt.o
+i915-$(CONFIG_DRM_I915_GVT) += \
+ intel_gvt.o \
+ intel_gvt_mmio_table.o
include $(src)/gvt/Makefile
-endif
obj-$(CONFIG_DRM_I915) += i915.o
-obj-$(CONFIG_DRM_I915_GVT_KVMGT) += gvt/kvmgt.o
+obj-$(CONFIG_DRM_I915_GVT_KVMGT) += kvmgt.o
# header test
diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c
index 16bb21ad898b..5a957acebfd6 100644
--- a/drivers/gpu/drm/i915/display/g4x_dp.c
+++ b/drivers/gpu/drm/i915/display/g4x_dp.c
@@ -13,6 +13,7 @@
#include "intel_connector.h"
#include "intel_crtc.h"
#include "intel_de.h"
+#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
@@ -1375,7 +1376,7 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv,
dig_port->max_lanes = 4;
intel_encoder->type = INTEL_OUTPUT_DP;
- intel_encoder->power_domain = intel_port_to_power_domain(port);
+ intel_encoder->power_domain = intel_display_power_ddi_lanes_domain(dev_priv, port);
if (IS_CHERRYVIEW(dev_priv)) {
if (port == PORT_D)
intel_encoder->pipe_mask = BIT(PIPE_C);
diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.c b/drivers/gpu/drm/i915/display/g4x_hdmi.c
index 8bfef08b7c43..5fbd2ae95869 100644
--- a/drivers/gpu/drm/i915/display/g4x_hdmi.c
+++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c
@@ -10,6 +10,7 @@
#include "intel_connector.h"
#include "intel_crtc.h"
#include "intel_de.h"
+#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_dpio_phy.h"
#include "intel_fifo_underrun.h"
@@ -574,7 +575,7 @@ void g4x_hdmi_init(struct drm_i915_private *dev_priv,
intel_encoder->shutdown = intel_hdmi_encoder_shutdown;
intel_encoder->type = INTEL_OUTPUT_HDMI;
- intel_encoder->power_domain = intel_port_to_power_domain(port);
+ intel_encoder->power_domain = intel_display_power_ddi_lanes_domain(dev_priv, port);
intel_encoder->port = port;
if (IS_CHERRYVIEW(dev_priv)) {
if (port == PORT_D)
diff --git a/drivers/gpu/drm/i915/display/hsw_ips.c b/drivers/gpu/drm/i915/display/hsw_ips.c
index 38014e0cc9ad..861dcd2eb890 100644
--- a/drivers/gpu/drm/i915/display/hsw_ips.c
+++ b/drivers/gpu/drm/i915/display/hsw_ips.c
@@ -28,7 +28,7 @@ static void hsw_ips_enable(const struct intel_crtc_state *crtc_state)
if (IS_BROADWELL(i915)) {
drm_WARN_ON(&i915->drm,
- snb_pcode_write(i915, DISPLAY_IPS_CONTROL,
+ snb_pcode_write(&i915->uncore, DISPLAY_IPS_CONTROL,
IPS_ENABLE | IPS_PCODE_CONTROL));
/*
* Quoting Art Runyan: "its not safe to expect any particular
@@ -62,7 +62,7 @@ bool hsw_ips_disable(const struct intel_crtc_state *crtc_state)
if (IS_BROADWELL(i915)) {
drm_WARN_ON(&i915->drm,
- snb_pcode_write(i915, DISPLAY_IPS_CONTROL, 0));
+ snb_pcode_write(&i915->uncore, DISPLAY_IPS_CONTROL, 0));
/*
* Wait for PCODE to finish disabling IPS. The BSpec specified
* 42ms timeout value leads to occasional timeouts so use 100ms
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index 084cc51d1c41..d29211b9807c 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -399,8 +399,8 @@ static void get_dsi_io_power_domains(struct drm_i915_private *dev_priv,
intel_dsi->io_wakeref[port] =
intel_display_power_get(dev_priv,
port == PORT_A ?
- POWER_DOMAIN_PORT_DDI_A_IO :
- POWER_DOMAIN_PORT_DDI_B_IO);
+ POWER_DOMAIN_PORT_DDI_IO_A :
+ POWER_DOMAIN_PORT_DDI_IO_B);
}
}
@@ -1425,8 +1425,8 @@ static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
wakeref = fetch_and_zero(&intel_dsi->io_wakeref[port]);
intel_display_power_put(dev_priv,
port == PORT_A ?
- POWER_DOMAIN_PORT_DDI_A_IO :
- POWER_DOMAIN_PORT_DDI_B_IO,
+ POWER_DOMAIN_PORT_DDI_IO_A :
+ POWER_DOMAIN_PORT_DDI_IO_B,
wakeref);
}
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index 3d87da283cfa..efe8591619e3 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -1047,7 +1047,8 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
if (ret < 0)
goto unpin_fb;
- dma_resv_iter_begin(&cursor, obj->base.resv, false);
+ dma_resv_iter_begin(&cursor, obj->base.resv,
+ DMA_RESV_USAGE_WRITE);
dma_resv_for_each_fence_unlocked(&cursor, fence) {
add_rps_boost_after_vblank(new_plane_state->hw.crtc,
fence);
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index 1c87bf66b092..f0f0dfce27ce 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -827,7 +827,7 @@ void intel_audio_codec_enable(struct intel_encoder *encoder,
drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s][ENCODER:%d:%s] Enable audio codec on pipe %c, %u bytes ELD\n",
connector->base.id, connector->name,
encoder->base.base.id, encoder->base.name,
- pipe, drm_eld_size(connector->eld));
+ pipe_name(pipe), drm_eld_size(connector->eld));
/* FIXME precompute the ELD in .compute_config() */
if (!connector->eld[0])
@@ -888,7 +888,7 @@ void intel_audio_codec_disable(struct intel_encoder *encoder,
drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s][ENCODER:%d:%s] Disable audio codec on pipe %c\n",
connector->base.id, connector->name,
- encoder->base.base.id, encoder->base.name, pipe);
+ encoder->base.base.id, encoder->base.name, pipe_name(pipe));
if (dev_priv->audio.funcs)
dev_priv->audio.funcs->audio_codec_disable(encoder,
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index bc195769dd07..f7f49a03a2e4 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -185,10 +185,14 @@ static const struct {
.min_size = sizeof(struct bdb_edp), },
{ .section_id = BDB_LVDS_OPTIONS,
.min_size = sizeof(struct bdb_lvds_options), },
+ /*
+ * BDB_LVDS_LFP_DATA depends on BDB_LVDS_LFP_DATA_PTRS,
+ * so keep the two ordered.
+ */
{ .section_id = BDB_LVDS_LFP_DATA_PTRS,
.min_size = sizeof(struct bdb_lvds_lfp_data_ptrs), },
{ .section_id = BDB_LVDS_LFP_DATA,
- .min_size = sizeof(struct bdb_lvds_lfp_data), },
+ .min_size = 0, /* special case */ },
{ .section_id = BDB_LVDS_BACKLIGHT,
.min_size = sizeof(struct bdb_lfp_backlight_data), },
{ .section_id = BDB_LFP_POWER,
@@ -203,6 +207,23 @@ static const struct {
.min_size = sizeof(struct bdb_generic_dtd), },
};
+static size_t lfp_data_min_size(struct drm_i915_private *i915)
+{
+ const struct bdb_lvds_lfp_data_ptrs *ptrs;
+ size_t size;
+
+ ptrs = find_section(i915, BDB_LVDS_LFP_DATA_PTRS);
+ if (!ptrs)
+ return 0;
+
+ size = sizeof(struct bdb_lvds_lfp_data);
+ if (ptrs->panel_name.table_size)
+ size = max(size, ptrs->panel_name.offset +
+ sizeof(struct bdb_lvds_lfp_data_tail));
+
+ return size;
+}
+
static bool validate_lfp_data_ptrs(const void *bdb,
const struct bdb_lvds_lfp_data_ptrs *ptrs)
{
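lfp_data_min_size() replaces the static minimum: when the pointers block advertises a panel_name table, the LFP data block must also be large enough to hold the tail that starts at that offset. A worked example with hypothetical numbers, for illustration only:

	/* Hypothetical sizes, not taken from the spec:
	 *   sizeof(struct bdb_lvds_lfp_data)      = 1584
	 *   ptrs->panel_name.offset               = 1600
	 *   sizeof(struct bdb_lvds_lfp_data_tail) =  148
	 * => min size = max(1584, 1600 + 148) = 1748 bytes
	 */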
@@ -310,16 +331,144 @@ static bool fixup_lfp_data_ptrs(const void *bdb, void *ptrs_block)
return validate_lfp_data_ptrs(bdb, ptrs);
}
+static const void *find_fp_timing_terminator(const u8 *data, int size)
+{
+ int i;
+
+ for (i = 0; i < size - 1; i++) {
+ if (data[i] == 0xff && data[i+1] == 0xff)
+ return &data[i];
+ }
+
+ return NULL;
+}
+
+static int make_lfp_data_ptr(struct lvds_lfp_data_ptr_table *table,
+ int table_size, int total_size)
+{
+ if (total_size < table_size)
+ return total_size;
+
+ table->table_size = table_size;
+ table->offset = total_size - table_size;
+
+ return total_size - table_size;
+}
+
+static void next_lfp_data_ptr(struct lvds_lfp_data_ptr_table *next,
+ const struct lvds_lfp_data_ptr_table *prev,
+ int size)
+{
+ next->table_size = prev->table_size;
+ next->offset = prev->offset + size;
+}
+
+static void *generate_lfp_data_ptrs(struct drm_i915_private *i915,
+ const void *bdb)
+{
+ int i, size, table_size, block_size, offset;
+ const void *t0, *t1, *block;
+ struct bdb_lvds_lfp_data_ptrs *ptrs;
+ void *ptrs_block;
+
+ block = find_raw_section(bdb, BDB_LVDS_LFP_DATA);
+ if (!block)
+ return NULL;
+
+ drm_dbg_kms(&i915->drm, "Generating LFP data table pointers\n");
+
+ block_size = get_blocksize(block);
+
+ size = block_size;
+ t0 = find_fp_timing_terminator(block, size);
+ if (!t0)
+ return NULL;
+
+ size -= t0 - block - 2;
+ t1 = find_fp_timing_terminator(t0 + 2, size);
+ if (!t1)
+ return NULL;
+
+ size = t1 - t0;
+ if (size * 16 > block_size)
+ return NULL;
+
+ ptrs_block = kzalloc(sizeof(*ptrs) + 3, GFP_KERNEL);
+ if (!ptrs_block)
+ return NULL;
+
+ *(u8 *)(ptrs_block + 0) = BDB_LVDS_LFP_DATA_PTRS;
+ *(u16 *)(ptrs_block + 1) = sizeof(*ptrs);
+ ptrs = ptrs_block + 3;
+
+ table_size = sizeof(struct lvds_pnp_id);
+ size = make_lfp_data_ptr(&ptrs->ptr[0].panel_pnp_id, table_size, size);
+
+ table_size = sizeof(struct lvds_dvo_timing);
+ size = make_lfp_data_ptr(&ptrs->ptr[0].dvo_timing, table_size, size);
+
+ table_size = t0 - block + 2;
+ size = make_lfp_data_ptr(&ptrs->ptr[0].fp_timing, table_size, size);
+
+ if (ptrs->ptr[0].fp_timing.table_size)
+ ptrs->lvds_entries++;
+ if (ptrs->ptr[0].dvo_timing.table_size)
+ ptrs->lvds_entries++;
+ if (ptrs->ptr[0].panel_pnp_id.table_size)
+ ptrs->lvds_entries++;
+
+ if (size != 0 || ptrs->lvds_entries != 3) {
+ kfree(ptrs);
+ return NULL;
+ }
+
+ size = t1 - t0;
+ for (i = 1; i < 16; i++) {
+ next_lfp_data_ptr(&ptrs->ptr[i].fp_timing, &ptrs->ptr[i-1].fp_timing, size);
+ next_lfp_data_ptr(&ptrs->ptr[i].dvo_timing, &ptrs->ptr[i-1].dvo_timing, size);
+ next_lfp_data_ptr(&ptrs->ptr[i].panel_pnp_id, &ptrs->ptr[i-1].panel_pnp_id, size);
+ }
+
+ size = t1 - t0;
+ table_size = sizeof(struct lvds_lfp_panel_name);
+
+ if (16 * (size + table_size) <= block_size) {
+ ptrs->panel_name.table_size = table_size;
+ ptrs->panel_name.offset = size * 16;
+ }
+
+ offset = block - bdb;
+
+ for (i = 0; i < 16; i++) {
+ ptrs->ptr[i].fp_timing.offset += offset;
+ ptrs->ptr[i].dvo_timing.offset += offset;
+ ptrs->ptr[i].panel_pnp_id.offset += offset;
+ }
+
+ if (ptrs->panel_name.table_size)
+ ptrs->panel_name.offset += offset;
+
+ return ptrs_block;
+}
+
static void
init_bdb_block(struct drm_i915_private *i915,
const void *bdb, enum bdb_block_id section_id,
size_t min_size)
{
struct bdb_block_entry *entry;
+ void *temp_block = NULL;
const void *block;
size_t block_size;
block = find_raw_section(bdb, section_id);
+
+ /* Modern VBTs lack the LFP data table pointers block, make one up */
+ /* Modern VBTs lack the LFP data table pointers block; make one up */
+ if (!block && section_id == BDB_LVDS_LFP_DATA_PTRS) {
+ temp_block = generate_lfp_data_ptrs(i915, bdb);
+ if (temp_block)
+ block = temp_block + 3;
+ }
if (!block)
return;
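generate_lfp_data_ptrs() returns a buffer framed like a raw VBT block, which is why init_bdb_block() skips three bytes to reach the payload. A sketch of the framing implied by the u8/u16 stores in that function (illustrative only, not a structure from the VBT headers):

	struct generated_ptrs_block {
		u8 id;		/* BDB_LVDS_LFP_DATA_PTRS */
		u16 size;	/* sizeof(struct bdb_lvds_lfp_data_ptrs) */
		u8 payload[];	/* the generated pointer table itself */
	} __packed;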
@@ -330,12 +479,16 @@ init_bdb_block(struct drm_i915_private *i915,
entry = kzalloc(struct_size(entry, data, max(min_size, block_size) + 3),
GFP_KERNEL);
- if (!entry)
+ if (!entry) {
+ kfree(temp_block);
return;
+ }
entry->section_id = section_id;
memcpy(entry->data, block - 3, block_size + 3);
+ kfree(temp_block);
+
drm_dbg_kms(&i915->drm, "Found BDB block %d (size %zu, min size %zu)\n",
section_id, block_size, min_size);
@@ -358,6 +511,9 @@ static void init_bdb_blocks(struct drm_i915_private *i915,
enum bdb_block_id section_id = bdb_blocks[i].section_id;
size_t min_size = bdb_blocks[i].min_size;
+ if (section_id == BDB_LVDS_LFP_DATA)
+ min_size = lfp_data_min_size(i915);
+
init_bdb_block(i915, bdb, section_id, min_size);
}
}
@@ -428,6 +584,94 @@ get_lvds_fp_timing(const struct bdb_lvds_lfp_data *data,
return (const void *)data + ptrs->ptr[index].fp_timing.offset;
}
+static const struct bdb_lvds_lfp_data_tail *
+get_lfp_data_tail(const struct bdb_lvds_lfp_data *data,
+ const struct bdb_lvds_lfp_data_ptrs *ptrs)
+{
+ if (ptrs->panel_name.table_size)
+ return (const void *)data + ptrs->panel_name.offset;
+ else
+ return NULL;
+}
+
+static int opregion_get_panel_type(struct drm_i915_private *i915)
+{
+ return intel_opregion_get_panel_type(i915);
+}
+
+static int vbt_get_panel_type(struct drm_i915_private *i915)
+{
+ const struct bdb_lvds_options *lvds_options;
+
+ lvds_options = find_section(i915, BDB_LVDS_OPTIONS);
+ if (!lvds_options)
+ return -1;
+
+ if (lvds_options->panel_type > 0xf) {
+ drm_dbg_kms(&i915->drm, "Invalid VBT panel type 0x%x\n",
+ lvds_options->panel_type);
+ return -1;
+ }
+
+ return lvds_options->panel_type;
+}
+
+static int fallback_get_panel_type(struct drm_i915_private *i915)
+{
+ return 0;
+}
+
+enum panel_type {
+ PANEL_TYPE_OPREGION,
+ PANEL_TYPE_VBT,
+ PANEL_TYPE_FALLBACK,
+};
+
+static int get_panel_type(struct drm_i915_private *i915)
+{
+ struct {
+ const char *name;
+ int (*get_panel_type)(struct drm_i915_private *i915);
+ int panel_type;
+ } panel_types[] = {
+ [PANEL_TYPE_OPREGION] = {
+ .name = "OpRegion",
+ .get_panel_type = opregion_get_panel_type,
+ },
+ [PANEL_TYPE_VBT] = {
+ .name = "VBT",
+ .get_panel_type = vbt_get_panel_type,
+ },
+ [PANEL_TYPE_FALLBACK] = {
+ .name = "fallback",
+ .get_panel_type = fallback_get_panel_type,
+ },
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(panel_types); i++) {
+ panel_types[i].panel_type = panel_types[i].get_panel_type(i915);
+
+ drm_WARN_ON(&i915->drm, panel_types[i].panel_type > 0xf);
+
+ if (panel_types[i].panel_type >= 0)
+ drm_dbg_kms(&i915->drm, "Panel type (%s): %d\n",
+ panel_types[i].name, panel_types[i].panel_type);
+ }
+
+ if (panel_types[PANEL_TYPE_OPREGION].panel_type >= 0)
+ i = PANEL_TYPE_OPREGION;
+ else if (panel_types[PANEL_TYPE_VBT].panel_type >= 0)
+ i = PANEL_TYPE_VBT;
+ else
+ i = PANEL_TYPE_FALLBACK;
+
+ drm_dbg_kms(&i915->drm, "Selected panel type (%s): %d\n",
+ panel_types[i].name, panel_types[i].panel_type);
+
+ return panel_types[i].panel_type;
+}
+
/* Parse general panel options */
static void
parse_panel_options(struct drm_i915_private *i915)
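The table above makes the selection order explicit. A worked example of the resulting precedence, assuming the return convention shown (negative on failure):

	/*
	 * OpRegion -> 2, VBT -> 5      : selected panel type 2
	 * OpRegion fails, VBT -> 5     : selected panel type 5
	 * OpRegion fails, VBT fails    : selected panel type 0 (fallback)
	 */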
@@ -435,7 +679,6 @@ parse_panel_options(struct drm_i915_private *i915)
const struct bdb_lvds_options *lvds_options;
int panel_type;
int drrs_mode;
- int ret;
lvds_options = find_section(i915, BDB_LVDS_OPTIONS);
if (!lvds_options)
@@ -443,23 +686,7 @@ parse_panel_options(struct drm_i915_private *i915)
i915->vbt.lvds_dither = lvds_options->pixel_dither;
- ret = intel_opregion_get_panel_type(i915);
- if (ret >= 0) {
- drm_WARN_ON(&i915->drm, ret > 0xf);
- panel_type = ret;
- drm_dbg_kms(&i915->drm, "Panel type: %d (OpRegion)\n",
- panel_type);
- } else {
- if (lvds_options->panel_type > 0xf) {
- drm_dbg_kms(&i915->drm,
- "Invalid VBT panel type 0x%x\n",
- lvds_options->panel_type);
- return;
- }
- panel_type = lvds_options->panel_type;
- drm_dbg_kms(&i915->drm, "Panel type: %d (VBT)\n",
- panel_type);
- }
+ panel_type = get_panel_type(i915);
i915->vbt.panel_type = panel_type;
@@ -488,25 +715,16 @@ parse_panel_options(struct drm_i915_private *i915)
}
}
-/* Try to find integrated panel timing data */
static void
-parse_lfp_panel_dtd(struct drm_i915_private *i915)
+parse_lfp_panel_dtd(struct drm_i915_private *i915,
+ const struct bdb_lvds_lfp_data *lvds_lfp_data,
+ const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs)
{
- const struct bdb_lvds_lfp_data *lvds_lfp_data;
- const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs;
const struct lvds_dvo_timing *panel_dvo_timing;
const struct lvds_fp_timing *fp_timing;
struct drm_display_mode *panel_fixed_mode;
int panel_type = i915->vbt.panel_type;
- lvds_lfp_data = find_section(i915, BDB_LVDS_LFP_DATA);
- if (!lvds_lfp_data)
- return;
-
- lvds_lfp_data_ptrs = find_section(i915, BDB_LVDS_LFP_DATA_PTRS);
- if (!lvds_lfp_data_ptrs)
- return;
-
panel_dvo_timing = get_lvds_dvo_timing(lvds_lfp_data,
lvds_lfp_data_ptrs,
panel_type);
@@ -538,6 +756,38 @@ parse_lfp_panel_dtd(struct drm_i915_private *i915)
}
static void
+parse_lfp_data(struct drm_i915_private *i915)
+{
+ const struct bdb_lvds_lfp_data *data;
+ const struct bdb_lvds_lfp_data_tail *tail;
+ const struct bdb_lvds_lfp_data_ptrs *ptrs;
+ int panel_type = i915->vbt.panel_type;
+
+ ptrs = find_section(i915, BDB_LVDS_LFP_DATA_PTRS);
+ if (!ptrs)
+ return;
+
+ data = find_section(i915, BDB_LVDS_LFP_DATA);
+ if (!data)
+ return;
+
+ if (!i915->vbt.lfp_lvds_vbt_mode)
+ parse_lfp_panel_dtd(i915, data, ptrs);
+
+ tail = get_lfp_data_tail(data, ptrs);
+ if (!tail)
+ return;
+
+ if (i915->vbt.version >= 188) {
+ i915->vbt.seamless_drrs_min_refresh_rate =
+ tail->seamless_drrs_min_refresh_rate[panel_type];
+ drm_dbg_kms(&i915->drm,
+ "Seamless DRRS min refresh rate: %d Hz\n",
+ i915->vbt.seamless_drrs_min_refresh_rate);
+ }
+}
+
+static void
parse_generic_dtd(struct drm_i915_private *i915)
{
const struct bdb_generic_dtd *generic_dtd;
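Note the tail is indexed per panel slot: seamless_drrs_min_refresh_rate is read at [panel_type], so the field presumably carries one entry for each of the 16 panel slots. An illustrative sketch of the assumed shape, not the exact VBT definition:

	struct bdb_lvds_lfp_data_tail_example {
		/* ... earlier tail fields ... */
		u16 seamless_drrs_min_refresh_rate[16];	/* VBT >= 188 */
	} __packed;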
@@ -545,6 +795,17 @@ parse_generic_dtd(struct drm_i915_private *i915)
struct drm_display_mode *panel_fixed_mode;
int num_dtd;
+ /*
+ * Older VBTs provided DTD information for internal displays through
+ * the "LFP panel tables" block (42). As of VBT revision 229 the
+ * DTD information should be provided via a newer "generic DTD"
+ * block (58). Just to be safe, we'll try the new generic DTD block
+ * first on VBT >= 229, but still fall back to trying the old LFP
+ * block if that fails.
+ */
+ if (i915->vbt.version < 229)
+ return;
+
generic_dtd = find_section(i915, BDB_GENERIC_DTD);
if (!generic_dtd)
return;
@@ -616,23 +877,6 @@ parse_generic_dtd(struct drm_i915_private *i915)
}
static void
-parse_panel_dtd(struct drm_i915_private *i915)
-{
- /*
- * Older VBTs provided provided DTD information for internal displays
- * through the "LFP panel DTD" block (42). As of VBT revision 229,
- * that block is now deprecated and DTD information should be provided
- * via a newer "generic DTD" block (58). Just to be safe, we'll
- * try the new generic DTD block first on VBT >= 229, but still fall
- * back to trying the old LFP block if that fails.
- */
- if (i915->vbt.version >= 229)
- parse_generic_dtd(i915);
- if (!i915->vbt.lfp_lvds_vbt_mode)
- parse_lfp_panel_dtd(i915);
-}
-
-static void
parse_lfp_backlight(struct drm_i915_private *i915)
{
const struct bdb_lfp_backlight_data *backlight_data;
@@ -2708,7 +2952,8 @@ void intel_bios_init(struct drm_i915_private *i915)
parse_general_features(i915);
parse_general_definitions(i915);
parse_panel_options(i915);
- parse_panel_dtd(i915);
+ parse_generic_dtd(i915);
+ parse_lfp_data(i915);
parse_lfp_backlight(i915);
parse_sdvo_panel_data(i915);
parse_driver_features(i915);
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index 37bd7b17f3d0..79269d2c476b 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -78,7 +78,7 @@ static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv,
u16 dclk;
int ret;
- ret = snb_pcode_read(dev_priv, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
+ ret = snb_pcode_read(&dev_priv->uncore, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point),
&val, &val2);
if (ret)
@@ -104,7 +104,7 @@ static int adls_pcode_read_psf_gv_point_info(struct drm_i915_private *dev_priv,
int ret;
int i;
- ret = snb_pcode_read(dev_priv, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
+ ret = snb_pcode_read(&dev_priv->uncore, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
ADL_PCODE_MEM_SS_READ_PSF_GV_INFO, &val, NULL);
if (ret)
return ret;
@@ -123,7 +123,7 @@ int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
int ret;
/* bspec says to keep retrying for at least 1 ms */
- ret = skl_pcode_request(dev_priv, ICL_PCODE_SAGV_DE_MEM_SS_CONFIG,
+ ret = skl_pcode_request(&dev_priv->uncore, ICL_PCODE_SAGV_DE_MEM_SS_CONFIG,
points_mask,
ICL_PCODE_REP_QGV_MASK | ADLS_PCODE_REP_PSF_MASK,
ICL_PCODE_REP_QGV_SAFE | ADLS_PCODE_REP_PSF_SAFE,
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index b2017d8161b4..6e80162632dd 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -800,7 +800,7 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
"trying to change cdclk frequency with cdclk not enabled\n"))
return;
- ret = snb_pcode_write(dev_priv, BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
+ ret = snb_pcode_write(&dev_priv->uncore, BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
if (ret) {
drm_err(&dev_priv->drm,
"failed to inform pcode about cdclk change\n");
@@ -828,7 +828,7 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
drm_err(&dev_priv->drm, "Switching back to LCPLL failed\n");
- snb_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
+ snb_pcode_write(&dev_priv->uncore, HSW_PCODE_DE_WRITE_FREQ_REQ,
cdclk_config->voltage_level);
intel_de_write(dev_priv, CDCLK_FREQ,
@@ -1086,7 +1086,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
drm_WARN_ON_ONCE(&dev_priv->drm,
IS_SKYLAKE(dev_priv) && vco == 8640000);
- ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
+ ret = skl_pcode_request(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL,
SKL_CDCLK_PREPARE_FOR_CHANGE,
SKL_CDCLK_READY_FOR_CHANGE,
SKL_CDCLK_READY_FOR_CHANGE, 3);
@@ -1132,7 +1132,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
intel_de_posting_read(dev_priv, CDCLK_CTL);
/* inform PCU of the change */
- snb_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
+ snb_pcode_write(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL,
cdclk_config->voltage_level);
intel_update_cdclk(dev_priv);
@@ -1702,7 +1702,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
/* Inform power controller of upcoming frequency change. */
if (DISPLAY_VER(dev_priv) >= 11)
- ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
+ ret = skl_pcode_request(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL,
SKL_CDCLK_PREPARE_FOR_CHANGE,
SKL_CDCLK_READY_FOR_CHANGE,
SKL_CDCLK_READY_FOR_CHANGE, 3);
@@ -1711,7 +1711,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
* BSpec requires us to wait up to 150usec, but that leads to
* timeouts; the 2ms used here is based on experiment.
*/
- ret = snb_pcode_write_timeout(dev_priv,
+ ret = snb_pcode_write_timeout(&dev_priv->uncore,
HSW_PCODE_DE_WRITE_FREQ_REQ,
0x80000000, 150, 2);
if (ret) {
@@ -1774,7 +1774,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(dev_priv, pipe));
if (DISPLAY_VER(dev_priv) >= 11) {
- ret = snb_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
+ ret = snb_pcode_write(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL,
cdclk_config->voltage_level);
} else {
/*
@@ -1783,7 +1783,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
* FIXME: Waiting for the request completion could be delayed
* until the next PCODE request based on BSpec.
*/
- ret = snb_pcode_write_timeout(dev_priv,
+ ret = snb_pcode_write_timeout(&dev_priv->uncore,
HSW_PCODE_DE_WRITE_FREQ_REQ,
cdclk_config->voltage_level,
150, 2);
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 027cc4cc38d9..d9f238edf547 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -40,6 +40,7 @@
#include "intel_ddi.h"
#include "intel_ddi_buf_trans.h"
#include "intel_de.h"
+#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
@@ -4364,7 +4365,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
encoder->get_power_domains = intel_ddi_get_power_domains;
encoder->type = INTEL_OUTPUT_DDI;
- encoder->power_domain = intel_port_to_power_domain(port);
+ encoder->power_domain = intel_display_power_ddi_lanes_domain(dev_priv, port);
encoder->port = port;
encoder->cloneable = 0;
encoder->pipe_mask = ~0;
@@ -4492,8 +4493,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
}
drm_WARN_ON(&dev_priv->drm, port > PORT_I);
- dig_port->ddi_io_power_domain = POWER_DOMAIN_PORT_DDI_A_IO +
- port - PORT_A;
+ dig_port->ddi_io_power_domain = intel_display_power_ddi_io_domain(dev_priv, port);
if (init_dp) {
if (!intel_ddi_init_dp_connector(dig_port))
diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
index 94e64661b4fd..85f58dd3df72 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
@@ -1673,7 +1673,9 @@ void intel_ddi_buf_trans_init(struct intel_encoder *encoder)
encoder->get_buf_trans = skl_get_buf_trans;
} else if (IS_BROADWELL(i915)) {
encoder->get_buf_trans = bdw_get_buf_trans;
- } else {
+ } else if (IS_HASWELL(i915)) {
encoder->get_buf_trans = hsw_get_buf_trans;
+ } else {
+ MISSING_CASE(INTEL_INFO(i915)->platform);
}
}
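MISSING_CASE() turns the previously silent fall-through into a loud warning. As far as I recall, the i915 helper is essentially:

/* from i915_utils.h (paraphrased): warn with the unhandled value */
#define MISSING_CASE(x) WARN(1, "Missing case (%s == %ld)\n", \
			     __stringify(x), (long)(x))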
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 29044cf58b87..9f105250f474 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -51,6 +51,7 @@
#include "display/intel_crt.h"
#include "display/intel_ddi.h"
#include "display/intel_display_debugfs.h"
+#include "display/intel_display_power.h"
#include "display/intel_dp.h"
#include "display/intel_dp_mst.h"
#include "display/intel_dpll.h"
@@ -2157,153 +2158,82 @@ enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
return TC_PORT_1 + port - PORT_C;
}
-enum intel_display_power_domain intel_port_to_power_domain(enum port port)
-{
- switch (port) {
- case PORT_A:
- return POWER_DOMAIN_PORT_DDI_A_LANES;
- case PORT_B:
- return POWER_DOMAIN_PORT_DDI_B_LANES;
- case PORT_C:
- return POWER_DOMAIN_PORT_DDI_C_LANES;
- case PORT_D:
- return POWER_DOMAIN_PORT_DDI_D_LANES;
- case PORT_E:
- return POWER_DOMAIN_PORT_DDI_E_LANES;
- case PORT_F:
- return POWER_DOMAIN_PORT_DDI_F_LANES;
- case PORT_G:
- return POWER_DOMAIN_PORT_DDI_G_LANES;
- case PORT_H:
- return POWER_DOMAIN_PORT_DDI_H_LANES;
- case PORT_I:
- return POWER_DOMAIN_PORT_DDI_I_LANES;
- default:
- MISSING_CASE(port);
- return POWER_DOMAIN_PORT_OTHER;
- }
-}
-
enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port)
{
- if (intel_tc_port_in_tbt_alt_mode(dig_port)) {
- switch (dig_port->aux_ch) {
- case AUX_CH_C:
- return POWER_DOMAIN_AUX_C_TBT;
- case AUX_CH_D:
- return POWER_DOMAIN_AUX_D_TBT;
- case AUX_CH_E:
- return POWER_DOMAIN_AUX_E_TBT;
- case AUX_CH_F:
- return POWER_DOMAIN_AUX_F_TBT;
- case AUX_CH_G:
- return POWER_DOMAIN_AUX_G_TBT;
- case AUX_CH_H:
- return POWER_DOMAIN_AUX_H_TBT;
- case AUX_CH_I:
- return POWER_DOMAIN_AUX_I_TBT;
- default:
- MISSING_CASE(dig_port->aux_ch);
- return POWER_DOMAIN_AUX_C_TBT;
- }
- }
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
-}
+ if (intel_tc_port_in_tbt_alt_mode(dig_port))
+ return intel_display_power_tbt_aux_domain(i915, dig_port->aux_ch);
-/*
- * Converts aux_ch to power_domain without caring about TBT ports for that use
- * intel_aux_power_domain()
- */
-enum intel_display_power_domain
-intel_legacy_aux_to_power_domain(enum aux_ch aux_ch)
-{
- switch (aux_ch) {
- case AUX_CH_A:
- return POWER_DOMAIN_AUX_A;
- case AUX_CH_B:
- return POWER_DOMAIN_AUX_B;
- case AUX_CH_C:
- return POWER_DOMAIN_AUX_C;
- case AUX_CH_D:
- return POWER_DOMAIN_AUX_D;
- case AUX_CH_E:
- return POWER_DOMAIN_AUX_E;
- case AUX_CH_F:
- return POWER_DOMAIN_AUX_F;
- case AUX_CH_G:
- return POWER_DOMAIN_AUX_G;
- case AUX_CH_H:
- return POWER_DOMAIN_AUX_H;
- case AUX_CH_I:
- return POWER_DOMAIN_AUX_I;
- default:
- MISSING_CASE(aux_ch);
- return POWER_DOMAIN_AUX_A;
- }
+ return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch);
}
-static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
+static void get_crtc_power_domains(struct intel_crtc_state *crtc_state,
+ struct intel_power_domain_mask *mask)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
struct drm_encoder *encoder;
enum pipe pipe = crtc->pipe;
- u64 mask;
+
+ bitmap_zero(mask->bits, POWER_DOMAIN_NUM);
if (!crtc_state->hw.active)
- return 0;
+ return;
- mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
- mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(cpu_transcoder));
+ set_bit(POWER_DOMAIN_PIPE(pipe), mask->bits);
+ set_bit(POWER_DOMAIN_TRANSCODER(cpu_transcoder), mask->bits);
if (crtc_state->pch_pfit.enabled ||
crtc_state->pch_pfit.force_thru)
- mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
+ set_bit(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe), mask->bits);
drm_for_each_encoder_mask(encoder, &dev_priv->drm,
crtc_state->uapi.encoder_mask) {
struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
- mask |= BIT_ULL(intel_encoder->power_domain);
+ set_bit(intel_encoder->power_domain, mask->bits);
}
if (HAS_DDI(dev_priv) && crtc_state->has_audio)
- mask |= BIT_ULL(POWER_DOMAIN_AUDIO_MMIO);
+ set_bit(POWER_DOMAIN_AUDIO_MMIO, mask->bits);
if (crtc_state->shared_dpll)
- mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);
+ set_bit(POWER_DOMAIN_DISPLAY_CORE, mask->bits);
if (crtc_state->dsc.compression_enable)
- mask |= BIT_ULL(intel_dsc_power_domain(crtc, cpu_transcoder));
-
- return mask;
+ set_bit(intel_dsc_power_domain(crtc, cpu_transcoder), mask->bits);
}
-static u64
-modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
+static void
+modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state,
+ struct intel_power_domain_mask *old_domains)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum intel_display_power_domain domain;
- u64 domains, new_domains, old_domains;
+ struct intel_power_domain_mask domains, new_domains;
- domains = get_crtc_power_domains(crtc_state);
+ get_crtc_power_domains(crtc_state, &domains);
- new_domains = domains & ~crtc->enabled_power_domains.mask;
- old_domains = crtc->enabled_power_domains.mask & ~domains;
+ bitmap_andnot(new_domains.bits,
+ domains.bits,
+ crtc->enabled_power_domains.mask.bits,
+ POWER_DOMAIN_NUM);
+ bitmap_andnot(old_domains->bits,
+ crtc->enabled_power_domains.mask.bits,
+ domains.bits,
+ POWER_DOMAIN_NUM);
- for_each_power_domain(domain, new_domains)
+ for_each_power_domain(domain, &new_domains)
intel_display_power_get_in_set(dev_priv,
&crtc->enabled_power_domains,
domain);
-
- return old_domains;
}
static void modeset_put_crtc_power_domains(struct intel_crtc *crtc,
- u64 domains)
+ struct intel_power_domain_mask *domains)
{
intel_display_power_put_mask_in_set(to_i915(crtc->base.dev),
&crtc->enabled_power_domains,
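The u64 masks are replaced because the domain enum, expanded in this series with per-TC and per-USBC entries, no longer fits in 64 bits. The mask type assumed by the bitmap_*()/set_bit() helpers used above:

/* Assumed definition from this series (intel_display_power.h): */
struct intel_power_domain_mask {
	DECLARE_BITMAP(bits, POWER_DOMAIN_NUM);
};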
@@ -4974,9 +4904,12 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
mode_changed && !crtc_state->hw.active)
crtc_state->update_wm_post = true;
- if (mode_changed && crtc_state->hw.enable &&
- !drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) {
- ret = intel_dpll_crtc_compute_clock(crtc_state);
+ if (mode_changed) {
+ ret = intel_dpll_crtc_compute_clock(state, crtc);
+ if (ret)
+ return ret;
+
+ ret = intel_dpll_crtc_get_shared_dpll(state, crtc);
if (ret)
return ret;
}
@@ -6969,8 +6902,9 @@ intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct drm_display_mode adjusted_mode =
- crtc_state->hw.adjusted_mode;
+ struct drm_display_mode adjusted_mode;
+
+ drm_mode_init(&adjusted_mode, &crtc_state->hw.adjusted_mode);
if (crtc_state->vrr.enable) {
adjusted_mode.crtc_vtotal = crtc_state->vrr.vmax;
@@ -7028,14 +6962,10 @@ intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
static void intel_modeset_clear_plls(struct intel_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *new_crtc_state;
struct intel_crtc *crtc;
int i;
- if (!dev_priv->dpll_funcs)
- return;
-
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
if (!intel_crtc_needs_modeset(new_crtc_state))
continue;
@@ -8505,7 +8435,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc_state *new_crtc_state, *old_crtc_state;
struct intel_crtc *crtc;
- u64 put_domains[I915_MAX_PIPES] = {};
+ struct intel_power_domain_mask put_domains[I915_MAX_PIPES] = {};
intel_wakeref_t wakeref = 0;
int i;
@@ -8522,9 +8452,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
new_crtc_state, i) {
if (intel_crtc_needs_modeset(new_crtc_state) ||
new_crtc_state->update_pipe) {
-
- put_domains[crtc->pipe] =
- modeset_get_crtc_power_domains(new_crtc_state);
+ modeset_get_crtc_power_domains(new_crtc_state, &put_domains[crtc->pipe]);
}
}
@@ -8624,7 +8552,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
intel_post_plane_update(state, crtc);
- modeset_put_crtc_power_domains(crtc, put_domains[crtc->pipe]);
+ modeset_put_crtc_power_domains(crtc, &put_domains[crtc->pipe]);
intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
@@ -9737,7 +9665,7 @@ int intel_modeset_init_nogem(struct drm_i915_private *i915)
}
intel_plane_possible_crtcs_init(i915);
- intel_shared_dpll_init(dev);
+ intel_shared_dpll_init(i915);
intel_fdi_pll_freq_update(i915);
intel_update_czclk(i915);
@@ -9844,9 +9772,6 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
PLL_REF_INPUT_DREFCLK |
DPLL_VCO_ENABLE;
- intel_de_write(dev_priv, FP0(pipe), fp);
- intel_de_write(dev_priv, FP1(pipe), fp);
-
intel_de_write(dev_priv, HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
intel_de_write(dev_priv, HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
intel_de_write(dev_priv, HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
@@ -9855,6 +9780,9 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
intel_de_write(dev_priv, VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
intel_de_write(dev_priv, PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));
+ intel_de_write(dev_priv, FP0(pipe), fp);
+ intel_de_write(dev_priv, FP1(pipe), fp);
+
/*
* Apparently we need to have VGA mode enabled prior to changing
* the P1/P2 dividers. Otherwise the DPLL will keep using the old
@@ -10465,11 +10393,11 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
for_each_intel_crtc(dev, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
- u64 put_domains;
+ struct intel_power_domain_mask put_domains;
- put_domains = modeset_get_crtc_power_domains(crtc_state);
- if (drm_WARN_ON(dev, put_domains))
- modeset_put_crtc_power_domains(crtc, put_domains);
+ modeset_get_crtc_power_domains(crtc_state, &put_domains);
+ if (drm_WARN_ON(dev, !bitmap_empty(put_domains.bits, POWER_DOMAIN_NUM)))
+ modeset_put_crtc_power_domains(crtc, &put_domains);
}
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index 867fa248f042..187910d94ec6 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -635,11 +635,9 @@ void intel_cpu_transcoder_get_m2_n2(struct intel_crtc *crtc,
void i9xx_crtc_clock_get(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
-enum intel_display_power_domain intel_port_to_power_domain(enum port port);
+enum intel_display_power_domain intel_port_to_power_domain(struct intel_digital_port *dig_port);
enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port);
-enum intel_display_power_domain
-intel_legacy_aux_to_power_domain(enum aux_ch aux_ch);
void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state);
void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state);
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 6a5695008f7c..fb17439bd4f8 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -9,26 +9,27 @@
#include "i915_irq.h"
#include "intel_cdclk.h"
#include "intel_combo_phy.h"
-#include "intel_combo_phy_regs.h"
-#include "intel_crt.h"
#include "intel_de.h"
#include "intel_display_power.h"
+#include "intel_display_power_map.h"
#include "intel_display_power_well.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
-#include "intel_dpio_phy.h"
-#include "intel_dpll.h"
-#include "intel_hotplug.h"
#include "intel_mchbar_regs.h"
#include "intel_pch_refclk.h"
#include "intel_pcode.h"
#include "intel_pm.h"
-#include "intel_pps.h"
#include "intel_snps_phy.h"
-#include "intel_tc.h"
-#include "intel_vga.h"
#include "vlv_sideband.h"
+#define for_each_power_domain_well(__dev_priv, __power_well, __domain) \
+ for_each_power_well(__dev_priv, __power_well) \
+ for_each_if(test_bit((__domain), (__power_well)->domains.bits))
+
+#define for_each_power_domain_well_reverse(__dev_priv, __power_well, __domain) \
+ for_each_power_well_reverse(__dev_priv, __power_well) \
+ for_each_if(test_bit((__domain), (__power_well)->domains.bits))
+
const char *
intel_display_power_domain_str(enum intel_display_power_domain domain)
{
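With domains stored as a bitmap per power well, the iteration macros test a single bit instead of ANDing u64 masks (compare the old BIT_ULL(domain) caller below). A usage sketch:

	/* Sketch: visit only the power wells that back a given domain. */
	struct i915_power_well *power_well;

	for_each_power_domain_well(dev_priv, power_well, POWER_DOMAIN_PIPE_A)
		drm_dbg_kms(&dev_priv->drm, "%s backs PIPE_A\n",
			    intel_power_well_name(power_well));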
@@ -43,14 +44,14 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
return "PIPE_C";
case POWER_DOMAIN_PIPE_D:
return "PIPE_D";
- case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
- return "PIPE_A_PANEL_FITTER";
- case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
- return "PIPE_B_PANEL_FITTER";
- case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
- return "PIPE_C_PANEL_FITTER";
- case POWER_DOMAIN_PIPE_D_PANEL_FITTER:
- return "PIPE_D_PANEL_FITTER";
+ case POWER_DOMAIN_PIPE_PANEL_FITTER_A:
+ return "PIPE_PANEL_FITTER_A";
+ case POWER_DOMAIN_PIPE_PANEL_FITTER_B:
+ return "PIPE_PANEL_FITTER_B";
+ case POWER_DOMAIN_PIPE_PANEL_FITTER_C:
+ return "PIPE_PANEL_FITTER_C";
+ case POWER_DOMAIN_PIPE_PANEL_FITTER_D:
+ return "PIPE_PANEL_FITTER_D";
case POWER_DOMAIN_TRANSCODER_A:
return "TRANSCODER_A";
case POWER_DOMAIN_TRANSCODER_B:
@@ -67,42 +68,54 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
return "TRANSCODER_DSI_C";
case POWER_DOMAIN_TRANSCODER_VDSC_PW2:
return "TRANSCODER_VDSC_PW2";
- case POWER_DOMAIN_PORT_DDI_A_LANES:
- return "PORT_DDI_A_LANES";
- case POWER_DOMAIN_PORT_DDI_B_LANES:
- return "PORT_DDI_B_LANES";
- case POWER_DOMAIN_PORT_DDI_C_LANES:
- return "PORT_DDI_C_LANES";
- case POWER_DOMAIN_PORT_DDI_D_LANES:
- return "PORT_DDI_D_LANES";
- case POWER_DOMAIN_PORT_DDI_E_LANES:
- return "PORT_DDI_E_LANES";
- case POWER_DOMAIN_PORT_DDI_F_LANES:
- return "PORT_DDI_F_LANES";
- case POWER_DOMAIN_PORT_DDI_G_LANES:
- return "PORT_DDI_G_LANES";
- case POWER_DOMAIN_PORT_DDI_H_LANES:
- return "PORT_DDI_H_LANES";
- case POWER_DOMAIN_PORT_DDI_I_LANES:
- return "PORT_DDI_I_LANES";
- case POWER_DOMAIN_PORT_DDI_A_IO:
- return "PORT_DDI_A_IO";
- case POWER_DOMAIN_PORT_DDI_B_IO:
- return "PORT_DDI_B_IO";
- case POWER_DOMAIN_PORT_DDI_C_IO:
- return "PORT_DDI_C_IO";
- case POWER_DOMAIN_PORT_DDI_D_IO:
- return "PORT_DDI_D_IO";
- case POWER_DOMAIN_PORT_DDI_E_IO:
- return "PORT_DDI_E_IO";
- case POWER_DOMAIN_PORT_DDI_F_IO:
- return "PORT_DDI_F_IO";
- case POWER_DOMAIN_PORT_DDI_G_IO:
- return "PORT_DDI_G_IO";
- case POWER_DOMAIN_PORT_DDI_H_IO:
- return "PORT_DDI_H_IO";
- case POWER_DOMAIN_PORT_DDI_I_IO:
- return "PORT_DDI_I_IO";
+ case POWER_DOMAIN_PORT_DDI_LANES_A:
+ return "PORT_DDI_LANES_A";
+ case POWER_DOMAIN_PORT_DDI_LANES_B:
+ return "PORT_DDI_LANES_B";
+ case POWER_DOMAIN_PORT_DDI_LANES_C:
+ return "PORT_DDI_LANES_C";
+ case POWER_DOMAIN_PORT_DDI_LANES_D:
+ return "PORT_DDI_LANES_D";
+ case POWER_DOMAIN_PORT_DDI_LANES_E:
+ return "PORT_DDI_LANES_E";
+ case POWER_DOMAIN_PORT_DDI_LANES_F:
+ return "PORT_DDI_LANES_F";
+ case POWER_DOMAIN_PORT_DDI_LANES_TC1:
+ return "PORT_DDI_LANES_TC1";
+ case POWER_DOMAIN_PORT_DDI_LANES_TC2:
+ return "PORT_DDI_LANES_TC2";
+ case POWER_DOMAIN_PORT_DDI_LANES_TC3:
+ return "PORT_DDI_LANES_TC3";
+ case POWER_DOMAIN_PORT_DDI_LANES_TC4:
+ return "PORT_DDI_LANES_TC4";
+ case POWER_DOMAIN_PORT_DDI_LANES_TC5:
+ return "PORT_DDI_LANES_TC5";
+ case POWER_DOMAIN_PORT_DDI_LANES_TC6:
+ return "PORT_DDI_LANES_TC6";
+ case POWER_DOMAIN_PORT_DDI_IO_A:
+ return "PORT_DDI_IO_A";
+ case POWER_DOMAIN_PORT_DDI_IO_B:
+ return "PORT_DDI_IO_B";
+ case POWER_DOMAIN_PORT_DDI_IO_C:
+ return "PORT_DDI_IO_C";
+ case POWER_DOMAIN_PORT_DDI_IO_D:
+ return "PORT_DDI_IO_D";
+ case POWER_DOMAIN_PORT_DDI_IO_E:
+ return "PORT_DDI_IO_E";
+ case POWER_DOMAIN_PORT_DDI_IO_F:
+ return "PORT_DDI_IO_F";
+ case POWER_DOMAIN_PORT_DDI_IO_TC1:
+ return "PORT_DDI_IO_TC1";
+ case POWER_DOMAIN_PORT_DDI_IO_TC2:
+ return "PORT_DDI_IO_TC2";
+ case POWER_DOMAIN_PORT_DDI_IO_TC3:
+ return "PORT_DDI_IO_TC3";
+ case POWER_DOMAIN_PORT_DDI_IO_TC4:
+ return "PORT_DDI_IO_TC4";
+ case POWER_DOMAIN_PORT_DDI_IO_TC5:
+ return "PORT_DDI_IO_TC5";
+ case POWER_DOMAIN_PORT_DDI_IO_TC6:
+ return "PORT_DDI_IO_TC6";
case POWER_DOMAIN_PORT_DSI:
return "PORT_DSI";
case POWER_DOMAIN_PORT_CRT:
@@ -127,28 +140,32 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
return "AUX_E";
case POWER_DOMAIN_AUX_F:
return "AUX_F";
- case POWER_DOMAIN_AUX_G:
- return "AUX_G";
- case POWER_DOMAIN_AUX_H:
- return "AUX_H";
- case POWER_DOMAIN_AUX_I:
- return "AUX_I";
+ case POWER_DOMAIN_AUX_USBC1:
+ return "AUX_USBC1";
+ case POWER_DOMAIN_AUX_USBC2:
+ return "AUX_USBC2";
+ case POWER_DOMAIN_AUX_USBC3:
+ return "AUX_USBC3";
+ case POWER_DOMAIN_AUX_USBC4:
+ return "AUX_USBC4";
+ case POWER_DOMAIN_AUX_USBC5:
+ return "AUX_USBC5";
+ case POWER_DOMAIN_AUX_USBC6:
+ return "AUX_USBC6";
case POWER_DOMAIN_AUX_IO_A:
return "AUX_IO_A";
- case POWER_DOMAIN_AUX_C_TBT:
- return "AUX_C_TBT";
- case POWER_DOMAIN_AUX_D_TBT:
- return "AUX_D_TBT";
- case POWER_DOMAIN_AUX_E_TBT:
- return "AUX_E_TBT";
- case POWER_DOMAIN_AUX_F_TBT:
- return "AUX_F_TBT";
- case POWER_DOMAIN_AUX_G_TBT:
- return "AUX_G_TBT";
- case POWER_DOMAIN_AUX_H_TBT:
- return "AUX_H_TBT";
- case POWER_DOMAIN_AUX_I_TBT:
- return "AUX_I_TBT";
+ case POWER_DOMAIN_AUX_TBT1:
+ return "AUX_TBT1";
+ case POWER_DOMAIN_AUX_TBT2:
+ return "AUX_TBT2";
+ case POWER_DOMAIN_AUX_TBT3:
+ return "AUX_TBT3";
+ case POWER_DOMAIN_AUX_TBT4:
+ return "AUX_TBT4";
+ case POWER_DOMAIN_AUX_TBT5:
+ return "AUX_TBT5";
+ case POWER_DOMAIN_AUX_TBT6:
+ return "AUX_TBT6";
case POWER_DOMAIN_GMBUS:
return "GMBUS";
case POWER_DOMAIN_INIT:
@@ -190,7 +207,7 @@ bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
is_enabled = true;
- for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) {
+ for_each_power_domain_well_reverse(dev_priv, power_well, domain) {
if (intel_power_well_is_always_on(power_well))
continue;
@@ -235,604 +252,6 @@ bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
return ret;
}
-/*
- * Starting with Haswell, we have a "Power Down Well" that can be turned off
- * when not needed anymore. We have 4 registers that can request the power well
- * to be enabled, and it will only be disabled if none of the registers is
- * requesting it to be enabled.
- */
-static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
- u8 irq_pipe_mask, bool has_vga)
-{
- if (has_vga)
- intel_vga_reset_io_mem(dev_priv);
-
- if (irq_pipe_mask)
- gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
-}
-
-static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
- u8 irq_pipe_mask)
-{
- if (irq_pipe_mask)
- gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
-}
-
-#define ICL_AUX_PW_TO_CH(pw_idx) \
- ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
-
-#define ICL_TBT_AUX_PW_TO_CH(pw_idx) \
- ((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)
-
-static enum aux_ch icl_aux_pw_to_ch(const struct i915_power_well *power_well)
-{
- int pw_idx = power_well->desc->hsw.idx;
-
- return power_well->desc->hsw.is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
- ICL_AUX_PW_TO_CH(pw_idx);
-}
-
-static struct intel_digital_port *
-aux_ch_to_digital_port(struct drm_i915_private *dev_priv,
- enum aux_ch aux_ch)
-{
- struct intel_digital_port *dig_port = NULL;
- struct intel_encoder *encoder;
-
- for_each_intel_encoder(&dev_priv->drm, encoder) {
- /* We'll check the MST primary port */
- if (encoder->type == INTEL_OUTPUT_DP_MST)
- continue;
-
- dig_port = enc_to_dig_port(encoder);
- if (!dig_port)
- continue;
-
- if (dig_port->aux_ch != aux_ch) {
- dig_port = NULL;
- continue;
- }
-
- break;
- }
-
- return dig_port;
-}
-
-static enum phy icl_aux_pw_to_phy(struct drm_i915_private *i915,
- const struct i915_power_well *power_well)
-{
- enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
- struct intel_digital_port *dig_port = aux_ch_to_digital_port(i915, aux_ch);
-
- return intel_port_to_phy(i915, dig_port->base.port);
-}
-
-static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well,
- bool timeout_expected)
-{
- const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
- int pw_idx = power_well->desc->hsw.idx;
- int enable_delay = power_well->desc->hsw.fixed_enable_delay;
-
- /*
- * For some power wells we're not supposed to watch the status bit for
- * an ack, but rather just wait a fixed amount of time and then
- * proceed. This is only used on DG2.
- */
- if (IS_DG2(dev_priv) && enable_delay) {
- usleep_range(enable_delay, 2 * enable_delay);
- return;
- }
-
- /* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
- if (intel_de_wait_for_set(dev_priv, regs->driver,
- HSW_PWR_WELL_CTL_STATE(pw_idx), 1)) {
- drm_dbg_kms(&dev_priv->drm, "%s power well enable timeout\n",
- intel_power_well_name(power_well));
-
- drm_WARN_ON(&dev_priv->drm, !timeout_expected);
-
- }
-}
-
-static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
- const struct i915_power_well_regs *regs,
- int pw_idx)
-{
- u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
- u32 ret;
-
- ret = intel_de_read(dev_priv, regs->bios) & req_mask ? 1 : 0;
- ret |= intel_de_read(dev_priv, regs->driver) & req_mask ? 2 : 0;
- if (regs->kvmr.reg)
- ret |= intel_de_read(dev_priv, regs->kvmr) & req_mask ? 4 : 0;
- ret |= intel_de_read(dev_priv, regs->debug) & req_mask ? 8 : 0;
-
- return ret;
-}
-
-static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
- int pw_idx = power_well->desc->hsw.idx;
- bool disabled;
- u32 reqs;
-
- /*
- * Bspec doesn't require waiting for PWs to get disabled, but still do
- * this for paranoia. The known cases where a PW will be forced on:
- * - a KVMR request on any power well via the KVMR request register
- * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
- * DEBUG request registers
- * Skip the wait in case any of the request bits are set and print a
- * diagnostic message.
- */
- wait_for((disabled = !(intel_de_read(dev_priv, regs->driver) &
- HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
- (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
- if (disabled)
- return;
-
- drm_dbg_kms(&dev_priv->drm,
- "%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
- intel_power_well_name(power_well),
- !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
-}
-
-static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
- enum skl_power_gate pg)
-{
- /* Timeout 5us for PG#0, for other PGs 1us */
- drm_WARN_ON(&dev_priv->drm,
- intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS,
- SKL_FUSE_PG_DIST_STATUS(pg), 1));
-}
-
-static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
- int pw_idx = power_well->desc->hsw.idx;
- u32 val;
-
- if (power_well->desc->hsw.has_fuses) {
- enum skl_power_gate pg;
-
- pg = DISPLAY_VER(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
- SKL_PW_CTL_IDX_TO_PG(pw_idx);
-
- /* Wa_16013190616:adlp */
- if (IS_ALDERLAKE_P(dev_priv) && pg == SKL_PG1)
- intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, 0, DISABLE_FLR_SRC);
-
- /*
- * For PW1 we have to wait both for the PW0/PG0 fuse state
- * before enabling the power well and PW1/PG1's own fuse
- * state after the enabling. For all other power wells with
- * fuses we only have to wait for that PW/PG's fuse state
- * after the enabling.
- */
- if (pg == SKL_PG1)
- gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
- }
-
- val = intel_de_read(dev_priv, regs->driver);
- intel_de_write(dev_priv, regs->driver,
- val | HSW_PWR_WELL_CTL_REQ(pw_idx));
-
- hsw_wait_for_power_well_enable(dev_priv, power_well, false);
-
- if (power_well->desc->hsw.has_fuses) {
- enum skl_power_gate pg;
-
- pg = DISPLAY_VER(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
- SKL_PW_CTL_IDX_TO_PG(pw_idx);
- gen9_wait_for_power_well_fuses(dev_priv, pg);
- }
-
- hsw_power_well_post_enable(dev_priv,
- power_well->desc->hsw.irq_pipe_mask,
- power_well->desc->hsw.has_vga);
-}
-
-static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
- int pw_idx = power_well->desc->hsw.idx;
- u32 val;
-
- hsw_power_well_pre_disable(dev_priv,
- power_well->desc->hsw.irq_pipe_mask);
-
- val = intel_de_read(dev_priv, regs->driver);
- intel_de_write(dev_priv, regs->driver,
- val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
- hsw_wait_for_power_well_disable(dev_priv, power_well);
-}
-
-static void
-icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
- int pw_idx = power_well->desc->hsw.idx;
- enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
- u32 val;
-
- drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
-
- val = intel_de_read(dev_priv, regs->driver);
- intel_de_write(dev_priv, regs->driver,
- val | HSW_PWR_WELL_CTL_REQ(pw_idx));
-
- if (DISPLAY_VER(dev_priv) < 12) {
- val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
- intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
- val | ICL_LANE_ENABLE_AUX);
- }
-
- hsw_wait_for_power_well_enable(dev_priv, power_well, false);
-
- /* Display WA #1178: icl */
- if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
- !intel_bios_is_port_edp(dev_priv, (enum port)phy)) {
- val = intel_de_read(dev_priv, ICL_AUX_ANAOVRD1(pw_idx));
- val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
- intel_de_write(dev_priv, ICL_AUX_ANAOVRD1(pw_idx), val);
- }
-}
-
-static void
-icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
- int pw_idx = power_well->desc->hsw.idx;
- enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
- u32 val;
-
- drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
-
- val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
- intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
- val & ~ICL_LANE_ENABLE_AUX);
-
- val = intel_de_read(dev_priv, regs->driver);
- intel_de_write(dev_priv, regs->driver,
- val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
-
- hsw_wait_for_power_well_disable(dev_priv, power_well);
-}
-
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
-
-static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well,
- struct intel_digital_port *dig_port)
-{
- if (drm_WARN_ON(&dev_priv->drm, !dig_port))
- return;
-
- if (DISPLAY_VER(dev_priv) == 11 && intel_tc_cold_requires_aux_pw(dig_port))
- return;
-
- drm_WARN_ON(&dev_priv->drm, !intel_tc_port_ref_held(dig_port));
-}
-
-#else
-
-static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well,
- struct intel_digital_port *dig_port)
-{
-}
-
-#endif
-
-#define TGL_AUX_PW_TO_TC_PORT(pw_idx) ((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1)
-
-static void icl_tc_cold_exit(struct drm_i915_private *i915)
-{
- int ret, tries = 0;
-
- while (1) {
- ret = snb_pcode_write_timeout(i915, ICL_PCODE_EXIT_TCCOLD, 0,
- 250, 1);
- if (ret != -EAGAIN || ++tries == 3)
- break;
- msleep(1);
- }
-
- /* Spec states that TC cold exit can take up to 1ms to complete */
- if (!ret)
- msleep(1);
-
- /* TODO: turn failure into a error as soon i915 CI updates ICL IFWI */
- drm_dbg_kms(&i915->drm, "TC cold block %s\n", ret ? "failed" :
- "succeeded");
-}
-
-static void
-icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
- struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);
- const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
- bool is_tbt = power_well->desc->hsw.is_tc_tbt;
- bool timeout_expected;
- u32 val;
-
- icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);
-
- val = intel_de_read(dev_priv, DP_AUX_CH_CTL(aux_ch));
- val &= ~DP_AUX_CH_CTL_TBT_IO;
- if (is_tbt)
- val |= DP_AUX_CH_CTL_TBT_IO;
- intel_de_write(dev_priv, DP_AUX_CH_CTL(aux_ch), val);
-
- val = intel_de_read(dev_priv, regs->driver);
- intel_de_write(dev_priv, regs->driver,
- val | HSW_PWR_WELL_CTL_REQ(power_well->desc->hsw.idx));
-
- /*
- * An AUX timeout is expected if the TBT DP tunnel is down,
- * or need to enable AUX on a legacy TypeC port as part of the TC-cold
- * exit sequence.
- */
- timeout_expected = is_tbt || intel_tc_cold_requires_aux_pw(dig_port);
- if (DISPLAY_VER(dev_priv) == 11 && intel_tc_cold_requires_aux_pw(dig_port))
- icl_tc_cold_exit(dev_priv);
-
- hsw_wait_for_power_well_enable(dev_priv, power_well, timeout_expected);
-
- if (DISPLAY_VER(dev_priv) >= 12 && !is_tbt) {
- enum tc_port tc_port;
-
- tc_port = TGL_AUX_PW_TO_TC_PORT(power_well->desc->hsw.idx);
- intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
- HIP_INDEX_VAL(tc_port, 0x2));
-
- if (intel_de_wait_for_set(dev_priv, DKL_CMN_UC_DW_27(tc_port),
- DKL_CMN_UC_DW27_UC_HEALTH, 1))
- drm_warn(&dev_priv->drm,
- "Timeout waiting TC uC health\n");
- }
-}
-
-static void
-icl_aux_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
-
- if (intel_phy_is_tc(dev_priv, phy))
- return icl_tc_phy_aux_power_well_enable(dev_priv, power_well);
- else if (IS_ICELAKE(dev_priv))
- return icl_combo_phy_aux_power_well_enable(dev_priv,
- power_well);
- else
- return hsw_power_well_enable(dev_priv, power_well);
-}
-
-static void
-icl_aux_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
-
- if (intel_phy_is_tc(dev_priv, phy))
- return hsw_power_well_disable(dev_priv, power_well);
- else if (IS_ICELAKE(dev_priv))
- return icl_combo_phy_aux_power_well_disable(dev_priv,
- power_well);
- else
- return hsw_power_well_disable(dev_priv, power_well);
-}
-
-/*
- * We should only use the power well if we explicitly asked the hardware to
- * enable it, so check if it's enabled and also check if we've requested it to
- * be enabled.
- */
-static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
- enum i915_power_well_id id = power_well->desc->id;
- int pw_idx = power_well->desc->hsw.idx;
- u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
- HSW_PWR_WELL_CTL_STATE(pw_idx);
- u32 val;
-
- val = intel_de_read(dev_priv, regs->driver);
-
- /*
- * On GEN9 big core due to a DMC bug the driver's request bits for PW1
- * and the MISC_IO PW will be not restored, so check instead for the
- * BIOS's own request bits, which are forced-on for these power wells
- * when exiting DC5/6.
- */
- if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv) &&
- (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
- val |= intel_de_read(dev_priv, regs->bios);
-
- return (val & mask) == mask;
-}
-
-static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
-{
- drm_WARN_ONCE(&dev_priv->drm,
- (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC9),
- "DC9 already programmed to be enabled.\n");
- drm_WARN_ONCE(&dev_priv->drm,
- intel_de_read(dev_priv, DC_STATE_EN) &
- DC_STATE_EN_UPTO_DC5,
- "DC5 still not disabled to enable DC9.\n");
- drm_WARN_ONCE(&dev_priv->drm,
- intel_de_read(dev_priv, HSW_PWR_WELL_CTL2) &
- HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
- "Power well 2 on.\n");
- drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
- "Interrupts not disabled yet.\n");
-
- /*
- * TODO: check for the following to verify the conditions to enter DC9
- * state are satisfied:
- * 1] Check relevant display engine registers to verify if mode set
- * disable sequence was followed.
- * 2] Check if display uninitialize sequence is initialized.
- */
-}
-
-static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
-{
- drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
- "Interrupts not disabled yet.\n");
- drm_WARN_ONCE(&dev_priv->drm,
- intel_de_read(dev_priv, DC_STATE_EN) &
- DC_STATE_EN_UPTO_DC5,
- "DC5 still not disabled.\n");
-
- /*
- * TODO: check for the following to verify DC9 state was indeed
- * entered before programming to disable it:
- * 1] Check relevant display engine registers to verify if mode
- * set disable sequence was followed.
- * 2] Check if display uninitialize sequence is initialized.
- */
-}
-
-static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
- u32 state)
-{
- int rewrites = 0;
- int rereads = 0;
- u32 v;
-
- intel_de_write(dev_priv, DC_STATE_EN, state);
-
- /* It has been observed that disabling the dc6 state sometimes
- * doesn't stick and dmc keeps returning old value. Make sure
- * the write really sticks enough times and also force rewrite until
- * we are confident that state is exactly what we want.
- */
- do {
- v = intel_de_read(dev_priv, DC_STATE_EN);
-
- if (v != state) {
- intel_de_write(dev_priv, DC_STATE_EN, state);
- rewrites++;
- rereads = 0;
- } else if (rereads++ > 5) {
- break;
- }
-
- } while (rewrites < 100);
-
- if (v != state)
- drm_err(&dev_priv->drm,
- "Writing dc state to 0x%x failed, now 0x%x\n",
- state, v);
-
- /* Most of the times we need one retry, avoid spam */
- if (rewrites > 1)
- drm_dbg_kms(&dev_priv->drm,
- "Rewrote dc state to 0x%x %d times\n",
- state, rewrites);
-}
-
-static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
-{
- u32 mask;
-
- mask = DC_STATE_EN_UPTO_DC5;
-
- if (DISPLAY_VER(dev_priv) >= 12)
- mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6
- | DC_STATE_EN_DC9;
- else if (DISPLAY_VER(dev_priv) == 11)
- mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
- else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
- mask |= DC_STATE_EN_DC9;
- else
- mask |= DC_STATE_EN_UPTO_DC6;
-
- return mask;
-}
-
-static void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
-{
- u32 val;
-
- if (!HAS_DISPLAY(dev_priv))
- return;
-
- val = intel_de_read(dev_priv, DC_STATE_EN) & gen9_dc_mask(dev_priv);
-
- drm_dbg_kms(&dev_priv->drm,
- "Resetting DC state tracking from %02x to %02x\n",
- dev_priv->dmc.dc_state, val);
- dev_priv->dmc.dc_state = val;
-}
-
-/**
- * gen9_set_dc_state - set target display C power state
- * @dev_priv: i915 device instance
- * @state: target DC power state
- * - DC_STATE_DISABLE
- * - DC_STATE_EN_UPTO_DC5
- * - DC_STATE_EN_UPTO_DC6
- * - DC_STATE_EN_DC9
- *
- * Signal to DMC firmware/HW the target DC power state passed in @state.
- * DMC/HW can turn off individual display clocks and power rails when entering
- * a deeper DC power state (higher in number) and turns these back when exiting
- * that state to a shallower power state (lower in number). The HW will decide
- * when to actually enter a given state on an on-demand basis, for instance
- * depending on the active state of display pipes. The state of display
- * registers backed by affected power rails are saved/restored as needed.
- *
- * Based on the above enabling a deeper DC power state is asynchronous wrt.
- * enabling it. Disabling a deeper power state is synchronous: for instance
- * setting %DC_STATE_DISABLE won't complete until all HW resources are turned
- * back on and register state is restored. This is guaranteed by the MMIO write
- * to DC_STATE_EN blocking until the state is restored.
- */
-static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
-{
- u32 val;
- u32 mask;
-
- if (!HAS_DISPLAY(dev_priv))
- return;
-
- if (drm_WARN_ON_ONCE(&dev_priv->drm,
- state & ~dev_priv->dmc.allowed_dc_mask))
- state &= dev_priv->dmc.allowed_dc_mask;
-
- val = intel_de_read(dev_priv, DC_STATE_EN);
- mask = gen9_dc_mask(dev_priv);
- drm_dbg_kms(&dev_priv->drm, "Setting DC state from %02x to %02x\n",
- val & mask, state);
-
- /* Check if DMC is ignoring our DC state requests */
- if ((val & mask) != dev_priv->dmc.dc_state)
- drm_err(&dev_priv->drm, "DC state mismatch (0x%x -> 0x%x)\n",
- dev_priv->dmc.dc_state, val & mask);
-
- val &= ~mask;
- val |= state;
-
- gen9_write_dc_state(dev_priv, val);
-
- dev_priv->dmc.dc_state = val & mask;
-}
-
static u32
sanitize_target_dc_state(struct drm_i915_private *dev_priv,
u32 target_dc_state)
@@ -858,53 +277,6 @@ sanitize_target_dc_state(struct drm_i915_private *dev_priv,
return target_dc_state;
}
-static void tgl_enable_dc3co(struct drm_i915_private *dev_priv)
-{
- drm_dbg_kms(&dev_priv->drm, "Enabling DC3CO\n");
- gen9_set_dc_state(dev_priv, DC_STATE_EN_DC3CO);
-}
-
-static void tgl_disable_dc3co(struct drm_i915_private *dev_priv)
-{
- u32 val;
-
- drm_dbg_kms(&dev_priv->drm, "Disabling DC3CO\n");
- val = intel_de_read(dev_priv, DC_STATE_EN);
- val &= ~DC_STATE_DC3CO_STATUS;
- intel_de_write(dev_priv, DC_STATE_EN, val);
- gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
- /*
- * Delay of 200us DC3CO Exit time B.Spec 49196
- */
- usleep_range(200, 210);
-}
-
-static void bxt_enable_dc9(struct drm_i915_private *dev_priv)
-{
- assert_can_enable_dc9(dev_priv);
-
- drm_dbg_kms(&dev_priv->drm, "Enabling DC9\n");
- /*
- * Power sequencer reset is not needed on
- * platforms with South Display Engine on PCH,
- * because PPS registers are always on.
- */
- if (!HAS_PCH_SPLIT(dev_priv))
- intel_pps_reset_all(dev_priv);
- gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
-}
-
-static void bxt_disable_dc9(struct drm_i915_private *dev_priv)
-{
- assert_can_disable_dc9(dev_priv);
-
- drm_dbg_kms(&dev_priv->drm, "Disabling DC9\n");
-
- gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
-
- intel_pps_unlock_regs_wa(dev_priv);
-}
-
/**
* intel_display_power_set_target_dc_state - Set target dc state.
* @dev_priv: i915 device
@@ -949,916 +321,15 @@ unlock:
mutex_unlock(&power_domains->lock);
}
-static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
-{
- enum i915_power_well_id high_pg;
-
- /* Power wells at this level and above must be disabled for DC5 entry */
- if (DISPLAY_VER(dev_priv) == 12)
- high_pg = ICL_DISP_PW_3;
- else
- high_pg = SKL_DISP_PW_2;
-
- drm_WARN_ONCE(&dev_priv->drm,
- intel_display_power_well_is_enabled(dev_priv, high_pg),
- "Power wells above platform's DC5 limit still enabled.\n");
-
- drm_WARN_ONCE(&dev_priv->drm,
- (intel_de_read(dev_priv, DC_STATE_EN) &
- DC_STATE_EN_UPTO_DC5),
- "DC5 already programmed to be enabled.\n");
- assert_rpm_wakelock_held(&dev_priv->runtime_pm);
-
- assert_dmc_loaded(dev_priv);
-}
-
-static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
-{
- assert_can_enable_dc5(dev_priv);
-
- drm_dbg_kms(&dev_priv->drm, "Enabling DC5\n");
-
- /* Wa Display #1183: skl,kbl,cfl */
- if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
- intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
- intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);
-
- gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
-}
-
-static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
-{
- drm_WARN_ONCE(&dev_priv->drm,
- intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
- "Backlight is not disabled.\n");
- drm_WARN_ONCE(&dev_priv->drm,
- (intel_de_read(dev_priv, DC_STATE_EN) &
- DC_STATE_EN_UPTO_DC6),
- "DC6 already programmed to be enabled.\n");
-
- assert_dmc_loaded(dev_priv);
-}
-
-static void skl_enable_dc6(struct drm_i915_private *dev_priv)
-{
- assert_can_enable_dc6(dev_priv);
-
- drm_dbg_kms(&dev_priv->drm, "Enabling DC6\n");
-
- /* Wa Display #1183: skl,kbl,cfl */
- if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
- intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
- intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);
-
- gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
-}
-
-static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
- int pw_idx = power_well->desc->hsw.idx;
- u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
- u32 bios_req = intel_de_read(dev_priv, regs->bios);
-
- /* Take over the request bit if set by BIOS. */
- if (bios_req & mask) {
- u32 drv_req = intel_de_read(dev_priv, regs->driver);
-
- if (!(drv_req & mask))
- intel_de_write(dev_priv, regs->driver, drv_req | mask);
- intel_de_write(dev_priv, regs->bios, bios_req & ~mask);
- }
-}
-
-static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy);
-}
-
-static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy);
-}
-
-static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy);
-}
-
-static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
-{
- struct i915_power_well *power_well;
-
- power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
- if (intel_power_well_refcount(power_well) > 0)
- bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
-
- power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
- if (intel_power_well_refcount(power_well) > 0)
- bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);
-
- if (IS_GEMINILAKE(dev_priv)) {
- power_well = lookup_power_well(dev_priv,
- GLK_DISP_PW_DPIO_CMN_C);
- if (intel_power_well_refcount(power_well) > 0)
- bxt_ddi_phy_verify_state(dev_priv,
- power_well->desc->bxt.phy);
- }
-}
-
-static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- return ((intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 &&
- (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0);
-}
-
-static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
-{
- u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(dev_priv);
- u8 enabled_dbuf_slices = dev_priv->dbuf.enabled_slices;
-
- drm_WARN(&dev_priv->drm,
- hw_enabled_dbuf_slices != enabled_dbuf_slices,
- "Unexpected DBuf power power state (0x%08x, expected 0x%08x)\n",
- hw_enabled_dbuf_slices,
- enabled_dbuf_slices);
-}
-
-static void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
-{
- struct intel_cdclk_config cdclk_config = {};
-
- if (dev_priv->dmc.target_dc_state == DC_STATE_EN_DC3CO) {
- tgl_disable_dc3co(dev_priv);
- return;
- }
-
- gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
-
- if (!HAS_DISPLAY(dev_priv))
- return;
-
- intel_cdclk_get_cdclk(dev_priv, &cdclk_config);
- /* Can't read out voltage_level so can't use intel_cdclk_changed() */
- drm_WARN_ON(&dev_priv->drm,
- intel_cdclk_needs_modeset(&dev_priv->cdclk.hw,
- &cdclk_config));
-
- gen9_assert_dbuf_enabled(dev_priv);
-
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
- bxt_verify_ddi_phy_power_wells(dev_priv);
-
- if (DISPLAY_VER(dev_priv) >= 11)
- /*
- * DMC retains HW context only for port A; the HW context of the
- * other combo PHYs (port B onwards) is lost after DC transitions,
- * so we need to restore it manually.
- */
- intel_combo_phy_init(dev_priv);
-}
-
-static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- gen9_disable_dc_states(dev_priv);
-}
-
-static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- if (!intel_dmc_has_payload(dev_priv))
- return;
-
- switch (dev_priv->dmc.target_dc_state) {
- case DC_STATE_EN_DC3CO:
- tgl_enable_dc3co(dev_priv);
- break;
- case DC_STATE_EN_UPTO_DC6:
- skl_enable_dc6(dev_priv);
- break;
- case DC_STATE_EN_UPTO_DC5:
- gen9_enable_dc5(dev_priv);
- break;
- }
-}
-
-static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
-}
-
-static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
-}
-
-static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- return true;
-}
-
-static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- if ((intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
- i830_enable_pipe(dev_priv, PIPE_A);
- if ((intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
- i830_enable_pipe(dev_priv, PIPE_B);
-}
-
-static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- i830_disable_pipe(dev_priv, PIPE_B);
- i830_disable_pipe(dev_priv, PIPE_A);
-}
-
-static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- return intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
- intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
-}
-
-static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- if (intel_power_well_refcount(power_well) > 0)
- i830_pipes_power_well_enable(dev_priv, power_well);
- else
- i830_pipes_power_well_disable(dev_priv, power_well);
-}
-
-static void vlv_set_power_well(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well, bool enable)
-{
- int pw_idx = power_well->desc->vlv.idx;
- u32 mask;
- u32 state;
- u32 ctrl;
-
- mask = PUNIT_PWRGT_MASK(pw_idx);
- state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
- PUNIT_PWRGT_PWR_GATE(pw_idx);
-
- vlv_punit_get(dev_priv);
-
-#define COND \
- ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
-
- if (COND)
- goto out;
-
- ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
- ctrl &= ~mask;
- ctrl |= state;
- vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
-
- if (wait_for(COND, 100))
- drm_err(&dev_priv->drm,
- "timeout setting power well state %08x (%08x)\n",
- state,
- vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
-
-#undef COND
-
-out:
- vlv_punit_put(dev_priv);
-}
-
-static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- vlv_set_power_well(dev_priv, power_well, true);
-}
-
-static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- vlv_set_power_well(dev_priv, power_well, false);
-}
-
-static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- int pw_idx = power_well->desc->vlv.idx;
- bool enabled = false;
- u32 mask;
- u32 state;
- u32 ctrl;
-
- mask = PUNIT_PWRGT_MASK(pw_idx);
- ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);
-
- vlv_punit_get(dev_priv);
-
- state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
- /*
- * We only ever set the power-on and power-gate states, anything
- * else is unexpected.
- */
- drm_WARN_ON(&dev_priv->drm, state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
- state != PUNIT_PWRGT_PWR_GATE(pw_idx));
- if (state == ctrl)
- enabled = true;
-
- /*
- * A transient state at this point would mean some unexpected party
- * is poking at the power controls too.
- */
- ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
- drm_WARN_ON(&dev_priv->drm, ctrl != state);
-
- vlv_punit_put(dev_priv);
-
- return enabled;
-}
-
-static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
-{
- u32 val;
-
- /*
- * On driver load, a pipe may be active and driving a DSI display.
- * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
- * (and never recovering) in this case. intel_dsi_post_disable() will
- * clear it when we turn off the display.
- */
- val = intel_de_read(dev_priv, DSPCLK_GATE_D);
- val &= DPOUNIT_CLOCK_GATE_DISABLE;
- val |= VRHUNIT_CLOCK_GATE_DISABLE;
- intel_de_write(dev_priv, DSPCLK_GATE_D, val);
-
- /*
- * Disable trickle feed and enable pnd deadline calculation
- */
- intel_de_write(dev_priv, MI_ARB_VLV,
- MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
- intel_de_write(dev_priv, CBR1_VLV, 0);
-
- drm_WARN_ON(&dev_priv->drm, RUNTIME_INFO(dev_priv)->rawclk_freq == 0);
- intel_de_write(dev_priv, RAWCLK_FREQ_VLV,
- DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq,
- 1000));
-}
-
-static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
-{
- struct intel_encoder *encoder;
- enum pipe pipe;
-
- /*
- * Enable the CRI clock source so we can get at the
- * display and the reference clock for VGA
- * hotplug / manual detection. Supposedly DSI also
- * needs the ref clock up and running.
- *
- * CHV DPLL B/C have some issues if VGA mode is enabled.
- */
- for_each_pipe(dev_priv, pipe) {
- u32 val = intel_de_read(dev_priv, DPLL(pipe));
-
- val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
- if (pipe != PIPE_A)
- val |= DPLL_INTEGRATED_CRI_CLK_VLV;
-
- intel_de_write(dev_priv, DPLL(pipe), val);
- }
-
- vlv_init_display_clock_gating(dev_priv);
-
- spin_lock_irq(&dev_priv->irq_lock);
- valleyview_enable_display_irqs(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
-
- /*
- * During driver initialization/resume we can skip restoring the
- * part of the HW/SW state that will be explicitly initialized anyway.
- */
- if (dev_priv->power_domains.initializing)
- return;
-
- intel_hpd_init(dev_priv);
- intel_hpd_poll_disable(dev_priv);
-
- /* Re-enable the ADPA, if we have one */
- for_each_intel_encoder(&dev_priv->drm, encoder) {
- if (encoder->type == INTEL_OUTPUT_ANALOG)
- intel_crt_reset(&encoder->base);
- }
-
- intel_vga_redisable_power_on(dev_priv);
-
- intel_pps_unlock_regs_wa(dev_priv);
-}
-
-static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
-{
- spin_lock_irq(&dev_priv->irq_lock);
- valleyview_disable_display_irqs(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
-
- /* make sure we're done processing display irqs */
- intel_synchronize_irq(dev_priv);
-
- intel_pps_reset_all(dev_priv);
-
- /* Prevent us from re-enabling polling by accident in late suspend */
- if (!dev_priv->drm.dev->power.is_suspended)
- intel_hpd_poll_enable(dev_priv);
-}
-
-static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- vlv_set_power_well(dev_priv, power_well, true);
-
- vlv_display_power_well_init(dev_priv);
-}
-
-static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- vlv_display_power_well_deinit(dev_priv);
-
- vlv_set_power_well(dev_priv, power_well, false);
-}
-
-static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- /* since ref/cri clock was enabled */
- udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
-
- vlv_set_power_well(dev_priv, power_well, true);
-
- /*
- * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
- * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
- * a. GUnit 0x2110 bit[0] set to 1 (def 0)
- * b. The other bits such as sfr settings / modesel may all
- * be set to 0.
- *
- * This should only be done on init and resume from S3 with
- * both PLLs disabled, or we risk losing DPIO and PLL
- * synchronization.
- */
- intel_de_write(dev_priv, DPIO_CTL,
- intel_de_read(dev_priv, DPIO_CTL) | DPIO_CMNRST);
-}
-
-static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- enum pipe pipe;
-
- for_each_pipe(dev_priv, pipe)
- assert_pll_disabled(dev_priv, pipe);
-
- /* Assert common reset */
- intel_de_write(dev_priv, DPIO_CTL,
- intel_de_read(dev_priv, DPIO_CTL) & ~DPIO_CMNRST);
-
- vlv_set_power_well(dev_priv, power_well, false);
-}
-
#define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))
-#define BITS_SET(val, bits) (((val) & (bits)) == (bits))
-
-static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
-{
- struct i915_power_well *cmn_bc =
- lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
- struct i915_power_well *cmn_d =
- lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
- u32 phy_control = dev_priv->chv_phy_control;
- u32 phy_status = 0;
- u32 phy_status_mask = 0xffffffff;
-
- /*
- * The BIOS can leave the PHY in some weird state
- * where it doesn't fully power down some parts.
- * Disable the asserts until the PHY has been fully
- * reset (ie. the power well has been disabled at
- * least once).
- */
- if (!dev_priv->chv_phy_assert[DPIO_PHY0])
- phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
- PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
- PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
- PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
- PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
- PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));
-
- if (!dev_priv->chv_phy_assert[DPIO_PHY1])
- phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
- PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
- PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));
-
- if (intel_power_well_is_enabled(dev_priv, cmn_bc)) {
- phy_status |= PHY_POWERGOOD(DPIO_PHY0);
-
- /* this assumes override is only used to enable lanes */
- if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
- phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);
-
- if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
- phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);
-
- /* CL1 is on whenever anything is on in either channel */
- if (BITS_SET(phy_control,
- PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
- PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
- phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);
-
- /*
- * The DPLLB check accounts for the pipe B + port A usage
- * with CL2 powered up but all the lanes in the second channel
- * powered down.
- */
- if (BITS_SET(phy_control,
- PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
- (intel_de_read(dev_priv, DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
- phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);
-
- if (BITS_SET(phy_control,
- PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
- phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
- if (BITS_SET(phy_control,
- PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
- phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);
-
- if (BITS_SET(phy_control,
- PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
- phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
- if (BITS_SET(phy_control,
- PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
- phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
- }
-
- if (intel_power_well_is_enabled(dev_priv, cmn_d)) {
- phy_status |= PHY_POWERGOOD(DPIO_PHY1);
-
- /* this assumes override is only used to enable lanes */
- if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
- phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);
-
- if (BITS_SET(phy_control,
- PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
- phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);
-
- if (BITS_SET(phy_control,
- PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
- phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
- if (BITS_SET(phy_control,
- PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
- phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
- }
-
- phy_status &= phy_status_mask;
-
- /*
- * The PHY may be busy with some initial calibration and whatnot,
- * so the power state can take a while to actually change.
- */
- if (intel_de_wait_for_register(dev_priv, DISPLAY_PHY_STATUS,
- phy_status_mask, phy_status, 10))
- drm_err(&dev_priv->drm,
- "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
- intel_de_read(dev_priv, DISPLAY_PHY_STATUS) & phy_status_mask,
- phy_status, dev_priv->chv_phy_control);
-}
-
-#undef BITS_SET
-
-static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- enum dpio_phy phy;
- enum pipe pipe;
- u32 tmp;
-
- drm_WARN_ON_ONCE(&dev_priv->drm,
- power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
- power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
-
- if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
- pipe = PIPE_A;
- phy = DPIO_PHY0;
- } else {
- pipe = PIPE_C;
- phy = DPIO_PHY1;
- }
-
- /* since ref/cri clock was enabled */
- udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
- vlv_set_power_well(dev_priv, power_well, true);
-
- /* Poll for phypwrgood signal */
- if (intel_de_wait_for_set(dev_priv, DISPLAY_PHY_STATUS,
- PHY_POWERGOOD(phy), 1))
- drm_err(&dev_priv->drm, "Display PHY %d is not power up\n",
- phy);
-
- vlv_dpio_get(dev_priv);
-
- /* Enable dynamic power down */
- tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
- tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
- DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
- vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);
-
- if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
- tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
- tmp |= DPIO_DYNPWRDOWNEN_CH1;
- vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
- } else {
- /*
- * Force the non-existent CL2 off. BXT does this
- * too, so maybe it saves some power even though
- * CL2 doesn't exist?
- */
- tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
- tmp |= DPIO_CL2_LDOFUSE_PWRENB;
- vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
- }
-
- vlv_dpio_put(dev_priv);
-
- dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
- intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
- dev_priv->chv_phy_control);
-
- drm_dbg_kms(&dev_priv->drm,
- "Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
- phy, dev_priv->chv_phy_control);
-
- assert_chv_phy_status(dev_priv);
-}
-
-static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- enum dpio_phy phy;
-
- drm_WARN_ON_ONCE(&dev_priv->drm,
- power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
- power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
-
- if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
- phy = DPIO_PHY0;
- assert_pll_disabled(dev_priv, PIPE_A);
- assert_pll_disabled(dev_priv, PIPE_B);
- } else {
- phy = DPIO_PHY1;
- assert_pll_disabled(dev_priv, PIPE_C);
- }
-
- dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
- intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
- dev_priv->chv_phy_control);
-
- vlv_set_power_well(dev_priv, power_well, false);
-
- drm_dbg_kms(&dev_priv->drm,
- "Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
- phy, dev_priv->chv_phy_control);
-
- /* PHY is fully reset now, so we can enable the PHY state asserts */
- dev_priv->chv_phy_assert[phy] = true;
-
- assert_chv_phy_status(dev_priv);
-}
-
-static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
- enum dpio_channel ch, bool override, unsigned int mask)
-{
- enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
- u32 reg, val, expected, actual;
-
- /*
- * The BIOS can leave the PHY in some weird state
- * where it doesn't fully power down some parts.
- * Disable the asserts until the PHY has been fully
- * reset (ie. the power well has been disabled at
- * least once).
- */
- if (!dev_priv->chv_phy_assert[phy])
- return;
-
- if (ch == DPIO_CH0)
- reg = _CHV_CMN_DW0_CH0;
- else
- reg = _CHV_CMN_DW6_CH1;
-
- vlv_dpio_get(dev_priv);
- val = vlv_dpio_read(dev_priv, pipe, reg);
- vlv_dpio_put(dev_priv);
-
- /*
- * This assumes !override is only used when the port is disabled.
- * All lanes should power down even without the override when
- * the port is disabled.
- */
- if (!override || mask == 0xf) {
- expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
- /*
- * If CH1 common lane is not active anymore
- * (e.g. for pipe B DPLL) the entire channel will
- * shut down, which causes the common lane registers
- * to read as 0. That means we can't actually check
- * the lane power down status bits, but as the entire
- * register reads as 0 it's a good indication that the
- * channel is indeed entirely powered down.
- */
- if (ch == DPIO_CH1 && val == 0)
- expected = 0;
- } else if (mask != 0x0) {
- expected = DPIO_ANYDL_POWERDOWN;
- } else {
- expected = 0;
- }
-
- if (ch == DPIO_CH0)
- actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
- else
- actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
- actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
-
- drm_WARN(&dev_priv->drm, actual != expected,
- "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
- !!(actual & DPIO_ALLDL_POWERDOWN),
- !!(actual & DPIO_ANYDL_POWERDOWN),
- !!(expected & DPIO_ALLDL_POWERDOWN),
- !!(expected & DPIO_ANYDL_POWERDOWN),
- reg, val);
-}
-
-bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
- enum dpio_channel ch, bool override)
-{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
- bool was_override;
-
- mutex_lock(&power_domains->lock);
-
- was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
-
- if (override == was_override)
- goto out;
-
- if (override)
- dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
- else
- dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
-
- intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
- dev_priv->chv_phy_control);
-
- drm_dbg_kms(&dev_priv->drm,
- "Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
- phy, ch, dev_priv->chv_phy_control);
-
- assert_chv_phy_status(dev_priv);
-
-out:
- mutex_unlock(&power_domains->lock);
-
- return was_override;
-}
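
A hedged usage sketch for the override helper above, using only its signature
as shown: the returned previous override value lets a caller restore the
earlier setting afterwards.

/* Illustrative usage only -- not part of this patch. */
static void example_toggle_ch0_override(struct drm_i915_private *dev_priv)
{
	bool was_override;

	was_override = chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH0, true);

	/* ... operate with the lane power-down override in place ... */

	chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH0, was_override);
}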
-
-void chv_phy_powergate_lanes(struct intel_encoder *encoder,
- bool override, unsigned int mask)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
- enum dpio_phy phy = vlv_dig_port_to_phy(enc_to_dig_port(encoder));
- enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder));
-
- mutex_lock(&power_domains->lock);
-
- dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
- dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);
-
- if (override)
- dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
- else
- dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
-
- intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
- dev_priv->chv_phy_control);
-
- drm_dbg_kms(&dev_priv->drm,
- "Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
- phy, ch, mask, dev_priv->chv_phy_control);
-
- assert_chv_phy_status(dev_priv);
-
- assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);
-
- mutex_unlock(&power_domains->lock);
-}
-
-static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- enum pipe pipe = PIPE_A;
- bool enabled;
- u32 state, ctrl;
-
- vlv_punit_get(dev_priv);
-
- state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
- /*
- * We only ever set the power-on and power-gate states, anything
- * else is unexpected.
- */
- drm_WARN_ON(&dev_priv->drm, state != DP_SSS_PWR_ON(pipe) &&
- state != DP_SSS_PWR_GATE(pipe));
- enabled = state == DP_SSS_PWR_ON(pipe);
-
- /*
- * A transient state at this point would mean some unexpected party
- * is poking at the power controls too.
- */
- ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
- drm_WARN_ON(&dev_priv->drm, ctrl << 16 != state);
-
- vlv_punit_put(dev_priv);
-
- return enabled;
-}
-
-static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well,
- bool enable)
-{
- enum pipe pipe = PIPE_A;
- u32 state;
- u32 ctrl;
-
- state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
-
- vlv_punit_get(dev_priv);
-
-#define COND \
- ((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)
-
- if (COND)
- goto out;
-
- ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
- ctrl &= ~DP_SSC_MASK(pipe);
- ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
- vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);
-
- if (wait_for(COND, 100))
- drm_err(&dev_priv->drm,
- "timeout setting power well state %08x (%08x)\n",
- state,
- vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));
-
-#undef COND
-
-out:
- vlv_punit_put(dev_priv);
-}
-
-static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
- dev_priv->chv_phy_control);
-}
-
-static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- chv_set_pipe_power_well(dev_priv, power_well, true);
-
- vlv_display_power_well_init(dev_priv);
-}
-
-static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- vlv_display_power_well_deinit(dev_priv);
-
- chv_set_pipe_power_well(dev_priv, power_well, false);
-}
-
-static u64 __async_put_domains_mask(struct i915_power_domains *power_domains)
+static void __async_put_domains_mask(struct i915_power_domains *power_domains,
+ struct intel_power_domain_mask *mask)
{
- return power_domains->async_put_domains[0] |
- power_domains->async_put_domains[1];
+ bitmap_or(mask->bits,
+ power_domains->async_put_domains[0].bits,
+ power_domains->async_put_domains[1].bits,
+ POWER_DOMAIN_NUM);
}
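
The remaining hunks repeat one conversion: u64 mask arithmetic becomes
linux/bitmap.h operations on struct intel_power_domain_mask, lifting the
64-domain ceiling. A hypothetical helper sketches the shape of the
translation: BIT_ULL() tests become test_bit(), mask intersection becomes
bitmap_intersects(), and emptiness checks become bitmap_empty().

/*
 * Hypothetical helper (not in this patch): the old "a & b" overlap test
 * on u64 masks, expressed with the bitmap API the patch switches to.
 */
static bool power_domain_masks_overlap(const struct intel_power_domain_mask *a,
				       const struct intel_power_domain_mask *b)
{
	return bitmap_intersects(a->bits, b->bits, POWER_DOMAIN_NUM);
}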
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
@@ -1869,8 +340,11 @@ assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
struct drm_i915_private *i915 = container_of(power_domains,
struct drm_i915_private,
power_domains);
- return !drm_WARN_ON(&i915->drm, power_domains->async_put_domains[0] &
- power_domains->async_put_domains[1]);
+
+ return !drm_WARN_ON(&i915->drm,
+ bitmap_intersects(power_domains->async_put_domains[0].bits,
+ power_domains->async_put_domains[1].bits,
+ POWER_DOMAIN_NUM));
}
static bool
@@ -1879,14 +353,17 @@ __async_put_domains_state_ok(struct i915_power_domains *power_domains)
struct drm_i915_private *i915 = container_of(power_domains,
struct drm_i915_private,
power_domains);
+ struct intel_power_domain_mask async_put_mask;
enum intel_display_power_domain domain;
bool err = false;
err |= !assert_async_put_domain_masks_disjoint(power_domains);
- err |= drm_WARN_ON(&i915->drm, !!power_domains->async_put_wakeref !=
- !!__async_put_domains_mask(power_domains));
+ __async_put_domains_mask(power_domains, &async_put_mask);
+ err |= drm_WARN_ON(&i915->drm,
+ !!power_domains->async_put_wakeref !=
+ !bitmap_empty(async_put_mask.bits, POWER_DOMAIN_NUM));
- for_each_power_domain(domain, __async_put_domains_mask(power_domains))
+ for_each_power_domain(domain, &async_put_mask)
err |= drm_WARN_ON(&i915->drm,
power_domains->domain_use_count[domain] != 1);
@@ -1894,14 +371,14 @@ __async_put_domains_state_ok(struct i915_power_domains *power_domains)
}
static void print_power_domains(struct i915_power_domains *power_domains,
- const char *prefix, u64 mask)
+ const char *prefix, struct intel_power_domain_mask *mask)
{
struct drm_i915_private *i915 = container_of(power_domains,
struct drm_i915_private,
power_domains);
enum intel_display_power_domain domain;
- drm_dbg(&i915->drm, "%s (%lu):\n", prefix, hweight64(mask));
+ drm_dbg(&i915->drm, "%s (%d):\n", prefix, bitmap_weight(mask->bits, POWER_DOMAIN_NUM));
for_each_power_domain(domain, mask)
drm_dbg(&i915->drm, "%s use_count %d\n",
intel_display_power_domain_str(domain),
@@ -1919,9 +396,9 @@ print_async_put_domains_state(struct i915_power_domains *power_domains)
power_domains->async_put_wakeref);
print_power_domains(power_domains, "async_put_domains[0]",
- power_domains->async_put_domains[0]);
+ &power_domains->async_put_domains[0]);
print_power_domains(power_domains, "async_put_domains[1]",
- power_domains->async_put_domains[1]);
+ &power_domains->async_put_domains[1]);
}
static void
@@ -1945,11 +422,13 @@ verify_async_put_domains_state(struct i915_power_domains *power_domains)
#endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */
-static u64 async_put_domains_mask(struct i915_power_domains *power_domains)
+static void async_put_domains_mask(struct i915_power_domains *power_domains,
+ struct intel_power_domain_mask *mask)
{
assert_async_put_domain_masks_disjoint(power_domains);
- return __async_put_domains_mask(power_domains);
+ __async_put_domains_mask(power_domains, mask);
}
static void
@@ -1958,8 +437,8 @@ async_put_domains_clear_domain(struct i915_power_domains *power_domains,
{
assert_async_put_domain_masks_disjoint(power_domains);
- power_domains->async_put_domains[0] &= ~BIT_ULL(domain);
- power_domains->async_put_domains[1] &= ~BIT_ULL(domain);
+ clear_bit(domain, power_domains->async_put_domains[0].bits);
+ clear_bit(domain, power_domains->async_put_domains[1].bits);
}
static bool
@@ -1967,16 +446,19 @@ intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct intel_power_domain_mask async_put_mask;
bool ret = false;
- if (!(async_put_domains_mask(power_domains) & BIT_ULL(domain)))
+ async_put_domains_mask(power_domains, &async_put_mask);
+ if (!test_bit(domain, async_put_mask.bits))
goto out_verify;
async_put_domains_clear_domain(power_domains, domain);
ret = true;
- if (async_put_domains_mask(power_domains))
+ async_put_domains_mask(power_domains, &async_put_mask);
+ if (!bitmap_empty(async_put_mask.bits, POWER_DOMAIN_NUM))
goto out_verify;
cancel_delayed_work(&power_domains->async_put_work);
@@ -1998,7 +480,7 @@ __intel_display_power_get_domain(struct drm_i915_private *dev_priv,
if (intel_display_power_grab_async_put_ref(dev_priv, domain))
return;
- for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
+ for_each_power_domain_well(dev_priv, power_well, domain)
intel_power_well_get(dev_priv, power_well);
power_domains->domain_use_count[domain]++;
@@ -2079,20 +561,22 @@ __intel_display_power_put_domain(struct drm_i915_private *dev_priv,
struct i915_power_domains *power_domains;
struct i915_power_well *power_well;
const char *name = intel_display_power_domain_str(domain);
+ struct intel_power_domain_mask async_put_mask;
power_domains = &dev_priv->power_domains;
drm_WARN(&dev_priv->drm, !power_domains->domain_use_count[domain],
"Use count on domain %s is already zero\n",
name);
+ async_put_domains_mask(power_domains, &async_put_mask);
drm_WARN(&dev_priv->drm,
- async_put_domains_mask(power_domains) & BIT_ULL(domain),
+ test_bit(domain, async_put_mask.bits),
"Async disabling of domain %s is pending\n",
name);
power_domains->domain_use_count[domain]--;
- for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain))
+ for_each_power_domain_well_reverse(dev_priv, power_well, domain)
intel_power_well_put(dev_priv, power_well);
}
@@ -2121,7 +605,8 @@ queue_async_put_domains_work(struct i915_power_domains *power_domains,
}
static void
-release_async_put_domains(struct i915_power_domains *power_domains, u64 mask)
+release_async_put_domains(struct i915_power_domains *power_domains,
+ struct intel_power_domain_mask *mask)
{
struct drm_i915_private *dev_priv =
container_of(power_domains, struct drm_i915_private,
@@ -2169,12 +654,15 @@ intel_display_power_put_async_work(struct work_struct *work)
goto out_verify;
release_async_put_domains(power_domains,
- power_domains->async_put_domains[0]);
+ &power_domains->async_put_domains[0]);
/* Requeue the work if more domains were async put meanwhile. */
- if (power_domains->async_put_domains[1]) {
- power_domains->async_put_domains[0] =
- fetch_and_zero(&power_domains->async_put_domains[1]);
+ if (!bitmap_empty(power_domains->async_put_domains[1].bits, POWER_DOMAIN_NUM)) {
+ bitmap_copy(power_domains->async_put_domains[0].bits,
+ power_domains->async_put_domains[1].bits,
+ POWER_DOMAIN_NUM);
+ bitmap_zero(power_domains->async_put_domains[1].bits,
+ POWER_DOMAIN_NUM);
queue_async_put_domains_work(power_domains,
fetch_and_zero(&new_work_wakeref));
} else {
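
The bitmap_copy()/bitmap_zero() pair above replaces the old fetch_and_zero()
on a u64 mask; a hypothetical wrapper (not in this patch) makes the
equivalence explicit.

/*
 * Hypothetical wrapper: a bitmap analogue of fetch_and_zero(),
 * matching the copy-then-clear sequence above.
 */
static void power_domain_mask_move(struct intel_power_domain_mask *dst,
				   struct intel_power_domain_mask *src)
{
	bitmap_copy(dst->bits, src->bits, POWER_DOMAIN_NUM);
	bitmap_zero(src->bits, POWER_DOMAIN_NUM);
}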
@@ -2226,9 +714,9 @@ void __intel_display_power_put_async(struct drm_i915_private *i915,
/* Let a pending work requeue itself or queue a new one. */
if (power_domains->async_put_wakeref) {
- power_domains->async_put_domains[1] |= BIT_ULL(domain);
+ set_bit(domain, power_domains->async_put_domains[1].bits);
} else {
- power_domains->async_put_domains[0] |= BIT_ULL(domain);
+ set_bit(domain, power_domains->async_put_domains[0].bits);
queue_async_put_domains_work(power_domains,
fetch_and_zero(&work_wakeref));
}
@@ -2259,6 +747,7 @@ out_verify:
void intel_display_power_flush_work(struct drm_i915_private *i915)
{
struct i915_power_domains *power_domains = &i915->power_domains;
+ struct intel_power_domain_mask async_put_mask;
intel_wakeref_t work_wakeref;
mutex_lock(&power_domains->lock);
@@ -2267,8 +756,8 @@ void intel_display_power_flush_work(struct drm_i915_private *i915)
if (!work_wakeref)
goto out_verify;
- release_async_put_domains(power_domains,
- async_put_domains_mask(power_domains));
+ async_put_domains_mask(power_domains, &async_put_mask);
+ release_async_put_domains(power_domains, &async_put_mask);
cancel_delayed_work(&power_domains->async_put_work);
out_verify:
@@ -2347,13 +836,13 @@ intel_display_power_get_in_set(struct drm_i915_private *i915,
{
intel_wakeref_t __maybe_unused wf;
- drm_WARN_ON(&i915->drm, power_domain_set->mask & BIT_ULL(domain));
+ drm_WARN_ON(&i915->drm, test_bit(domain, power_domain_set->mask.bits));
wf = intel_display_power_get(i915, domain);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
power_domain_set->wakerefs[domain] = wf;
#endif
- power_domain_set->mask |= BIT_ULL(domain);
+ set_bit(domain, power_domain_set->mask.bits);
}
bool
@@ -2363,7 +852,7 @@ intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915,
{
intel_wakeref_t wf;
- drm_WARN_ON(&i915->drm, power_domain_set->mask & BIT_ULL(domain));
+ drm_WARN_ON(&i915->drm, test_bit(domain, power_domain_set->mask.bits));
wf = intel_display_power_get_if_enabled(i915, domain);
if (!wf)
@@ -2372,7 +861,7 @@ intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915,
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
power_domain_set->wakerefs[domain] = wf;
#endif
- power_domain_set->mask |= BIT_ULL(domain);
+ set_bit(domain, power_domain_set->mask.bits);
return true;
}
@@ -2380,11 +869,12 @@ intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915,
void
intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
struct intel_display_power_domain_set *power_domain_set,
- u64 mask)
+ struct intel_power_domain_mask *mask)
{
enum intel_display_power_domain domain;
- drm_WARN_ON(&i915->drm, mask & ~power_domain_set->mask);
+ drm_WARN_ON(&i915->drm,
+ !bitmap_subset(mask->bits, power_domain_set->mask.bits, POWER_DOMAIN_NUM));
for_each_power_domain(domain, mask) {
intel_wakeref_t __maybe_unused wf = -1;
@@ -2393,2363 +883,10 @@ intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
wf = fetch_and_zero(&power_domain_set->wakerefs[domain]);
#endif
intel_display_power_put(i915, domain, wf);
- power_domain_set->mask &= ~BIT_ULL(domain);
+ clear_bit(domain, power_domain_set->mask.bits);
}
}
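
A usage sketch for the set-based API after this conversion, assuming only the
functions shown in this diff: references are taken per domain and then
dropped in bulk through a bitmap mask.

/* Illustrative usage only -- not part of this patch. */
static void example_power_domain_set(struct drm_i915_private *i915)
{
	struct intel_display_power_domain_set set = {};
	struct intel_power_domain_mask mask = {};

	intel_display_power_get_in_set(i915, &set, POWER_DOMAIN_PIPE_A);
	intel_display_power_get_in_set(i915, &set, POWER_DOMAIN_PIPE_B);

	set_bit(POWER_DOMAIN_PIPE_A, mask.bits);
	set_bit(POWER_DOMAIN_PIPE_B, mask.bits);
	intel_display_power_put_mask_in_set(i915, &set, &mask);
}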
-#define I830_PIPES_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PIPE_A) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B) | \
- BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define VLV_DISPLAY_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) | \
- BIT_ULL(POWER_DOMAIN_PIPE_A) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B) | \
- BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DSI) | \
- BIT_ULL(POWER_DOMAIN_PORT_CRT) | \
- BIT_ULL(POWER_DOMAIN_VGA) | \
- BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \
- BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_GMBUS) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_CRT) | \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define CHV_DISPLAY_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) | \
- BIT_ULL(POWER_DOMAIN_PIPE_A) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C) | \
- BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DSI) | \
- BIT_ULL(POWER_DOMAIN_VGA) | \
- BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \
- BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_AUX_D) | \
- BIT_ULL(POWER_DOMAIN_GMBUS) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define CHV_DPIO_CMN_D_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_D) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define HSW_DISPLAY_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PIPE_B) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C) | \
- BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
- BIT_ULL(POWER_DOMAIN_VGA) | \
- BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \
- BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define BDW_DISPLAY_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PIPE_B) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
- BIT_ULL(POWER_DOMAIN_VGA) | \
- BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \
- BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_AUX_D) | \
- BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \
- BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \
- BIT_ULL(POWER_DOMAIN_VGA) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
- SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
- BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
- BIT_ULL(POWER_DOMAIN_MODESET) | \
- BIT_ULL(POWER_DOMAIN_AUX_A) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \
- BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \
- BIT_ULL(POWER_DOMAIN_VGA) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS ( \
- BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
- BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
- BIT_ULL(POWER_DOMAIN_MODESET) | \
- BIT_ULL(POWER_DOMAIN_AUX_A) | \
- BIT_ULL(POWER_DOMAIN_GMBUS) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define BXT_DPIO_CMN_A_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_A) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define BXT_DPIO_CMN_BC_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \
- BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \
- BIT_ULL(POWER_DOMAIN_VGA) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
-#define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
-#define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
-#define GLK_DPIO_CMN_A_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_A) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define GLK_DPIO_CMN_B_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define GLK_DPIO_CMN_C_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define GLK_DISPLAY_AUX_A_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_A) | \
- BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define GLK_DISPLAY_AUX_B_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define GLK_DISPLAY_AUX_C_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-#define GLK_DISPLAY_DC_OFF_POWER_DOMAINS ( \
- GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
- BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
- BIT_ULL(POWER_DOMAIN_MODESET) | \
- BIT_ULL(POWER_DOMAIN_AUX_A) | \
- BIT_ULL(POWER_DOMAIN_GMBUS) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-/*
- * ICL PW_0/PG_0 domains (HW/DMC control):
- * - PCI
- * - clocks except port PLL
- * - central power except FBC
- * - shared functions except pipe interrupts, pipe MBUS, DBUF registers
- * ICL PW_1/PG_1 domains (HW/DMC control):
- * - DBUF function
- * - PIPE_A and its planes, except VGA
- * - transcoder EDP + PSR
- * - transcoder DSI
- * - DDI_A
- * - FBC
- */
-#define ICL_PW_4_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PIPE_C) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_INIT))
- /* VDSC/joining */
-#define ICL_PW_3_POWER_DOMAINS ( \
- ICL_PW_4_POWER_DOMAINS | \
- BIT_ULL(POWER_DOMAIN_PIPE_B) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_AUX_D) | \
- BIT_ULL(POWER_DOMAIN_AUX_E) | \
- BIT_ULL(POWER_DOMAIN_AUX_F) | \
- BIT_ULL(POWER_DOMAIN_AUX_C_TBT) | \
- BIT_ULL(POWER_DOMAIN_AUX_D_TBT) | \
- BIT_ULL(POWER_DOMAIN_AUX_E_TBT) | \
- BIT_ULL(POWER_DOMAIN_AUX_F_TBT) | \
- BIT_ULL(POWER_DOMAIN_VGA) | \
- BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \
- BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \
- BIT_ULL(POWER_DOMAIN_INIT))
- /*
- * - transcoder WD
- * - KVMR (HW control)
- */
-#define ICL_PW_2_POWER_DOMAINS ( \
- ICL_PW_3_POWER_DOMAINS | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \
- BIT_ULL(POWER_DOMAIN_INIT))
- /*
- * - KVMR (HW control)
- */
-#define ICL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
- ICL_PW_2_POWER_DOMAINS | \
- BIT_ULL(POWER_DOMAIN_MODESET) | \
- BIT_ULL(POWER_DOMAIN_AUX_A) | \
- BIT_ULL(POWER_DOMAIN_DC_OFF) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define ICL_DDI_IO_A_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
-#define ICL_DDI_IO_B_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
-#define ICL_DDI_IO_C_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
-#define ICL_DDI_IO_D_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
-#define ICL_DDI_IO_E_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
-#define ICL_DDI_IO_F_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
-
-#define ICL_AUX_A_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
- BIT_ULL(POWER_DOMAIN_AUX_A))
-#define ICL_AUX_B_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_B))
-#define ICL_AUX_C_TC1_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_C))
-#define ICL_AUX_D_TC2_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_D))
-#define ICL_AUX_E_TC3_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_E))
-#define ICL_AUX_F_TC4_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_F))
-#define ICL_AUX_C_TBT1_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_C_TBT))
-#define ICL_AUX_D_TBT2_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_D_TBT))
-#define ICL_AUX_E_TBT3_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_E_TBT))
-#define ICL_AUX_F_TBT4_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_F_TBT))
-
-#define TGL_PW_5_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PIPE_D) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_D) | \
- BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define TGL_PW_4_POWER_DOMAINS ( \
- TGL_PW_5_POWER_DOMAINS | \
- BIT_ULL(POWER_DOMAIN_PIPE_C) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define TGL_PW_3_POWER_DOMAINS ( \
- TGL_PW_4_POWER_DOMAINS | \
- BIT_ULL(POWER_DOMAIN_PIPE_B) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC3) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC4) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC5) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC6) | \
- BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \
- BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \
- BIT_ULL(POWER_DOMAIN_AUX_USBC3) | \
- BIT_ULL(POWER_DOMAIN_AUX_USBC4) | \
- BIT_ULL(POWER_DOMAIN_AUX_USBC5) | \
- BIT_ULL(POWER_DOMAIN_AUX_USBC6) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT5) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT6) | \
- BIT_ULL(POWER_DOMAIN_VGA) | \
- BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \
- BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define TGL_PW_2_POWER_DOMAINS ( \
- TGL_PW_3_POWER_DOMAINS | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define TGL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
- TGL_PW_3_POWER_DOMAINS | \
- BIT_ULL(POWER_DOMAIN_MODESET) | \
- BIT_ULL(POWER_DOMAIN_AUX_A) | \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define TGL_DDI_IO_TC1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC1)
-#define TGL_DDI_IO_TC2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC2)
-#define TGL_DDI_IO_TC3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC3)
-#define TGL_DDI_IO_TC4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC4)
-#define TGL_DDI_IO_TC5_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC5)
-#define TGL_DDI_IO_TC6_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC6)
-
-#define TGL_AUX_A_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \
- BIT_ULL(POWER_DOMAIN_AUX_A))
-#define TGL_AUX_B_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_B))
-#define TGL_AUX_C_IO_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_C))
-
-#define TGL_AUX_IO_USBC1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC1)
-#define TGL_AUX_IO_USBC2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC2)
-#define TGL_AUX_IO_USBC3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC3)
-#define TGL_AUX_IO_USBC4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC4)
-#define TGL_AUX_IO_USBC5_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC5)
-#define TGL_AUX_IO_USBC6_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC6)
-
-#define TGL_AUX_IO_TBT1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT1)
-#define TGL_AUX_IO_TBT2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT2)
-#define TGL_AUX_IO_TBT3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT3)
-#define TGL_AUX_IO_TBT4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT4)
-#define TGL_AUX_IO_TBT5_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT5)
-#define TGL_AUX_IO_TBT6_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT6)
-
-#define TGL_TC_COLD_OFF_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \
- BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \
- BIT_ULL(POWER_DOMAIN_AUX_USBC3) | \
- BIT_ULL(POWER_DOMAIN_AUX_USBC4) | \
- BIT_ULL(POWER_DOMAIN_AUX_USBC5) | \
- BIT_ULL(POWER_DOMAIN_AUX_USBC6) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT5) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT6) | \
- BIT_ULL(POWER_DOMAIN_TC_COLD_OFF))
-
-#define RKL_PW_4_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PIPE_C) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define RKL_PW_3_POWER_DOMAINS ( \
- RKL_PW_4_POWER_DOMAINS | \
- BIT_ULL(POWER_DOMAIN_PIPE_B) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \
- BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \
- BIT_ULL(POWER_DOMAIN_VGA) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) | \
- BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \
- BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-/*
- * There is no PW_2/PG_2 on RKL.
- *
- * RKL PW_1/PG_1 domains (under HW/DMC control):
- * - DBUF function (note: registers are in PW0)
- * - PIPE_A and its planes and VDSC/joining, except VGA
- * - transcoder A
- * - DDI_A and DDI_B
- * - FBC
- *
- * RKL PW_0/PG_0 domains (under HW/DMC control):
- * - PCI
- * - clocks except port PLL
- * - shared functions:
- * * interrupts except pipe interrupts
- * * MBus except PIPE_MBUS_DBOX_CTL
- * * DBUF registers
- * - central power except FBC
- * - top-level GTC (DDI-level GTC is in the well associated with the DDI)
- */
-
-#define RKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
- RKL_PW_3_POWER_DOMAINS | \
- BIT_ULL(POWER_DOMAIN_MODESET) | \
- BIT_ULL(POWER_DOMAIN_AUX_A) | \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-/*
- * From DG1 onwards, Audio MMIO/VERBS lie in the PG0 power well.
- */
-#define DG1_PW_3_POWER_DOMAINS ( \
- TGL_PW_4_POWER_DOMAINS | \
- BIT_ULL(POWER_DOMAIN_PIPE_B) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) | \
- BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \
- BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \
- BIT_ULL(POWER_DOMAIN_VGA) | \
- BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define DG1_PW_2_POWER_DOMAINS ( \
- DG1_PW_3_POWER_DOMAINS | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define DG1_DISPLAY_DC_OFF_POWER_DOMAINS ( \
- DG1_PW_3_POWER_DOMAINS | \
- BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \
- BIT_ULL(POWER_DOMAIN_MODESET) | \
- BIT_ULL(POWER_DOMAIN_AUX_A) | \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-/*
- * XE_LPD Power Domains
- *
- * Previous platforms required that PG(n-1) be enabled before PG(n). That
- * dependency chain turns into a dependency tree on XE_LPD:
- *
- * PG0
- * |
- * --PG1--
- * / \
- * PGA --PG2--
- * / | \
- * PGB PGC PGD
- *
- * Power wells must be enabled from top to bottom and disabled from bottom
- * to top. This allows pipes to be power gated independently.
- */
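The ordering rule above is what the driver relies on when it walks its power well tables: the descriptor arrays list parents before children, so enabling is a forward walk and disabling a reverse walk. A minimal standalone C sketch of the idea — toy names, not driver API:

	#include <stdio.h>

	struct well { const char *name; };

	static void well_enable(const struct well *w)  { printf("enable  %s\n", w->name); }
	static void well_disable(const struct well *w) { printf("disable %s\n", w->name); }

	int main(void)
	{
		/* Topological order of the tree above: every parent precedes
		 * its children (PG0 is always on and omitted here).
		 */
		static const struct well wells[] = {
			{ "PG1" }, { "PGA" }, { "PG2" }, { "PGB" }, { "PGC" }, { "PGD" },
		};
		const int n = sizeof(wells) / sizeof(wells[0]);
		int i;

		for (i = 0; i < n; i++)		/* enable top to bottom */
			well_enable(&wells[i]);
		for (i = n - 1; i >= 0; i--)	/* disable bottom to top */
			well_disable(&wells[i]);
		return 0;
	}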
-
-#define XELPD_PW_D_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PIPE_D) | \
- BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_D) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define XELPD_PW_C_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PIPE_C) | \
- BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define XELPD_PW_B_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PIPE_B) | \
- BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define XELPD_PW_A_POWER_DOMAINS ( \
- BIT_ULL(POWER_DOMAIN_PIPE_A) | \
- BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define XELPD_PW_2_POWER_DOMAINS ( \
- XELPD_PW_B_POWER_DOMAINS | \
- XELPD_PW_C_POWER_DOMAINS | \
- XELPD_PW_D_POWER_DOMAINS | \
- BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \
- BIT_ULL(POWER_DOMAIN_VGA) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_D_XELPD) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_E_XELPD) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC3) | \
- BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC4) | \
- BIT_ULL(POWER_DOMAIN_AUX_C) | \
- BIT_ULL(POWER_DOMAIN_AUX_D_XELPD) | \
- BIT_ULL(POWER_DOMAIN_AUX_E_XELPD) | \
- BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \
- BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \
- BIT_ULL(POWER_DOMAIN_AUX_USBC3) | \
- BIT_ULL(POWER_DOMAIN_AUX_USBC4) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \
- BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-/*
- * XELPD PW_1/PG_1 domains (under HW/DMC control):
- * - DBUF function (registers are in PW0)
- * - Transcoder A
- * - DDI_A and DDI_B
- *
- * XELPD PW_0/PG_0 domains (under HW/DMC control):
- * - PCI
- * - Clocks except port PLL
- * - Shared functions:
- * * interrupts except pipe interrupts
- * * MBus except PIPE_MBUS_DBOX_CTL
- * * DBUF registers
- * - Central power except FBC
- * - Top-level GTC (DDI-level GTC is in the well associated with the DDI)
- */
-
-#define XELPD_DISPLAY_DC_OFF_POWER_DOMAINS ( \
- XELPD_PW_2_POWER_DOMAINS | \
- BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \
- BIT_ULL(POWER_DOMAIN_MODESET) | \
- BIT_ULL(POWER_DOMAIN_AUX_A) | \
- BIT_ULL(POWER_DOMAIN_AUX_B) | \
- BIT_ULL(POWER_DOMAIN_PORT_DSI) | \
- BIT_ULL(POWER_DOMAIN_INIT))
-
-#define XELPD_AUX_IO_D_XELPD_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_D_XELPD)
-#define XELPD_AUX_IO_E_XELPD_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_E_XELPD)
-#define XELPD_AUX_IO_USBC1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC1)
-#define XELPD_AUX_IO_USBC2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC2)
-#define XELPD_AUX_IO_USBC3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC3)
-#define XELPD_AUX_IO_USBC4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC4)
-
-#define XELPD_AUX_IO_TBT1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT1)
-#define XELPD_AUX_IO_TBT2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT2)
-#define XELPD_AUX_IO_TBT3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT3)
-#define XELPD_AUX_IO_TBT4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT4)
-
-#define XELPD_DDI_IO_D_XELPD_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_D_XELPD)
-#define XELPD_DDI_IO_E_XELPD_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_E_XELPD)
-#define XELPD_DDI_IO_TC1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC1)
-#define XELPD_DDI_IO_TC2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC2)
-#define XELPD_DDI_IO_TC3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC3)
-#define XELPD_DDI_IO_TC4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC4)
-
-static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
- .sync_hw = i9xx_power_well_sync_hw_noop,
- .enable = i9xx_always_on_power_well_noop,
- .disable = i9xx_always_on_power_well_noop,
- .is_enabled = i9xx_always_on_power_well_enabled,
-};
-
-static const struct i915_power_well_ops chv_pipe_power_well_ops = {
- .sync_hw = chv_pipe_power_well_sync_hw,
- .enable = chv_pipe_power_well_enable,
- .disable = chv_pipe_power_well_disable,
- .is_enabled = chv_pipe_power_well_enabled,
-};
-
-static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
- .sync_hw = i9xx_power_well_sync_hw_noop,
- .enable = chv_dpio_cmn_power_well_enable,
- .disable = chv_dpio_cmn_power_well_disable,
- .is_enabled = vlv_power_well_enabled,
-};
-
-static const struct i915_power_well_desc i9xx_always_on_power_well[] = {
- {
- .name = "always-on",
- .always_on = true,
- .domains = POWER_DOMAIN_MASK,
- .ops = &i9xx_always_on_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
-};
-
-static const struct i915_power_well_ops i830_pipes_power_well_ops = {
- .sync_hw = i830_pipes_power_well_sync_hw,
- .enable = i830_pipes_power_well_enable,
- .disable = i830_pipes_power_well_disable,
- .is_enabled = i830_pipes_power_well_enabled,
-};
-
-static const struct i915_power_well_desc i830_power_wells[] = {
- {
- .name = "always-on",
- .always_on = true,
- .domains = POWER_DOMAIN_MASK,
- .ops = &i9xx_always_on_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
- {
- .name = "pipes",
- .domains = I830_PIPES_POWER_DOMAINS,
- .ops = &i830_pipes_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
-};
-
-static const struct i915_power_well_regs hsw_power_well_regs = {
- .bios = HSW_PWR_WELL_CTL1,
- .driver = HSW_PWR_WELL_CTL2,
- .kvmr = HSW_PWR_WELL_CTL3,
- .debug = HSW_PWR_WELL_CTL4,
-};
-
-static const struct i915_power_well_ops hsw_power_well_ops = {
- .regs = &hsw_power_well_regs,
- .sync_hw = hsw_power_well_sync_hw,
- .enable = hsw_power_well_enable,
- .disable = hsw_power_well_disable,
- .is_enabled = hsw_power_well_enabled,
-};
-
-static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
- .sync_hw = i9xx_power_well_sync_hw_noop,
- .enable = gen9_dc_off_power_well_enable,
- .disable = gen9_dc_off_power_well_disable,
- .is_enabled = gen9_dc_off_power_well_enabled,
-};
-
-static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
- .sync_hw = i9xx_power_well_sync_hw_noop,
- .enable = bxt_dpio_cmn_power_well_enable,
- .disable = bxt_dpio_cmn_power_well_disable,
- .is_enabled = bxt_dpio_cmn_power_well_enabled,
-};
-
-static const struct i915_power_well_desc hsw_power_wells[] = {
- {
- .name = "always-on",
- .always_on = true,
- .domains = POWER_DOMAIN_MASK,
- .ops = &i9xx_always_on_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
- {
- .name = "display",
- .domains = HSW_DISPLAY_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = HSW_DISP_PW_GLOBAL,
- {
- .hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
- .hsw.has_vga = true,
- },
- },
-};
-
-static const struct i915_power_well_desc bdw_power_wells[] = {
- {
- .name = "always-on",
- .always_on = true,
- .domains = POWER_DOMAIN_MASK,
- .ops = &i9xx_always_on_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
- {
- .name = "display",
- .domains = BDW_DISPLAY_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = HSW_DISP_PW_GLOBAL,
- {
- .hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
- .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
- .hsw.has_vga = true,
- },
- },
-};
-
-static const struct i915_power_well_ops vlv_display_power_well_ops = {
- .sync_hw = i9xx_power_well_sync_hw_noop,
- .enable = vlv_display_power_well_enable,
- .disable = vlv_display_power_well_disable,
- .is_enabled = vlv_power_well_enabled,
-};
-
-static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
- .sync_hw = i9xx_power_well_sync_hw_noop,
- .enable = vlv_dpio_cmn_power_well_enable,
- .disable = vlv_dpio_cmn_power_well_disable,
- .is_enabled = vlv_power_well_enabled,
-};
-
-static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
- .sync_hw = i9xx_power_well_sync_hw_noop,
- .enable = vlv_power_well_enable,
- .disable = vlv_power_well_disable,
- .is_enabled = vlv_power_well_enabled,
-};
-
-static const struct i915_power_well_desc vlv_power_wells[] = {
- {
- .name = "always-on",
- .always_on = true,
- .domains = POWER_DOMAIN_MASK,
- .ops = &i9xx_always_on_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
- {
- .name = "display",
- .domains = VLV_DISPLAY_POWER_DOMAINS,
- .ops = &vlv_display_power_well_ops,
- .id = VLV_DISP_PW_DISP2D,
- {
- .vlv.idx = PUNIT_PWGT_IDX_DISP2D,
- },
- },
- {
- .name = "dpio-tx-b-01",
- .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
- VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
- .ops = &vlv_dpio_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01,
- },
- },
- {
- .name = "dpio-tx-b-23",
- .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
- VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
- .ops = &vlv_dpio_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23,
- },
- },
- {
- .name = "dpio-tx-c-01",
- .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
- VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
- .ops = &vlv_dpio_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01,
- },
- },
- {
- .name = "dpio-tx-c-23",
- .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
- VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
- .ops = &vlv_dpio_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23,
- },
- },
- {
- .name = "dpio-common",
- .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
- .ops = &vlv_dpio_cmn_power_well_ops,
- .id = VLV_DISP_PW_DPIO_CMN_BC,
- {
- .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
- },
- },
-};
-
-static const struct i915_power_well_desc chv_power_wells[] = {
- {
- .name = "always-on",
- .always_on = true,
- .domains = POWER_DOMAIN_MASK,
- .ops = &i9xx_always_on_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
- {
- .name = "display",
- /*
- * Pipe A power well is the new disp2d well; pipe B and C
- * power wells don't actually exist. The pipe A power well is
- * required for any pipe to work.
- */
- .domains = CHV_DISPLAY_POWER_DOMAINS,
- .ops = &chv_pipe_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
- {
- .name = "dpio-common-bc",
- .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
- .ops = &chv_dpio_cmn_power_well_ops,
- .id = VLV_DISP_PW_DPIO_CMN_BC,
- {
- .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
- },
- },
- {
- .name = "dpio-common-d",
- .domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
- .ops = &chv_dpio_cmn_power_well_ops,
- .id = CHV_DISP_PW_DPIO_CMN_D,
- {
- .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D,
- },
- },
-};
-
-static const struct i915_power_well_desc skl_power_wells[] = {
- {
- .name = "always-on",
- .always_on = true,
- .domains = POWER_DOMAIN_MASK,
- .ops = &i9xx_always_on_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
- {
- .name = "power well 1",
- /* Handled by the DMC firmware */
- .always_on = true,
- .domains = 0,
- .ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_1,
- {
- .hsw.idx = SKL_PW_CTL_IDX_PW_1,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "MISC IO power well",
- /* Handled by the DMC firmware */
- .always_on = true,
- .domains = 0,
- .ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_MISC_IO,
- {
- .hsw.idx = SKL_PW_CTL_IDX_MISC_IO,
- },
- },
- {
- .name = "DC off",
- .domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
- .ops = &gen9_dc_off_power_well_ops,
- .id = SKL_DISP_DC_OFF,
- },
- {
- .name = "power well 2",
- .domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_2,
- {
- .hsw.idx = SKL_PW_CTL_IDX_PW_2,
- .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
- .hsw.has_vga = true,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "DDI A/E IO power well",
- .domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = SKL_PW_CTL_IDX_DDI_A_E,
- },
- },
- {
- .name = "DDI B IO power well",
- .domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
- },
- },
- {
- .name = "DDI C IO power well",
- .domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
- },
- },
- {
- .name = "DDI D IO power well",
- .domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = SKL_PW_CTL_IDX_DDI_D,
- },
- },
-};
-
-static const struct i915_power_well_desc bxt_power_wells[] = {
- {
- .name = "always-on",
- .always_on = true,
- .domains = POWER_DOMAIN_MASK,
- .ops = &i9xx_always_on_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
- {
- .name = "power well 1",
- /* Handled by the DMC firmware */
- .always_on = true,
- .domains = 0,
- .ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_1,
- {
- .hsw.idx = SKL_PW_CTL_IDX_PW_1,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "DC off",
- .domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS,
- .ops = &gen9_dc_off_power_well_ops,
- .id = SKL_DISP_DC_OFF,
- },
- {
- .name = "power well 2",
- .domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_2,
- {
- .hsw.idx = SKL_PW_CTL_IDX_PW_2,
- .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
- .hsw.has_vga = true,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "dpio-common-a",
- .domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
- .ops = &bxt_dpio_cmn_power_well_ops,
- .id = BXT_DISP_PW_DPIO_CMN_A,
- {
- .bxt.phy = DPIO_PHY1,
- },
- },
- {
- .name = "dpio-common-bc",
- .domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
- .ops = &bxt_dpio_cmn_power_well_ops,
- .id = VLV_DISP_PW_DPIO_CMN_BC,
- {
- .bxt.phy = DPIO_PHY0,
- },
- },
-};
-
-static const struct i915_power_well_desc glk_power_wells[] = {
- {
- .name = "always-on",
- .always_on = true,
- .domains = POWER_DOMAIN_MASK,
- .ops = &i9xx_always_on_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
- {
- .name = "power well 1",
- /* Handled by the DMC firmware */
- .always_on = true,
- .domains = 0,
- .ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_1,
- {
- .hsw.idx = SKL_PW_CTL_IDX_PW_1,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "DC off",
- .domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
- .ops = &gen9_dc_off_power_well_ops,
- .id = SKL_DISP_DC_OFF,
- },
- {
- .name = "power well 2",
- .domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_2,
- {
- .hsw.idx = SKL_PW_CTL_IDX_PW_2,
- .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
- .hsw.has_vga = true,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "dpio-common-a",
- .domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
- .ops = &bxt_dpio_cmn_power_well_ops,
- .id = BXT_DISP_PW_DPIO_CMN_A,
- {
- .bxt.phy = DPIO_PHY1,
- },
- },
- {
- .name = "dpio-common-b",
- .domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
- .ops = &bxt_dpio_cmn_power_well_ops,
- .id = VLV_DISP_PW_DPIO_CMN_BC,
- {
- .bxt.phy = DPIO_PHY0,
- },
- },
- {
- .name = "dpio-common-c",
- .domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
- .ops = &bxt_dpio_cmn_power_well_ops,
- .id = GLK_DISP_PW_DPIO_CMN_C,
- {
- .bxt.phy = DPIO_PHY2,
- },
- },
- {
- .name = "AUX A",
- .domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = GLK_PW_CTL_IDX_AUX_A,
- },
- },
- {
- .name = "AUX B",
- .domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = GLK_PW_CTL_IDX_AUX_B,
- },
- },
- {
- .name = "AUX C",
- .domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = GLK_PW_CTL_IDX_AUX_C,
- },
- },
- {
- .name = "DDI A IO power well",
- .domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = GLK_PW_CTL_IDX_DDI_A,
- },
- },
- {
- .name = "DDI B IO power well",
- .domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
- },
- },
- {
- .name = "DDI C IO power well",
- .domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
- },
- },
-};
-
-static const struct i915_power_well_regs icl_aux_power_well_regs = {
- .bios = ICL_PWR_WELL_CTL_AUX1,
- .driver = ICL_PWR_WELL_CTL_AUX2,
- .debug = ICL_PWR_WELL_CTL_AUX4,
-};
-
-static const struct i915_power_well_ops icl_aux_power_well_ops = {
- .regs = &icl_aux_power_well_regs,
- .sync_hw = hsw_power_well_sync_hw,
- .enable = icl_aux_power_well_enable,
- .disable = icl_aux_power_well_disable,
- .is_enabled = hsw_power_well_enabled,
-};
-
-static const struct i915_power_well_regs icl_ddi_power_well_regs = {
- .bios = ICL_PWR_WELL_CTL_DDI1,
- .driver = ICL_PWR_WELL_CTL_DDI2,
- .debug = ICL_PWR_WELL_CTL_DDI4,
-};
-
-static const struct i915_power_well_ops icl_ddi_power_well_ops = {
- .regs = &icl_ddi_power_well_regs,
- .sync_hw = hsw_power_well_sync_hw,
- .enable = hsw_power_well_enable,
- .disable = hsw_power_well_disable,
- .is_enabled = hsw_power_well_enabled,
-};
-
-static const struct i915_power_well_desc icl_power_wells[] = {
- {
- .name = "always-on",
- .always_on = true,
- .domains = POWER_DOMAIN_MASK,
- .ops = &i9xx_always_on_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
- {
- .name = "power well 1",
- /* Handled by the DMC firmware */
- .always_on = true,
- .domains = 0,
- .ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_1,
- {
- .hsw.idx = ICL_PW_CTL_IDX_PW_1,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "DC off",
- .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
- .ops = &gen9_dc_off_power_well_ops,
- .id = SKL_DISP_DC_OFF,
- },
- {
- .name = "power well 2",
- .domains = ICL_PW_2_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_2,
- {
- .hsw.idx = ICL_PW_CTL_IDX_PW_2,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "power well 3",
- .domains = ICL_PW_3_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = ICL_DISP_PW_3,
- {
- .hsw.idx = ICL_PW_CTL_IDX_PW_3,
- .hsw.irq_pipe_mask = BIT(PIPE_B),
- .hsw.has_vga = true,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "DDI A IO",
- .domains = ICL_DDI_IO_A_POWER_DOMAINS,
- .ops = &icl_ddi_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = ICL_PW_CTL_IDX_DDI_A,
- },
- },
- {
- .name = "DDI B IO",
- .domains = ICL_DDI_IO_B_POWER_DOMAINS,
- .ops = &icl_ddi_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = ICL_PW_CTL_IDX_DDI_B,
- },
- },
- {
- .name = "DDI C IO",
- .domains = ICL_DDI_IO_C_POWER_DOMAINS,
- .ops = &icl_ddi_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = ICL_PW_CTL_IDX_DDI_C,
- },
- },
- {
- .name = "DDI D IO",
- .domains = ICL_DDI_IO_D_POWER_DOMAINS,
- .ops = &icl_ddi_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = ICL_PW_CTL_IDX_DDI_D,
- },
- },
- {
- .name = "DDI E IO",
- .domains = ICL_DDI_IO_E_POWER_DOMAINS,
- .ops = &icl_ddi_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = ICL_PW_CTL_IDX_DDI_E,
- },
- },
- {
- .name = "DDI F IO",
- .domains = ICL_DDI_IO_F_POWER_DOMAINS,
- .ops = &icl_ddi_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = ICL_PW_CTL_IDX_DDI_F,
- },
- },
- {
- .name = "AUX A",
- .domains = ICL_AUX_A_IO_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = ICL_PW_CTL_IDX_AUX_A,
- },
- },
- {
- .name = "AUX B",
- .domains = ICL_AUX_B_IO_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = ICL_PW_CTL_IDX_AUX_B,
- },
- },
- {
- .name = "AUX C TC1",
- .domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = ICL_PW_CTL_IDX_AUX_C,
- .hsw.is_tc_tbt = false,
- },
- },
- {
- .name = "AUX D TC2",
- .domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = ICL_PW_CTL_IDX_AUX_D,
- .hsw.is_tc_tbt = false,
- },
- },
- {
- .name = "AUX E TC3",
- .domains = ICL_AUX_E_TC3_IO_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = ICL_PW_CTL_IDX_AUX_E,
- .hsw.is_tc_tbt = false,
- },
- },
- {
- .name = "AUX F TC4",
- .domains = ICL_AUX_F_TC4_IO_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = ICL_PW_CTL_IDX_AUX_F,
- .hsw.is_tc_tbt = false,
- },
- },
- {
- .name = "AUX C TBT1",
- .domains = ICL_AUX_C_TBT1_IO_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1,
- .hsw.is_tc_tbt = true,
- },
- },
- {
- .name = "AUX D TBT2",
- .domains = ICL_AUX_D_TBT2_IO_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2,
- .hsw.is_tc_tbt = true,
- },
- },
- {
- .name = "AUX E TBT3",
- .domains = ICL_AUX_E_TBT3_IO_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3,
- .hsw.is_tc_tbt = true,
- },
- },
- {
- .name = "AUX F TBT4",
- .domains = ICL_AUX_F_TBT4_IO_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4,
- .hsw.is_tc_tbt = true,
- },
- },
- {
- .name = "power well 4",
- .domains = ICL_PW_4_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = ICL_PW_CTL_IDX_PW_4,
- .hsw.has_fuses = true,
- .hsw.irq_pipe_mask = BIT(PIPE_C),
- },
- },
-};
-
-static void
-tgl_tc_cold_request(struct drm_i915_private *i915, bool block)
-{
- u8 tries = 0;
- int ret;
-
- while (1) {
- u32 low_val;
- u32 high_val = 0;
-
- if (block)
- low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_BLOCK_REQ;
- else
- low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_UNBLOCK_REQ;
-
- /*
- * The spec states that we should time out the request after 200us,
- * but the function below will time out after 500us.
- */
- ret = snb_pcode_read(i915, TGL_PCODE_TCCOLD, &low_val, &high_val);
- if (ret == 0) {
- if (block &&
- (low_val & TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED))
- ret = -EIO;
- else
- break;
- }
-
- if (++tries == 3)
- break;
-
- msleep(1);
- }
-
- if (ret)
- drm_err(&i915->drm, "TC cold %sblock failed\n",
- block ? "" : "un");
- else
- drm_dbg_kms(&i915->drm, "TC cold %sblock succeeded\n",
- block ? "" : "un");
-}
-
-static void
-tgl_tc_cold_off_power_well_enable(struct drm_i915_private *i915,
- struct i915_power_well *power_well)
-{
- tgl_tc_cold_request(i915, true);
-}
-
-static void
-tgl_tc_cold_off_power_well_disable(struct drm_i915_private *i915,
- struct i915_power_well *power_well)
-{
- tgl_tc_cold_request(i915, false);
-}
-
-static void
-tgl_tc_cold_off_power_well_sync_hw(struct drm_i915_private *i915,
- struct i915_power_well *power_well)
-{
- if (intel_power_well_refcount(power_well) > 0)
- tgl_tc_cold_off_power_well_enable(i915, power_well);
- else
- tgl_tc_cold_off_power_well_disable(i915, power_well);
-}
-
-static bool
-tgl_tc_cold_off_power_well_is_enabled(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- /*
- * Not the correct implementation: there is no way to just read the
- * state back from PCODE, so return the refcount to avoid state
- * mismatch errors.
- */
- return intel_power_well_refcount(power_well);
-}
-
-static const struct i915_power_well_ops tgl_tc_cold_off_ops = {
- .sync_hw = tgl_tc_cold_off_power_well_sync_hw,
- .enable = tgl_tc_cold_off_power_well_enable,
- .disable = tgl_tc_cold_off_power_well_disable,
- .is_enabled = tgl_tc_cold_off_power_well_is_enabled,
-};
-
-static const struct i915_power_well_desc tgl_power_wells[] = {
- {
- .name = "always-on",
- .always_on = true,
- .domains = POWER_DOMAIN_MASK,
- .ops = &i9xx_always_on_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
- {
- .name = "power well 1",
- /* Handled by the DMC firmware */
- .always_on = true,
- .domains = 0,
- .ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_1,
- {
- .hsw.idx = ICL_PW_CTL_IDX_PW_1,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "DC off",
- .domains = TGL_DISPLAY_DC_OFF_POWER_DOMAINS,
- .ops = &gen9_dc_off_power_well_ops,
- .id = SKL_DISP_DC_OFF,
- },
- {
- .name = "power well 2",
- .domains = TGL_PW_2_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_2,
- {
- .hsw.idx = ICL_PW_CTL_IDX_PW_2,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "power well 3",
- .domains = TGL_PW_3_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = ICL_DISP_PW_3,
- {
- .hsw.idx = ICL_PW_CTL_IDX_PW_3,
- .hsw.irq_pipe_mask = BIT(PIPE_B),
- .hsw.has_vga = true,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "DDI A IO",
- .domains = ICL_DDI_IO_A_POWER_DOMAINS,
- .ops = &icl_ddi_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = ICL_PW_CTL_IDX_DDI_A,
- }
- },
- {
- .name = "DDI B IO",
- .domains = ICL_DDI_IO_B_POWER_DOMAINS,
- .ops = &icl_ddi_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = ICL_PW_CTL_IDX_DDI_B,
- }
- },
- {
- .name = "DDI C IO",
- .domains = ICL_DDI_IO_C_POWER_DOMAINS,
- .ops = &icl_ddi_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = ICL_PW_CTL_IDX_DDI_C,
- }
- },
- {
- .name = "DDI IO TC1",
- .domains = TGL_DDI_IO_TC1_POWER_DOMAINS,
- .ops = &icl_ddi_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1,
- },
- },
- {
- .name = "DDI IO TC2",
- .domains = TGL_DDI_IO_TC2_POWER_DOMAINS,
- .ops = &icl_ddi_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2,
- },
- },
- {
- .name = "DDI IO TC3",
- .domains = TGL_DDI_IO_TC3_POWER_DOMAINS,
- .ops = &icl_ddi_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = TGL_PW_CTL_IDX_DDI_TC3,
- },
- },
- {
- .name = "DDI IO TC4",
- .domains = TGL_DDI_IO_TC4_POWER_DOMAINS,
- .ops = &icl_ddi_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = TGL_PW_CTL_IDX_DDI_TC4,
- },
- },
- {
- .name = "DDI IO TC5",
- .domains = TGL_DDI_IO_TC5_POWER_DOMAINS,
- .ops = &icl_ddi_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = TGL_PW_CTL_IDX_DDI_TC5,
- },
- },
- {
- .name = "DDI IO TC6",
- .domains = TGL_DDI_IO_TC6_POWER_DOMAINS,
- .ops = &icl_ddi_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = TGL_PW_CTL_IDX_DDI_TC6,
- },
- },
- {
- .name = "TC cold off",
- .domains = TGL_TC_COLD_OFF_POWER_DOMAINS,
- .ops = &tgl_tc_cold_off_ops,
- .id = TGL_DISP_PW_TC_COLD_OFF,
- },
- {
- .name = "AUX A",
- .domains = TGL_AUX_A_IO_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = ICL_PW_CTL_IDX_AUX_A,
- },
- },
- {
- .name = "AUX B",
- .domains = TGL_AUX_B_IO_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = ICL_PW_CTL_IDX_AUX_B,
- },
- },
- {
- .name = "AUX C",
- .domains = TGL_AUX_C_IO_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = ICL_PW_CTL_IDX_AUX_C,
- },
- },
- {
- .name = "AUX USBC1",
- .domains = TGL_AUX_IO_USBC1_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1,
- .hsw.is_tc_tbt = false,
- },
- },
- {
- .name = "AUX USBC2",
- .domains = TGL_AUX_IO_USBC2_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2,
- .hsw.is_tc_tbt = false,
- },
- },
- {
- .name = "AUX USBC3",
- .domains = TGL_AUX_IO_USBC3_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = TGL_PW_CTL_IDX_AUX_TC3,
- .hsw.is_tc_tbt = false,
- },
- },
- {
- .name = "AUX USBC4",
- .domains = TGL_AUX_IO_USBC4_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = TGL_PW_CTL_IDX_AUX_TC4,
- .hsw.is_tc_tbt = false,
- },
- },
- {
- .name = "AUX USBC5",
- .domains = TGL_AUX_IO_USBC5_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = TGL_PW_CTL_IDX_AUX_TC5,
- .hsw.is_tc_tbt = false,
- },
- },
- {
- .name = "AUX USBC6",
- .domains = TGL_AUX_IO_USBC6_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = TGL_PW_CTL_IDX_AUX_TC6,
- .hsw.is_tc_tbt = false,
- },
- },
- {
- .name = "AUX TBT1",
- .domains = TGL_AUX_IO_TBT1_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1,
- .hsw.is_tc_tbt = true,
- },
- },
- {
- .name = "AUX TBT2",
- .domains = TGL_AUX_IO_TBT2_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2,
- .hsw.is_tc_tbt = true,
- },
- },
- {
- .name = "AUX TBT3",
- .domains = TGL_AUX_IO_TBT3_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3,
- .hsw.is_tc_tbt = true,
- },
- },
- {
- .name = "AUX TBT4",
- .domains = TGL_AUX_IO_TBT4_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4,
- .hsw.is_tc_tbt = true,
- },
- },
- {
- .name = "AUX TBT5",
- .domains = TGL_AUX_IO_TBT5_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT5,
- .hsw.is_tc_tbt = true,
- },
- },
- {
- .name = "AUX TBT6",
- .domains = TGL_AUX_IO_TBT6_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT6,
- .hsw.is_tc_tbt = true,
- },
- },
- {
- .name = "power well 4",
- .domains = TGL_PW_4_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = ICL_PW_CTL_IDX_PW_4,
- .hsw.has_fuses = true,
- .hsw.irq_pipe_mask = BIT(PIPE_C),
- }
- },
- {
- .name = "power well 5",
- .domains = TGL_PW_5_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = TGL_PW_CTL_IDX_PW_5,
- .hsw.has_fuses = true,
- .hsw.irq_pipe_mask = BIT(PIPE_D),
- },
- },
-};
-
-static const struct i915_power_well_desc rkl_power_wells[] = {
- {
- .name = "always-on",
- .always_on = true,
- .domains = POWER_DOMAIN_MASK,
- .ops = &i9xx_always_on_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
- {
- .name = "power well 1",
- /* Handled by the DMC firmware */
- .always_on = true,
- .domains = 0,
- .ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_1,
- {
- .hsw.idx = ICL_PW_CTL_IDX_PW_1,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "DC off",
- .domains = RKL_DISPLAY_DC_OFF_POWER_DOMAINS,
- .ops = &gen9_dc_off_power_well_ops,
- .id = SKL_DISP_DC_OFF,
- },
- {
- .name = "power well 3",
- .domains = RKL_PW_3_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = ICL_DISP_PW_3,
- {
- .hsw.idx = ICL_PW_CTL_IDX_PW_3,
- .hsw.irq_pipe_mask = BIT(PIPE_B),
- .hsw.has_vga = true,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "power well 4",
- .domains = RKL_PW_4_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = ICL_PW_CTL_IDX_PW_4,
- .hsw.has_fuses = true,
- .hsw.irq_pipe_mask = BIT(PIPE_C),
- }
- },
- {
- .name = "DDI A IO",
- .domains = ICL_DDI_IO_A_POWER_DOMAINS,
- .ops = &icl_ddi_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = ICL_PW_CTL_IDX_DDI_A,
- }
- },
- {
- .name = "DDI B IO",
- .domains = ICL_DDI_IO_B_POWER_DOMAINS,
- .ops = &icl_ddi_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = ICL_PW_CTL_IDX_DDI_B,
- }
- },
- {
- .name = "DDI IO TC1",
- .domains = TGL_DDI_IO_TC1_POWER_DOMAINS,
- .ops = &icl_ddi_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1,
- },
- },
- {
- .name = "DDI IO TC2",
- .domains = TGL_DDI_IO_TC2_POWER_DOMAINS,
- .ops = &icl_ddi_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2,
- },
- },
- {
- .name = "AUX A",
- .domains = ICL_AUX_A_IO_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = ICL_PW_CTL_IDX_AUX_A,
- },
- },
- {
- .name = "AUX B",
- .domains = ICL_AUX_B_IO_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = ICL_PW_CTL_IDX_AUX_B,
- },
- },
- {
- .name = "AUX USBC1",
- .domains = TGL_AUX_IO_USBC1_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1,
- },
- },
- {
- .name = "AUX USBC2",
- .domains = TGL_AUX_IO_USBC2_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2,
- },
- },
-};
-
-static const struct i915_power_well_desc dg1_power_wells[] = {
- {
- .name = "always-on",
- .always_on = true,
- .domains = POWER_DOMAIN_MASK,
- .ops = &i9xx_always_on_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
- {
- .name = "power well 1",
- /* Handled by the DMC firmware */
- .always_on = true,
- .domains = 0,
- .ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_1,
- {
- .hsw.idx = ICL_PW_CTL_IDX_PW_1,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "DC off",
- .domains = DG1_DISPLAY_DC_OFF_POWER_DOMAINS,
- .ops = &gen9_dc_off_power_well_ops,
- .id = SKL_DISP_DC_OFF,
- },
- {
- .name = "power well 2",
- .domains = DG1_PW_2_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_2,
- {
- .hsw.idx = ICL_PW_CTL_IDX_PW_2,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "power well 3",
- .domains = DG1_PW_3_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = ICL_DISP_PW_3,
- {
- .hsw.idx = ICL_PW_CTL_IDX_PW_3,
- .hsw.irq_pipe_mask = BIT(PIPE_B),
- .hsw.has_vga = true,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "DDI A IO",
- .domains = ICL_DDI_IO_A_POWER_DOMAINS,
- .ops = &icl_ddi_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = ICL_PW_CTL_IDX_DDI_A,
- }
- },
- {
- .name = "DDI B IO",
- .domains = ICL_DDI_IO_B_POWER_DOMAINS,
- .ops = &icl_ddi_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = ICL_PW_CTL_IDX_DDI_B,
- }
- },
- {
- .name = "DDI IO TC1",
- .domains = TGL_DDI_IO_TC1_POWER_DOMAINS,
- .ops = &icl_ddi_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1,
- },
- },
- {
- .name = "DDI IO TC2",
- .domains = TGL_DDI_IO_TC2_POWER_DOMAINS,
- .ops = &icl_ddi_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2,
- },
- },
- {
- .name = "AUX A",
- .domains = TGL_AUX_A_IO_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = ICL_PW_CTL_IDX_AUX_A,
- },
- },
- {
- .name = "AUX B",
- .domains = TGL_AUX_B_IO_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = ICL_PW_CTL_IDX_AUX_B,
- },
- },
- {
- .name = "AUX USBC1",
- .domains = TGL_AUX_IO_USBC1_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1,
- .hsw.is_tc_tbt = false,
- },
- },
- {
- .name = "AUX USBC2",
- .domains = TGL_AUX_IO_USBC2_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2,
- .hsw.is_tc_tbt = false,
- },
- },
- {
- .name = "power well 4",
- .domains = TGL_PW_4_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = ICL_PW_CTL_IDX_PW_4,
- .hsw.has_fuses = true,
- .hsw.irq_pipe_mask = BIT(PIPE_C),
- }
- },
- {
- .name = "power well 5",
- .domains = TGL_PW_5_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = TGL_PW_CTL_IDX_PW_5,
- .hsw.has_fuses = true,
- .hsw.irq_pipe_mask = BIT(PIPE_D),
- },
- },
-};
-
-static const struct i915_power_well_desc xelpd_power_wells[] = {
- {
- .name = "always-on",
- .always_on = true,
- .domains = POWER_DOMAIN_MASK,
- .ops = &i9xx_always_on_power_well_ops,
- .id = DISP_PW_ID_NONE,
- },
- {
- .name = "power well 1",
- /* Handled by the DMC firmware */
- .always_on = true,
- .domains = 0,
- .ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_1,
- {
- .hsw.idx = ICL_PW_CTL_IDX_PW_1,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "DC off",
- .domains = XELPD_DISPLAY_DC_OFF_POWER_DOMAINS,
- .ops = &gen9_dc_off_power_well_ops,
- .id = SKL_DISP_DC_OFF,
- },
- {
- .name = "power well 2",
- .domains = XELPD_PW_2_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = SKL_DISP_PW_2,
- {
- .hsw.idx = ICL_PW_CTL_IDX_PW_2,
- .hsw.has_vga = true,
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "power well A",
- .domains = XELPD_PW_A_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = XELPD_PW_CTL_IDX_PW_A,
- .hsw.irq_pipe_mask = BIT(PIPE_A),
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "power well B",
- .domains = XELPD_PW_B_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = XELPD_PW_CTL_IDX_PW_B,
- .hsw.irq_pipe_mask = BIT(PIPE_B),
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "power well C",
- .domains = XELPD_PW_C_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = XELPD_PW_CTL_IDX_PW_C,
- .hsw.irq_pipe_mask = BIT(PIPE_C),
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "power well D",
- .domains = XELPD_PW_D_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = XELPD_PW_CTL_IDX_PW_D,
- .hsw.irq_pipe_mask = BIT(PIPE_D),
- .hsw.has_fuses = true,
- },
- },
- {
- .name = "DDI A IO",
- .domains = ICL_DDI_IO_A_POWER_DOMAINS,
- .ops = &icl_ddi_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = ICL_PW_CTL_IDX_DDI_A,
- }
- },
- {
- .name = "DDI B IO",
- .domains = ICL_DDI_IO_B_POWER_DOMAINS,
- .ops = &icl_ddi_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = ICL_PW_CTL_IDX_DDI_B,
- }
- },
- {
- .name = "DDI C IO",
- .domains = ICL_DDI_IO_C_POWER_DOMAINS,
- .ops = &icl_ddi_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = ICL_PW_CTL_IDX_DDI_C,
- }
- },
- {
- .name = "DDI IO D_XELPD",
- .domains = XELPD_DDI_IO_D_XELPD_POWER_DOMAINS,
- .ops = &icl_ddi_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = XELPD_PW_CTL_IDX_DDI_D,
- }
- },
- {
- .name = "DDI IO E_XELPD",
- .domains = XELPD_DDI_IO_E_XELPD_POWER_DOMAINS,
- .ops = &icl_ddi_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = XELPD_PW_CTL_IDX_DDI_E,
- }
- },
- {
- .name = "DDI IO TC1",
- .domains = XELPD_DDI_IO_TC1_POWER_DOMAINS,
- .ops = &icl_ddi_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1,
- }
- },
- {
- .name = "DDI IO TC2",
- .domains = XELPD_DDI_IO_TC2_POWER_DOMAINS,
- .ops = &icl_ddi_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2,
- }
- },
- {
- .name = "DDI IO TC3",
- .domains = XELPD_DDI_IO_TC3_POWER_DOMAINS,
- .ops = &icl_ddi_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = TGL_PW_CTL_IDX_DDI_TC3,
- }
- },
- {
- .name = "DDI IO TC4",
- .domains = XELPD_DDI_IO_TC4_POWER_DOMAINS,
- .ops = &icl_ddi_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = TGL_PW_CTL_IDX_DDI_TC4,
- }
- },
- {
- .name = "AUX A",
- .domains = ICL_AUX_A_IO_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = ICL_PW_CTL_IDX_AUX_A,
- .hsw.fixed_enable_delay = 600,
- },
- },
- {
- .name = "AUX B",
- .domains = ICL_AUX_B_IO_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = ICL_PW_CTL_IDX_AUX_B,
- .hsw.fixed_enable_delay = 600,
- },
- },
- {
- .name = "AUX C",
- .domains = TGL_AUX_C_IO_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = ICL_PW_CTL_IDX_AUX_C,
- .hsw.fixed_enable_delay = 600,
- },
- },
- {
- .name = "AUX D_XELPD",
- .domains = XELPD_AUX_IO_D_XELPD_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = XELPD_PW_CTL_IDX_AUX_D,
- .hsw.fixed_enable_delay = 600,
- },
- },
- {
- .name = "AUX E_XELPD",
- .domains = XELPD_AUX_IO_E_XELPD_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = XELPD_PW_CTL_IDX_AUX_E,
- },
- },
- {
- .name = "AUX USBC1",
- .domains = XELPD_AUX_IO_USBC1_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1,
- .hsw.fixed_enable_delay = 600,
- },
- },
- {
- .name = "AUX USBC2",
- .domains = XELPD_AUX_IO_USBC2_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2,
- },
- },
- {
- .name = "AUX USBC3",
- .domains = XELPD_AUX_IO_USBC3_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = TGL_PW_CTL_IDX_AUX_TC3,
- },
- },
- {
- .name = "AUX USBC4",
- .domains = XELPD_AUX_IO_USBC4_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = TGL_PW_CTL_IDX_AUX_TC4,
- },
- },
- {
- .name = "AUX TBT1",
- .domains = XELPD_AUX_IO_TBT1_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1,
- .hsw.is_tc_tbt = true,
- },
- },
- {
- .name = "AUX TBT2",
- .domains = XELPD_AUX_IO_TBT2_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2,
- .hsw.is_tc_tbt = true,
- },
- },
- {
- .name = "AUX TBT3",
- .domains = XELPD_AUX_IO_TBT3_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3,
- .hsw.is_tc_tbt = true,
- },
- },
- {
- .name = "AUX TBT4",
- .domains = XELPD_AUX_IO_TBT4_POWER_DOMAINS,
- .ops = &icl_aux_power_well_ops,
- .id = DISP_PW_ID_NONE,
- {
- .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4,
- .hsw.is_tc_tbt = true,
- },
- },
-};
-
static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
int disable_power_well)
@@ -4770,7 +907,9 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
if (!HAS_DISPLAY(dev_priv))
return 0;
- if (IS_DG1(dev_priv))
+ if (IS_DG2(dev_priv))
+ max_dc = 0;
+ else if (IS_DG1(dev_priv))
max_dc = 3;
else if (DISPLAY_VER(dev_priv) >= 12)
max_dc = 4;
@@ -4828,57 +967,6 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
return mask;
}
-static int
-__set_power_wells(struct i915_power_domains *power_domains,
- const struct i915_power_well_desc *power_well_descs,
- int power_well_descs_sz, u64 skip_mask)
-{
- struct drm_i915_private *i915 = container_of(power_domains,
- struct drm_i915_private,
- power_domains);
- u64 power_well_ids = 0;
- int power_well_count = 0;
- int i, plt_idx = 0;
-
- for (i = 0; i < power_well_descs_sz; i++)
- if (!(BIT_ULL(power_well_descs[i].id) & skip_mask))
- power_well_count++;
-
- power_domains->power_well_count = power_well_count;
- power_domains->power_wells =
- kcalloc(power_well_count,
- sizeof(*power_domains->power_wells),
- GFP_KERNEL);
- if (!power_domains->power_wells)
- return -ENOMEM;
-
- for (i = 0; i < power_well_descs_sz; i++) {
- enum i915_power_well_id id = power_well_descs[i].id;
-
- if (BIT_ULL(id) & skip_mask)
- continue;
-
- power_domains->power_wells[plt_idx++].desc =
- &power_well_descs[i];
-
- if (id == DISP_PW_ID_NONE)
- continue;
-
- drm_WARN_ON(&i915->drm, id >= sizeof(power_well_ids) * 8);
- drm_WARN_ON(&i915->drm, power_well_ids & BIT_ULL(id));
- power_well_ids |= BIT_ULL(id);
- }
-
- return 0;
-}
-
-#define set_power_wells_mask(power_domains, __power_well_descs, skip_mask) \
- __set_power_wells(power_domains, __power_well_descs, \
- ARRAY_SIZE(__power_well_descs), skip_mask)
-
-#define set_power_wells(power_domains, __power_well_descs) \
- set_power_wells_mask(power_domains, __power_well_descs, 0)
-
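For reference, the removed __set_power_wells() boils down to one reusable pattern: count the descriptors that survive the skip mask, allocate the runtime array, and warn if any non-anonymous id appears twice. A hedged userspace re-creation with illustrative names (calloc standing in for kcalloc, stderr for drm_WARN_ON):

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct well_desc { const char *name; int id; };	/* id < 0 means "no id" */

	static int set_wells(const struct well_desc *descs, int n, uint64_t skip_mask,
			     const struct well_desc ***out, int *out_n)
	{
		const struct well_desc **wells;
		uint64_t ids = 0;
		int i, count = 0;

		for (i = 0; i < n; i++)
			if (descs[i].id < 0 || !(skip_mask & (1ULL << descs[i].id)))
				count++;

		wells = calloc(count, sizeof(*wells));
		if (!wells)
			return -1;

		for (i = 0, count = 0; i < n; i++) {
			if (descs[i].id >= 0 && (skip_mask & (1ULL << descs[i].id)))
				continue;
			wells[count++] = &descs[i];
			if (descs[i].id < 0)
				continue;
			if (ids & (1ULL << descs[i].id))	/* duplicate id: a bug */
				fprintf(stderr, "duplicate id %d\n", descs[i].id);
			ids |= 1ULL << descs[i].id;
		}
		*out = wells;
		*out_n = count;
		return 0;
	}

	int main(void)
	{
		static const struct well_desc descs[] = {
			{ "always-on", -1 }, { "PW_1", 1 }, { "TC cold off", 2 },
		};
		const struct well_desc **wells;
		int i, n;

		if (set_wells(descs, 3, 1ULL << 2, &wells, &n))	/* skip "TC cold off" */
			return 1;
		for (i = 0; i < n; i++)
			printf("%s\n", wells[i]->name);
		free(wells);
		return 0;
	}

The ADL-S branch in intel_power_domains_init() below shows the one in-tree user of a non-zero skip mask: it reuses tgl_power_wells while skipping BIT_ULL(TGL_DISP_PW_TC_COLD_OFF).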
/**
* intel_power_domains_init - initializes the power domain structures
* @dev_priv: i915 device instance
@@ -4889,7 +977,6 @@ __set_power_wells(struct i915_power_domains *power_domains,
int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
- int err;
dev_priv->params.disable_power_well =
sanitize_disable_power_well_option(dev_priv,
@@ -4900,54 +987,12 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
dev_priv->dmc.target_dc_state =
sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
- BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);
-
mutex_init(&power_domains->lock);
INIT_DELAYED_WORK(&power_domains->async_put_work,
intel_display_power_put_async_work);
- /*
- * The enabling order will be from lower to higher indexed wells;
- * the disabling order is reversed.
- */
- if (!HAS_DISPLAY(dev_priv)) {
- power_domains->power_well_count = 0;
- err = 0;
- } else if (DISPLAY_VER(dev_priv) >= 13) {
- err = set_power_wells(power_domains, xelpd_power_wells);
- } else if (IS_DG1(dev_priv)) {
- err = set_power_wells(power_domains, dg1_power_wells);
- } else if (IS_ALDERLAKE_S(dev_priv)) {
- err = set_power_wells_mask(power_domains, tgl_power_wells,
- BIT_ULL(TGL_DISP_PW_TC_COLD_OFF));
- } else if (IS_ROCKETLAKE(dev_priv)) {
- err = set_power_wells(power_domains, rkl_power_wells);
- } else if (DISPLAY_VER(dev_priv) == 12) {
- err = set_power_wells(power_domains, tgl_power_wells);
- } else if (DISPLAY_VER(dev_priv) == 11) {
- err = set_power_wells(power_domains, icl_power_wells);
- } else if (IS_GEMINILAKE(dev_priv)) {
- err = set_power_wells(power_domains, glk_power_wells);
- } else if (IS_BROXTON(dev_priv)) {
- err = set_power_wells(power_domains, bxt_power_wells);
- } else if (DISPLAY_VER(dev_priv) == 9) {
- err = set_power_wells(power_domains, skl_power_wells);
- } else if (IS_CHERRYVIEW(dev_priv)) {
- err = set_power_wells(power_domains, chv_power_wells);
- } else if (IS_BROADWELL(dev_priv)) {
- err = set_power_wells(power_domains, bdw_power_wells);
- } else if (IS_HASWELL(dev_priv)) {
- err = set_power_wells(power_domains, hsw_power_wells);
- } else if (IS_VALLEYVIEW(dev_priv)) {
- err = set_power_wells(power_domains, vlv_power_wells);
- } else if (IS_I830(dev_priv)) {
- err = set_power_wells(power_domains, i830_power_wells);
- } else {
- err = set_power_wells(power_domains, i9xx_always_on_power_well);
- }
-
- return err;
+ return intel_display_power_map_init(power_domains);
}
/**
@@ -4958,7 +1003,7 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
*/
void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
{
- kfree(dev_priv->power_domains.power_wells);
+ intel_display_power_map_cleanup(&dev_priv->power_domains);
}
static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
@@ -5151,7 +1196,7 @@ static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
{
if (IS_HASWELL(dev_priv)) {
- if (snb_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, val))
+ if (snb_pcode_write(&dev_priv->uncore, GEN6_PCODE_WRITE_D_COMP, val))
drm_dbg_kms(&dev_priv->drm,
"Failed to write to D_COMP\n");
} else {
@@ -6229,3 +2274,209 @@ void intel_display_power_debug(struct drm_i915_private *i915, struct seq_file *m
mutex_unlock(&power_domains->lock);
}
+
+struct intel_ddi_port_domains {
+ enum port port_start;
+ enum port port_end;
+ enum aux_ch aux_ch_start;
+ enum aux_ch aux_ch_end;
+
+ enum intel_display_power_domain ddi_lanes;
+ enum intel_display_power_domain ddi_io;
+ enum intel_display_power_domain aux_legacy_usbc;
+ enum intel_display_power_domain aux_tbt;
+};
+
+static const struct intel_ddi_port_domains
+i9xx_port_domains[] = {
+ {
+ .port_start = PORT_A,
+ .port_end = PORT_F,
+ .aux_ch_start = AUX_CH_A,
+ .aux_ch_end = AUX_CH_F,
+
+ .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
+ .ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
+ .aux_legacy_usbc = POWER_DOMAIN_AUX_A,
+ .aux_tbt = POWER_DOMAIN_INVALID,
+ },
+};
+
+static const struct intel_ddi_port_domains
+d11_port_domains[] = {
+ {
+ .port_start = PORT_A,
+ .port_end = PORT_B,
+ .aux_ch_start = AUX_CH_A,
+ .aux_ch_end = AUX_CH_B,
+
+ .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
+ .ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
+ .aux_legacy_usbc = POWER_DOMAIN_AUX_A,
+ .aux_tbt = POWER_DOMAIN_INVALID,
+ }, {
+ .port_start = PORT_C,
+ .port_end = PORT_F,
+ .aux_ch_start = AUX_CH_C,
+ .aux_ch_end = AUX_CH_F,
+
+ .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_C,
+ .ddi_io = POWER_DOMAIN_PORT_DDI_IO_C,
+ .aux_legacy_usbc = POWER_DOMAIN_AUX_C,
+ .aux_tbt = POWER_DOMAIN_AUX_TBT1,
+ },
+};
+
+static const struct intel_ddi_port_domains
+d12_port_domains[] = {
+ {
+ .port_start = PORT_A,
+ .port_end = PORT_C,
+ .aux_ch_start = AUX_CH_A,
+ .aux_ch_end = AUX_CH_C,
+
+ .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
+ .ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
+ .aux_legacy_usbc = POWER_DOMAIN_AUX_A,
+ .aux_tbt = POWER_DOMAIN_INVALID,
+ }, {
+ .port_start = PORT_TC1,
+ .port_end = PORT_TC6,
+ .aux_ch_start = AUX_CH_USBC1,
+ .aux_ch_end = AUX_CH_USBC6,
+
+ .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_TC1,
+ .ddi_io = POWER_DOMAIN_PORT_DDI_IO_TC1,
+ .aux_legacy_usbc = POWER_DOMAIN_AUX_USBC1,
+ .aux_tbt = POWER_DOMAIN_AUX_TBT1,
+ },
+};
+
+static const struct intel_ddi_port_domains
+d13_port_domains[] = {
+ {
+ .port_start = PORT_A,
+ .port_end = PORT_C,
+ .aux_ch_start = AUX_CH_A,
+ .aux_ch_end = AUX_CH_C,
+
+ .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_A,
+ .ddi_io = POWER_DOMAIN_PORT_DDI_IO_A,
+ .aux_legacy_usbc = POWER_DOMAIN_AUX_A,
+ .aux_tbt = POWER_DOMAIN_INVALID,
+ }, {
+ .port_start = PORT_TC1,
+ .port_end = PORT_TC4,
+ .aux_ch_start = AUX_CH_USBC1,
+ .aux_ch_end = AUX_CH_USBC4,
+
+ .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_TC1,
+ .ddi_io = POWER_DOMAIN_PORT_DDI_IO_TC1,
+ .aux_legacy_usbc = POWER_DOMAIN_AUX_USBC1,
+ .aux_tbt = POWER_DOMAIN_AUX_TBT1,
+ }, {
+ .port_start = PORT_D_XELPD,
+ .port_end = PORT_E_XELPD,
+ .aux_ch_start = AUX_CH_D_XELPD,
+ .aux_ch_end = AUX_CH_E_XELPD,
+
+ .ddi_lanes = POWER_DOMAIN_PORT_DDI_LANES_D,
+ .ddi_io = POWER_DOMAIN_PORT_DDI_IO_D,
+ .aux_legacy_usbc = POWER_DOMAIN_AUX_D,
+ .aux_tbt = POWER_DOMAIN_INVALID,
+ },
+};
+
+static void
+intel_port_domains_for_platform(struct drm_i915_private *i915,
+ const struct intel_ddi_port_domains **domains,
+ int *domains_size)
+{
+ if (DISPLAY_VER(i915) >= 13) {
+ *domains = d13_port_domains;
+ *domains_size = ARRAY_SIZE(d13_port_domains);
+ } else if (DISPLAY_VER(i915) >= 12) {
+ *domains = d12_port_domains;
+ *domains_size = ARRAY_SIZE(d12_port_domains);
+ } else if (DISPLAY_VER(i915) >= 11) {
+ *domains = d11_port_domains;
+ *domains_size = ARRAY_SIZE(d11_port_domains);
+ } else {
+ *domains = i9xx_port_domains;
+ *domains_size = ARRAY_SIZE(i9xx_port_domains);
+ }
+}
+
+static const struct intel_ddi_port_domains *
+intel_port_domains_for_port(struct drm_i915_private *i915, enum port port)
+{
+ const struct intel_ddi_port_domains *domains;
+ int domains_size;
+ int i;
+
+ intel_port_domains_for_platform(i915, &domains, &domains_size);
+ for (i = 0; i < domains_size; i++)
+ if (port >= domains[i].port_start && port <= domains[i].port_end)
+ return &domains[i];
+
+ return NULL;
+}
+
+enum intel_display_power_domain
+intel_display_power_ddi_io_domain(struct drm_i915_private *i915, enum port port)
+{
+ const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(i915, port);
+
+ if (drm_WARN_ON(&i915->drm, !domains) || domains->ddi_io == POWER_DOMAIN_INVALID)
+ return POWER_DOMAIN_PORT_DDI_IO_A;
+
+ return domains->ddi_io + (int)(port - domains->port_start);
+}
+
+enum intel_display_power_domain
+intel_display_power_ddi_lanes_domain(struct drm_i915_private *i915, enum port port)
+{
+ const struct intel_ddi_port_domains *domains = intel_port_domains_for_port(i915, port);
+
+ if (drm_WARN_ON(&i915->drm, !domains) || domains->ddi_lanes == POWER_DOMAIN_INVALID)
+ return POWER_DOMAIN_PORT_DDI_LANES_A;
+
+ return domains->ddi_lanes + (int)(port - domains->port_start);
+}
+
+static const struct intel_ddi_port_domains *
+intel_port_domains_for_aux_ch(struct drm_i915_private *i915, enum aux_ch aux_ch)
+{
+ const struct intel_ddi_port_domains *domains;
+ int domains_size;
+ int i;
+
+ intel_port_domains_for_platform(i915, &domains, &domains_size);
+ for (i = 0; i < domains_size; i++)
+ if (aux_ch >= domains[i].aux_ch_start && aux_ch <= domains[i].aux_ch_end)
+ return &domains[i];
+
+ return NULL;
+}
+
+enum intel_display_power_domain
+intel_display_power_legacy_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch)
+{
+ const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch);
+
+ if (drm_WARN_ON(&i915->drm, !domains) || domains->aux_legacy_usbc == POWER_DOMAIN_INVALID)
+ return POWER_DOMAIN_AUX_A;
+
+ return domains->aux_legacy_usbc + (int)(aux_ch - domains->aux_ch_start);
+}
+
+enum intel_display_power_domain
+intel_display_power_tbt_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch)
+{
+ const struct intel_ddi_port_domains *domains = intel_port_domains_for_aux_ch(i915, aux_ch);
+
+ if (drm_WARN_ON(&i915->drm, !domains) || domains->aux_tbt == POWER_DOMAIN_INVALID)
+ return POWER_DOMAIN_AUX_TBT1;
+
+ return domains->aux_tbt + (int)(aux_ch - domains->aux_ch_start);
+}
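All four helpers above compute a domain by offsetting from the first entry of a port group, which only works because the reworked enum in the header diff below keeps each group contiguous. A standalone sketch of the mapping with simplified, hypothetical enums:

	#include <stdio.h>

	enum port { PORT_A, PORT_B, PORT_C, PORT_TC1, PORT_TC2 };
	enum domain { DOMAIN_DDI_IO_A, DOMAIN_DDI_IO_B, DOMAIN_DDI_IO_C,
		      DOMAIN_DDI_IO_TC1, DOMAIN_DDI_IO_TC2, DOMAIN_INVALID };

	struct port_domains {
		enum port port_start, port_end;
		enum domain ddi_io;	/* domain of the group's first port */
	};

	static const struct port_domains groups[] = {
		{ PORT_A,   PORT_C,   DOMAIN_DDI_IO_A },
		{ PORT_TC1, PORT_TC2, DOMAIN_DDI_IO_TC1 },
	};

	static enum domain ddi_io_domain(enum port port)
	{
		size_t i;

		for (i = 0; i < sizeof(groups) / sizeof(groups[0]); i++)
			if (port >= groups[i].port_start && port <= groups[i].port_end)
				/* contiguous enum values: offset from the base */
				return groups[i].ddi_io + (port - groups[i].port_start);

		return DOMAIN_INVALID;
	}

	int main(void)
	{
		/* prints 4, i.e. DOMAIN_DDI_IO_TC2 */
		printf("%d\n", ddi_io_domain(PORT_TC2));
		return 0;
	}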
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h
index ced384b0a165..7136ea3f233e 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power.h
@@ -8,8 +8,10 @@
#include "intel_runtime_pm.h"
+enum aux_ch;
enum dpio_channel;
enum dpio_phy;
+enum port;
struct drm_i915_private;
struct i915_power_well;
struct intel_encoder;
@@ -25,10 +27,10 @@ enum intel_display_power_domain {
POWER_DOMAIN_PIPE_B,
POWER_DOMAIN_PIPE_C,
POWER_DOMAIN_PIPE_D,
- POWER_DOMAIN_PIPE_A_PANEL_FITTER,
- POWER_DOMAIN_PIPE_B_PANEL_FITTER,
- POWER_DOMAIN_PIPE_C_PANEL_FITTER,
- POWER_DOMAIN_PIPE_D_PANEL_FITTER,
+ POWER_DOMAIN_PIPE_PANEL_FITTER_A,
+ POWER_DOMAIN_PIPE_PANEL_FITTER_B,
+ POWER_DOMAIN_PIPE_PANEL_FITTER_C,
+ POWER_DOMAIN_PIPE_PANEL_FITTER_D,
POWER_DOMAIN_TRANSCODER_A,
POWER_DOMAIN_TRANSCODER_B,
POWER_DOMAIN_TRANSCODER_C,
@@ -40,46 +42,34 @@ enum intel_display_power_domain {
/* VDSC/joining for eDP/DSI transcoder (ICL) or pipe A (TGL) */
POWER_DOMAIN_TRANSCODER_VDSC_PW2,
- POWER_DOMAIN_PORT_DDI_A_LANES,
- POWER_DOMAIN_PORT_DDI_B_LANES,
- POWER_DOMAIN_PORT_DDI_C_LANES,
- POWER_DOMAIN_PORT_DDI_D_LANES,
- POWER_DOMAIN_PORT_DDI_E_LANES,
- POWER_DOMAIN_PORT_DDI_F_LANES,
- POWER_DOMAIN_PORT_DDI_G_LANES,
- POWER_DOMAIN_PORT_DDI_H_LANES,
- POWER_DOMAIN_PORT_DDI_I_LANES,
-
- POWER_DOMAIN_PORT_DDI_LANES_TC1 = POWER_DOMAIN_PORT_DDI_D_LANES, /* tgl+ */
+ POWER_DOMAIN_PORT_DDI_LANES_A,
+ POWER_DOMAIN_PORT_DDI_LANES_B,
+ POWER_DOMAIN_PORT_DDI_LANES_C,
+ POWER_DOMAIN_PORT_DDI_LANES_D,
+ POWER_DOMAIN_PORT_DDI_LANES_E,
+ POWER_DOMAIN_PORT_DDI_LANES_F,
+
+ POWER_DOMAIN_PORT_DDI_LANES_TC1,
POWER_DOMAIN_PORT_DDI_LANES_TC2,
POWER_DOMAIN_PORT_DDI_LANES_TC3,
POWER_DOMAIN_PORT_DDI_LANES_TC4,
POWER_DOMAIN_PORT_DDI_LANES_TC5,
POWER_DOMAIN_PORT_DDI_LANES_TC6,
- POWER_DOMAIN_PORT_DDI_LANES_D_XELPD = POWER_DOMAIN_PORT_DDI_LANES_TC5, /* XELPD */
- POWER_DOMAIN_PORT_DDI_LANES_E_XELPD,
-
- POWER_DOMAIN_PORT_DDI_A_IO,
- POWER_DOMAIN_PORT_DDI_B_IO,
- POWER_DOMAIN_PORT_DDI_C_IO,
- POWER_DOMAIN_PORT_DDI_D_IO,
- POWER_DOMAIN_PORT_DDI_E_IO,
- POWER_DOMAIN_PORT_DDI_F_IO,
- POWER_DOMAIN_PORT_DDI_G_IO,
- POWER_DOMAIN_PORT_DDI_H_IO,
- POWER_DOMAIN_PORT_DDI_I_IO,
+ POWER_DOMAIN_PORT_DDI_IO_A,
+ POWER_DOMAIN_PORT_DDI_IO_B,
+ POWER_DOMAIN_PORT_DDI_IO_C,
+ POWER_DOMAIN_PORT_DDI_IO_D,
+ POWER_DOMAIN_PORT_DDI_IO_E,
+ POWER_DOMAIN_PORT_DDI_IO_F,
- POWER_DOMAIN_PORT_DDI_IO_TC1 = POWER_DOMAIN_PORT_DDI_D_IO, /* tgl+ */
+ POWER_DOMAIN_PORT_DDI_IO_TC1,
POWER_DOMAIN_PORT_DDI_IO_TC2,
POWER_DOMAIN_PORT_DDI_IO_TC3,
POWER_DOMAIN_PORT_DDI_IO_TC4,
POWER_DOMAIN_PORT_DDI_IO_TC5,
POWER_DOMAIN_PORT_DDI_IO_TC6,
- POWER_DOMAIN_PORT_DDI_IO_D_XELPD = POWER_DOMAIN_PORT_DDI_IO_TC5, /* XELPD */
- POWER_DOMAIN_PORT_DDI_IO_E_XELPD,
-
POWER_DOMAIN_PORT_DSI,
POWER_DOMAIN_PORT_CRT,
POWER_DOMAIN_PORT_OTHER,
@@ -92,30 +82,17 @@ enum intel_display_power_domain {
POWER_DOMAIN_AUX_D,
POWER_DOMAIN_AUX_E,
POWER_DOMAIN_AUX_F,
- POWER_DOMAIN_AUX_G,
- POWER_DOMAIN_AUX_H,
- POWER_DOMAIN_AUX_I,
- POWER_DOMAIN_AUX_USBC1 = POWER_DOMAIN_AUX_D, /* tgl+ */
+ POWER_DOMAIN_AUX_USBC1,
POWER_DOMAIN_AUX_USBC2,
POWER_DOMAIN_AUX_USBC3,
POWER_DOMAIN_AUX_USBC4,
POWER_DOMAIN_AUX_USBC5,
POWER_DOMAIN_AUX_USBC6,
- POWER_DOMAIN_AUX_D_XELPD = POWER_DOMAIN_AUX_USBC5, /* XELPD */
- POWER_DOMAIN_AUX_E_XELPD,
-
POWER_DOMAIN_AUX_IO_A,
- POWER_DOMAIN_AUX_C_TBT,
- POWER_DOMAIN_AUX_D_TBT,
- POWER_DOMAIN_AUX_E_TBT,
- POWER_DOMAIN_AUX_F_TBT,
- POWER_DOMAIN_AUX_G_TBT,
- POWER_DOMAIN_AUX_H_TBT,
- POWER_DOMAIN_AUX_I_TBT,
-
- POWER_DOMAIN_AUX_TBT1 = POWER_DOMAIN_AUX_D_TBT, /* tgl+ */
+
+ POWER_DOMAIN_AUX_TBT1,
POWER_DOMAIN_AUX_TBT2,
POWER_DOMAIN_AUX_TBT3,
POWER_DOMAIN_AUX_TBT4,
@@ -130,15 +107,20 @@ enum intel_display_power_domain {
POWER_DOMAIN_INIT,
POWER_DOMAIN_NUM,
+ POWER_DOMAIN_INVALID = POWER_DOMAIN_NUM,
};
#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
- ((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
+ ((pipe) + POWER_DOMAIN_PIPE_PANEL_FITTER_A)
#define POWER_DOMAIN_TRANSCODER(tran) \
((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
(tran) + POWER_DOMAIN_TRANSCODER_A)
+struct intel_power_domain_mask {
+ DECLARE_BITMAP(bits, POWER_DOMAIN_NUM);
+};
+
struct i915_power_domains {
/*
* Power wells needed for initialization at driver init and suspend
@@ -156,41 +138,21 @@ struct i915_power_domains {
struct delayed_work async_put_work;
intel_wakeref_t async_put_wakeref;
- u64 async_put_domains[2];
+ struct intel_power_domain_mask async_put_domains[2];
struct i915_power_well *power_wells;
};
struct intel_display_power_domain_set {
- u64 mask;
+ struct intel_power_domain_mask mask;
#ifdef CONFIG_DRM_I915_DEBUG_RUNTIME_PM
intel_wakeref_t wakerefs[POWER_DOMAIN_NUM];
#endif
};
-#define for_each_power_domain(domain, mask) \
- for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
- for_each_if(BIT_ULL(domain) & (mask))
-
-#define for_each_power_well(__dev_priv, __power_well) \
- for ((__power_well) = (__dev_priv)->power_domains.power_wells; \
- (__power_well) - (__dev_priv)->power_domains.power_wells < \
- (__dev_priv)->power_domains.power_well_count; \
- (__power_well)++)
-
-#define for_each_power_well_reverse(__dev_priv, __power_well) \
- for ((__power_well) = (__dev_priv)->power_domains.power_wells + \
- (__dev_priv)->power_domains.power_well_count - 1; \
- (__power_well) - (__dev_priv)->power_domains.power_wells >= 0; \
- (__power_well)--)
-
-#define for_each_power_domain_well(__dev_priv, __power_well, __domain_mask) \
- for_each_power_well(__dev_priv, __power_well) \
- for_each_if((__power_well)->desc->domains & (__domain_mask))
-
-#define for_each_power_domain_well_reverse(__dev_priv, __power_well, __domain_mask) \
- for_each_power_well_reverse(__dev_priv, __power_well) \
- for_each_if((__power_well)->desc->domains & (__domain_mask))
+#define for_each_power_domain(__domain, __mask) \
+ for ((__domain) = 0; (__domain) < POWER_DOMAIN_NUM; (__domain)++) \
+ for_each_if(test_bit((__domain), (__mask)->bits))
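The rewritten iterator walks the new bitmap type rather than a u64, so the number of power domains is no longer capped at 64; for_each_if() just filters the inner statement. A small standalone model of the idiom, with a toy 128-bit mask and illustrative names (a sketch, not part of the patch):

#include <stdio.h>

#define NBITS 128
/* Same trick as drm's for_each_if(): skip the body when cond is false. */
#define for_each_if(cond) if (!(cond)) {} else

static int test_bit(int nr, const unsigned int *bits)
{
	return (bits[nr / 32] >> (nr % 32)) & 1;
}

#define for_each_set_domain(bit, bits) \
	for ((bit) = 0; (bit) < NBITS; (bit)++) \
		for_each_if(test_bit((bit), (bits)))

int main(void)
{
	unsigned int mask[NBITS / 32] = { 0 };
	int bit;

	mask[0] = 0x5;	/* bits 0 and 2 */
	mask[2] = 0x1;	/* bit 64, out of reach of the old u64 mask */

	for_each_set_domain(bit, mask)
		printf("domain %d\n", bit);	/* prints 0, 2 and 64 */

	return 0;
}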
int intel_power_domains_init(struct drm_i915_private *dev_priv);
void intel_power_domains_cleanup(struct drm_i915_private *dev_priv);
@@ -271,17 +233,26 @@ intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915,
void
intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
struct intel_display_power_domain_set *power_domain_set,
- u64 mask);
+ struct intel_power_domain_mask *mask);
static inline void
intel_display_power_put_all_in_set(struct drm_i915_private *i915,
struct intel_display_power_domain_set *power_domain_set)
{
- intel_display_power_put_mask_in_set(i915, power_domain_set, power_domain_set->mask);
+ intel_display_power_put_mask_in_set(i915, power_domain_set, &power_domain_set->mask);
}
void intel_display_power_debug(struct drm_i915_private *i915, struct seq_file *m);
+enum intel_display_power_domain
+intel_display_power_ddi_lanes_domain(struct drm_i915_private *i915, enum port port);
+enum intel_display_power_domain
+intel_display_power_ddi_io_domain(struct drm_i915_private *i915, enum port port);
+enum intel_display_power_domain
+intel_display_power_legacy_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch);
+enum intel_display_power_domain
+intel_display_power_tbt_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch);
+
/*
* FIXME: We should probably switch this to a 0-based scheme to be consistent
* with how we now name/number DBUF_CTL instances.
@@ -305,9 +276,4 @@ void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
for ((wf) = intel_display_power_get_if_enabled((i915), (domain)); (wf); \
intel_display_power_put_async((i915), (domain), (wf)), (wf) = 0)
-void chv_phy_powergate_lanes(struct intel_encoder *encoder,
- bool override, unsigned int mask);
-bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
- enum dpio_channel ch, bool override);
-
#endif /* __INTEL_DISPLAY_POWER_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_map.c b/drivers/gpu/drm/i915/display/intel_display_power_map.c
new file mode 100644
index 000000000000..97b367f39f35
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_power_map.c
@@ -0,0 +1,1501 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "i915_reg.h"
+
+#include "vlv_sideband_reg.h"
+
+#include "intel_display_power_map.h"
+#include "intel_display_power_well.h"
+
+#define __LIST_INLINE_ELEMS(__elem_type, ...) \
+ ((__elem_type[]) { __VA_ARGS__ })
+
+#define __LIST(__elems) { \
+ .list = __elems, \
+ .count = ARRAY_SIZE(__elems), \
+}
+
+#define I915_PW_DOMAINS(...) \
+ (const struct i915_power_domain_list) \
+ __LIST(__LIST_INLINE_ELEMS(const enum intel_display_power_domain, __VA_ARGS__))
+
+#define I915_DECL_PW_DOMAINS(__name, ...) \
+ static const struct i915_power_domain_list __name = I915_PW_DOMAINS(__VA_ARGS__)
+
+/* A zero-length list assigns all power domains; a NULL list assigns none. */
+#define I915_PW_DOMAINS_NONE NULL
+#define I915_PW_DOMAINS_ALL /* zero-length list */
+
+#define I915_PW_INSTANCES(...) \
+ (const struct i915_power_well_instance_list) \
+ __LIST(__LIST_INLINE_ELEMS(const struct i915_power_well_instance, __VA_ARGS__))
+
+#define I915_PW(_name, _domain_list, ...) \
+ { .name = _name, .domain_list = _domain_list, ## __VA_ARGS__ }
+
+
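These list macros lean on C99 compound literals: __LIST_INLINE_ELEMS() materialises an unnamed const array with static storage at file scope, and __LIST expands its argument twice so ARRAY_SIZE() can derive the element count from the same literal. A standalone sketch of the pattern, with illustrative names (not part of the patch):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Same shape as i915_power_domain_list: pointer plus element count. */
struct int_list {
	const int *list;
	unsigned char count;
};

#define LIST_INLINE_ELEMS(...) ((const int[]) { __VA_ARGS__ })
#define LIST(elems) { .list = elems, .count = ARRAY_SIZE(elems) }

static const struct int_list primes = LIST(LIST_INLINE_ELEMS(2, 3, 5, 7));

int main(void)
{
	for (int i = 0; i < primes.count; i++)
		printf("%d\n", primes.list[i]);	/* 2 3 5 7 */
	return 0;
}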
+struct i915_power_well_desc_list {
+ const struct i915_power_well_desc *list;
+ u8 count;
+};
+
+#define I915_PW_DESCRIPTORS(x) __LIST(x)
+
+
+I915_DECL_PW_DOMAINS(i9xx_pwdoms_always_on, I915_PW_DOMAINS_ALL);
+
+static const struct i915_power_well_desc i9xx_power_wells_always_on[] = {
+ {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("always-on", &i9xx_pwdoms_always_on),
+ ),
+ .ops = &i9xx_always_on_power_well_ops,
+ .always_on = true,
+ },
+};
+
+static const struct i915_power_well_desc_list i9xx_power_wells[] = {
+ I915_PW_DESCRIPTORS(i9xx_power_wells_always_on),
+};
+
+I915_DECL_PW_DOMAINS(i830_pwdoms_pipes,
+ POWER_DOMAIN_PIPE_A,
+ POWER_DOMAIN_PIPE_B,
+ POWER_DOMAIN_PIPE_PANEL_FITTER_A,
+ POWER_DOMAIN_PIPE_PANEL_FITTER_B,
+ POWER_DOMAIN_TRANSCODER_A,
+ POWER_DOMAIN_TRANSCODER_B,
+ POWER_DOMAIN_INIT);
+
+static const struct i915_power_well_desc i830_power_wells_main[] = {
+ {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("pipes", &i830_pwdoms_pipes),
+ ),
+ .ops = &i830_pipes_power_well_ops,
+ },
+};
+
+static const struct i915_power_well_desc_list i830_power_wells[] = {
+ I915_PW_DESCRIPTORS(i9xx_power_wells_always_on),
+ I915_PW_DESCRIPTORS(i830_power_wells_main),
+};
+
+I915_DECL_PW_DOMAINS(hsw_pwdoms_display,
+ POWER_DOMAIN_PIPE_B,
+ POWER_DOMAIN_PIPE_C,
+ POWER_DOMAIN_PIPE_PANEL_FITTER_A,
+ POWER_DOMAIN_PIPE_PANEL_FITTER_B,
+ POWER_DOMAIN_PIPE_PANEL_FITTER_C,
+ POWER_DOMAIN_TRANSCODER_A,
+ POWER_DOMAIN_TRANSCODER_B,
+ POWER_DOMAIN_TRANSCODER_C,
+ POWER_DOMAIN_PORT_DDI_LANES_B,
+ POWER_DOMAIN_PORT_DDI_LANES_C,
+ POWER_DOMAIN_PORT_DDI_LANES_D,
+ POWER_DOMAIN_PORT_CRT, /* DDI E */
+ POWER_DOMAIN_VGA,
+ POWER_DOMAIN_AUDIO_MMIO,
+ POWER_DOMAIN_AUDIO_PLAYBACK,
+ POWER_DOMAIN_INIT);
+
+static const struct i915_power_well_desc hsw_power_wells_main[] = {
+ {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("display", &hsw_pwdoms_display,
+ .hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
+ .id = HSW_DISP_PW_GLOBAL),
+ ),
+ .ops = &hsw_power_well_ops,
+ .has_vga = true,
+ },
+};
+
+static const struct i915_power_well_desc_list hsw_power_wells[] = {
+ I915_PW_DESCRIPTORS(i9xx_power_wells_always_on),
+ I915_PW_DESCRIPTORS(hsw_power_wells_main),
+};
+
+I915_DECL_PW_DOMAINS(bdw_pwdoms_display,
+ POWER_DOMAIN_PIPE_B,
+ POWER_DOMAIN_PIPE_C,
+ POWER_DOMAIN_PIPE_PANEL_FITTER_B,
+ POWER_DOMAIN_PIPE_PANEL_FITTER_C,
+ POWER_DOMAIN_TRANSCODER_A,
+ POWER_DOMAIN_TRANSCODER_B,
+ POWER_DOMAIN_TRANSCODER_C,
+ POWER_DOMAIN_PORT_DDI_LANES_B,
+ POWER_DOMAIN_PORT_DDI_LANES_C,
+ POWER_DOMAIN_PORT_DDI_LANES_D,
+ POWER_DOMAIN_PORT_CRT, /* DDI E */
+ POWER_DOMAIN_VGA,
+ POWER_DOMAIN_AUDIO_MMIO,
+ POWER_DOMAIN_AUDIO_PLAYBACK,
+ POWER_DOMAIN_INIT);
+
+static const struct i915_power_well_desc bdw_power_wells_main[] = {
+ {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("display", &bdw_pwdoms_display,
+ .hsw.idx = HSW_PW_CTL_IDX_GLOBAL,
+ .id = HSW_DISP_PW_GLOBAL),
+ ),
+ .ops = &hsw_power_well_ops,
+ .has_vga = true,
+ .irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
+ },
+};
+
+static const struct i915_power_well_desc_list bdw_power_wells[] = {
+ I915_PW_DESCRIPTORS(i9xx_power_wells_always_on),
+ I915_PW_DESCRIPTORS(bdw_power_wells_main),
+};
+
+I915_DECL_PW_DOMAINS(vlv_pwdoms_display,
+ POWER_DOMAIN_DISPLAY_CORE,
+ POWER_DOMAIN_PIPE_A,
+ POWER_DOMAIN_PIPE_B,
+ POWER_DOMAIN_PIPE_PANEL_FITTER_A,
+ POWER_DOMAIN_PIPE_PANEL_FITTER_B,
+ POWER_DOMAIN_TRANSCODER_A,
+ POWER_DOMAIN_TRANSCODER_B,
+ POWER_DOMAIN_PORT_DDI_LANES_B,
+ POWER_DOMAIN_PORT_DDI_LANES_C,
+ POWER_DOMAIN_PORT_DSI,
+ POWER_DOMAIN_PORT_CRT,
+ POWER_DOMAIN_VGA,
+ POWER_DOMAIN_AUDIO_MMIO,
+ POWER_DOMAIN_AUDIO_PLAYBACK,
+ POWER_DOMAIN_AUX_B,
+ POWER_DOMAIN_AUX_C,
+ POWER_DOMAIN_GMBUS,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(vlv_pwdoms_dpio_cmn_bc,
+ POWER_DOMAIN_PORT_DDI_LANES_B,
+ POWER_DOMAIN_PORT_DDI_LANES_C,
+ POWER_DOMAIN_PORT_CRT,
+ POWER_DOMAIN_AUX_B,
+ POWER_DOMAIN_AUX_C,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(vlv_pwdoms_dpio_tx_bc_lanes,
+ POWER_DOMAIN_PORT_DDI_LANES_B,
+ POWER_DOMAIN_PORT_DDI_LANES_C,
+ POWER_DOMAIN_AUX_B,
+ POWER_DOMAIN_AUX_C,
+ POWER_DOMAIN_INIT);
+
+static const struct i915_power_well_desc vlv_power_wells_main[] = {
+ {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("display", &vlv_pwdoms_display,
+ .vlv.idx = PUNIT_PWGT_IDX_DISP2D,
+ .id = VLV_DISP_PW_DISP2D),
+ ),
+ .ops = &vlv_display_power_well_ops,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("dpio-tx-b-01", &vlv_pwdoms_dpio_tx_bc_lanes,
+ .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01),
+ I915_PW("dpio-tx-b-23", &vlv_pwdoms_dpio_tx_bc_lanes,
+ .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23),
+ I915_PW("dpio-tx-c-01", &vlv_pwdoms_dpio_tx_bc_lanes,
+ .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01),
+ I915_PW("dpio-tx-c-23", &vlv_pwdoms_dpio_tx_bc_lanes,
+ .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23),
+ ),
+ .ops = &vlv_dpio_power_well_ops,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("dpio-common", &vlv_pwdoms_dpio_cmn_bc,
+ .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
+ .id = VLV_DISP_PW_DPIO_CMN_BC),
+ ),
+ .ops = &vlv_dpio_cmn_power_well_ops,
+ },
+};
+
+static const struct i915_power_well_desc_list vlv_power_wells[] = {
+ I915_PW_DESCRIPTORS(i9xx_power_wells_always_on),
+ I915_PW_DESCRIPTORS(vlv_power_wells_main),
+};
+
+I915_DECL_PW_DOMAINS(chv_pwdoms_display,
+ POWER_DOMAIN_DISPLAY_CORE,
+ POWER_DOMAIN_PIPE_A,
+ POWER_DOMAIN_PIPE_B,
+ POWER_DOMAIN_PIPE_C,
+ POWER_DOMAIN_PIPE_PANEL_FITTER_A,
+ POWER_DOMAIN_PIPE_PANEL_FITTER_B,
+ POWER_DOMAIN_PIPE_PANEL_FITTER_C,
+ POWER_DOMAIN_TRANSCODER_A,
+ POWER_DOMAIN_TRANSCODER_B,
+ POWER_DOMAIN_TRANSCODER_C,
+ POWER_DOMAIN_PORT_DDI_LANES_B,
+ POWER_DOMAIN_PORT_DDI_LANES_C,
+ POWER_DOMAIN_PORT_DDI_LANES_D,
+ POWER_DOMAIN_PORT_DSI,
+ POWER_DOMAIN_VGA,
+ POWER_DOMAIN_AUDIO_MMIO,
+ POWER_DOMAIN_AUDIO_PLAYBACK,
+ POWER_DOMAIN_AUX_B,
+ POWER_DOMAIN_AUX_C,
+ POWER_DOMAIN_AUX_D,
+ POWER_DOMAIN_GMBUS,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(chv_pwdoms_dpio_cmn_bc,
+ POWER_DOMAIN_PORT_DDI_LANES_B,
+ POWER_DOMAIN_PORT_DDI_LANES_C,
+ POWER_DOMAIN_AUX_B,
+ POWER_DOMAIN_AUX_C,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(chv_pwdoms_dpio_cmn_d,
+ POWER_DOMAIN_PORT_DDI_LANES_D,
+ POWER_DOMAIN_AUX_D,
+ POWER_DOMAIN_INIT);
+
+static const struct i915_power_well_desc chv_power_wells_main[] = {
+ {
+ /*
+ * Pipe A power well is the new disp2d well. Pipe B and C
+ * power wells don't actually exist. Pipe A power well is
+ * required for any pipe to work.
+ */
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("display", &chv_pwdoms_display),
+ ),
+ .ops = &chv_pipe_power_well_ops,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("dpio-common-bc", &chv_pwdoms_dpio_cmn_bc,
+ .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
+ .id = VLV_DISP_PW_DPIO_CMN_BC),
+ I915_PW("dpio-common-d", &chv_pwdoms_dpio_cmn_d,
+ .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D,
+ .id = CHV_DISP_PW_DPIO_CMN_D),
+ ),
+ .ops = &chv_dpio_cmn_power_well_ops,
+ },
+};
+
+static const struct i915_power_well_desc_list chv_power_wells[] = {
+ I915_PW_DESCRIPTORS(i9xx_power_wells_always_on),
+ I915_PW_DESCRIPTORS(chv_power_wells_main),
+};
+
+#define SKL_PW_2_POWER_DOMAINS \
+ POWER_DOMAIN_PIPE_B, \
+ POWER_DOMAIN_PIPE_C, \
+ POWER_DOMAIN_PIPE_PANEL_FITTER_B, \
+ POWER_DOMAIN_PIPE_PANEL_FITTER_C, \
+ POWER_DOMAIN_TRANSCODER_A, \
+ POWER_DOMAIN_TRANSCODER_B, \
+ POWER_DOMAIN_TRANSCODER_C, \
+ POWER_DOMAIN_PORT_DDI_LANES_B, \
+ POWER_DOMAIN_PORT_DDI_LANES_C, \
+ POWER_DOMAIN_PORT_DDI_LANES_D, \
+ POWER_DOMAIN_PORT_DDI_LANES_E, \
+ POWER_DOMAIN_VGA, \
+ POWER_DOMAIN_AUDIO_MMIO, \
+ POWER_DOMAIN_AUDIO_PLAYBACK, \
+ POWER_DOMAIN_AUX_B, \
+ POWER_DOMAIN_AUX_C, \
+ POWER_DOMAIN_AUX_D
+
+I915_DECL_PW_DOMAINS(skl_pwdoms_pw_2,
+ SKL_PW_2_POWER_DOMAINS,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(skl_pwdoms_dc_off,
+ SKL_PW_2_POWER_DOMAINS,
+ POWER_DOMAIN_AUX_A,
+ POWER_DOMAIN_MODESET,
+ POWER_DOMAIN_GT_IRQ,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(skl_pwdoms_ddi_io_a_e,
+ POWER_DOMAIN_PORT_DDI_IO_A,
+ POWER_DOMAIN_PORT_DDI_IO_E,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(skl_pwdoms_ddi_io_b,
+ POWER_DOMAIN_PORT_DDI_IO_B,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(skl_pwdoms_ddi_io_c,
+ POWER_DOMAIN_PORT_DDI_IO_C,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(skl_pwdoms_ddi_io_d,
+ POWER_DOMAIN_PORT_DDI_IO_D,
+ POWER_DOMAIN_INIT);
+
+static const struct i915_power_well_desc skl_power_wells_pw_1[] = {
+ {
+ /* Handled by the DMC firmware */
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_1", I915_PW_DOMAINS_NONE,
+ .hsw.idx = SKL_PW_CTL_IDX_PW_1,
+ .id = SKL_DISP_PW_1),
+ ),
+ .ops = &hsw_power_well_ops,
+ .always_on = true,
+ .has_fuses = true,
+ },
+};
+
+static const struct i915_power_well_desc skl_power_wells_main[] = {
+ {
+ /* Handled by the DMC firmware */
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("MISC_IO", I915_PW_DOMAINS_NONE,
+ .hsw.idx = SKL_PW_CTL_IDX_MISC_IO,
+ .id = SKL_DISP_PW_MISC_IO),
+ ),
+ .ops = &hsw_power_well_ops,
+ .always_on = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("DC_off", &skl_pwdoms_dc_off,
+ .id = SKL_DISP_DC_OFF),
+ ),
+ .ops = &gen9_dc_off_power_well_ops,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_2", &skl_pwdoms_pw_2,
+ .hsw.idx = SKL_PW_CTL_IDX_PW_2,
+ .id = SKL_DISP_PW_2),
+ ),
+ .ops = &hsw_power_well_ops,
+ .has_vga = true,
+ .irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("DDI_IO_A_E", &skl_pwdoms_ddi_io_a_e, .hsw.idx = SKL_PW_CTL_IDX_DDI_A_E),
+ I915_PW("DDI_IO_B", &skl_pwdoms_ddi_io_b, .hsw.idx = SKL_PW_CTL_IDX_DDI_B),
+ I915_PW("DDI_IO_C", &skl_pwdoms_ddi_io_c, .hsw.idx = SKL_PW_CTL_IDX_DDI_C),
+ I915_PW("DDI_IO_D", &skl_pwdoms_ddi_io_d, .hsw.idx = SKL_PW_CTL_IDX_DDI_D),
+ ),
+ .ops = &hsw_power_well_ops,
+ },
+};
+
+static const struct i915_power_well_desc_list skl_power_wells[] = {
+ I915_PW_DESCRIPTORS(i9xx_power_wells_always_on),
+ I915_PW_DESCRIPTORS(skl_power_wells_pw_1),
+ I915_PW_DESCRIPTORS(skl_power_wells_main),
+};
+
+#define BXT_PW_2_POWER_DOMAINS \
+ POWER_DOMAIN_PIPE_B, \
+ POWER_DOMAIN_PIPE_C, \
+ POWER_DOMAIN_PIPE_PANEL_FITTER_B, \
+ POWER_DOMAIN_PIPE_PANEL_FITTER_C, \
+ POWER_DOMAIN_TRANSCODER_A, \
+ POWER_DOMAIN_TRANSCODER_B, \
+ POWER_DOMAIN_TRANSCODER_C, \
+ POWER_DOMAIN_PORT_DDI_LANES_B, \
+ POWER_DOMAIN_PORT_DDI_LANES_C, \
+ POWER_DOMAIN_VGA, \
+ POWER_DOMAIN_AUDIO_MMIO, \
+ POWER_DOMAIN_AUDIO_PLAYBACK, \
+ POWER_DOMAIN_AUX_B, \
+ POWER_DOMAIN_AUX_C
+
+I915_DECL_PW_DOMAINS(bxt_pwdoms_pw_2,
+ BXT_PW_2_POWER_DOMAINS,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(bxt_pwdoms_dc_off,
+ BXT_PW_2_POWER_DOMAINS,
+ POWER_DOMAIN_AUX_A,
+ POWER_DOMAIN_GMBUS,
+ POWER_DOMAIN_MODESET,
+ POWER_DOMAIN_GT_IRQ,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(bxt_pwdoms_dpio_cmn_a,
+ POWER_DOMAIN_PORT_DDI_LANES_A,
+ POWER_DOMAIN_AUX_A,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(bxt_pwdoms_dpio_cmn_bc,
+ POWER_DOMAIN_PORT_DDI_LANES_B,
+ POWER_DOMAIN_PORT_DDI_LANES_C,
+ POWER_DOMAIN_AUX_B,
+ POWER_DOMAIN_AUX_C,
+ POWER_DOMAIN_INIT);
+
+static const struct i915_power_well_desc bxt_power_wells_main[] = {
+ {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("DC_off", &bxt_pwdoms_dc_off,
+ .id = SKL_DISP_DC_OFF),
+ ),
+ .ops = &gen9_dc_off_power_well_ops,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_2", &bxt_pwdoms_pw_2,
+ .hsw.idx = SKL_PW_CTL_IDX_PW_2,
+ .id = SKL_DISP_PW_2),
+ ),
+ .ops = &hsw_power_well_ops,
+ .has_vga = true,
+ .irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("dpio-common-a", &bxt_pwdoms_dpio_cmn_a,
+ .bxt.phy = DPIO_PHY1,
+ .id = BXT_DISP_PW_DPIO_CMN_A),
+ I915_PW("dpio-common-bc", &bxt_pwdoms_dpio_cmn_bc,
+ .bxt.phy = DPIO_PHY0,
+ .id = VLV_DISP_PW_DPIO_CMN_BC),
+ ),
+ .ops = &bxt_dpio_cmn_power_well_ops,
+ },
+};
+
+static const struct i915_power_well_desc_list bxt_power_wells[] = {
+ I915_PW_DESCRIPTORS(i9xx_power_wells_always_on),
+ I915_PW_DESCRIPTORS(skl_power_wells_pw_1),
+ I915_PW_DESCRIPTORS(bxt_power_wells_main),
+};
+
+#define GLK_PW_2_POWER_DOMAINS \
+ POWER_DOMAIN_PIPE_B, \
+ POWER_DOMAIN_PIPE_C, \
+ POWER_DOMAIN_PIPE_PANEL_FITTER_B, \
+ POWER_DOMAIN_PIPE_PANEL_FITTER_C, \
+ POWER_DOMAIN_TRANSCODER_A, \
+ POWER_DOMAIN_TRANSCODER_B, \
+ POWER_DOMAIN_TRANSCODER_C, \
+ POWER_DOMAIN_PORT_DDI_LANES_B, \
+ POWER_DOMAIN_PORT_DDI_LANES_C, \
+ POWER_DOMAIN_VGA, \
+ POWER_DOMAIN_AUDIO_MMIO, \
+ POWER_DOMAIN_AUDIO_PLAYBACK, \
+ POWER_DOMAIN_AUX_B, \
+ POWER_DOMAIN_AUX_C
+
+I915_DECL_PW_DOMAINS(glk_pwdoms_pw_2,
+ GLK_PW_2_POWER_DOMAINS,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(glk_pwdoms_dc_off,
+ GLK_PW_2_POWER_DOMAINS,
+ POWER_DOMAIN_AUX_A,
+ POWER_DOMAIN_GMBUS,
+ POWER_DOMAIN_MODESET,
+ POWER_DOMAIN_GT_IRQ,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(glk_pwdoms_ddi_io_a, POWER_DOMAIN_PORT_DDI_IO_A);
+I915_DECL_PW_DOMAINS(glk_pwdoms_ddi_io_b, POWER_DOMAIN_PORT_DDI_IO_B);
+I915_DECL_PW_DOMAINS(glk_pwdoms_ddi_io_c, POWER_DOMAIN_PORT_DDI_IO_C);
+
+I915_DECL_PW_DOMAINS(glk_pwdoms_dpio_cmn_a,
+ POWER_DOMAIN_PORT_DDI_LANES_A,
+ POWER_DOMAIN_AUX_A,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(glk_pwdoms_dpio_cmn_b,
+ POWER_DOMAIN_PORT_DDI_LANES_B,
+ POWER_DOMAIN_AUX_B,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(glk_pwdoms_dpio_cmn_c,
+ POWER_DOMAIN_PORT_DDI_LANES_C,
+ POWER_DOMAIN_AUX_C,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(glk_pwdoms_aux_a,
+ POWER_DOMAIN_AUX_A,
+ POWER_DOMAIN_AUX_IO_A,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(glk_pwdoms_aux_b,
+ POWER_DOMAIN_AUX_B,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(glk_pwdoms_aux_c,
+ POWER_DOMAIN_AUX_C,
+ POWER_DOMAIN_INIT);
+
+static const struct i915_power_well_desc glk_power_wells_main[] = {
+ {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("DC_off", &glk_pwdoms_dc_off,
+ .id = SKL_DISP_DC_OFF),
+ ),
+ .ops = &gen9_dc_off_power_well_ops,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_2", &glk_pwdoms_pw_2,
+ .hsw.idx = SKL_PW_CTL_IDX_PW_2,
+ .id = SKL_DISP_PW_2),
+ ),
+ .ops = &hsw_power_well_ops,
+ .has_vga = true,
+ .irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("dpio-common-a", &glk_pwdoms_dpio_cmn_a,
+ .bxt.phy = DPIO_PHY1,
+ .id = BXT_DISP_PW_DPIO_CMN_A),
+ I915_PW("dpio-common-b", &glk_pwdoms_dpio_cmn_b,
+ .bxt.phy = DPIO_PHY0,
+ .id = VLV_DISP_PW_DPIO_CMN_BC),
+ I915_PW("dpio-common-c", &glk_pwdoms_dpio_cmn_c,
+ .bxt.phy = DPIO_PHY2,
+ .id = GLK_DISP_PW_DPIO_CMN_C),
+ ),
+ .ops = &bxt_dpio_cmn_power_well_ops,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("AUX_A", &glk_pwdoms_aux_a, .hsw.idx = GLK_PW_CTL_IDX_AUX_A),
+ I915_PW("AUX_B", &glk_pwdoms_aux_b, .hsw.idx = GLK_PW_CTL_IDX_AUX_B),
+ I915_PW("AUX_C", &glk_pwdoms_aux_c, .hsw.idx = GLK_PW_CTL_IDX_AUX_C),
+ I915_PW("DDI_IO_A", &glk_pwdoms_ddi_io_a, .hsw.idx = GLK_PW_CTL_IDX_DDI_A),
+ I915_PW("DDI_IO_B", &glk_pwdoms_ddi_io_b, .hsw.idx = SKL_PW_CTL_IDX_DDI_B),
+ I915_PW("DDI_IO_C", &glk_pwdoms_ddi_io_c, .hsw.idx = SKL_PW_CTL_IDX_DDI_C),
+ ),
+ .ops = &hsw_power_well_ops,
+ },
+};
+
+static const struct i915_power_well_desc_list glk_power_wells[] = {
+ I915_PW_DESCRIPTORS(i9xx_power_wells_always_on),
+ I915_PW_DESCRIPTORS(skl_power_wells_pw_1),
+ I915_PW_DESCRIPTORS(glk_power_wells_main),
+};
+
+/*
+ * ICL PW_0/PG_0 domains (HW/DMC control):
+ * - PCI
+ * - clocks except port PLL
+ * - central power except FBC
+ * - shared functions except pipe interrupts, pipe MBUS, DBUF registers
+ * ICL PW_1/PG_1 domains (HW/DMC control):
+ * - DBUF function
+ * - PIPE_A and its planes, except VGA
+ * - transcoder EDP + PSR
+ * - transcoder DSI
+ * - DDI_A
+ * - FBC
+ */
+#define ICL_PW_4_POWER_DOMAINS \
+ POWER_DOMAIN_PIPE_C, \
+ POWER_DOMAIN_PIPE_PANEL_FITTER_C
+
+I915_DECL_PW_DOMAINS(icl_pwdoms_pw_4,
+ ICL_PW_4_POWER_DOMAINS,
+ POWER_DOMAIN_INIT);
+ /* VDSC/joining */
+
+#define ICL_PW_3_POWER_DOMAINS \
+ ICL_PW_4_POWER_DOMAINS, \
+ POWER_DOMAIN_PIPE_B, \
+ POWER_DOMAIN_PIPE_PANEL_FITTER_B, \
+ POWER_DOMAIN_TRANSCODER_A, \
+ POWER_DOMAIN_TRANSCODER_B, \
+ POWER_DOMAIN_TRANSCODER_C, \
+ POWER_DOMAIN_PORT_DDI_LANES_B, \
+ POWER_DOMAIN_PORT_DDI_LANES_C, \
+ POWER_DOMAIN_PORT_DDI_LANES_D, \
+ POWER_DOMAIN_PORT_DDI_LANES_E, \
+ POWER_DOMAIN_PORT_DDI_LANES_F, \
+ POWER_DOMAIN_VGA, \
+ POWER_DOMAIN_AUDIO_MMIO, \
+ POWER_DOMAIN_AUDIO_PLAYBACK, \
+ POWER_DOMAIN_AUX_B, \
+ POWER_DOMAIN_AUX_C, \
+ POWER_DOMAIN_AUX_D, \
+ POWER_DOMAIN_AUX_E, \
+ POWER_DOMAIN_AUX_F, \
+ POWER_DOMAIN_AUX_TBT1, \
+ POWER_DOMAIN_AUX_TBT2, \
+ POWER_DOMAIN_AUX_TBT3, \
+ POWER_DOMAIN_AUX_TBT4
+
+I915_DECL_PW_DOMAINS(icl_pwdoms_pw_3,
+ ICL_PW_3_POWER_DOMAINS,
+ POWER_DOMAIN_INIT);
+ /*
+ * - transcoder WD
+ * - KVMR (HW control)
+ */
+
+#define ICL_PW_2_POWER_DOMAINS \
+ ICL_PW_3_POWER_DOMAINS, \
+ POWER_DOMAIN_TRANSCODER_VDSC_PW2
+
+I915_DECL_PW_DOMAINS(icl_pwdoms_pw_2,
+ ICL_PW_2_POWER_DOMAINS,
+ POWER_DOMAIN_INIT);
+ /*
+ * - KVMR (HW control)
+ */
+
+I915_DECL_PW_DOMAINS(icl_pwdoms_dc_off,
+ ICL_PW_2_POWER_DOMAINS,
+ POWER_DOMAIN_AUX_A,
+ POWER_DOMAIN_MODESET,
+ POWER_DOMAIN_DC_OFF,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(icl_pwdoms_ddi_io_d, POWER_DOMAIN_PORT_DDI_IO_D);
+I915_DECL_PW_DOMAINS(icl_pwdoms_ddi_io_e, POWER_DOMAIN_PORT_DDI_IO_E);
+I915_DECL_PW_DOMAINS(icl_pwdoms_ddi_io_f, POWER_DOMAIN_PORT_DDI_IO_F);
+
+I915_DECL_PW_DOMAINS(icl_pwdoms_aux_a,
+ POWER_DOMAIN_AUX_A,
+ POWER_DOMAIN_AUX_IO_A);
+I915_DECL_PW_DOMAINS(icl_pwdoms_aux_b, POWER_DOMAIN_AUX_B);
+I915_DECL_PW_DOMAINS(icl_pwdoms_aux_c, POWER_DOMAIN_AUX_C);
+I915_DECL_PW_DOMAINS(icl_pwdoms_aux_d, POWER_DOMAIN_AUX_D);
+I915_DECL_PW_DOMAINS(icl_pwdoms_aux_e, POWER_DOMAIN_AUX_E);
+I915_DECL_PW_DOMAINS(icl_pwdoms_aux_f, POWER_DOMAIN_AUX_F);
+I915_DECL_PW_DOMAINS(icl_pwdoms_aux_tbt1, POWER_DOMAIN_AUX_TBT1);
+I915_DECL_PW_DOMAINS(icl_pwdoms_aux_tbt2, POWER_DOMAIN_AUX_TBT2);
+I915_DECL_PW_DOMAINS(icl_pwdoms_aux_tbt3, POWER_DOMAIN_AUX_TBT3);
+I915_DECL_PW_DOMAINS(icl_pwdoms_aux_tbt4, POWER_DOMAIN_AUX_TBT4);
+
+static const struct i915_power_well_desc icl_power_wells_pw_1[] = {
+ {
+ /* Handled by the DMC firmware */
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_1", I915_PW_DOMAINS_NONE,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_1,
+ .id = SKL_DISP_PW_1),
+ ),
+ .ops = &hsw_power_well_ops,
+ .always_on = true,
+ .has_fuses = true,
+ },
+};
+
+static const struct i915_power_well_desc icl_power_wells_main[] = {
+ {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("DC_off", &icl_pwdoms_dc_off,
+ .id = SKL_DISP_DC_OFF),
+ ),
+ .ops = &gen9_dc_off_power_well_ops,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_2", &icl_pwdoms_pw_2,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_2,
+ .id = SKL_DISP_PW_2),
+ ),
+ .ops = &hsw_power_well_ops,
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_3", &icl_pwdoms_pw_3,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_3,
+ .id = ICL_DISP_PW_3),
+ ),
+ .ops = &hsw_power_well_ops,
+ .has_vga = true,
+ .irq_pipe_mask = BIT(PIPE_B),
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("DDI_IO_A", &glk_pwdoms_ddi_io_a, .hsw.idx = ICL_PW_CTL_IDX_DDI_A),
+ I915_PW("DDI_IO_B", &glk_pwdoms_ddi_io_b, .hsw.idx = ICL_PW_CTL_IDX_DDI_B),
+ I915_PW("DDI_IO_C", &glk_pwdoms_ddi_io_c, .hsw.idx = ICL_PW_CTL_IDX_DDI_C),
+ I915_PW("DDI_IO_D", &icl_pwdoms_ddi_io_d, .hsw.idx = ICL_PW_CTL_IDX_DDI_D),
+ I915_PW("DDI_IO_E", &icl_pwdoms_ddi_io_e, .hsw.idx = ICL_PW_CTL_IDX_DDI_E),
+ I915_PW("DDI_IO_F", &icl_pwdoms_ddi_io_f, .hsw.idx = ICL_PW_CTL_IDX_DDI_F),
+ ),
+ .ops = &icl_ddi_power_well_ops,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("AUX_A", &icl_pwdoms_aux_a, .hsw.idx = ICL_PW_CTL_IDX_AUX_A),
+ I915_PW("AUX_B", &icl_pwdoms_aux_b, .hsw.idx = ICL_PW_CTL_IDX_AUX_B),
+ I915_PW("AUX_C", &icl_pwdoms_aux_c, .hsw.idx = ICL_PW_CTL_IDX_AUX_C),
+ I915_PW("AUX_D", &icl_pwdoms_aux_d, .hsw.idx = ICL_PW_CTL_IDX_AUX_D),
+ I915_PW("AUX_E", &icl_pwdoms_aux_e, .hsw.idx = ICL_PW_CTL_IDX_AUX_E),
+ I915_PW("AUX_F", &icl_pwdoms_aux_f, .hsw.idx = ICL_PW_CTL_IDX_AUX_F),
+ ),
+ .ops = &icl_aux_power_well_ops,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("AUX_TBT1", &icl_pwdoms_aux_tbt1, .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1),
+ I915_PW("AUX_TBT2", &icl_pwdoms_aux_tbt2, .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2),
+ I915_PW("AUX_TBT3", &icl_pwdoms_aux_tbt3, .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3),
+ I915_PW("AUX_TBT4", &icl_pwdoms_aux_tbt4, .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4),
+ ),
+ .ops = &icl_aux_power_well_ops,
+ .is_tc_tbt = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_4", &icl_pwdoms_pw_4,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_4),
+ ),
+ .ops = &hsw_power_well_ops,
+ .irq_pipe_mask = BIT(PIPE_C),
+ .has_fuses = true,
+ },
+};
+
+static const struct i915_power_well_desc_list icl_power_wells[] = {
+ I915_PW_DESCRIPTORS(i9xx_power_wells_always_on),
+ I915_PW_DESCRIPTORS(icl_power_wells_pw_1),
+ I915_PW_DESCRIPTORS(icl_power_wells_main),
+};
+
+#define TGL_PW_5_POWER_DOMAINS \
+ POWER_DOMAIN_PIPE_D, \
+ POWER_DOMAIN_PIPE_PANEL_FITTER_D, \
+ POWER_DOMAIN_TRANSCODER_D
+
+I915_DECL_PW_DOMAINS(tgl_pwdoms_pw_5,
+ TGL_PW_5_POWER_DOMAINS,
+ POWER_DOMAIN_INIT);
+
+#define TGL_PW_4_POWER_DOMAINS \
+ TGL_PW_5_POWER_DOMAINS, \
+ POWER_DOMAIN_PIPE_C, \
+ POWER_DOMAIN_PIPE_PANEL_FITTER_C, \
+ POWER_DOMAIN_TRANSCODER_C
+
+I915_DECL_PW_DOMAINS(tgl_pwdoms_pw_4,
+ TGL_PW_4_POWER_DOMAINS,
+ POWER_DOMAIN_INIT);
+
+#define TGL_PW_3_POWER_DOMAINS \
+ TGL_PW_4_POWER_DOMAINS, \
+ POWER_DOMAIN_PIPE_B, \
+ POWER_DOMAIN_PIPE_PANEL_FITTER_B, \
+ POWER_DOMAIN_TRANSCODER_B, \
+ POWER_DOMAIN_PORT_DDI_LANES_TC1, \
+ POWER_DOMAIN_PORT_DDI_LANES_TC2, \
+ POWER_DOMAIN_PORT_DDI_LANES_TC3, \
+ POWER_DOMAIN_PORT_DDI_LANES_TC4, \
+ POWER_DOMAIN_PORT_DDI_LANES_TC5, \
+ POWER_DOMAIN_PORT_DDI_LANES_TC6, \
+ POWER_DOMAIN_VGA, \
+ POWER_DOMAIN_AUDIO_MMIO, \
+ POWER_DOMAIN_AUDIO_PLAYBACK, \
+ POWER_DOMAIN_AUX_USBC1, \
+ POWER_DOMAIN_AUX_USBC2, \
+ POWER_DOMAIN_AUX_USBC3, \
+ POWER_DOMAIN_AUX_USBC4, \
+ POWER_DOMAIN_AUX_USBC5, \
+ POWER_DOMAIN_AUX_USBC6, \
+ POWER_DOMAIN_AUX_TBT1, \
+ POWER_DOMAIN_AUX_TBT2, \
+ POWER_DOMAIN_AUX_TBT3, \
+ POWER_DOMAIN_AUX_TBT4, \
+ POWER_DOMAIN_AUX_TBT5, \
+ POWER_DOMAIN_AUX_TBT6
+
+I915_DECL_PW_DOMAINS(tgl_pwdoms_pw_3,
+ TGL_PW_3_POWER_DOMAINS,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(tgl_pwdoms_pw_2,
+ TGL_PW_3_POWER_DOMAINS,
+ POWER_DOMAIN_TRANSCODER_VDSC_PW2,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(tgl_pwdoms_dc_off,
+ TGL_PW_3_POWER_DOMAINS,
+ POWER_DOMAIN_AUX_A,
+ POWER_DOMAIN_AUX_B,
+ POWER_DOMAIN_AUX_C,
+ POWER_DOMAIN_MODESET,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(tgl_pwdoms_ddi_io_tc1, POWER_DOMAIN_PORT_DDI_IO_TC1);
+I915_DECL_PW_DOMAINS(tgl_pwdoms_ddi_io_tc2, POWER_DOMAIN_PORT_DDI_IO_TC2);
+I915_DECL_PW_DOMAINS(tgl_pwdoms_ddi_io_tc3, POWER_DOMAIN_PORT_DDI_IO_TC3);
+I915_DECL_PW_DOMAINS(tgl_pwdoms_ddi_io_tc4, POWER_DOMAIN_PORT_DDI_IO_TC4);
+I915_DECL_PW_DOMAINS(tgl_pwdoms_ddi_io_tc5, POWER_DOMAIN_PORT_DDI_IO_TC5);
+I915_DECL_PW_DOMAINS(tgl_pwdoms_ddi_io_tc6, POWER_DOMAIN_PORT_DDI_IO_TC6);
+
+I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_usbc1, POWER_DOMAIN_AUX_USBC1);
+I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_usbc2, POWER_DOMAIN_AUX_USBC2);
+I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_usbc3, POWER_DOMAIN_AUX_USBC3);
+I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_usbc4, POWER_DOMAIN_AUX_USBC4);
+I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_usbc5, POWER_DOMAIN_AUX_USBC5);
+I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_usbc6, POWER_DOMAIN_AUX_USBC6);
+
+I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_tbt5, POWER_DOMAIN_AUX_TBT5);
+I915_DECL_PW_DOMAINS(tgl_pwdoms_aux_tbt6, POWER_DOMAIN_AUX_TBT6);
+
+I915_DECL_PW_DOMAINS(tgl_pwdoms_tc_cold_off,
+ POWER_DOMAIN_AUX_USBC1,
+ POWER_DOMAIN_AUX_USBC2,
+ POWER_DOMAIN_AUX_USBC3,
+ POWER_DOMAIN_AUX_USBC4,
+ POWER_DOMAIN_AUX_USBC5,
+ POWER_DOMAIN_AUX_USBC6,
+ POWER_DOMAIN_AUX_TBT1,
+ POWER_DOMAIN_AUX_TBT2,
+ POWER_DOMAIN_AUX_TBT3,
+ POWER_DOMAIN_AUX_TBT4,
+ POWER_DOMAIN_AUX_TBT5,
+ POWER_DOMAIN_AUX_TBT6,
+ POWER_DOMAIN_TC_COLD_OFF);
+
+static const struct i915_power_well_desc tgl_power_wells_main[] = {
+ {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("DC_off", &tgl_pwdoms_dc_off,
+ .id = SKL_DISP_DC_OFF),
+ ),
+ .ops = &gen9_dc_off_power_well_ops,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_2", &tgl_pwdoms_pw_2,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_2,
+ .id = SKL_DISP_PW_2),
+ ),
+ .ops = &hsw_power_well_ops,
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_3", &tgl_pwdoms_pw_3,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_3,
+ .id = ICL_DISP_PW_3),
+ ),
+ .ops = &hsw_power_well_ops,
+ .has_vga = true,
+ .irq_pipe_mask = BIT(PIPE_B),
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("DDI_IO_A", &glk_pwdoms_ddi_io_a, .hsw.idx = ICL_PW_CTL_IDX_DDI_A),
+ I915_PW("DDI_IO_B", &glk_pwdoms_ddi_io_b, .hsw.idx = ICL_PW_CTL_IDX_DDI_B),
+ I915_PW("DDI_IO_C", &glk_pwdoms_ddi_io_c, .hsw.idx = ICL_PW_CTL_IDX_DDI_C),
+ I915_PW("DDI_IO_TC1", &tgl_pwdoms_ddi_io_tc1, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1),
+ I915_PW("DDI_IO_TC2", &tgl_pwdoms_ddi_io_tc2, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2),
+ I915_PW("DDI_IO_TC3", &tgl_pwdoms_ddi_io_tc3, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC3),
+ I915_PW("DDI_IO_TC4", &tgl_pwdoms_ddi_io_tc4, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC4),
+ I915_PW("DDI_IO_TC5", &tgl_pwdoms_ddi_io_tc5, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC5),
+ I915_PW("DDI_IO_TC6", &tgl_pwdoms_ddi_io_tc6, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC6),
+ ),
+ .ops = &icl_ddi_power_well_ops,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_4", &tgl_pwdoms_pw_4,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_4),
+ ),
+ .ops = &hsw_power_well_ops,
+ .has_fuses = true,
+ .irq_pipe_mask = BIT(PIPE_C),
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_5", &tgl_pwdoms_pw_5,
+ .hsw.idx = TGL_PW_CTL_IDX_PW_5),
+ ),
+ .ops = &hsw_power_well_ops,
+ .has_fuses = true,
+ .irq_pipe_mask = BIT(PIPE_D),
+ },
+};
+
+static const struct i915_power_well_desc tgl_power_wells_tc_cold_off[] = {
+ {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("TC_cold_off", &tgl_pwdoms_tc_cold_off,
+ .id = TGL_DISP_PW_TC_COLD_OFF),
+ ),
+ .ops = &tgl_tc_cold_off_ops,
+ },
+};
+
+static const struct i915_power_well_desc tgl_power_wells_aux[] = {
+ {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("AUX_A", &icl_pwdoms_aux_a, .hsw.idx = ICL_PW_CTL_IDX_AUX_A),
+ I915_PW("AUX_B", &icl_pwdoms_aux_b, .hsw.idx = ICL_PW_CTL_IDX_AUX_B),
+ I915_PW("AUX_C", &icl_pwdoms_aux_c, .hsw.idx = ICL_PW_CTL_IDX_AUX_C),
+ I915_PW("AUX_USBC1", &tgl_pwdoms_aux_usbc1, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1),
+ I915_PW("AUX_USBC2", &tgl_pwdoms_aux_usbc2, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2),
+ I915_PW("AUX_USBC3", &tgl_pwdoms_aux_usbc3, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC3),
+ I915_PW("AUX_USBC4", &tgl_pwdoms_aux_usbc4, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC4),
+ I915_PW("AUX_USBC5", &tgl_pwdoms_aux_usbc5, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC5),
+ I915_PW("AUX_USBC6", &tgl_pwdoms_aux_usbc6, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC6),
+ ),
+ .ops = &icl_aux_power_well_ops,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("AUX_TBT1", &icl_pwdoms_aux_tbt1, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1),
+ I915_PW("AUX_TBT2", &icl_pwdoms_aux_tbt2, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2),
+ I915_PW("AUX_TBT3", &icl_pwdoms_aux_tbt3, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3),
+ I915_PW("AUX_TBT4", &icl_pwdoms_aux_tbt4, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4),
+ I915_PW("AUX_TBT5", &tgl_pwdoms_aux_tbt5, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT5),
+ I915_PW("AUX_TBT6", &tgl_pwdoms_aux_tbt6, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT6),
+ ),
+ .ops = &icl_aux_power_well_ops,
+ .is_tc_tbt = true,
+ },
+};
+
+static const struct i915_power_well_desc_list tgl_power_wells[] = {
+ I915_PW_DESCRIPTORS(i9xx_power_wells_always_on),
+ I915_PW_DESCRIPTORS(icl_power_wells_pw_1),
+ I915_PW_DESCRIPTORS(tgl_power_wells_main),
+ I915_PW_DESCRIPTORS(tgl_power_wells_tc_cold_off),
+ I915_PW_DESCRIPTORS(tgl_power_wells_aux),
+};
+
+static const struct i915_power_well_desc_list adls_power_wells[] = {
+ I915_PW_DESCRIPTORS(i9xx_power_wells_always_on),
+ I915_PW_DESCRIPTORS(icl_power_wells_pw_1),
+ I915_PW_DESCRIPTORS(tgl_power_wells_main),
+ I915_PW_DESCRIPTORS(tgl_power_wells_aux),
+};
+
+#define RKL_PW_4_POWER_DOMAINS \
+ POWER_DOMAIN_PIPE_C, \
+ POWER_DOMAIN_PIPE_PANEL_FITTER_C, \
+ POWER_DOMAIN_TRANSCODER_C
+
+I915_DECL_PW_DOMAINS(rkl_pwdoms_pw_4,
+ RKL_PW_4_POWER_DOMAINS,
+ POWER_DOMAIN_INIT);
+
+#define RKL_PW_3_POWER_DOMAINS \
+ RKL_PW_4_POWER_DOMAINS, \
+ POWER_DOMAIN_PIPE_B, \
+ POWER_DOMAIN_PIPE_PANEL_FITTER_B, \
+ POWER_DOMAIN_TRANSCODER_B, \
+ POWER_DOMAIN_PORT_DDI_LANES_TC1, \
+ POWER_DOMAIN_PORT_DDI_LANES_TC2, \
+ POWER_DOMAIN_VGA, \
+ POWER_DOMAIN_AUDIO_MMIO, \
+ POWER_DOMAIN_AUDIO_PLAYBACK, \
+ POWER_DOMAIN_AUX_USBC1, \
+ POWER_DOMAIN_AUX_USBC2
+
+I915_DECL_PW_DOMAINS(rkl_pwdoms_pw_3,
+ RKL_PW_3_POWER_DOMAINS,
+ POWER_DOMAIN_INIT);
+
+/*
+ * There is no PW_2/PG_2 on RKL.
+ *
+ * RKL PW_1/PG_1 domains (under HW/DMC control):
+ * - DBUF function (note: registers are in PW0)
+ * - PIPE_A and its planes and VDSC/joining, except VGA
+ * - transcoder A
+ * - DDI_A and DDI_B
+ * - FBC
+ *
+ * RKL PW_0/PG_0 domains (under HW/DMC control):
+ * - PCI
+ * - clocks except port PLL
+ * - shared functions:
+ * * interrupts except pipe interrupts
+ * * MBus except PIPE_MBUS_DBOX_CTL
+ * * DBUF registers
+ * - central power except FBC
+ * - top-level GTC (DDI-level GTC is in the well associated with the DDI)
+ */
+
+I915_DECL_PW_DOMAINS(rkl_pwdoms_dc_off,
+ RKL_PW_3_POWER_DOMAINS,
+ POWER_DOMAIN_AUX_A,
+ POWER_DOMAIN_AUX_B,
+ POWER_DOMAIN_MODESET,
+ POWER_DOMAIN_INIT);
+
+static const struct i915_power_well_desc rkl_power_wells_main[] = {
+ {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("DC_off", &rkl_pwdoms_dc_off,
+ .id = SKL_DISP_DC_OFF),
+ ),
+ .ops = &gen9_dc_off_power_well_ops,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_3", &rkl_pwdoms_pw_3,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_3,
+ .id = ICL_DISP_PW_3),
+ ),
+ .ops = &hsw_power_well_ops,
+ .irq_pipe_mask = BIT(PIPE_B),
+ .has_vga = true,
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_4", &rkl_pwdoms_pw_4,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_4),
+ ),
+ .ops = &hsw_power_well_ops,
+ .has_fuses = true,
+ .irq_pipe_mask = BIT(PIPE_C),
+ },
+};
+
+static const struct i915_power_well_desc rkl_power_wells_ddi_aux[] = {
+ {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("DDI_IO_A", &glk_pwdoms_ddi_io_a, .hsw.idx = ICL_PW_CTL_IDX_DDI_A),
+ I915_PW("DDI_IO_B", &glk_pwdoms_ddi_io_b, .hsw.idx = ICL_PW_CTL_IDX_DDI_B),
+ I915_PW("DDI_IO_TC1", &tgl_pwdoms_ddi_io_tc1, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1),
+ I915_PW("DDI_IO_TC2", &tgl_pwdoms_ddi_io_tc2, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2),
+ ),
+ .ops = &icl_ddi_power_well_ops,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("AUX_A", &icl_pwdoms_aux_a, .hsw.idx = ICL_PW_CTL_IDX_AUX_A),
+ I915_PW("AUX_B", &icl_pwdoms_aux_b, .hsw.idx = ICL_PW_CTL_IDX_AUX_B),
+ I915_PW("AUX_USBC1", &tgl_pwdoms_aux_usbc1, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1),
+ I915_PW("AUX_USBC2", &tgl_pwdoms_aux_usbc2, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2),
+ ),
+ .ops = &icl_aux_power_well_ops,
+ },
+};
+
+static const struct i915_power_well_desc_list rkl_power_wells[] = {
+ I915_PW_DESCRIPTORS(i9xx_power_wells_always_on),
+ I915_PW_DESCRIPTORS(icl_power_wells_pw_1),
+ I915_PW_DESCRIPTORS(rkl_power_wells_main),
+ I915_PW_DESCRIPTORS(rkl_power_wells_ddi_aux),
+};
+
+/*
+ * From DG1 onwards, Audio MMIO/VERBS lies in the PG0 power well.
+ */
+#define DG1_PW_3_POWER_DOMAINS \
+ TGL_PW_4_POWER_DOMAINS, \
+ POWER_DOMAIN_PIPE_B, \
+ POWER_DOMAIN_PIPE_PANEL_FITTER_B, \
+ POWER_DOMAIN_TRANSCODER_B, \
+ POWER_DOMAIN_PORT_DDI_LANES_TC1, \
+ POWER_DOMAIN_PORT_DDI_LANES_TC2, \
+ POWER_DOMAIN_VGA, \
+ POWER_DOMAIN_AUDIO_PLAYBACK, \
+ POWER_DOMAIN_AUX_USBC1, \
+ POWER_DOMAIN_AUX_USBC2
+
+I915_DECL_PW_DOMAINS(dg1_pwdoms_pw_3,
+ DG1_PW_3_POWER_DOMAINS,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(dg1_pwdoms_dc_off,
+ DG1_PW_3_POWER_DOMAINS,
+ POWER_DOMAIN_AUDIO_MMIO,
+ POWER_DOMAIN_AUX_A,
+ POWER_DOMAIN_AUX_B,
+ POWER_DOMAIN_MODESET,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(dg1_pwdoms_pw_2,
+ DG1_PW_3_POWER_DOMAINS,
+ POWER_DOMAIN_TRANSCODER_VDSC_PW2,
+ POWER_DOMAIN_INIT);
+
+static const struct i915_power_well_desc dg1_power_wells_main[] = {
+ {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("DC_off", &dg1_pwdoms_dc_off,
+ .id = SKL_DISP_DC_OFF),
+ ),
+ .ops = &gen9_dc_off_power_well_ops,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_2", &dg1_pwdoms_pw_2,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_2,
+ .id = SKL_DISP_PW_2),
+ ),
+ .ops = &hsw_power_well_ops,
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_3", &dg1_pwdoms_pw_3,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_3,
+ .id = ICL_DISP_PW_3),
+ ),
+ .ops = &hsw_power_well_ops,
+ .irq_pipe_mask = BIT(PIPE_B),
+ .has_vga = true,
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_4", &tgl_pwdoms_pw_4,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_4),
+ ),
+ .ops = &hsw_power_well_ops,
+ .has_fuses = true,
+ .irq_pipe_mask = BIT(PIPE_C),
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_5", &tgl_pwdoms_pw_5,
+ .hsw.idx = TGL_PW_CTL_IDX_PW_5),
+ ),
+ .ops = &hsw_power_well_ops,
+ .has_fuses = true,
+ .irq_pipe_mask = BIT(PIPE_D),
+ },
+};
+
+static const struct i915_power_well_desc_list dg1_power_wells[] = {
+ I915_PW_DESCRIPTORS(i9xx_power_wells_always_on),
+ I915_PW_DESCRIPTORS(icl_power_wells_pw_1),
+ I915_PW_DESCRIPTORS(dg1_power_wells_main),
+ I915_PW_DESCRIPTORS(rkl_power_wells_ddi_aux),
+};
+
+/*
+ * XE_LPD Power Domains
+ *
+ * Previous platforms required that PG(n-1) be enabled before PG(n). That
+ * dependency chain turns into a dependency tree on XE_LPD:
+ *
+ * PG0
+ * |
+ * --PG1--
+ * / \
+ * PGA --PG2--
+ * / | \
+ * PGB PGC PGD
+ *
+ * Power wells must be enabled from top to bottom and disabled from bottom
+ * to top. This allows pipes to be power gated independently.
+ */
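The tree only constrains ordering along its edges, and the descriptor arrays below encode a valid linearisation directly: parents are listed before children, so enabling can simply walk the wells in array order and disabling in reverse (intel_display_power_map_init() restates this rule). A standalone sketch of that traversal, with illustrative well names (not part of the patch):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Parents listed before children: PG1 before PGA/PG2, PG2 before PGB/C/D. */
static const char *const wells[] = { "PG1", "PGA", "PG2", "PGB", "PGC", "PGD" };

int main(void)
{
	int i, n = (int)ARRAY_SIZE(wells);

	for (i = 0; i < n; i++)		/* enable from top to bottom */
		printf("enable %s\n", wells[i]);

	for (i = n - 1; i >= 0; i--)	/* disable from bottom to top */
		printf("disable %s\n", wells[i]);

	return 0;
}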
+
+#define XELPD_PW_D_POWER_DOMAINS \
+ POWER_DOMAIN_PIPE_D, \
+ POWER_DOMAIN_PIPE_PANEL_FITTER_D, \
+ POWER_DOMAIN_TRANSCODER_D
+
+I915_DECL_PW_DOMAINS(xelpd_pwdoms_pw_d,
+ XELPD_PW_D_POWER_DOMAINS,
+ POWER_DOMAIN_INIT);
+
+#define XELPD_PW_C_POWER_DOMAINS \
+ POWER_DOMAIN_PIPE_C, \
+ POWER_DOMAIN_PIPE_PANEL_FITTER_C, \
+ POWER_DOMAIN_TRANSCODER_C
+
+I915_DECL_PW_DOMAINS(xelpd_pwdoms_pw_c,
+ XELPD_PW_C_POWER_DOMAINS,
+ POWER_DOMAIN_INIT);
+
+#define XELPD_PW_B_POWER_DOMAINS \
+ POWER_DOMAIN_PIPE_B, \
+ POWER_DOMAIN_PIPE_PANEL_FITTER_B, \
+ POWER_DOMAIN_TRANSCODER_B
+
+I915_DECL_PW_DOMAINS(xelpd_pwdoms_pw_b,
+ XELPD_PW_B_POWER_DOMAINS,
+ POWER_DOMAIN_INIT);
+
+I915_DECL_PW_DOMAINS(xelpd_pwdoms_pw_a,
+ POWER_DOMAIN_PIPE_A,
+ POWER_DOMAIN_PIPE_PANEL_FITTER_A,
+ POWER_DOMAIN_INIT);
+
+#define XELPD_PW_2_POWER_DOMAINS \
+ XELPD_PW_B_POWER_DOMAINS, \
+ XELPD_PW_C_POWER_DOMAINS, \
+ XELPD_PW_D_POWER_DOMAINS, \
+ POWER_DOMAIN_PORT_DDI_LANES_C, \
+ POWER_DOMAIN_PORT_DDI_LANES_D, \
+ POWER_DOMAIN_PORT_DDI_LANES_E, \
+ POWER_DOMAIN_PORT_DDI_LANES_TC1, \
+ POWER_DOMAIN_PORT_DDI_LANES_TC2, \
+ POWER_DOMAIN_PORT_DDI_LANES_TC3, \
+ POWER_DOMAIN_PORT_DDI_LANES_TC4, \
+ POWER_DOMAIN_VGA, \
+ POWER_DOMAIN_AUDIO_PLAYBACK, \
+ POWER_DOMAIN_AUX_C, \
+ POWER_DOMAIN_AUX_D, \
+ POWER_DOMAIN_AUX_E, \
+ POWER_DOMAIN_AUX_USBC1, \
+ POWER_DOMAIN_AUX_USBC2, \
+ POWER_DOMAIN_AUX_USBC3, \
+ POWER_DOMAIN_AUX_USBC4, \
+ POWER_DOMAIN_AUX_TBT1, \
+ POWER_DOMAIN_AUX_TBT2, \
+ POWER_DOMAIN_AUX_TBT3, \
+ POWER_DOMAIN_AUX_TBT4
+
+I915_DECL_PW_DOMAINS(xelpd_pwdoms_pw_2,
+ XELPD_PW_2_POWER_DOMAINS,
+ POWER_DOMAIN_INIT);
+
+/*
+ * XELPD PW_1/PG_1 domains (under HW/DMC control):
+ * - DBUF function (registers are in PW0)
+ * - Transcoder A
+ * - DDI_A and DDI_B
+ *
+ * XELPD PW_0/PG_0 domains (under HW/DMC control):
+ * - PCI
+ * - Clocks except port PLL
+ * - Shared functions:
+ * * interrupts except pipe interrupts
+ * * MBus except PIPE_MBUS_DBOX_CTL
+ * * DBUF registers
+ * - Central power except FBC
+ * - Top-level GTC (DDI-level GTC is in the well associated with the DDI)
+ */
+
+I915_DECL_PW_DOMAINS(xelpd_pwdoms_dc_off,
+ XELPD_PW_2_POWER_DOMAINS,
+ POWER_DOMAIN_PORT_DSI,
+ POWER_DOMAIN_AUDIO_MMIO,
+ POWER_DOMAIN_AUX_A,
+ POWER_DOMAIN_AUX_B,
+ POWER_DOMAIN_MODESET,
+ POWER_DOMAIN_INIT);
+
+static const struct i915_power_well_desc xelpd_power_wells_main[] = {
+ {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("DC_off", &xelpd_pwdoms_dc_off,
+ .id = SKL_DISP_DC_OFF),
+ ),
+ .ops = &gen9_dc_off_power_well_ops,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_2", &xelpd_pwdoms_pw_2,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_2,
+ .id = SKL_DISP_PW_2),
+ ),
+ .ops = &hsw_power_well_ops,
+ .has_vga = true,
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_A", &xelpd_pwdoms_pw_a,
+ .hsw.idx = XELPD_PW_CTL_IDX_PW_A),
+ ),
+ .ops = &hsw_power_well_ops,
+ .irq_pipe_mask = BIT(PIPE_A),
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_B", &xelpd_pwdoms_pw_b,
+ .hsw.idx = XELPD_PW_CTL_IDX_PW_B),
+ ),
+ .ops = &hsw_power_well_ops,
+ .irq_pipe_mask = BIT(PIPE_B),
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_C", &xelpd_pwdoms_pw_c,
+ .hsw.idx = XELPD_PW_CTL_IDX_PW_C),
+ ),
+ .ops = &hsw_power_well_ops,
+ .irq_pipe_mask = BIT(PIPE_C),
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_D", &xelpd_pwdoms_pw_d,
+ .hsw.idx = XELPD_PW_CTL_IDX_PW_D),
+ ),
+ .ops = &hsw_power_well_ops,
+ .irq_pipe_mask = BIT(PIPE_D),
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("DDI_IO_A", &glk_pwdoms_ddi_io_a, .hsw.idx = ICL_PW_CTL_IDX_DDI_A),
+ I915_PW("DDI_IO_B", &glk_pwdoms_ddi_io_b, .hsw.idx = ICL_PW_CTL_IDX_DDI_B),
+ I915_PW("DDI_IO_C", &glk_pwdoms_ddi_io_c, .hsw.idx = ICL_PW_CTL_IDX_DDI_C),
+ I915_PW("DDI_IO_D", &icl_pwdoms_ddi_io_d, .hsw.idx = XELPD_PW_CTL_IDX_DDI_D),
+ I915_PW("DDI_IO_E", &icl_pwdoms_ddi_io_e, .hsw.idx = XELPD_PW_CTL_IDX_DDI_E),
+ I915_PW("DDI_IO_TC1", &tgl_pwdoms_ddi_io_tc1, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1),
+ I915_PW("DDI_IO_TC2", &tgl_pwdoms_ddi_io_tc2, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2),
+ I915_PW("DDI_IO_TC3", &tgl_pwdoms_ddi_io_tc3, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC3),
+ I915_PW("DDI_IO_TC4", &tgl_pwdoms_ddi_io_tc4, .hsw.idx = TGL_PW_CTL_IDX_DDI_TC4),
+ ),
+ .ops = &icl_ddi_power_well_ops,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("AUX_A", &icl_pwdoms_aux_a, .hsw.idx = ICL_PW_CTL_IDX_AUX_A),
+ I915_PW("AUX_B", &icl_pwdoms_aux_b, .hsw.idx = ICL_PW_CTL_IDX_AUX_B),
+ I915_PW("AUX_C", &icl_pwdoms_aux_c, .hsw.idx = ICL_PW_CTL_IDX_AUX_C),
+ I915_PW("AUX_D", &icl_pwdoms_aux_d, .hsw.idx = XELPD_PW_CTL_IDX_AUX_D),
+ I915_PW("AUX_E", &icl_pwdoms_aux_e, .hsw.idx = XELPD_PW_CTL_IDX_AUX_E),
+ I915_PW("AUX_USBC1", &tgl_pwdoms_aux_usbc1, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1),
+ I915_PW("AUX_USBC2", &tgl_pwdoms_aux_usbc2, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2),
+ I915_PW("AUX_USBC3", &tgl_pwdoms_aux_usbc3, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC3),
+ I915_PW("AUX_USBC4", &tgl_pwdoms_aux_usbc4, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC4),
+ ),
+ .ops = &icl_aux_power_well_ops,
+ .fixed_enable_delay = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("AUX_TBT1", &icl_pwdoms_aux_tbt1, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1),
+ I915_PW("AUX_TBT2", &icl_pwdoms_aux_tbt2, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2),
+ I915_PW("AUX_TBT3", &icl_pwdoms_aux_tbt3, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3),
+ I915_PW("AUX_TBT4", &icl_pwdoms_aux_tbt4, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4),
+ ),
+ .ops = &icl_aux_power_well_ops,
+ .is_tc_tbt = true,
+ },
+};
+
+static const struct i915_power_well_desc_list xelpd_power_wells[] = {
+ I915_PW_DESCRIPTORS(i9xx_power_wells_always_on),
+ I915_PW_DESCRIPTORS(icl_power_wells_pw_1),
+ I915_PW_DESCRIPTORS(xelpd_power_wells_main),
+};
+
+static void init_power_well_domains(const struct i915_power_well_instance *inst,
+ struct i915_power_well *power_well)
+{
+ int j;
+
+ if (!inst->domain_list)
+ return;
+
+ if (inst->domain_list->count == 0) {
+ bitmap_fill(power_well->domains.bits, POWER_DOMAIN_NUM);
+
+ return;
+ }
+
+ for (j = 0; j < inst->domain_list->count; j++)
+ set_bit(inst->domain_list->list[j], power_well->domains.bits);
+}
+
+#define for_each_power_well_instance_in_desc_list(_desc_list, _desc_count, _desc, _inst) \
+ for ((_desc) = (_desc_list); (_desc) - (_desc_list) < (_desc_count); (_desc)++) \
+ for ((_inst) = (_desc)->instances->list; \
+ (_inst) - (_desc)->instances->list < (_desc)->instances->count; \
+ (_inst)++)
+
+#define for_each_power_well_instance(_desc_list, _desc_count, _descs, _desc, _inst) \
+ for ((_descs) = (_desc_list); \
+ (_descs) - (_desc_list) < (_desc_count); \
+ (_descs)++) \
+ for_each_power_well_instance_in_desc_list((_descs)->list, (_descs)->count, \
+ (_desc), (_inst))
+
+static int
+__set_power_wells(struct i915_power_domains *power_domains,
+ const struct i915_power_well_desc_list *power_well_descs,
+ int power_well_descs_sz)
+{
+ struct drm_i915_private *i915 = container_of(power_domains,
+ struct drm_i915_private,
+ power_domains);
+ u64 power_well_ids = 0;
+ const struct i915_power_well_desc_list *desc_list;
+ const struct i915_power_well_desc *desc;
+ const struct i915_power_well_instance *inst;
+ int power_well_count = 0;
+ int plt_idx = 0;
+
+ for_each_power_well_instance(power_well_descs, power_well_descs_sz, desc_list, desc, inst)
+ power_well_count++;
+
+ power_domains->power_well_count = power_well_count;
+ power_domains->power_wells =
+ kcalloc(power_well_count,
+ sizeof(*power_domains->power_wells),
+ GFP_KERNEL);
+ if (!power_domains->power_wells)
+ return -ENOMEM;
+
+ for_each_power_well_instance(power_well_descs, power_well_descs_sz, desc_list, desc, inst) {
+ struct i915_power_well *pw = &power_domains->power_wells[plt_idx];
+ enum i915_power_well_id id = inst->id;
+
+ pw->desc = desc;
+ drm_WARN_ON(&i915->drm,
+ overflows_type(inst - desc->instances->list, pw->instance_idx));
+ pw->instance_idx = inst - desc->instances->list;
+
+ init_power_well_domains(inst, pw);
+
+ plt_idx++;
+
+ if (id == DISP_PW_ID_NONE)
+ continue;
+
+ drm_WARN_ON(&i915->drm, id >= sizeof(power_well_ids) * 8);
+ drm_WARN_ON(&i915->drm, power_well_ids & BIT_ULL(id));
+ power_well_ids |= BIT_ULL(id);
+ }
+
+ return 0;
+}
+
+#define set_power_wells(power_domains, __power_well_descs) \
+ __set_power_wells(power_domains, __power_well_descs, \
+ ARRAY_SIZE(__power_well_descs))
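__set_power_wells() makes two passes over the nested descriptor lists: one to count instances and size a single flat kcalloc()'d array, and one to fill it in. Along the way a u64 bitmap asserts that each non-NONE power well ID fits in 64 bits and is claimed at most once. A standalone sketch of just that ID check, with illustrative IDs (not part of the patch):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	const int ids[] = { 0, 3, 7 };	/* illustrative well IDs */
	uint64_t seen = 0;
	unsigned int i;

	for (i = 0; i < sizeof(ids) / sizeof(ids[0]); i++) {
		assert(ids[i] < 64);				/* must fit the bitmap */
		assert(!(seen & ((uint64_t)1 << ids[i])));	/* no duplicates */
		seen |= (uint64_t)1 << ids[i];
	}

	return 0;
}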
+
+/**
+ * intel_display_power_map_init - initialize power domain -> power well mappings
+ * @power_domains: power domain state
+ *
+ * Creates all the power wells for the current platform, initializes the
+ * dynamic state for them and initializes the mapping of each power well to
+ * all the power domains the power well belongs to.
+ */
+int intel_display_power_map_init(struct i915_power_domains *power_domains)
+{
+ struct drm_i915_private *i915 = container_of(power_domains,
+ struct drm_i915_private,
+ power_domains);
+ /*
+ * The enabling order is from lower- to higher-indexed wells;
+ * the disabling order is reversed.
+ */
+ if (!HAS_DISPLAY(i915)) {
+ power_domains->power_well_count = 0;
+ return 0;
+ }
+
+ if (DISPLAY_VER(i915) >= 13)
+ return set_power_wells(power_domains, xelpd_power_wells);
+ else if (IS_DG1(i915))
+ return set_power_wells(power_domains, dg1_power_wells);
+ else if (IS_ALDERLAKE_S(i915))
+ return set_power_wells(power_domains, adls_power_wells);
+ else if (IS_ROCKETLAKE(i915))
+ return set_power_wells(power_domains, rkl_power_wells);
+ else if (DISPLAY_VER(i915) == 12)
+ return set_power_wells(power_domains, tgl_power_wells);
+ else if (DISPLAY_VER(i915) == 11)
+ return set_power_wells(power_domains, icl_power_wells);
+ else if (IS_GEMINILAKE(i915))
+ return set_power_wells(power_domains, glk_power_wells);
+ else if (IS_BROXTON(i915))
+ return set_power_wells(power_domains, bxt_power_wells);
+ else if (DISPLAY_VER(i915) == 9)
+ return set_power_wells(power_domains, skl_power_wells);
+ else if (IS_CHERRYVIEW(i915))
+ return set_power_wells(power_domains, chv_power_wells);
+ else if (IS_BROADWELL(i915))
+ return set_power_wells(power_domains, bdw_power_wells);
+ else if (IS_HASWELL(i915))
+ return set_power_wells(power_domains, hsw_power_wells);
+ else if (IS_VALLEYVIEW(i915))
+ return set_power_wells(power_domains, vlv_power_wells);
+ else if (IS_I830(i915))
+ return set_power_wells(power_domains, i830_power_wells);
+ else
+ return set_power_wells(power_domains, i9xx_power_wells);
+}
+
+/**
+ * intel_display_power_map_cleanup - clean up power domain -> power well mappings
+ * @power_domains: power domain state
+ *
+ * Cleans up all the state that was initialized by intel_display_power_map_init().
+ */
+void intel_display_power_map_cleanup(struct i915_power_domains *power_domains)
+{
+ kfree(power_domains->power_wells);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_map.h b/drivers/gpu/drm/i915/display/intel_display_power_map.h
new file mode 100644
index 000000000000..da8f7055a44c
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_power_map.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __INTEL_DISPLAY_POWER_MAP_H__
+#define __INTEL_DISPLAY_POWER_MAP_H__
+
+struct i915_power_domains;
+
+int intel_display_power_map_init(struct i915_power_domains *power_domains);
+void intel_display_power_map_cleanup(struct i915_power_domains *power_domains);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c
index 2a0fb9d9c60f..91cfd5890f46 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c
@@ -4,7 +4,65 @@
*/
#include "i915_drv.h"
+#include "i915_irq.h"
+#include "intel_combo_phy.h"
+#include "intel_combo_phy_regs.h"
+#include "intel_crt.h"
+#include "intel_de.h"
#include "intel_display_power_well.h"
+#include "intel_display_types.h"
+#include "intel_dmc.h"
+#include "intel_dpio_phy.h"
+#include "intel_dpll.h"
+#include "intel_hotplug.h"
+#include "intel_pcode.h"
+#include "intel_pm.h"
+#include "intel_pps.h"
+#include "intel_tc.h"
+#include "intel_vga.h"
+#include "vlv_sideband.h"
+#include "vlv_sideband_reg.h"
+
+struct i915_power_well_regs {
+ i915_reg_t bios;
+ i915_reg_t driver;
+ i915_reg_t kvmr;
+ i915_reg_t debug;
+};
+
+struct i915_power_well_ops {
+ const struct i915_power_well_regs *regs;
+ /*
+ * Synchronize the well's hw state to match the current sw state, for
+ * example enable/disable it based on the current refcount. Called
+ * during driver init and resume time, possibly after first calling
+ * the enable/disable handlers.
+ */
+ void (*sync_hw)(struct drm_i915_private *i915,
+ struct i915_power_well *power_well);
+ /*
+ * Enable the well and resources that depend on it (for example
+ * interrupts located on the well). Called after the 0->1 refcount
+ * transition.
+ */
+ void (*enable)(struct drm_i915_private *i915,
+ struct i915_power_well *power_well);
+ /*
+ * Disable the well and resources that depend on it. Called after
+ * the 1->0 refcount transition.
+ */
+ void (*disable)(struct drm_i915_private *i915,
+ struct i915_power_well *power_well);
+ /* Returns the hw enabled state. */
+ bool (*is_enabled)(struct drm_i915_private *i915,
+ struct i915_power_well *power_well);
+};
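The enable/disable hooks are specified against refcount transitions: ->enable() runs only on 0->1 and ->disable() only on 1->0, the contract visible in intel_power_well_put() further down. A standalone model of that protocol, with illustrative names (not part of the patch):

#include <stdio.h>

struct well {
	int count;
	void (*enable)(struct well *w);
	void (*disable)(struct well *w);
};

static void well_get(struct well *w)
{
	if (!w->count++)	/* 0 -> 1: touch the hardware */
		w->enable(w);
}

static void well_put(struct well *w)
{
	if (!--w->count)	/* 1 -> 0: touch the hardware */
		w->disable(w);
}

static void on(struct well *w)  { (void)w; printf("enable\n"); }
static void off(struct well *w) { (void)w; printf("disable\n"); }

int main(void)
{
	struct well w = { .count = 0, .enable = on, .disable = off };

	well_get(&w);	/* prints "enable" */
	well_get(&w);	/* nested get: no hardware access */
	well_put(&w);
	well_put(&w);	/* prints "disable" */
	return 0;
}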
+
+static const struct i915_power_well_instance *
+i915_power_well_instance(const struct i915_power_well *power_well)
+{
+ return &power_well->desc->instances->list[power_well->instance_idx];
+}
struct i915_power_well *
lookup_power_well(struct drm_i915_private *i915,
@@ -13,7 +71,7 @@ lookup_power_well(struct drm_i915_private *i915,
struct i915_power_well *power_well;
for_each_power_well(i915, power_well)
- if (power_well->desc->id == power_well_id)
+ if (i915_power_well_instance(power_well)->id == power_well_id)
return power_well;
/*
@@ -32,7 +90,7 @@ lookup_power_well(struct drm_i915_private *i915,
void intel_power_well_enable(struct drm_i915_private *i915,
struct i915_power_well *power_well)
{
- drm_dbg_kms(&i915->drm, "enabling %s\n", power_well->desc->name);
+ drm_dbg_kms(&i915->drm, "enabling %s\n", intel_power_well_name(power_well));
power_well->desc->ops->enable(i915, power_well);
power_well->hw_enabled = true;
}
@@ -40,7 +98,7 @@ void intel_power_well_enable(struct drm_i915_private *i915,
void intel_power_well_disable(struct drm_i915_private *i915,
struct i915_power_well *power_well)
{
- drm_dbg_kms(&i915->drm, "disabling %s\n", power_well->desc->name);
+ drm_dbg_kms(&i915->drm, "disabling %s\n", intel_power_well_name(power_well));
power_well->hw_enabled = false;
power_well->desc->ops->disable(i915, power_well);
}
@@ -65,7 +123,7 @@ void intel_power_well_put(struct drm_i915_private *i915,
{
drm_WARN(&i915->drm, !power_well->count,
"Use count on power well %s is already zero",
- power_well->desc->name);
+ i915_power_well_instance(power_well)->name);
if (!--power_well->count)
intel_power_well_disable(i915, power_well);
@@ -99,15 +157,1756 @@ bool intel_power_well_is_always_on(struct i915_power_well *power_well)
const char *intel_power_well_name(struct i915_power_well *power_well)
{
- return power_well->desc->name;
+ return i915_power_well_instance(power_well)->name;
}
-u64 intel_power_well_domains(struct i915_power_well *power_well)
+struct intel_power_domain_mask *intel_power_well_domains(struct i915_power_well *power_well)
{
- return power_well->desc->domains;
+ return &power_well->domains;
}
int intel_power_well_refcount(struct i915_power_well *power_well)
{
return power_well->count;
}
+
+/*
+ * Starting with Haswell, we have a "Power Down Well" that can be turned off
+ * when not needed anymore. We have 4 registers that can request the power well
+ * to be enabled, and it will only be disabled if none of the registers is
+ * requesting it to be enabled.
+ */
+static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
+ u8 irq_pipe_mask, bool has_vga)
+{
+ if (has_vga)
+ intel_vga_reset_io_mem(dev_priv);
+
+ if (irq_pipe_mask)
+ gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
+}
+
+static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
+ u8 irq_pipe_mask)
+{
+ if (irq_pipe_mask)
+ gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
+}
+
+#define ICL_AUX_PW_TO_CH(pw_idx) \
+ ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
+
+#define ICL_TBT_AUX_PW_TO_CH(pw_idx) \
+ ((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)
+
+static enum aux_ch icl_aux_pw_to_ch(const struct i915_power_well *power_well)
+{
+ int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
+
+ return power_well->desc->is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
+ ICL_AUX_PW_TO_CH(pw_idx);
+}
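Both helpers are plain offset arithmetic between parallel enums: the AUX power well control indices and the aux_ch values form consecutive runs, so subtracting the first index and adding the first channel maps one range onto the other. A standalone sketch with illustrative enum values (not part of the patch):

#include <stdio.h>

enum pw_idx { PW_CTL_IDX_AUX_A = 8, PW_CTL_IDX_AUX_B, PW_CTL_IDX_AUX_C };
enum aux_ch { AUX_CH_A, AUX_CH_B, AUX_CH_C };

#define AUX_PW_TO_CH(pw_idx) ((pw_idx) - PW_CTL_IDX_AUX_A + AUX_CH_A)

int main(void)
{
	/* (9 - 8) + 0 == 1 == AUX_CH_B */
	printf("%d\n", AUX_PW_TO_CH(PW_CTL_IDX_AUX_B));
	return 0;
}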
+
+static struct intel_digital_port *
+aux_ch_to_digital_port(struct drm_i915_private *dev_priv,
+ enum aux_ch aux_ch)
+{
+ struct intel_digital_port *dig_port = NULL;
+ struct intel_encoder *encoder;
+
+ for_each_intel_encoder(&dev_priv->drm, encoder) {
+ /* We'll check the MST primary port */
+ if (encoder->type == INTEL_OUTPUT_DP_MST)
+ continue;
+
+ dig_port = enc_to_dig_port(encoder);
+ if (!dig_port)
+ continue;
+
+ if (dig_port->aux_ch != aux_ch) {
+ dig_port = NULL;
+ continue;
+ }
+
+ break;
+ }
+
+ return dig_port;
+}
+
+static enum phy icl_aux_pw_to_phy(struct drm_i915_private *i915,
+ const struct i915_power_well *power_well)
+{
+ enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
+ struct intel_digital_port *dig_port = aux_ch_to_digital_port(i915, aux_ch);
+
+ return intel_port_to_phy(i915, dig_port->base.port);
+}
+
+static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well,
+ bool timeout_expected)
+{
+ const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
+ int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
+
+ /*
+ * For some power wells we're not supposed to watch the status bit for
+ * an ack, but rather just wait a fixed amount of time and then
+ * proceed. This is only used on DG2.
+ */
+ if (IS_DG2(dev_priv) && power_well->desc->fixed_enable_delay) {
+ usleep_range(600, 1200);
+ return;
+ }
+
+ /* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
+ if (intel_de_wait_for_set(dev_priv, regs->driver,
+ HSW_PWR_WELL_CTL_STATE(pw_idx), 1)) {
+ drm_dbg_kms(&dev_priv->drm, "%s power well enable timeout\n",
+ intel_power_well_name(power_well));
+
+ drm_WARN_ON(&dev_priv->drm, !timeout_expected);
+	}
+}
+
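+/*
+ * Return a bitmask of the request registers currently requesting this
+ * power well: bit 0 = BIOS, bit 1 = driver, bit 2 = KVMr (only where the
+ * KVMr register exists), bit 3 = debug. The encoding matches the
+ * bios/driver/kvmr/debug diagnostic printed on a stuck disable below.
+ */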
+static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
+ const struct i915_power_well_regs *regs,
+ int pw_idx)
+{
+ u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
+ u32 ret;
+
+ ret = intel_de_read(dev_priv, regs->bios) & req_mask ? 1 : 0;
+ ret |= intel_de_read(dev_priv, regs->driver) & req_mask ? 2 : 0;
+ if (regs->kvmr.reg)
+ ret |= intel_de_read(dev_priv, regs->kvmr) & req_mask ? 4 : 0;
+ ret |= intel_de_read(dev_priv, regs->debug) & req_mask ? 8 : 0;
+
+ return ret;
+}
+
+static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
+ int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
+ bool disabled;
+ u32 reqs;
+
+ /*
+ * Bspec doesn't require waiting for PWs to get disabled, but we still do
+ * this out of paranoia. The known cases where a PW will be forced on:
+ * - a KVMR request on any power well via the KVMR request register
+ * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
+ * DEBUG request registers
+ * Skip the wait in case any of the request bits are set and print a
+ * diagnostic message.
+ */
+ wait_for((disabled = !(intel_de_read(dev_priv, regs->driver) &
+ HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
+ (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
+ if (disabled)
+ return;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
+ intel_power_well_name(power_well),
+ !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
+}
+
+static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
+ enum skl_power_gate pg)
+{
+ /* Timeout 5us for PG#0, for other PGs 1us */
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS,
+ SKL_FUSE_PG_DIST_STATUS(pg), 1));
+}
+
+static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
+ int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
+ u32 val;
+
+ if (power_well->desc->has_fuses) {
+ enum skl_power_gate pg;
+
+ pg = DISPLAY_VER(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
+ SKL_PW_CTL_IDX_TO_PG(pw_idx);
+
+ /* Wa_16013190616:adlp */
+ if (IS_ALDERLAKE_P(dev_priv) && pg == SKL_PG1)
+ intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, 0, DISABLE_FLR_SRC);
+
+ /*
+ * For PW1 we have to wait both for the PW0/PG0 fuse state
+ * before enabling the power well and PW1/PG1's own fuse
+ * state after the enabling. For all other power wells with
+ * fuses we only have to wait for that PW/PG's fuse state
+ * after the enabling.
+ */
+ if (pg == SKL_PG1)
+ gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
+ }
+
+ val = intel_de_read(dev_priv, regs->driver);
+ intel_de_write(dev_priv, regs->driver,
+ val | HSW_PWR_WELL_CTL_REQ(pw_idx));
+
+ hsw_wait_for_power_well_enable(dev_priv, power_well, false);
+
+ if (power_well->desc->has_fuses) {
+ enum skl_power_gate pg;
+
+ pg = DISPLAY_VER(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
+ SKL_PW_CTL_IDX_TO_PG(pw_idx);
+ gen9_wait_for_power_well_fuses(dev_priv, pg);
+ }
+
+ hsw_power_well_post_enable(dev_priv,
+ power_well->desc->irq_pipe_mask,
+ power_well->desc->has_vga);
+}
+
+static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
+ int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
+ u32 val;
+
+ hsw_power_well_pre_disable(dev_priv,
+ power_well->desc->irq_pipe_mask);
+
+ val = intel_de_read(dev_priv, regs->driver);
+ intel_de_write(dev_priv, regs->driver,
+ val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
+ hsw_wait_for_power_well_disable(dev_priv, power_well);
+}
+
+static void
+icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
+ int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
+ enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
+ u32 val;
+
+ drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
+
+ val = intel_de_read(dev_priv, regs->driver);
+ intel_de_write(dev_priv, regs->driver,
+ val | HSW_PWR_WELL_CTL_REQ(pw_idx));
+
+ if (DISPLAY_VER(dev_priv) < 12) {
+ val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
+ intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
+ val | ICL_LANE_ENABLE_AUX);
+ }
+
+ hsw_wait_for_power_well_enable(dev_priv, power_well, false);
+
+ /* Display WA #1178: icl */
+ if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
+ !intel_bios_is_port_edp(dev_priv, (enum port)phy)) {
+ val = intel_de_read(dev_priv, ICL_AUX_ANAOVRD1(pw_idx));
+ val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
+ intel_de_write(dev_priv, ICL_AUX_ANAOVRD1(pw_idx), val);
+ }
+}
+
+static void
+icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
+ int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
+ enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
+ u32 val;
+
+ drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
+
+ val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
+ intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
+ val & ~ICL_LANE_ENABLE_AUX);
+
+ val = intel_de_read(dev_priv, regs->driver);
+ intel_de_write(dev_priv, regs->driver,
+ val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
+
+ hsw_wait_for_power_well_disable(dev_priv, power_well);
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+
+static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well,
+ struct intel_digital_port *dig_port)
+{
+ if (drm_WARN_ON(&dev_priv->drm, !dig_port))
+ return;
+
+ if (DISPLAY_VER(dev_priv) == 11 && intel_tc_cold_requires_aux_pw(dig_port))
+ return;
+
+ drm_WARN_ON(&dev_priv->drm, !intel_tc_port_ref_held(dig_port));
+}
+
+#else
+
+static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well,
+ struct intel_digital_port *dig_port)
+{
+}
+
+#endif
+
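+/* e.g. TGL_PW_CTL_IDX_AUX_TC1 maps to the first TC port, TC2 to the second, ... */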
+#define TGL_AUX_PW_TO_TC_PORT(pw_idx) ((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1)
+
+static void icl_tc_cold_exit(struct drm_i915_private *i915)
+{
+ int ret, tries = 0;
+
+ while (1) {
+ ret = snb_pcode_write_timeout(&i915->uncore, ICL_PCODE_EXIT_TCCOLD, 0,
+ 250, 1);
+ if (ret != -EAGAIN || ++tries == 3)
+ break;
+ msleep(1);
+ }
+
+ /* Spec states that TC cold exit can take up to 1ms to complete */
+ if (!ret)
+ msleep(1);
+
+ /* TODO: turn failure into an error as soon as i915 CI updates the ICL IFWI */
+ drm_dbg_kms(&i915->drm, "TC cold block %s\n", ret ? "failed" :
+ "succeeded");
+}
+
+static void
+icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
+ struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);
+ const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
+ bool is_tbt = power_well->desc->is_tc_tbt;
+ bool timeout_expected;
+ u32 val;
+
+ icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);
+
+ val = intel_de_read(dev_priv, DP_AUX_CH_CTL(aux_ch));
+ val &= ~DP_AUX_CH_CTL_TBT_IO;
+ if (is_tbt)
+ val |= DP_AUX_CH_CTL_TBT_IO;
+ intel_de_write(dev_priv, DP_AUX_CH_CTL(aux_ch), val);
+
+ val = intel_de_read(dev_priv, regs->driver);
+ intel_de_write(dev_priv, regs->driver,
+ val | HSW_PWR_WELL_CTL_REQ(i915_power_well_instance(power_well)->hsw.idx));
+
+ /*
+ * An AUX timeout is expected if the TBT DP tunnel is down,
+ * or when we need to enable AUX on a legacy TypeC port as part of the TC-cold
+ * exit sequence.
+ */
+ timeout_expected = is_tbt || intel_tc_cold_requires_aux_pw(dig_port);
+ if (DISPLAY_VER(dev_priv) == 11 && intel_tc_cold_requires_aux_pw(dig_port))
+ icl_tc_cold_exit(dev_priv);
+
+ hsw_wait_for_power_well_enable(dev_priv, power_well, timeout_expected);
+
+ if (DISPLAY_VER(dev_priv) >= 12 && !is_tbt) {
+ enum tc_port tc_port;
+
+ tc_port = TGL_AUX_PW_TO_TC_PORT(i915_power_well_instance(power_well)->hsw.idx);
+ intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
+ HIP_INDEX_VAL(tc_port, 0x2));
+
+ if (intel_de_wait_for_set(dev_priv, DKL_CMN_UC_DW_27(tc_port),
+ DKL_CMN_UC_DW27_UC_HEALTH, 1))
+ drm_warn(&dev_priv->drm,
+ "Timeout waiting TC uC health\n");
+ }
+}
+
+static void
+icl_aux_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
+
+ if (intel_phy_is_tc(dev_priv, phy))
+ return icl_tc_phy_aux_power_well_enable(dev_priv, power_well);
+ else if (IS_ICELAKE(dev_priv))
+ return icl_combo_phy_aux_power_well_enable(dev_priv,
+ power_well);
+ else
+ return hsw_power_well_enable(dev_priv, power_well);
+}
+
+static void
+icl_aux_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
+
+ if (intel_phy_is_tc(dev_priv, phy))
+ return hsw_power_well_disable(dev_priv, power_well);
+ else if (IS_ICELAKE(dev_priv))
+ return icl_combo_phy_aux_power_well_disable(dev_priv,
+ power_well);
+ else
+ return hsw_power_well_disable(dev_priv, power_well);
+}
+
+/*
+ * We should only use the power well if we explicitly asked the hardware to
+ * enable it, so check if it's enabled and also check if we've requested it to
+ * be enabled.
+ */
+static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
+ enum i915_power_well_id id = i915_power_well_instance(power_well)->id;
+ int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
+ u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
+ HSW_PWR_WELL_CTL_STATE(pw_idx);
+ u32 val;
+
+ val = intel_de_read(dev_priv, regs->driver);
+
+ /*
+ * On GEN9 big core, due to a DMC bug, the driver's request bits for PW1
+ * and the MISC_IO PW will not be restored, so check instead for the
+ * BIOS's own request bits, which are forced-on for these power wells
+ * when exiting DC5/6.
+ */
+ if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv) &&
+ (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
+ val |= intel_de_read(dev_priv, regs->bios);
+
+ return (val & mask) == mask;
+}
+
+static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
+{
+ drm_WARN_ONCE(&dev_priv->drm,
+ (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC9),
+ "DC9 already programmed to be enabled.\n");
+ drm_WARN_ONCE(&dev_priv->drm,
+ intel_de_read(dev_priv, DC_STATE_EN) &
+ DC_STATE_EN_UPTO_DC5,
+ "DC5 still not disabled to enable DC9.\n");
+ drm_WARN_ONCE(&dev_priv->drm,
+ intel_de_read(dev_priv, HSW_PWR_WELL_CTL2) &
+ HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
+ "Power well 2 on.\n");
+ drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
+ "Interrupts not disabled yet.\n");
+
+ /*
+ * TODO: check for the following to verify the conditions to enter DC9
+ * state are satisfied:
+ * 1] Check relevant display engine registers to verify if mode set
+ * disable sequence was followed.
+ * 2] Check if display uninitialize sequence is initialized.
+ */
+}
+
+static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
+{
+ drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
+ "Interrupts not disabled yet.\n");
+ drm_WARN_ONCE(&dev_priv->drm,
+ intel_de_read(dev_priv, DC_STATE_EN) &
+ DC_STATE_EN_UPTO_DC5,
+ "DC5 still not disabled.\n");
+
+ /*
+ * TODO: check for the following to verify DC9 state was indeed
+ * entered before programming to disable it:
+ * 1] Check relevant display engine registers to verify if mode
+ * set disable sequence was followed.
+ * 2] Check if display uninitialize sequence is initialized.
+ */
+}
+
+static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
+ u32 state)
+{
+ int rewrites = 0;
+ int rereads = 0;
+ u32 v;
+
+ intel_de_write(dev_priv, DC_STATE_EN, state);
+
+ /* It has been observed that disabling the DC6 state sometimes
+ * doesn't stick and the DMC keeps returning the old value. Make sure
+ * the write is read back unchanged enough times (more than five
+ * consecutive reads below) and keep rewriting until we are confident
+ * the state is exactly what we want.
+ */
+ do {
+ v = intel_de_read(dev_priv, DC_STATE_EN);
+
+ if (v != state) {
+ intel_de_write(dev_priv, DC_STATE_EN, state);
+ rewrites++;
+ rereads = 0;
+ } else if (rereads++ > 5) {
+ break;
+ }
+
+ } while (rewrites < 100);
+
+ if (v != state)
+ drm_err(&dev_priv->drm,
+ "Writing dc state to 0x%x failed, now 0x%x\n",
+ state, v);
+
+ /* Most of the time a single rewrite is enough, so avoid log spam */
+ if (rewrites > 1)
+ drm_dbg_kms(&dev_priv->drm,
+ "Rewrote dc state to 0x%x %d times\n",
+ state, rewrites);
+}
+
+static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
+{
+ u32 mask;
+
+ mask = DC_STATE_EN_UPTO_DC5;
+
+ if (DISPLAY_VER(dev_priv) >= 12)
+ mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6
+ | DC_STATE_EN_DC9;
+ else if (DISPLAY_VER(dev_priv) == 11)
+ mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
+ else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ mask |= DC_STATE_EN_DC9;
+ else
+ mask |= DC_STATE_EN_UPTO_DC6;
+
+ return mask;
+}
+
+void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+
+ if (!HAS_DISPLAY(dev_priv))
+ return;
+
+ val = intel_de_read(dev_priv, DC_STATE_EN) & gen9_dc_mask(dev_priv);
+
+ drm_dbg_kms(&dev_priv->drm,
+ "Resetting DC state tracking from %02x to %02x\n",
+ dev_priv->dmc.dc_state, val);
+ dev_priv->dmc.dc_state = val;
+}
+
+/**
+ * gen9_set_dc_state - set target display C power state
+ * @dev_priv: i915 device instance
+ * @state: target DC power state
+ * - DC_STATE_DISABLE
+ * - DC_STATE_EN_UPTO_DC5
+ * - DC_STATE_EN_UPTO_DC6
+ * - DC_STATE_EN_DC9
+ *
+ * Signal to DMC firmware/HW the target DC power state passed in @state.
+ * DMC/HW can turn off individual display clocks and power rails when entering
+ * a deeper DC power state (higher in number) and turns these back on when
+ * exiting that state to a shallower power state (lower in number). The HW
+ * will decide when to actually enter a given state on an on-demand basis,
+ * for instance depending on the active state of display pipes. The state of
+ * display registers backed by affected power rails is saved/restored as
+ * needed.
+ *
+ * Based on the above, entering a deeper DC power state is asynchronous wrt.
+ * enabling it. Disabling a deeper power state is synchronous: for instance
+ * setting %DC_STATE_DISABLE won't complete until all HW resources are turned
+ * back on and register state is restored. This is guaranteed by the MMIO write
+ * to DC_STATE_EN blocking until the state is restored.
+ */
+void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
+{
+ u32 val;
+ u32 mask;
+
+ if (!HAS_DISPLAY(dev_priv))
+ return;
+
+ if (drm_WARN_ON_ONCE(&dev_priv->drm,
+ state & ~dev_priv->dmc.allowed_dc_mask))
+ state &= dev_priv->dmc.allowed_dc_mask;
+
+ val = intel_de_read(dev_priv, DC_STATE_EN);
+ mask = gen9_dc_mask(dev_priv);
+ drm_dbg_kms(&dev_priv->drm, "Setting DC state from %02x to %02x\n",
+ val & mask, state);
+
+ /* Check if DMC is ignoring our DC state requests */
+ if ((val & mask) != dev_priv->dmc.dc_state)
+ drm_err(&dev_priv->drm, "DC state mismatch (0x%x -> 0x%x)\n",
+ dev_priv->dmc.dc_state, val & mask);
+
+ val &= ~mask;
+ val |= state;
+
+ gen9_write_dc_state(dev_priv, val);
+
+ dev_priv->dmc.dc_state = val & mask;
+}
+
+static void tgl_enable_dc3co(struct drm_i915_private *dev_priv)
+{
+ drm_dbg_kms(&dev_priv->drm, "Enabling DC3CO\n");
+ gen9_set_dc_state(dev_priv, DC_STATE_EN_DC3CO);
+}
+
+static void tgl_disable_dc3co(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+
+ drm_dbg_kms(&dev_priv->drm, "Disabling DC3CO\n");
+ val = intel_de_read(dev_priv, DC_STATE_EN);
+ val &= ~DC_STATE_DC3CO_STATUS;
+ intel_de_write(dev_priv, DC_STATE_EN, val);
+ gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+ /*
+ * Delay of 200us for DC3CO exit time, per Bspec 49196
+ */
+ usleep_range(200, 210);
+}
+
+static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
+{
+ enum i915_power_well_id high_pg;
+
+ /* Power wells at this level and above must be disabled for DC5 entry */
+ if (DISPLAY_VER(dev_priv) == 12)
+ high_pg = ICL_DISP_PW_3;
+ else
+ high_pg = SKL_DISP_PW_2;
+
+ drm_WARN_ONCE(&dev_priv->drm,
+ intel_display_power_well_is_enabled(dev_priv, high_pg),
+ "Power wells above platform's DC5 limit still enabled.\n");
+
+ drm_WARN_ONCE(&dev_priv->drm,
+ (intel_de_read(dev_priv, DC_STATE_EN) &
+ DC_STATE_EN_UPTO_DC5),
+ "DC5 already programmed to be enabled.\n");
+ assert_rpm_wakelock_held(&dev_priv->runtime_pm);
+
+ assert_dmc_loaded(dev_priv);
+}
+
+void gen9_enable_dc5(struct drm_i915_private *dev_priv)
+{
+ assert_can_enable_dc5(dev_priv);
+
+ drm_dbg_kms(&dev_priv->drm, "Enabling DC5\n");
+
+ /* Wa Display #1183: skl,kbl,cfl */
+ if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
+ intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
+ intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);
+
+ gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
+}
+
+static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
+{
+ drm_WARN_ONCE(&dev_priv->drm,
+ intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
+ "Backlight is not disabled.\n");
+ drm_WARN_ONCE(&dev_priv->drm,
+ (intel_de_read(dev_priv, DC_STATE_EN) &
+ DC_STATE_EN_UPTO_DC6),
+ "DC6 already programmed to be enabled.\n");
+
+ assert_dmc_loaded(dev_priv);
+}
+
+void skl_enable_dc6(struct drm_i915_private *dev_priv)
+{
+ assert_can_enable_dc6(dev_priv);
+
+ drm_dbg_kms(&dev_priv->drm, "Enabling DC6\n");
+
+ /* Wa Display #1183: skl,kbl,cfl */
+ if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
+ intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
+ intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);
+
+ gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
+}
+
+void bxt_enable_dc9(struct drm_i915_private *dev_priv)
+{
+ assert_can_enable_dc9(dev_priv);
+
+ drm_dbg_kms(&dev_priv->drm, "Enabling DC9\n");
+ /*
+ * Power sequencer reset is not needed on
+ * platforms with South Display Engine on PCH,
+ * because PPS registers are always on.
+ */
+ if (!HAS_PCH_SPLIT(dev_priv))
+ intel_pps_reset_all(dev_priv);
+ gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
+}
+
+void bxt_disable_dc9(struct drm_i915_private *dev_priv)
+{
+ assert_can_disable_dc9(dev_priv);
+
+ drm_dbg_kms(&dev_priv->drm, "Disabling DC9\n");
+
+ gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+
+ intel_pps_unlock_regs_wa(dev_priv);
+}
+
+static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
+ int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
+ u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
+ u32 bios_req = intel_de_read(dev_priv, regs->bios);
+
+ /* Take over the request bit if set by BIOS. */
+ if (bios_req & mask) {
+ u32 drv_req = intel_de_read(dev_priv, regs->driver);
+
+ if (!(drv_req & mask))
+ intel_de_write(dev_priv, regs->driver, drv_req | mask);
+ intel_de_write(dev_priv, regs->bios, bios_req & ~mask);
+ }
+}
+
+static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ bxt_ddi_phy_init(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
+}
+
+static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ bxt_ddi_phy_uninit(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
+}
+
+static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ return bxt_ddi_phy_is_enabled(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
+}
+
+static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
+{
+ struct i915_power_well *power_well;
+
+ power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
+ if (intel_power_well_refcount(power_well) > 0)
+ bxt_ddi_phy_verify_state(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
+
+ power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
+ if (intel_power_well_refcount(power_well) > 0)
+ bxt_ddi_phy_verify_state(dev_priv, i915_power_well_instance(power_well)->bxt.phy);
+
+ if (IS_GEMINILAKE(dev_priv)) {
+ power_well = lookup_power_well(dev_priv,
+ GLK_DISP_PW_DPIO_CMN_C);
+ if (intel_power_well_refcount(power_well) > 0)
+ bxt_ddi_phy_verify_state(dev_priv,
+ i915_power_well_instance(power_well)->bxt.phy);
+ }
+}
+
+static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ return ((intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 &&
+ (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0);
+}
+
+static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
+{
+ u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(dev_priv);
+ u8 enabled_dbuf_slices = dev_priv->dbuf.enabled_slices;
+
+ drm_WARN(&dev_priv->drm,
+ hw_enabled_dbuf_slices != enabled_dbuf_slices,
+ "Unexpected DBuf power power state (0x%08x, expected 0x%08x)\n",
+ hw_enabled_dbuf_slices,
+ enabled_dbuf_slices);
+}
+
+void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
+{
+ struct intel_cdclk_config cdclk_config = {};
+
+ if (dev_priv->dmc.target_dc_state == DC_STATE_EN_DC3CO) {
+ tgl_disable_dc3co(dev_priv);
+ return;
+ }
+
+ gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+
+ if (!HAS_DISPLAY(dev_priv))
+ return;
+
+ intel_cdclk_get_cdclk(dev_priv, &cdclk_config);
+ /* Can't read out voltage_level so can't use intel_cdclk_changed() */
+ drm_WARN_ON(&dev_priv->drm,
+ intel_cdclk_needs_modeset(&dev_priv->cdclk.hw,
+ &cdclk_config));
+
+ gen9_assert_dbuf_enabled(dev_priv);
+
+ if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ bxt_verify_ddi_phy_power_wells(dev_priv);
+
+ if (DISPLAY_VER(dev_priv) >= 11)
+ /*
+ * DMC retains HW context only for port A; the other combo
+ * PHY's HW context for port B is lost after DC transitions,
+ * so we need to restore it manually.
+ */
+ intel_combo_phy_init(dev_priv);
+}
+
+static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ gen9_disable_dc_states(dev_priv);
+}
+
+static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ if (!intel_dmc_has_payload(dev_priv))
+ return;
+
+ switch (dev_priv->dmc.target_dc_state) {
+ case DC_STATE_EN_DC3CO:
+ tgl_enable_dc3co(dev_priv);
+ break;
+ case DC_STATE_EN_UPTO_DC6:
+ skl_enable_dc6(dev_priv);
+ break;
+ case DC_STATE_EN_UPTO_DC5:
+ gen9_enable_dc5(dev_priv);
+ break;
+ }
+}
+
+static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+}
+
+static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+}
+
+static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ return true;
+}
+
+static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ if ((intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
+ i830_enable_pipe(dev_priv, PIPE_A);
+ if ((intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
+ i830_enable_pipe(dev_priv, PIPE_B);
+}
+
+static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ i830_disable_pipe(dev_priv, PIPE_B);
+ i830_disable_pipe(dev_priv, PIPE_A);
+}
+
+static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ return intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
+ intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
+}
+
+static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ if (intel_power_well_refcount(power_well) > 0)
+ i830_pipes_power_well_enable(dev_priv, power_well);
+ else
+ i830_pipes_power_well_disable(dev_priv, power_well);
+}
+
+static void vlv_set_power_well(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well, bool enable)
+{
+ int pw_idx = i915_power_well_instance(power_well)->vlv.idx;
+ u32 mask;
+ u32 state;
+ u32 ctrl;
+
+ mask = PUNIT_PWRGT_MASK(pw_idx);
+ state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
+ PUNIT_PWRGT_PWR_GATE(pw_idx);
+
+ vlv_punit_get(dev_priv);
+
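+/*
+ * COND is true once the punit status readback matches the requested
+ * power state; checked once up front and then polled after the write.
+ */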
+#define COND \
+ ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
+
+ if (COND)
+ goto out;
+
+ ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
+ ctrl &= ~mask;
+ ctrl |= state;
+ vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
+
+ if (wait_for(COND, 100))
+ drm_err(&dev_priv->drm,
+ "timeout setting power well state %08x (%08x)\n",
+ state,
+ vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
+
+#undef COND
+
+out:
+ vlv_punit_put(dev_priv);
+}
+
+static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ vlv_set_power_well(dev_priv, power_well, true);
+}
+
+static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ vlv_set_power_well(dev_priv, power_well, false);
+}
+
+static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ int pw_idx = i915_power_well_instance(power_well)->vlv.idx;
+ bool enabled = false;
+ u32 mask;
+ u32 state;
+ u32 ctrl;
+
+ mask = PUNIT_PWRGT_MASK(pw_idx);
+ ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);
+
+ vlv_punit_get(dev_priv);
+
+ state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
+ /*
+ * We only ever set the power-on and power-gate states, anything
+ * else is unexpected.
+ */
+ drm_WARN_ON(&dev_priv->drm, state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
+ state != PUNIT_PWRGT_PWR_GATE(pw_idx));
+ if (state == ctrl)
+ enabled = true;
+
+ /*
+ * A transient state at this point would mean some unexpected party
+ * is poking at the power controls too.
+ */
+ ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
+ drm_WARN_ON(&dev_priv->drm, ctrl != state);
+
+ vlv_punit_put(dev_priv);
+
+ return enabled;
+}
+
+static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+
+ /*
+ * On driver load, a pipe may be active and driving a DSI display.
+ * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
+ * (and never recovering) in this case. intel_dsi_post_disable() will
+ * clear it when we turn off the display.
+ */
+ val = intel_de_read(dev_priv, DSPCLK_GATE_D);
+ val &= DPOUNIT_CLOCK_GATE_DISABLE;
+ val |= VRHUNIT_CLOCK_GATE_DISABLE;
+ intel_de_write(dev_priv, DSPCLK_GATE_D, val);
+
+ /*
+ * Disable trickle feed and enable pnd deadline calculation
+ */
+ intel_de_write(dev_priv, MI_ARB_VLV,
+ MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
+ intel_de_write(dev_priv, CBR1_VLV, 0);
+
+ drm_WARN_ON(&dev_priv->drm, RUNTIME_INFO(dev_priv)->rawclk_freq == 0);
+ intel_de_write(dev_priv, RAWCLK_FREQ_VLV,
+ DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq,
+ 1000));
+}
+
+static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
+{
+ struct intel_encoder *encoder;
+ enum pipe pipe;
+
+ /*
+ * Enable the CRI clock source so we can get at the
+ * display and the reference clock for VGA
+ * hotplug / manual detection. Supposedly DSI also
+ * needs the ref clock up and running.
+ *
+ * CHV DPLL B/C have some issues if VGA mode is enabled.
+ */
+ for_each_pipe(dev_priv, pipe) {
+ u32 val = intel_de_read(dev_priv, DPLL(pipe));
+
+ val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
+ if (pipe != PIPE_A)
+ val |= DPLL_INTEGRATED_CRI_CLK_VLV;
+
+ intel_de_write(dev_priv, DPLL(pipe), val);
+ }
+
+ vlv_init_display_clock_gating(dev_priv);
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ valleyview_enable_display_irqs(dev_priv);
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ /*
+ * During driver initialization/resume we can avoid restoring the
+ * part of the HW/SW state that will be inited anyway explicitly.
+ */
+ if (dev_priv->power_domains.initializing)
+ return;
+
+ intel_hpd_init(dev_priv);
+ intel_hpd_poll_disable(dev_priv);
+
+ /* Re-enable the ADPA, if we have one */
+ for_each_intel_encoder(&dev_priv->drm, encoder) {
+ if (encoder->type == INTEL_OUTPUT_ANALOG)
+ intel_crt_reset(&encoder->base);
+ }
+
+ intel_vga_redisable_power_on(dev_priv);
+
+ intel_pps_unlock_regs_wa(dev_priv);
+}
+
+static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
+{
+ spin_lock_irq(&dev_priv->irq_lock);
+ valleyview_disable_display_irqs(dev_priv);
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ /* make sure we're done processing display irqs */
+ intel_synchronize_irq(dev_priv);
+
+ intel_pps_reset_all(dev_priv);
+
+ /* Prevent us from re-enabling polling by accident in late suspend */
+ if (!dev_priv->drm.dev->power.is_suspended)
+ intel_hpd_poll_enable(dev_priv);
+}
+
+static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ vlv_set_power_well(dev_priv, power_well, true);
+
+ vlv_display_power_well_init(dev_priv);
+}
+
+static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ vlv_display_power_well_deinit(dev_priv);
+
+ vlv_set_power_well(dev_priv, power_well, false);
+}
+
+static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ /* since ref/cri clock was enabled */
+ udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
+
+ vlv_set_power_well(dev_priv, power_well, true);
+
+ /*
+ * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
+ * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
+ * a. GUnit 0x2110 bit[0] set to 1 (def 0)
+ * b. The other bits such as sfr settings / modesel may all
+ * be set to 0.
+ *
+ * This should only be done on init and resume from S3 with
+ * both PLLs disabled, or we risk losing DPIO and PLL
+ * synchronization.
+ */
+ intel_de_write(dev_priv, DPIO_CTL,
+ intel_de_read(dev_priv, DPIO_CTL) | DPIO_CMNRST);
+}
+
+static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum pipe pipe;
+
+ for_each_pipe(dev_priv, pipe)
+ assert_pll_disabled(dev_priv, pipe);
+
+ /* Assert common reset */
+ intel_de_write(dev_priv, DPIO_CTL,
+ intel_de_read(dev_priv, DPIO_CTL) & ~DPIO_CMNRST);
+
+ vlv_set_power_well(dev_priv, power_well, false);
+}
+
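+/* True iff all of @bits are set in @val. */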
+#define BITS_SET(val, bits) (((val) & (bits)) == (bits))
+
+static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
+{
+ struct i915_power_well *cmn_bc =
+ lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
+ struct i915_power_well *cmn_d =
+ lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
+ u32 phy_control = dev_priv->chv_phy_control;
+ u32 phy_status = 0;
+ u32 phy_status_mask = 0xffffffff;
+
+ /*
+ * The BIOS can leave the PHY in some weird state
+ * where it doesn't fully power down some parts.
+ * Disable the asserts until the PHY has been fully
+ * reset (i.e. the power well has been disabled at
+ * least once).
+ */
+ if (!dev_priv->chv_phy_assert[DPIO_PHY0])
+ phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
+ PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
+ PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
+ PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
+ PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
+ PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));
+
+ if (!dev_priv->chv_phy_assert[DPIO_PHY1])
+ phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
+ PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
+ PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));
+
+ if (intel_power_well_is_enabled(dev_priv, cmn_bc)) {
+ phy_status |= PHY_POWERGOOD(DPIO_PHY0);
+
+ /* this assumes override is only used to enable lanes */
+ if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
+ phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);
+
+ if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
+ phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);
+
+ /* CL1 is on whenever anything is on in either channel */
+ if (BITS_SET(phy_control,
+ PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
+ PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
+ phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);
+
+ /*
+ * The DPLLB check accounts for the pipe B + port A usage
+ * with CL2 powered up but all the lanes in the second channel
+ * powered down.
+ */
+ if (BITS_SET(phy_control,
+ PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
+ (intel_de_read(dev_priv, DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
+ phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);
+
+ if (BITS_SET(phy_control,
+ PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
+ phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
+ if (BITS_SET(phy_control,
+ PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
+ phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);
+
+ if (BITS_SET(phy_control,
+ PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
+ phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
+ if (BITS_SET(phy_control,
+ PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
+ phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
+ }
+
+ if (intel_power_well_is_enabled(dev_priv, cmn_d)) {
+ phy_status |= PHY_POWERGOOD(DPIO_PHY1);
+
+ /* this assumes override is only used to enable lanes */
+ if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
+ phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);
+
+ if (BITS_SET(phy_control,
+ PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
+ phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);
+
+ if (BITS_SET(phy_control,
+ PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
+ phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
+ if (BITS_SET(phy_control,
+ PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
+ phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
+ }
+
+ phy_status &= phy_status_mask;
+
+ /*
+ * The PHY may be busy with some initial calibration and whatnot,
+ * so the power state can take a while to actually change.
+ */
+ if (intel_de_wait_for_register(dev_priv, DISPLAY_PHY_STATUS,
+ phy_status_mask, phy_status, 10))
+ drm_err(&dev_priv->drm,
+ "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
+ intel_de_read(dev_priv, DISPLAY_PHY_STATUS) & phy_status_mask,
+ phy_status, dev_priv->chv_phy_control);
+}
+
+#undef BITS_SET
+
+static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum i915_power_well_id id = i915_power_well_instance(power_well)->id;
+ enum dpio_phy phy;
+ enum pipe pipe;
+ u32 tmp;
+
+ drm_WARN_ON_ONCE(&dev_priv->drm,
+ id != VLV_DISP_PW_DPIO_CMN_BC &&
+ id != CHV_DISP_PW_DPIO_CMN_D);
+
+ if (id == VLV_DISP_PW_DPIO_CMN_BC) {
+ pipe = PIPE_A;
+ phy = DPIO_PHY0;
+ } else {
+ pipe = PIPE_C;
+ phy = DPIO_PHY1;
+ }
+
+ /* since ref/cri clock was enabled */
+ udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
+ vlv_set_power_well(dev_priv, power_well, true);
+
+ /* Poll for phypwrgood signal */
+ if (intel_de_wait_for_set(dev_priv, DISPLAY_PHY_STATUS,
+ PHY_POWERGOOD(phy), 1))
+ drm_err(&dev_priv->drm, "Display PHY %d is not power up\n",
+ phy);
+
+ vlv_dpio_get(dev_priv);
+
+ /* Enable dynamic power down */
+ tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
+ tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
+ DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
+ vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);
+
+ if (id == VLV_DISP_PW_DPIO_CMN_BC) {
+ tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
+ tmp |= DPIO_DYNPWRDOWNEN_CH1;
+ vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
+ } else {
+ /*
+ * Force the non-existent CL2 off. BXT does this
+ * too, so maybe it saves some power even though
+ * CL2 doesn't exist?
+ */
+ tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
+ tmp |= DPIO_CL2_LDOFUSE_PWRENB;
+ vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
+ }
+
+ vlv_dpio_put(dev_priv);
+
+ dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
+ intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
+ dev_priv->chv_phy_control);
+
+ drm_dbg_kms(&dev_priv->drm,
+ "Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
+ phy, dev_priv->chv_phy_control);
+
+ assert_chv_phy_status(dev_priv);
+}
+
+static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum i915_power_well_id id = i915_power_well_instance(power_well)->id;
+ enum dpio_phy phy;
+
+ drm_WARN_ON_ONCE(&dev_priv->drm,
+ id != VLV_DISP_PW_DPIO_CMN_BC &&
+ id != CHV_DISP_PW_DPIO_CMN_D);
+
+ if (id == VLV_DISP_PW_DPIO_CMN_BC) {
+ phy = DPIO_PHY0;
+ assert_pll_disabled(dev_priv, PIPE_A);
+ assert_pll_disabled(dev_priv, PIPE_B);
+ } else {
+ phy = DPIO_PHY1;
+ assert_pll_disabled(dev_priv, PIPE_C);
+ }
+
+ dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
+ intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
+ dev_priv->chv_phy_control);
+
+ vlv_set_power_well(dev_priv, power_well, false);
+
+ drm_dbg_kms(&dev_priv->drm,
+ "Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
+ phy, dev_priv->chv_phy_control);
+
+ /* PHY is fully reset now, so we can enable the PHY state asserts */
+ dev_priv->chv_phy_assert[phy] = true;
+
+ assert_chv_phy_status(dev_priv);
+}
+
+static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
+ enum dpio_channel ch, bool override, unsigned int mask)
+{
+ enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
+ u32 reg, val, expected, actual;
+
+ /*
+ * The BIOS can leave the PHY in some weird state
+ * where it doesn't fully power down some parts.
+ * Disable the asserts until the PHY has been fully
+ * reset (i.e. the power well has been disabled at
+ * least once).
+ */
+ if (!dev_priv->chv_phy_assert[phy])
+ return;
+
+ if (ch == DPIO_CH0)
+ reg = _CHV_CMN_DW0_CH0;
+ else
+ reg = _CHV_CMN_DW6_CH1;
+
+ vlv_dpio_get(dev_priv);
+ val = vlv_dpio_read(dev_priv, pipe, reg);
+ vlv_dpio_put(dev_priv);
+
+ /*
+ * This assumes !override is only used when the port is disabled.
+ * All lanes should power down even without the override when
+ * the port is disabled.
+ */
+ if (!override || mask == 0xf) {
+ expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
+ /*
+ * If CH1 common lane is not active anymore
+ * (e.g. for pipe B DPLL) the entire channel will
+ * shut down, which causes the common lane registers
+ * to read as 0. That means we can't actually check
+ * the lane power down status bits, but as the entire
+ * register reads as 0 it's a good indication that the
+ * channel is indeed entirely powered down.
+ */
+ if (ch == DPIO_CH1 && val == 0)
+ expected = 0;
+ } else if (mask != 0x0) {
+ expected = DPIO_ANYDL_POWERDOWN;
+ } else {
+ expected = 0;
+ }
+
+ if (ch == DPIO_CH0)
+ actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
+ else
+ actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
+ actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
+
+ drm_WARN(&dev_priv->drm, actual != expected,
+ "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
+ !!(actual & DPIO_ALLDL_POWERDOWN),
+ !!(actual & DPIO_ANYDL_POWERDOWN),
+ !!(expected & DPIO_ALLDL_POWERDOWN),
+ !!(expected & DPIO_ANYDL_POWERDOWN),
+ reg, val);
+}
+
+bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
+ enum dpio_channel ch, bool override)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ bool was_override;
+
+ mutex_lock(&power_domains->lock);
+
+ was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+
+ if (override == was_override)
+ goto out;
+
+ if (override)
+ dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+ else
+ dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+
+ intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
+ dev_priv->chv_phy_control);
+
+ drm_dbg_kms(&dev_priv->drm,
+ "Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
+ phy, ch, dev_priv->chv_phy_control);
+
+ assert_chv_phy_status(dev_priv);
+
+out:
+ mutex_unlock(&power_domains->lock);
+
+ return was_override;
+}
+
+void chv_phy_powergate_lanes(struct intel_encoder *encoder,
+ bool override, unsigned int mask)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ enum dpio_phy phy = vlv_dig_port_to_phy(enc_to_dig_port(encoder));
+ enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder));
+
+ mutex_lock(&power_domains->lock);
+
+ dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
+ dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);
+
+ if (override)
+ dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+ else
+ dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
+
+ intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
+ dev_priv->chv_phy_control);
+
+ drm_dbg_kms(&dev_priv->drm,
+ "Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
+ phy, ch, mask, dev_priv->chv_phy_control);
+
+ assert_chv_phy_status(dev_priv);
+
+ assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);
+
+ mutex_unlock(&power_domains->lock);
+}
+
+static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum pipe pipe = PIPE_A;
+ bool enabled;
+ u32 state, ctrl;
+
+ vlv_punit_get(dev_priv);
+
+ state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
+ /*
+ * We only ever set the power-on and power-gate states, anything
+ * else is unexpected.
+ */
+ drm_WARN_ON(&dev_priv->drm, state != DP_SSS_PWR_ON(pipe) &&
+ state != DP_SSS_PWR_GATE(pipe));
+ enabled = state == DP_SSS_PWR_ON(pipe);
+
+ /*
+ * A transient state at this point would mean some unexpected party
+ * is poking at the power controls too.
+ */
+ ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
+ drm_WARN_ON(&dev_priv->drm, ctrl << 16 != state);
+
+ vlv_punit_put(dev_priv);
+
+ return enabled;
+}
+
+static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well,
+ bool enable)
+{
+ enum pipe pipe = PIPE_A;
+ u32 state;
+ u32 ctrl;
+
+ state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
+
+ vlv_punit_get(dev_priv);
+
+#define COND \
+ ((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)
+
+ if (COND)
+ goto out;
+
+ ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
+ ctrl &= ~DP_SSC_MASK(pipe);
+ ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
+ vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);
+
+ if (wait_for(COND, 100))
+ drm_err(&dev_priv->drm,
+ "timeout setting power well state %08x (%08x)\n",
+ state,
+ vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));
+
+#undef COND
+
+out:
+ vlv_punit_put(dev_priv);
+}
+
+static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
+ dev_priv->chv_phy_control);
+}
+
+static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ chv_set_pipe_power_well(dev_priv, power_well, true);
+
+ vlv_display_power_well_init(dev_priv);
+}
+
+static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ vlv_display_power_well_deinit(dev_priv);
+
+ chv_set_pipe_power_well(dev_priv, power_well, false);
+}
+
+static void
+tgl_tc_cold_request(struct drm_i915_private *i915, bool block)
+{
+ u8 tries = 0;
+ int ret;
+
+ while (1) {
+ u32 low_val;
+ u32 high_val = 0;
+
+ if (block)
+ low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_BLOCK_REQ;
+ else
+ low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_UNBLOCK_REQ;
+
+ /*
+ * Spec states that we should time out the request after 200us,
+ * but the function below will time out after 500us.
+ */
+ ret = snb_pcode_read(&i915->uncore, TGL_PCODE_TCCOLD, &low_val, &high_val);
+ if (ret == 0) {
+ if (block &&
+ (low_val & TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED))
+ ret = -EIO;
+ else
+ break;
+ }
+
+ if (++tries == 3)
+ break;
+
+ msleep(1);
+ }
+
+ if (ret)
+ drm_err(&i915->drm, "TC cold %sblock failed\n",
+ block ? "" : "un");
+ else
+ drm_dbg_kms(&i915->drm, "TC cold %sblock succeeded\n",
+ block ? "" : "un");
+}
+
+static void
+tgl_tc_cold_off_power_well_enable(struct drm_i915_private *i915,
+ struct i915_power_well *power_well)
+{
+ tgl_tc_cold_request(i915, true);
+}
+
+static void
+tgl_tc_cold_off_power_well_disable(struct drm_i915_private *i915,
+ struct i915_power_well *power_well)
+{
+ tgl_tc_cold_request(i915, false);
+}
+
+static void
+tgl_tc_cold_off_power_well_sync_hw(struct drm_i915_private *i915,
+ struct i915_power_well *power_well)
+{
+ if (intel_power_well_refcount(power_well) > 0)
+ tgl_tc_cold_off_power_well_enable(i915, power_well);
+ else
+ tgl_tc_cold_off_power_well_disable(i915, power_well);
+}
+
+static bool
+tgl_tc_cold_off_power_well_is_enabled(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ /*
+ * Not the correct implementation, but there is no way to just read the
+ * state back from PCODE, so return the refcount to avoid state mismatch
+ * errors.
+ */
+ return intel_power_well_refcount(power_well);
+}
+
+const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
+ .sync_hw = i9xx_power_well_sync_hw_noop,
+ .enable = i9xx_always_on_power_well_noop,
+ .disable = i9xx_always_on_power_well_noop,
+ .is_enabled = i9xx_always_on_power_well_enabled,
+};
+
+const struct i915_power_well_ops chv_pipe_power_well_ops = {
+ .sync_hw = chv_pipe_power_well_sync_hw,
+ .enable = chv_pipe_power_well_enable,
+ .disable = chv_pipe_power_well_disable,
+ .is_enabled = chv_pipe_power_well_enabled,
+};
+
+const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
+ .sync_hw = i9xx_power_well_sync_hw_noop,
+ .enable = chv_dpio_cmn_power_well_enable,
+ .disable = chv_dpio_cmn_power_well_disable,
+ .is_enabled = vlv_power_well_enabled,
+};
+
+const struct i915_power_well_ops i830_pipes_power_well_ops = {
+ .sync_hw = i830_pipes_power_well_sync_hw,
+ .enable = i830_pipes_power_well_enable,
+ .disable = i830_pipes_power_well_disable,
+ .is_enabled = i830_pipes_power_well_enabled,
+};
+
+static const struct i915_power_well_regs hsw_power_well_regs = {
+ .bios = HSW_PWR_WELL_CTL1,
+ .driver = HSW_PWR_WELL_CTL2,
+ .kvmr = HSW_PWR_WELL_CTL3,
+ .debug = HSW_PWR_WELL_CTL4,
+};
+
+const struct i915_power_well_ops hsw_power_well_ops = {
+ .regs = &hsw_power_well_regs,
+ .sync_hw = hsw_power_well_sync_hw,
+ .enable = hsw_power_well_enable,
+ .disable = hsw_power_well_disable,
+ .is_enabled = hsw_power_well_enabled,
+};
+
+const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
+ .sync_hw = i9xx_power_well_sync_hw_noop,
+ .enable = gen9_dc_off_power_well_enable,
+ .disable = gen9_dc_off_power_well_disable,
+ .is_enabled = gen9_dc_off_power_well_enabled,
+};
+
+const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
+ .sync_hw = i9xx_power_well_sync_hw_noop,
+ .enable = bxt_dpio_cmn_power_well_enable,
+ .disable = bxt_dpio_cmn_power_well_disable,
+ .is_enabled = bxt_dpio_cmn_power_well_enabled,
+};
+
+const struct i915_power_well_ops vlv_display_power_well_ops = {
+ .sync_hw = i9xx_power_well_sync_hw_noop,
+ .enable = vlv_display_power_well_enable,
+ .disable = vlv_display_power_well_disable,
+ .is_enabled = vlv_power_well_enabled,
+};
+
+const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
+ .sync_hw = i9xx_power_well_sync_hw_noop,
+ .enable = vlv_dpio_cmn_power_well_enable,
+ .disable = vlv_dpio_cmn_power_well_disable,
+ .is_enabled = vlv_power_well_enabled,
+};
+
+const struct i915_power_well_ops vlv_dpio_power_well_ops = {
+ .sync_hw = i9xx_power_well_sync_hw_noop,
+ .enable = vlv_power_well_enable,
+ .disable = vlv_power_well_disable,
+ .is_enabled = vlv_power_well_enabled,
+};
+
+static const struct i915_power_well_regs icl_aux_power_well_regs = {
+ .bios = ICL_PWR_WELL_CTL_AUX1,
+ .driver = ICL_PWR_WELL_CTL_AUX2,
+ .debug = ICL_PWR_WELL_CTL_AUX4,
+};
+
+const struct i915_power_well_ops icl_aux_power_well_ops = {
+ .regs = &icl_aux_power_well_regs,
+ .sync_hw = hsw_power_well_sync_hw,
+ .enable = icl_aux_power_well_enable,
+ .disable = icl_aux_power_well_disable,
+ .is_enabled = hsw_power_well_enabled,
+};
+
+static const struct i915_power_well_regs icl_ddi_power_well_regs = {
+ .bios = ICL_PWR_WELL_CTL_DDI1,
+ .driver = ICL_PWR_WELL_CTL_DDI2,
+ .debug = ICL_PWR_WELL_CTL_DDI4,
+};
+
+const struct i915_power_well_ops icl_ddi_power_well_ops = {
+ .regs = &icl_ddi_power_well_regs,
+ .sync_hw = hsw_power_well_sync_hw,
+ .enable = hsw_power_well_enable,
+ .disable = hsw_power_well_disable,
+ .is_enabled = hsw_power_well_enabled,
+};
+
+const struct i915_power_well_ops tgl_tc_cold_off_ops = {
+ .sync_hw = tgl_tc_cold_off_power_well_sync_hw,
+ .enable = tgl_tc_cold_off_power_well_enable,
+ .disable = tgl_tc_cold_off_power_well_disable,
+ .is_enabled = tgl_tc_cold_off_power_well_is_enabled,
+};
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.h b/drivers/gpu/drm/i915/display/intel_display_power_well.h
index 9a3756fdcf7f..d0624642dcb6 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.h
@@ -8,10 +8,23 @@
#include <linux/types.h>
#include "intel_display.h"
+#include "intel_display_power.h"
struct drm_i915_private;
struct i915_power_well;
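+/* Iterate over all power wells of the device, in array order. */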
+#define for_each_power_well(__dev_priv, __power_well) \
+ for ((__power_well) = (__dev_priv)->power_domains.power_wells; \
+ (__power_well) - (__dev_priv)->power_domains.power_wells < \
+ (__dev_priv)->power_domains.power_well_count; \
+ (__power_well)++)
+
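+/* Same iteration, from the last power well back to the first. */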
+#define for_each_power_well_reverse(__dev_priv, __power_well) \
+ for ((__power_well) = (__dev_priv)->power_domains.power_wells + \
+ (__dev_priv)->power_domains.power_well_count - 1; \
+ (__power_well) - (__dev_priv)->power_domains.power_wells >= 0; \
+ (__power_well)--)
+
/*
* i915_power_well_id:
*
@@ -20,7 +33,7 @@ struct i915_power_well;
* wells must be assigned DISP_PW_ID_NONE.
*/
enum i915_power_well_id {
- DISP_PW_ID_NONE,
+ DISP_PW_ID_NONE = 0, /* must be kept zero */
VLV_DISP_PW_DISP2D,
BXT_DISP_PW_DPIO_CMN_A,
@@ -36,45 +49,13 @@ enum i915_power_well_id {
TGL_DISP_PW_TC_COLD_OFF,
};
-struct i915_power_well_regs {
- i915_reg_t bios;
- i915_reg_t driver;
- i915_reg_t kvmr;
- i915_reg_t debug;
-};
-
-struct i915_power_well_ops {
- const struct i915_power_well_regs *regs;
- /*
- * Synchronize the well's hw state to match the current sw state, for
- * example enable/disable it based on the current refcount. Called
- * during driver init and resume time, possibly after first calling
- * the enable/disable handlers.
- */
- void (*sync_hw)(struct drm_i915_private *i915,
- struct i915_power_well *power_well);
- /*
- * Enable the well and resources that depend on it (for example
- * interrupts located on the well). Called after the 0->1 refcount
- * transition.
- */
- void (*enable)(struct drm_i915_private *i915,
- struct i915_power_well *power_well);
- /*
- * Disable the well and resources that depend on it. Called after
- * the 1->0 refcount transition.
- */
- void (*disable)(struct drm_i915_private *i915,
- struct i915_power_well *power_well);
- /* Returns the hw enabled state. */
- bool (*is_enabled)(struct drm_i915_private *i915,
- struct i915_power_well *power_well);
-};
-
-struct i915_power_well_desc {
+struct i915_power_well_instance {
const char *name;
- bool always_on;
- u64 domains;
+ const struct i915_power_domain_list {
+ const enum intel_display_power_domain *list;
+ u8 count;
+ } *domain_list;
+
/* unique identifier for this power well */
enum i915_power_well_id id;
/*
@@ -98,33 +79,45 @@ struct i915_power_well_desc {
* control/status registers.
*/
u8 idx;
- /* Mask of pipes whose IRQ logic is backed by the pw */
- u8 irq_pipe_mask;
- /*
- * Instead of waiting for the status bit to ack enables,
- * just wait a specific amount of time and then consider
- * the well enabled.
- */
- u16 fixed_enable_delay;
- /* The pw is backing the VGA functionality */
- bool has_vga:1;
- bool has_fuses:1;
- /*
- * The pw is for an ICL+ TypeC PHY port in
- * Thunderbolt mode.
- */
- bool is_tc_tbt:1;
} hsw;
};
+};
+
+struct i915_power_well_desc {
const struct i915_power_well_ops *ops;
+ const struct i915_power_well_instance_list {
+ const struct i915_power_well_instance *list;
+ u8 count;
+ } *instances;
+
+ /* Mask of pipes whose IRQ logic is backed by the pw */
+ u16 irq_pipe_mask:4;
+ u16 always_on:1;
+ /*
+ * Instead of waiting for the status bit to ack enables,
+ * just wait a specific amount of time and then consider
+ * the well enabled.
+ */
+ u16 fixed_enable_delay:1;
+ /* The pw is backing the VGA functionality */
+ u16 has_vga:1;
+ u16 has_fuses:1;
+ /*
+ * The pw is for an ICL+ TypeC PHY port in
+ * Thunderbolt mode.
+ */
+ u16 is_tc_tbt:1;
};
struct i915_power_well {
const struct i915_power_well_desc *desc;
+ struct intel_power_domain_mask domains;
/* power well enable/disable usage count */
int count;
/* cached hw enabled state */
bool hw_enabled;
+ /* index into desc->instances->list */
+ u8 instance_idx;
};
struct i915_power_well *lookup_power_well(struct drm_i915_private *i915,
@@ -147,7 +140,34 @@ bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
enum i915_power_well_id power_well_id);
bool intel_power_well_is_always_on(struct i915_power_well *power_well);
const char *intel_power_well_name(struct i915_power_well *power_well);
-u64 intel_power_well_domains(struct i915_power_well *power_well);
+struct intel_power_domain_mask *intel_power_well_domains(struct i915_power_well *power_well);
int intel_power_well_refcount(struct i915_power_well *power_well);
+void chv_phy_powergate_lanes(struct intel_encoder *encoder,
+ bool override, unsigned int mask);
+bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
+ enum dpio_channel ch, bool override);
+
+void gen9_enable_dc5(struct drm_i915_private *dev_priv);
+void skl_enable_dc6(struct drm_i915_private *dev_priv);
+void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv);
+void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state);
+void gen9_disable_dc_states(struct drm_i915_private *dev_priv);
+void bxt_enable_dc9(struct drm_i915_private *dev_priv);
+void bxt_disable_dc9(struct drm_i915_private *dev_priv);
+
+extern const struct i915_power_well_ops i9xx_always_on_power_well_ops;
+extern const struct i915_power_well_ops chv_pipe_power_well_ops;
+extern const struct i915_power_well_ops chv_dpio_cmn_power_well_ops;
+extern const struct i915_power_well_ops i830_pipes_power_well_ops;
+extern const struct i915_power_well_ops hsw_power_well_ops;
+extern const struct i915_power_well_ops gen9_dc_off_power_well_ops;
+extern const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops;
+extern const struct i915_power_well_ops vlv_display_power_well_ops;
+extern const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops;
+extern const struct i915_power_well_ops vlv_dpio_power_well_ops;
+extern const struct i915_power_well_ops icl_aux_power_well_ops;
+extern const struct i915_power_well_ops icl_ddi_power_well_ops;
+extern const struct i915_power_well_ops tgl_tc_cold_off_ops;
+
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c
index 257cf662f9f4..34d00f5aff25 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc.c
@@ -52,6 +52,10 @@
#define DISPLAY_VER12_DMC_MAX_FW_SIZE ICL_DMC_MAX_FW_SIZE
+#define DG2_DMC_PATH DMC_PATH(dg2, 2, 06)
+#define DG2_DMC_VERSION_REQUIRED DMC_VERSION(2, 06)
+MODULE_FIRMWARE(DG2_DMC_PATH);
+
#define ADLP_DMC_PATH DMC_PATH(adlp, 2, 16)
#define ADLP_DMC_VERSION_REQUIRED DMC_VERSION(2, 16)
MODULE_FIRMWARE(ADLP_DMC_PATH);
@@ -374,6 +378,44 @@ static void dmc_set_fw_offset(struct intel_dmc *dmc,
}
}
+static bool dmc_mmio_addr_sanity_check(struct intel_dmc *dmc,
+ const u32 *mmioaddr, u32 mmio_count,
+ int header_ver, u8 dmc_id)
+{
+ struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), dmc);
+ u32 start_range, end_range;
+ int i;
+
+ if (dmc_id >= DMC_FW_MAX) {
+ drm_warn(&i915->drm, "Unsupported firmware id %u\n", dmc_id);
+ return false;
+ }
+
+ if (header_ver == 1) {
+ start_range = DMC_MMIO_START_RANGE;
+ end_range = DMC_MMIO_END_RANGE;
+ } else if (dmc_id == DMC_FW_MAIN) {
+ start_range = TGL_MAIN_MMIO_START;
+ end_range = TGL_MAIN_MMIO_END;
+ } else if (DISPLAY_VER(i915) >= 13) {
+ start_range = ADLP_PIPE_MMIO_START;
+ end_range = ADLP_PIPE_MMIO_END;
+ } else if (DISPLAY_VER(i915) >= 12) {
+ start_range = TGL_PIPE_MMIO_START(dmc_id);
+ end_range = TGL_PIPE_MMIO_END(dmc_id);
+ } else {
+ drm_warn(&i915->drm, "Unknown mmio range for sanity check\n");
+ return false;
+ }
+
+ for (i = 0; i < mmio_count; i++) {
+ if (mmioaddr[i] < start_range || mmioaddr[i] > end_range)
+ return false;
+ }
+
+ return true;
+}
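
Concretely, with the TGL ranges defined later in intel_dmc_regs.h, a version-2 main-firmware blob that tried to program 0x92000 would now be rejected:

/*
 * Worked example (ranges from intel_dmc_regs.h below):
 *   header_ver == 2, dmc_id == DMC_FW_MAIN, mmioaddr[i] == 0x92000
 *   -> outside TGL_MAIN_MMIO_START..TGL_MAIN_MMIO_END (0x8F000..0x8FFFF)
 *   -> dmc_mmio_addr_sanity_check() returns false, firmware is dropped.
 */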
+
static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
const struct intel_dmc_header_base *dmc_header,
size_t rem_size, u8 dmc_id)
@@ -443,6 +485,12 @@ static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
return 0;
}
+ if (!dmc_mmio_addr_sanity_check(dmc, mmioaddr, mmio_count,
+ dmc_header->header_ver, dmc_id)) {
+ drm_err(&i915->drm, "DMC firmware has wrong MMIO addresses\n");
+ return 0;
+ }
+
for (i = 0; i < mmio_count; i++) {
dmc_info->mmioaddr[i] = _MMIO(mmioaddr[i]);
dmc_info->mmiodata[i] = mmiodata[i];
@@ -688,7 +736,11 @@ void intel_dmc_ucode_init(struct drm_i915_private *dev_priv)
*/
intel_dmc_runtime_pm_get(dev_priv);
- if (IS_ALDERLAKE_P(dev_priv)) {
+ if (IS_DG2(dev_priv)) {
+ dmc->fw_path = DG2_DMC_PATH;
+ dmc->required_version = DG2_DMC_VERSION_REQUIRED;
+ dmc->max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE;
+ } else if (IS_ALDERLAKE_P(dev_priv)) {
dmc->fw_path = ADLP_DMC_PATH;
dmc->required_version = ADLP_DMC_VERSION_REQUIRED;
dmc->max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE;
diff --git a/drivers/gpu/drm/i915/display/intel_dmc_regs.h b/drivers/gpu/drm/i915/display/intel_dmc_regs.h
index d65e698832eb..67e14eb96a7a 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_dmc_regs.h
@@ -16,7 +16,23 @@
#define DMC_LAST_WRITE _MMIO(0x8F034)
#define DMC_LAST_WRITE_VALUE 0xc003b400
#define DMC_MMIO_START_RANGE 0x80000
-#define DMC_MMIO_END_RANGE 0x8FFFF
+#define DMC_MMIO_END_RANGE 0x8FFFF
+#define DMC_V1_MMIO_START_RANGE 0x80000
+#define TGL_MAIN_MMIO_START 0x8F000
+#define TGL_MAIN_MMIO_END 0x8FFFF
+#define _TGL_PIPEA_MMIO_START 0x92000
+#define _TGL_PIPEA_MMIO_END 0x93FFF
+#define _TGL_PIPEB_MMIO_START 0x96000
+#define _TGL_PIPEB_MMIO_END 0x97FFF
+#define ADLP_PIPE_MMIO_START 0x5F000
+#define ADLP_PIPE_MMIO_END 0x5FFFF
+
+#define TGL_PIPE_MMIO_START(dmc_id) _PICK_EVEN(((dmc_id) - 1), _TGL_PIPEA_MMIO_START,\
+ _TGL_PIPEB_MMIO_START)
+
+#define TGL_PIPE_MMIO_END(dmc_id) _PICK_EVEN(((dmc_id) - 1), _TGL_PIPEA_MMIO_END,\
+ _TGL_PIPEB_MMIO_END)
+
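
As a side note, _PICK_EVEN() is the usual i915_reg.h linear-interpolation helper, so the per-pipe macros above resolve as in this sketch (assuming its standard (a) + i * (b - a) expansion):

/*
 * TGL_PIPE_MMIO_START(1) -> index 0 -> 0x92000 (pipe A)
 * TGL_PIPE_MMIO_START(2) -> index 1 -> 0x92000 + (0x96000 - 0x92000)
 *                                    = 0x96000 (pipe B)
 */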
#define SKL_DMC_DC3_DC5_COUNT _MMIO(0x80030)
#define SKL_DMC_DC5_DC6_COUNT _MMIO(0x8002C)
#define BXT_DMC_DC3_DC5_COUNT _MMIO(0x80038)
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index 97cf3cac0105..fb6cf30ee628 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -97,6 +97,14 @@
#define INTEL_EDP_BRIGHTNESS_OPTIMIZATION_1 0x359
+enum intel_dp_aux_backlight_modparam {
+ INTEL_DP_AUX_BACKLIGHT_AUTO = -1,
+ INTEL_DP_AUX_BACKLIGHT_OFF = 0,
+ INTEL_DP_AUX_BACKLIGHT_ON = 1,
+ INTEL_DP_AUX_BACKLIGHT_FORCE_VESA = 2,
+ INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL = 3,
+};
+
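
Moving the enum above the HDR capability check lets the check reference it; the values correspond directly to the i915.enable_dpcd_backlight module parameter:

/*
 * e.g. on the kernel command line (assumed usage, not part of this patch):
 *   i915.enable_dpcd_backlight=2   force the VESA DPCD interface
 *   i915.enable_dpcd_backlight=3   force the Intel proprietary interface
 */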
/* Intel EDP backlight callbacks */
static bool
intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector)
@@ -126,6 +134,24 @@ intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector)
return false;
}
+ /*
+ * Without HDR static metadata there is no way to detect at
+ * runtime the range used for nits-based control. For now, do
+ * not use the Intel proprietary eDP backlight control if this
+ * data is missing from the panel EDID. If we ever find a panel
+ * that supports only nits-based control but doesn't provide
+ * HDR static metadata, we will need to start maintaining a
+ * table of ranges for such panels.
+ */
+ if (i915->params.enable_dpcd_backlight != INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL &&
+ !(connector->base.hdr_sink_metadata.hdmi_type1.metadata_type &
+ BIT(HDMI_STATIC_METADATA_TYPE1))) {
+ drm_info(&i915->drm,
+ "Panel is missing HDR static metadata. Possible support for Intel HDR backlight interface is not used. If your backlight controls don't work try booting with i915.enable_dpcd_backlight=%d. needs this, please file a _new_ bug report on drm/i915, see " FDO_BUG_URL " for details.\n",
+ INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL);
+ return false;
+ }
+
panel->backlight.edp.intel.sdr_uses_aux =
tcon_cap[2] & INTEL_EDP_SDR_TCON_BRIGHTNESS_AUX_CAP;
@@ -413,14 +439,6 @@ static const struct intel_panel_bl_funcs intel_dp_vesa_bl_funcs = {
.get = intel_dp_aux_vesa_get_backlight,
};
-enum intel_dp_aux_backlight_modparam {
- INTEL_DP_AUX_BACKLIGHT_AUTO = -1,
- INTEL_DP_AUX_BACKLIGHT_OFF = 0,
- INTEL_DP_AUX_BACKLIGHT_ON = 1,
- INTEL_DP_AUX_BACKLIGHT_FORCE_VESA = 2,
- INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL = 3,
-};
-
int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 26f9e2b748e4..9feaf1a589f3 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -82,19 +82,8 @@ static bool intel_dp_read_lttpr_common_caps(struct intel_dp *intel_dp,
const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
int ret;
- if (intel_dp_is_edp(intel_dp))
- return false;
-
- /*
- * Detecting LTTPRs must be avoided on platforms with an AUX timeout
- * period < 3.2ms. (see DP Standard v2.0, 2.11.2, 3.6.6.1).
- */
- if (DISPLAY_VER(i915) < 10 || IS_GEMINILAKE(i915))
- return false;
-
ret = drm_dp_read_lttpr_common_caps(&intel_dp->aux, dpcd,
intel_dp->lttpr_common_caps);
if (ret < 0)
@@ -197,13 +186,25 @@ static int intel_dp_init_lttpr(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEI
*/
int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp)
{
- u8 dpcd[DP_RECEIVER_CAP_SIZE];
- int lttpr_count;
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ int lttpr_count = 0;
- if (drm_dp_read_dpcd_caps(&intel_dp->aux, dpcd))
- return -EIO;
+ /*
+ * Detecting LTTPRs must be avoided on platforms with an AUX timeout
+ * period < 3.2ms. (see DP Standard v2.0, 2.11.2, 3.6.6.1).
+ */
+ if (!intel_dp_is_edp(intel_dp) &&
+ (DISPLAY_VER(i915) >= 10 && !IS_GEMINILAKE(i915))) {
+ u8 dpcd[DP_RECEIVER_CAP_SIZE];
+
+ if (drm_dp_dpcd_probe(&intel_dp->aux, DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV))
+ return -EIO;
- lttpr_count = intel_dp_init_lttpr(intel_dp, dpcd);
+ if (drm_dp_read_dpcd_caps(&intel_dp->aux, dpcd))
+ return -EIO;
+
+ lttpr_count = intel_dp_init_lttpr(intel_dp, dpcd);
+ }
/*
* The DPTX shall read the DPRX caps after LTTPR detection, so re-read
diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
index 44edeb2e55c0..cc6abe761f5e 100644
--- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
@@ -24,6 +24,7 @@
#include "intel_ddi.h"
#include "intel_ddi_buf_trans.h"
#include "intel_de.h"
+#include "intel_display_power_well.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dpio_phy.h"
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c
index 95b9d327ed4d..6eef0b8a91eb 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll.c
@@ -18,7 +18,10 @@
#include "vlv_sideband.h"
struct intel_dpll_funcs {
- int (*crtc_compute_clock)(struct intel_crtc_state *crtc_state);
+ int (*crtc_compute_clock)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+ int (*crtc_get_shared_dpll)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
};
struct intel_limit {
@@ -759,8 +762,8 @@ chv_find_best_dpll(const struct intel_limit *limit,
bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
struct dpll *best_clock)
{
- int refclk = 100000;
const struct intel_limit *limit = &intel_limits_bxt;
+ int refclk = 100000;
return chv_find_best_dpll(limit, crtc_state,
crtc_state->port_clock, refclk,
@@ -927,32 +930,48 @@ static void i8xx_compute_dpll(struct intel_crtc_state *crtc_state,
crtc_state->dpll_hw_state.dpll = dpll;
}
-static int hsw_crtc_compute_clock(struct intel_crtc_state *crtc_state)
+static int hsw_crtc_compute_clock(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_atomic_state *state =
- to_intel_atomic_state(crtc_state->uapi.state);
+ return 0;
+}
+
+static int hsw_crtc_get_shared_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
struct intel_encoder *encoder =
intel_get_crtc_new_encoder(state, crtc_state);
-
- if (IS_DG2(dev_priv))
- return intel_mpllb_calc_state(crtc_state, encoder);
+ int ret;
if (DISPLAY_VER(dev_priv) < 11 &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
return 0;
- if (!intel_reserve_shared_dplls(state, crtc, encoder)) {
+ ret = intel_reserve_shared_dplls(state, crtc, encoder);
+ if (ret) {
drm_dbg_kms(&dev_priv->drm,
"failed to find PLL for pipe %c\n",
pipe_name(crtc->pipe));
- return -EINVAL;
+ return ret;
}
return 0;
}
+static int dg2_crtc_compute_clock(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_encoder *encoder =
+ intel_get_crtc_new_encoder(state, crtc_state);
+
+ return intel_mpllb_calc_state(crtc_state, encoder);
+}
+
static bool ilk_needs_fb_cb_tune(const struct dpll *dpll, int factor)
{
return dpll->m < factor * dpll->n;
@@ -1068,18 +1087,15 @@ static void ilk_compute_dpll(struct intel_crtc_state *crtc_state,
crtc_state->dpll_hw_state.dpll = dpll;
}
-static int ilk_crtc_compute_clock(struct intel_crtc_state *crtc_state)
+static int ilk_crtc_compute_clock(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_atomic_state *state =
- to_intel_atomic_state(crtc_state->uapi.state);
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_limit *limit;
int refclk = 120000;
- memset(&crtc_state->dpll_hw_state, 0,
- sizeof(crtc_state->dpll_hw_state));
-
/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
if (!crtc_state->has_pch_encoder)
return 0;
@@ -1118,11 +1134,27 @@ static int ilk_crtc_compute_clock(struct intel_crtc_state *crtc_state)
ilk_compute_dpll(crtc_state, &crtc_state->dpll,
&crtc_state->dpll);
- if (!intel_reserve_shared_dplls(state, crtc, NULL)) {
+ return 0;
+}
+
+static int ilk_crtc_get_shared_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ int ret;
+
+ /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
+ if (!crtc_state->has_pch_encoder)
+ return 0;
+
+ ret = intel_reserve_shared_dplls(state, crtc, NULL);
+ if (ret) {
drm_dbg_kms(&dev_priv->drm,
"failed to find PLL for pipe %c\n",
pipe_name(crtc->pipe));
- return -EINVAL;
+ return ret;
}
return 0;
@@ -1163,14 +1195,14 @@ void chv_compute_dpll(struct intel_crtc_state *crtc_state)
(crtc_state->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
}
-static int chv_crtc_compute_clock(struct intel_crtc_state *crtc_state)
+static int chv_crtc_compute_clock(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- int refclk = 100000;
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_limit *limit = &intel_limits_chv;
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
-
- memset(&crtc_state->dpll_hw_state, 0,
- sizeof(crtc_state->dpll_hw_state));
+ int refclk = 100000;
if (!crtc_state->clock_set &&
!chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
@@ -1184,14 +1216,14 @@ static int chv_crtc_compute_clock(struct intel_crtc_state *crtc_state)
return 0;
}
-static int vlv_crtc_compute_clock(struct intel_crtc_state *crtc_state)
+static int vlv_crtc_compute_clock(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- int refclk = 100000;
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_limit *limit = &intel_limits_vlv;
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
-
- memset(&crtc_state->dpll_hw_state, 0,
- sizeof(crtc_state->dpll_hw_state));
+ int refclk = 100000;
if (!crtc_state->clock_set &&
!vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
@@ -1205,16 +1237,15 @@ static int vlv_crtc_compute_clock(struct intel_crtc_state *crtc_state)
return 0;
}
-static int g4x_crtc_compute_clock(struct intel_crtc_state *crtc_state)
+static int g4x_crtc_compute_clock(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_limit *limit;
int refclk = 96000;
- memset(&crtc_state->dpll_hw_state, 0,
- sizeof(crtc_state->dpll_hw_state));
-
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(dev_priv)) {
refclk = dev_priv->vbt.lvds_ssc_freq;
@@ -1251,16 +1282,15 @@ static int g4x_crtc_compute_clock(struct intel_crtc_state *crtc_state)
return 0;
}
-static int pnv_crtc_compute_clock(struct intel_crtc_state *crtc_state)
+static int pnv_crtc_compute_clock(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_limit *limit;
int refclk = 96000;
- memset(&crtc_state->dpll_hw_state, 0,
- sizeof(crtc_state->dpll_hw_state));
-
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(dev_priv)) {
refclk = dev_priv->vbt.lvds_ssc_freq;
@@ -1288,16 +1318,15 @@ static int pnv_crtc_compute_clock(struct intel_crtc_state *crtc_state)
return 0;
}
-static int i9xx_crtc_compute_clock(struct intel_crtc_state *crtc_state)
+static int i9xx_crtc_compute_clock(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_limit *limit;
int refclk = 96000;
- memset(&crtc_state->dpll_hw_state, 0,
- sizeof(crtc_state->dpll_hw_state));
-
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(dev_priv)) {
refclk = dev_priv->vbt.lvds_ssc_freq;
@@ -1325,16 +1354,15 @@ static int i9xx_crtc_compute_clock(struct intel_crtc_state *crtc_state)
return 0;
}
-static int i8xx_crtc_compute_clock(struct intel_crtc_state *crtc_state)
+static int i8xx_crtc_compute_clock(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_limit *limit;
int refclk = 48000;
- memset(&crtc_state->dpll_hw_state, 0,
- sizeof(crtc_state->dpll_hw_state));
-
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(dev_priv)) {
refclk = dev_priv->vbt.lvds_ssc_freq;
@@ -1364,12 +1392,18 @@ static int i8xx_crtc_compute_clock(struct intel_crtc_state *crtc_state)
return 0;
}
+static const struct intel_dpll_funcs dg2_dpll_funcs = {
+ .crtc_compute_clock = dg2_crtc_compute_clock,
+};
+
static const struct intel_dpll_funcs hsw_dpll_funcs = {
.crtc_compute_clock = hsw_crtc_compute_clock,
+ .crtc_get_shared_dpll = hsw_crtc_get_shared_dpll,
};
static const struct intel_dpll_funcs ilk_dpll_funcs = {
.crtc_compute_clock = ilk_crtc_compute_clock,
+ .crtc_get_shared_dpll = ilk_crtc_get_shared_dpll,
};
static const struct intel_dpll_funcs chv_dpll_funcs = {
@@ -1396,18 +1430,54 @@ static const struct intel_dpll_funcs i8xx_dpll_funcs = {
.crtc_compute_clock = i8xx_crtc_compute_clock,
};
-int intel_dpll_crtc_compute_clock(struct intel_crtc_state *crtc_state)
+int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ drm_WARN_ON(&i915->drm, !intel_crtc_needs_modeset(crtc_state));
+
+ if (drm_WARN_ON(&i915->drm, crtc_state->shared_dpll))
+ return 0;
- return i915->dpll_funcs->crtc_compute_clock(crtc_state);
+ memset(&crtc_state->dpll_hw_state, 0,
+ sizeof(crtc_state->dpll_hw_state));
+
+ if (!crtc_state->hw.enable)
+ return 0;
+
+ return i915->dpll_funcs->crtc_compute_clock(state, crtc);
+}
+
+int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ drm_WARN_ON(&i915->drm, !intel_crtc_needs_modeset(crtc_state));
+
+ if (drm_WARN_ON(&i915->drm, crtc_state->shared_dpll))
+ return 0;
+
+ if (!crtc_state->hw.enable)
+ return 0;
+
+ if (!i915->dpll_funcs->crtc_get_shared_dpll)
+ return 0;
+
+ return i915->dpll_funcs->crtc_get_shared_dpll(state, crtc);
}
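
A minimal sketch of how the now-split phases are expected to be driven from the atomic check path (the wrapper name is hypothetical, not from this patch): compute the clock state for each CRTC first, then reserve shared DPLLs as a separate step.

static int ex_crtc_check_plls(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	int ret;

	ret = intel_dpll_crtc_compute_clock(state, crtc);
	if (ret)
		return ret;

	/* Platforms without a shared-DPLL hook simply return 0 here. */
	return intel_dpll_crtc_get_shared_dpll(state, crtc);
}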
void
intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv)
{
- if (DISPLAY_VER(dev_priv) >= 9 || HAS_DDI(dev_priv))
+ if (IS_DG2(dev_priv))
+ dev_priv->dpll_funcs = &dg2_dpll_funcs;
+ else if (DISPLAY_VER(dev_priv) >= 9 || HAS_DDI(dev_priv))
dev_priv->dpll_funcs = &hsw_dpll_funcs;
else if (HAS_PCH_SPLIT(dev_priv))
dev_priv->dpll_funcs = &ilk_dpll_funcs;
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.h b/drivers/gpu/drm/i915/display/intel_dpll.h
index 69b06a9e473e..bbc30542f29f 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll.h
@@ -10,12 +10,16 @@
struct dpll;
struct drm_i915_private;
+struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
enum pipe;
void intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv);
-int intel_dpll_crtc_compute_clock(struct intel_crtc_state *crtc_state);
+int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
int vlv_calc_dpll_params(int refclk, struct dpll *clock);
int pnv_calc_dpll_params(int refclk, struct dpll *clock);
int i9xx_calc_dpll_params(int refclk, struct dpll *clock);
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index b7071da4b7e5..22f55574a35c 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -90,9 +90,9 @@ struct intel_shared_dpll_funcs {
struct intel_dpll_mgr {
const struct dpll_info *dpll_info;
- bool (*get_dplls)(struct intel_atomic_state *state,
- struct intel_crtc *crtc,
- struct intel_encoder *encoder);
+ int (*get_dplls)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder);
void (*put_dplls)(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void (*update_active_dpll)(struct intel_atomic_state *state,
@@ -514,9 +514,9 @@ static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
udelay(200);
}
-static bool ibx_get_dpll(struct intel_atomic_state *state,
- struct intel_crtc *crtc,
- struct intel_encoder *encoder)
+static int ibx_get_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
{
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
@@ -541,7 +541,7 @@ static bool ibx_get_dpll(struct intel_atomic_state *state,
}
if (!pll)
- return false;
+ return -EINVAL;
/* reference the pll */
intel_reference_shared_dpll(state, crtc,
@@ -549,7 +549,7 @@ static bool ibx_get_dpll(struct intel_atomic_state *state,
crtc_state->shared_dpll = pll;
- return true;
+ return 0;
}
static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
@@ -584,7 +584,7 @@ static const struct intel_dpll_mgr pch_pll_mgr = {
};
static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll)
+ struct intel_shared_dpll *pll)
{
const enum intel_dpll_id id = pll->info->id;
@@ -1060,16 +1060,13 @@ static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
return link_clock * 2;
}
-static bool hsw_get_dpll(struct intel_atomic_state *state,
- struct intel_crtc *crtc,
- struct intel_encoder *encoder)
+static int hsw_get_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
{
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- struct intel_shared_dpll *pll;
-
- memset(&crtc_state->dpll_hw_state, 0,
- sizeof(crtc_state->dpll_hw_state));
+ struct intel_shared_dpll *pll = NULL;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
pll = hsw_ddi_wrpll_get_dpll(state, crtc);
@@ -1077,18 +1074,16 @@ static bool hsw_get_dpll(struct intel_atomic_state *state,
pll = hsw_ddi_lcpll_get_dpll(crtc_state);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
pll = hsw_ddi_spll_get_dpll(state, crtc);
- else
- return false;
if (!pll)
- return false;
+ return -EINVAL;
intel_reference_shared_dpll(state, crtc,
pll, &crtc_state->dpll_hw_state);
crtc_state->shared_dpll = pll;
- return true;
+ return 0;
}
static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
@@ -1493,7 +1488,7 @@ static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
params->dco_integer * MHz(1)) * 0x8000, MHz(1));
}
-static bool
+static int
skl_ddi_calculate_wrpll(int clock /* in Hz */,
int ref_clock,
struct skl_wrpll_params *wrpll_params)
@@ -1552,7 +1547,7 @@ skip_remaining_dividers:
if (!ctx.p) {
DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock);
- return false;
+ return -EINVAL;
}
/*
@@ -1564,14 +1559,15 @@ skip_remaining_dividers:
skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock,
ctx.central_freq, p0, p1, p2);
- return true;
+ return 0;
}
-static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
+static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
struct skl_wrpll_params wrpll_params = {};
u32 ctrl1, cfgcr1, cfgcr2;
+ int ret;
/*
* See comment in intel_dpll_hw_state to understand why we always use 0
@@ -1581,10 +1577,10 @@ static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
- if (!skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
- i915->dpll.ref_clks.nssc,
- &wrpll_params))
- return false;
+ ret = skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
+ i915->dpll.ref_clks.nssc, &wrpll_params);
+ if (ret)
+ return ret;
cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
@@ -1596,13 +1592,11 @@ static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
wrpll_params.central_freq;
- memset(&crtc_state->dpll_hw_state, 0,
- sizeof(crtc_state->dpll_hw_state));
-
crtc_state->dpll_hw_state.ctrl1 = ctrl1;
crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
- return true;
+
+ return 0;
}
static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
@@ -1676,7 +1670,7 @@ static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
return dco_freq / (p0 * p1 * p2 * 5);
}
-static bool
+static int
skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
u32 ctrl1;
@@ -1708,12 +1702,9 @@ skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
break;
}
- memset(&crtc_state->dpll_hw_state, 0,
- sizeof(crtc_state->dpll_hw_state));
-
crtc_state->dpll_hw_state.ctrl1 = ctrl1;
- return true;
+ return 0;
}
static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
@@ -1750,33 +1741,23 @@ static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
return link_clock * 2;
}
-static bool skl_get_dpll(struct intel_atomic_state *state,
- struct intel_crtc *crtc,
- struct intel_encoder *encoder)
+static int skl_get_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
{
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll;
- bool bret;
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
- bret = skl_ddi_hdmi_pll_dividers(crtc_state);
- if (!bret) {
- drm_dbg_kms(&i915->drm,
- "Could not get HDMI pll dividers.\n");
- return false;
- }
- } else if (intel_crtc_has_dp_encoder(crtc_state)) {
- bret = skl_ddi_dp_set_dpll_hw_state(crtc_state);
- if (!bret) {
- drm_dbg_kms(&i915->drm,
- "Could not set DP dpll HW state.\n");
- return false;
- }
- } else {
- return false;
- }
+ int ret;
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ ret = skl_ddi_hdmi_pll_dividers(crtc_state);
+ else if (intel_crtc_has_dp_encoder(crtc_state))
+ ret = skl_ddi_dp_set_dpll_hw_state(crtc_state);
+ else
+ ret = -EINVAL;
+ if (ret)
+ return ret;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
pll = intel_find_shared_dpll(state, crtc,
@@ -1789,14 +1770,14 @@ static bool skl_get_dpll(struct intel_atomic_state *state,
BIT(DPLL_ID_SKL_DPLL2) |
BIT(DPLL_ID_SKL_DPLL1));
if (!pll)
- return false;
+ return -EINVAL;
intel_reference_shared_dpll(state, crtc,
pll, &crtc_state->dpll_hw_state);
crtc_state->shared_dpll = pll;
- return true;
+ return 0;
}
static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
@@ -2095,7 +2076,7 @@ static const struct dpll bxt_dp_clk_val[] = {
{ .dot = 432000, .p1 = 3, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
};
-static bool
+static int
bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
struct dpll *clk_div)
{
@@ -2111,12 +2092,12 @@ bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
drm_dbg(&i915->drm, "no PLL dividers found for clock %d pipe %c\n",
crtc_state->port_clock,
pipe_name(crtc->pipe));
- return false;
+ return -EINVAL;
}
drm_WARN_ON(&i915->drm, clk_div->m1 != 2);
- return true;
+ return 0;
}
static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
@@ -2139,8 +2120,8 @@ static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
clk_div->dot != crtc_state->port_clock);
}
-static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
- const struct dpll *clk_div)
+static int bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
+ const struct dpll *clk_div)
{
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
@@ -2149,8 +2130,6 @@ static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
u32 prop_coef, int_coef, gain_ctl, targ_cnt;
u32 lanestagger;
- memset(dpll_hw_state, 0, sizeof(*dpll_hw_state));
-
if (vco >= 6200000 && vco <= 6700000) {
prop_coef = 4;
int_coef = 9;
@@ -2169,7 +2148,7 @@ static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
targ_cnt = 9;
} else {
drm_err(&i915->drm, "Invalid VCO\n");
- return false;
+ return -EINVAL;
}
if (clock > 270000)
@@ -2206,10 +2185,10 @@ static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;
- return true;
+ return 0;
}
-static bool
+static int
bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
struct dpll clk_div = {};
@@ -2219,7 +2198,7 @@ bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
}
-static bool
+static int
bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
struct dpll clk_div = {};
@@ -2246,23 +2225,25 @@ static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
return chv_calc_dpll_params(i915->dpll.ref_clks.nssc, &clock);
}
-static bool bxt_get_dpll(struct intel_atomic_state *state,
- struct intel_crtc *crtc,
- struct intel_encoder *encoder)
+static int bxt_get_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
{
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll;
enum intel_dpll_id id;
+ int ret;
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
- !bxt_ddi_hdmi_set_dpll_hw_state(crtc_state))
- return false;
-
- if (intel_crtc_has_dp_encoder(crtc_state) &&
- !bxt_ddi_dp_set_dpll_hw_state(crtc_state))
- return false;
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ ret = bxt_ddi_hdmi_set_dpll_hw_state(crtc_state);
+ else if (intel_crtc_has_dp_encoder(crtc_state))
+ ret = bxt_ddi_dp_set_dpll_hw_state(crtc_state);
+ else
+ ret = -EINVAL;
+ if (ret)
+ return ret;
/* 1:1 mapping between ports and PLLs */
id = (enum intel_dpll_id) encoder->port;
@@ -2276,7 +2257,7 @@ static bool bxt_get_dpll(struct intel_atomic_state *state,
crtc_state->shared_dpll = pll;
- return true;
+ return 0;
}
static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
@@ -2513,8 +2494,8 @@ static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
/* the following params are unused */
};
-static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
- struct skl_wrpll_params *pll_params)
+static int icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
+ struct skl_wrpll_params *pll_params)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
const struct icl_combo_pll_params *params =
@@ -2527,16 +2508,16 @@ static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
if (clock == params[i].clock) {
*pll_params = params[i].wrpll;
- return true;
+ return 0;
}
}
MISSING_CASE(clock);
- return false;
+ return -EINVAL;
}
-static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
- struct skl_wrpll_params *pll_params)
+static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
+ struct skl_wrpll_params *pll_params)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
@@ -2568,7 +2549,7 @@ static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
}
}
- return true;
+ return 0;
}
static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
@@ -2598,7 +2579,7 @@ static int icl_wrpll_ref_clock(struct drm_i915_private *i915)
return ref_clock;
}
-static bool
+static int
icl_calc_wrpll(struct intel_crtc_state *crtc_state,
struct skl_wrpll_params *wrpll_params)
{
@@ -2633,13 +2614,13 @@ icl_calc_wrpll(struct intel_crtc_state *crtc_state,
}
if (best_div == 0)
- return false;
+ return -EINVAL;
icl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
icl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
pdiv, qdiv, kdiv);
- return true;
+ return 0;
}
static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
@@ -2709,8 +2690,6 @@ static void icl_calc_dpll_state(struct drm_i915_private *i915,
{
u32 dco_fraction = pll_params->dco_fraction;
- memset(pll_state, 0, sizeof(*pll_state));
-
if (ehl_combo_pll_div_frac_wa_needed(i915))
dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2);
@@ -2731,10 +2710,10 @@ static void icl_calc_dpll_state(struct drm_i915_private *i915,
pll_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(i915->vbt.override_afc_startup_val);
}
-static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
- u32 *target_dco_khz,
- struct intel_dpll_hw_state *state,
- bool is_dkl)
+static int icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
+ u32 *target_dco_khz,
+ struct intel_dpll_hw_state *state,
+ bool is_dkl)
{
static const u8 div1_vals[] = { 7, 5, 3, 2 };
u32 dco_min_freq, dco_max_freq;
@@ -2800,19 +2779,19 @@ static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
hsdiv |
MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);
- return true;
+ return 0;
}
}
- return false;
+ return -EINVAL;
}
/*
* The specification for this function uses real numbers, so the math had to be
* adapted to integer-only calculation, that's why it looks so different.
*/
-static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
- struct intel_dpll_hw_state *pll_state)
+static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
+ struct intel_dpll_hw_state *pll_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
int refclk_khz = dev_priv->dpll.ref_clks.nssc;
@@ -2826,14 +2805,14 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
bool use_ssc = false;
bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
bool is_dkl = DISPLAY_VER(dev_priv) >= 12;
+ int ret;
- memset(pll_state, 0, sizeof(*pll_state));
-
- if (!icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
- pll_state, is_dkl)) {
+ ret = icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
+ pll_state, is_dkl);
+ if (ret) {
drm_dbg_kms(&dev_priv->drm,
"Failed to find divisors for clock %d\n", clock);
- return false;
+ return ret;
}
m1div = 2;
@@ -2848,7 +2827,7 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
drm_dbg_kms(&dev_priv->drm,
"Failed to find mdiv for clock %d\n",
clock);
- return false;
+ return -EINVAL;
}
}
m2div_rem = dco_khz % (refclk_khz * m1div);
@@ -2875,7 +2854,7 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
break;
default:
MISSING_CASE(refclk_khz);
- return false;
+ return -EINVAL;
}
/*
@@ -3018,7 +2997,7 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
}
- return true;
+ return 0;
}
static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *dev_priv,
@@ -3140,9 +3119,9 @@ static u32 intel_get_hti_plls(struct drm_i915_private *i915)
return REG_FIELD_GET(HDPORT_DPLL_USED_MASK, i915->hti_state);
}
-static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
- struct intel_crtc *crtc,
- struct intel_encoder *encoder)
+static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
{
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
@@ -3160,11 +3139,10 @@ static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
else
ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
- if (!ret) {
+ if (ret) {
drm_dbg_kms(&dev_priv->drm,
"Could not calculate combo PHY PLL state.\n");
-
- return false;
+ return ret;
}
icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);
@@ -3209,7 +3187,7 @@ static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
drm_dbg_kms(&dev_priv->drm,
"No combo PHY PLL found for [ENCODER:%d:%s]\n",
encoder->base.base.id, encoder->base.name);
- return false;
+ return -EINVAL;
}
intel_reference_shared_dpll(state, crtc,
@@ -3217,12 +3195,12 @@ static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
icl_update_active_dpll(state, crtc, encoder);
- return true;
+ return 0;
}
-static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
- struct intel_crtc *crtc,
- struct intel_encoder *encoder)
+static int icl_get_tc_phy_dplls(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
@@ -3230,12 +3208,14 @@ static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
struct skl_wrpll_params pll_params = { };
struct icl_port_dpll *port_dpll;
enum intel_dpll_id dpll_id;
+ int ret;
port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
- if (!icl_calc_tbt_pll(crtc_state, &pll_params)) {
+ ret = icl_calc_tbt_pll(crtc_state, &pll_params);
+ if (ret) {
drm_dbg_kms(&dev_priv->drm,
"Could not calculate TBT PLL state.\n");
- return false;
+ return ret;
}
icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);
@@ -3245,14 +3225,15 @@ static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
BIT(DPLL_ID_ICL_TBTPLL));
if (!port_dpll->pll) {
drm_dbg_kms(&dev_priv->drm, "No TBT-ALT PLL found\n");
- return false;
+ return -EINVAL;
}
intel_reference_shared_dpll(state, crtc,
port_dpll->pll, &port_dpll->hw_state);
port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
- if (!icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state)) {
+ ret = icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state);
+ if (ret) {
drm_dbg_kms(&dev_priv->drm,
"Could not calculate MG PHY PLL state.\n");
goto err_unreference_tbt_pll;
@@ -3264,6 +3245,7 @@ static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
&port_dpll->hw_state,
BIT(dpll_id));
if (!port_dpll->pll) {
+ ret = -EINVAL;
drm_dbg_kms(&dev_priv->drm, "No MG PHY PLL found\n");
goto err_unreference_tbt_pll;
}
@@ -3272,18 +3254,18 @@ static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
icl_update_active_dpll(state, crtc, encoder);
- return true;
+ return 0;
err_unreference_tbt_pll:
port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
intel_unreference_shared_dpll(state, crtc, port_dpll->pll);
- return false;
+ return ret;
}
-static bool icl_get_dplls(struct intel_atomic_state *state,
- struct intel_crtc *crtc,
- struct intel_encoder *encoder)
+static int icl_get_dplls(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
@@ -3295,7 +3277,7 @@ static bool icl_get_dplls(struct intel_atomic_state *state,
MISSING_CASE(phy);
- return false;
+ return -EINVAL;
}
static void icl_put_dplls(struct intel_atomic_state *state,
@@ -4081,13 +4063,12 @@ static const struct intel_dpll_mgr adlp_pll_mgr = {
/**
* intel_shared_dpll_init - Initialize shared DPLLs
- * @dev: drm device
+ * @dev_priv: i915 device
*
- * Initialize shared DPLLs for @dev.
+ * Initialize shared DPLLs for @dev_priv.
*/
-void intel_shared_dpll_init(struct drm_device *dev)
+void intel_shared_dpll_init(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
const struct intel_dpll_mgr *dpll_mgr = NULL;
const struct dpll_info *dpll_info;
int i;
@@ -4126,7 +4107,7 @@ void intel_shared_dpll_init(struct drm_device *dev)
dpll_info = dpll_mgr->dpll_info;
for (i = 0; dpll_info[i].name; i++) {
- drm_WARN_ON(dev, i != dpll_info[i].id);
+ drm_WARN_ON(&dev_priv->drm, i != dpll_info[i].id);
dev_priv->dpll.shared_dplls[i].info = &dpll_info[i];
}
@@ -4154,17 +4135,18 @@ void intel_shared_dpll_init(struct drm_device *dev)
* intel_release_shared_dplls().
*
* Returns:
- * True if all required DPLLs were successfully reserved.
+ * 0 if all required DPLLs were successfully reserved,
+ * negative error code otherwise.
*/
-bool intel_reserve_shared_dplls(struct intel_atomic_state *state,
- struct intel_crtc *crtc,
- struct intel_encoder *encoder)
+int intel_reserve_shared_dplls(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
- return false;
+ return -EINVAL;
return dpll_mgr->get_dplls(state, crtc, encoder);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
index ba2fdfce1579..f7c96a1f13c8 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
@@ -37,7 +37,6 @@
__a > __b ? (__a - __b) : (__b - __a); })
enum tc_port;
-struct drm_device;
struct drm_i915_private;
struct intel_atomic_state;
struct intel_crtc;
@@ -337,9 +336,9 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
bool state);
#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
-bool intel_reserve_shared_dplls(struct intel_atomic_state *state,
- struct intel_crtc *crtc,
- struct intel_encoder *encoder);
+int intel_reserve_shared_dplls(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder);
void intel_release_shared_dplls(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
@@ -356,7 +355,7 @@ bool intel_dpll_get_hw_state(struct drm_i915_private *i915,
void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state);
void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state);
void intel_shared_dpll_swap_state(struct intel_atomic_state *state);
-void intel_shared_dpll_init(struct drm_device *dev);
+void intel_shared_dpll_init(struct drm_i915_private *dev_priv);
void intel_dpll_update_ref_clks(struct drm_i915_private *dev_priv);
void intel_dpll_readout_hw_state(struct drm_i915_private *dev_priv);
void intel_dpll_sanitize_state(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index 670835318a1f..bbdc34a23d54 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -811,6 +811,14 @@ static void intel_fbc_program_cfb(struct intel_fbc *fbc)
fbc->funcs->program_cfb(fbc);
}
+static void intel_fbc_program_workarounds(struct intel_fbc *fbc)
+{
+ /* Wa_22014263786:icl,jsl,tgl,dg1,rkl,adls,dg2,adlp */
+ if (DISPLAY_VER(fbc->i915) >= 11)
+ intel_de_rmw(fbc->i915, ILK_DPFC_CHICKEN(fbc->id), 0,
+ DPFC_CHICKEN_FORCE_SLB_INVALIDATION);
+}
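
For readers unfamiliar with the helper, intel_de_rmw(i915, reg, clear, set) is a read-modify-write; with a clear mask of 0 the chicken bit is only OR'ed in. A rough open-coded equivalent, as a sketch:

u32 val;

val = intel_de_read(fbc->i915, ILK_DPFC_CHICKEN(fbc->id));
/* clear mask is 0, so no bits are removed */
val |= DPFC_CHICKEN_FORCE_SLB_INVALIDATION;
intel_de_write(fbc->i915, ILK_DPFC_CHICKEN(fbc->id), val);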
+
static void __intel_fbc_cleanup_cfb(struct intel_fbc *fbc)
{
struct drm_i915_private *i915 = fbc->i915;
@@ -1045,7 +1053,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
struct intel_plane_state *plane_state =
intel_atomic_get_new_plane_state(state, plane);
const struct drm_framebuffer *fb = plane_state->hw.fb;
- struct intel_crtc *crtc = to_intel_crtc(plane_state->uapi.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc);
const struct intel_crtc_state *crtc_state;
struct intel_fbc *fbc = plane->fbc;
@@ -1086,7 +1094,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
*/
if (DISPLAY_VER(i915) >= 12 && crtc_state->has_psr2) {
plane_state->no_fbc_reason = "PSR2 enabled";
- return false;
+ return 0;
}
if (!pixel_format_is_valid(plane_state)) {
@@ -1112,7 +1120,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
if (plane_state->hw.pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE &&
fb->format->has_alpha) {
plane_state->no_fbc_reason = "per-pixel alpha not supported";
- return false;
+ return 0;
}
if (!intel_fbc_hw_tracking_covers_screen(plane_state)) {
@@ -1128,7 +1136,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
if (DISPLAY_VER(i915) >= 9 &&
plane_state->view.color_plane[0].y & 3) {
plane_state->no_fbc_reason = "plane start Y offset misaligned";
- return false;
+ return 0;
}
/* Wa_22010751166: icl, ehl, tgl, dg1, rkl */
@@ -1136,7 +1144,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
(plane_state->view.color_plane[0].y +
(drm_rect_height(&plane_state->uapi.src) >> 16)) & 3) {
plane_state->no_fbc_reason = "plane end Y offset misaligned";
- return false;
+ return 0;
}
/* WaFbcExceedCdClockThreshold:hsw,bdw */
@@ -1462,6 +1470,7 @@ static void __intel_fbc_enable(struct intel_atomic_state *state,
intel_fbc_update_state(state, crtc, plane);
+ intel_fbc_program_workarounds(fbc);
intel_fbc_program_cfb(fbc);
}
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 4de4c174a987..ed5dab8427d6 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -298,7 +298,7 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
* Mailbox interface.
*/
if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) {
- ret = snb_pcode_write(dev_priv, SKL_PCODE_LOAD_HDCP_KEYS, 1);
+ ret = snb_pcode_write(&dev_priv->uncore, SKL_PCODE_LOAD_HDCP_KEYS, 1);
if (ret) {
drm_err(&dev_priv->drm,
"Failed to initiate HDCP key load (%d)\n",
diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index 03398feb6676..d1d1b59102d6 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -75,13 +75,17 @@ const struct drm_display_mode *
intel_panel_downclock_mode(struct intel_connector *connector,
const struct drm_display_mode *adjusted_mode)
{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
const struct drm_display_mode *fixed_mode, *best_mode = NULL;
- int vrefresh = drm_mode_vrefresh(adjusted_mode);
+ int min_vrefresh = i915->vbt.seamless_drrs_min_refresh_rate;
+ int max_vrefresh = drm_mode_vrefresh(adjusted_mode);
/* pick the fixed_mode with the lowest refresh rate at or above the seamless-DRRS minimum */
list_for_each_entry(fixed_mode, &connector->panel.fixed_modes, head) {
- if (drm_mode_vrefresh(fixed_mode) < vrefresh) {
- vrefresh = drm_mode_vrefresh(fixed_mode);
+ int vrefresh = drm_mode_vrefresh(fixed_mode);
+
+ if (vrefresh >= min_vrefresh && vrefresh < max_vrefresh) {
+ max_vrefresh = vrefresh;
best_mode = fixed_mode;
}
}
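
A worked example with hypothetical values: fixed modes at 40, 48 and 60 Hz, an adjusted mode of 60 Hz, and a VBT seamless-DRRS minimum of 45 Hz.

/*
 * 40 Hz: below min_vrefresh (45), skipped.
 * 48 Hz: >= 45 and < 60, becomes best_mode; max_vrefresh drops to 48.
 * 60 Hz: not < 48, skipped.
 * Result: 48 Hz. The old code would have picked 40 Hz, below the
 * panel's seamless minimum.
 */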
diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c
index 64bd4ca0edd4..5a598dd06039 100644
--- a/drivers/gpu/drm/i915/display/intel_pps.c
+++ b/drivers/gpu/drm/i915/display/intel_pps.c
@@ -6,6 +6,7 @@
#include "g4x_dp.h"
#include "i915_drv.h"
#include "intel_de.h"
+#include "intel_display_power_well.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dpll.h"
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 5a55010a9b2f..36356893c7ca 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -891,6 +891,20 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
return false;
}
+ /* Wa_16011303918:adl-p */
+ if (crtc_state->vrr.enable &&
+ IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR2 not enabled, not compatible with HW stepping + VRR\n");
+ return false;
+ }
+
+ if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR2 not enabled, PSR2 SDP indication do not fit in hblank\n");
+ return false;
+ }
+
if (HAS_PSR2_SEL_FETCH(dev_priv)) {
if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
!HAS_PSR_HW_TRACKING(dev_priv)) {
@@ -904,12 +918,12 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
if (!crtc_state->enable_psr2_sel_fetch &&
IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0)) {
drm_dbg_kms(&dev_priv->drm, "PSR2 HW tracking is not supported this Display stepping\n");
- return false;
+ goto unsupported;
}
if (!psr2_granularity_check(intel_dp, crtc_state)) {
drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, SU granularity not compatible\n");
- return false;
+ goto unsupported;
}
if (!crtc_state->enable_psr2_sel_fetch &&
@@ -918,25 +932,15 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
"PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
crtc_hdisplay, crtc_vdisplay,
psr_max_h, psr_max_v);
- return false;
- }
-
- if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
- drm_dbg_kms(&dev_priv->drm,
- "PSR2 not enabled, PSR2 SDP indication do not fit in hblank\n");
- return false;
- }
-
- /* Wa_16011303918:adl-p */
- if (crtc_state->vrr.enable &&
- IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
- drm_dbg_kms(&dev_priv->drm,
- "PSR2 not enabled, not compatible with HW stepping + VRR\n");
- return false;
+ goto unsupported;
}
tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
return true;
+
+unsupported:
+ crtc_state->enable_psr2_sel_fetch = false;
+ return false;
}
void intel_psr_compute_config(struct intel_dp *intel_dp,
@@ -1349,6 +1353,9 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0);
intel_dp->psr.enabled = false;
+ intel_dp->psr.psr2_enabled = false;
+ intel_dp->psr.psr2_sel_fetch_enabled = false;
+ intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
}
/**
@@ -1611,8 +1618,12 @@ exit:
}
static void clip_area_update(struct drm_rect *overlap_damage_area,
- struct drm_rect *damage_area)
+ struct drm_rect *damage_area,
+ struct drm_rect *pipe_src)
{
+ if (!drm_rect_intersect(damage_area, pipe_src))
+ return;
+
if (overlap_damage_area->y1 == -1) {
overlap_damage_area->y1 = damage_area->y1;
overlap_damage_area->y2 = damage_area->y2;
@@ -1678,6 +1689,7 @@ static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *c
int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
struct drm_rect pipe_clip = { .x1 = 0, .y1 = -1, .x2 = INT_MAX, .y2 = -1 };
struct intel_plane_state *new_plane_state, *old_plane_state;
@@ -1701,7 +1713,8 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
*/
for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
new_plane_state, i) {
- struct drm_rect src, damaged_area = { .y1 = -1 };
+ struct drm_rect src, damaged_area = { .x1 = 0, .y1 = -1,
+ .x2 = INT_MAX };
struct drm_atomic_helper_damage_iter iter;
struct drm_rect clip;
@@ -1728,20 +1741,23 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
if (old_plane_state->uapi.visible) {
damaged_area.y1 = old_plane_state->uapi.dst.y1;
damaged_area.y2 = old_plane_state->uapi.dst.y2;
- clip_area_update(&pipe_clip, &damaged_area);
+ clip_area_update(&pipe_clip, &damaged_area,
+ &crtc_state->pipe_src);
}
if (new_plane_state->uapi.visible) {
damaged_area.y1 = new_plane_state->uapi.dst.y1;
damaged_area.y2 = new_plane_state->uapi.dst.y2;
- clip_area_update(&pipe_clip, &damaged_area);
+ clip_area_update(&pipe_clip, &damaged_area,
+ &crtc_state->pipe_src);
}
continue;
} else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha) {
/* If alpha changed mark the whole plane area as damaged */
damaged_area.y1 = new_plane_state->uapi.dst.y1;
damaged_area.y2 = new_plane_state->uapi.dst.y2;
- clip_area_update(&pipe_clip, &damaged_area);
+ clip_area_update(&pipe_clip, &damaged_area,
+ &crtc_state->pipe_src);
continue;
}
@@ -1752,7 +1768,8 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
&new_plane_state->uapi);
drm_atomic_for_each_plane_damage(&iter, &clip) {
if (drm_rect_intersect(&clip, &src))
- clip_area_update(&damaged_area, &clip);
+ clip_area_update(&damaged_area, &clip,
+ &crtc_state->pipe_src);
}
if (damaged_area.y1 == -1)
@@ -1760,7 +1777,20 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
- clip_area_update(&pipe_clip, &damaged_area);
+ clip_area_update(&pipe_clip, &damaged_area, &crtc_state->pipe_src);
+ }
+
+ /*
+ * TODO: For now we are just using full update in case
+ * selective fetch area calculation fails. To optimize this we
+ * should identify cases where this happens and fix the area
+ * calculation for those.
+ */
+ if (pipe_clip.y1 == -1) {
+ drm_info_once(&dev_priv->drm,
+ "Selective fetch area calculation failed in pipe %c\n",
+ pipe_name(crtc->pipe));
+ full_update = true;
}
if (full_update)
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index fc037c027ea5..b8b822ea3755 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -6,6 +6,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_display.h"
+#include "intel_display_power_map.h"
#include "intel_display_types.h"
#include "intel_dp_mst.h"
#include "intel_tc.h"
@@ -61,10 +62,12 @@ bool intel_tc_cold_requires_aux_pw(struct intel_digital_port *dig_port)
static enum intel_display_power_domain
tc_cold_get_power_domain(struct intel_digital_port *dig_port, enum tc_port_mode mode)
{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+
if (mode == TC_PORT_TBT_ALT || !intel_tc_cold_requires_aux_pw(dig_port))
return POWER_DOMAIN_TC_COLD_OFF;
- return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
+ return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch);
}
static intel_wakeref_t
diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
index e4a11c3e3f3e..4b98bab3b890 100644
--- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
@@ -735,7 +735,7 @@ struct lvds_lfp_data_ptr {
} __packed;
struct bdb_lvds_lfp_data_ptrs {
- u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */
+ u8 lvds_entries;
struct lvds_lfp_data_ptr ptr[16];
struct lvds_lfp_data_ptr_table panel_name; /* 156-163? */
} __packed;
@@ -769,6 +769,11 @@ struct lvds_pnp_id {
u8 mfg_year;
} __packed;
+/*
+ * For reference only. fp_timing has variable size so
+ * the data must be accessed using the data table pointers.
+ * Do not use this directly!
+ */
struct lvds_lfp_data_entry {
struct lvds_fp_timing fp_timing;
struct lvds_dvo_timing dvo_timing;
@@ -783,6 +788,23 @@ struct lvds_lfp_panel_name {
u8 name[13];
} __packed;
+struct lvds_lfp_black_border {
+ u8 top; /* 227 */
+ u8 bottom; /* 227 */
+ u8 left; /* 238 */
+ u8 right; /* 238 */
+} __packed;
+
+struct bdb_lvds_lfp_data_tail {
+ struct lvds_lfp_panel_name panel_name[16]; /* 156-163? */
+ u16 scaling_enable; /* 187 */
+ u8 seamless_drrs_min_refresh_rate[16]; /* 188 */
+ u8 pixel_overlap_count[16]; /* 208 */
+ struct lvds_lfp_black_border black_border[16]; /* 227 */
+ u16 dual_lfp_port_sync_enable; /* 231 */
+ u16 gpu_dithering_for_banding_artifacts; /* 245 */
+} __packed;
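
A sketch of how one of the per-panel tail fields might be consumed during VBT parsing; the helper and parameter names are assumed, but the vbt field itself is the one read by intel_panel_downclock_mode() earlier in this series.

static void ex_parse_lfp_tail(struct drm_i915_private *i915,
			      const struct bdb_lvds_lfp_data_tail *tail,
			      int panel_type)
{
	/* panel_type indexes the 16 per-panel entries */
	i915->vbt.seamless_drrs_min_refresh_rate =
		tail->seamless_drrs_min_refresh_rate[panel_type];
}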
+
/*
* Block 43 - LFP Backlight Control Data Block
*/
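
Sketch only: the "for reference only" warning above exists because fp_timing may grow between VBT versions, so per-panel data has to be located through the block 41 pointer tables rather than by indexing struct lvds_lfp_data_entry directly. The member names and the offset base ("bdb") below are illustrative assumptions, not the driver's exact parser.

static const struct lvds_fp_timing *
get_fp_timing(const void *bdb, const struct bdb_lvds_lfp_data_ptrs *ptrs,
	      int panel_type)
{
	/* Assumes lvds_lfp_data_ptr carries an fp_timing pointer table
	 * whose offset is relative to the same base the parser uses. */
	return bdb + ptrs->ptr[panel_type].fp_timing.offset;
}
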
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
index 470fdfd61a0f..ddda468241ef 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
@@ -138,21 +138,21 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
* Alternatively, we can trade that extra information on read/write
* activity with
* args->busy =
- * !dma_resv_test_signaled(obj->resv, true);
+ * !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ);
* to report the overall busyness. This is what the wait-ioctl does.
*
*/
args->busy = 0;
- dma_resv_iter_begin(&cursor, obj->base.resv, true);
+ dma_resv_iter_begin(&cursor, obj->base.resv, DMA_RESV_USAGE_READ);
dma_resv_for_each_fence_unlocked(&cursor, fence) {
if (dma_resv_iter_is_restarted(&cursor))
args->busy = 0;
- if (dma_resv_iter_is_exclusive(&cursor))
- /* Translate the exclusive fence to the READ *and* WRITE engine */
+ if (dma_resv_iter_usage(&cursor) <= DMA_RESV_USAGE_WRITE)
+ /* Translate the write fences to the READ *and* WRITE engine */
args->busy |= busy_check_writer(fence);
else
- /* Translate shared fences to READ set of engines */
+ /* Translate read fences to READ set of engines */
args->busy |= busy_check_reader(fence);
}
dma_resv_iter_end(&cursor);
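
The bool that used to select "all fences" vs "exclusive only" is replaced throughout this series by an explicit usage level, ordered KERNEL < WRITE < READ < BOOKKEEP; iterating at a given level also yields every fence at a lower level. A minimal sketch of the resulting pattern (object_is_idle() is a hypothetical helper, not an i915 function):

#include <linux/dma-resv.h>

static bool object_is_idle(struct dma_resv *resv, bool wait_all)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	bool idle = true;

	/* wait_all -> READ (readers, writers and kernel fences), else WRITE. */
	dma_resv_iter_begin(&cursor, resv, dma_resv_usage_rw(wait_all));
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		if (!dma_fence_is_signaled(fence)) {
			idle = false;
			break;
		}
	}
	dma_resv_iter_end(&cursor);

	return idle;
}
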
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
index 1fd0cc9ca213..0512afdd20d8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
@@ -116,7 +116,8 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
obj->base.resv, NULL, true,
i915_fence_timeout(i915),
I915_FENCE_GFP);
- dma_resv_add_excl_fence(obj->base.resv, &clflush->base.dma);
+ dma_resv_add_fence(obj->base.resv, &clflush->base.dma,
+ DMA_RESV_USAGE_KERNEL);
dma_fence_work_commit(&clflush->base);
/*
* We must have successfully populated the pages(since we are
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
index b2aaf620741e..8949fb0a944f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
@@ -68,7 +68,7 @@ bool __i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
struct intel_memory_region *mr = READ_ONCE(obj->mm.region);
#ifdef CONFIG_LOCKDEP
- GEM_WARN_ON(dma_resv_test_signaled(obj->base.resv, true) &&
+ GEM_WARN_ON(dma_resv_test_signaled(obj->base.resv, DMA_RESV_USAGE_BOOKKEEP) &&
i915_gem_object_evictable(obj));
#endif
return mr && (mr->type == INTEL_MEMORY_LOCAL ||
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 07e816ddfb3d..06b1b188ce5a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -745,30 +745,19 @@ static const struct drm_gem_object_funcs i915_gem_object_funcs = {
/**
* i915_gem_object_get_moving_fence - Get the object's moving fence if any
* @obj: The object whose moving fence to get.
+ * @fence: The resulting fence
*
* A non-signaled moving fence means that there is an async operation
* pending on the object that needs to be waited on before setting up
* any GPU- or CPU PTEs to the object's pages.
*
- * Return: A refcounted pointer to the object's moving fence if any,
- * NULL otherwise.
+ * Return: Negative error code or 0 for success.
*/
-struct dma_fence *
-i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj)
+int i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj,
+ struct dma_fence **fence)
{
- return dma_fence_get(i915_gem_to_ttm(obj)->moving);
-}
-
-void i915_gem_object_set_moving_fence(struct drm_i915_gem_object *obj,
- struct dma_fence *fence)
-{
- struct dma_fence **moving = &i915_gem_to_ttm(obj)->moving;
-
- if (*moving == fence)
- return;
-
- dma_fence_put(*moving);
- *moving = dma_fence_get(fence);
+ return dma_resv_get_singleton(obj->base.resv, DMA_RESV_USAGE_KERNEL,
+ fence);
}
/**
@@ -786,23 +775,16 @@ void i915_gem_object_set_moving_fence(struct drm_i915_gem_object *obj,
int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj,
bool intr)
{
- struct dma_fence *fence = i915_gem_to_ttm(obj)->moving;
- int ret;
+ long ret;
assert_object_held(obj);
- if (!fence)
- return 0;
- ret = dma_fence_wait(fence, intr);
- if (ret)
- return ret;
+ ret = dma_resv_wait_timeout(obj->base.resv, DMA_RESV_USAGE_KERNEL,
+ intr, MAX_SCHEDULE_TIMEOUT);
+ if (!ret)
+ ret = -ETIME;
- if (fence->error)
- return fence->error;
-
- i915_gem_to_ttm(obj)->moving = NULL;
- dma_fence_put(fence);
- return 0;
+ return ret < 0 ? ret : 0;
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
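
The rewritten wait above relies on dma_resv_wait_timeout()'s long-style return convention: positive means the remaining timeout with all matching fences signaled, zero means it timed out, negative is an error such as -ERESTARTSYS. Folding that into a 0/-errno contract looks like this (a sketch of the same pattern, not additional driver code):

#include <linux/dma-resv.h>
#include <linux/sched.h>

static int wait_kernel_fences(struct dma_resv *resv, bool intr)
{
	long ret;

	/* KERNEL usage covers only kernel-internal fences, e.g. object moves. */
	ret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_KERNEL, intr,
				    MAX_SCHEDULE_TIMEOUT);
	if (!ret)
		ret = -ETIME;	/* 0 means the wait timed out */

	return ret < 0 ? ret : 0;
}
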
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 02c37fe4a535..e11d82a9f7c3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -520,12 +520,8 @@ i915_gem_object_finish_access(struct drm_i915_gem_object *obj)
i915_gem_object_unpin_pages(obj);
}
-struct dma_fence *
-i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj);
-
-void i915_gem_object_set_moving_fence(struct drm_i915_gem_object *obj,
- struct dma_fence *fence);
-
+int i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj,
+ struct dma_fence **fence);
int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj,
bool intr);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
index 432ac74ff225..a10716f4e717 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
@@ -467,19 +467,6 @@ out:
return fence;
}
-static int
-prev_deps(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
- struct i915_deps *deps)
-{
- int ret;
-
- ret = i915_deps_add_dependency(deps, bo->moving, ctx);
- if (!ret)
- ret = i915_deps_add_resv(deps, bo->base.resv, ctx);
-
- return ret;
-}
-
/**
* i915_ttm_move - The TTM move callback used by i915.
* @bo: The buffer object.
@@ -534,7 +521,7 @@ int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
struct i915_deps deps;
i915_deps_init(&deps, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
- ret = prev_deps(bo, ctx, &deps);
+ ret = i915_deps_add_resv(&deps, bo->base.resv, ctx);
if (ret) {
i915_refct_sgt_put(dst_rsgt);
return ret;
@@ -637,9 +624,8 @@ int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst,
if (IS_ERR_OR_NULL(copy_fence))
return PTR_ERR_OR_ZERO(copy_fence);
- dma_resv_add_excl_fence(dst_bo->base.resv, copy_fence);
- dma_resv_add_shared_fence(src_bo->base.resv, copy_fence);
-
+ dma_resv_add_fence(dst_bo->base.resv, copy_fence, DMA_RESV_USAGE_WRITE);
+ dma_resv_add_fence(src_bo->base.resv, copy_fence, DMA_RESV_USAGE_READ);
dma_fence_put(copy_fence);
return 0;
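
Attaching the same copy fence with two different usages, WRITE on the destination and READ on the source, follows the reserve-then-add rule that dma_resv_add_fence() requires. A minimal sketch, assuming the caller already holds both reservation locks:

#include <linux/dma-resv.h>

static int publish_copy_fence(struct dma_resv *dst, struct dma_resv *src,
			      struct dma_fence *fence)
{
	int ret;

	/* Slots must be reserved before dma_resv_add_fence() is legal. */
	ret = dma_resv_reserve_fences(dst, 1);
	if (!ret)
		ret = dma_resv_reserve_fences(src, 1);
	if (ret)
		return ret;

	dma_resv_add_fence(dst, fence, DMA_RESV_USAGE_WRITE);
	dma_resv_add_fence(src, fence, DMA_RESV_USAGE_READ);
	return 0;
}
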
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 6d1a71d6404c..094f06b4ce33 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -86,7 +86,7 @@ static bool i915_gem_userptr_invalidate(struct mmu_interval_notifier *mni,
return true;
/* we will unbind on next submission, still have userptr pins */
- r = dma_resv_wait_timeout(obj->base.resv, true, false,
+ r = dma_resv_wait_timeout(obj->base.resv, DMA_RESV_USAGE_BOOKKEEP, false,
MAX_SCHEDULE_TIMEOUT);
if (r <= 0)
drm_err(&i915->drm, "(%ld) failed to wait for idle\n", r);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
index dab3d30c09a0..319936f91ac5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
@@ -40,7 +40,8 @@ i915_gem_object_wait_reservation(struct dma_resv *resv,
struct dma_fence *fence;
long ret = timeout ?: 1;
- dma_resv_iter_begin(&cursor, resv, flags & I915_WAIT_ALL);
+ dma_resv_iter_begin(&cursor, resv,
+ dma_resv_usage_rw(flags & I915_WAIT_ALL));
dma_resv_for_each_fence_unlocked(&cursor, fence) {
ret = i915_gem_object_wait_fence(fence, flags, timeout);
if (ret <= 0)
@@ -117,7 +118,8 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
struct dma_resv_iter cursor;
struct dma_fence *fence;
- dma_resv_iter_begin(&cursor, obj->base.resv, flags & I915_WAIT_ALL);
+ dma_resv_iter_begin(&cursor, obj->base.resv,
+ dma_resv_usage_rw(flags & I915_WAIT_ALL));
dma_resv_for_each_fence_unlocked(&cursor, fence)
i915_gem_fence_wait_priority(fence, attr);
dma_resv_iter_end(&cursor);
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
index a342fd387d4e..62c61af77a42 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
@@ -219,7 +219,8 @@ static int igt_dmabuf_import_same_driver(struct drm_i915_private *i915,
goto out_detach;
}
- timeout = dma_resv_wait_timeout(dmabuf->resv, false, true, 5 * HZ);
+ timeout = dma_resv_wait_timeout(dmabuf->resv, DMA_RESV_USAGE_WRITE,
+ true, 5 * HZ);
if (!timeout) {
pr_err("dmabuf wait for exclusive fence timed out.\n");
timeout = -ETIME;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
index 6f98adb3a103..801af51aff62 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
@@ -220,9 +220,8 @@ static int __igt_lmem_pages_migrate(struct intel_gt *gt,
if (rq) {
err = dma_resv_reserve_fences(obj->base.resv, 1);
if (!err)
- dma_resv_add_excl_fence(obj->base.resv,
- &rq->fence);
- i915_gem_object_set_moving_fence(obj, &rq->fence);
+ dma_resv_add_fence(obj->base.resv, &rq->fence,
+ DMA_RESV_USAGE_KERNEL);
i915_request_put(rq);
}
if (err)
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index c4c2c91a2ee7..5bc93a1ce3e3 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -1221,8 +1221,8 @@ static int __igt_mmap_migrate(struct intel_memory_region **placements,
expand32(POISON_INUSE), &rq);
i915_gem_object_unpin_pages(obj);
if (rq) {
- dma_resv_add_excl_fence(obj->base.resv, &rq->fence);
- i915_gem_object_set_moving_fence(obj, &rq->fence);
+ dma_resv_add_fence(obj->base.resv, &rq->fence,
+ DMA_RESV_USAGE_KERNEL);
i915_request_put(rq);
}
i915_gem_object_unlock(obj);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
index 0c6b9eb724ae..90a440865037 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
@@ -138,7 +138,7 @@ static int gen6_drpc(struct seq_file *m)
}
if (GRAPHICS_VER(i915) <= 7)
- snb_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS, &rc6vids, NULL);
+ snb_pcode_read(gt->uncore, GEN6_PCODE_READ_RC6VIDS, &rc6vids, NULL);
seq_printf(m, "RC1e Enabled: %s\n",
str_yes_no(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
@@ -545,7 +545,7 @@ static int llc_show(struct seq_file *m, void *data)
wakeref = intel_runtime_pm_get(gt->uncore->rpm);
for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
ia_freq = gpu_freq;
- snb_pcode_read(i915, GEN6_PCODE_READ_MIN_FREQ_TABLE,
+ snb_pcode_read(gt->uncore, GEN6_PCODE_READ_MIN_FREQ_TABLE,
&ia_freq, NULL);
seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
intel_gpu_freq(rps,
diff --git a/drivers/gpu/drm/i915/gt/intel_llc.c b/drivers/gpu/drm/i915/gt/intel_llc.c
index 40e2e28ee6c7..14fe65812e42 100644
--- a/drivers/gpu/drm/i915/gt/intel_llc.c
+++ b/drivers/gpu/drm/i915/gt/intel_llc.c
@@ -124,7 +124,6 @@ static void calc_ia_freq(struct intel_llc *llc,
static void gen6_update_ring_freq(struct intel_llc *llc)
{
- struct drm_i915_private *i915 = llc_to_gt(llc)->i915;
struct ia_constants consts;
unsigned int gpu_freq;
@@ -142,7 +141,7 @@ static void gen6_update_ring_freq(struct intel_llc *llc)
unsigned int ia_freq, ring_freq;
calc_ia_freq(llc, gpu_freq, &consts, &ia_freq, &ring_freq);
- snb_pcode_write(i915, GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
+ snb_pcode_write(llc_to_gt(llc)->uncore, GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
gpu_freq);
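
Every snb_pcode_read()/snb_pcode_write() call site in this series swaps its first argument from the whole device to an uncore, so the pcode mailbox is addressed per GT. A sketch of the new call shape (read_min_freq_entry() is illustrative, not a driver helper):

static int read_min_freq_entry(struct intel_gt *gt, u32 gpu_freq, u32 *ia_freq)
{
	*ia_freq = gpu_freq;
	/* The mailbox is reached via the GT's uncore, not drm_i915_private. */
	return snb_pcode_read(gt->uncore, GEN6_PCODE_READ_MIN_FREQ_TABLE,
			      ia_freq, NULL);
}
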
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
index b4770690e794..f8d0523f4c18 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -272,7 +272,7 @@ static void gen6_rc6_enable(struct intel_rc6 *rc6)
GEN6_RC_CTL_HW_ENABLE;
rc6vids = 0;
- ret = snb_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS, &rc6vids, NULL);
+ ret = snb_pcode_read(rc6_to_gt(rc6)->uncore, GEN6_PCODE_READ_RC6VIDS, &rc6vids, NULL);
if (GRAPHICS_VER(i915) == 6 && ret) {
drm_dbg(&i915->drm, "Couldn't check for BIOS workaround\n");
} else if (GRAPHICS_VER(i915) == 6 &&
@@ -282,7 +282,7 @@ static void gen6_rc6_enable(struct intel_rc6 *rc6)
GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
rc6vids &= 0xffff00;
rc6vids |= GEN6_ENCODE_RC6_VID(450);
- ret = snb_pcode_write(i915, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
+ ret = snb_pcode_write(rc6_to_gt(rc6)->uncore, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
if (ret)
drm_err(&i915->drm,
"Couldn't fix incorrect rc6 voltage\n");
diff --git a/drivers/gpu/drm/i915/gt/intel_ring.c b/drivers/gpu/drm/i915/gt/intel_ring.c
index 40ffcb94e379..15ec64d881c4 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring.c
@@ -299,7 +299,8 @@ u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords)
GEM_BUG_ON(ring->emit > ring->size - bytes);
GEM_BUG_ON(ring->space < bytes);
cs = ring->vaddr + ring->emit;
- GEM_DEBUG_EXEC(memset32(cs, POISON_INUSE, bytes / sizeof(*cs)));
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ memset32(cs, POISON_INUSE, bytes / sizeof(*cs));
ring->emit += bytes;
ring->space -= bytes;
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index 3bd8415a0f1b..fb3f57ee450b 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -1144,7 +1144,8 @@ static void gen6_rps_init(struct intel_rps *rps)
if (IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 11)
mult = GEN9_FREQ_SCALER;
- if (snb_pcode_read(i915, HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
+ if (snb_pcode_read(rps_to_gt(rps)->uncore,
+ HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
&ddcc_status, NULL) == 0)
rps->efficient_freq =
clamp_t(u32,
@@ -1984,7 +1985,7 @@ void intel_rps_init(struct intel_rps *rps)
if (GRAPHICS_VER(i915) == 6 || IS_IVYBRIDGE(i915) || IS_HASWELL(i915)) {
u32 params = 0;
- snb_pcode_read(i915, GEN6_READ_OC_PARAMS, &params, NULL);
+ snb_pcode_read(rps_to_gt(rps)->uncore, GEN6_READ_OC_PARAMS, &params, NULL);
if (params & BIT(31)) { /* OC supported */
drm_dbg(&i915->drm,
"Overclocking supported, max: %dMHz, overclock: %dMHz\n",
diff --git a/drivers/gpu/drm/i915/gt/selftest_llc.c b/drivers/gpu/drm/i915/gt/selftest_llc.c
index 2cd184ab32b1..cfd736d88939 100644
--- a/drivers/gpu/drm/i915/gt/selftest_llc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_llc.c
@@ -31,7 +31,7 @@ static int gen6_verify_ring_freq(struct intel_llc *llc)
calc_ia_freq(llc, gpu_freq, &consts, &ia_freq, &ring_freq);
val = gpu_freq;
- if (snb_pcode_read(i915, GEN6_PCODE_READ_MIN_FREQ_TABLE,
+ if (snb_pcode_read(llc_to_gt(llc)->uncore, GEN6_PCODE_READ_MIN_FREQ_TABLE,
&val, NULL)) {
pr_err("Failed to read freq table[%d], range [%d, %d]\n",
gpu_freq, consts.min_gpu_freq, consts.max_gpu_freq);
diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c
index 6a69ac0184ad..cfb4708dd62e 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rps.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rps.c
@@ -521,7 +521,7 @@ static void show_pcu_config(struct intel_rps *rps)
for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
int ia_freq = gpu_freq;
- snb_pcode_read(i915, GEN6_PCODE_READ_MIN_FREQ_TABLE,
+ snb_pcode_read(rps_to_gt(rps)->uncore, GEN6_PCODE_READ_MIN_FREQ_TABLE,
&ia_freq, NULL);
pr_info("%5d %5d %5d\n",
diff --git a/drivers/gpu/drm/i915/gvt/Makefile b/drivers/gpu/drm/i915/gvt/Makefile
index ea8324abc784..1699f644298e 100644
--- a/drivers/gpu/drm/i915/gvt/Makefile
+++ b/drivers/gpu/drm/i915/gvt/Makefile
@@ -1,9 +1,25 @@
# SPDX-License-Identifier: GPL-2.0
-GVT_DIR := gvt
-GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \
- interrupt.o gtt.o cfg_space.o opregion.o mmio.o display.o edid.o \
- execlist.o scheduler.o sched_policy.o mmio_context.o cmd_parser.o debugfs.o \
- fb_decoder.o dmabuf.o page_track.o
-ccflags-y += -I $(srctree)/$(src) -I $(srctree)/$(src)/$(GVT_DIR)/
-i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
+kvmgt-$(CONFIG_DRM_I915_GVT) += \
+ gvt/aperture_gm.o \
+ gvt/cfg_space.o \
+ gvt/cmd_parser.o \
+ gvt/debugfs.o \
+ gvt/display.o \
+ gvt/dmabuf.o \
+ gvt/edid.o \
+ gvt/execlist.o \
+ gvt/fb_decoder.o \
+ gvt/firmware.o \
+ gvt/gtt.o \
+ gvt/handlers.o \
+ gvt/interrupt.o \
+ gvt/kvmgt.o \
+ gvt/mmio.o \
+ gvt/mmio_context.o \
+ gvt/opregion.o \
+ gvt/page_track.o \
+ gvt/sched_policy.o \
+ gvt/scheduler.o \
+ gvt/trace_points.o \
+ gvt/vgpu.o
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
index b490e3db2e38..dad3a6054335 100644
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -129,60 +129,16 @@ int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
return 0;
}
-static int map_aperture(struct intel_vgpu *vgpu, bool map)
+static void map_aperture(struct intel_vgpu *vgpu, bool map)
{
- phys_addr_t aperture_pa = vgpu_aperture_pa_base(vgpu);
- unsigned long aperture_sz = vgpu_aperture_sz(vgpu);
- u64 first_gfn;
- u64 val;
- int ret;
-
- if (map == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked)
- return 0;
-
- val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_2];
- if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
- val = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);
- else
- val = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);
-
- first_gfn = (val + vgpu_aperture_offset(vgpu)) >> PAGE_SHIFT;
-
- ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu, first_gfn,
- aperture_pa >> PAGE_SHIFT,
- aperture_sz >> PAGE_SHIFT,
- map);
- if (ret)
- return ret;
-
- vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked = map;
- return 0;
+ if (map != vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked)
+ vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked = map;
}
-static int trap_gttmmio(struct intel_vgpu *vgpu, bool trap)
+static void trap_gttmmio(struct intel_vgpu *vgpu, bool trap)
{
- u64 start, end;
- u64 val;
- int ret;
-
- if (trap == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked)
- return 0;
-
- val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_0];
- if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
- start = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0);
- else
- start = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0);
-
- start &= ~GENMASK(3, 0);
- end = start + vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size - 1;
-
- ret = intel_gvt_hypervisor_set_trap_area(vgpu, start, end, trap);
- if (ret)
- return ret;
-
- vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked = trap;
- return 0;
+ if (trap != vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked)
+ vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked = trap;
}
static int emulate_pci_command_write(struct intel_vgpu *vgpu,
@@ -191,26 +147,17 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu,
u8 old = vgpu_cfg_space(vgpu)[offset];
u8 new = *(u8 *)p_data;
u8 changed = old ^ new;
- int ret;
vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
if (!(changed & PCI_COMMAND_MEMORY))
return 0;
if (old & PCI_COMMAND_MEMORY) {
- ret = trap_gttmmio(vgpu, false);
- if (ret)
- return ret;
- ret = map_aperture(vgpu, false);
- if (ret)
- return ret;
+ trap_gttmmio(vgpu, false);
+ map_aperture(vgpu, false);
} else {
- ret = trap_gttmmio(vgpu, true);
- if (ret)
- return ret;
- ret = map_aperture(vgpu, true);
- if (ret)
- return ret;
+ trap_gttmmio(vgpu, true);
+ map_aperture(vgpu, true);
}
return 0;
@@ -230,13 +177,12 @@ static int emulate_pci_rom_bar_write(struct intel_vgpu *vgpu,
return 0;
}
-static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
+static void emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
u32 new = *(u32 *)(p_data);
bool lo = IS_ALIGNED(offset, 8);
u64 size;
- int ret = 0;
bool mmio_enabled =
vgpu_cfg_space(vgpu)[PCI_COMMAND] & PCI_COMMAND_MEMORY;
struct intel_vgpu_pci_bar *bars = vgpu->cfg_space.bar;
@@ -259,14 +205,14 @@ static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
* Untrap the BAR, since guest hasn't configured a
* valid GPA
*/
- ret = trap_gttmmio(vgpu, false);
+ trap_gttmmio(vgpu, false);
break;
case PCI_BASE_ADDRESS_2:
case PCI_BASE_ADDRESS_3:
size = ~(bars[INTEL_GVT_PCI_BAR_APERTURE].size -1);
intel_vgpu_write_pci_bar(vgpu, offset,
size >> (lo ? 0 : 32), lo);
- ret = map_aperture(vgpu, false);
+ map_aperture(vgpu, false);
break;
default:
/* Unimplemented BARs */
@@ -282,19 +228,18 @@ static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
*/
trap_gttmmio(vgpu, false);
intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
- ret = trap_gttmmio(vgpu, mmio_enabled);
+ trap_gttmmio(vgpu, mmio_enabled);
break;
case PCI_BASE_ADDRESS_2:
case PCI_BASE_ADDRESS_3:
map_aperture(vgpu, false);
intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
- ret = map_aperture(vgpu, mmio_enabled);
+ map_aperture(vgpu, mmio_enabled);
break;
default:
intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
}
}
- return ret;
}
/**
@@ -336,8 +281,8 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_5:
if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4)))
return -EINVAL;
- return emulate_pci_bar_write(vgpu, offset, p_data, bytes);
-
+ emulate_pci_bar_write(vgpu, offset, p_data, bytes);
+ break;
case INTEL_GVT_PCI_SWSCI:
if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4)))
return -EINVAL;
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index efad8552d6e6..0ba2a3455d99 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -1011,7 +1011,7 @@ static int cmd_reg_handler(struct parser_exec_state *s,
if (GRAPHICS_VER(s->engine->i915) == 9 &&
intel_gvt_mmio_is_sr_in_ctx(gvt, offset) &&
!strncmp(cmd, "lri", 3)) {
- intel_gvt_hypervisor_read_gpa(s->vgpu,
+ intel_gvt_read_gpa(s->vgpu,
s->workload->ring_context_gpa + 12, &ctx_sr_ctl, 4);
/* check inhibit context */
if (ctx_sr_ctl & 1) {
@@ -1775,7 +1775,7 @@ static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
copy_len = (end_gma - gma) >= (I915_GTT_PAGE_SIZE - offset) ?
I915_GTT_PAGE_SIZE - offset : end_gma - gma;
- intel_gvt_hypervisor_read_gpa(vgpu, gpa, va + len, copy_len);
+ intel_gvt_read_gpa(vgpu, gpa, va + len, copy_len);
len += copy_len;
gma += copy_len;
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index c95c25d2addb..01e54b45c5c1 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -29,7 +29,7 @@
*/
#include <linux/dma-buf.h>
-#include <linux/vfio.h>
+#include <linux/mdev.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane.h>
@@ -42,24 +42,6 @@
#define GEN8_DECODE_PTE(pte) (pte & GENMASK_ULL(63, 12))
-static int vgpu_pin_dma_address(struct intel_vgpu *vgpu,
- unsigned long size,
- dma_addr_t dma_addr)
-{
- int ret = 0;
-
- if (intel_gvt_hypervisor_dma_pin_guest_page(vgpu, dma_addr))
- ret = -EINVAL;
-
- return ret;
-}
-
-static void vgpu_unpin_dma_address(struct intel_vgpu *vgpu,
- dma_addr_t dma_addr)
-{
- intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, dma_addr);
-}
-
static int vgpu_gem_get_pages(
struct drm_i915_gem_object *obj)
{
@@ -95,7 +77,7 @@ static int vgpu_gem_get_pages(
for_each_sg(st->sgl, sg, page_num, i) {
dma_addr_t dma_addr =
GEN8_DECODE_PTE(readq(&gtt_entries[i]));
- if (vgpu_pin_dma_address(vgpu, PAGE_SIZE, dma_addr)) {
+ if (intel_gvt_dma_pin_guest_page(vgpu, dma_addr)) {
ret = -EINVAL;
goto out;
}
@@ -114,7 +96,7 @@ out:
for_each_sg(st->sgl, sg, i, j) {
dma_addr = sg_dma_address(sg);
if (dma_addr)
- vgpu_unpin_dma_address(vgpu, dma_addr);
+ intel_gvt_dma_unmap_guest_page(vgpu, dma_addr);
}
sg_free_table(st);
kfree(st);
@@ -136,7 +118,7 @@ static void vgpu_gem_put_pages(struct drm_i915_gem_object *obj,
int i;
for_each_sg(pages->sgl, sg, fb_info->size, i)
- vgpu_unpin_dma_address(vgpu,
+ intel_gvt_dma_unmap_guest_page(vgpu,
sg_dma_address(sg));
}
@@ -157,7 +139,6 @@ static void dmabuf_gem_object_free(struct kref *kref)
dmabuf_obj = list_entry(pos, struct intel_vgpu_dmabuf_obj, list);
if (dmabuf_obj == obj) {
list_del(pos);
- intel_gvt_hypervisor_put_vfio_device(vgpu);
idr_remove(&vgpu->object_idr,
dmabuf_obj->dmabuf_id);
kfree(dmabuf_obj->info);
@@ -491,14 +472,6 @@ int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args)
kref_init(&dmabuf_obj->kref);
- mutex_lock(&vgpu->dmabuf_lock);
- if (intel_gvt_hypervisor_get_vfio_device(vgpu)) {
- gvt_vgpu_err("get vfio device failed\n");
- mutex_unlock(&vgpu->dmabuf_lock);
- goto out_free_info;
- }
- mutex_unlock(&vgpu->dmabuf_lock);
-
update_fb_info(gfx_plane_info, &fb_info);
INIT_LIST_HEAD(&dmabuf_obj->list);
@@ -603,7 +576,6 @@ void intel_vgpu_dmabuf_cleanup(struct intel_vgpu *vgpu)
dmabuf_obj->vgpu = NULL;
idr_remove(&vgpu->object_idr, dmabuf_obj->dmabuf_id);
- intel_gvt_hypervisor_put_vfio_device(vgpu);
list_del(pos);
/* dmabuf_obj might be freed in dmabuf_obj_put */
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index 66d354c4195b..274c6ef42400 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -159,12 +159,12 @@ static void emulate_csb_update(struct intel_vgpu_execlist *execlist,
hwsp_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
vgpu->hws_pga[execlist->engine->id]);
if (hwsp_gpa != INTEL_GVT_INVALID_ADDR) {
- intel_gvt_hypervisor_write_gpa(vgpu,
- hwsp_gpa + I915_HWS_CSB_BUF0_INDEX * 4 + write_pointer * 8,
- status, 8);
- intel_gvt_hypervisor_write_gpa(vgpu,
- hwsp_gpa + INTEL_HWS_CSB_WRITE_INDEX(execlist->engine->i915) * 4,
- &write_pointer, 4);
+ intel_gvt_write_gpa(vgpu,
+ hwsp_gpa + I915_HWS_CSB_BUF0_INDEX * 4 + write_pointer * 8,
+ status, 8);
+ intel_gvt_write_gpa(vgpu,
+ hwsp_gpa + INTEL_HWS_CSB_WRITE_INDEX(execlist->engine->i915) * 4,
+ &write_pointer, 4);
}
gvt_dbg_el("vgpu%d: w pointer %u reg %x csb l %x csb h %x\n",
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c
index 1a8274a3f4b1..54fe442238c6 100644
--- a/drivers/gpu/drm/i915/gvt/firmware.c
+++ b/drivers/gpu/drm/i915/gvt/firmware.c
@@ -66,22 +66,16 @@ static struct bin_attribute firmware_attr = {
.mmap = NULL,
};
-static int mmio_snapshot_handler(struct intel_gvt *gvt, u32 offset, void *data)
-{
- *(u32 *)(data + offset) = intel_uncore_read_notrace(gvt->gt->uncore,
- _MMIO(offset));
- return 0;
-}
-
static int expose_firmware_sysfs(struct intel_gvt *gvt)
{
struct intel_gvt_device_info *info = &gvt->device_info;
- struct pci_dev *pdev = to_pci_dev(gvt->gt->i915->drm.dev);
+ struct drm_i915_private *i915 = gvt->gt->i915;
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
struct gvt_firmware_header *h;
void *firmware;
void *p;
unsigned long size, crc32_start;
- int i, ret;
+ int ret;
size = sizeof(*h) + info->mmio_size + info->cfg_space_size;
firmware = vzalloc(size);
@@ -99,17 +93,16 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
p = firmware + h->cfg_space_offset;
- for (i = 0; i < h->cfg_space_size; i += 4)
- pci_read_config_dword(pdev, i, p + i);
-
- memcpy(gvt->firmware.cfg_space, p, info->cfg_space_size);
+ memcpy(gvt->firmware.cfg_space, i915->vgpu.initial_cfg_space,
+ info->cfg_space_size);
+ memcpy(p, gvt->firmware.cfg_space, info->cfg_space_size);
p = firmware + h->mmio_offset;
- /* Take a snapshot of hw mmio registers. */
- intel_gvt_for_each_tracked_mmio(gvt, mmio_snapshot_handler, p);
+ memcpy(gvt->firmware.mmio, i915->vgpu.initial_mmio,
+ info->mmio_size);
- memcpy(gvt->firmware.mmio, p, info->mmio_size);
+ memcpy(p, gvt->firmware.mmio, info->mmio_size);
crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4;
h->crc32 = crc32_le(0, firmware + crc32_start, size - crc32_start);
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index d4082f4b9be1..9c5cc2800975 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -49,6 +49,22 @@
static bool enable_out_of_sync = false;
static int preallocated_oos_pages = 8192;
+static bool intel_gvt_is_valid_gfn(struct intel_vgpu *vgpu, unsigned long gfn)
+{
+ struct kvm *kvm = vgpu->kvm;
+ int idx;
+ bool ret;
+
+ if (!vgpu->attached)
+ return false;
+
+ idx = srcu_read_lock(&kvm->srcu);
+ ret = kvm_is_visible_gfn(kvm, gfn);
+ srcu_read_unlock(&kvm->srcu, idx);
+
+ return ret;
+}
+
/*
* validate a gm address and related range size,
* translate it to host gm address
@@ -314,7 +330,7 @@ static inline int gtt_get_entry64(void *pt,
return -EINVAL;
if (hypervisor_access) {
- ret = intel_gvt_hypervisor_read_gpa(vgpu, gpa +
+ ret = intel_gvt_read_gpa(vgpu, gpa +
(index << info->gtt_entry_size_shift),
&e->val64, 8);
if (WARN_ON(ret))
@@ -339,7 +355,7 @@ static inline int gtt_set_entry64(void *pt,
return -EINVAL;
if (hypervisor_access) {
- ret = intel_gvt_hypervisor_write_gpa(vgpu, gpa +
+ ret = intel_gvt_write_gpa(vgpu, gpa +
(index << info->gtt_entry_size_shift),
&e->val64, 8);
if (WARN_ON(ret))
@@ -997,7 +1013,7 @@ static inline void ppgtt_invalidate_pte(struct intel_vgpu_ppgtt_spt *spt,
if (!pfn || pfn == vgpu->gtt.scratch_pt[type].page_mfn)
return;
- intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
+ intel_gvt_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
}
static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt)
@@ -1162,15 +1178,16 @@ static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
struct intel_gvt_gtt_entry *entry)
{
const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
- unsigned long pfn;
+ kvm_pfn_t pfn;
if (!HAS_PAGE_SIZES(vgpu->gvt->gt->i915, I915_GTT_PAGE_SIZE_2M))
return 0;
- pfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, ops->get_pfn(entry));
- if (pfn == INTEL_GVT_INVALID_ADDR)
+ if (!vgpu->attached)
+ return -EINVAL;
+ pfn = gfn_to_pfn(vgpu->kvm, ops->get_pfn(entry));
+ if (is_error_noslot_pfn(pfn))
return -EINVAL;
-
return PageTransHuge(pfn_to_page(pfn));
}
@@ -1195,8 +1212,8 @@ static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
return PTR_ERR(sub_spt);
for_each_shadow_entry(sub_spt, &sub_se, sub_index) {
- ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu,
- start_gfn + sub_index, PAGE_SIZE, &dma_addr);
+ ret = intel_gvt_dma_map_guest_page(vgpu, start_gfn + sub_index,
+ PAGE_SIZE, &dma_addr);
if (ret) {
ppgtt_invalidate_spt(spt);
return ret;
@@ -1241,8 +1258,8 @@ static int split_64KB_gtt_entry(struct intel_vgpu *vgpu,
ops->set_64k_splited(&entry);
for (i = 0; i < GTT_64K_PTE_STRIDE; i++) {
- ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu,
- start_gfn + i, PAGE_SIZE, &dma_addr);
+ ret = intel_gvt_dma_map_guest_page(vgpu, start_gfn + i,
+ PAGE_SIZE, &dma_addr);
if (ret)
return ret;
@@ -1296,8 +1313,7 @@ static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
}
/* direct shadow */
- ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, page_size,
- &dma_addr);
+ ret = intel_gvt_dma_map_guest_page(vgpu, gfn, page_size, &dma_addr);
if (ret)
return -ENXIO;
@@ -1331,7 +1347,7 @@ static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt)
ppgtt_set_shadow_entry(spt, &se, i);
} else {
gfn = ops->get_pfn(&ge);
- if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) {
+ if (!intel_gvt_is_valid_gfn(vgpu, gfn)) {
ops->set_pfn(&se, gvt->gtt.scratch_mfn);
ppgtt_set_shadow_entry(spt, &se, i);
continue;
@@ -1497,7 +1513,7 @@ static int attach_oos_page(struct intel_vgpu_oos_page *oos_page,
struct intel_gvt *gvt = spt->vgpu->gvt;
int ret;
- ret = intel_gvt_hypervisor_read_gpa(spt->vgpu,
+ ret = intel_gvt_read_gpa(spt->vgpu,
spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
oos_page->mem, I915_GTT_PAGE_SIZE);
if (ret)
@@ -2228,8 +2244,7 @@ static void ggtt_invalidate_pte(struct intel_vgpu *vgpu,
pfn = pte_ops->get_pfn(entry);
if (pfn != vgpu->gvt->gtt.scratch_mfn)
- intel_gvt_hypervisor_dma_unmap_guest_page(vgpu,
- pfn << PAGE_SHIFT);
+ intel_gvt_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
}
static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
@@ -2315,13 +2330,13 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
/* one PTE update may be issued in multiple writes and the
* first write may not construct a valid gfn
*/
- if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) {
+ if (!intel_gvt_is_valid_gfn(vgpu, gfn)) {
ops->set_pfn(&m, gvt->gtt.scratch_mfn);
goto out;
}
- ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn,
- PAGE_SIZE, &dma_addr);
+ ret = intel_gvt_dma_map_guest_page(vgpu, gfn, PAGE_SIZE,
+ &dma_addr);
if (ret) {
gvt_vgpu_err("fail to populate guest ggtt entry\n");
/* guest driver may read/write the entry when partial
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
deleted file mode 100644
index f0b69e4dcb52..000000000000
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ /dev/null
@@ -1,340 +0,0 @@
-/*
- * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- * Authors:
- * Kevin Tian <kevin.tian@intel.com>
- * Eddie Dong <eddie.dong@intel.com>
- *
- * Contributors:
- * Niu Bing <bing.niu@intel.com>
- * Zhi Wang <zhi.a.wang@intel.com>
- *
- */
-
-#include <linux/types.h>
-#include <linux/kthread.h>
-
-#include "i915_drv.h"
-#include "intel_gvt.h"
-#include "gvt.h"
-#include <linux/vfio.h>
-#include <linux/mdev.h>
-
-struct intel_gvt_host intel_gvt_host;
-
-static const char * const supported_hypervisors[] = {
- [INTEL_GVT_HYPERVISOR_XEN] = "XEN",
- [INTEL_GVT_HYPERVISOR_KVM] = "KVM",
-};
-
-static const struct intel_gvt_ops intel_gvt_ops = {
- .emulate_cfg_read = intel_vgpu_emulate_cfg_read,
- .emulate_cfg_write = intel_vgpu_emulate_cfg_write,
- .emulate_mmio_read = intel_vgpu_emulate_mmio_read,
- .emulate_mmio_write = intel_vgpu_emulate_mmio_write,
- .vgpu_create = intel_gvt_create_vgpu,
- .vgpu_destroy = intel_gvt_destroy_vgpu,
- .vgpu_release = intel_gvt_release_vgpu,
- .vgpu_reset = intel_gvt_reset_vgpu,
- .vgpu_activate = intel_gvt_activate_vgpu,
- .vgpu_deactivate = intel_gvt_deactivate_vgpu,
- .vgpu_query_plane = intel_vgpu_query_plane,
- .vgpu_get_dmabuf = intel_vgpu_get_dmabuf,
- .write_protect_handler = intel_vgpu_page_track_handler,
- .emulate_hotplug = intel_vgpu_emulate_hotplug,
-};
-
-static void init_device_info(struct intel_gvt *gvt)
-{
- struct intel_gvt_device_info *info = &gvt->device_info;
- struct pci_dev *pdev = to_pci_dev(gvt->gt->i915->drm.dev);
-
- info->max_support_vgpus = 8;
- info->cfg_space_size = PCI_CFG_SPACE_EXP_SIZE;
- info->mmio_size = 2 * 1024 * 1024;
- info->mmio_bar = 0;
- info->gtt_start_offset = 8 * 1024 * 1024;
- info->gtt_entry_size = 8;
- info->gtt_entry_size_shift = 3;
- info->gmadr_bytes_in_cmd = 8;
- info->max_surface_size = 36 * 1024 * 1024;
- info->msi_cap_offset = pdev->msi_cap;
-}
-
-static void intel_gvt_test_and_emulate_vblank(struct intel_gvt *gvt)
-{
- struct intel_vgpu *vgpu;
- int id;
-
- mutex_lock(&gvt->lock);
- idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) {
- if (test_and_clear_bit(INTEL_GVT_REQUEST_EMULATE_VBLANK + id,
- (void *)&gvt->service_request)) {
- if (vgpu->active)
- intel_vgpu_emulate_vblank(vgpu);
- }
- }
- mutex_unlock(&gvt->lock);
-}
-
-static int gvt_service_thread(void *data)
-{
- struct intel_gvt *gvt = (struct intel_gvt *)data;
- int ret;
-
- gvt_dbg_core("service thread start\n");
-
- while (!kthread_should_stop()) {
- ret = wait_event_interruptible(gvt->service_thread_wq,
- kthread_should_stop() || gvt->service_request);
-
- if (kthread_should_stop())
- break;
-
- if (WARN_ONCE(ret, "service thread is waken up by signal.\n"))
- continue;
-
- intel_gvt_test_and_emulate_vblank(gvt);
-
- if (test_bit(INTEL_GVT_REQUEST_SCHED,
- (void *)&gvt->service_request) ||
- test_bit(INTEL_GVT_REQUEST_EVENT_SCHED,
- (void *)&gvt->service_request)) {
- intel_gvt_schedule(gvt);
- }
- }
-
- return 0;
-}
-
-static void clean_service_thread(struct intel_gvt *gvt)
-{
- kthread_stop(gvt->service_thread);
-}
-
-static int init_service_thread(struct intel_gvt *gvt)
-{
- init_waitqueue_head(&gvt->service_thread_wq);
-
- gvt->service_thread = kthread_run(gvt_service_thread,
- gvt, "gvt_service_thread");
- if (IS_ERR(gvt->service_thread)) {
- gvt_err("fail to start service thread.\n");
- return PTR_ERR(gvt->service_thread);
- }
- return 0;
-}
-
-/**
- * intel_gvt_clean_device - clean a GVT device
- * @i915: i915 private
- *
- * This function is called at the driver unloading stage, to free the
- * resources owned by a GVT device.
- *
- */
-void intel_gvt_clean_device(struct drm_i915_private *i915)
-{
- struct intel_gvt *gvt = fetch_and_zero(&i915->gvt);
-
- if (drm_WARN_ON(&i915->drm, !gvt))
- return;
-
- intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
- intel_gvt_clean_vgpu_types(gvt);
-
- intel_gvt_debugfs_clean(gvt);
- clean_service_thread(gvt);
- intel_gvt_clean_cmd_parser(gvt);
- intel_gvt_clean_sched_policy(gvt);
- intel_gvt_clean_workload_scheduler(gvt);
- intel_gvt_clean_gtt(gvt);
- intel_gvt_free_firmware(gvt);
- intel_gvt_clean_mmio_info(gvt);
- idr_destroy(&gvt->vgpu_idr);
-
- kfree(i915->gvt);
-}
-
-/**
- * intel_gvt_init_device - initialize a GVT device
- * @i915: drm i915 private data
- *
- * This function is called at the initialization stage, to initialize
- * necessary GVT components.
- *
- * Returns:
- * Zero on success, negative error code if failed.
- *
- */
-int intel_gvt_init_device(struct drm_i915_private *i915)
-{
- struct intel_gvt *gvt;
- struct intel_vgpu *vgpu;
- int ret;
-
- if (drm_WARN_ON(&i915->drm, i915->gvt))
- return -EEXIST;
-
- gvt = kzalloc(sizeof(struct intel_gvt), GFP_KERNEL);
- if (!gvt)
- return -ENOMEM;
-
- gvt_dbg_core("init gvt device\n");
-
- idr_init_base(&gvt->vgpu_idr, 1);
- spin_lock_init(&gvt->scheduler.mmio_context_lock);
- mutex_init(&gvt->lock);
- mutex_init(&gvt->sched_lock);
- gvt->gt = to_gt(i915);
- i915->gvt = gvt;
-
- init_device_info(gvt);
-
- ret = intel_gvt_setup_mmio_info(gvt);
- if (ret)
- goto out_clean_idr;
-
- intel_gvt_init_engine_mmio_context(gvt);
-
- ret = intel_gvt_load_firmware(gvt);
- if (ret)
- goto out_clean_mmio_info;
-
- ret = intel_gvt_init_irq(gvt);
- if (ret)
- goto out_free_firmware;
-
- ret = intel_gvt_init_gtt(gvt);
- if (ret)
- goto out_free_firmware;
-
- ret = intel_gvt_init_workload_scheduler(gvt);
- if (ret)
- goto out_clean_gtt;
-
- ret = intel_gvt_init_sched_policy(gvt);
- if (ret)
- goto out_clean_workload_scheduler;
-
- ret = intel_gvt_init_cmd_parser(gvt);
- if (ret)
- goto out_clean_sched_policy;
-
- ret = init_service_thread(gvt);
- if (ret)
- goto out_clean_cmd_parser;
-
- ret = intel_gvt_init_vgpu_types(gvt);
- if (ret)
- goto out_clean_thread;
-
- vgpu = intel_gvt_create_idle_vgpu(gvt);
- if (IS_ERR(vgpu)) {
- ret = PTR_ERR(vgpu);
- gvt_err("failed to create idle vgpu\n");
- goto out_clean_types;
- }
- gvt->idle_vgpu = vgpu;
-
- intel_gvt_debugfs_init(gvt);
-
- gvt_dbg_core("gvt device initialization is done\n");
- intel_gvt_host.dev = i915->drm.dev;
- intel_gvt_host.initialized = true;
- return 0;
-
-out_clean_types:
- intel_gvt_clean_vgpu_types(gvt);
-out_clean_thread:
- clean_service_thread(gvt);
-out_clean_cmd_parser:
- intel_gvt_clean_cmd_parser(gvt);
-out_clean_sched_policy:
- intel_gvt_clean_sched_policy(gvt);
-out_clean_workload_scheduler:
- intel_gvt_clean_workload_scheduler(gvt);
-out_clean_gtt:
- intel_gvt_clean_gtt(gvt);
-out_free_firmware:
- intel_gvt_free_firmware(gvt);
-out_clean_mmio_info:
- intel_gvt_clean_mmio_info(gvt);
-out_clean_idr:
- idr_destroy(&gvt->vgpu_idr);
- kfree(gvt);
- i915->gvt = NULL;
- return ret;
-}
-
-int
-intel_gvt_pm_resume(struct intel_gvt *gvt)
-{
- intel_gvt_restore_fence(gvt);
- intel_gvt_restore_mmio(gvt);
- intel_gvt_restore_ggtt(gvt);
- return 0;
-}
-
-int
-intel_gvt_register_hypervisor(const struct intel_gvt_mpt *m)
-{
- int ret;
- void *gvt;
-
- if (!intel_gvt_host.initialized)
- return -ENODEV;
-
- if (m->type != INTEL_GVT_HYPERVISOR_KVM &&
- m->type != INTEL_GVT_HYPERVISOR_XEN)
- return -EINVAL;
-
- /* Get a reference for device model module */
- if (!try_module_get(THIS_MODULE))
- return -ENODEV;
-
- intel_gvt_host.mpt = m;
- intel_gvt_host.hypervisor_type = m->type;
- gvt = (void *)kdev_to_i915(intel_gvt_host.dev)->gvt;
-
- ret = intel_gvt_hypervisor_host_init(intel_gvt_host.dev, gvt,
- &intel_gvt_ops);
- if (ret < 0) {
- gvt_err("Failed to init %s hypervisor module\n",
- supported_hypervisors[intel_gvt_host.hypervisor_type]);
- module_put(THIS_MODULE);
- return -ENODEV;
- }
- gvt_dbg_core("Running with hypervisor %s in host mode\n",
- supported_hypervisors[intel_gvt_host.hypervisor_type]);
- return 0;
-}
-EXPORT_SYMBOL_GPL(intel_gvt_register_hypervisor);
-
-void
-intel_gvt_unregister_hypervisor(void)
-{
- void *gvt = (void *)kdev_to_i915(intel_gvt_host.dev)->gvt;
- intel_gvt_hypervisor_host_exit(intel_gvt_host.dev, gvt);
- module_put(THIS_MODULE);
-}
-EXPORT_SYMBOL_GPL(intel_gvt_unregister_hypervisor);
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 0ebffc327528..03ecffc2ba56 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -34,11 +34,13 @@
#define _GVT_H_
#include <uapi/linux/pci_regs.h>
+#include <linux/kvm_host.h>
+#include <linux/vfio.h>
#include "i915_drv.h"
+#include "intel_gvt.h"
#include "debug.h"
-#include "hypercall.h"
#include "mmio.h"
#include "reg.h"
#include "interrupt.h"
@@ -56,15 +58,6 @@
#define GVT_MAX_VGPU 8
-struct intel_gvt_host {
- struct device *dev;
- bool initialized;
- int hypervisor_type;
- const struct intel_gvt_mpt *mpt;
-};
-
-extern struct intel_gvt_host intel_gvt_host;
-
/* Describe per-platform limitations. */
struct intel_gvt_device_info {
u32 max_support_vgpus;
@@ -176,12 +169,14 @@ struct intel_vgpu_submission {
} last_ctx[I915_NUM_ENGINES];
};
+#define KVMGT_DEBUGFS_FILENAME "kvmgt_nr_cache_entries"
+
struct intel_vgpu {
struct intel_gvt *gvt;
struct mutex vgpu_lock;
int id;
- unsigned long handle; /* vGPU handle used by hypervisor MPT modules */
bool active;
+ bool attached;
bool pv_notified;
bool failsafe;
unsigned int resetting_eng;
@@ -209,21 +204,40 @@ struct intel_vgpu {
struct dentry *debugfs;
- /* Hypervisor-specific device state. */
- void *vdev;
-
struct list_head dmabuf_obj_list_head;
struct mutex dmabuf_lock;
struct idr object_idr;
struct intel_vgpu_vblank_timer vblank_timer;
u32 scan_nonprivbb;
-};
-static inline void *intel_vgpu_vdev(struct intel_vgpu *vgpu)
-{
- return vgpu->vdev;
-}
+ struct vfio_device vfio_device;
+ struct vfio_region *region;
+ int num_regions;
+ struct eventfd_ctx *intx_trigger;
+ struct eventfd_ctx *msi_trigger;
+
+ /*
+ * Two caches are used to avoid mapping duplicated pages (e.g.
+ * scratch pages). This helps to reduce dma setup overhead.
+ */
+ struct rb_root gfn_cache;
+ struct rb_root dma_addr_cache;
+ unsigned long nr_cache_entries;
+ struct mutex cache_lock;
+
+ struct notifier_block iommu_notifier;
+ struct notifier_block group_notifier;
+ struct kvm *kvm;
+ struct work_struct release_work;
+ atomic_t released;
+ struct vfio_group *vfio_group;
+
+ struct kvm_page_track_notifier_node track_node;
+#define NR_BKT (1 << 18)
+ struct hlist_head ptable[NR_BKT];
+#undef NR_BKT
+};
/* validating GM healthy status*/
#define vgpu_is_vm_unhealthy(ret_val) \
@@ -272,7 +286,7 @@ struct intel_gvt_mmio {
/* Value of command write of this reg needs to be patched */
#define F_CMD_WRITE_PATCH (1 << 8)
- const struct gvt_mmio_block *mmio_block;
+ struct gvt_mmio_block *mmio_block;
unsigned int num_mmio_block;
DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
@@ -428,7 +442,6 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt);
#define RING_CTX_SIZE 320
struct intel_vgpu_creation_params {
- __u64 handle;
__u64 low_gm_sz; /* in MB */
__u64 high_gm_sz; /* in MB */
__u64 fence_sz;
@@ -496,6 +509,9 @@ void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu);
+int intel_gvt_set_opregion(struct intel_vgpu *vgpu);
+int intel_gvt_set_edid(struct intel_vgpu *vgpu, int port_num);
+
/* validating GM functions */
#define vgpu_gmadr_is_aperture(vgpu, gmadr) \
((gmadr >= vgpu_aperture_gmadr_base(vgpu)) && \
@@ -557,30 +573,7 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu);
int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload);
void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason);
-
-struct intel_gvt_ops {
- int (*emulate_cfg_read)(struct intel_vgpu *, unsigned int, void *,
- unsigned int);
- int (*emulate_cfg_write)(struct intel_vgpu *, unsigned int, void *,
- unsigned int);
- int (*emulate_mmio_read)(struct intel_vgpu *, u64, void *,
- unsigned int);
- int (*emulate_mmio_write)(struct intel_vgpu *, u64, void *,
- unsigned int);
- struct intel_vgpu *(*vgpu_create)(struct intel_gvt *,
- struct intel_vgpu_type *);
- void (*vgpu_destroy)(struct intel_vgpu *vgpu);
- void (*vgpu_release)(struct intel_vgpu *vgpu);
- void (*vgpu_reset)(struct intel_vgpu *);
- void (*vgpu_activate)(struct intel_vgpu *);
- void (*vgpu_deactivate)(struct intel_vgpu *);
- int (*vgpu_query_plane)(struct intel_vgpu *vgpu, void *);
- int (*vgpu_get_dmabuf)(struct intel_vgpu *vgpu, unsigned int);
- int (*write_protect_handler)(struct intel_vgpu *, u64, void *,
- unsigned int);
- void (*emulate_hotplug)(struct intel_vgpu *vgpu, bool connected);
-};
-
+void intel_vgpu_detach_regions(struct intel_vgpu *vgpu);
enum {
GVT_FAILSAFE_UNSUPPORTED_GUEST,
@@ -724,13 +717,54 @@ static inline bool intel_gvt_mmio_is_cmd_write_patch(
return gvt->mmio.mmio_attribute[offset >> 2] & F_CMD_WRITE_PATCH;
}
+/**
+ * intel_gvt_read_gpa - copy data from GPA to host data buffer
+ * @vgpu: a vGPU
+ * @gpa: guest physical address
+ * @buf: host data buffer
+ * @len: data length
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+static inline int intel_gvt_read_gpa(struct intel_vgpu *vgpu, unsigned long gpa,
+ void *buf, unsigned long len)
+{
+ if (!vgpu->attached)
+ return -ESRCH;
+ return vfio_dma_rw(vgpu->vfio_group, gpa, buf, len, false);
+}
+
+/**
+ * intel_gvt_write_gpa - copy data from host data buffer to GPA
+ * @vgpu: a vGPU
+ * @gpa: guest physical address
+ * @buf: host data buffer
+ * @len: data length
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+static inline int intel_gvt_write_gpa(struct intel_vgpu *vgpu,
+ unsigned long gpa, void *buf, unsigned long len)
+{
+ if (!vgpu->attached)
+ return -ESRCH;
+ return vfio_dma_rw(vgpu->vfio_group, gpa, buf, len, true);
+}
+
void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_debugfs_init(struct intel_gvt *gvt);
void intel_gvt_debugfs_clean(struct intel_gvt *gvt);
-int intel_gvt_pm_resume(struct intel_gvt *gvt);
+int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn);
+int intel_gvt_page_track_remove(struct intel_vgpu *info, u64 gfn);
+int intel_gvt_dma_pin_guest_page(struct intel_vgpu *vgpu, dma_addr_t dma_addr);
+int intel_gvt_dma_map_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
+ unsigned long size, dma_addr_t *dma_addr);
+void intel_gvt_dma_unmap_guest_page(struct intel_vgpu *vgpu,
+ dma_addr_t dma_addr);
#include "trace.h"
-#include "mpt.h"
#endif
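
With the MPT layer gone, guest memory access goes straight through VFIO; the inline helpers above return -ESRCH when no guest is attached. A small usage sketch mirroring the cmd_parser.c hunk earlier in this diff (read_ctx_ctl() is illustrative only):

static int read_ctx_ctl(struct intel_vgpu *vgpu, u64 ring_context_gpa,
			u32 *ctx_sr_ctl)
{
	/* Bounces through vfio_dma_rw() on the vGPU's VFIO group. */
	return intel_gvt_read_gpa(vgpu, ring_context_gpa + 12,
				  ctx_sr_ctl, sizeof(*ctx_sr_ctl));
}
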
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 57b0f4977760..beea5895e499 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -72,7 +72,7 @@ unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt)
return 0;
}
-bool intel_gvt_match_device(struct intel_gvt *gvt,
+static bool intel_gvt_match_device(struct intel_gvt *gvt,
unsigned long device)
{
return intel_gvt_get_device_type(gvt) & device;
@@ -102,12 +102,11 @@ struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
return NULL;
}
-static int new_mmio_info(struct intel_gvt *gvt,
- u32 offset, u16 flags, u32 size,
- u32 addr_mask, u32 ro_mask, u32 device,
- gvt_mmio_func read, gvt_mmio_func write)
+static int setup_mmio_info(struct intel_gvt *gvt, u32 offset, u32 size,
+ u16 flags, u32 addr_mask, u32 ro_mask, u32 device,
+ gvt_mmio_func read, gvt_mmio_func write)
{
- struct intel_gvt_mmio_info *info, *p;
+ struct intel_gvt_mmio_info *p;
u32 start, end, i;
if (!intel_gvt_match_device(gvt, device))
@@ -120,32 +119,18 @@ static int new_mmio_info(struct intel_gvt *gvt,
end = offset + size;
for (i = start; i < end; i += 4) {
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
- return -ENOMEM;
-
- info->offset = i;
- p = intel_gvt_find_mmio_info(gvt, info->offset);
- if (p) {
- WARN(1, "dup mmio definition offset %x\n",
- info->offset);
- kfree(info);
-
- /* We return -EEXIST here to make GVT-g load fail.
- * So duplicated MMIO can be found as soon as
- * possible.
- */
- return -EEXIST;
+ p = intel_gvt_find_mmio_info(gvt, i);
+ if (!p) {
+ WARN(1, "assign a handler to a non-tracked mmio %x\n",
+ i);
+ return -ENODEV;
}
-
- info->ro_mask = ro_mask;
- info->device = device;
- info->read = read ? read : intel_vgpu_default_mmio_read;
- info->write = write ? write : intel_vgpu_default_mmio_write;
- gvt->mmio.mmio_attribute[info->offset / 4] = flags;
- INIT_HLIST_NODE(&info->node);
- hash_add(gvt->mmio.mmio_info_table, &info->node, info->offset);
- gvt->mmio.num_tracked_mmio++;
+ p->ro_mask = ro_mask;
+ gvt->mmio.mmio_attribute[i / 4] = flags;
+ if (read)
+ p->read = read;
+ if (write)
+ p->write = write;
}
return 0;
}
@@ -2143,15 +2128,12 @@ static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu,
}
#define MMIO_F(reg, s, f, am, rm, d, r, w) do { \
- ret = new_mmio_info(gvt, i915_mmio_reg_offset(reg), \
- f, s, am, rm, d, r, w); \
+ ret = setup_mmio_info(gvt, i915_mmio_reg_offset(reg), \
+ s, f, am, rm, d, r, w); \
if (ret) \
return ret; \
} while (0)
-#define MMIO_D(reg, d) \
- MMIO_F(reg, 4, 0, 0, 0, d, NULL, NULL)
-
#define MMIO_DH(reg, d, r, w) \
MMIO_F(reg, 4, 0, 0, 0, d, r, w)
@@ -2176,9 +2158,6 @@ static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu,
MMIO_F(prefix(GEN8_BSD2_RING_BASE), s, f, am, rm, d, r, w); \
} while (0)
-#define MMIO_RING_D(prefix, d) \
- MMIO_RING_F(prefix, 4, 0, 0, 0, d, NULL, NULL)
-
#define MMIO_RING_DFH(prefix, d, f, r, w) \
MMIO_RING_F(prefix, 4, f, 0, 0, d, r, w)
@@ -2202,7 +2181,6 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(SDEIMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler);
MMIO_DFH(SDEIER, D_ALL, 0, NULL, intel_vgpu_reg_ier_handler);
MMIO_DFH(SDEIIR, D_ALL, 0, NULL, intel_vgpu_reg_iir_handler);
- MMIO_D(SDEISR, D_ALL);
MMIO_RING_DFH(RING_HWSTAM, D_ALL, 0, NULL, NULL);
@@ -2230,7 +2208,6 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_GM_RDR(_MMIO(0x2148), D_ALL, NULL, NULL);
MMIO_GM_RDR(CCID(RENDER_RING_BASE), D_ALL, NULL, NULL);
MMIO_GM_RDR(_MMIO(0x12198), D_ALL, NULL, NULL);
- MMIO_D(GEN7_CXT_SIZE, D_ALL);
MMIO_RING_DFH(RING_TAIL, D_ALL, 0, NULL, NULL);
MMIO_RING_DFH(RING_HEAD, D_ALL, 0, NULL, NULL);
@@ -2284,257 +2261,32 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(GEN7_HALF_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
/* display */
- MMIO_F(_MMIO(0x60220), 0x20, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_D(_MMIO(0x602a0), D_ALL);
-
- MMIO_D(_MMIO(0x65050), D_ALL);
- MMIO_D(_MMIO(0x650b4), D_ALL);
-
- MMIO_D(_MMIO(0xc4040), D_ALL);
- MMIO_D(DERRMR, D_ALL);
-
- MMIO_D(PIPEDSL(PIPE_A), D_ALL);
- MMIO_D(PIPEDSL(PIPE_B), D_ALL);
- MMIO_D(PIPEDSL(PIPE_C), D_ALL);
- MMIO_D(PIPEDSL(_PIPE_EDP), D_ALL);
-
MMIO_DH(PIPECONF(PIPE_A), D_ALL, NULL, pipeconf_mmio_write);
MMIO_DH(PIPECONF(PIPE_B), D_ALL, NULL, pipeconf_mmio_write);
MMIO_DH(PIPECONF(PIPE_C), D_ALL, NULL, pipeconf_mmio_write);
MMIO_DH(PIPECONF(_PIPE_EDP), D_ALL, NULL, pipeconf_mmio_write);
-
- MMIO_D(PIPESTAT(PIPE_A), D_ALL);
- MMIO_D(PIPESTAT(PIPE_B), D_ALL);
- MMIO_D(PIPESTAT(PIPE_C), D_ALL);
- MMIO_D(PIPESTAT(_PIPE_EDP), D_ALL);
-
- MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_A), D_ALL);
- MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_B), D_ALL);
- MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_C), D_ALL);
- MMIO_D(PIPE_FLIPCOUNT_G4X(_PIPE_EDP), D_ALL);
-
- MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_A), D_ALL);
- MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_B), D_ALL);
- MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_C), D_ALL);
- MMIO_D(PIPE_FRMCOUNT_G4X(_PIPE_EDP), D_ALL);
-
- MMIO_D(CURCNTR(PIPE_A), D_ALL);
- MMIO_D(CURCNTR(PIPE_B), D_ALL);
- MMIO_D(CURCNTR(PIPE_C), D_ALL);
-
- MMIO_D(CURPOS(PIPE_A), D_ALL);
- MMIO_D(CURPOS(PIPE_B), D_ALL);
- MMIO_D(CURPOS(PIPE_C), D_ALL);
-
- MMIO_D(CURBASE(PIPE_A), D_ALL);
- MMIO_D(CURBASE(PIPE_B), D_ALL);
- MMIO_D(CURBASE(PIPE_C), D_ALL);
-
- MMIO_D(CUR_FBC_CTL(PIPE_A), D_ALL);
- MMIO_D(CUR_FBC_CTL(PIPE_B), D_ALL);
- MMIO_D(CUR_FBC_CTL(PIPE_C), D_ALL);
-
- MMIO_D(_MMIO(0x700ac), D_ALL);
- MMIO_D(_MMIO(0x710ac), D_ALL);
- MMIO_D(_MMIO(0x720ac), D_ALL);
-
- MMIO_D(_MMIO(0x70090), D_ALL);
- MMIO_D(_MMIO(0x70094), D_ALL);
- MMIO_D(_MMIO(0x70098), D_ALL);
- MMIO_D(_MMIO(0x7009c), D_ALL);
-
- MMIO_D(DSPCNTR(PIPE_A), D_ALL);
- MMIO_D(DSPADDR(PIPE_A), D_ALL);
- MMIO_D(DSPSTRIDE(PIPE_A), D_ALL);
- MMIO_D(DSPPOS(PIPE_A), D_ALL);
- MMIO_D(DSPSIZE(PIPE_A), D_ALL);
MMIO_DH(DSPSURF(PIPE_A), D_ALL, NULL, pri_surf_mmio_write);
- MMIO_D(DSPOFFSET(PIPE_A), D_ALL);
- MMIO_D(DSPSURFLIVE(PIPE_A), D_ALL);
MMIO_DH(REG_50080(PIPE_A, PLANE_PRIMARY), D_ALL, NULL,
reg50080_mmio_write);
-
- MMIO_D(DSPCNTR(PIPE_B), D_ALL);
- MMIO_D(DSPADDR(PIPE_B), D_ALL);
- MMIO_D(DSPSTRIDE(PIPE_B), D_ALL);
- MMIO_D(DSPPOS(PIPE_B), D_ALL);
- MMIO_D(DSPSIZE(PIPE_B), D_ALL);
MMIO_DH(DSPSURF(PIPE_B), D_ALL, NULL, pri_surf_mmio_write);
- MMIO_D(DSPOFFSET(PIPE_B), D_ALL);
- MMIO_D(DSPSURFLIVE(PIPE_B), D_ALL);
MMIO_DH(REG_50080(PIPE_B, PLANE_PRIMARY), D_ALL, NULL,
reg50080_mmio_write);
-
- MMIO_D(DSPCNTR(PIPE_C), D_ALL);
- MMIO_D(DSPADDR(PIPE_C), D_ALL);
- MMIO_D(DSPSTRIDE(PIPE_C), D_ALL);
- MMIO_D(DSPPOS(PIPE_C), D_ALL);
- MMIO_D(DSPSIZE(PIPE_C), D_ALL);
MMIO_DH(DSPSURF(PIPE_C), D_ALL, NULL, pri_surf_mmio_write);
- MMIO_D(DSPOFFSET(PIPE_C), D_ALL);
- MMIO_D(DSPSURFLIVE(PIPE_C), D_ALL);
MMIO_DH(REG_50080(PIPE_C, PLANE_PRIMARY), D_ALL, NULL,
reg50080_mmio_write);
-
- MMIO_D(SPRCTL(PIPE_A), D_ALL);
- MMIO_D(SPRLINOFF(PIPE_A), D_ALL);
- MMIO_D(SPRSTRIDE(PIPE_A), D_ALL);
- MMIO_D(SPRPOS(PIPE_A), D_ALL);
- MMIO_D(SPRSIZE(PIPE_A), D_ALL);
- MMIO_D(SPRKEYVAL(PIPE_A), D_ALL);
- MMIO_D(SPRKEYMSK(PIPE_A), D_ALL);
MMIO_DH(SPRSURF(PIPE_A), D_ALL, NULL, spr_surf_mmio_write);
- MMIO_D(SPRKEYMAX(PIPE_A), D_ALL);
- MMIO_D(SPROFFSET(PIPE_A), D_ALL);
- MMIO_D(SPRSCALE(PIPE_A), D_ALL);
- MMIO_D(SPRSURFLIVE(PIPE_A), D_ALL);
MMIO_DH(REG_50080(PIPE_A, PLANE_SPRITE0), D_ALL, NULL,
reg50080_mmio_write);
-
- MMIO_D(SPRCTL(PIPE_B), D_ALL);
- MMIO_D(SPRLINOFF(PIPE_B), D_ALL);
- MMIO_D(SPRSTRIDE(PIPE_B), D_ALL);
- MMIO_D(SPRPOS(PIPE_B), D_ALL);
- MMIO_D(SPRSIZE(PIPE_B), D_ALL);
- MMIO_D(SPRKEYVAL(PIPE_B), D_ALL);
- MMIO_D(SPRKEYMSK(PIPE_B), D_ALL);
MMIO_DH(SPRSURF(PIPE_B), D_ALL, NULL, spr_surf_mmio_write);
- MMIO_D(SPRKEYMAX(PIPE_B), D_ALL);
- MMIO_D(SPROFFSET(PIPE_B), D_ALL);
- MMIO_D(SPRSCALE(PIPE_B), D_ALL);
- MMIO_D(SPRSURFLIVE(PIPE_B), D_ALL);
MMIO_DH(REG_50080(PIPE_B, PLANE_SPRITE0), D_ALL, NULL,
reg50080_mmio_write);
-
- MMIO_D(SPRCTL(PIPE_C), D_ALL);
- MMIO_D(SPRLINOFF(PIPE_C), D_ALL);
- MMIO_D(SPRSTRIDE(PIPE_C), D_ALL);
- MMIO_D(SPRPOS(PIPE_C), D_ALL);
- MMIO_D(SPRSIZE(PIPE_C), D_ALL);
- MMIO_D(SPRKEYVAL(PIPE_C), D_ALL);
- MMIO_D(SPRKEYMSK(PIPE_C), D_ALL);
MMIO_DH(SPRSURF(PIPE_C), D_ALL, NULL, spr_surf_mmio_write);
- MMIO_D(SPRKEYMAX(PIPE_C), D_ALL);
- MMIO_D(SPROFFSET(PIPE_C), D_ALL);
- MMIO_D(SPRSCALE(PIPE_C), D_ALL);
- MMIO_D(SPRSURFLIVE(PIPE_C), D_ALL);
MMIO_DH(REG_50080(PIPE_C, PLANE_SPRITE0), D_ALL, NULL,
reg50080_mmio_write);
- MMIO_D(HTOTAL(TRANSCODER_A), D_ALL);
- MMIO_D(HBLANK(TRANSCODER_A), D_ALL);
- MMIO_D(HSYNC(TRANSCODER_A), D_ALL);
- MMIO_D(VTOTAL(TRANSCODER_A), D_ALL);
- MMIO_D(VBLANK(TRANSCODER_A), D_ALL);
- MMIO_D(VSYNC(TRANSCODER_A), D_ALL);
- MMIO_D(BCLRPAT(TRANSCODER_A), D_ALL);
- MMIO_D(VSYNCSHIFT(TRANSCODER_A), D_ALL);
- MMIO_D(PIPESRC(TRANSCODER_A), D_ALL);
-
- MMIO_D(HTOTAL(TRANSCODER_B), D_ALL);
- MMIO_D(HBLANK(TRANSCODER_B), D_ALL);
- MMIO_D(HSYNC(TRANSCODER_B), D_ALL);
- MMIO_D(VTOTAL(TRANSCODER_B), D_ALL);
- MMIO_D(VBLANK(TRANSCODER_B), D_ALL);
- MMIO_D(VSYNC(TRANSCODER_B), D_ALL);
- MMIO_D(BCLRPAT(TRANSCODER_B), D_ALL);
- MMIO_D(VSYNCSHIFT(TRANSCODER_B), D_ALL);
- MMIO_D(PIPESRC(TRANSCODER_B), D_ALL);
-
- MMIO_D(HTOTAL(TRANSCODER_C), D_ALL);
- MMIO_D(HBLANK(TRANSCODER_C), D_ALL);
- MMIO_D(HSYNC(TRANSCODER_C), D_ALL);
- MMIO_D(VTOTAL(TRANSCODER_C), D_ALL);
- MMIO_D(VBLANK(TRANSCODER_C), D_ALL);
- MMIO_D(VSYNC(TRANSCODER_C), D_ALL);
- MMIO_D(BCLRPAT(TRANSCODER_C), D_ALL);
- MMIO_D(VSYNCSHIFT(TRANSCODER_C), D_ALL);
- MMIO_D(PIPESRC(TRANSCODER_C), D_ALL);
-
- MMIO_D(HTOTAL(TRANSCODER_EDP), D_ALL);
- MMIO_D(HBLANK(TRANSCODER_EDP), D_ALL);
- MMIO_D(HSYNC(TRANSCODER_EDP), D_ALL);
- MMIO_D(VTOTAL(TRANSCODER_EDP), D_ALL);
- MMIO_D(VBLANK(TRANSCODER_EDP), D_ALL);
- MMIO_D(VSYNC(TRANSCODER_EDP), D_ALL);
- MMIO_D(BCLRPAT(TRANSCODER_EDP), D_ALL);
- MMIO_D(VSYNCSHIFT(TRANSCODER_EDP), D_ALL);
-
- MMIO_D(PIPE_DATA_M1(TRANSCODER_A), D_ALL);
- MMIO_D(PIPE_DATA_N1(TRANSCODER_A), D_ALL);
- MMIO_D(PIPE_DATA_M2(TRANSCODER_A), D_ALL);
- MMIO_D(PIPE_DATA_N2(TRANSCODER_A), D_ALL);
- MMIO_D(PIPE_LINK_M1(TRANSCODER_A), D_ALL);
- MMIO_D(PIPE_LINK_N1(TRANSCODER_A), D_ALL);
- MMIO_D(PIPE_LINK_M2(TRANSCODER_A), D_ALL);
- MMIO_D(PIPE_LINK_N2(TRANSCODER_A), D_ALL);
-
- MMIO_D(PIPE_DATA_M1(TRANSCODER_B), D_ALL);
- MMIO_D(PIPE_DATA_N1(TRANSCODER_B), D_ALL);
- MMIO_D(PIPE_DATA_M2(TRANSCODER_B), D_ALL);
- MMIO_D(PIPE_DATA_N2(TRANSCODER_B), D_ALL);
- MMIO_D(PIPE_LINK_M1(TRANSCODER_B), D_ALL);
- MMIO_D(PIPE_LINK_N1(TRANSCODER_B), D_ALL);
- MMIO_D(PIPE_LINK_M2(TRANSCODER_B), D_ALL);
- MMIO_D(PIPE_LINK_N2(TRANSCODER_B), D_ALL);
-
- MMIO_D(PIPE_DATA_M1(TRANSCODER_C), D_ALL);
- MMIO_D(PIPE_DATA_N1(TRANSCODER_C), D_ALL);
- MMIO_D(PIPE_DATA_M2(TRANSCODER_C), D_ALL);
- MMIO_D(PIPE_DATA_N2(TRANSCODER_C), D_ALL);
- MMIO_D(PIPE_LINK_M1(TRANSCODER_C), D_ALL);
- MMIO_D(PIPE_LINK_N1(TRANSCODER_C), D_ALL);
- MMIO_D(PIPE_LINK_M2(TRANSCODER_C), D_ALL);
- MMIO_D(PIPE_LINK_N2(TRANSCODER_C), D_ALL);
-
- MMIO_D(PIPE_DATA_M1(TRANSCODER_EDP), D_ALL);
- MMIO_D(PIPE_DATA_N1(TRANSCODER_EDP), D_ALL);
- MMIO_D(PIPE_DATA_M2(TRANSCODER_EDP), D_ALL);
- MMIO_D(PIPE_DATA_N2(TRANSCODER_EDP), D_ALL);
- MMIO_D(PIPE_LINK_M1(TRANSCODER_EDP), D_ALL);
- MMIO_D(PIPE_LINK_N1(TRANSCODER_EDP), D_ALL);
- MMIO_D(PIPE_LINK_M2(TRANSCODER_EDP), D_ALL);
- MMIO_D(PIPE_LINK_N2(TRANSCODER_EDP), D_ALL);
-
- MMIO_D(PF_CTL(PIPE_A), D_ALL);
- MMIO_D(PF_WIN_SZ(PIPE_A), D_ALL);
- MMIO_D(PF_WIN_POS(PIPE_A), D_ALL);
- MMIO_D(PF_VSCALE(PIPE_A), D_ALL);
- MMIO_D(PF_HSCALE(PIPE_A), D_ALL);
-
- MMIO_D(PF_CTL(PIPE_B), D_ALL);
- MMIO_D(PF_WIN_SZ(PIPE_B), D_ALL);
- MMIO_D(PF_WIN_POS(PIPE_B), D_ALL);
- MMIO_D(PF_VSCALE(PIPE_B), D_ALL);
- MMIO_D(PF_HSCALE(PIPE_B), D_ALL);
-
- MMIO_D(PF_CTL(PIPE_C), D_ALL);
- MMIO_D(PF_WIN_SZ(PIPE_C), D_ALL);
- MMIO_D(PF_WIN_POS(PIPE_C), D_ALL);
- MMIO_D(PF_VSCALE(PIPE_C), D_ALL);
- MMIO_D(PF_HSCALE(PIPE_C), D_ALL);
-
- MMIO_D(WM0_PIPE_ILK(PIPE_A), D_ALL);
- MMIO_D(WM0_PIPE_ILK(PIPE_B), D_ALL);
- MMIO_D(WM0_PIPE_ILK(PIPE_C), D_ALL);
- MMIO_D(WM1_LP_ILK, D_ALL);
- MMIO_D(WM2_LP_ILK, D_ALL);
- MMIO_D(WM3_LP_ILK, D_ALL);
- MMIO_D(WM1S_LP_ILK, D_ALL);
- MMIO_D(WM2S_LP_IVB, D_ALL);
- MMIO_D(WM3S_LP_IVB, D_ALL);
-
- MMIO_D(BLC_PWM_CPU_CTL2, D_ALL);
- MMIO_D(BLC_PWM_CPU_CTL, D_ALL);
- MMIO_D(BLC_PWM_PCH_CTL1, D_ALL);
- MMIO_D(BLC_PWM_PCH_CTL2, D_ALL);
-
- MMIO_D(_MMIO(0x48268), D_ALL);
-
MMIO_F(PCH_GMBUS0, 4 * 4, 0, 0, 0, D_ALL, gmbus_mmio_read,
gmbus_mmio_write);
MMIO_F(PCH_GPIO_BASE, 6 * 4, F_UNALIGN, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(_MMIO(0xe4f00), 0x28, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_F(_MMIO(_PCH_DPB_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
dp_aux_ch_ctl_mmio_write);
@@ -2557,74 +2309,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DH(FDI_RX_CTL(PIPE_A), D_ALL, NULL, update_fdi_rx_iir_status);
MMIO_DH(FDI_RX_CTL(PIPE_B), D_ALL, NULL, update_fdi_rx_iir_status);
MMIO_DH(FDI_RX_CTL(PIPE_C), D_ALL, NULL, update_fdi_rx_iir_status);
-
- MMIO_D(_MMIO(_PCH_TRANS_HTOTAL_A), D_ALL);
- MMIO_D(_MMIO(_PCH_TRANS_HBLANK_A), D_ALL);
- MMIO_D(_MMIO(_PCH_TRANS_HSYNC_A), D_ALL);
- MMIO_D(_MMIO(_PCH_TRANS_VTOTAL_A), D_ALL);
- MMIO_D(_MMIO(_PCH_TRANS_VBLANK_A), D_ALL);
- MMIO_D(_MMIO(_PCH_TRANS_VSYNC_A), D_ALL);
- MMIO_D(_MMIO(_PCH_TRANS_VSYNCSHIFT_A), D_ALL);
-
- MMIO_D(_MMIO(_PCH_TRANS_HTOTAL_B), D_ALL);
- MMIO_D(_MMIO(_PCH_TRANS_HBLANK_B), D_ALL);
- MMIO_D(_MMIO(_PCH_TRANS_HSYNC_B), D_ALL);
- MMIO_D(_MMIO(_PCH_TRANS_VTOTAL_B), D_ALL);
- MMIO_D(_MMIO(_PCH_TRANS_VBLANK_B), D_ALL);
- MMIO_D(_MMIO(_PCH_TRANS_VSYNC_B), D_ALL);
- MMIO_D(_MMIO(_PCH_TRANS_VSYNCSHIFT_B), D_ALL);
-
- MMIO_D(_MMIO(_PCH_TRANSA_DATA_M1), D_ALL);
- MMIO_D(_MMIO(_PCH_TRANSA_DATA_N1), D_ALL);
- MMIO_D(_MMIO(_PCH_TRANSA_DATA_M2), D_ALL);
- MMIO_D(_MMIO(_PCH_TRANSA_DATA_N2), D_ALL);
- MMIO_D(_MMIO(_PCH_TRANSA_LINK_M1), D_ALL);
- MMIO_D(_MMIO(_PCH_TRANSA_LINK_N1), D_ALL);
- MMIO_D(_MMIO(_PCH_TRANSA_LINK_M2), D_ALL);
- MMIO_D(_MMIO(_PCH_TRANSA_LINK_N2), D_ALL);
-
- MMIO_D(TRANS_DP_CTL(PIPE_A), D_ALL);
- MMIO_D(TRANS_DP_CTL(PIPE_B), D_ALL);
- MMIO_D(TRANS_DP_CTL(PIPE_C), D_ALL);
-
- MMIO_D(TVIDEO_DIP_CTL(PIPE_A), D_ALL);
- MMIO_D(TVIDEO_DIP_DATA(PIPE_A), D_ALL);
- MMIO_D(TVIDEO_DIP_GCP(PIPE_A), D_ALL);
-
- MMIO_D(TVIDEO_DIP_CTL(PIPE_B), D_ALL);
- MMIO_D(TVIDEO_DIP_DATA(PIPE_B), D_ALL);
- MMIO_D(TVIDEO_DIP_GCP(PIPE_B), D_ALL);
-
- MMIO_D(TVIDEO_DIP_CTL(PIPE_C), D_ALL);
- MMIO_D(TVIDEO_DIP_DATA(PIPE_C), D_ALL);
- MMIO_D(TVIDEO_DIP_GCP(PIPE_C), D_ALL);
-
- MMIO_D(_MMIO(_FDI_RXA_MISC), D_ALL);
- MMIO_D(_MMIO(_FDI_RXB_MISC), D_ALL);
- MMIO_D(_MMIO(_FDI_RXA_TUSIZE1), D_ALL);
- MMIO_D(_MMIO(_FDI_RXA_TUSIZE2), D_ALL);
- MMIO_D(_MMIO(_FDI_RXB_TUSIZE1), D_ALL);
- MMIO_D(_MMIO(_FDI_RXB_TUSIZE2), D_ALL);
-
MMIO_DH(PCH_PP_CONTROL, D_ALL, NULL, pch_pp_control_mmio_write);
- MMIO_D(PCH_PP_DIVISOR, D_ALL);
- MMIO_D(PCH_PP_STATUS, D_ALL);
- MMIO_D(PCH_LVDS, D_ALL);
- MMIO_D(_MMIO(_PCH_DPLL_A), D_ALL);
- MMIO_D(_MMIO(_PCH_DPLL_B), D_ALL);
- MMIO_D(_MMIO(_PCH_FPA0), D_ALL);
- MMIO_D(_MMIO(_PCH_FPA1), D_ALL);
- MMIO_D(_MMIO(_PCH_FPB0), D_ALL);
- MMIO_D(_MMIO(_PCH_FPB1), D_ALL);
- MMIO_D(PCH_DREF_CONTROL, D_ALL);
- MMIO_D(PCH_RAWCLK_FREQ, D_ALL);
- MMIO_D(PCH_DPLL_SEL, D_ALL);
-
- MMIO_D(_MMIO(0x61208), D_ALL);
- MMIO_D(_MMIO(0x6120c), D_ALL);
- MMIO_D(PCH_PP_ON_DELAYS, D_ALL);
- MMIO_D(PCH_PP_OFF_DELAYS, D_ALL);
-
MMIO_DH(_MMIO(0xe651c), D_ALL, dpy_reg_mmio_read, NULL);
MMIO_DH(_MMIO(0xe661c), D_ALL, dpy_reg_mmio_read, NULL);
MMIO_DH(_MMIO(0xe671c), D_ALL, dpy_reg_mmio_read, NULL);
@@ -2640,143 +2325,10 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
NULL, NULL);
MMIO_DH(LCPLL_CTL, D_ALL, NULL, lcpll_ctl_mmio_write);
- MMIO_D(FUSE_STRAP, D_ALL);
- MMIO_D(DIGITAL_PORT_HOTPLUG_CNTRL, D_ALL);
-
- MMIO_D(DISP_ARB_CTL, D_ALL);
- MMIO_D(DISP_ARB_CTL2, D_ALL);
-
- MMIO_D(ILK_DISPLAY_CHICKEN1, D_ALL);
- MMIO_D(ILK_DISPLAY_CHICKEN2, D_ALL);
- MMIO_D(ILK_DSPCLK_GATE_D, D_ALL);
-
- MMIO_D(SOUTH_CHICKEN1, D_ALL);
MMIO_DH(SOUTH_CHICKEN2, D_ALL, NULL, south_chicken2_mmio_write);
- MMIO_D(_MMIO(_TRANSA_CHICKEN1), D_ALL);
- MMIO_D(_MMIO(_TRANSB_CHICKEN1), D_ALL);
- MMIO_D(SOUTH_DSPCLK_GATE_D, D_ALL);
- MMIO_D(_MMIO(_TRANSA_CHICKEN2), D_ALL);
- MMIO_D(_MMIO(_TRANSB_CHICKEN2), D_ALL);
-
- MMIO_D(ILK_DPFC_CB_BASE(INTEL_FBC_A), D_ALL);
- MMIO_D(ILK_DPFC_CONTROL(INTEL_FBC_A), D_ALL);
- MMIO_D(ILK_DPFC_RECOMP_CTL(INTEL_FBC_A), D_ALL);
- MMIO_D(ILK_DPFC_STATUS(INTEL_FBC_A), D_ALL);
- MMIO_D(ILK_DPFC_FENCE_YOFF(INTEL_FBC_A), D_ALL);
- MMIO_D(ILK_DPFC_CHICKEN(INTEL_FBC_A), D_ALL);
- MMIO_D(ILK_FBC_RT_BASE, D_ALL);
-
- MMIO_D(IPS_CTL, D_ALL);
-
- MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_A), D_ALL);
- MMIO_D(PIPE_CSC_COEFF_BY(PIPE_A), D_ALL);
- MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_A), D_ALL);
- MMIO_D(PIPE_CSC_COEFF_BU(PIPE_A), D_ALL);
- MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_A), D_ALL);
- MMIO_D(PIPE_CSC_COEFF_BV(PIPE_A), D_ALL);
- MMIO_D(PIPE_CSC_MODE(PIPE_A), D_ALL);
- MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_A), D_ALL);
- MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_A), D_ALL);
- MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_A), D_ALL);
- MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_A), D_ALL);
- MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_A), D_ALL);
- MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_A), D_ALL);
-
- MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_B), D_ALL);
- MMIO_D(PIPE_CSC_COEFF_BY(PIPE_B), D_ALL);
- MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_B), D_ALL);
- MMIO_D(PIPE_CSC_COEFF_BU(PIPE_B), D_ALL);
- MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_B), D_ALL);
- MMIO_D(PIPE_CSC_COEFF_BV(PIPE_B), D_ALL);
- MMIO_D(PIPE_CSC_MODE(PIPE_B), D_ALL);
- MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_B), D_ALL);
- MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_B), D_ALL);
- MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_B), D_ALL);
- MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_B), D_ALL);
- MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_B), D_ALL);
- MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_B), D_ALL);
-
- MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_C), D_ALL);
- MMIO_D(PIPE_CSC_COEFF_BY(PIPE_C), D_ALL);
- MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_C), D_ALL);
- MMIO_D(PIPE_CSC_COEFF_BU(PIPE_C), D_ALL);
- MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_C), D_ALL);
- MMIO_D(PIPE_CSC_COEFF_BV(PIPE_C), D_ALL);
- MMIO_D(PIPE_CSC_MODE(PIPE_C), D_ALL);
- MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_C), D_ALL);
- MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_C), D_ALL);
- MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_C), D_ALL);
- MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_C), D_ALL);
- MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_C), D_ALL);
- MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_C), D_ALL);
-
- MMIO_D(PREC_PAL_INDEX(PIPE_A), D_ALL);
- MMIO_D(PREC_PAL_DATA(PIPE_A), D_ALL);
- MMIO_F(PREC_PAL_GC_MAX(PIPE_A, 0), 4 * 3, 0, 0, 0, D_ALL, NULL, NULL);
-
- MMIO_D(PREC_PAL_INDEX(PIPE_B), D_ALL);
- MMIO_D(PREC_PAL_DATA(PIPE_B), D_ALL);
- MMIO_F(PREC_PAL_GC_MAX(PIPE_B, 0), 4 * 3, 0, 0, 0, D_ALL, NULL, NULL);
-
- MMIO_D(PREC_PAL_INDEX(PIPE_C), D_ALL);
- MMIO_D(PREC_PAL_DATA(PIPE_C), D_ALL);
- MMIO_F(PREC_PAL_GC_MAX(PIPE_C, 0), 4 * 3, 0, 0, 0, D_ALL, NULL, NULL);
-
- MMIO_D(_MMIO(0x60110), D_ALL);
- MMIO_D(_MMIO(0x61110), D_ALL);
- MMIO_F(_MMIO(0x70400), 0x40, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(_MMIO(0x71400), 0x40, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(_MMIO(0x72400), 0x40, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(_MMIO(0x70440), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
- MMIO_F(_MMIO(0x71440), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
- MMIO_F(_MMIO(0x72440), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
- MMIO_F(_MMIO(0x7044c), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
- MMIO_F(_MMIO(0x7144c), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
- MMIO_F(_MMIO(0x7244c), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
-
- MMIO_D(WM_LINETIME(PIPE_A), D_ALL);
- MMIO_D(WM_LINETIME(PIPE_B), D_ALL);
- MMIO_D(WM_LINETIME(PIPE_C), D_ALL);
- MMIO_D(SPLL_CTL, D_ALL);
- MMIO_D(_MMIO(_WRPLL_CTL1), D_ALL);
- MMIO_D(_MMIO(_WRPLL_CTL2), D_ALL);
- MMIO_D(PORT_CLK_SEL(PORT_A), D_ALL);
- MMIO_D(PORT_CLK_SEL(PORT_B), D_ALL);
- MMIO_D(PORT_CLK_SEL(PORT_C), D_ALL);
- MMIO_D(PORT_CLK_SEL(PORT_D), D_ALL);
- MMIO_D(PORT_CLK_SEL(PORT_E), D_ALL);
- MMIO_D(TRANS_CLK_SEL(TRANSCODER_A), D_ALL);
- MMIO_D(TRANS_CLK_SEL(TRANSCODER_B), D_ALL);
- MMIO_D(TRANS_CLK_SEL(TRANSCODER_C), D_ALL);
-
- MMIO_D(HSW_NDE_RSTWRN_OPT, D_ALL);
- MMIO_D(_MMIO(0x46508), D_ALL);
-
- MMIO_D(_MMIO(0x49080), D_ALL);
- MMIO_D(_MMIO(0x49180), D_ALL);
- MMIO_D(_MMIO(0x49280), D_ALL);
-
- MMIO_F(_MMIO(0x49090), 0x14, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(_MMIO(0x49190), 0x14, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(_MMIO(0x49290), 0x14, 0, 0, 0, D_ALL, NULL, NULL);
-
- MMIO_D(GAMMA_MODE(PIPE_A), D_ALL);
- MMIO_D(GAMMA_MODE(PIPE_B), D_ALL);
- MMIO_D(GAMMA_MODE(PIPE_C), D_ALL);
-
- MMIO_D(PIPE_MULT(PIPE_A), D_ALL);
- MMIO_D(PIPE_MULT(PIPE_B), D_ALL);
- MMIO_D(PIPE_MULT(PIPE_C), D_ALL);
-
- MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_A), D_ALL);
- MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_B), D_ALL);
- MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_C), D_ALL);
-
MMIO_DH(SFUSE_STRAP, D_ALL, NULL, NULL);
- MMIO_D(SBI_ADDR, D_ALL);
MMIO_DH(SBI_DATA, D_ALL, sbi_data_mmio_read, NULL);
MMIO_DH(SBI_CTL_STAT, D_ALL, NULL, sbi_ctl_mmio_write);
- MMIO_D(PIXCLK_GATE, D_ALL);
MMIO_F(_MMIO(_DPA_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_ALL, NULL,
dp_aux_ch_ctl_mmio_write);
@@ -2799,65 +2351,18 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DH(DP_TP_STATUS(PORT_D), D_ALL, NULL, dp_tp_status_mmio_write);
MMIO_DH(DP_TP_STATUS(PORT_E), D_ALL, NULL, NULL);
- MMIO_F(_MMIO(_DDI_BUF_TRANS_A), 0x50, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(_MMIO(0x64e60), 0x50, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(_MMIO(0x64eC0), 0x50, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(_MMIO(0x64f20), 0x50, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(_MMIO(0x64f80), 0x50, 0, 0, 0, D_ALL, NULL, NULL);
-
- MMIO_D(HSW_AUD_CFG(PIPE_A), D_ALL);
- MMIO_D(HSW_AUD_PIN_ELD_CP_VLD, D_ALL);
- MMIO_D(HSW_AUD_MISC_CTRL(PIPE_A), D_ALL);
-
MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_A), D_ALL, NULL, NULL);
MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_B), D_ALL, NULL, NULL);
MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_C), D_ALL, NULL, NULL);
MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_EDP), D_ALL, NULL, NULL);
- MMIO_D(_MMIO(_TRANSA_MSA_MISC), D_ALL);
- MMIO_D(_MMIO(_TRANSB_MSA_MISC), D_ALL);
- MMIO_D(_MMIO(_TRANSC_MSA_MISC), D_ALL);
- MMIO_D(_MMIO(_TRANS_EDP_MSA_MISC), D_ALL);
-
MMIO_DH(FORCEWAKE, D_ALL, NULL, NULL);
- MMIO_D(FORCEWAKE_ACK, D_ALL);
- MMIO_D(GEN6_GT_CORE_STATUS, D_ALL);
- MMIO_D(GEN6_GT_THREAD_STATUS_REG, D_ALL);
MMIO_DFH(GTFIFODBG, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(GTFIFOCTL, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DH(FORCEWAKE_MT, D_PRE_SKL, NULL, mul_force_wake_write);
MMIO_DH(FORCEWAKE_ACK_HSW, D_BDW, NULL, NULL);
- MMIO_D(ECOBUS, D_ALL);
MMIO_DH(GEN6_RC_CONTROL, D_ALL, NULL, NULL);
MMIO_DH(GEN6_RC_STATE, D_ALL, NULL, NULL);
- MMIO_D(GEN6_RPNSWREQ, D_ALL);
- MMIO_D(GEN6_RC_VIDEO_FREQ, D_ALL);
- MMIO_D(GEN6_RP_DOWN_TIMEOUT, D_ALL);
- MMIO_D(GEN6_RP_INTERRUPT_LIMITS, D_ALL);
- MMIO_D(GEN6_RPSTAT1, D_ALL);
- MMIO_D(GEN6_RP_CONTROL, D_ALL);
- MMIO_D(GEN6_RP_UP_THRESHOLD, D_ALL);
- MMIO_D(GEN6_RP_DOWN_THRESHOLD, D_ALL);
- MMIO_D(GEN6_RP_CUR_UP_EI, D_ALL);
- MMIO_D(GEN6_RP_CUR_UP, D_ALL);
- MMIO_D(GEN6_RP_PREV_UP, D_ALL);
- MMIO_D(GEN6_RP_CUR_DOWN_EI, D_ALL);
- MMIO_D(GEN6_RP_CUR_DOWN, D_ALL);
- MMIO_D(GEN6_RP_PREV_DOWN, D_ALL);
- MMIO_D(GEN6_RP_UP_EI, D_ALL);
- MMIO_D(GEN6_RP_DOWN_EI, D_ALL);
- MMIO_D(GEN6_RP_IDLE_HYSTERSIS, D_ALL);
- MMIO_D(GEN6_RC1_WAKE_RATE_LIMIT, D_ALL);
- MMIO_D(GEN6_RC6_WAKE_RATE_LIMIT, D_ALL);
- MMIO_D(GEN6_RC6pp_WAKE_RATE_LIMIT, D_ALL);
- MMIO_D(GEN6_RC_EVALUATION_INTERVAL, D_ALL);
- MMIO_D(GEN6_RC_IDLE_HYSTERSIS, D_ALL);
- MMIO_D(GEN6_RC_SLEEP, D_ALL);
- MMIO_D(GEN6_RC1e_THRESHOLD, D_ALL);
- MMIO_D(GEN6_RC6_THRESHOLD, D_ALL);
- MMIO_D(GEN6_RC6p_THRESHOLD, D_ALL);
- MMIO_D(GEN6_RC6pp_THRESHOLD, D_ALL);
- MMIO_D(GEN6_PMINTRMSK, D_ALL);
MMIO_DH(HSW_PWR_WELL_CTL1, D_BDW, NULL, power_well_ctl_mmio_write);
MMIO_DH(HSW_PWR_WELL_CTL2, D_BDW, NULL, power_well_ctl_mmio_write);
MMIO_DH(HSW_PWR_WELL_CTL3, D_BDW, NULL, power_well_ctl_mmio_write);
@@ -2865,97 +2370,17 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DH(HSW_PWR_WELL_CTL5, D_BDW, NULL, power_well_ctl_mmio_write);
MMIO_DH(HSW_PWR_WELL_CTL6, D_BDW, NULL, power_well_ctl_mmio_write);
- MMIO_D(RSTDBYCTL, D_ALL);
-
MMIO_DH(GEN6_GDRST, D_ALL, NULL, gdrst_mmio_write);
MMIO_F(FENCE_REG_GEN6_LO(0), 0x80, 0, 0, 0, D_ALL, fence_mmio_read, fence_mmio_write);
MMIO_DH(CPU_VGACNTRL, D_ALL, NULL, vga_control_mmio_write);
- MMIO_D(TILECTL, D_ALL);
-
- MMIO_D(GEN6_UCGCTL1, D_ALL);
- MMIO_D(GEN6_UCGCTL2, D_ALL);
-
- MMIO_F(_MMIO(0x4f000), 0x90, 0, 0, 0, D_ALL, NULL, NULL);
-
- MMIO_D(GEN6_PCODE_DATA, D_ALL);
- MMIO_D(_MMIO(0x13812c), D_ALL);
MMIO_DH(GEN7_ERR_INT, D_ALL, NULL, NULL);
- MMIO_D(HSW_EDRAM_CAP, D_ALL);
- MMIO_D(HSW_IDICR, D_ALL);
MMIO_DH(GFX_FLSH_CNTL_GEN6, D_ALL, NULL, NULL);
- MMIO_D(_MMIO(0x3c), D_ALL);
- MMIO_D(_MMIO(0x860), D_ALL);
- MMIO_D(ECOSKPD(RENDER_RING_BASE), D_ALL);
- MMIO_D(_MMIO(0x121d0), D_ALL);
- MMIO_D(ECOSKPD(BLT_RING_BASE), D_ALL);
- MMIO_D(_MMIO(0x41d0), D_ALL);
- MMIO_D(GAC_ECO_BITS, D_ALL);
- MMIO_D(_MMIO(0x6200), D_ALL);
- MMIO_D(_MMIO(0x6204), D_ALL);
- MMIO_D(_MMIO(0x6208), D_ALL);
- MMIO_D(_MMIO(0x7118), D_ALL);
- MMIO_D(_MMIO(0x7180), D_ALL);
- MMIO_D(_MMIO(0x7408), D_ALL);
- MMIO_D(_MMIO(0x7c00), D_ALL);
MMIO_DH(GEN6_MBCTL, D_ALL, NULL, mbctl_write);
- MMIO_D(_MMIO(0x911c), D_ALL);
- MMIO_D(_MMIO(0x9120), D_ALL);
MMIO_DFH(GEN7_UCGCTL4, D_ALL, F_CMD_ACCESS, NULL, NULL);
- MMIO_D(GAB_CTL, D_ALL);
- MMIO_D(_MMIO(0x48800), D_ALL);
- MMIO_D(_MMIO(0xce044), D_ALL);
- MMIO_D(_MMIO(0xe6500), D_ALL);
- MMIO_D(_MMIO(0xe6504), D_ALL);
- MMIO_D(_MMIO(0xe6600), D_ALL);
- MMIO_D(_MMIO(0xe6604), D_ALL);
- MMIO_D(_MMIO(0xe6700), D_ALL);
- MMIO_D(_MMIO(0xe6704), D_ALL);
- MMIO_D(_MMIO(0xe6800), D_ALL);
- MMIO_D(_MMIO(0xe6804), D_ALL);
- MMIO_D(PCH_GMBUS4, D_ALL);
- MMIO_D(PCH_GMBUS5, D_ALL);
-
- MMIO_D(_MMIO(0x902c), D_ALL);
- MMIO_D(_MMIO(0xec008), D_ALL);
- MMIO_D(_MMIO(0xec00c), D_ALL);
- MMIO_D(_MMIO(0xec008 + 0x18), D_ALL);
- MMIO_D(_MMIO(0xec00c + 0x18), D_ALL);
- MMIO_D(_MMIO(0xec008 + 0x18 * 2), D_ALL);
- MMIO_D(_MMIO(0xec00c + 0x18 * 2), D_ALL);
- MMIO_D(_MMIO(0xec008 + 0x18 * 3), D_ALL);
- MMIO_D(_MMIO(0xec00c + 0x18 * 3), D_ALL);
- MMIO_D(_MMIO(0xec408), D_ALL);
- MMIO_D(_MMIO(0xec40c), D_ALL);
- MMIO_D(_MMIO(0xec408 + 0x18), D_ALL);
- MMIO_D(_MMIO(0xec40c + 0x18), D_ALL);
- MMIO_D(_MMIO(0xec408 + 0x18 * 2), D_ALL);
- MMIO_D(_MMIO(0xec40c + 0x18 * 2), D_ALL);
- MMIO_D(_MMIO(0xec408 + 0x18 * 3), D_ALL);
- MMIO_D(_MMIO(0xec40c + 0x18 * 3), D_ALL);
- MMIO_D(_MMIO(0xfc810), D_ALL);
- MMIO_D(_MMIO(0xfc81c), D_ALL);
- MMIO_D(_MMIO(0xfc828), D_ALL);
- MMIO_D(_MMIO(0xfc834), D_ALL);
- MMIO_D(_MMIO(0xfcc00), D_ALL);
- MMIO_D(_MMIO(0xfcc0c), D_ALL);
- MMIO_D(_MMIO(0xfcc18), D_ALL);
- MMIO_D(_MMIO(0xfcc24), D_ALL);
- MMIO_D(_MMIO(0xfd000), D_ALL);
- MMIO_D(_MMIO(0xfd00c), D_ALL);
- MMIO_D(_MMIO(0xfd018), D_ALL);
- MMIO_D(_MMIO(0xfd024), D_ALL);
- MMIO_D(_MMIO(0xfd034), D_ALL);
-
MMIO_DH(FPGA_DBG, D_ALL, NULL, fpga_dbg_mmio_write);
- MMIO_D(_MMIO(0x2054), D_ALL);
- MMIO_D(_MMIO(0x12054), D_ALL);
- MMIO_D(_MMIO(0x22054), D_ALL);
- MMIO_D(_MMIO(0x1a054), D_ALL);
-
- MMIO_D(_MMIO(0x44070), D_ALL);
MMIO_DFH(_MMIO(0x215c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(_MMIO(0x2178), D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(_MMIO(0x217c), D_ALL, F_CMD_ACCESS, NULL, NULL);
@@ -2963,8 +2388,6 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(_MMIO(0x1217c), D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_F(_MMIO(0x2290), 8, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, NULL, NULL);
- MMIO_D(_MMIO(0x2b00), D_BDW_PLUS);
- MMIO_D(_MMIO(0x2360), D_BDW_PLUS);
MMIO_F(_MMIO(0x5200), 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
MMIO_F(_MMIO(0x5240), 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
MMIO_F(_MMIO(0x5280), 16, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
@@ -3012,28 +2435,23 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
static int init_bdw_mmio_info(struct intel_gvt *gvt)
{
- struct drm_i915_private *dev_priv = gvt->gt->i915;
int ret;
MMIO_DH(GEN8_GT_IMR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
MMIO_DH(GEN8_GT_IER(0), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
MMIO_DH(GEN8_GT_IIR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
- MMIO_D(GEN8_GT_ISR(0), D_BDW_PLUS);
MMIO_DH(GEN8_GT_IMR(1), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
MMIO_DH(GEN8_GT_IER(1), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
MMIO_DH(GEN8_GT_IIR(1), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
- MMIO_D(GEN8_GT_ISR(1), D_BDW_PLUS);
MMIO_DH(GEN8_GT_IMR(2), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
MMIO_DH(GEN8_GT_IER(2), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
MMIO_DH(GEN8_GT_IIR(2), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
- MMIO_D(GEN8_GT_ISR(2), D_BDW_PLUS);
MMIO_DH(GEN8_GT_IMR(3), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
MMIO_DH(GEN8_GT_IER(3), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
MMIO_DH(GEN8_GT_IIR(3), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
- MMIO_D(GEN8_GT_ISR(3), D_BDW_PLUS);
MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_A), D_BDW_PLUS, NULL,
intel_vgpu_reg_imr_handler);
@@ -3041,7 +2459,6 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt)
intel_vgpu_reg_ier_handler);
MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_A), D_BDW_PLUS, NULL,
intel_vgpu_reg_iir_handler);
- MMIO_D(GEN8_DE_PIPE_ISR(PIPE_A), D_BDW_PLUS);
MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_B), D_BDW_PLUS, NULL,
intel_vgpu_reg_imr_handler);
@@ -3049,7 +2466,6 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt)
intel_vgpu_reg_ier_handler);
MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_B), D_BDW_PLUS, NULL,
intel_vgpu_reg_iir_handler);
- MMIO_D(GEN8_DE_PIPE_ISR(PIPE_B), D_BDW_PLUS);
MMIO_DH(GEN8_DE_PIPE_IMR(PIPE_C), D_BDW_PLUS, NULL,
intel_vgpu_reg_imr_handler);
@@ -3057,22 +2473,18 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt)
intel_vgpu_reg_ier_handler);
MMIO_DH(GEN8_DE_PIPE_IIR(PIPE_C), D_BDW_PLUS, NULL,
intel_vgpu_reg_iir_handler);
- MMIO_D(GEN8_DE_PIPE_ISR(PIPE_C), D_BDW_PLUS);
MMIO_DH(GEN8_DE_PORT_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
MMIO_DH(GEN8_DE_PORT_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
MMIO_DH(GEN8_DE_PORT_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
- MMIO_D(GEN8_DE_PORT_ISR, D_BDW_PLUS);
MMIO_DH(GEN8_DE_MISC_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
MMIO_DH(GEN8_DE_MISC_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
MMIO_DH(GEN8_DE_MISC_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
- MMIO_D(GEN8_DE_MISC_ISR, D_BDW_PLUS);
MMIO_DH(GEN8_PCU_IMR, D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
MMIO_DH(GEN8_PCU_IER, D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
MMIO_DH(GEN8_PCU_IIR, D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
- MMIO_D(GEN8_PCU_ISR, D_BDW_PLUS);
MMIO_DH(GEN8_MASTER_IRQ, D_BDW_PLUS, NULL,
intel_vgpu_reg_master_irq_handler);
@@ -3107,21 +2519,8 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt)
MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
#undef RING_REG
- MMIO_D(PIPEMISC(PIPE_A), D_BDW_PLUS);
- MMIO_D(PIPEMISC(PIPE_B), D_BDW_PLUS);
- MMIO_D(PIPEMISC(PIPE_C), D_BDW_PLUS);
- MMIO_D(_MMIO(0x1c1d0), D_BDW_PLUS);
- MMIO_D(GEN6_MBCUNIT_SNPCR, D_BDW_PLUS);
- MMIO_D(GEN7_MISCCPCTL, D_BDW_PLUS);
- MMIO_D(_MMIO(0x1c054), D_BDW_PLUS);
-
MMIO_DH(GEN6_PCODE_MAILBOX, D_BDW_PLUS, NULL, mailbox_write);
- MMIO_D(GEN8_PRIVATE_PAT_LO, D_BDW_PLUS & ~D_BXT);
- MMIO_D(GEN8_PRIVATE_PAT_HI, D_BDW_PLUS);
-
- MMIO_D(GAMTARBMODE, D_BDW_PLUS);
-
#define RING_REG(base) _MMIO((base) + 0x270)
MMIO_RING_F(RING_REG, 32, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, NULL, NULL);
#undef RING_REG
@@ -3130,24 +2529,6 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
- MMIO_D(CHICKEN_PIPESL_1(PIPE_A), D_BDW_PLUS);
- MMIO_D(CHICKEN_PIPESL_1(PIPE_B), D_BDW_PLUS);
- MMIO_D(CHICKEN_PIPESL_1(PIPE_C), D_BDW_PLUS);
-
- MMIO_D(WM_MISC, D_BDW);
- MMIO_D(_MMIO(_SRD_CTL_EDP), D_BDW);
-
- MMIO_D(_MMIO(0x6671c), D_BDW_PLUS);
- MMIO_D(_MMIO(0x66c00), D_BDW_PLUS);
- MMIO_D(_MMIO(0x66c04), D_BDW_PLUS);
-
- MMIO_D(HSW_GTT_CACHE_EN, D_BDW_PLUS);
-
- MMIO_D(GEN8_EU_DISABLE0, D_BDW_PLUS);
- MMIO_D(GEN8_EU_DISABLE1, D_BDW_PLUS);
- MMIO_D(GEN8_EU_DISABLE2, D_BDW_PLUS);
-
- MMIO_D(_MMIO(0xfdc), D_BDW_PLUS);
MMIO_DFH(GEN8_ROW_CHICKEN, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
MMIO_DFH(GEN7_ROW_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS,
@@ -3159,27 +2540,14 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(GEN8_L3SQCREG4, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(_MMIO(0xb100), D_BDW, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(_MMIO(0xb10c), D_BDW, F_CMD_ACCESS, NULL, NULL);
- MMIO_D(_MMIO(0xb110), D_BDW);
- MMIO_D(GEN9_SCRATCH_LNCF1, D_BDW_PLUS);
MMIO_F(_MMIO(0x24d0), 48, F_CMD_ACCESS | F_CMD_WRITE_PATCH, 0, 0,
D_BDW_PLUS, NULL, force_nonpriv_write);
- MMIO_D(_MMIO(0x44484), D_BDW_PLUS);
- MMIO_D(_MMIO(0x4448c), D_BDW_PLUS);
-
MMIO_DFH(_MMIO(0x83a4), D_BDW, F_CMD_ACCESS, NULL, NULL);
- MMIO_D(GEN8_L3_LRA_1_GPGPU, D_BDW_PLUS);
MMIO_DFH(_MMIO(0x8430), D_BDW, F_CMD_ACCESS, NULL, NULL);
- MMIO_D(_MMIO(0x110000), D_BDW_PLUS);
-
- MMIO_D(_MMIO(0x48400), D_BDW_PLUS);
-
- MMIO_D(_MMIO(0x6e570), D_BDW_PLUS);
- MMIO_D(_MMIO(0x65f10), D_BDW_PLUS);
-
MMIO_DFH(_MMIO(0xe194), D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(_MMIO(0xe188), D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
@@ -3219,30 +2587,15 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_F(DP_AUX_CH_CTL(AUX_CH_D), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
dp_aux_ch_ctl_mmio_write);
- MMIO_D(HSW_PWR_WELL_CTL1, D_SKL_PLUS);
MMIO_DH(HSW_PWR_WELL_CTL2, D_SKL_PLUS, NULL, skl_power_well_ctl_write);
MMIO_DH(DBUF_CTL_S(0), D_SKL_PLUS, NULL, gen9_dbuf_ctl_mmio_write);
- MMIO_D(GEN9_PG_ENABLE, D_SKL_PLUS);
- MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
- MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(MMCD_MISC_CTRL, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DH(CHICKEN_PAR1_1, D_SKL_PLUS, NULL, NULL);
- MMIO_D(DC_STATE_EN, D_SKL_PLUS);
- MMIO_D(DC_STATE_DEBUG, D_SKL_PLUS);
- MMIO_D(CDCLK_CTL, D_SKL_PLUS);
MMIO_DH(LCPLL1_CTL, D_SKL_PLUS, NULL, skl_lcpll_write);
MMIO_DH(LCPLL2_CTL, D_SKL_PLUS, NULL, skl_lcpll_write);
- MMIO_D(_MMIO(_DPLL1_CFGCR1), D_SKL_PLUS);
- MMIO_D(_MMIO(_DPLL2_CFGCR1), D_SKL_PLUS);
- MMIO_D(_MMIO(_DPLL3_CFGCR1), D_SKL_PLUS);
- MMIO_D(_MMIO(_DPLL1_CFGCR2), D_SKL_PLUS);
- MMIO_D(_MMIO(_DPLL2_CFGCR2), D_SKL_PLUS);
- MMIO_D(_MMIO(_DPLL3_CFGCR2), D_SKL_PLUS);
- MMIO_D(DPLL_CTRL1, D_SKL_PLUS);
- MMIO_D(DPLL_CTRL2, D_SKL_PLUS);
MMIO_DH(DPLL_STATUS, D_SKL_PLUS, dpll_status_read, NULL);
MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
@@ -3285,22 +2638,6 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_DH(CUR_BUF_CFG(PIPE_B), D_SKL_PLUS, NULL, NULL);
MMIO_DH(CUR_BUF_CFG(PIPE_C), D_SKL_PLUS, NULL, NULL);
- MMIO_F(PLANE_WM(PIPE_A, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
- MMIO_F(PLANE_WM(PIPE_A, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
- MMIO_F(PLANE_WM(PIPE_A, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
-
- MMIO_F(PLANE_WM(PIPE_B, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
- MMIO_F(PLANE_WM(PIPE_B, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
- MMIO_F(PLANE_WM(PIPE_B, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
-
- MMIO_F(PLANE_WM(PIPE_C, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
- MMIO_F(PLANE_WM(PIPE_C, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
- MMIO_F(PLANE_WM(PIPE_C, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
-
- MMIO_F(CUR_WM(PIPE_A, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
- MMIO_F(CUR_WM(PIPE_B, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
- MMIO_F(CUR_WM(PIPE_C, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
-
MMIO_DH(PLANE_WM_TRANS(PIPE_A, 0), D_SKL_PLUS, NULL, NULL);
MMIO_DH(PLANE_WM_TRANS(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
MMIO_DH(PLANE_WM_TRANS(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
@@ -3362,30 +2699,13 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 3)), D_SKL_PLUS, NULL, NULL);
MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 4)), D_SKL_PLUS, NULL, NULL);
- MMIO_D(_MMIO(_PLANE_CTL_3_A), D_SKL_PLUS);
- MMIO_D(_MMIO(_PLANE_CTL_3_B), D_SKL_PLUS);
- MMIO_D(_MMIO(0x72380), D_SKL_PLUS);
- MMIO_D(_MMIO(0x7239c), D_SKL_PLUS);
- MMIO_D(_MMIO(_PLANE_SURF_3_A), D_SKL_PLUS);
- MMIO_D(_MMIO(_PLANE_SURF_3_B), D_SKL_PLUS);
-
- MMIO_D(DMC_SSP_BASE, D_SKL_PLUS);
- MMIO_D(DMC_HTP_SKL, D_SKL_PLUS);
- MMIO_D(DMC_LAST_WRITE, D_SKL_PLUS);
-
MMIO_DFH(BDW_SCRATCH1, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_D(SKL_DFSM, D_SKL_PLUS);
- MMIO_D(DISPIO_CR_TX_BMU_CR0, D_SKL_PLUS);
-
MMIO_F(GEN9_GFX_MOCS(0), 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
NULL, NULL);
MMIO_F(GEN7_L3CNTLREG2, 0x80, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
NULL, NULL);
- MMIO_D(RPM_CONFIG0, D_SKL_PLUS);
- MMIO_D(_MMIO(0xd08), D_SKL_PLUS);
- MMIO_D(RC6_LOCATION, D_SKL_PLUS);
MMIO_DFH(GEN7_FF_SLICE_CS_CHICKEN1, D_SKL_PLUS,
F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(GEN9_CS_DEBUG_MODE1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
@@ -3402,40 +2722,9 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(_MMIO(0x4dfc), D_SKL_PLUS, F_PM_SAVE,
NULL, gen9_trtt_chicken_write);
- MMIO_D(_MMIO(0x46430), D_SKL_PLUS);
-
- MMIO_D(_MMIO(0x46520), D_SKL_PLUS);
-
- MMIO_D(_MMIO(0xc403c), D_SKL_PLUS);
MMIO_DFH(GEN8_GARBCNTL, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write);
- MMIO_D(_MMIO(0x65900), D_SKL_PLUS);
- MMIO_D(GEN6_STOLEN_RESERVED, D_SKL_PLUS);
- MMIO_D(_MMIO(0x4068), D_SKL_PLUS);
- MMIO_D(_MMIO(0x67054), D_SKL_PLUS);
- MMIO_D(_MMIO(0x6e560), D_SKL_PLUS);
- MMIO_D(_MMIO(0x6e554), D_SKL_PLUS);
- MMIO_D(_MMIO(0x2b20), D_SKL_PLUS);
- MMIO_D(_MMIO(0x65f00), D_SKL_PLUS);
- MMIO_D(_MMIO(0x65f08), D_SKL_PLUS);
- MMIO_D(_MMIO(0x320f0), D_SKL_PLUS);
-
- MMIO_D(_MMIO(0x70034), D_SKL_PLUS);
- MMIO_D(_MMIO(0x71034), D_SKL_PLUS);
- MMIO_D(_MMIO(0x72034), D_SKL_PLUS);
-
- MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_A)), D_SKL_PLUS);
- MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_B)), D_SKL_PLUS);
- MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_C)), D_SKL_PLUS);
- MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_A)), D_SKL_PLUS);
- MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_B)), D_SKL_PLUS);
- MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_C)), D_SKL_PLUS);
- MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_A)), D_SKL_PLUS);
- MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_B)), D_SKL_PLUS);
- MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_C)), D_SKL_PLUS);
-
- MMIO_D(_MMIO(0x44500), D_SKL_PLUS);
#define CSFE_CHICKEN1_REG(base) _MMIO((base) + 0xD4)
MMIO_RING_DFH(CSFE_CHICKEN1_REG, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
NULL, csfe_chicken1_mmio_write);
@@ -3446,7 +2735,6 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
NULL, NULL);
MMIO_DFH(GAMT_CHKN_BIT_REG, D_KBL | D_CFL, F_CMD_ACCESS, NULL, NULL);
- MMIO_D(GEN9_CTX_PREEMPT_REG, D_SKL_PLUS & ~D_BXT);
MMIO_DFH(_MMIO(0xe4cc), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
return 0;
@@ -3454,43 +2742,13 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
static int init_bxt_mmio_info(struct intel_gvt *gvt)
{
- struct drm_i915_private *dev_priv = gvt->gt->i915;
int ret;
- MMIO_F(_MMIO(0x80000), 0x3000, 0, 0, 0, D_BXT, NULL, NULL);
-
- MMIO_D(GEN7_SAMPLER_INSTDONE, D_BXT);
- MMIO_D(GEN7_ROW_INSTDONE, D_BXT);
- MMIO_D(GEN8_FAULT_TLB_DATA0, D_BXT);
- MMIO_D(GEN8_FAULT_TLB_DATA1, D_BXT);
- MMIO_D(ERROR_GEN6, D_BXT);
- MMIO_D(DONE_REG, D_BXT);
- MMIO_D(EIR, D_BXT);
- MMIO_D(PGTBL_ER, D_BXT);
- MMIO_D(_MMIO(0x4194), D_BXT);
- MMIO_D(_MMIO(0x4294), D_BXT);
- MMIO_D(_MMIO(0x4494), D_BXT);
-
- MMIO_RING_D(RING_PSMI_CTL, D_BXT);
- MMIO_RING_D(RING_DMA_FADD, D_BXT);
- MMIO_RING_D(RING_DMA_FADD_UDW, D_BXT);
- MMIO_RING_D(RING_IPEHR, D_BXT);
- MMIO_RING_D(RING_INSTPS, D_BXT);
- MMIO_RING_D(RING_BBADDR_UDW, D_BXT);
- MMIO_RING_D(RING_BBSTATE, D_BXT);
- MMIO_RING_D(RING_IPEIR, D_BXT);
-
- MMIO_F(SOFT_SCRATCH(0), 16 * 4, 0, 0, 0, D_BXT, NULL, NULL);
-
MMIO_DH(BXT_P_CR_GT_DISP_PWRON, D_BXT, NULL, bxt_gt_disp_pwron_write);
- MMIO_D(BXT_RP_STATE_CAP, D_BXT);
MMIO_DH(BXT_PHY_CTL_FAMILY(DPIO_PHY0), D_BXT,
NULL, bxt_phy_ctl_family_write);
MMIO_DH(BXT_PHY_CTL_FAMILY(DPIO_PHY1), D_BXT,
NULL, bxt_phy_ctl_family_write);
- MMIO_D(BXT_PHY_CTL(PORT_A), D_BXT);
- MMIO_D(BXT_PHY_CTL(PORT_B), D_BXT);
- MMIO_D(BXT_PHY_CTL(PORT_C), D_BXT);
MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_A), D_BXT,
NULL, bxt_port_pll_enable_write);
MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_B), D_BXT,
@@ -3498,128 +2756,19 @@ static int init_bxt_mmio_info(struct intel_gvt *gvt)
MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_C), D_BXT, NULL,
bxt_port_pll_enable_write);
- MMIO_D(BXT_PORT_CL1CM_DW0(DPIO_PHY0), D_BXT);
- MMIO_D(BXT_PORT_CL1CM_DW9(DPIO_PHY0), D_BXT);
- MMIO_D(BXT_PORT_CL1CM_DW10(DPIO_PHY0), D_BXT);
- MMIO_D(BXT_PORT_CL1CM_DW28(DPIO_PHY0), D_BXT);
- MMIO_D(BXT_PORT_CL1CM_DW30(DPIO_PHY0), D_BXT);
- MMIO_D(BXT_PORT_CL2CM_DW6(DPIO_PHY0), D_BXT);
- MMIO_D(BXT_PORT_REF_DW3(DPIO_PHY0), D_BXT);
- MMIO_D(BXT_PORT_REF_DW6(DPIO_PHY0), D_BXT);
- MMIO_D(BXT_PORT_REF_DW8(DPIO_PHY0), D_BXT);
-
- MMIO_D(BXT_PORT_CL1CM_DW0(DPIO_PHY1), D_BXT);
- MMIO_D(BXT_PORT_CL1CM_DW9(DPIO_PHY1), D_BXT);
- MMIO_D(BXT_PORT_CL1CM_DW10(DPIO_PHY1), D_BXT);
- MMIO_D(BXT_PORT_CL1CM_DW28(DPIO_PHY1), D_BXT);
- MMIO_D(BXT_PORT_CL1CM_DW30(DPIO_PHY1), D_BXT);
- MMIO_D(BXT_PORT_CL2CM_DW6(DPIO_PHY1), D_BXT);
- MMIO_D(BXT_PORT_REF_DW3(DPIO_PHY1), D_BXT);
- MMIO_D(BXT_PORT_REF_DW6(DPIO_PHY1), D_BXT);
- MMIO_D(BXT_PORT_REF_DW8(DPIO_PHY1), D_BXT);
-
- MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY0, DPIO_CH0), D_BXT);
- MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY0, DPIO_CH0), D_BXT);
- MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY0, DPIO_CH0), D_BXT);
- MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY0, DPIO_CH0), D_BXT);
- MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY0, DPIO_CH0), D_BXT);
- MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY0, DPIO_CH0), D_BXT);
MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH0), D_BXT,
NULL, bxt_pcs_dw12_grp_write);
- MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY0, DPIO_CH0), D_BXT);
- MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY0, DPIO_CH0), D_BXT);
MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH0), D_BXT,
bxt_port_tx_dw3_read, NULL);
- MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY0, DPIO_CH0), D_BXT);
- MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY0, DPIO_CH0), D_BXT);
- MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY0, DPIO_CH0), D_BXT);
- MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 0), D_BXT);
- MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 1), D_BXT);
- MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 2), D_BXT);
- MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 3), D_BXT);
- MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 0), D_BXT);
- MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 1), D_BXT);
- MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 2), D_BXT);
- MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 3), D_BXT);
- MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 6), D_BXT);
- MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 8), D_BXT);
- MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 9), D_BXT);
- MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 10), D_BXT);
-
- MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY0, DPIO_CH1), D_BXT);
- MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY0, DPIO_CH1), D_BXT);
- MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY0, DPIO_CH1), D_BXT);
- MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY0, DPIO_CH1), D_BXT);
- MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY0, DPIO_CH1), D_BXT);
- MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY0, DPIO_CH1), D_BXT);
MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH1), D_BXT,
NULL, bxt_pcs_dw12_grp_write);
- MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY0, DPIO_CH1), D_BXT);
- MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY0, DPIO_CH1), D_BXT);
MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH1), D_BXT,
bxt_port_tx_dw3_read, NULL);
- MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY0, DPIO_CH1), D_BXT);
- MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY0, DPIO_CH1), D_BXT);
- MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY0, DPIO_CH1), D_BXT);
- MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 0), D_BXT);
- MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 1), D_BXT);
- MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 2), D_BXT);
- MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 3), D_BXT);
- MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 0), D_BXT);
- MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 1), D_BXT);
- MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 2), D_BXT);
- MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 3), D_BXT);
- MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 6), D_BXT);
- MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 8), D_BXT);
- MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 9), D_BXT);
- MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 10), D_BXT);
-
- MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY1, DPIO_CH0), D_BXT);
- MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY1, DPIO_CH0), D_BXT);
- MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY1, DPIO_CH0), D_BXT);
- MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY1, DPIO_CH0), D_BXT);
- MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY1, DPIO_CH0), D_BXT);
- MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY1, DPIO_CH0), D_BXT);
MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY1, DPIO_CH0), D_BXT,
NULL, bxt_pcs_dw12_grp_write);
- MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY1, DPIO_CH0), D_BXT);
- MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY1, DPIO_CH0), D_BXT);
MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY1, DPIO_CH0), D_BXT,
bxt_port_tx_dw3_read, NULL);
- MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY1, DPIO_CH0), D_BXT);
- MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY1, DPIO_CH0), D_BXT);
- MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY1, DPIO_CH0), D_BXT);
- MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 0), D_BXT);
- MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 1), D_BXT);
- MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 2), D_BXT);
- MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 3), D_BXT);
- MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 0), D_BXT);
- MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 1), D_BXT);
- MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 2), D_BXT);
- MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 3), D_BXT);
- MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 6), D_BXT);
- MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 8), D_BXT);
- MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 9), D_BXT);
- MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 10), D_BXT);
-
- MMIO_D(BXT_DE_PLL_CTL, D_BXT);
MMIO_DH(BXT_DE_PLL_ENABLE, D_BXT, NULL, bxt_de_pll_enable_write);
- MMIO_D(BXT_DSI_PLL_CTL, D_BXT);
- MMIO_D(BXT_DSI_PLL_ENABLE, D_BXT);
-
- MMIO_D(GEN9_CLKGATE_DIS_0, D_BXT);
- MMIO_D(GEN9_CLKGATE_DIS_4, D_BXT);
-
- MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_A), D_BXT);
- MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B), D_BXT);
- MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_C), D_BXT);
-
- MMIO_D(RC6_CTX_BASE, D_BXT);
-
- MMIO_D(GEN8_PUSHBUS_CONTROL, D_BXT);
- MMIO_D(GEN8_PUSHBUS_ENABLE, D_BXT);
- MMIO_D(GEN8_PUSHBUS_SHIFT, D_BXT);
- MMIO_D(GEN6_GFXPAUSE, D_BXT);
MMIO_DFH(GEN8_L3SQCREG1, D_BXT, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(GEN8_L3CNTLREG, D_BXT, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(_MMIO(0x20D8), D_BXT, F_CMD_ACCESS, NULL, NULL);
@@ -3639,17 +2788,14 @@ static int init_bxt_mmio_info(struct intel_gvt *gvt)
return 0;
}
-static const struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt,
- unsigned int offset)
+static struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt,
+ unsigned int offset)
{
- unsigned long device = intel_gvt_get_device_type(gvt);
- const struct gvt_mmio_block *block = gvt->mmio.mmio_block;
+ struct gvt_mmio_block *block = gvt->mmio.mmio_block;
int num = gvt->mmio.num_mmio_block;
int i;
for (i = 0; i < num; i++, block++) {
- if (!(device & block->device))
- continue;
if (offset >= i915_mmio_reg_offset(block->offset) &&
offset < i915_mmio_reg_offset(block->offset) + block->size)
return block;
@@ -3674,23 +2820,117 @@ void intel_gvt_clean_mmio_info(struct intel_gvt *gvt)
hash_for_each_safe(gvt->mmio.mmio_info_table, i, tmp, e, node)
kfree(e);
+ kfree(gvt->mmio.mmio_block);
+ gvt->mmio.mmio_block = NULL;
+ gvt->mmio.num_mmio_block = 0;
+
vfree(gvt->mmio.mmio_attribute);
gvt->mmio.mmio_attribute = NULL;
}
-/* Special MMIO blocks. registers in MMIO block ranges should not be command
- * accessible (should have no F_CMD_ACCESS flag).
- * otherwise, need to update cmd_reg_handler in cmd_parser.c
- */
-static const struct gvt_mmio_block mmio_blocks[] = {
- {D_SKL_PLUS, _MMIO(DMC_MMIO_START_RANGE), 0x3000, NULL, NULL},
- {D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
- {D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE,
- pvinfo_mmio_read, pvinfo_mmio_write},
- {D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL},
- {D_ALL, LGC_PALETTE(PIPE_B, 0), 1024, NULL, NULL},
- {D_ALL, LGC_PALETTE(PIPE_C, 0), 1024, NULL, NULL},
-};
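+/*
+ * Track every 4-byte register in [offset, offset + size): allocate an
+ * mmio_info entry per register and hook up the default read/write
+ * handlers.
+ */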
+static int handle_mmio(struct intel_gvt_mmio_table_iter *iter, u32 offset,
+ u32 size)
+{
+ struct intel_gvt *gvt = iter->data;
+ struct intel_gvt_mmio_info *info, *p;
+ u32 start, end, i;
+
+ if (WARN_ON(!IS_ALIGNED(offset, 4)))
+ return -EINVAL;
+
+ start = offset;
+ end = offset + size;
+
+ for (i = start; i < end; i += 4) {
+ p = intel_gvt_find_mmio_info(gvt, i);
+ if (p) {
+			WARN(1, "duplicate mmio definition at offset %x\n",
+			     p->offset);
+
+			/* Return -EEXIST here to make the GVT-g load fail,
+			 * so that duplicated MMIO definitions are caught
+			 * as early as possible.
+			 */
+ return -EEXIST;
+ }
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->offset = i;
+ info->read = intel_vgpu_default_mmio_read;
+ info->write = intel_vgpu_default_mmio_write;
+ INIT_HLIST_NODE(&info->node);
+ hash_add(gvt->mmio.mmio_info_table, &info->node, info->offset);
+ gvt->mmio.num_tracked_mmio++;
+ }
+ return 0;
+}
+
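+/*
+ * Record [offset, offset + size) as an opaque MMIO block: grow the
+ * mmio_block array by one entry instead of tracking each register.
+ */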
+static int handle_mmio_block(struct intel_gvt_mmio_table_iter *iter,
+ u32 offset, u32 size)
+{
+ struct intel_gvt *gvt = iter->data;
+ struct gvt_mmio_block *block = gvt->mmio.mmio_block;
+ void *ret;
+
+ ret = krealloc(block,
+ (gvt->mmio.num_mmio_block + 1) * sizeof(*block),
+ GFP_KERNEL);
+ if (!ret)
+ return -ENOMEM;
+
+ gvt->mmio.mmio_block = block = ret;
+
+ block += gvt->mmio.num_mmio_block;
+
+ memset(block, 0, sizeof(*block));
+
+ block->offset = _MMIO(offset);
+ block->size = size;
+
+ gvt->mmio.num_mmio_block++;
+
+ return 0;
+}
+
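+/*
+ * Ranges of 1 KiB or more become opaque blocks, except the GFX MOCS
+ * registers, which must stay individually tracked.
+ */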
+static int handle_mmio_cb(struct intel_gvt_mmio_table_iter *iter, u32 offset,
+ u32 size)
+{
+ if (size < 1024 || offset == i915_mmio_reg_offset(GEN9_GFX_MOCS(0)))
+ return handle_mmio(iter, offset, size);
+ else
+ return handle_mmio_block(iter, offset, size);
+}
+
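+/*
+ * Walk the common i915 MMIO table and build GVT's tracking structures
+ * through handle_mmio_cb.
+ */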
+static int init_mmio_info(struct intel_gvt *gvt)
+{
+ struct intel_gvt_mmio_table_iter iter = {
+ .i915 = gvt->gt->i915,
+ .data = gvt,
+ .handle_mmio_cb = handle_mmio_cb,
+ };
+
+ return intel_gvt_iterate_mmio_table(&iter);
+}
+
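+/*
+ * The PVINFO page is the only block that needs non-default handlers,
+ * so look it up and wire up its read/write callbacks.
+ */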
+static int init_mmio_block_handlers(struct intel_gvt *gvt)
+{
+ struct gvt_mmio_block *block;
+
+ block = find_mmio_block(gvt, VGT_PVINFO_PAGE);
+ if (!block) {
+		WARN(1, "failed to assign handlers to mmio block %x\n",
+ i915_mmio_reg_offset(gvt->mmio.mmio_block->offset));
+ return -ENODEV;
+ }
+
+ block->read = pvinfo_mmio_read;
+ block->write = pvinfo_mmio_write;
+
+ return 0;
+}
/**
* intel_gvt_setup_mmio_info - setup MMIO information table for GVT device
@@ -3713,6 +2953,14 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
if (!gvt->mmio.mmio_attribute)
return -ENOMEM;
+ ret = init_mmio_info(gvt);
+ if (ret)
+ goto err;
+
+ ret = init_mmio_block_handlers(gvt);
+ if (ret)
+ goto err;
+
ret = init_generic_mmio_info(gvt);
if (ret)
goto err;
@@ -3743,9 +2991,6 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
goto err;
}
- gvt->mmio.mmio_block = mmio_blocks;
- gvt->mmio.num_mmio_block = ARRAY_SIZE(mmio_blocks);
-
return 0;
err:
intel_gvt_clean_mmio_info(gvt);
@@ -3765,7 +3010,7 @@ int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt,
int (*handler)(struct intel_gvt *gvt, u32 offset, void *data),
void *data)
{
- const struct gvt_mmio_block *block = gvt->mmio.mmio_block;
+ struct gvt_mmio_block *block = gvt->mmio.mmio_block;
struct intel_gvt_mmio_info *e;
int i, j, ret;
@@ -3781,9 +3026,7 @@ int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt,
continue;
for (j = 0; j < block->size; j += 4) {
- ret = handler(gvt,
- i915_mmio_reg_offset(block->offset) + j,
- data);
+ ret = handler(gvt, i915_mmio_reg_offset(block->offset) + j, data);
if (ret)
return ret;
}
@@ -3883,7 +3126,7 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_mmio_info *mmio_info;
- const struct gvt_mmio_block *mmio_block;
+ struct gvt_mmio_block *mmio_block;
gvt_mmio_func func;
int ret;
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h
deleted file mode 100644
index f33e3cbd0439..000000000000
--- a/drivers/gpu/drm/i915/gvt/hypercall.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- * Authors:
- * Eddie Dong <eddie.dong@intel.com>
- * Dexuan Cui
- * Jike Song <jike.song@intel.com>
- *
- * Contributors:
- * Zhi Wang <zhi.a.wang@intel.com>
- *
- */
-
-#ifndef _GVT_HYPERCALL_H_
-#define _GVT_HYPERCALL_H_
-
-#include <linux/types.h>
-
-struct device;
-
-enum hypervisor_type {
- INTEL_GVT_HYPERVISOR_XEN = 0,
- INTEL_GVT_HYPERVISOR_KVM,
-};
-
-/*
- * Specific GVT-g MPT modules function collections. Currently GVT-g supports
- * both Xen and KVM by providing dedicated hypervisor-related MPT modules.
- */
-struct intel_gvt_mpt {
- enum hypervisor_type type;
- int (*host_init)(struct device *dev, void *gvt, const void *ops);
- void (*host_exit)(struct device *dev, void *gvt);
- int (*attach_vgpu)(void *vgpu, unsigned long *handle);
- void (*detach_vgpu)(void *vgpu);
- int (*inject_msi)(unsigned long handle, u32 addr, u16 data);
- unsigned long (*from_virt_to_mfn)(void *p);
- int (*enable_page_track)(unsigned long handle, u64 gfn);
- int (*disable_page_track)(unsigned long handle, u64 gfn);
- int (*read_gpa)(unsigned long handle, unsigned long gpa, void *buf,
- unsigned long len);
- int (*write_gpa)(unsigned long handle, unsigned long gpa, void *buf,
- unsigned long len);
- unsigned long (*gfn_to_mfn)(unsigned long handle, unsigned long gfn);
-
- int (*dma_map_guest_page)(unsigned long handle, unsigned long gfn,
- unsigned long size, dma_addr_t *dma_addr);
- void (*dma_unmap_guest_page)(unsigned long handle, dma_addr_t dma_addr);
-
- int (*dma_pin_guest_page)(unsigned long handle, dma_addr_t dma_addr);
-
- int (*map_gfn_to_mfn)(unsigned long handle, unsigned long gfn,
- unsigned long mfn, unsigned int nr, bool map);
- int (*set_trap_area)(unsigned long handle, u64 start, u64 end,
- bool map);
- int (*set_opregion)(void *vgpu);
- int (*set_edid)(void *vgpu, int port_num);
- int (*get_vfio_device)(void *vgpu);
- void (*put_vfio_device)(void *vgpu);
- bool (*is_valid_gfn)(unsigned long handle, unsigned long gfn);
-};
-
-#endif /* _GVT_HYPERCALL_H_ */
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c
index 228f623d466d..a6b2021b665f 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.c
+++ b/drivers/gpu/drm/i915/gvt/interrupt.c
@@ -29,6 +29,8 @@
*
*/
+#include <linux/eventfd.h>
+
#include "i915_drv.h"
#include "i915_reg.h"
#include "gvt.h"
@@ -397,9 +399,45 @@ static void init_irq_map(struct intel_gvt_irq *irq)
}
/* =======================vEvent injection===================== */
+
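+/*
+ * These offsets follow the PCI MSI capability layout (32-bit address
+ * format): Message Control at +2, Message Address at +4, Message Data
+ * at +8.
+ */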
+#define MSI_CAP_CONTROL(offset)	((offset) + 2)
+#define MSI_CAP_ADDRESS(offset)	((offset) + 4)
+#define MSI_CAP_DATA(offset)	((offset) + 8)
+#define MSI_CAP_EN 0x1
+
static int inject_virtual_interrupt(struct intel_vgpu *vgpu)
{
- return intel_gvt_hypervisor_inject_msi(vgpu);
+ unsigned long offset = vgpu->gvt->device_info.msi_cap_offset;
+ u16 control, data;
+ u32 addr;
+
+ control = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_CONTROL(offset));
+ addr = *(u32 *)(vgpu_cfg_space(vgpu) + MSI_CAP_ADDRESS(offset));
+ data = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_DATA(offset));
+
+ /* Do not generate MSI if MSIEN is disabled */
+ if (!(control & MSI_CAP_EN))
+ return 0;
+
+	if (WARN(control & GENMASK(15, 1), "only one MSI format is supported\n"))
+ return -EINVAL;
+
+ trace_inject_msi(vgpu->id, addr, data);
+
+	/*
+	 * When the guest is powered off, msi_trigger is set to NULL, but the
+	 * vgpu's config space and MMIO registers are not restored to their
+	 * defaults during guest poweroff. If this vgpu is reused by the next
+	 * VM, its pipes may still be enabled, so once the vgpu becomes active
+	 * it will receive vblank interrupt injection requests. msi_trigger
+	 * stays NULL until the guest enables MSI, so in that case return
+	 * success without injecting an interrupt into the guest.
+	 */
+ if (!vgpu->attached)
+ return -ESRCH;
+ if (vgpu->msi_trigger && eventfd_signal(vgpu->msi_trigger, 1) != 1)
+ return -EFAULT;
+ return 0;
}
static void propagate_event(struct intel_gvt_irq *irq,
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 057ec4490104..0787ba5c301f 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -1,7 +1,7 @@
/*
* KVMGT - the implementation of Intel mediated pass-through framework for KVM
*
- * Copyright(c) 2014-2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -26,6 +26,11 @@
* Kevin Tian <kevin.tian@intel.com>
* Jike Song <jike.song@intel.com>
* Xiaoguang Chen <xiaoguang.chen@intel.com>
+ * Eddie Dong <eddie.dong@intel.com>
+ *
+ * Contributors:
+ * Niu Bing <bing.niu@intel.com>
+ * Zhi Wang <zhi.a.wang@intel.com>
*/
#include <linux/init.h>
@@ -39,8 +44,6 @@
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/uuid.h>
-#include <linux/kvm_host.h>
-#include <linux/vfio.h>
#include <linux/mdev.h>
#include <linux/debugfs.h>
@@ -49,9 +52,11 @@
#include <drm/drm_edid.h>
#include "i915_drv.h"
+#include "intel_gvt.h"
#include "gvt.h"
-static const struct intel_gvt_ops *intel_gvt_ops;
+MODULE_IMPORT_NS(DMA_BUF);
+MODULE_IMPORT_NS(I915_GVT);
/* helper macros copied from vfio-pci */
#define VFIO_PCI_OFFSET_SHIFT 40
@@ -90,16 +95,6 @@ struct kvmgt_pgfn {
struct hlist_node hnode;
};
-#define KVMGT_DEBUGFS_FILENAME "kvmgt_nr_cache_entries"
-struct kvmgt_guest_info {
- struct kvm *kvm;
- struct intel_vgpu *vgpu;
- struct kvm_page_track_notifier_node track_node;
-#define NR_BKT (1 << 18)
- struct hlist_head ptable[NR_BKT];
-#undef NR_BKT
-};
-
struct gvt_dma {
struct intel_vgpu *vgpu;
struct rb_node gfn_node;
@@ -110,41 +105,15 @@ struct gvt_dma {
struct kref ref;
};
-struct kvmgt_vdev {
- struct intel_vgpu *vgpu;
- struct mdev_device *mdev;
- struct vfio_region *region;
- int num_regions;
- struct eventfd_ctx *intx_trigger;
- struct eventfd_ctx *msi_trigger;
+#define vfio_dev_to_vgpu(vfio_dev) \
+ container_of((vfio_dev), struct intel_vgpu, vfio_device)
- /*
- * Two caches are used to avoid mapping duplicated pages (eg.
- * scratch pages). This help to reduce dma setup overhead.
- */
- struct rb_root gfn_cache;
- struct rb_root dma_addr_cache;
- unsigned long nr_cache_entries;
- struct mutex cache_lock;
-
- struct notifier_block iommu_notifier;
- struct notifier_block group_notifier;
- struct kvm *kvm;
- struct work_struct release_work;
- atomic_t released;
- struct vfio_device *vfio_device;
- struct vfio_group *vfio_group;
-};
-
-static inline struct kvmgt_vdev *kvmgt_vdev(struct intel_vgpu *vgpu)
-{
- return intel_vgpu_vdev(vgpu);
-}
-
-static inline bool handle_valid(unsigned long handle)
-{
- return !!(handle & ~0xff);
-}
+static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
+ const u8 *val, int len,
+ struct kvm_page_track_notifier_node *node);
+static void kvmgt_page_track_flush_slot(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
+ struct kvm_page_track_notifier_node *node);
static ssize_t available_instances_show(struct mdev_type *mtype,
struct mdev_type_attribute *attr,
@@ -259,15 +228,12 @@ static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
}
}
-static int kvmgt_guest_init(struct mdev_device *mdev);
static void intel_vgpu_release_work(struct work_struct *work);
-static bool kvmgt_guest_exit(struct kvmgt_guest_info *info);
static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
unsigned long size)
{
struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
- struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
int total_pages;
int npage;
int ret;
@@ -277,7 +243,7 @@ static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
for (npage = 0; npage < total_pages; npage++) {
unsigned long cur_gfn = gfn + npage;
- ret = vfio_group_unpin_pages(vdev->vfio_group, &cur_gfn, 1);
+ ret = vfio_group_unpin_pages(vgpu->vfio_group, &cur_gfn, 1);
drm_WARN_ON(&i915->drm, ret != 1);
}
}
@@ -286,7 +252,6 @@ static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
unsigned long size, struct page **page)
{
- struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
unsigned long base_pfn = 0;
int total_pages;
int npage;
@@ -301,7 +266,7 @@ static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
unsigned long cur_gfn = gfn + npage;
unsigned long pfn;
- ret = vfio_group_pin_pages(vdev->vfio_group, &cur_gfn, 1,
+ ret = vfio_group_pin_pages(vgpu->vfio_group, &cur_gfn, 1,
IOMMU_READ | IOMMU_WRITE, &pfn);
if (ret != 1) {
gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx, ret %d\n",
@@ -368,7 +333,7 @@ static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn,
static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu,
dma_addr_t dma_addr)
{
- struct rb_node *node = kvmgt_vdev(vgpu)->dma_addr_cache.rb_node;
+ struct rb_node *node = vgpu->dma_addr_cache.rb_node;
struct gvt_dma *itr;
while (node) {
@@ -386,7 +351,7 @@ static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu,
static struct gvt_dma *__gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn)
{
- struct rb_node *node = kvmgt_vdev(vgpu)->gfn_cache.rb_node;
+ struct rb_node *node = vgpu->gfn_cache.rb_node;
struct gvt_dma *itr;
while (node) {
@@ -407,7 +372,6 @@ static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
{
struct gvt_dma *new, *itr;
struct rb_node **link, *parent = NULL;
- struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
new = kzalloc(sizeof(struct gvt_dma), GFP_KERNEL);
if (!new)
@@ -420,7 +384,7 @@ static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
kref_init(&new->ref);
/* gfn_cache maps gfn to struct gvt_dma. */
- link = &vdev->gfn_cache.rb_node;
+ link = &vgpu->gfn_cache.rb_node;
while (*link) {
parent = *link;
itr = rb_entry(parent, struct gvt_dma, gfn_node);
@@ -431,11 +395,11 @@ static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
link = &parent->rb_right;
}
rb_link_node(&new->gfn_node, parent, link);
- rb_insert_color(&new->gfn_node, &vdev->gfn_cache);
+ rb_insert_color(&new->gfn_node, &vgpu->gfn_cache);
/* dma_addr_cache maps dma addr to struct gvt_dma. */
parent = NULL;
- link = &vdev->dma_addr_cache.rb_node;
+ link = &vgpu->dma_addr_cache.rb_node;
while (*link) {
parent = *link;
itr = rb_entry(parent, struct gvt_dma, dma_addr_node);
@@ -446,59 +410,54 @@ static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
link = &parent->rb_right;
}
rb_link_node(&new->dma_addr_node, parent, link);
- rb_insert_color(&new->dma_addr_node, &vdev->dma_addr_cache);
+ rb_insert_color(&new->dma_addr_node, &vgpu->dma_addr_cache);
- vdev->nr_cache_entries++;
+ vgpu->nr_cache_entries++;
return 0;
}
static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu,
struct gvt_dma *entry)
{
- struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
-
- rb_erase(&entry->gfn_node, &vdev->gfn_cache);
- rb_erase(&entry->dma_addr_node, &vdev->dma_addr_cache);
+ rb_erase(&entry->gfn_node, &vgpu->gfn_cache);
+ rb_erase(&entry->dma_addr_node, &vgpu->dma_addr_cache);
kfree(entry);
- vdev->nr_cache_entries--;
+ vgpu->nr_cache_entries--;
}
static void gvt_cache_destroy(struct intel_vgpu *vgpu)
{
struct gvt_dma *dma;
struct rb_node *node = NULL;
- struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
for (;;) {
- mutex_lock(&vdev->cache_lock);
- node = rb_first(&vdev->gfn_cache);
+ mutex_lock(&vgpu->cache_lock);
+ node = rb_first(&vgpu->gfn_cache);
if (!node) {
- mutex_unlock(&vdev->cache_lock);
+ mutex_unlock(&vgpu->cache_lock);
break;
}
dma = rb_entry(node, struct gvt_dma, gfn_node);
gvt_dma_unmap_page(vgpu, dma->gfn, dma->dma_addr, dma->size);
__gvt_cache_remove_entry(vgpu, dma);
- mutex_unlock(&vdev->cache_lock);
+ mutex_unlock(&vgpu->cache_lock);
}
}
static void gvt_cache_init(struct intel_vgpu *vgpu)
{
- struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
-
- vdev->gfn_cache = RB_ROOT;
- vdev->dma_addr_cache = RB_ROOT;
- vdev->nr_cache_entries = 0;
- mutex_init(&vdev->cache_lock);
+ vgpu->gfn_cache = RB_ROOT;
+ vgpu->dma_addr_cache = RB_ROOT;
+ vgpu->nr_cache_entries = 0;
+ mutex_init(&vgpu->cache_lock);
}
-static void kvmgt_protect_table_init(struct kvmgt_guest_info *info)
+static void kvmgt_protect_table_init(struct intel_vgpu *info)
{
hash_init(info->ptable);
}
-static void kvmgt_protect_table_destroy(struct kvmgt_guest_info *info)
+static void kvmgt_protect_table_destroy(struct intel_vgpu *info)
{
struct kvmgt_pgfn *p;
struct hlist_node *tmp;
@@ -511,7 +470,7 @@ static void kvmgt_protect_table_destroy(struct kvmgt_guest_info *info)
}
static struct kvmgt_pgfn *
-__kvmgt_protect_table_find(struct kvmgt_guest_info *info, gfn_t gfn)
+__kvmgt_protect_table_find(struct intel_vgpu *info, gfn_t gfn)
{
struct kvmgt_pgfn *p, *res = NULL;
@@ -525,8 +484,7 @@ __kvmgt_protect_table_find(struct kvmgt_guest_info *info, gfn_t gfn)
return res;
}
-static bool kvmgt_gfn_is_write_protected(struct kvmgt_guest_info *info,
- gfn_t gfn)
+static bool kvmgt_gfn_is_write_protected(struct intel_vgpu *info, gfn_t gfn)
{
struct kvmgt_pgfn *p;
@@ -534,7 +492,7 @@ static bool kvmgt_gfn_is_write_protected(struct kvmgt_guest_info *info,
return !!p;
}
-static void kvmgt_protect_table_add(struct kvmgt_guest_info *info, gfn_t gfn)
+static void kvmgt_protect_table_add(struct intel_vgpu *info, gfn_t gfn)
{
struct kvmgt_pgfn *p;
@@ -549,8 +507,7 @@ static void kvmgt_protect_table_add(struct kvmgt_guest_info *info, gfn_t gfn)
hash_add(info->ptable, &p->hnode, gfn);
}
-static void kvmgt_protect_table_del(struct kvmgt_guest_info *info,
- gfn_t gfn)
+static void kvmgt_protect_table_del(struct intel_vgpu *info, gfn_t gfn)
{
struct kvmgt_pgfn *p;
@@ -564,18 +521,17 @@ static void kvmgt_protect_table_del(struct kvmgt_guest_info *info,
static size_t intel_vgpu_reg_rw_opregion(struct intel_vgpu *vgpu, char *buf,
size_t count, loff_t *ppos, bool iswrite)
{
- struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) -
VFIO_PCI_NUM_REGIONS;
- void *base = vdev->region[i].data;
+ void *base = vgpu->region[i].data;
loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
- if (pos >= vdev->region[i].size || iswrite) {
+ if (pos >= vgpu->region[i].size || iswrite) {
gvt_vgpu_err("invalid op or offset for Intel vgpu OpRegion\n");
return -EINVAL;
}
- count = min(count, (size_t)(vdev->region[i].size - pos));
+ count = min(count, (size_t)(vgpu->region[i].size - pos));
memcpy(buf, base + pos, count);
return count;
@@ -617,9 +573,9 @@ static int handle_edid_regs(struct intel_vgpu *vgpu,
gvt_vgpu_err("invalid EDID blob\n");
return -EINVAL;
}
- intel_gvt_ops->emulate_hotplug(vgpu, true);
+ intel_vgpu_emulate_hotplug(vgpu, true);
} else if (data == VFIO_DEVICE_GFX_LINK_STATE_DOWN)
- intel_gvt_ops->emulate_hotplug(vgpu, false);
+ intel_vgpu_emulate_hotplug(vgpu, false);
else {
gvt_vgpu_err("invalid EDID link state %d\n",
regs->link_state);
@@ -668,8 +624,7 @@ static size_t intel_vgpu_reg_rw_edid(struct intel_vgpu *vgpu, char *buf,
int ret;
unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) -
VFIO_PCI_NUM_REGIONS;
- struct vfio_edid_region *region =
- (struct vfio_edid_region *)kvmgt_vdev(vgpu)->region[i].data;
+ struct vfio_edid_region *region = vgpu->region[i].data;
loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
if (pos < region->vfio_edid_regs.edid_offset) {
@@ -701,44 +656,27 @@ static int intel_vgpu_register_reg(struct intel_vgpu *vgpu,
const struct intel_vgpu_regops *ops,
size_t size, u32 flags, void *data)
{
- struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
struct vfio_region *region;
- region = krealloc(vdev->region,
- (vdev->num_regions + 1) * sizeof(*region),
+ region = krealloc(vgpu->region,
+ (vgpu->num_regions + 1) * sizeof(*region),
GFP_KERNEL);
if (!region)
return -ENOMEM;
- vdev->region = region;
- vdev->region[vdev->num_regions].type = type;
- vdev->region[vdev->num_regions].subtype = subtype;
- vdev->region[vdev->num_regions].ops = ops;
- vdev->region[vdev->num_regions].size = size;
- vdev->region[vdev->num_regions].flags = flags;
- vdev->region[vdev->num_regions].data = data;
- vdev->num_regions++;
+ vgpu->region = region;
+ vgpu->region[vgpu->num_regions].type = type;
+ vgpu->region[vgpu->num_regions].subtype = subtype;
+ vgpu->region[vgpu->num_regions].ops = ops;
+ vgpu->region[vgpu->num_regions].size = size;
+ vgpu->region[vgpu->num_regions].flags = flags;
+ vgpu->region[vgpu->num_regions].data = data;
+ vgpu->num_regions++;
return 0;
}
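
intel_vgpu_register_reg() grows the region table with krealloc() into a temporary pointer first, so the old array survives an allocation failure. The same pattern in portable C with realloc(), with the region fields trimmed down for the sketch:

    #include <stdlib.h>

    struct region {
        unsigned int type;
        size_t size;
        void *data;
    };

    static struct region *regions;
    static unsigned int num_regions;

    static int register_region(unsigned int type, size_t size, void *data)
    {
        /* Assign to a temporary: on failure the old table is untouched. */
        struct region *tmp = realloc(regions, (num_regions + 1) * sizeof(*tmp));

        if (!tmp)
            return -1;
        regions = tmp;
        regions[num_regions].type = type;
        regions[num_regions].size = size;
        regions[num_regions].data = data;
        num_regions++;
        return 0;
    }

    int main(void)
    {
        return register_region(0, 4096, NULL);
    }
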
-static int kvmgt_get_vfio_device(void *p_vgpu)
+int intel_gvt_set_opregion(struct intel_vgpu *vgpu)
{
- struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
- struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
-
- vdev->vfio_device = vfio_device_get_from_dev(
- mdev_dev(vdev->mdev));
- if (!vdev->vfio_device) {
- gvt_vgpu_err("failed to get vfio device\n");
- return -ENODEV;
- }
- return 0;
-}
-
-
-static int kvmgt_set_opregion(void *p_vgpu)
-{
- struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
void *base;
int ret;
@@ -764,9 +702,8 @@ static int kvmgt_set_opregion(void *p_vgpu)
return ret;
}
-static int kvmgt_set_edid(void *p_vgpu, int port_num)
+int intel_gvt_set_edid(struct intel_vgpu *vgpu, int port_num)
{
- struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
struct vfio_edid_region *base;
int ret;
@@ -794,71 +731,11 @@ static int kvmgt_set_edid(void *p_vgpu, int port_num)
return ret;
}
-static void kvmgt_put_vfio_device(void *vgpu)
-{
- struct kvmgt_vdev *vdev = kvmgt_vdev((struct intel_vgpu *)vgpu);
-
- if (WARN_ON(!vdev->vfio_device))
- return;
-
- vfio_device_put(vdev->vfio_device);
-}
-
-static int intel_vgpu_create(struct mdev_device *mdev)
-{
- struct intel_vgpu *vgpu = NULL;
- struct intel_vgpu_type *type;
- struct device *pdev;
- struct intel_gvt *gvt;
- int ret;
-
- pdev = mdev_parent_dev(mdev);
- gvt = kdev_to_i915(pdev)->gvt;
-
- type = &gvt->types[mdev_get_type_group_id(mdev)];
- if (!type) {
- ret = -EINVAL;
- goto out;
- }
-
- vgpu = intel_gvt_ops->vgpu_create(gvt, type);
- if (IS_ERR_OR_NULL(vgpu)) {
- ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu);
- gvt_err("failed to create intel vgpu: %d\n", ret);
- goto out;
- }
-
- INIT_WORK(&kvmgt_vdev(vgpu)->release_work, intel_vgpu_release_work);
-
- kvmgt_vdev(vgpu)->mdev = mdev;
- mdev_set_drvdata(mdev, vgpu);
-
- gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
- dev_name(mdev_dev(mdev)));
- ret = 0;
-
-out:
- return ret;
-}
-
-static int intel_vgpu_remove(struct mdev_device *mdev)
-{
- struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
-
- if (handle_valid(vgpu->handle))
- return -EBUSY;
-
- intel_gvt_ops->vgpu_destroy(vgpu);
- return 0;
-}
-
static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
- struct kvmgt_vdev *vdev = container_of(nb,
- struct kvmgt_vdev,
- iommu_notifier);
- struct intel_vgpu *vgpu = vdev->vgpu;
+ struct intel_vgpu *vgpu =
+ container_of(nb, struct intel_vgpu, iommu_notifier);
if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
struct vfio_iommu_type1_dma_unmap *unmap = data;
@@ -868,7 +745,7 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
iov_pfn = unmap->iova >> PAGE_SHIFT;
end_iov_pfn = iov_pfn + unmap->size / PAGE_SIZE;
- mutex_lock(&vdev->cache_lock);
+ mutex_lock(&vgpu->cache_lock);
for (; iov_pfn < end_iov_pfn; iov_pfn++) {
entry = __gvt_cache_find_gfn(vgpu, iov_pfn);
if (!entry)
@@ -878,7 +755,7 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
entry->size);
__gvt_cache_remove_entry(vgpu, entry);
}
- mutex_unlock(&vdev->cache_lock);
+ mutex_unlock(&vgpu->cache_lock);
}
return NOTIFY_OK;
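
The notifier above turns the unmapped IOVA range into a pfn range and, under the cache lock, evicts every cached mapping that falls inside it. A sketch of the range walk; find_gfn() and drop_entry() are hypothetical stand-ins for __gvt_cache_find_gfn() and the unmap-plus-__gvt_cache_remove_entry() pair:

    #include <stddef.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    struct cached { unsigned long gfn; };

    static struct cached *find_gfn(unsigned long gfn) { (void)gfn; return NULL; }
    static void drop_entry(struct cached *e) { (void)e; }

    static void on_dma_unmap(unsigned long long iova, unsigned long long size)
    {
        unsigned long iov_pfn = iova >> PAGE_SHIFT;
        unsigned long end_iov_pfn = iov_pfn + size / PAGE_SIZE;

        /* The real code holds vgpu->cache_lock across this walk. */
        for (; iov_pfn < end_iov_pfn; iov_pfn++) {
            struct cached *e = find_gfn(iov_pfn);

            if (e)
                drop_entry(e);   /* unmap + erase from both indexes */
        }
    }

    int main(void)
    {
        on_dma_unmap(1ULL << 20, 4 * PAGE_SIZE);
        return 0;
    }
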
@@ -887,35 +764,54 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
static int intel_vgpu_group_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
- struct kvmgt_vdev *vdev = container_of(nb,
- struct kvmgt_vdev,
- group_notifier);
+ struct intel_vgpu *vgpu =
+ container_of(nb, struct intel_vgpu, group_notifier);
/* the only action we care about */
if (action == VFIO_GROUP_NOTIFY_SET_KVM) {
- vdev->kvm = data;
+ vgpu->kvm = data;
if (!data)
- schedule_work(&vdev->release_work);
+ schedule_work(&vgpu->release_work);
}
return NOTIFY_OK;
}
-static int intel_vgpu_open_device(struct mdev_device *mdev)
+static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu)
{
- struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
- struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
+ struct intel_vgpu *itr;
+ int id;
+ bool ret = false;
+
+ mutex_lock(&vgpu->gvt->lock);
+ for_each_active_vgpu(vgpu->gvt, itr, id) {
+ if (!itr->attached)
+ continue;
+
+ if (vgpu->kvm == itr->kvm) {
+ ret = true;
+ goto out;
+ }
+ }
+out:
+ mutex_unlock(&vgpu->gvt->lock);
+ return ret;
+}
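
__kvmgt_vgpu_exist() refuses a second attach of the same KVM instance: under the gvt lock it scans every active vGPU and compares kvm pointers. The check in miniature, over a plain array (the vgpu layout here is illustrative, not the driver's):

    #include <stdbool.h>
    #include <stddef.h>

    struct vgpu {
        bool attached;
        void *kvm;    /* identity of the VM this vGPU is bound to */
    };

    static bool kvm_already_used(const struct vgpu *table, size_t n, void *kvm)
    {
        for (size_t i = 0; i < n; i++) {
            if (!table[i].attached)
                continue;
            if (table[i].kvm == kvm)
                return true;    /* one vGPU per KVM instance */
        }
        return false;
    }

    int main(void)
    {
        struct vgpu table[2] = { { true, (void *)0x1 }, { false, NULL } };

        return kvm_already_used(table, 2, (void *)0x1) ? 0 : 1;
    }
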
+
+static int intel_vgpu_open_device(struct vfio_device *vfio_dev)
+{
+ struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
unsigned long events;
int ret;
struct vfio_group *vfio_group;
- vdev->iommu_notifier.notifier_call = intel_vgpu_iommu_notifier;
- vdev->group_notifier.notifier_call = intel_vgpu_group_notifier;
+ vgpu->iommu_notifier.notifier_call = intel_vgpu_iommu_notifier;
+ vgpu->group_notifier.notifier_call = intel_vgpu_group_notifier;
events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
- ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events,
- &vdev->iommu_notifier);
+ ret = vfio_register_notifier(vfio_dev->dev, VFIO_IOMMU_NOTIFY, &events,
+ &vgpu->iommu_notifier);
if (ret != 0) {
gvt_vgpu_err("vfio_register_notifier for iommu failed: %d\n",
ret);
@@ -923,117 +819,129 @@ static int intel_vgpu_open_device(struct mdev_device *mdev)
}
events = VFIO_GROUP_NOTIFY_SET_KVM;
- ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events,
- &vdev->group_notifier);
+ ret = vfio_register_notifier(vfio_dev->dev, VFIO_GROUP_NOTIFY, &events,
+ &vgpu->group_notifier);
if (ret != 0) {
gvt_vgpu_err("vfio_register_notifier for group failed: %d\n",
ret);
goto undo_iommu;
}
- vfio_group = vfio_group_get_external_user_from_dev(mdev_dev(mdev));
+ vfio_group =
+ vfio_group_get_external_user_from_dev(vgpu->vfio_device.dev);
if (IS_ERR_OR_NULL(vfio_group)) {
ret = !vfio_group ? -EFAULT : PTR_ERR(vfio_group);
gvt_vgpu_err("vfio_group_get_external_user_from_dev failed\n");
goto undo_register;
}
- vdev->vfio_group = vfio_group;
+ vgpu->vfio_group = vfio_group;
- /* Take a module reference as mdev core doesn't take
- * a reference for vendor driver.
- */
- if (!try_module_get(THIS_MODULE)) {
- ret = -ENODEV;
+ ret = -EEXIST;
+ if (vgpu->attached)
+ goto undo_group;
+
+ ret = -ESRCH;
+ if (!vgpu->kvm || vgpu->kvm->mm != current->mm) {
+ gvt_vgpu_err("KVM is required to use Intel vGPU\n");
goto undo_group;
}
- ret = kvmgt_guest_init(mdev);
- if (ret)
+ ret = -EEXIST;
+ if (__kvmgt_vgpu_exist(vgpu))
goto undo_group;
- intel_gvt_ops->vgpu_activate(vgpu);
+ vgpu->attached = true;
+ kvm_get_kvm(vgpu->kvm);
- atomic_set(&vdev->released, 0);
- return ret;
+ kvmgt_protect_table_init(vgpu);
+ gvt_cache_init(vgpu);
+
+ vgpu->track_node.track_write = kvmgt_page_track_write;
+ vgpu->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
+ kvm_page_track_register_notifier(vgpu->kvm, &vgpu->track_node);
+
+ debugfs_create_ulong(KVMGT_DEBUGFS_FILENAME, 0444, vgpu->debugfs,
+ &vgpu->nr_cache_entries);
+
+ intel_gvt_activate_vgpu(vgpu);
+
+ atomic_set(&vgpu->released, 0);
+ return 0;
undo_group:
- vfio_group_put_external_user(vdev->vfio_group);
- vdev->vfio_group = NULL;
+ vfio_group_put_external_user(vgpu->vfio_group);
+ vgpu->vfio_group = NULL;
undo_register:
- vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
- &vdev->group_notifier);
+ vfio_unregister_notifier(vfio_dev->dev, VFIO_GROUP_NOTIFY,
+ &vgpu->group_notifier);
undo_iommu:
- vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
- &vdev->iommu_notifier);
+ vfio_unregister_notifier(vfio_dev->dev, VFIO_IOMMU_NOTIFY,
+ &vgpu->iommu_notifier);
out:
return ret;
}
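
The open path registers the IOMMU notifier, then the group notifier, then takes the group reference, and on any failure unwinds only the steps that already succeeded, in reverse order, via the cascading labels above. The skeleton of that idiom, with setup_a/b/c as placeholders for the three steps:

    #include <stdio.h>

    static int setup_a(void) { return 0; }
    static int setup_b(void) { return 0; }
    static int setup_c(void) { return -1; }    /* pretend the last step fails */
    static void undo_a(void) { puts("undo a"); }
    static void undo_b(void) { puts("undo b"); }

    static int open_device(void)
    {
        int ret;

        ret = setup_a();
        if (ret)
            goto out;
        ret = setup_b();
        if (ret)
            goto err_a;
        ret = setup_c();
        if (ret)
            goto err_b;
        return 0;

    err_b:
        undo_b();    /* labels cascade: each undoes one step, newest first */
    err_a:
        undo_a();
    out:
        return ret;
    }

    int main(void)
    {
        return open_device() ? 1 : 0;
    }
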
static void intel_vgpu_release_msi_eventfd_ctx(struct intel_vgpu *vgpu)
{
- struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
struct eventfd_ctx *trigger;
- trigger = vdev->msi_trigger;
+ trigger = vgpu->msi_trigger;
if (trigger) {
eventfd_ctx_put(trigger);
- vdev->msi_trigger = NULL;
+ vgpu->msi_trigger = NULL;
}
}
static void __intel_vgpu_release(struct intel_vgpu *vgpu)
{
- struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
- struct kvmgt_guest_info *info;
int ret;
- if (!handle_valid(vgpu->handle))
+ if (!vgpu->attached)
return;
- if (atomic_cmpxchg(&vdev->released, 0, 1))
+ if (atomic_cmpxchg(&vgpu->released, 0, 1))
return;
- intel_gvt_ops->vgpu_release(vgpu);
+ intel_gvt_release_vgpu(vgpu);
- ret = vfio_unregister_notifier(mdev_dev(vdev->mdev), VFIO_IOMMU_NOTIFY,
- &vdev->iommu_notifier);
+ ret = vfio_unregister_notifier(vgpu->vfio_device.dev, VFIO_IOMMU_NOTIFY,
+ &vgpu->iommu_notifier);
drm_WARN(&i915->drm, ret,
"vfio_unregister_notifier for iommu failed: %d\n", ret);
- ret = vfio_unregister_notifier(mdev_dev(vdev->mdev), VFIO_GROUP_NOTIFY,
- &vdev->group_notifier);
+ ret = vfio_unregister_notifier(vgpu->vfio_device.dev, VFIO_GROUP_NOTIFY,
+ &vgpu->group_notifier);
drm_WARN(&i915->drm, ret,
"vfio_unregister_notifier for group failed: %d\n", ret);
- /* dereference module reference taken at open */
- module_put(THIS_MODULE);
+ debugfs_remove(debugfs_lookup(KVMGT_DEBUGFS_FILENAME, vgpu->debugfs));
- info = (struct kvmgt_guest_info *)vgpu->handle;
- kvmgt_guest_exit(info);
+ kvm_page_track_unregister_notifier(vgpu->kvm, &vgpu->track_node);
+ kvm_put_kvm(vgpu->kvm);
+ kvmgt_protect_table_destroy(vgpu);
+ gvt_cache_destroy(vgpu);
intel_vgpu_release_msi_eventfd_ctx(vgpu);
- vfio_group_put_external_user(vdev->vfio_group);
+ vfio_group_put_external_user(vgpu->vfio_group);
- vdev->kvm = NULL;
- vgpu->handle = 0;
+ vgpu->kvm = NULL;
+ vgpu->attached = false;
}
-static void intel_vgpu_close_device(struct mdev_device *mdev)
+static void intel_vgpu_close_device(struct vfio_device *vfio_dev)
{
- struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
-
- __intel_vgpu_release(vgpu);
+ __intel_vgpu_release(vfio_dev_to_vgpu(vfio_dev));
}
static void intel_vgpu_release_work(struct work_struct *work)
{
- struct kvmgt_vdev *vdev = container_of(work, struct kvmgt_vdev,
- release_work);
+ struct intel_vgpu *vgpu =
+ container_of(work, struct intel_vgpu, release_work);
- __intel_vgpu_release(vdev->vgpu);
+ __intel_vgpu_release(vgpu);
}
static u64 intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
@@ -1070,10 +978,10 @@ static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, u64 off,
int ret;
if (is_write)
- ret = intel_gvt_ops->emulate_mmio_write(vgpu,
+ ret = intel_vgpu_emulate_mmio_write(vgpu,
bar_start + off, buf, count);
else
- ret = intel_gvt_ops->emulate_mmio_read(vgpu,
+ ret = intel_vgpu_emulate_mmio_read(vgpu,
bar_start + off, buf, count);
return ret;
}
@@ -1111,17 +1019,15 @@ static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off,
return 0;
}
-static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
+static ssize_t intel_vgpu_rw(struct intel_vgpu *vgpu, char *buf,
size_t count, loff_t *ppos, bool is_write)
{
- struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
- struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
u64 pos = *ppos & VFIO_PCI_OFFSET_MASK;
int ret = -EINVAL;
- if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions) {
+ if (index >= VFIO_PCI_NUM_REGIONS + vgpu->num_regions) {
gvt_vgpu_err("invalid index: %u\n", index);
return -EINVAL;
}
@@ -1129,10 +1035,10 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
switch (index) {
case VFIO_PCI_CONFIG_REGION_INDEX:
if (is_write)
- ret = intel_gvt_ops->emulate_cfg_write(vgpu, pos,
+ ret = intel_vgpu_emulate_cfg_write(vgpu, pos,
buf, count);
else
- ret = intel_gvt_ops->emulate_cfg_read(vgpu, pos,
+ ret = intel_vgpu_emulate_cfg_read(vgpu, pos,
buf, count);
break;
case VFIO_PCI_BAR0_REGION_INDEX:
@@ -1150,20 +1056,19 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
case VFIO_PCI_ROM_REGION_INDEX:
break;
default:
- if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
+ if (index >= VFIO_PCI_NUM_REGIONS + vgpu->num_regions)
return -EINVAL;
index -= VFIO_PCI_NUM_REGIONS;
- return vdev->region[index].ops->rw(vgpu, buf, count,
+ return vgpu->region[index].ops->rw(vgpu, buf, count,
ppos, is_write);
}
return ret == 0 ? count : ret;
}
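
intel_vgpu_rw() routes each access by decoding the VFIO region index out of the high bits of the file offset, the same packing vfio-pci uses. A sketch of that encoding with locally defined macros; the shift value of 40 matches vfio-pci's layout but is taken here as an assumption:

    #include <stdio.h>

    #define VFIO_PCI_OFFSET_SHIFT 40    /* per vfio-pci's region layout */
    #define VFIO_PCI_OFFSET_MASK  ((1ULL << VFIO_PCI_OFFSET_SHIFT) - 1)

    #define VFIO_PCI_OFFSET_TO_INDEX(off) \
        ((unsigned int)((off) >> VFIO_PCI_OFFSET_SHIFT))
    #define VFIO_PCI_INDEX_TO_OFFSET(idx) \
        ((unsigned long long)(idx) << VFIO_PCI_OFFSET_SHIFT)

    int main(void)
    {
        unsigned long long pos = VFIO_PCI_INDEX_TO_OFFSET(2) | 0x100;

        /* The index selects the region; the low bits are the offset in it. */
        printf("index=%u offset=0x%llx\n",
               VFIO_PCI_OFFSET_TO_INDEX(pos), pos & VFIO_PCI_OFFSET_MASK);
        return 0;
    }
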
-static bool gtt_entry(struct mdev_device *mdev, loff_t *ppos)
+static bool gtt_entry(struct intel_vgpu *vgpu, loff_t *ppos)
{
- struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
struct intel_gvt *gvt = vgpu->gvt;
int offset;
@@ -1180,9 +1085,10 @@ static bool gtt_entry(struct mdev_device *mdev, loff_t *ppos)
true : false;
}
-static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,
+static ssize_t intel_vgpu_read(struct vfio_device *vfio_dev, char __user *buf,
size_t count, loff_t *ppos)
{
+ struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
unsigned int done = 0;
int ret;
@@ -1191,10 +1097,10 @@ static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,
/* Only support GGTT entry 8 bytes read */
if (count >= 8 && !(*ppos % 8) &&
- gtt_entry(mdev, ppos)) {
+ gtt_entry(vgpu, ppos)) {
u64 val;
- ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
+ ret = intel_vgpu_rw(vgpu, (char *)&val, sizeof(val),
ppos, false);
if (ret <= 0)
goto read_err;
@@ -1206,7 +1112,7 @@ static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,
} else if (count >= 4 && !(*ppos % 4)) {
u32 val;
- ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
+ ret = intel_vgpu_rw(vgpu, (char *)&val, sizeof(val),
ppos, false);
if (ret <= 0)
goto read_err;
@@ -1218,7 +1124,7 @@ static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,
} else if (count >= 2 && !(*ppos % 2)) {
u16 val;
- ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
+ ret = intel_vgpu_rw(vgpu, (char *)&val, sizeof(val),
ppos, false);
if (ret <= 0)
goto read_err;
@@ -1230,7 +1136,7 @@ static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,
} else {
u8 val;
- ret = intel_vgpu_rw(mdev, &val, sizeof(val), ppos,
+ ret = intel_vgpu_rw(vgpu, &val, sizeof(val), ppos,
false);
if (ret <= 0)
goto read_err;
@@ -1253,10 +1159,11 @@ read_err:
return -EFAULT;
}
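
The read path above drains the request with the widest naturally aligned access it can, then loops: 8 bytes (which the driver permits only for GGTT entries), then 4, 2, 1. The width-selection ladder in isolation, simplified to always allow the 8-byte case:

    #include <stdio.h>

    /* Widest power-of-two access that fits the remaining count and is
     * naturally aligned at the current offset. */
    static unsigned int pick_width(unsigned long long pos, size_t count)
    {
        if (count >= 8 && !(pos % 8))
            return 8;
        if (count >= 4 && !(pos % 4))
            return 4;
        if (count >= 2 && !(pos % 2))
            return 2;
        return 1;
    }

    int main(void)
    {
        unsigned long long pos = 0x1002;
        size_t count = 9;

        while (count) {
            unsigned int w = pick_width(pos, count);

            printf("access %u bytes at 0x%llx\n", w, pos);
            pos += w;
            count -= w;
        }
        return 0;
    }
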
-static ssize_t intel_vgpu_write(struct mdev_device *mdev,
+static ssize_t intel_vgpu_write(struct vfio_device *vfio_dev,
const char __user *buf,
size_t count, loff_t *ppos)
{
+ struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
unsigned int done = 0;
int ret;
@@ -1265,13 +1172,13 @@ static ssize_t intel_vgpu_write(struct mdev_device *mdev,
/* Only support GGTT entry 8 bytes write */
if (count >= 8 && !(*ppos % 8) &&
- gtt_entry(mdev, ppos)) {
+ gtt_entry(vgpu, ppos)) {
u64 val;
if (copy_from_user(&val, buf, sizeof(val)))
goto write_err;
- ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
+ ret = intel_vgpu_rw(vgpu, (char *)&val, sizeof(val),
ppos, true);
if (ret <= 0)
goto write_err;
@@ -1283,7 +1190,7 @@ static ssize_t intel_vgpu_write(struct mdev_device *mdev,
if (copy_from_user(&val, buf, sizeof(val)))
goto write_err;
- ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
+ ret = intel_vgpu_rw(vgpu, (char *)&val, sizeof(val),
ppos, true);
if (ret <= 0)
goto write_err;
@@ -1295,7 +1202,7 @@ static ssize_t intel_vgpu_write(struct mdev_device *mdev,
if (copy_from_user(&val, buf, sizeof(val)))
goto write_err;
- ret = intel_vgpu_rw(mdev, (char *)&val,
+ ret = intel_vgpu_rw(vgpu, (char *)&val,
sizeof(val), ppos, true);
if (ret <= 0)
goto write_err;
@@ -1307,7 +1214,7 @@ static ssize_t intel_vgpu_write(struct mdev_device *mdev,
if (copy_from_user(&val, buf, sizeof(val)))
goto write_err;
- ret = intel_vgpu_rw(mdev, &val, sizeof(val),
+ ret = intel_vgpu_rw(vgpu, &val, sizeof(val),
ppos, true);
if (ret <= 0)
goto write_err;
@@ -1326,13 +1233,14 @@ write_err:
return -EFAULT;
}
-static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
+static int intel_vgpu_mmap(struct vfio_device *vfio_dev,
+ struct vm_area_struct *vma)
{
+ struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
unsigned int index;
u64 virtaddr;
unsigned long req_size, pgoff, req_start;
pgprot_t pg_prot;
- struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
if (index >= VFIO_PCI_ROM_REGION_INDEX)
@@ -1407,7 +1315,7 @@ static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
gvt_vgpu_err("eventfd_ctx_fdget failed\n");
return PTR_ERR(trigger);
}
- kvmgt_vdev(vgpu)->msi_trigger = trigger;
+ vgpu->msi_trigger = trigger;
} else if ((flags & VFIO_IRQ_SET_DATA_NONE) && !count)
intel_vgpu_release_msi_eventfd_ctx(vgpu);
@@ -1455,11 +1363,10 @@ static int intel_vgpu_set_irqs(struct intel_vgpu *vgpu, u32 flags,
return func(vgpu, index, start, count, flags, data);
}
-static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
+static long intel_vgpu_ioctl(struct vfio_device *vfio_dev, unsigned int cmd,
unsigned long arg)
{
- struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
- struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
+ struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
unsigned long minsz;
gvt_dbg_core("vgpu%d ioctl, cmd: %d\n", vgpu->id, cmd);
@@ -1478,7 +1385,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
info.flags = VFIO_DEVICE_FLAGS_PCI;
info.flags |= VFIO_DEVICE_FLAGS_RESET;
info.num_regions = VFIO_PCI_NUM_REGIONS +
- vdev->num_regions;
+ vgpu->num_regions;
info.num_irqs = VFIO_PCI_NUM_IRQS;
return copy_to_user((void __user *)arg, &info, minsz) ?
@@ -1569,22 +1476,22 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
.header.version = 1 };
if (info.index >= VFIO_PCI_NUM_REGIONS +
- vdev->num_regions)
+ vgpu->num_regions)
return -EINVAL;
info.index =
array_index_nospec(info.index,
VFIO_PCI_NUM_REGIONS +
- vdev->num_regions);
+ vgpu->num_regions);
i = info.index - VFIO_PCI_NUM_REGIONS;
info.offset =
VFIO_PCI_INDEX_TO_OFFSET(info.index);
- info.size = vdev->region[i].size;
- info.flags = vdev->region[i].flags;
+ info.size = vgpu->region[i].size;
+ info.flags = vgpu->region[i].flags;
- cap_type.type = vdev->region[i].type;
- cap_type.subtype = vdev->region[i].subtype;
+ cap_type.type = vgpu->region[i].type;
+ cap_type.subtype = vgpu->region[i].subtype;
ret = vfio_info_add_capability(&caps,
&cap_type.header,
@@ -1700,7 +1607,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
return ret;
} else if (cmd == VFIO_DEVICE_RESET) {
- intel_gvt_ops->vgpu_reset(vgpu);
+ intel_gvt_reset_vgpu(vgpu);
return 0;
} else if (cmd == VFIO_DEVICE_QUERY_GFX_PLANE) {
struct vfio_device_gfx_plane_info dmabuf;
@@ -1713,7 +1620,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
if (dmabuf.argsz < minsz)
return -EINVAL;
- ret = intel_gvt_ops->vgpu_query_plane(vgpu, &dmabuf);
+ ret = intel_vgpu_query_plane(vgpu, &dmabuf);
if (ret != 0)
return ret;
@@ -1721,14 +1628,10 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
-EFAULT : 0;
} else if (cmd == VFIO_DEVICE_GET_GFX_DMABUF) {
__u32 dmabuf_id;
- __s32 dmabuf_fd;
if (get_user(dmabuf_id, (__u32 __user *)arg))
return -EFAULT;
-
- dmabuf_fd = intel_gvt_ops->vgpu_get_dmabuf(vgpu, dmabuf_id);
- return dmabuf_fd;
-
+ return intel_vgpu_get_dmabuf(vgpu, dmabuf_id);
}
return -ENOTTY;
@@ -1738,14 +1641,9 @@ static ssize_t
vgpu_id_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct mdev_device *mdev = mdev_from_dev(dev);
+ struct intel_vgpu *vgpu = dev_get_drvdata(dev);
- if (mdev) {
- struct intel_vgpu *vgpu = (struct intel_vgpu *)
- mdev_get_drvdata(mdev);
- return sprintf(buf, "%d\n", vgpu->id);
- }
- return sprintf(buf, "\n");
+ return sprintf(buf, "%d\n", vgpu->id);
}
static DEVICE_ATTR_RO(vgpu_id);
@@ -1765,57 +1663,78 @@ static const struct attribute_group *intel_vgpu_groups[] = {
NULL,
};
-static struct mdev_parent_ops intel_vgpu_ops = {
- .mdev_attr_groups = intel_vgpu_groups,
- .create = intel_vgpu_create,
- .remove = intel_vgpu_remove,
-
- .open_device = intel_vgpu_open_device,
- .close_device = intel_vgpu_close_device,
-
- .read = intel_vgpu_read,
- .write = intel_vgpu_write,
- .mmap = intel_vgpu_mmap,
- .ioctl = intel_vgpu_ioctl,
+static const struct vfio_device_ops intel_vgpu_dev_ops = {
+ .open_device = intel_vgpu_open_device,
+ .close_device = intel_vgpu_close_device,
+ .read = intel_vgpu_read,
+ .write = intel_vgpu_write,
+ .mmap = intel_vgpu_mmap,
+ .ioctl = intel_vgpu_ioctl,
};
-static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops)
+static int intel_vgpu_probe(struct mdev_device *mdev)
{
+ struct device *pdev = mdev_parent_dev(mdev);
+ struct intel_gvt *gvt = kdev_to_i915(pdev)->gvt;
+ struct intel_vgpu_type *type;
+ struct intel_vgpu *vgpu;
int ret;
- ret = intel_gvt_init_vgpu_type_groups((struct intel_gvt *)gvt);
- if (ret)
- return ret;
+ type = &gvt->types[mdev_get_type_group_id(mdev)];
+ if (!type)
+ return -EINVAL;
- intel_gvt_ops = ops;
- intel_vgpu_ops.supported_type_groups = gvt_vgpu_type_groups;
+ vgpu = intel_gvt_create_vgpu(gvt, type);
+ if (IS_ERR(vgpu)) {
+ gvt_err("failed to create intel vgpu: %ld\n", PTR_ERR(vgpu));
+ return PTR_ERR(vgpu);
+ }
- ret = mdev_register_device(dev, &intel_vgpu_ops);
- if (ret)
- intel_gvt_cleanup_vgpu_type_groups((struct intel_gvt *)gvt);
+ INIT_WORK(&vgpu->release_work, intel_vgpu_release_work);
+ vfio_init_group_dev(&vgpu->vfio_device, &mdev->dev,
+ &intel_vgpu_dev_ops);
- return ret;
+ dev_set_drvdata(&mdev->dev, vgpu);
+ ret = vfio_register_emulated_iommu_dev(&vgpu->vfio_device);
+ if (ret) {
+ intel_gvt_destroy_vgpu(vgpu);
+ return ret;
+ }
+
+ gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
+ dev_name(mdev_dev(mdev)));
+ return 0;
}
-static void kvmgt_host_exit(struct device *dev, void *gvt)
+static void intel_vgpu_remove(struct mdev_device *mdev)
{
- mdev_unregister_device(dev);
- intel_gvt_cleanup_vgpu_type_groups((struct intel_gvt *)gvt);
-}
+ struct intel_vgpu *vgpu = dev_get_drvdata(&mdev->dev);
-static int kvmgt_page_track_add(unsigned long handle, u64 gfn)
+ if (WARN_ON_ONCE(vgpu->attached))
+ return;
+ intel_gvt_destroy_vgpu(vgpu);
+}
+
+static struct mdev_driver intel_vgpu_mdev_driver = {
+ .driver = {
+ .name = "intel_vgpu_mdev",
+ .owner = THIS_MODULE,
+ .dev_groups = intel_vgpu_groups,
+ },
+ .probe = intel_vgpu_probe,
+ .remove = intel_vgpu_remove,
+ .supported_type_groups = gvt_vgpu_type_groups,
+};
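
With this conversion the vGPU is a regular driver-model object: a struct mdev_driver carrying probe/remove, paired with a vfio_device_ops table, registered once at module load instead of per-parent-device callbacks. A usage-shaped sketch of that ops-table pairing, with the types reduced to the fields the pattern needs:

    #include <stdio.h>

    struct device { const char *name; };

    struct mdev_driver_sketch {
        const char *name;
        int  (*probe)(struct device *dev);
        void (*remove)(struct device *dev);
    };

    static int probe(struct device *dev)
    {
        printf("probe %s: allocate vgpu, register vfio device\n", dev->name);
        return 0;
    }

    static void remove_dev(struct device *dev)
    {
        printf("remove %s: destroy vgpu\n", dev->name);
    }

    static const struct mdev_driver_sketch drv = {
        .name = "intel_vgpu_mdev",
        .probe = probe,
        .remove = remove_dev,
    };

    int main(void)
    {
        struct device d = { "mdev0" };

        /* The mdev core would invoke these on hot-add and hot-remove. */
        drv.probe(&d);
        drv.remove(&d);
        return 0;
    }
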
+
+int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn)
{
- struct kvmgt_guest_info *info;
- struct kvm *kvm;
+ struct kvm *kvm = info->kvm;
struct kvm_memory_slot *slot;
int idx;
- if (!handle_valid(handle))
+ if (!info->attached)
return -ESRCH;
- info = (struct kvmgt_guest_info *)handle;
- kvm = info->kvm;
-
idx = srcu_read_lock(&kvm->srcu);
slot = gfn_to_memslot(kvm, gfn);
if (!slot) {
@@ -1837,19 +1756,15 @@ out:
return 0;
}
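
Page-track add and remove both bracket the gfn_to_memslot() lookup in srcu_read_lock()/srcu_read_unlock() on kvm->srcu, because KVM publishes memslots via SRCU. The shape of that critical section as a hedged sketch; kvm_srcu_lock()/kvm_srcu_unlock() and lookup_slot() are stand-ins for the real KVM primitives:

    #include <stddef.h>

    struct slot { unsigned long base_gfn; };

    static int  kvm_srcu_lock(void)      { return 0; }
    static void kvm_srcu_unlock(int idx) { (void)idx; }
    static struct slot *lookup_slot(unsigned long gfn) { (void)gfn; return NULL; }

    static int track_add(unsigned long gfn)
    {
        int idx = kvm_srcu_lock();
        struct slot *s = lookup_slot(gfn);
        int ret = 0;

        if (!s)
            ret = -1;    /* -EINVAL in the driver */
        /* ...write-protect the slot here, still inside the read-side
         * critical section so the memslot cannot go away... */
        kvm_srcu_unlock(idx);
        return ret;
    }

    int main(void)
    {
        return track_add(0) ? 1 : 0;
    }
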
-static int kvmgt_page_track_remove(unsigned long handle, u64 gfn)
+int intel_gvt_page_track_remove(struct intel_vgpu *info, u64 gfn)
{
- struct kvmgt_guest_info *info;
- struct kvm *kvm;
+ struct kvm *kvm = info->kvm;
struct kvm_memory_slot *slot;
int idx;
- if (!handle_valid(handle))
+ if (!info->attached)
return 0;
- info = (struct kvmgt_guest_info *)handle;
- kvm = info->kvm;
-
idx = srcu_read_lock(&kvm->srcu);
slot = gfn_to_memslot(kvm, gfn);
if (!slot) {
@@ -1875,11 +1790,11 @@ static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
const u8 *val, int len,
struct kvm_page_track_notifier_node *node)
{
- struct kvmgt_guest_info *info = container_of(node,
- struct kvmgt_guest_info, track_node);
+ struct intel_vgpu *info =
+ container_of(node, struct intel_vgpu, track_node);
if (kvmgt_gfn_is_write_protected(info, gpa_to_gfn(gpa)))
- intel_gvt_ops->write_protect_handler(info->vgpu, gpa,
+ intel_vgpu_page_track_handler(info, gpa,
(void *)val, len);
}
@@ -1889,8 +1804,8 @@ static void kvmgt_page_track_flush_slot(struct kvm *kvm,
{
int i;
gfn_t gfn;
- struct kvmgt_guest_info *info = container_of(node,
- struct kvmgt_guest_info, track_node);
+ struct intel_vgpu *info =
+ container_of(node, struct intel_vgpu, track_node);
write_lock(&kvm->mmu_lock);
for (i = 0; i < slot->npages; i++) {
@@ -1904,182 +1819,32 @@ static void kvmgt_page_track_flush_slot(struct kvm *kvm,
write_unlock(&kvm->mmu_lock);
}
-static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu, struct kvm *kvm)
-{
- struct intel_vgpu *itr;
- struct kvmgt_guest_info *info;
- int id;
- bool ret = false;
-
- mutex_lock(&vgpu->gvt->lock);
- for_each_active_vgpu(vgpu->gvt, itr, id) {
- if (!handle_valid(itr->handle))
- continue;
-
- info = (struct kvmgt_guest_info *)itr->handle;
- if (kvm && kvm == info->kvm) {
- ret = true;
- goto out;
- }
- }
-out:
- mutex_unlock(&vgpu->gvt->lock);
- return ret;
-}
-
-static int kvmgt_guest_init(struct mdev_device *mdev)
-{
- struct kvmgt_guest_info *info;
- struct intel_vgpu *vgpu;
- struct kvmgt_vdev *vdev;
- struct kvm *kvm;
-
- vgpu = mdev_get_drvdata(mdev);
- if (handle_valid(vgpu->handle))
- return -EEXIST;
-
- vdev = kvmgt_vdev(vgpu);
- kvm = vdev->kvm;
- if (!kvm || kvm->mm != current->mm) {
- gvt_vgpu_err("KVM is required to use Intel vGPU\n");
- return -ESRCH;
- }
-
- if (__kvmgt_vgpu_exist(vgpu, kvm))
- return -EEXIST;
-
- info = vzalloc(sizeof(struct kvmgt_guest_info));
- if (!info)
- return -ENOMEM;
-
- vgpu->handle = (unsigned long)info;
- info->vgpu = vgpu;
- info->kvm = kvm;
- kvm_get_kvm(info->kvm);
-
- kvmgt_protect_table_init(info);
- gvt_cache_init(vgpu);
-
- info->track_node.track_write = kvmgt_page_track_write;
- info->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
- kvm_page_track_register_notifier(kvm, &info->track_node);
-
- debugfs_create_ulong(KVMGT_DEBUGFS_FILENAME, 0444, vgpu->debugfs,
- &vdev->nr_cache_entries);
- return 0;
-}
-
-static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
-{
- debugfs_remove(debugfs_lookup(KVMGT_DEBUGFS_FILENAME,
- info->vgpu->debugfs));
-
- kvm_page_track_unregister_notifier(info->kvm, &info->track_node);
- kvm_put_kvm(info->kvm);
- kvmgt_protect_table_destroy(info);
- gvt_cache_destroy(info->vgpu);
- vfree(info);
-
- return true;
-}
-
-static int kvmgt_attach_vgpu(void *p_vgpu, unsigned long *handle)
-{
- struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
-
- vgpu->vdev = kzalloc(sizeof(struct kvmgt_vdev), GFP_KERNEL);
-
- if (!vgpu->vdev)
- return -ENOMEM;
-
- kvmgt_vdev(vgpu)->vgpu = vgpu;
-
- return 0;
-}
-
-static void kvmgt_detach_vgpu(void *p_vgpu)
+void intel_vgpu_detach_regions(struct intel_vgpu *vgpu)
{
int i;
- struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
- struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
- if (!vdev->region)
+ if (!vgpu->region)
return;
- for (i = 0; i < vdev->num_regions; i++)
- if (vdev->region[i].ops->release)
- vdev->region[i].ops->release(vgpu,
- &vdev->region[i]);
- vdev->num_regions = 0;
- kfree(vdev->region);
- vdev->region = NULL;
-
- kfree(vdev);
+ for (i = 0; i < vgpu->num_regions; i++)
+ if (vgpu->region[i].ops->release)
+ vgpu->region[i].ops->release(vgpu,
+ &vgpu->region[i]);
+ vgpu->num_regions = 0;
+ kfree(vgpu->region);
+ vgpu->region = NULL;
}
-static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
-{
- struct kvmgt_guest_info *info;
- struct intel_vgpu *vgpu;
- struct kvmgt_vdev *vdev;
-
- if (!handle_valid(handle))
- return -ESRCH;
-
- info = (struct kvmgt_guest_info *)handle;
- vgpu = info->vgpu;
- vdev = kvmgt_vdev(vgpu);
-
- /*
- * When guest is poweroff, msi_trigger is set to NULL, but vgpu's
- * config and mmio register isn't restored to default during guest
- * poweroff. If this vgpu is still used in next vm, this vgpu's pipe
- * may be enabled, then once this vgpu is active, it will get inject
- * vblank interrupt request. But msi_trigger is null until msi is
- * enabled by guest. so if msi_trigger is null, success is still
- * returned and don't inject interrupt into guest.
- */
- if (vdev->msi_trigger == NULL)
- return 0;
-
- if (eventfd_signal(vdev->msi_trigger, 1) == 1)
- return 0;
-
- return -EFAULT;
-}
-
-static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
-{
- struct kvmgt_guest_info *info;
- kvm_pfn_t pfn;
-
- if (!handle_valid(handle))
- return INTEL_GVT_INVALID_ADDR;
-
- info = (struct kvmgt_guest_info *)handle;
-
- pfn = gfn_to_pfn(info->kvm, gfn);
- if (is_error_noslot_pfn(pfn))
- return INTEL_GVT_INVALID_ADDR;
-
- return pfn;
-}
-
-static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
+int intel_gvt_dma_map_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
unsigned long size, dma_addr_t *dma_addr)
{
- struct intel_vgpu *vgpu;
- struct kvmgt_vdev *vdev;
struct gvt_dma *entry;
int ret;
- if (!handle_valid(handle))
+ if (!vgpu->attached)
return -EINVAL;
- vgpu = ((struct kvmgt_guest_info *)handle)->vgpu;
- vdev = kvmgt_vdev(vgpu);
-
- mutex_lock(&vdev->cache_lock);
+ mutex_lock(&vgpu->cache_lock);
entry = __gvt_cache_find_gfn(vgpu, gfn);
if (!entry) {
@@ -2107,36 +1872,31 @@ static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
*dma_addr = entry->dma_addr;
}
- mutex_unlock(&vdev->cache_lock);
+ mutex_unlock(&vgpu->cache_lock);
return 0;
err_unmap:
gvt_dma_unmap_page(vgpu, gfn, *dma_addr, size);
err_unlock:
- mutex_unlock(&vdev->cache_lock);
+ mutex_unlock(&vgpu->cache_lock);
return ret;
}
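
intel_gvt_dma_map_guest_page() is a get-or-create cache: look up by gfn under the lock, bump the reference and reuse the entry if present, otherwise pin+map and insert a fresh entry with a refcount of one. A reference-counted sketch; find_mapping()/insert_mapping()/map_page() are hypothetical stubs for the cache lookup, tree insert, and pin+dma-map steps:

    #include <stdlib.h>

    struct mapping {
        unsigned long gfn;
        unsigned long long dma;
        unsigned int refcount;
    };

    static struct mapping *find_mapping(unsigned long gfn) { (void)gfn; return NULL; }
    static void insert_mapping(struct mapping *m) { (void)m; }
    static int map_page(unsigned long gfn, unsigned long long *dma)
    {
        *dma = (unsigned long long)gfn << 12;    /* pretend identity map */
        return 0;
    }

    static int dma_map_guest_page(unsigned long gfn, unsigned long long *dma)
    {
        struct mapping *m = find_mapping(gfn);   /* under cache_lock in-kernel */

        if (m) {
            m->refcount++;        /* kref_get() in the driver */
            *dma = m->dma;
            return 0;
        }
        m = calloc(1, sizeof(*m));
        if (!m)
            return -1;
        if (map_page(gfn, &m->dma)) {
            free(m);
            return -1;
        }
        m->gfn = gfn;
        m->refcount = 1;          /* kref_init() */
        insert_mapping(m);
        *dma = m->dma;
        return 0;
    }

    int main(void)
    {
        unsigned long long dma;

        return dma_map_guest_page(0x42, &dma);
    }
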
-static int kvmgt_dma_pin_guest_page(unsigned long handle, dma_addr_t dma_addr)
+int intel_gvt_dma_pin_guest_page(struct intel_vgpu *vgpu, dma_addr_t dma_addr)
{
- struct kvmgt_guest_info *info;
- struct kvmgt_vdev *vdev;
struct gvt_dma *entry;
int ret = 0;
- if (!handle_valid(handle))
+ if (!vgpu->attached)
return -ENODEV;
- info = (struct kvmgt_guest_info *)handle;
- vdev = kvmgt_vdev(info->vgpu);
-
- mutex_lock(&vdev->cache_lock);
- entry = __gvt_cache_find_dma_addr(info->vgpu, dma_addr);
+ mutex_lock(&vgpu->cache_lock);
+ entry = __gvt_cache_find_dma_addr(vgpu, dma_addr);
if (entry)
kref_get(&entry->ref);
else
ret = -ENOMEM;
- mutex_unlock(&vdev->cache_lock);
+ mutex_unlock(&vgpu->cache_lock);
return ret;
}
@@ -2150,109 +1910,290 @@ static void __gvt_dma_release(struct kref *ref)
__gvt_cache_remove_entry(entry->vgpu, entry);
}
-static void kvmgt_dma_unmap_guest_page(unsigned long handle, dma_addr_t dma_addr)
+void intel_gvt_dma_unmap_guest_page(struct intel_vgpu *vgpu,
+ dma_addr_t dma_addr)
{
- struct intel_vgpu *vgpu;
- struct kvmgt_vdev *vdev;
struct gvt_dma *entry;
- if (!handle_valid(handle))
+ if (!vgpu->attached)
return;
- vgpu = ((struct kvmgt_guest_info *)handle)->vgpu;
- vdev = kvmgt_vdev(vgpu);
-
- mutex_lock(&vdev->cache_lock);
+ mutex_lock(&vgpu->cache_lock);
entry = __gvt_cache_find_dma_addr(vgpu, dma_addr);
if (entry)
kref_put(&entry->ref, __gvt_dma_release);
- mutex_unlock(&vdev->cache_lock);
+ mutex_unlock(&vgpu->cache_lock);
}
-static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
- void *buf, unsigned long len, bool write)
+static void init_device_info(struct intel_gvt *gvt)
{
- struct kvmgt_guest_info *info;
+ struct intel_gvt_device_info *info = &gvt->device_info;
+ struct pci_dev *pdev = to_pci_dev(gvt->gt->i915->drm.dev);
- if (!handle_valid(handle))
- return -ESRCH;
+ info->max_support_vgpus = 8;
+ info->cfg_space_size = PCI_CFG_SPACE_EXP_SIZE;
+ info->mmio_size = 2 * 1024 * 1024;
+ info->mmio_bar = 0;
+ info->gtt_start_offset = 8 * 1024 * 1024;
+ info->gtt_entry_size = 8;
+ info->gtt_entry_size_shift = 3;
+ info->gmadr_bytes_in_cmd = 8;
+ info->max_surface_size = 36 * 1024 * 1024;
+ info->msi_cap_offset = pdev->msi_cap;
+}
- info = (struct kvmgt_guest_info *)handle;
+static void intel_gvt_test_and_emulate_vblank(struct intel_gvt *gvt)
+{
+ struct intel_vgpu *vgpu;
+ int id;
- return vfio_dma_rw(kvmgt_vdev(info->vgpu)->vfio_group,
- gpa, buf, len, write);
+ mutex_lock(&gvt->lock);
+ idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) {
+ if (test_and_clear_bit(INTEL_GVT_REQUEST_EMULATE_VBLANK + id,
+ (void *)&gvt->service_request)) {
+ if (vgpu->active)
+ intel_vgpu_emulate_vblank(vgpu);
+ }
+ }
+ mutex_unlock(&gvt->lock);
}
-static int kvmgt_read_gpa(unsigned long handle, unsigned long gpa,
- void *buf, unsigned long len)
+static int gvt_service_thread(void *data)
{
- return kvmgt_rw_gpa(handle, gpa, buf, len, false);
+ struct intel_gvt *gvt = (struct intel_gvt *)data;
+ int ret;
+
+ gvt_dbg_core("service thread start\n");
+
+ while (!kthread_should_stop()) {
+ ret = wait_event_interruptible(gvt->service_thread_wq,
+ kthread_should_stop() || gvt->service_request);
+
+ if (kthread_should_stop())
+ break;
+
+ if (WARN_ONCE(ret, "service thread was woken up by a signal.\n"))
+ continue;
+
+ intel_gvt_test_and_emulate_vblank(gvt);
+
+ if (test_bit(INTEL_GVT_REQUEST_SCHED,
+ (void *)&gvt->service_request) ||
+ test_bit(INTEL_GVT_REQUEST_EVENT_SCHED,
+ (void *)&gvt->service_request)) {
+ intel_gvt_schedule(gvt);
+ }
+ }
+
+ return 0;
}
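
The service thread follows the standard kthread pattern: sleep until a request bit is set or a stop is requested, re-check the stop condition after every wakeup, and treat signal-induced wakeups as non-fatal. A pthread-flavoured analogue of the same loop shape, with a condition variable standing in for the wait queue:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  wq   = PTHREAD_COND_INITIALIZER;
    static bool should_stop;
    static unsigned int service_request;

    /* Same shape as gvt_service_thread(): wait for work or stop, re-check
     * stop after waking, then dispatch on the request bits. */
    static void *service_thread(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&lock);
        while (!should_stop) {
            while (!should_stop && !service_request)
                pthread_cond_wait(&wq, &lock);
            if (should_stop)
                break;
            unsigned int req = service_request;
            service_request = 0;
            pthread_mutex_unlock(&lock);
            printf("handle request mask 0x%x\n", req);
            pthread_mutex_lock(&lock);
        }
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, service_thread, NULL);
        pthread_mutex_lock(&lock);
        service_request |= 1;
        pthread_cond_signal(&wq);
        pthread_mutex_unlock(&lock);

        pthread_mutex_lock(&lock);
        should_stop = true;    /* kthread_stop() analogue */
        pthread_cond_signal(&wq);
        pthread_mutex_unlock(&lock);
        pthread_join(t, NULL);
        return 0;
    }
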
-static int kvmgt_write_gpa(unsigned long handle, unsigned long gpa,
- void *buf, unsigned long len)
+static void clean_service_thread(struct intel_gvt *gvt)
{
- return kvmgt_rw_gpa(handle, gpa, buf, len, true);
+ kthread_stop(gvt->service_thread);
}
-static unsigned long kvmgt_virt_to_pfn(void *addr)
+static int init_service_thread(struct intel_gvt *gvt)
{
- return PFN_DOWN(__pa(addr));
+ init_waitqueue_head(&gvt->service_thread_wq);
+
+ gvt->service_thread = kthread_run(gvt_service_thread,
+ gvt, "gvt_service_thread");
+ if (IS_ERR(gvt->service_thread)) {
+ gvt_err("fail to start service thread.\n");
+ return PTR_ERR(gvt->service_thread);
+ }
+ return 0;
}
-static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn)
+/**
+ * intel_gvt_clean_device - clean a GVT device
+ * @i915: i915 private
+ *
+ * This function is called at the driver unloading stage, to free the
+ * resources owned by a GVT device.
+ *
+ */
+static void intel_gvt_clean_device(struct drm_i915_private *i915)
{
- struct kvmgt_guest_info *info;
- struct kvm *kvm;
- int idx;
- bool ret;
+ struct intel_gvt *gvt = fetch_and_zero(&i915->gvt);
- if (!handle_valid(handle))
- return false;
+ if (drm_WARN_ON(&i915->drm, !gvt))
+ return;
- info = (struct kvmgt_guest_info *)handle;
- kvm = info->kvm;
+ mdev_unregister_device(i915->drm.dev);
+ intel_gvt_cleanup_vgpu_type_groups(gvt);
+ intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
+ intel_gvt_clean_vgpu_types(gvt);
- idx = srcu_read_lock(&kvm->srcu);
- ret = kvm_is_visible_gfn(kvm, gfn);
- srcu_read_unlock(&kvm->srcu, idx);
+ intel_gvt_debugfs_clean(gvt);
+ clean_service_thread(gvt);
+ intel_gvt_clean_cmd_parser(gvt);
+ intel_gvt_clean_sched_policy(gvt);
+ intel_gvt_clean_workload_scheduler(gvt);
+ intel_gvt_clean_gtt(gvt);
+ intel_gvt_free_firmware(gvt);
+ intel_gvt_clean_mmio_info(gvt);
+ idr_destroy(&gvt->vgpu_idr);
+
+ kfree(i915->gvt);
+}
+
+/**
+ * intel_gvt_init_device - initialize a GVT device
+ * @i915: drm i915 private data
+ *
+ * This function is called at the initialization stage, to initialize
+ * necessary GVT components.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ *
+ */
+static int intel_gvt_init_device(struct drm_i915_private *i915)
+{
+ struct intel_gvt *gvt;
+ struct intel_vgpu *vgpu;
+ int ret;
+
+ if (drm_WARN_ON(&i915->drm, i915->gvt))
+ return -EEXIST;
+
+ gvt = kzalloc(sizeof(struct intel_gvt), GFP_KERNEL);
+ if (!gvt)
+ return -ENOMEM;
+
+ gvt_dbg_core("init gvt device\n");
+
+ idr_init_base(&gvt->vgpu_idr, 1);
+ spin_lock_init(&gvt->scheduler.mmio_context_lock);
+ mutex_init(&gvt->lock);
+ mutex_init(&gvt->sched_lock);
+ gvt->gt = to_gt(i915);
+ i915->gvt = gvt;
+
+ init_device_info(gvt);
+
+ ret = intel_gvt_setup_mmio_info(gvt);
+ if (ret)
+ goto out_clean_idr;
+
+ intel_gvt_init_engine_mmio_context(gvt);
+
+ ret = intel_gvt_load_firmware(gvt);
+ if (ret)
+ goto out_clean_mmio_info;
+
+ ret = intel_gvt_init_irq(gvt);
+ if (ret)
+ goto out_free_firmware;
+
+ ret = intel_gvt_init_gtt(gvt);
+ if (ret)
+ goto out_free_firmware;
+ ret = intel_gvt_init_workload_scheduler(gvt);
+ if (ret)
+ goto out_clean_gtt;
+
+ ret = intel_gvt_init_sched_policy(gvt);
+ if (ret)
+ goto out_clean_workload_scheduler;
+
+ ret = intel_gvt_init_cmd_parser(gvt);
+ if (ret)
+ goto out_clean_sched_policy;
+
+ ret = init_service_thread(gvt);
+ if (ret)
+ goto out_clean_cmd_parser;
+
+ ret = intel_gvt_init_vgpu_types(gvt);
+ if (ret)
+ goto out_clean_thread;
+
+ vgpu = intel_gvt_create_idle_vgpu(gvt);
+ if (IS_ERR(vgpu)) {
+ ret = PTR_ERR(vgpu);
+ gvt_err("failed to create idle vgpu\n");
+ goto out_clean_types;
+ }
+ gvt->idle_vgpu = vgpu;
+
+ intel_gvt_debugfs_init(gvt);
+
+ ret = intel_gvt_init_vgpu_type_groups(gvt);
+ if (ret)
+ goto out_destroy_idle_vgpu;
+
+ ret = mdev_register_device(i915->drm.dev, &intel_vgpu_mdev_driver);
+ if (ret)
+ goto out_cleanup_vgpu_type_groups;
+
+ gvt_dbg_core("gvt device initialization is done\n");
+ return 0;
+
+out_cleanup_vgpu_type_groups:
+ intel_gvt_cleanup_vgpu_type_groups(gvt);
+out_destroy_idle_vgpu:
+ intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
+ intel_gvt_debugfs_clean(gvt);
+out_clean_types:
+ intel_gvt_clean_vgpu_types(gvt);
+out_clean_thread:
+ clean_service_thread(gvt);
+out_clean_cmd_parser:
+ intel_gvt_clean_cmd_parser(gvt);
+out_clean_sched_policy:
+ intel_gvt_clean_sched_policy(gvt);
+out_clean_workload_scheduler:
+ intel_gvt_clean_workload_scheduler(gvt);
+out_clean_gtt:
+ intel_gvt_clean_gtt(gvt);
+out_free_firmware:
+ intel_gvt_free_firmware(gvt);
+out_clean_mmio_info:
+ intel_gvt_clean_mmio_info(gvt);
+out_clean_idr:
+ idr_destroy(&gvt->vgpu_idr);
+ kfree(gvt);
+ i915->gvt = NULL;
return ret;
}
-static const struct intel_gvt_mpt kvmgt_mpt = {
- .type = INTEL_GVT_HYPERVISOR_KVM,
- .host_init = kvmgt_host_init,
- .host_exit = kvmgt_host_exit,
- .attach_vgpu = kvmgt_attach_vgpu,
- .detach_vgpu = kvmgt_detach_vgpu,
- .inject_msi = kvmgt_inject_msi,
- .from_virt_to_mfn = kvmgt_virt_to_pfn,
- .enable_page_track = kvmgt_page_track_add,
- .disable_page_track = kvmgt_page_track_remove,
- .read_gpa = kvmgt_read_gpa,
- .write_gpa = kvmgt_write_gpa,
- .gfn_to_mfn = kvmgt_gfn_to_pfn,
- .dma_map_guest_page = kvmgt_dma_map_guest_page,
- .dma_unmap_guest_page = kvmgt_dma_unmap_guest_page,
- .dma_pin_guest_page = kvmgt_dma_pin_guest_page,
- .set_opregion = kvmgt_set_opregion,
- .set_edid = kvmgt_set_edid,
- .get_vfio_device = kvmgt_get_vfio_device,
- .put_vfio_device = kvmgt_put_vfio_device,
- .is_valid_gfn = kvmgt_is_valid_gfn,
+static void intel_gvt_pm_resume(struct drm_i915_private *i915)
+{
+ struct intel_gvt *gvt = i915->gvt;
+
+ intel_gvt_restore_fence(gvt);
+ intel_gvt_restore_mmio(gvt);
+ intel_gvt_restore_ggtt(gvt);
+}
+
+static const struct intel_vgpu_ops intel_gvt_vgpu_ops = {
+ .init_device = intel_gvt_init_device,
+ .clean_device = intel_gvt_clean_device,
+ .pm_resume = intel_gvt_pm_resume,
};
static int __init kvmgt_init(void)
{
- if (intel_gvt_register_hypervisor(&kvmgt_mpt) < 0)
- return -ENODEV;
- return 0;
+ int ret;
+
+ ret = intel_gvt_set_ops(&intel_gvt_vgpu_ops);
+ if (ret)
+ return ret;
+
+ ret = mdev_register_driver(&intel_vgpu_mdev_driver);
+ if (ret)
+ intel_gvt_clear_ops(&intel_gvt_vgpu_ops);
+ return ret;
}
static void __exit kvmgt_exit(void)
{
- intel_gvt_unregister_hypervisor();
+ mdev_unregister_driver(&intel_vgpu_mdev_driver);
+ intel_gvt_clear_ops(&intel_gvt_vgpu_ops);
}
module_init(kvmgt_init);
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index 5db0ef83d522..9acc00505fde 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -139,7 +139,7 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,
}
if (drm_WARN_ON_ONCE(&i915->drm, !reg_is_mmio(gvt, offset))) {
- ret = intel_gvt_hypervisor_read_gpa(vgpu, pa, p_data, bytes);
+ ret = intel_gvt_read_gpa(vgpu, pa, p_data, bytes);
goto out;
}
@@ -215,7 +215,7 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, u64 pa,
}
if (drm_WARN_ON_ONCE(&i915->drm, !reg_is_mmio(gvt, offset))) {
- ret = intel_gvt_hypervisor_write_gpa(vgpu, pa, p_data, bytes);
+ ret = intel_gvt_write_gpa(vgpu, pa, p_data, bytes);
goto out;
}
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
index 7c26af39fbfc..bba154e38705 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.h
+++ b/drivers/gpu/drm/i915/gvt/mmio.h
@@ -72,7 +72,6 @@ struct intel_gvt_mmio_info {
const struct intel_engine_cs *
intel_gvt_render_mmio_to_engine(struct intel_gvt *gvt, unsigned int reg);
unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt);
-bool intel_gvt_match_device(struct intel_gvt *gvt, unsigned long device);
int intel_gvt_setup_mmio_info(struct intel_gvt *gvt);
void intel_gvt_clean_mmio_info(struct intel_gvt *gvt);
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h
deleted file mode 100644
index e6c5a792a49a..000000000000
--- a/drivers/gpu/drm/i915/gvt/mpt.h
+++ /dev/null
@@ -1,400 +0,0 @@
-/*
- * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- * Authors:
- * Eddie Dong <eddie.dong@intel.com>
- * Dexuan Cui
- * Jike Song <jike.song@intel.com>
- *
- * Contributors:
- * Zhi Wang <zhi.a.wang@intel.com>
- *
- */
-
-#ifndef _GVT_MPT_H_
-#define _GVT_MPT_H_
-
-#include "gvt.h"
-
-/**
- * DOC: Hypervisor Service APIs for GVT-g Core Logic
- *
- * This is the glue layer between specific hypervisor MPT modules and GVT-g core
- * logic. Each kind of hypervisor MPT module provides a collection of function
- * callbacks and will be attached to GVT host when the driver is loading.
- * GVT-g core logic will call these APIs to request specific services from
- * hypervisor.
- */
-
-/**
- * intel_gvt_hypervisor_host_init - init GVT-g host side
- *
- * Returns:
- * Zero on success, negative error code if failed
- */
-static inline int intel_gvt_hypervisor_host_init(struct device *dev,
- void *gvt, const void *ops)
-{
- if (!intel_gvt_host.mpt->host_init)
- return -ENODEV;
-
- return intel_gvt_host.mpt->host_init(dev, gvt, ops);
-}
-
-/**
- * intel_gvt_hypervisor_host_exit - exit GVT-g host side
- */
-static inline void intel_gvt_hypervisor_host_exit(struct device *dev, void *gvt)
-{
- /* optional to provide */
- if (!intel_gvt_host.mpt->host_exit)
- return;
-
- intel_gvt_host.mpt->host_exit(dev, gvt);
-}
-
-/**
- * intel_gvt_hypervisor_attach_vgpu - call hypervisor to initialize vGPU
- * related stuffs inside hypervisor.
- *
- * Returns:
- * Zero on success, negative error code if failed.
- */
-static inline int intel_gvt_hypervisor_attach_vgpu(struct intel_vgpu *vgpu)
-{
- /* optional to provide */
- if (!intel_gvt_host.mpt->attach_vgpu)
- return 0;
-
- return intel_gvt_host.mpt->attach_vgpu(vgpu, &vgpu->handle);
-}
-
-/**
- * intel_gvt_hypervisor_detach_vgpu - call hypervisor to release vGPU
- * related stuffs inside hypervisor.
- *
- * Returns:
- * Zero on success, negative error code if failed.
- */
-static inline void intel_gvt_hypervisor_detach_vgpu(struct intel_vgpu *vgpu)
-{
- /* optional to provide */
- if (!intel_gvt_host.mpt->detach_vgpu)
- return;
-
- intel_gvt_host.mpt->detach_vgpu(vgpu);
-}
-
-#define MSI_CAP_CONTROL(offset) (offset + 2)
-#define MSI_CAP_ADDRESS(offset) (offset + 4)
-#define MSI_CAP_DATA(offset) (offset + 8)
-#define MSI_CAP_EN 0x1
-
-/**
- * intel_gvt_hypervisor_inject_msi - inject a MSI interrupt into vGPU
- *
- * Returns:
- * Zero on success, negative error code if failed.
- */
-static inline int intel_gvt_hypervisor_inject_msi(struct intel_vgpu *vgpu)
-{
- unsigned long offset = vgpu->gvt->device_info.msi_cap_offset;
- u16 control, data;
- u32 addr;
- int ret;
-
- control = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_CONTROL(offset));
- addr = *(u32 *)(vgpu_cfg_space(vgpu) + MSI_CAP_ADDRESS(offset));
- data = *(u16 *)(vgpu_cfg_space(vgpu) + MSI_CAP_DATA(offset));
-
- /* Do not generate MSI if MSIEN is disable */
- if (!(control & MSI_CAP_EN))
- return 0;
-
- if (WARN(control & GENMASK(15, 1), "only support one MSI format\n"))
- return -EINVAL;
-
- trace_inject_msi(vgpu->id, addr, data);
-
- ret = intel_gvt_host.mpt->inject_msi(vgpu->handle, addr, data);
- if (ret)
- return ret;
- return 0;
-}
-
-/**
- * intel_gvt_hypervisor_set_wp_page - translate a host VA into MFN
- * @p: host kernel virtual address
- *
- * Returns:
- * MFN on success, INTEL_GVT_INVALID_ADDR if failed.
- */
-static inline unsigned long intel_gvt_hypervisor_virt_to_mfn(void *p)
-{
- return intel_gvt_host.mpt->from_virt_to_mfn(p);
-}
-
-/**
- * intel_gvt_hypervisor_enable_page_track - track a guest page
- * @vgpu: a vGPU
- * @gfn: the gfn of guest
- *
- * Returns:
- * Zero on success, negative error code if failed.
- */
-static inline int intel_gvt_hypervisor_enable_page_track(
- struct intel_vgpu *vgpu, unsigned long gfn)
-{
- return intel_gvt_host.mpt->enable_page_track(vgpu->handle, gfn);
-}
-
-/**
- * intel_gvt_hypervisor_disable_page_track - untrack a guest page
- * @vgpu: a vGPU
- * @gfn: the gfn of guest
- *
- * Returns:
- * Zero on success, negative error code if failed.
- */
-static inline int intel_gvt_hypervisor_disable_page_track(
- struct intel_vgpu *vgpu, unsigned long gfn)
-{
- return intel_gvt_host.mpt->disable_page_track(vgpu->handle, gfn);
-}
-
-/**
- * intel_gvt_hypervisor_read_gpa - copy data from GPA to host data buffer
- * @vgpu: a vGPU
- * @gpa: guest physical address
- * @buf: host data buffer
- * @len: data length
- *
- * Returns:
- * Zero on success, negative error code if failed.
- */
-static inline int intel_gvt_hypervisor_read_gpa(struct intel_vgpu *vgpu,
- unsigned long gpa, void *buf, unsigned long len)
-{
- return intel_gvt_host.mpt->read_gpa(vgpu->handle, gpa, buf, len);
-}
-
-/**
- * intel_gvt_hypervisor_write_gpa - copy data from host data buffer to GPA
- * @vgpu: a vGPU
- * @gpa: guest physical address
- * @buf: host data buffer
- * @len: data length
- *
- * Returns:
- * Zero on success, negative error code if failed.
- */
-static inline int intel_gvt_hypervisor_write_gpa(struct intel_vgpu *vgpu,
- unsigned long gpa, void *buf, unsigned long len)
-{
- return intel_gvt_host.mpt->write_gpa(vgpu->handle, gpa, buf, len);
-}
-
-/**
- * intel_gvt_hypervisor_gfn_to_mfn - translate a GFN to MFN
- * @vgpu: a vGPU
- * @gpfn: guest pfn
- *
- * Returns:
- * MFN on success, INTEL_GVT_INVALID_ADDR if failed.
- */
-static inline unsigned long intel_gvt_hypervisor_gfn_to_mfn(
- struct intel_vgpu *vgpu, unsigned long gfn)
-{
- return intel_gvt_host.mpt->gfn_to_mfn(vgpu->handle, gfn);
-}
-
-/**
- * intel_gvt_hypervisor_dma_map_guest_page - setup dma map for guest page
- * @vgpu: a vGPU
- * @gfn: guest pfn
- * @size: page size
- * @dma_addr: retrieve allocated dma addr
- *
- * Returns:
- * 0 on success, negative error code if failed.
- */
-static inline int intel_gvt_hypervisor_dma_map_guest_page(
- struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size,
- dma_addr_t *dma_addr)
-{
- return intel_gvt_host.mpt->dma_map_guest_page(vgpu->handle, gfn, size,
- dma_addr);
-}
-
-/**
- * intel_gvt_hypervisor_dma_unmap_guest_page - cancel dma map for guest page
- * @vgpu: a vGPU
- * @dma_addr: the mapped dma addr
- */
-static inline void intel_gvt_hypervisor_dma_unmap_guest_page(
- struct intel_vgpu *vgpu, dma_addr_t dma_addr)
-{
- intel_gvt_host.mpt->dma_unmap_guest_page(vgpu->handle, dma_addr);
-}
-
-/**
- * intel_gvt_hypervisor_dma_pin_guest_page - pin guest dma buf
- * @vgpu: a vGPU
- * @dma_addr: guest dma addr
- *
- * Returns:
- * 0 on success, negative error code if failed.
- */
-static inline int
-intel_gvt_hypervisor_dma_pin_guest_page(struct intel_vgpu *vgpu,
- dma_addr_t dma_addr)
-{
- return intel_gvt_host.mpt->dma_pin_guest_page(vgpu->handle, dma_addr);
-}
-
-/**
- * intel_gvt_hypervisor_map_gfn_to_mfn - map a GFN region to MFN
- * @vgpu: a vGPU
- * @gfn: guest PFN
- * @mfn: host PFN
- * @nr: amount of PFNs
- * @map: map or unmap
- *
- * Returns:
- * Zero on success, negative error code if failed.
- */
-static inline int intel_gvt_hypervisor_map_gfn_to_mfn(
- struct intel_vgpu *vgpu, unsigned long gfn,
- unsigned long mfn, unsigned int nr,
- bool map)
-{
- /* a MPT implementation could have MMIO mapped elsewhere */
- if (!intel_gvt_host.mpt->map_gfn_to_mfn)
- return 0;
-
- return intel_gvt_host.mpt->map_gfn_to_mfn(vgpu->handle, gfn, mfn, nr,
- map);
-}
-
-/**
- * intel_gvt_hypervisor_set_trap_area - Trap a guest PA region
- * @vgpu: a vGPU
- * @start: the beginning of the guest physical address region
- * @end: the end of the guest physical address region
- * @map: map or unmap
- *
- * Returns:
- * Zero on success, negative error code if failed.
- */
-static inline int intel_gvt_hypervisor_set_trap_area(
- struct intel_vgpu *vgpu, u64 start, u64 end, bool map)
-{
- /* a MPT implementation could have MMIO trapped elsewhere */
- if (!intel_gvt_host.mpt->set_trap_area)
- return 0;
-
- return intel_gvt_host.mpt->set_trap_area(vgpu->handle, start, end, map);
-}
-
-/**
- * intel_gvt_hypervisor_set_opregion - Set opregion for guest
- * @vgpu: a vGPU
- *
- * Returns:
- * Zero on success, negative error code if failed.
- */
-static inline int intel_gvt_hypervisor_set_opregion(struct intel_vgpu *vgpu)
-{
- if (!intel_gvt_host.mpt->set_opregion)
- return 0;
-
- return intel_gvt_host.mpt->set_opregion(vgpu);
-}
-
-/**
- * intel_gvt_hypervisor_set_edid - Set EDID region for guest
- * @vgpu: a vGPU
- * @port_num: display port number
- *
- * Returns:
- * Zero on success, negative error code if failed.
- */
-static inline int intel_gvt_hypervisor_set_edid(struct intel_vgpu *vgpu,
- int port_num)
-{
- if (!intel_gvt_host.mpt->set_edid)
- return 0;
-
- return intel_gvt_host.mpt->set_edid(vgpu, port_num);
-}
-
-/**
- * intel_gvt_hypervisor_get_vfio_device - increase vfio device ref count
- * @vgpu: a vGPU
- *
- * Returns:
- * Zero on success, negative error code if failed.
- */
-static inline int intel_gvt_hypervisor_get_vfio_device(struct intel_vgpu *vgpu)
-{
- if (!intel_gvt_host.mpt->get_vfio_device)
- return 0;
-
- return intel_gvt_host.mpt->get_vfio_device(vgpu);
-}
-
-/**
- * intel_gvt_hypervisor_put_vfio_device - decrease vfio device ref count
- * @vgpu: a vGPU
- *
- * Returns:
- * Zero on success, negative error code if failed.
- */
-static inline void intel_gvt_hypervisor_put_vfio_device(struct intel_vgpu *vgpu)
-{
- if (!intel_gvt_host.mpt->put_vfio_device)
- return;
-
- intel_gvt_host.mpt->put_vfio_device(vgpu);
-}
-
-/**
- * intel_gvt_hypervisor_is_valid_gfn - check if a visible gfn
- * @vgpu: a vGPU
- * @gfn: guest PFN
- *
- * Returns:
- * true on valid gfn, false on not.
- */
-static inline bool intel_gvt_hypervisor_is_valid_gfn(
- struct intel_vgpu *vgpu, unsigned long gfn)
-{
- if (!intel_gvt_host.mpt->is_valid_gfn)
- return true;
-
- return intel_gvt_host.mpt->is_valid_gfn(vgpu->handle, gfn);
-}
-
-int intel_gvt_register_hypervisor(const struct intel_gvt_mpt *);
-void intel_gvt_unregister_hypervisor(void);
-
-#endif /* _GVT_MPT_H_ */
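
The whole of mpt.h above was a vtable of hypervisor hooks dispatched through intel_gvt_host.mpt and an opaque vgpu->handle; with KVM the only remaining backend, this series replaces each hook with a direct, typed call on the vGPU. The before/after shape in miniature (all names illustrative):

    /* Before: an extra indirection and an untyped handle on every call. */
    struct mpt_ops {
        int (*read_gpa)(unsigned long handle, unsigned long gpa,
                        void *buf, unsigned long len);
    };

    static const struct mpt_ops *mpt;

    static int read_gpa_via_mpt(unsigned long handle, unsigned long gpa,
                                void *buf, unsigned long len)
    {
        return mpt->read_gpa(handle, gpa, buf, len);
    }

    /* After: the vGPU is the first argument and the call is direct. */
    struct vgpu_sketch { int id; };

    static int intel_gvt_read_gpa_sketch(struct vgpu_sketch *vgpu,
                                         unsigned long gpa, void *buf,
                                         unsigned long len)
    {
        (void)vgpu; (void)gpa; (void)buf; (void)len;
        return 0;    /* would call vfio_dma_rw() on vgpu->vfio_group */
    }

    int main(void)
    {
        return 0;
    }
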
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c
index 33569b910ed5..d2bed466540a 100644
--- a/drivers/gpu/drm/i915/gvt/opregion.c
+++ b/drivers/gpu/drm/i915/gvt/opregion.c
@@ -255,33 +255,6 @@ int intel_vgpu_init_opregion(struct intel_vgpu *vgpu)
return 0;
}
-static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
-{
- u64 mfn;
- int i, ret;
-
- for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) {
- mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)->va
- + i * PAGE_SIZE);
- if (mfn == INTEL_GVT_INVALID_ADDR) {
- gvt_vgpu_err("fail to get MFN from VA\n");
- return -EINVAL;
- }
- ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu,
- vgpu_opregion(vgpu)->gfn[i],
- mfn, 1, map);
- if (ret) {
- gvt_vgpu_err("fail to map GFN to MFN, errno: %d\n",
- ret);
- return ret;
- }
- }
-
- vgpu_opregion(vgpu)->mapped = map;
-
- return 0;
-}
-
/**
* intel_vgpu_opregion_base_write_handler - Opregion base register write handler
*
@@ -294,34 +267,13 @@ static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
int intel_vgpu_opregion_base_write_handler(struct intel_vgpu *vgpu, u32 gpa)
{
- int i, ret = 0;
+ int i;
gvt_dbg_core("emulate opregion from kernel\n");
- switch (intel_gvt_host.hypervisor_type) {
- case INTEL_GVT_HYPERVISOR_KVM:
- for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
- vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
- break;
- case INTEL_GVT_HYPERVISOR_XEN:
- /**
- * Windows guests on XenGT write this register twice: once from the Xen
- * hvmloader and once from the Windows graphics driver.
- */
- if (vgpu_opregion(vgpu)->mapped)
- map_vgpu_opregion(vgpu, false);
-
- for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
- vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
-
- ret = map_vgpu_opregion(vgpu, true);
- break;
- default:
- ret = -EINVAL;
- gvt_vgpu_err("not supported hypervisor\n");
- }
-
- return ret;
+ for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
+ vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
+ return 0;
}
/**
@@ -336,12 +288,7 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu)
if (!vgpu_opregion(vgpu)->va)
return;
- if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) {
- if (vgpu_opregion(vgpu)->mapped)
- map_vgpu_opregion(vgpu, false);
- } else if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_KVM) {
- /* Guest opregion is released by VFIO */
- }
+ /* Guest opregion is released by VFIO */
free_pages((unsigned long)vgpu_opregion(vgpu)->va,
get_order(INTEL_GVT_OPREGION_SIZE));
@@ -470,39 +417,22 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
u64 scic_pa = 0, parm_pa = 0;
int ret;
- switch (intel_gvt_host.hypervisor_type) {
- case INTEL_GVT_HYPERVISOR_XEN:
- scic = *((u32 *)vgpu_opregion(vgpu)->va +
- INTEL_GVT_OPREGION_SCIC);
- parm = *((u32 *)vgpu_opregion(vgpu)->va +
- INTEL_GVT_OPREGION_PARM);
- break;
- case INTEL_GVT_HYPERVISOR_KVM:
- scic_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) +
- INTEL_GVT_OPREGION_SCIC;
- parm_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) +
- INTEL_GVT_OPREGION_PARM;
-
- ret = intel_gvt_hypervisor_read_gpa(vgpu, scic_pa,
- &scic, sizeof(scic));
- if (ret) {
- gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n",
- ret, scic_pa, sizeof(scic));
- return ret;
- }
-
- ret = intel_gvt_hypervisor_read_gpa(vgpu, parm_pa,
- &parm, sizeof(parm));
- if (ret) {
- gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n",
- ret, scic_pa, sizeof(scic));
- return ret;
- }
+ scic_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) +
+ INTEL_GVT_OPREGION_SCIC;
+ parm_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) +
+ INTEL_GVT_OPREGION_PARM;
+ ret = intel_gvt_read_gpa(vgpu, scic_pa, &scic, sizeof(scic));
+ if (ret) {
+ gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n",
+ ret, scic_pa, sizeof(scic));
+ return ret;
+ }
- break;
- default:
- gvt_vgpu_err("not supported hypervisor\n");
- return -EINVAL;
+ ret = intel_gvt_read_gpa(vgpu, parm_pa, &parm, sizeof(parm));
+ if (ret) {
+ gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n",
+ ret, parm_pa, sizeof(parm));
+ return ret;
}
if (!(swsci & SWSCI_SCI_SELECT)) {
@@ -535,34 +465,18 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
parm = 0;
out:
- switch (intel_gvt_host.hypervisor_type) {
- case INTEL_GVT_HYPERVISOR_XEN:
- *((u32 *)vgpu_opregion(vgpu)->va +
- INTEL_GVT_OPREGION_SCIC) = scic;
- *((u32 *)vgpu_opregion(vgpu)->va +
- INTEL_GVT_OPREGION_PARM) = parm;
- break;
- case INTEL_GVT_HYPERVISOR_KVM:
- ret = intel_gvt_hypervisor_write_gpa(vgpu, scic_pa,
- &scic, sizeof(scic));
- if (ret) {
- gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n",
- ret, scic_pa, sizeof(scic));
- return ret;
- }
-
- ret = intel_gvt_hypervisor_write_gpa(vgpu, parm_pa,
- &parm, sizeof(parm));
- if (ret) {
- gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n",
- ret, scic_pa, sizeof(scic));
- return ret;
- }
+ ret = intel_gvt_write_gpa(vgpu, scic_pa, &scic, sizeof(scic));
+ if (ret) {
+ gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n",
+ ret, scic_pa, sizeof(scic));
+ return ret;
+ }
- break;
- default:
- gvt_vgpu_err("not supported hypervisor\n");
- return -EINVAL;
+ ret = intel_gvt_write_gpa(vgpu, parm_pa, &parm, sizeof(parm));
+ if (ret) {
+ gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n",
+ ret, parm_pa, sizeof(parm));
+ return ret;
}
return 0;
diff --git a/drivers/gpu/drm/i915/gvt/page_track.c b/drivers/gpu/drm/i915/gvt/page_track.c
index 84856022528e..3375b51c75f1 100644
--- a/drivers/gpu/drm/i915/gvt/page_track.c
+++ b/drivers/gpu/drm/i915/gvt/page_track.c
@@ -87,7 +87,7 @@ void intel_vgpu_unregister_page_track(struct intel_vgpu *vgpu,
track = radix_tree_delete(&vgpu->page_track_tree, gfn);
if (track) {
if (track->tracked)
- intel_gvt_hypervisor_disable_page_track(vgpu, gfn);
+ intel_gvt_page_track_remove(vgpu, gfn);
kfree(track);
}
}
@@ -112,7 +112,7 @@ int intel_vgpu_enable_page_track(struct intel_vgpu *vgpu, unsigned long gfn)
if (track->tracked)
return 0;
- ret = intel_gvt_hypervisor_enable_page_track(vgpu, gfn);
+ ret = intel_gvt_page_track_add(vgpu, gfn);
if (ret)
return ret;
track->tracked = true;
@@ -139,7 +139,7 @@ int intel_vgpu_disable_page_track(struct intel_vgpu *vgpu, unsigned long gfn)
if (!track->tracked)
return 0;
- ret = intel_gvt_hypervisor_disable_page_track(vgpu, gfn);
+ ret = intel_gvt_page_track_remove(vgpu, gfn);
if (ret)
return ret;
track->tracked = false;
@@ -172,7 +172,7 @@ int intel_vgpu_page_track_handler(struct intel_vgpu *vgpu, u64 gpa,
if (unlikely(vgpu->failsafe)) {
 /* Remove write protection to prevent future traps. */
- intel_vgpu_disable_page_track(vgpu, gpa >> PAGE_SHIFT);
+ intel_gvt_page_track_remove(vgpu, gpa >> PAGE_SHIFT);
} else {
ret = page_track->handler(page_track, gpa, data, bytes);
if (ret)
diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h
index 7d666d34f9ff..d8216c63c39a 100644
--- a/drivers/gpu/drm/i915/gvt/reg.h
+++ b/drivers/gpu/drm/i915/gvt/reg.h
@@ -132,6 +132,13 @@
#define RING_GFX_MODE(base) _MMIO((base) + 0x29c)
#define VF_GUARDBAND _MMIO(0x83a4)
-
#define BCS_TILE_REGISTER_VAL_OFFSET (0x43*4)
+
+/* XXX FIXME i915 has changed PP_XXX definition */
+#define PCH_PP_STATUS _MMIO(0xc7200)
+#define PCH_PP_CONTROL _MMIO(0xc7204)
+#define PCH_PP_ON_DELAYS _MMIO(0xc7208)
+#define PCH_PP_OFF_DELAYS _MMIO(0xc720c)
+#define PCH_PP_DIVISOR _MMIO(0xc7210)
+
#endif
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 679476da0640..d6fe94cd0fdb 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -150,10 +150,10 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
sr_oa_regs(workload, (u32 *)shadow_ring_context, true);
#define COPY_REG(name) \
- intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
+ intel_gvt_read_gpa(vgpu, workload->ring_context_gpa \
+ RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
#define COPY_REG_MASKED(name) {\
- intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
+ intel_gvt_read_gpa(vgpu, workload->ring_context_gpa \
+ RING_CTX_OFF(name.val),\
&shadow_ring_context->name.val, 4);\
shadow_ring_context->name.val |= 0xffff << 16;\
@@ -167,7 +167,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
COPY_REG(rcs_indirect_ctx);
COPY_REG(rcs_indirect_ctx_offset);
} else if (workload->engine->id == BCS0)
- intel_gvt_hypervisor_read_gpa(vgpu,
+ intel_gvt_read_gpa(vgpu,
workload->ring_context_gpa +
BCS_TILE_REGISTER_VAL_OFFSET,
(void *)shadow_ring_context +
@@ -178,7 +178,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
/* don't copy Ring Context (the first 0x50 dwords),
* only copy the Engine Context part from guest
*/
- intel_gvt_hypervisor_read_gpa(vgpu,
+ intel_gvt_read_gpa(vgpu,
workload->ring_context_gpa +
RING_CTX_SIZE,
(void *)shadow_ring_context +
@@ -245,7 +245,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
continue;
read:
- intel_gvt_hypervisor_read_gpa(vgpu, gpa_base, dst, gpa_size);
+ intel_gvt_read_gpa(vgpu, gpa_base, dst, gpa_size);
gpa_base = context_gpa;
gpa_size = I915_GTT_PAGE_SIZE;
dst = context_base + (i << I915_GTT_PAGE_SHIFT);
@@ -911,8 +911,7 @@ static void update_guest_pdps(struct intel_vgpu *vgpu,
gpa = ring_context_gpa + RING_CTX_OFF(pdps[0].val);
for (i = 0; i < 8; i++)
- intel_gvt_hypervisor_write_gpa(vgpu,
- gpa + i * 8, &pdp[7 - i], 4);
+ intel_gvt_write_gpa(vgpu, gpa + i * 8, &pdp[7 - i], 4);
}
static __maybe_unused bool
@@ -1007,13 +1006,13 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
continue;
write:
- intel_gvt_hypervisor_write_gpa(vgpu, gpa_base, src, gpa_size);
+ intel_gvt_write_gpa(vgpu, gpa_base, src, gpa_size);
gpa_base = context_gpa;
gpa_size = I915_GTT_PAGE_SIZE;
src = context_base + (i << I915_GTT_PAGE_SHIFT);
}
- intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +
+ intel_gvt_write_gpa(vgpu, workload->ring_context_gpa +
RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);
shadow_ring_context = (void *) ctx->lrc_reg_state;
@@ -1028,7 +1027,7 @@ write:
}
#define COPY_REG(name) \
- intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
+ intel_gvt_write_gpa(vgpu, workload->ring_context_gpa + \
RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
COPY_REG(ctx_ctrl);
@@ -1036,7 +1035,7 @@ write:
#undef COPY_REG
- intel_gvt_hypervisor_write_gpa(vgpu,
+ intel_gvt_write_gpa(vgpu,
workload->ring_context_gpa +
sizeof(*shadow_ring_context),
(void *)shadow_ring_context +
@@ -1573,7 +1572,7 @@ static void read_guest_pdps(struct intel_vgpu *vgpu,
gpa = ring_context_gpa + RING_CTX_OFF(pdps[0].val);
for (i = 0; i < 8; i++)
- intel_gvt_hypervisor_read_gpa(vgpu,
+ intel_gvt_read_gpa(vgpu,
gpa + i * 8, &pdp[7 - i], 4);
}
@@ -1644,10 +1643,10 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu,
return ERR_PTR(-EINVAL);
}
- intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+ intel_gvt_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(ring_header.val), &head, 4);
- intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+ intel_gvt_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(ring_tail.val), &tail, 4);
guest_head = head;
@@ -1674,11 +1673,11 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu,
gvt_dbg_el("ring %s begin a new workload\n", engine->name);
/* record some ring buffer register values for scan and shadow */
- intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+ intel_gvt_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(rb_start.val), &start, 4);
- intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+ intel_gvt_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(rb_ctrl.val), &ctl, 4);
- intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+ intel_gvt_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);
if (!intel_gvt_ggtt_validate_range(vgpu, start,
@@ -1701,9 +1700,9 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu,
workload->rb_ctl = ctl;
if (engine->id == RCS0) {
- intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+ intel_gvt_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
- intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
+ intel_gvt_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(rcs_indirect_ctx.val), &indirect_ctx, 4);
workload->wa_ctx.indirect_ctx.guest_gma =
diff --git a/drivers/gpu/drm/i915/gvt/trace.h b/drivers/gpu/drm/i915/gvt/trace.h
index 6d787750d279..020f1aa28322 100644
--- a/drivers/gpu/drm/i915/gvt/trace.h
+++ b/drivers/gpu/drm/i915/gvt/trace.h
@@ -377,7 +377,7 @@ TRACE_EVENT(render_mmio,
/* This part must be out of protection */
#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/i915/gvt
#define TRACE_INCLUDE_FILE trace
#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 8dddd0a940a1..46da19b3225d 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -293,7 +293,7 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
intel_vgpu_clean_opregion(vgpu);
intel_vgpu_reset_ggtt(vgpu, true);
intel_vgpu_clean_gtt(vgpu);
- intel_gvt_hypervisor_detach_vgpu(vgpu);
+ intel_vgpu_detach_regions(vgpu);
intel_vgpu_free_resource(vgpu);
intel_vgpu_clean_mmio(vgpu);
intel_vgpu_dmabuf_cleanup(vgpu);
@@ -370,8 +370,8 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
struct intel_vgpu *vgpu;
int ret;
- gvt_dbg_core("handle %llu low %llu MB high %llu MB fence %llu\n",
- param->handle, param->low_gm_sz, param->high_gm_sz,
+ gvt_dbg_core("low %llu MB high %llu MB fence %llu\n",
+ param->low_gm_sz, param->high_gm_sz,
param->fence_sz);
vgpu = vzalloc(sizeof(*vgpu));
@@ -384,7 +384,6 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
goto out_free_vgpu;
vgpu->id = ret;
- vgpu->handle = param->handle;
vgpu->gvt = gvt;
vgpu->sched_ctl.weight = param->weight;
mutex_init(&vgpu->vgpu_lock);
@@ -405,13 +404,9 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
populate_pvinfo_page(vgpu);
- ret = intel_gvt_hypervisor_attach_vgpu(vgpu);
- if (ret)
- goto out_clean_vgpu_resource;
-
ret = intel_vgpu_init_gtt(vgpu);
if (ret)
- goto out_detach_hypervisor_vgpu;
+ goto out_clean_vgpu_resource;
ret = intel_vgpu_init_opregion(vgpu);
if (ret)
@@ -431,14 +426,14 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
intel_gvt_debugfs_add_vgpu(vgpu);
- ret = intel_gvt_hypervisor_set_opregion(vgpu);
+ ret = intel_gvt_set_opregion(vgpu);
if (ret)
goto out_clean_sched_policy;
if (IS_BROADWELL(dev_priv) || IS_BROXTON(dev_priv))
- ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_B);
+ ret = intel_gvt_set_edid(vgpu, PORT_B);
else
- ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_D);
+ ret = intel_gvt_set_edid(vgpu, PORT_D);
if (ret)
goto out_clean_sched_policy;
@@ -454,8 +449,6 @@ out_clean_opregion:
intel_vgpu_clean_opregion(vgpu);
out_clean_gtt:
intel_vgpu_clean_gtt(vgpu);
-out_detach_hypervisor_vgpu:
- intel_gvt_hypervisor_detach_vgpu(vgpu);
out_clean_vgpu_resource:
intel_vgpu_free_resource(vgpu);
out_clean_vgpu_mmio:
@@ -483,7 +476,6 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
struct intel_vgpu_creation_params param;
struct intel_vgpu *vgpu;
- param.handle = 0;
param.primary = 1;
param.low_gm_sz = type->low_gm_size;
param.high_gm_sz = type->high_gm_size;
diff --git a/drivers/gpu/drm/i915/i915_deps.c b/drivers/gpu/drm/i915/i915_deps.c
index 999210b37325..297b8e4e42ee 100644
--- a/drivers/gpu/drm/i915/i915_deps.c
+++ b/drivers/gpu/drm/i915/i915_deps.c
@@ -226,7 +226,7 @@ int i915_deps_add_resv(struct i915_deps *deps, struct dma_resv *resv,
struct dma_fence *fence;
dma_resv_assert_held(resv);
- dma_resv_for_each_fence(&iter, resv, true, fence) {
+ dma_resv_for_each_fence(&iter, resv, dma_resv_usage_rw(true), fence) {
int ret = i915_deps_add_dependency(deps, fence, ctx);
if (ret)
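This is the first of several dma-resv conversions in this merge (the i915_request.c, i915_sw_fence.c and i915_vma.c hunks below follow the same pattern): the old bool write / exclusive-vs-shared interface becomes an explicit enum dma_resv_usage. A short sketch of the new shape, restricted to calls that appear in this diff:

	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	/* walk the fences relevant to a read or write access;
	 * the locked iterator requires the reservation to be held
	 * (see the dma_resv_assert_held() above) */
	dma_resv_for_each_fence(&cursor, obj->base.resv,
				dma_resv_usage_rw(write), fence) {
		/* ... await or chain each fence ... */
	}

	/* publish a fence with an explicit usage class instead of
	 * dma_resv_add_excl_fence()/dma_resv_add_shared_fence() */
	dma_resv_add_fence(obj->base.resv, fence, DMA_RESV_USAGE_WRITE);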
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index 3ffb617d75c9..b47746152d97 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -465,11 +465,6 @@ static void i915_driver_mmio_release(struct drm_i915_private *dev_priv)
pci_dev_put(dev_priv->bridge_dev);
}
-static void intel_sanitize_options(struct drm_i915_private *dev_priv)
-{
- intel_gvt_sanitize_options(dev_priv);
-}
-
/**
* i915_set_dma_info - set all relevant PCI dma info as configured for the
* platform
@@ -563,8 +558,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
}
}
- intel_sanitize_options(dev_priv);
-
/* needs to be done before ggtt probe */
intel_dram_edram_detect(dev_priv);
@@ -636,7 +629,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
intel_opregion_setup(dev_priv);
- ret = intel_pcode_init(dev_priv);
+ ret = intel_pcode_init(&dev_priv->uncore);
if (ret)
goto err_msi;
@@ -1258,7 +1251,7 @@ static int i915_drm_resume(struct drm_device *dev)
disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
- ret = intel_pcode_init(dev_priv);
+ ret = intel_pcode_init(&dev_priv->uncore);
if (ret)
return ret;
@@ -1556,7 +1549,7 @@ static int intel_runtime_suspend(struct device *kdev)
if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
return -ENODEV;
- drm_dbg_kms(&dev_priv->drm, "Suspending device\n");
+ drm_dbg(&dev_priv->drm, "Suspending device\n");
disable_rpm_wakeref_asserts(rpm);
@@ -1632,7 +1625,7 @@ static int intel_runtime_suspend(struct device *kdev)
if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
intel_hpd_poll_enable(dev_priv);
- drm_dbg_kms(&dev_priv->drm, "Device suspended\n");
+ drm_dbg(&dev_priv->drm, "Device suspended\n");
return 0;
}
@@ -1646,7 +1639,7 @@ static int intel_runtime_resume(struct device *kdev)
if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
return -ENODEV;
- drm_dbg_kms(&dev_priv->drm, "Resuming device\n");
+ drm_dbg(&dev_priv->drm, "Resuming device\n");
drm_WARN_ON_ONCE(&dev_priv->drm, atomic_read(&rpm->wakeref_count));
disable_rpm_wakeref_asserts(rpm);
@@ -1690,7 +1683,7 @@ static int intel_runtime_resume(struct device *kdev)
drm_err(&dev_priv->drm,
"Runtime resume failed, disabling it (%d)\n", ret);
else
- drm_dbg_kms(&dev_priv->drm, "Device resumed\n");
+ drm_dbg(&dev_priv->drm, "Device resumed\n");
return ret;
}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 72394a11baf9..c5fc402d9c50 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -327,6 +327,7 @@ struct intel_vbt_data {
bool override_afc_startup;
u8 override_afc_startup_val;
+ u8 seamless_drrs_min_refresh_rate;
enum drrs_type drrs_type;
struct {
@@ -401,6 +402,9 @@ struct i915_virtual_gpu {
struct mutex lock; /* serialises sending of g2v_notify command pkts */
bool active;
u32 caps;
+ u32 *initial_mmio;
+ u8 *initial_cfg_space;
+ struct list_head entry;
};
struct i915_selftest_stash {
@@ -1068,9 +1072,11 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define IS_DG2_G12(dev_priv) \
IS_SUBPLATFORM(dev_priv, INTEL_DG2, INTEL_SUBPLATFORM_G12)
#define IS_ADLS_RPLS(dev_priv) \
- IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_S, INTEL_SUBPLATFORM_RPL_S)
+ IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_S, INTEL_SUBPLATFORM_RPL)
#define IS_ADLP_N(dev_priv) \
IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_N)
+#define IS_ADLP_RPLP(dev_priv) \
+ IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_RPL)
#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
#define IS_BDW_ULT(dev_priv) \
@@ -1224,6 +1230,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define CCS_MASK(gt) \
ENGINE_INSTANCES_MASK(gt, CCS0, I915_MAX_CCS)
+#define HAS_MEDIA_RATIO_MODE(dev_priv) (INTEL_INFO(dev_priv)->has_media_ratio_mode)
+
/*
* The Gen7 cmdparser copies the scanned buffer to the ggtt for execution
* All later gens can run the final buffer from the ppgtt
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index d0752e5553db..a2be323a4be5 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -54,9 +54,6 @@ struct drm_i915_private;
} while(0)
#define GEM_WARN_ON(expr) WARN_ON(expr)
-#define GEM_DEBUG_DECL(var) var
-#define GEM_DEBUG_EXEC(expr) expr
-#define GEM_DEBUG_BUG_ON(expr) GEM_BUG_ON(expr)
#define GEM_DEBUG_WARN_ON(expr) GEM_WARN_ON(expr)
#else
@@ -66,9 +63,6 @@ struct drm_i915_private;
#define GEM_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
#define GEM_WARN_ON(expr) ({ unlikely(!!(expr)); })
-#define GEM_DEBUG_DECL(var)
-#define GEM_DEBUG_EXEC(expr) do { } while (0)
-#define GEM_DEBUG_BUG_ON(expr)
#define GEM_DEBUG_WARN_ON(expr) ({ BUILD_BUG_ON_INVALID(expr); 0; })
#endif
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 664a218030be..b9f474d3bec4 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -1033,6 +1033,7 @@ static const struct intel_device_info xehpsdv_info = {
.display = { },
.has_64k_pages = 1,
.needs_compact_pt = 1,
+ .has_media_ratio_mode = 1,
.platform_engine_mask =
BIT(RCS0) | BIT(BCS0) |
BIT(VECS0) | BIT(VECS1) | BIT(VECS2) | BIT(VECS3) |
@@ -1054,13 +1055,13 @@ static const struct intel_device_info xehpsdv_info = {
.has_guc_deprivilege = 1, \
.has_heci_pxp = 1, \
.needs_compact_pt = 1, \
+ .has_media_ratio_mode = 1, \
.platform_engine_mask = \
BIT(RCS0) | BIT(BCS0) | \
BIT(VECS0) | BIT(VECS1) | \
BIT(VCS0) | BIT(VCS2) | \
BIT(CCS0) | BIT(CCS1) | BIT(CCS2) | BIT(CCS3)
-__maybe_unused
static const struct intel_device_info dg2_info = {
DG2_FEATURES,
XE_LPD_FEATURES,
@@ -1177,6 +1178,8 @@ static const struct pci_device_id pciidlist[] = {
INTEL_ADLN_IDS(&adl_p_info),
INTEL_DG1_IDS(&dg1_info),
INTEL_RPLS_IDS(&adl_s_info),
+ INTEL_RPLP_IDS(&adl_p_info),
+ INTEL_DG2_IDS(&dg2_info),
{0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, pciidlist);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 117f9d909e95..d1806dcee668 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1412,6 +1412,7 @@
#define DPFC_HT_MODIFY REG_BIT(31) /* pre-ivb */
#define DPFC_NUKE_ON_ANY_MODIFICATION REG_BIT(23) /* bdw+ */
#define DPFC_CHICKEN_COMP_DUMMY_PIXEL REG_BIT(14) /* glk+ */
+#define DPFC_CHICKEN_FORCE_SLB_INVALIDATION REG_BIT(13) /* icl+ */
#define DPFC_DISABLE_DUMMY0 REG_BIT(8) /* ivb+ */
#define GLK_FBC_STRIDE(fbc_id) _MMIO_PIPE((fbc_id), 0x43228, 0x43268)
@@ -4376,12 +4377,12 @@
#define _DSPAADDR 0x70184
#define _DSPASTRIDE 0x70188
#define _DSPAPOS 0x7018C /* reserved */
-#define DISP_POS_Y_MASK REG_GENMASK(31, 0)
+#define DISP_POS_Y_MASK REG_GENMASK(31, 16)
#define DISP_POS_Y(y) REG_FIELD_PREP(DISP_POS_Y_MASK, (y))
#define DISP_POS_X_MASK REG_GENMASK(15, 0)
#define DISP_POS_X(x) REG_FIELD_PREP(DISP_POS_X_MASK, (x))
#define _DSPASIZE 0x70190
-#define DISP_HEIGHT_MASK REG_GENMASK(31, 0)
+#define DISP_HEIGHT_MASK REG_GENMASK(31, 16)
#define DISP_HEIGHT(h) REG_FIELD_PREP(DISP_HEIGHT_MASK, (h))
#define DISP_WIDTH_MASK REG_GENMASK(15, 0)
#define DISP_WIDTH(w) REG_FIELD_PREP(DISP_WIDTH_MASK, (w))
@@ -5184,7 +5185,7 @@
#define _SEL_FETCH_PLANE_BASE_6_A 0x70940
#define _SEL_FETCH_PLANE_BASE_7_A 0x70960
#define _SEL_FETCH_PLANE_BASE_CUR_A 0x70880
-#define _SEL_FETCH_PLANE_BASE_1_B 0x70990
+#define _SEL_FETCH_PLANE_BASE_1_B 0x71890
#define _SEL_FETCH_PLANE_BASE_A(plane) _PICK(plane, \
_SEL_FETCH_PLANE_BASE_1_A, \
@@ -6697,6 +6698,9 @@
#define GEN6_PCODE_MAILBOX _MMIO(0x138124)
#define GEN6_PCODE_READY (1 << 31)
+#define GEN6_PCODE_MB_PARAM2 REG_GENMASK(23, 16)
+#define GEN6_PCODE_MB_PARAM1 REG_GENMASK(15, 8)
+#define GEN6_PCODE_MB_COMMAND REG_GENMASK(7, 0)
#define GEN6_PCODE_ERROR_MASK 0xFF
#define GEN6_PCODE_SUCCESS 0x0
#define GEN6_PCODE_ILLEGAL_CMD 0x1
@@ -7569,25 +7573,25 @@ enum skl_power_gate {
#define _PORT_CLK_SEL_A 0x46100
#define _PORT_CLK_SEL_B 0x46104
#define PORT_CLK_SEL(port) _MMIO_PORT(port, _PORT_CLK_SEL_A, _PORT_CLK_SEL_B)
-#define PORT_CLK_SEL_LCPLL_2700 (0 << 29)
-#define PORT_CLK_SEL_LCPLL_1350 (1 << 29)
-#define PORT_CLK_SEL_LCPLL_810 (2 << 29)
-#define PORT_CLK_SEL_SPLL (3 << 29)
-#define PORT_CLK_SEL_WRPLL(pll) (((pll) + 4) << 29)
-#define PORT_CLK_SEL_WRPLL1 (4 << 29)
-#define PORT_CLK_SEL_WRPLL2 (5 << 29)
-#define PORT_CLK_SEL_NONE (7 << 29)
-#define PORT_CLK_SEL_MASK (7 << 29)
+#define PORT_CLK_SEL_MASK REG_GENMASK(31, 29)
+#define PORT_CLK_SEL_LCPLL_2700 REG_FIELD_PREP(PORT_CLK_SEL_MASK, 0)
+#define PORT_CLK_SEL_LCPLL_1350 REG_FIELD_PREP(PORT_CLK_SEL_MASK, 1)
+#define PORT_CLK_SEL_LCPLL_810 REG_FIELD_PREP(PORT_CLK_SEL_MASK, 2)
+#define PORT_CLK_SEL_SPLL REG_FIELD_PREP(PORT_CLK_SEL_MASK, 3)
+#define PORT_CLK_SEL_WRPLL(pll) REG_FIELD_PREP(PORT_CLK_SEL_MASK, 4 + (pll))
+#define PORT_CLK_SEL_WRPLL1 REG_FIELD_PREP(PORT_CLK_SEL_MASK, 4)
+#define PORT_CLK_SEL_WRPLL2 REG_FIELD_PREP(PORT_CLK_SEL_MASK, 5)
+#define PORT_CLK_SEL_NONE REG_FIELD_PREP(PORT_CLK_SEL_MASK, 7)
/* On ICL+ this is the same as PORT_CLK_SEL, but all bits change. */
#define DDI_CLK_SEL(port) PORT_CLK_SEL(port)
-#define DDI_CLK_SEL_NONE (0x0 << 28)
-#define DDI_CLK_SEL_MG (0x8 << 28)
-#define DDI_CLK_SEL_TBT_162 (0xC << 28)
-#define DDI_CLK_SEL_TBT_270 (0xD << 28)
-#define DDI_CLK_SEL_TBT_540 (0xE << 28)
-#define DDI_CLK_SEL_TBT_810 (0xF << 28)
-#define DDI_CLK_SEL_MASK (0xF << 28)
+#define DDI_CLK_SEL_MASK REG_GENMASK(31, 28)
+#define DDI_CLK_SEL_NONE REG_FIELD_PREP(DDI_CLK_SEL_MASK, 0x0)
+#define DDI_CLK_SEL_MG REG_FIELD_PREP(DDI_CLK_SEL_MASK, 0x8)
+#define DDI_CLK_SEL_TBT_162 REG_FIELD_PREP(DDI_CLK_SEL_MASK, 0xC)
+#define DDI_CLK_SEL_TBT_270 REG_FIELD_PREP(DDI_CLK_SEL_MASK, 0xD)
+#define DDI_CLK_SEL_TBT_540 REG_FIELD_PREP(DDI_CLK_SEL_MASK, 0xE)
+#define DDI_CLK_SEL_TBT_810 REG_FIELD_PREP(DDI_CLK_SEL_MASK, 0xF)
/* Transcoder clock selection */
#define _TRANS_CLK_SEL_A 0x46140
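The PORT_CLK_SEL/DDI_CLK_SEL rework above swaps open-coded shifts for REG_GENMASK()/REG_FIELD_PREP(), which derive the shift from the mask so the field position is written down exactly once. Both spellings produce the same value; for example:

	/* old style: shift repeated in every definition */
	#define PORT_CLK_SEL_SPLL	(3 << 29)

	/* new style: shift implied by the mask */
	#define PORT_CLK_SEL_MASK	REG_GENMASK(31, 29)	/* 0xe0000000 */
	#define PORT_CLK_SEL_SPLL	REG_FIELD_PREP(PORT_CLK_SEL_MASK, 3)

	/* both expand to 0x60000000 */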
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 582770360ad1..73d5195146b0 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -1598,7 +1598,8 @@ i915_request_await_object(struct i915_request *to,
struct dma_fence *fence;
int ret = 0;
- dma_resv_for_each_fence(&cursor, obj->base.resv, write, fence) {
+ dma_resv_for_each_fence(&cursor, obj->base.resv,
+ dma_resv_usage_rw(write), fence) {
ret = i915_request_await_dma_fence(to, fence);
if (ret)
break;
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 2a74a9a1cafe..ae984c66c48a 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -585,7 +585,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
debug_fence_assert(fence);
might_sleep_if(gfpflags_allow_blocking(gfp));
- dma_resv_iter_begin(&cursor, resv, write);
+ dma_resv_iter_begin(&cursor, resv, dma_resv_usage_rw(write));
dma_resv_for_each_fence_unlocked(&cursor, f) {
pending = i915_sw_fence_await_dma_fence(fence, f, timeout,
gfp);
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 79c286f85413..4f6db539571a 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -1381,7 +1381,10 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
work->vm = vma->vm;
- moving = i915_gem_object_get_moving_fence(vma->obj);
+ err = i915_gem_object_get_moving_fence(vma->obj, &moving);
+ if (err)
+ goto err_rpm;
+
dma_fence_work_chain(&work->base, moving);
/* Allocate enough page directories to used PTE */
@@ -1837,7 +1840,8 @@ int _i915_vma_move_to_active(struct i915_vma *vma,
}
if (fence) {
- dma_resv_add_excl_fence(vma->obj->base.resv, fence);
+ dma_resv_add_fence(vma->obj->base.resv, fence,
+ DMA_RESV_USAGE_WRITE);
obj->write_domain = I915_GEM_DOMAIN_RENDER;
obj->read_domains = 0;
}
@@ -1849,7 +1853,8 @@ int _i915_vma_move_to_active(struct i915_vma *vma,
}
if (fence) {
- dma_resv_add_shared_fence(vma->obj->base.resv, fence);
+ dma_resv_add_fence(vma->obj->base.resv, fence,
+ DMA_RESV_USAGE_READ);
obj->write_domain = 0;
}
}
@@ -2091,7 +2096,7 @@ int i915_vma_unbind_async(struct i915_vma *vma, bool trylock_vm)
goto out_rpm;
}
- dma_resv_add_shared_fence(obj->base.resv, fence);
+ dma_resv_add_fence(obj->base.resv, fence, DMA_RESV_USAGE_READ);
dma_fence_put(fence);
out_rpm:
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index b0e62a411534..7eb893666595 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -182,8 +182,21 @@ static const u16 subplatform_n_ids[] = {
INTEL_ADLN_IDS(0),
};
-static const u16 subplatform_rpls_ids[] = {
+static const u16 subplatform_rpl_ids[] = {
INTEL_RPLS_IDS(0),
+ INTEL_RPLP_IDS(0),
+};
+
+static const u16 subplatform_g10_ids[] = {
+ INTEL_DG2_G10_IDS(0),
+};
+
+static const u16 subplatform_g11_ids[] = {
+ INTEL_DG2_G11_IDS(0),
+};
+
+static const u16 subplatform_g12_ids[] = {
+ INTEL_DG2_G12_IDS(0),
};
static bool find_devid(u16 id, const u16 *p, unsigned int num)
@@ -228,9 +241,18 @@ void intel_device_info_subplatform_init(struct drm_i915_private *i915)
} else if (find_devid(devid, subplatform_n_ids,
ARRAY_SIZE(subplatform_n_ids))) {
mask = BIT(INTEL_SUBPLATFORM_N);
- } else if (find_devid(devid, subplatform_rpls_ids,
- ARRAY_SIZE(subplatform_rpls_ids))) {
- mask = BIT(INTEL_SUBPLATFORM_RPL_S);
+ } else if (find_devid(devid, subplatform_rpl_ids,
+ ARRAY_SIZE(subplatform_rpl_ids))) {
+ mask = BIT(INTEL_SUBPLATFORM_RPL);
+ } else if (find_devid(devid, subplatform_g10_ids,
+ ARRAY_SIZE(subplatform_g10_ids))) {
+ mask = BIT(INTEL_SUBPLATFORM_G10);
+ } else if (find_devid(devid, subplatform_g11_ids,
+ ARRAY_SIZE(subplatform_g11_ids))) {
+ mask = BIT(INTEL_SUBPLATFORM_G11);
+ } else if (find_devid(devid, subplatform_g12_ids,
+ ARRAY_SIZE(subplatform_g12_ids))) {
+ mask = BIT(INTEL_SUBPLATFORM_G12);
}
GEM_BUG_ON(mask & ~INTEL_SUBPLATFORM_MASK);
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index c4ea5c4abfc6..a134914237dd 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -115,11 +115,16 @@ enum intel_platform {
#define INTEL_SUBPLATFORM_G11 1
#define INTEL_SUBPLATFORM_G12 2
-/* ADL-S */
-#define INTEL_SUBPLATFORM_RPL_S 0
+/* ADL */
+#define INTEL_SUBPLATFORM_RPL 0
/* ADL-P */
-#define INTEL_SUBPLATFORM_N 0
+/*
+ * Because #define INTEL_SUBPLATFORM_RPL 0 applies
+ * here too, SUBPLATFORM_N must use a different
+ * bit.
+ */
+#define INTEL_SUBPLATFORM_N 1
enum intel_ppgtt_type {
INTEL_PPGTT_NONE = I915_GEM_PPGTT_NONE,
@@ -150,6 +155,7 @@ enum intel_ppgtt_type {
func(has_llc); \
func(has_logical_ring_contexts); \
func(has_logical_ring_elsq); \
+ func(has_media_ratio_mode); \
func(has_mslices); \
func(has_pooled_eu); \
func(has_pxp); \
diff --git a/drivers/gpu/drm/i915/intel_dram.c b/drivers/gpu/drm/i915/intel_dram.c
index 2b9e7833da96..437447119770 100644
--- a/drivers/gpu/drm/i915/intel_dram.c
+++ b/drivers/gpu/drm/i915/intel_dram.c
@@ -393,7 +393,7 @@ static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv)
u32 val = 0;
int ret;
- ret = snb_pcode_read(dev_priv, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
+ ret = snb_pcode_read(&dev_priv->uncore, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
ICL_PCODE_MEM_SS_READ_GLOBAL_INFO, &val, NULL);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
index cf6e98962d82..e98b6d69a91a 100644
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -24,7 +24,10 @@
#include "i915_drv.h"
#include "i915_vgpu.h"
#include "intel_gvt.h"
-#include "gvt/gvt.h"
+#include "gem/i915_gem_dmabuf.h"
+#include "gt/intel_context.h"
+#include "gt/intel_ring.h"
+#include "gt/shmem_utils.h"
/**
* DOC: Intel GVT-g host support
@@ -41,6 +44,10 @@
* doc is available on https://01.org/group/2230/documentation-list.
*/
+static LIST_HEAD(intel_gvt_devices);
+static const struct intel_vgpu_ops *intel_gvt_ops;
+static DEFINE_MUTEX(intel_gvt_mutex);
+
static bool is_supported_device(struct drm_i915_private *dev_priv)
{
if (IS_BROADWELL(dev_priv))
@@ -59,32 +66,162 @@ static bool is_supported_device(struct drm_i915_private *dev_priv)
return false;
}
-/**
- * intel_gvt_sanitize_options - sanitize GVT related options
- * @dev_priv: drm i915 private data
- *
- * This function is called at the i915 options sanitize stage.
- */
-void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv)
+static void free_initial_hw_state(struct drm_i915_private *dev_priv)
+{
+ struct i915_virtual_gpu *vgpu = &dev_priv->vgpu;
+
+ vfree(vgpu->initial_mmio);
+ vgpu->initial_mmio = NULL;
+
+ kfree(vgpu->initial_cfg_space);
+ vgpu->initial_cfg_space = NULL;
+}
+
+static void save_mmio(struct intel_gvt_mmio_table_iter *iter, u32 offset,
+ u32 size)
+{
+ struct drm_i915_private *dev_priv = iter->i915;
+ u32 *mmio, i;
+
+ for (i = offset; i < offset + size; i += 4) {
+ mmio = iter->data + i;
+ *mmio = intel_uncore_read_notrace(to_gt(dev_priv)->uncore,
+ _MMIO(i));
+ }
+}
+
+static int handle_mmio(struct intel_gvt_mmio_table_iter *iter,
+ u32 offset, u32 size)
+{
+ if (WARN_ON(!IS_ALIGNED(offset, 4)))
+ return -EINVAL;
+
+ save_mmio(iter, offset, size);
+ return 0;
+}
+
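+/*
+ * Snapshot the PCI config space and, via the MMIO table walk below, the
+ * MMIO register state at probe time. The copies are kept in i915->vgpu
+ * for GVT-g to consume as the device's pristine initial state.
+ */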
+static int save_initial_hw_state(struct drm_i915_private *dev_priv)
+{
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+ struct i915_virtual_gpu *vgpu = &dev_priv->vgpu;
+ struct intel_gvt_mmio_table_iter iter;
+ void *mem;
+ int i, ret;
+
+ mem = kzalloc(PCI_CFG_SPACE_EXP_SIZE, GFP_KERNEL);
+ if (!mem)
+ return -ENOMEM;
+
+ vgpu->initial_cfg_space = mem;
+
+ for (i = 0; i < PCI_CFG_SPACE_EXP_SIZE; i += 4)
+ pci_read_config_dword(pdev, i, mem + i);
+
+ mem = vzalloc(2 * SZ_1M);
+ if (!mem) {
+ ret = -ENOMEM;
+ goto err_mmio;
+ }
+
+ vgpu->initial_mmio = mem;
+
+ iter.i915 = dev_priv;
+ iter.data = vgpu->initial_mmio;
+ iter.handle_mmio_cb = handle_mmio;
+
+ ret = intel_gvt_iterate_mmio_table(&iter);
+ if (ret)
+ goto err_iterate;
+
+ return 0;
+
+err_iterate:
+ vfree(vgpu->initial_mmio);
+ vgpu->initial_mmio = NULL;
+err_mmio:
+ kfree(vgpu->initial_cfg_space);
+ vgpu->initial_cfg_space = NULL;
+
+ return ret;
+}
+
+static void intel_gvt_init_device(struct drm_i915_private *dev_priv)
{
- if (!dev_priv->params.enable_gvt)
+ if (!dev_priv->params.enable_gvt) {
+ drm_dbg(&dev_priv->drm,
+ "GVT-g is disabled by kernel params\n");
return;
+ }
if (intel_vgpu_active(dev_priv)) {
drm_info(&dev_priv->drm, "GVT-g is disabled for guest\n");
- goto bail;
+ return;
}
if (!is_supported_device(dev_priv)) {
drm_info(&dev_priv->drm,
"Unsupported device. GVT-g is disabled\n");
- goto bail;
+ return;
+ }
+
+ if (intel_uc_wants_guc_submission(&to_gt(dev_priv)->uc)) {
+ drm_err(&dev_priv->drm,
+ "Graphics virtualization is not yet supported with GuC submission\n");
+ return;
}
- return;
-bail:
- dev_priv->params.enable_gvt = 0;
+ if (save_initial_hw_state(dev_priv)) {
+ drm_dbg(&dev_priv->drm, "Failed to save initial HW state\n");
+ return;
+ }
+
+ if (intel_gvt_ops->init_device(dev_priv))
+ drm_dbg(&dev_priv->drm, "Fail to init GVT device\n");
+}
+
+static void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
+{
+ if (dev_priv->gvt)
+ intel_gvt_ops->clean_device(dev_priv);
+ free_initial_hw_state(dev_priv);
+}
+
+int intel_gvt_set_ops(const struct intel_vgpu_ops *ops)
+{
+ struct drm_i915_private *dev_priv;
+
+ mutex_lock(&intel_gvt_mutex);
+ if (intel_gvt_ops) {
+ mutex_unlock(&intel_gvt_mutex);
+ return -EINVAL;
+ }
+ intel_gvt_ops = ops;
+
+ list_for_each_entry(dev_priv, &intel_gvt_devices, vgpu.entry)
+ intel_gvt_init_device(dev_priv);
+ mutex_unlock(&intel_gvt_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(intel_gvt_set_ops, I915_GVT);
+
+void intel_gvt_clear_ops(const struct intel_vgpu_ops *ops)
+{
+ struct drm_i915_private *dev_priv;
+
+ mutex_lock(&intel_gvt_mutex);
+ if (intel_gvt_ops != ops) {
+ mutex_unlock(&intel_gvt_mutex);
+ return;
+ }
+
+ list_for_each_entry(dev_priv, &intel_gvt_devices, vgpu.entry)
+ intel_gvt_clean_device(dev_priv);
+
+ intel_gvt_ops = NULL;
+ mutex_unlock(&intel_gvt_mutex);
}
+EXPORT_SYMBOL_NS_GPL(intel_gvt_clear_ops, I915_GVT);
/**
* intel_gvt_init - initialize GVT components
@@ -98,41 +235,18 @@ bail:
*/
int intel_gvt_init(struct drm_i915_private *dev_priv)
{
- int ret;
-
if (i915_inject_probe_failure(dev_priv))
return -ENODEV;
- if (!dev_priv->params.enable_gvt) {
- drm_dbg(&dev_priv->drm,
- "GVT-g is disabled by kernel params\n");
- return 0;
- }
-
- if (intel_uc_wants_guc_submission(&to_gt(dev_priv)->uc)) {
- drm_err(&dev_priv->drm,
- "i915 GVT-g loading failed due to Graphics virtualization is not yet supported with GuC submission\n");
- return -EIO;
- }
-
- ret = intel_gvt_init_device(dev_priv);
- if (ret) {
- drm_dbg(&dev_priv->drm, "Fail to init GVT device\n");
- goto bail;
- }
-
- return 0;
+ mutex_lock(&intel_gvt_mutex);
+ list_add_tail(&dev_priv->vgpu.entry, &intel_gvt_devices);
+ if (intel_gvt_ops)
+ intel_gvt_init_device(dev_priv);
+ mutex_unlock(&intel_gvt_mutex);
-bail:
- dev_priv->params.enable_gvt = 0;
return 0;
}
-static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
-{
- return dev_priv->gvt;
-}
-
/**
* intel_gvt_driver_remove - cleanup GVT components when i915 driver is
* unbinding
@@ -143,10 +257,10 @@ static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
*/
void intel_gvt_driver_remove(struct drm_i915_private *dev_priv)
{
- if (!intel_gvt_active(dev_priv))
- return;
-
+ mutex_lock(&intel_gvt_mutex);
intel_gvt_clean_device(dev_priv);
+ list_del(&dev_priv->vgpu.entry);
+ mutex_unlock(&intel_gvt_mutex);
}
/**
@@ -159,6 +273,50 @@ void intel_gvt_driver_remove(struct drm_i915_private *dev_priv)
*/
void intel_gvt_resume(struct drm_i915_private *dev_priv)
{
- if (intel_gvt_active(dev_priv))
- intel_gvt_pm_resume(dev_priv->gvt);
+ mutex_lock(&intel_gvt_mutex);
+ if (dev_priv->gvt)
+ intel_gvt_ops->pm_resume(dev_priv);
+ mutex_unlock(&intel_gvt_mutex);
}
+
+/*
+ * Exported here so that the exports only get created when GVT support is
+ * actually enabled.
+ */
+EXPORT_SYMBOL_NS_GPL(i915_gem_object_alloc, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(i915_gem_object_create_shmem, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(i915_gem_object_init, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(i915_gem_object_ggtt_pin_ww, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(i915_gem_object_pin_map, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(i915_gem_object_set_to_cpu_domain, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(__i915_gem_object_flush_map, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(__i915_gem_object_set_pages, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(i915_gem_gtt_insert, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(i915_gem_prime_export, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(i915_gem_ww_ctx_init, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(i915_gem_ww_ctx_backoff, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(i915_gem_ww_ctx_fini, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(i915_ppgtt_create, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(i915_request_add, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(i915_request_create, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(i915_request_wait, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(i915_reserve_fence, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(i915_unreserve_fence, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(i915_vm_release, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(_i915_vma_move_to_active, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(intel_context_create, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(__intel_context_do_pin, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(__intel_context_do_unpin, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(intel_ring_begin, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(intel_runtime_pm_get, I915_GVT);
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+EXPORT_SYMBOL_NS_GPL(intel_runtime_pm_put, I915_GVT);
+#endif
+EXPORT_SYMBOL_NS_GPL(intel_runtime_pm_put_unchecked, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(intel_uncore_forcewake_for_reg, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(intel_uncore_forcewake_get, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(intel_uncore_forcewake_put, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(shmem_pin_map, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(shmem_unpin_map, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(__px_dma, I915_GVT);
+EXPORT_SYMBOL_NS_GPL(i915_fence_ops, I915_GVT);
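With intel_gvt_set_ops()/intel_gvt_clear_ops() above, GVT attaches to i915 at runtime: registering an ops table initializes every device already on intel_gvt_devices, and clearing it tears them down again. A hypothetical out-of-tree user, sketched purely from the declarations in intel_gvt.h below (the my_* names are illustrative, not part of this series):

	static int my_init_device(struct drm_i915_private *i915) { return 0; }
	static void my_clean_device(struct drm_i915_private *i915) { }
	static void my_pm_resume(struct drm_i915_private *i915) { }

	static const struct intel_vgpu_ops my_vgpu_ops = {
		.init_device	= my_init_device,
		.clean_device	= my_clean_device,
		.pm_resume	= my_pm_resume,
	};

	static int __init my_module_init(void)
	{
		/* fails with -EINVAL if another provider is registered */
		return intel_gvt_set_ops(&my_vgpu_ops);
	}

	static void __exit my_module_exit(void)
	{
		intel_gvt_clear_ops(&my_vgpu_ops);	/* no-op unless ops match */
	}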
diff --git a/drivers/gpu/drm/i915/intel_gvt.h b/drivers/gpu/drm/i915/intel_gvt.h
index d7d3fb6186fd..eb2a2be252ca 100644
--- a/drivers/gpu/drm/i915/intel_gvt.h
+++ b/drivers/gpu/drm/i915/intel_gvt.h
@@ -24,16 +24,34 @@
#ifndef _INTEL_GVT_H_
#define _INTEL_GVT_H_
+#include <linux/types.h>
+
struct drm_i915_private;
#ifdef CONFIG_DRM_I915_GVT
+
+struct intel_gvt_mmio_table_iter {
+ struct drm_i915_private *i915;
+ void *data;
+ int (*handle_mmio_cb)(struct intel_gvt_mmio_table_iter *iter,
+ u32 offset, u32 size);
+};
+
int intel_gvt_init(struct drm_i915_private *dev_priv);
void intel_gvt_driver_remove(struct drm_i915_private *dev_priv);
-int intel_gvt_init_device(struct drm_i915_private *dev_priv);
-void intel_gvt_clean_device(struct drm_i915_private *dev_priv);
int intel_gvt_init_host(void);
-void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv);
void intel_gvt_resume(struct drm_i915_private *dev_priv);
+int intel_gvt_iterate_mmio_table(struct intel_gvt_mmio_table_iter *iter);
+
+struct intel_vgpu_ops {
+ int (*init_device)(struct drm_i915_private *dev_priv);
+ void (*clean_device)(struct drm_i915_private *dev_priv);
+ void (*pm_resume)(struct drm_i915_private *i915);
+};
+
+int intel_gvt_set_ops(const struct intel_vgpu_ops *ops);
+void intel_gvt_clear_ops(const struct intel_vgpu_ops *ops);
+
#else
static inline int intel_gvt_init(struct drm_i915_private *dev_priv)
{
@@ -44,12 +62,16 @@ static inline void intel_gvt_driver_remove(struct drm_i915_private *dev_priv)
{
}
-static inline void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv)
+static inline void intel_gvt_resume(struct drm_i915_private *dev_priv)
{
}
-static inline void intel_gvt_resume(struct drm_i915_private *dev_priv)
+struct intel_gvt_mmio_table_iter {
+};
+
+static inline int intel_gvt_iterate_mmio_table(struct intel_gvt_mmio_table_iter *iter)
{
+ return 0;
}
#endif
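intel_gvt_iterate_mmio_table() walks every register range known to the table in the new file below, invoking handle_mmio_cb once per (offset, size) pair; a non-zero return from the callback aborts the walk, and the CONFIG_DRM_I915_GVT=n stub above simply reports success. A hypothetical callback that tallies how many MMIO bytes the table covers:

	static int count_mmio(struct intel_gvt_mmio_table_iter *iter,
			      u32 offset, u32 size)
	{
		u32 *total = iter->data;	/* caller-owned cookie */

		*total += size;
		return 0;			/* non-zero would stop the walk */
	}

	static u32 mmio_table_bytes(struct drm_i915_private *i915)
	{
		u32 total = 0;
		struct intel_gvt_mmio_table_iter iter = {
			.i915 = i915,
			.data = &total,
			.handle_mmio_cb = count_mmio,
		};

		if (intel_gvt_iterate_mmio_table(&iter))
			return 0;
		return total;
	}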
diff --git a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
new file mode 100644
index 000000000000..72dac1718f3e
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
@@ -0,0 +1,1292 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include "display/intel_dmc_regs.h"
+#include "display/vlv_dsi_pll_regs.h"
+#include "gt/intel_gt_regs.h"
+#include "gvt/gvt.h"
+#include "i915_drv.h"
+#include "i915_pvinfo.h"
+#include "i915_reg.h"
+#include "intel_gvt.h"
+#include "intel_mchbar_regs.h"
+
+#define MMIO_F(reg, s) do { \
+ int ret; \
+ ret = iter->handle_mmio_cb(iter, i915_mmio_reg_offset(reg), s); \
+ if (ret) \
+ return ret; \
+} while (0)
+
+#define MMIO_D(reg) MMIO_F(reg, 4)
+
+#define MMIO_RING_F(prefix, s) do { \
+ MMIO_F(prefix(RENDER_RING_BASE), s); \
+ MMIO_F(prefix(BLT_RING_BASE), s); \
+ MMIO_F(prefix(GEN6_BSD_RING_BASE), s); \
+ MMIO_F(prefix(VEBOX_RING_BASE), s); \
+ if (HAS_ENGINE(to_gt(iter->i915), VCS1)) \
+ MMIO_F(prefix(GEN8_BSD2_RING_BASE), s); \
+} while (0)
+
+#define MMIO_RING_D(prefix) \
+ MMIO_RING_F(prefix, 4)
+
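+/*
+ * Every MMIO_*() line below reports one register range to the caller's
+ * handle_mmio_cb() and bails out on the first error. MMIO_D() is the
+ * single 4-byte register case; MMIO_RING_F() repeats a per-engine
+ * register across each ring base present on the device.
+ */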
+static int iterate_generic_mmio(struct intel_gvt_mmio_table_iter *iter)
+{
+ struct drm_i915_private *dev_priv = iter->i915;
+
+ MMIO_RING_D(RING_IMR);
+ MMIO_D(SDEIMR);
+ MMIO_D(SDEIER);
+ MMIO_D(SDEIIR);
+ MMIO_D(SDEISR);
+ MMIO_RING_D(RING_HWSTAM);
+ MMIO_D(BSD_HWS_PGA_GEN7);
+ MMIO_D(BLT_HWS_PGA_GEN7);
+ MMIO_D(VEBOX_HWS_PGA_GEN7);
+
+#define RING_REG(base) _MMIO((base) + 0x28)
+ MMIO_RING_D(RING_REG);
+#undef RING_REG
+
+#define RING_REG(base) _MMIO((base) + 0x134)
+ MMIO_RING_D(RING_REG);
+#undef RING_REG
+
+#define RING_REG(base) _MMIO((base) + 0x6c)
+ MMIO_RING_D(RING_REG);
+#undef RING_REG
+ MMIO_D(_MMIO(0x2148));
+ MMIO_D(CCID(RENDER_RING_BASE));
+ MMIO_D(_MMIO(0x12198));
+ MMIO_D(GEN7_CXT_SIZE);
+ MMIO_RING_D(RING_TAIL);
+ MMIO_RING_D(RING_HEAD);
+ MMIO_RING_D(RING_CTL);
+ MMIO_RING_D(RING_ACTHD);
+ MMIO_RING_D(RING_START);
+
+ /* RING MODE */
+#define RING_REG(base) _MMIO((base) + 0x29c)
+ MMIO_RING_D(RING_REG);
+#undef RING_REG
+
+ MMIO_RING_D(RING_MI_MODE);
+ MMIO_RING_D(RING_INSTPM);
+ MMIO_RING_D(RING_TIMESTAMP);
+ MMIO_RING_D(RING_TIMESTAMP_UDW);
+ MMIO_D(GEN7_GT_MODE);
+ MMIO_D(CACHE_MODE_0_GEN7);
+ MMIO_D(CACHE_MODE_1);
+ MMIO_D(CACHE_MODE_0);
+ MMIO_D(_MMIO(0x2124));
+ MMIO_D(_MMIO(0x20dc));
+ MMIO_D(_3D_CHICKEN3);
+ MMIO_D(_MMIO(0x2088));
+ MMIO_D(FF_SLICE_CS_CHICKEN2);
+ MMIO_D(_MMIO(0x2470));
+ MMIO_D(GAM_ECOCHK);
+ MMIO_D(GEN7_COMMON_SLICE_CHICKEN1);
+ MMIO_D(COMMON_SLICE_CHICKEN2);
+ MMIO_D(_MMIO(0x9030));
+ MMIO_D(_MMIO(0x20a0));
+ MMIO_D(_MMIO(0x2420));
+ MMIO_D(_MMIO(0x2430));
+ MMIO_D(_MMIO(0x2434));
+ MMIO_D(_MMIO(0x2438));
+ MMIO_D(_MMIO(0x243c));
+ MMIO_D(_MMIO(0x7018));
+ MMIO_D(HALF_SLICE_CHICKEN3);
+ MMIO_D(GEN7_HALF_SLICE_CHICKEN1);
+ /* display */
+ MMIO_F(_MMIO(0x60220), 0x20);
+ MMIO_D(_MMIO(0x602a0));
+ MMIO_D(_MMIO(0x65050));
+ MMIO_D(_MMIO(0x650b4));
+ MMIO_D(_MMIO(0xc4040));
+ MMIO_D(DERRMR);
+ MMIO_D(PIPEDSL(PIPE_A));
+ MMIO_D(PIPEDSL(PIPE_B));
+ MMIO_D(PIPEDSL(PIPE_C));
+ MMIO_D(PIPEDSL(_PIPE_EDP));
+ MMIO_D(PIPECONF(PIPE_A));
+ MMIO_D(PIPECONF(PIPE_B));
+ MMIO_D(PIPECONF(PIPE_C));
+ MMIO_D(PIPECONF(_PIPE_EDP));
+ MMIO_D(PIPESTAT(PIPE_A));
+ MMIO_D(PIPESTAT(PIPE_B));
+ MMIO_D(PIPESTAT(PIPE_C));
+ MMIO_D(PIPESTAT(_PIPE_EDP));
+ MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_A));
+ MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_B));
+ MMIO_D(PIPE_FLIPCOUNT_G4X(PIPE_C));
+ MMIO_D(PIPE_FLIPCOUNT_G4X(_PIPE_EDP));
+ MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_A));
+ MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_B));
+ MMIO_D(PIPE_FRMCOUNT_G4X(PIPE_C));
+ MMIO_D(PIPE_FRMCOUNT_G4X(_PIPE_EDP));
+ MMIO_D(CURCNTR(PIPE_A));
+ MMIO_D(CURCNTR(PIPE_B));
+ MMIO_D(CURCNTR(PIPE_C));
+ MMIO_D(CURPOS(PIPE_A));
+ MMIO_D(CURPOS(PIPE_B));
+ MMIO_D(CURPOS(PIPE_C));
+ MMIO_D(CURBASE(PIPE_A));
+ MMIO_D(CURBASE(PIPE_B));
+ MMIO_D(CURBASE(PIPE_C));
+ MMIO_D(CUR_FBC_CTL(PIPE_A));
+ MMIO_D(CUR_FBC_CTL(PIPE_B));
+ MMIO_D(CUR_FBC_CTL(PIPE_C));
+ MMIO_D(_MMIO(0x700ac));
+ MMIO_D(_MMIO(0x710ac));
+ MMIO_D(_MMIO(0x720ac));
+ MMIO_D(_MMIO(0x70090));
+ MMIO_D(_MMIO(0x70094));
+ MMIO_D(_MMIO(0x70098));
+ MMIO_D(_MMIO(0x7009c));
+ MMIO_D(DSPCNTR(PIPE_A));
+ MMIO_D(DSPADDR(PIPE_A));
+ MMIO_D(DSPSTRIDE(PIPE_A));
+ MMIO_D(DSPPOS(PIPE_A));
+ MMIO_D(DSPSIZE(PIPE_A));
+ MMIO_D(DSPSURF(PIPE_A));
+ MMIO_D(DSPOFFSET(PIPE_A));
+ MMIO_D(DSPSURFLIVE(PIPE_A));
+ MMIO_D(REG_50080(PIPE_A, PLANE_PRIMARY));
+ MMIO_D(DSPCNTR(PIPE_B));
+ MMIO_D(DSPADDR(PIPE_B));
+ MMIO_D(DSPSTRIDE(PIPE_B));
+ MMIO_D(DSPPOS(PIPE_B));
+ MMIO_D(DSPSIZE(PIPE_B));
+ MMIO_D(DSPSURF(PIPE_B));
+ MMIO_D(DSPOFFSET(PIPE_B));
+ MMIO_D(DSPSURFLIVE(PIPE_B));
+ MMIO_D(REG_50080(PIPE_B, PLANE_PRIMARY));
+ MMIO_D(DSPCNTR(PIPE_C));
+ MMIO_D(DSPADDR(PIPE_C));
+ MMIO_D(DSPSTRIDE(PIPE_C));
+ MMIO_D(DSPPOS(PIPE_C));
+ MMIO_D(DSPSIZE(PIPE_C));
+ MMIO_D(DSPSURF(PIPE_C));
+ MMIO_D(DSPOFFSET(PIPE_C));
+ MMIO_D(DSPSURFLIVE(PIPE_C));
+ MMIO_D(REG_50080(PIPE_C, PLANE_PRIMARY));
+ MMIO_D(SPRCTL(PIPE_A));
+ MMIO_D(SPRLINOFF(PIPE_A));
+ MMIO_D(SPRSTRIDE(PIPE_A));
+ MMIO_D(SPRPOS(PIPE_A));
+ MMIO_D(SPRSIZE(PIPE_A));
+ MMIO_D(SPRKEYVAL(PIPE_A));
+ MMIO_D(SPRKEYMSK(PIPE_A));
+ MMIO_D(SPRSURF(PIPE_A));
+ MMIO_D(SPRKEYMAX(PIPE_A));
+ MMIO_D(SPROFFSET(PIPE_A));
+ MMIO_D(SPRSCALE(PIPE_A));
+ MMIO_D(SPRSURFLIVE(PIPE_A));
+ MMIO_D(REG_50080(PIPE_A, PLANE_SPRITE0));
+ MMIO_D(SPRCTL(PIPE_B));
+ MMIO_D(SPRLINOFF(PIPE_B));
+ MMIO_D(SPRSTRIDE(PIPE_B));
+ MMIO_D(SPRPOS(PIPE_B));
+ MMIO_D(SPRSIZE(PIPE_B));
+ MMIO_D(SPRKEYVAL(PIPE_B));
+ MMIO_D(SPRKEYMSK(PIPE_B));
+ MMIO_D(SPRSURF(PIPE_B));
+ MMIO_D(SPRKEYMAX(PIPE_B));
+ MMIO_D(SPROFFSET(PIPE_B));
+ MMIO_D(SPRSCALE(PIPE_B));
+ MMIO_D(SPRSURFLIVE(PIPE_B));
+ MMIO_D(REG_50080(PIPE_B, PLANE_SPRITE0));
+ MMIO_D(SPRCTL(PIPE_C));
+ MMIO_D(SPRLINOFF(PIPE_C));
+ MMIO_D(SPRSTRIDE(PIPE_C));
+ MMIO_D(SPRPOS(PIPE_C));
+ MMIO_D(SPRSIZE(PIPE_C));
+ MMIO_D(SPRKEYVAL(PIPE_C));
+ MMIO_D(SPRKEYMSK(PIPE_C));
+ MMIO_D(SPRSURF(PIPE_C));
+ MMIO_D(SPRKEYMAX(PIPE_C));
+ MMIO_D(SPROFFSET(PIPE_C));
+ MMIO_D(SPRSCALE(PIPE_C));
+ MMIO_D(SPRSURFLIVE(PIPE_C));
+ MMIO_D(REG_50080(PIPE_C, PLANE_SPRITE0));
+ MMIO_D(HTOTAL(TRANSCODER_A));
+ MMIO_D(HBLANK(TRANSCODER_A));
+ MMIO_D(HSYNC(TRANSCODER_A));
+ MMIO_D(VTOTAL(TRANSCODER_A));
+ MMIO_D(VBLANK(TRANSCODER_A));
+ MMIO_D(VSYNC(TRANSCODER_A));
+ MMIO_D(BCLRPAT(TRANSCODER_A));
+ MMIO_D(VSYNCSHIFT(TRANSCODER_A));
+ MMIO_D(PIPESRC(TRANSCODER_A));
+ MMIO_D(HTOTAL(TRANSCODER_B));
+ MMIO_D(HBLANK(TRANSCODER_B));
+ MMIO_D(HSYNC(TRANSCODER_B));
+ MMIO_D(VTOTAL(TRANSCODER_B));
+ MMIO_D(VBLANK(TRANSCODER_B));
+ MMIO_D(VSYNC(TRANSCODER_B));
+ MMIO_D(BCLRPAT(TRANSCODER_B));
+ MMIO_D(VSYNCSHIFT(TRANSCODER_B));
+ MMIO_D(PIPESRC(TRANSCODER_B));
+ MMIO_D(HTOTAL(TRANSCODER_C));
+ MMIO_D(HBLANK(TRANSCODER_C));
+ MMIO_D(HSYNC(TRANSCODER_C));
+ MMIO_D(VTOTAL(TRANSCODER_C));
+ MMIO_D(VBLANK(TRANSCODER_C));
+ MMIO_D(VSYNC(TRANSCODER_C));
+ MMIO_D(BCLRPAT(TRANSCODER_C));
+ MMIO_D(VSYNCSHIFT(TRANSCODER_C));
+ MMIO_D(PIPESRC(TRANSCODER_C));
+ MMIO_D(HTOTAL(TRANSCODER_EDP));
+ MMIO_D(HBLANK(TRANSCODER_EDP));
+ MMIO_D(HSYNC(TRANSCODER_EDP));
+ MMIO_D(VTOTAL(TRANSCODER_EDP));
+ MMIO_D(VBLANK(TRANSCODER_EDP));
+ MMIO_D(VSYNC(TRANSCODER_EDP));
+ MMIO_D(BCLRPAT(TRANSCODER_EDP));
+ MMIO_D(VSYNCSHIFT(TRANSCODER_EDP));
+ MMIO_D(PIPE_DATA_M1(TRANSCODER_A));
+ MMIO_D(PIPE_DATA_N1(TRANSCODER_A));
+ MMIO_D(PIPE_DATA_M2(TRANSCODER_A));
+ MMIO_D(PIPE_DATA_N2(TRANSCODER_A));
+ MMIO_D(PIPE_LINK_M1(TRANSCODER_A));
+ MMIO_D(PIPE_LINK_N1(TRANSCODER_A));
+ MMIO_D(PIPE_LINK_M2(TRANSCODER_A));
+ MMIO_D(PIPE_LINK_N2(TRANSCODER_A));
+ MMIO_D(PIPE_DATA_M1(TRANSCODER_B));
+ MMIO_D(PIPE_DATA_N1(TRANSCODER_B));
+ MMIO_D(PIPE_DATA_M2(TRANSCODER_B));
+ MMIO_D(PIPE_DATA_N2(TRANSCODER_B));
+ MMIO_D(PIPE_LINK_M1(TRANSCODER_B));
+ MMIO_D(PIPE_LINK_N1(TRANSCODER_B));
+ MMIO_D(PIPE_LINK_M2(TRANSCODER_B));
+ MMIO_D(PIPE_LINK_N2(TRANSCODER_B));
+ MMIO_D(PIPE_DATA_M1(TRANSCODER_C));
+ MMIO_D(PIPE_DATA_N1(TRANSCODER_C));
+ MMIO_D(PIPE_DATA_M2(TRANSCODER_C));
+ MMIO_D(PIPE_DATA_N2(TRANSCODER_C));
+ MMIO_D(PIPE_LINK_M1(TRANSCODER_C));
+ MMIO_D(PIPE_LINK_N1(TRANSCODER_C));
+ MMIO_D(PIPE_LINK_M2(TRANSCODER_C));
+ MMIO_D(PIPE_LINK_N2(TRANSCODER_C));
+ MMIO_D(PIPE_DATA_M1(TRANSCODER_EDP));
+ MMIO_D(PIPE_DATA_N1(TRANSCODER_EDP));
+ MMIO_D(PIPE_DATA_M2(TRANSCODER_EDP));
+ MMIO_D(PIPE_DATA_N2(TRANSCODER_EDP));
+ MMIO_D(PIPE_LINK_M1(TRANSCODER_EDP));
+ MMIO_D(PIPE_LINK_N1(TRANSCODER_EDP));
+ MMIO_D(PIPE_LINK_M2(TRANSCODER_EDP));
+ MMIO_D(PIPE_LINK_N2(TRANSCODER_EDP));
+ MMIO_D(PF_CTL(PIPE_A));
+ MMIO_D(PF_WIN_SZ(PIPE_A));
+ MMIO_D(PF_WIN_POS(PIPE_A));
+ MMIO_D(PF_VSCALE(PIPE_A));
+ MMIO_D(PF_HSCALE(PIPE_A));
+ MMIO_D(PF_CTL(PIPE_B));
+ MMIO_D(PF_WIN_SZ(PIPE_B));
+ MMIO_D(PF_WIN_POS(PIPE_B));
+ MMIO_D(PF_VSCALE(PIPE_B));
+ MMIO_D(PF_HSCALE(PIPE_B));
+ MMIO_D(PF_CTL(PIPE_C));
+ MMIO_D(PF_WIN_SZ(PIPE_C));
+ MMIO_D(PF_WIN_POS(PIPE_C));
+ MMIO_D(PF_VSCALE(PIPE_C));
+ MMIO_D(PF_HSCALE(PIPE_C));
+ MMIO_D(WM0_PIPE_ILK(PIPE_A));
+ MMIO_D(WM0_PIPE_ILK(PIPE_B));
+ MMIO_D(WM0_PIPE_ILK(PIPE_C));
+ MMIO_D(WM1_LP_ILK);
+ MMIO_D(WM2_LP_ILK);
+ MMIO_D(WM3_LP_ILK);
+ MMIO_D(WM1S_LP_ILK);
+ MMIO_D(WM2S_LP_IVB);
+ MMIO_D(WM3S_LP_IVB);
+ MMIO_D(BLC_PWM_CPU_CTL2);
+ MMIO_D(BLC_PWM_CPU_CTL);
+ MMIO_D(BLC_PWM_PCH_CTL1);
+ MMIO_D(BLC_PWM_PCH_CTL2);
+ MMIO_D(_MMIO(0x48268));
+ MMIO_F(PCH_GMBUS0, 4 * 4);
+ MMIO_F(PCH_GPIO_BASE, 6 * 4);
+ MMIO_F(_MMIO(0xe4f00), 0x28);
+ MMIO_D(_MMIO(_PCH_TRANSACONF));
+ MMIO_D(_MMIO(_PCH_TRANSBCONF));
+ MMIO_D(FDI_RX_IIR(PIPE_A));
+ MMIO_D(FDI_RX_IIR(PIPE_B));
+ MMIO_D(FDI_RX_IIR(PIPE_C));
+ MMIO_D(FDI_RX_IMR(PIPE_A));
+ MMIO_D(FDI_RX_IMR(PIPE_B));
+ MMIO_D(FDI_RX_IMR(PIPE_C));
+ MMIO_D(FDI_RX_CTL(PIPE_A));
+ MMIO_D(FDI_RX_CTL(PIPE_B));
+ MMIO_D(FDI_RX_CTL(PIPE_C));
+ MMIO_D(_MMIO(_PCH_TRANS_HTOTAL_A));
+ MMIO_D(_MMIO(_PCH_TRANS_HBLANK_A));
+ MMIO_D(_MMIO(_PCH_TRANS_HSYNC_A));
+ MMIO_D(_MMIO(_PCH_TRANS_VTOTAL_A));
+ MMIO_D(_MMIO(_PCH_TRANS_VBLANK_A));
+ MMIO_D(_MMIO(_PCH_TRANS_VSYNC_A));
+ MMIO_D(_MMIO(_PCH_TRANS_VSYNCSHIFT_A));
+ MMIO_D(_MMIO(_PCH_TRANS_HTOTAL_B));
+ MMIO_D(_MMIO(_PCH_TRANS_HBLANK_B));
+ MMIO_D(_MMIO(_PCH_TRANS_HSYNC_B));
+ MMIO_D(_MMIO(_PCH_TRANS_VTOTAL_B));
+ MMIO_D(_MMIO(_PCH_TRANS_VBLANK_B));
+ MMIO_D(_MMIO(_PCH_TRANS_VSYNC_B));
+ MMIO_D(_MMIO(_PCH_TRANS_VSYNCSHIFT_B));
+ MMIO_D(_MMIO(_PCH_TRANSA_DATA_M1));
+ MMIO_D(_MMIO(_PCH_TRANSA_DATA_N1));
+ MMIO_D(_MMIO(_PCH_TRANSA_DATA_M2));
+ MMIO_D(_MMIO(_PCH_TRANSA_DATA_N2));
+ MMIO_D(_MMIO(_PCH_TRANSA_LINK_M1));
+ MMIO_D(_MMIO(_PCH_TRANSA_LINK_N1));
+ MMIO_D(_MMIO(_PCH_TRANSA_LINK_M2));
+ MMIO_D(_MMIO(_PCH_TRANSA_LINK_N2));
+ MMIO_D(TRANS_DP_CTL(PIPE_A));
+ MMIO_D(TRANS_DP_CTL(PIPE_B));
+ MMIO_D(TRANS_DP_CTL(PIPE_C));
+ MMIO_D(TVIDEO_DIP_CTL(PIPE_A));
+ MMIO_D(TVIDEO_DIP_DATA(PIPE_A));
+ MMIO_D(TVIDEO_DIP_GCP(PIPE_A));
+ MMIO_D(TVIDEO_DIP_CTL(PIPE_B));
+ MMIO_D(TVIDEO_DIP_DATA(PIPE_B));
+ MMIO_D(TVIDEO_DIP_GCP(PIPE_B));
+ MMIO_D(TVIDEO_DIP_CTL(PIPE_C));
+ MMIO_D(TVIDEO_DIP_DATA(PIPE_C));
+ MMIO_D(TVIDEO_DIP_GCP(PIPE_C));
+ MMIO_D(_MMIO(_FDI_RXA_MISC));
+ MMIO_D(_MMIO(_FDI_RXB_MISC));
+ MMIO_D(_MMIO(_FDI_RXA_TUSIZE1));
+ MMIO_D(_MMIO(_FDI_RXA_TUSIZE2));
+ MMIO_D(_MMIO(_FDI_RXB_TUSIZE1));
+ MMIO_D(_MMIO(_FDI_RXB_TUSIZE2));
+ MMIO_D(PCH_PP_CONTROL);
+ MMIO_D(PCH_PP_DIVISOR);
+ MMIO_D(PCH_PP_STATUS);
+ MMIO_D(PCH_LVDS);
+ MMIO_D(_MMIO(_PCH_DPLL_A));
+ MMIO_D(_MMIO(_PCH_DPLL_B));
+ MMIO_D(_MMIO(_PCH_FPA0));
+ MMIO_D(_MMIO(_PCH_FPA1));
+ MMIO_D(_MMIO(_PCH_FPB0));
+ MMIO_D(_MMIO(_PCH_FPB1));
+ MMIO_D(PCH_DREF_CONTROL);
+ MMIO_D(PCH_RAWCLK_FREQ);
+ MMIO_D(PCH_DPLL_SEL);
+ MMIO_D(_MMIO(0x61208));
+ MMIO_D(_MMIO(0x6120c));
+ MMIO_D(PCH_PP_ON_DELAYS);
+ MMIO_D(PCH_PP_OFF_DELAYS);
+ MMIO_D(_MMIO(0xe651c));
+ MMIO_D(_MMIO(0xe661c));
+ MMIO_D(_MMIO(0xe671c));
+ MMIO_D(_MMIO(0xe681c));
+ MMIO_D(_MMIO(0xe6c04));
+ MMIO_D(_MMIO(0xe6e1c));
+ MMIO_D(PCH_PORT_HOTPLUG);
+ MMIO_D(LCPLL_CTL);
+ MMIO_D(FUSE_STRAP);
+ MMIO_D(DIGITAL_PORT_HOTPLUG_CNTRL);
+ MMIO_D(DISP_ARB_CTL);
+ MMIO_D(DISP_ARB_CTL2);
+ MMIO_D(ILK_DISPLAY_CHICKEN1);
+ MMIO_D(ILK_DISPLAY_CHICKEN2);
+ MMIO_D(ILK_DSPCLK_GATE_D);
+ MMIO_D(SOUTH_CHICKEN1);
+ MMIO_D(SOUTH_CHICKEN2);
+ MMIO_D(_MMIO(_TRANSA_CHICKEN1));
+ MMIO_D(_MMIO(_TRANSB_CHICKEN1));
+ MMIO_D(SOUTH_DSPCLK_GATE_D);
+ MMIO_D(_MMIO(_TRANSA_CHICKEN2));
+ MMIO_D(_MMIO(_TRANSB_CHICKEN2));
+ MMIO_D(ILK_DPFC_CB_BASE(INTEL_FBC_A));
+ MMIO_D(ILK_DPFC_CONTROL(INTEL_FBC_A));
+ MMIO_D(ILK_DPFC_RECOMP_CTL(INTEL_FBC_A));
+ MMIO_D(ILK_DPFC_STATUS(INTEL_FBC_A));
+ MMIO_D(ILK_DPFC_FENCE_YOFF(INTEL_FBC_A));
+ MMIO_D(ILK_DPFC_CHICKEN(INTEL_FBC_A));
+ MMIO_D(ILK_FBC_RT_BASE);
+ MMIO_D(IPS_CTL);
+ MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_A));
+ MMIO_D(PIPE_CSC_COEFF_BY(PIPE_A));
+ MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_A));
+ MMIO_D(PIPE_CSC_COEFF_BU(PIPE_A));
+ MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_A));
+ MMIO_D(PIPE_CSC_COEFF_BV(PIPE_A));
+ MMIO_D(PIPE_CSC_MODE(PIPE_A));
+ MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_A));
+ MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_A));
+ MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_A));
+ MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_A));
+ MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_A));
+ MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_A));
+ MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_B));
+ MMIO_D(PIPE_CSC_COEFF_BY(PIPE_B));
+ MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_B));
+ MMIO_D(PIPE_CSC_COEFF_BU(PIPE_B));
+ MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_B));
+ MMIO_D(PIPE_CSC_COEFF_BV(PIPE_B));
+ MMIO_D(PIPE_CSC_MODE(PIPE_B));
+ MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_B));
+ MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_B));
+ MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_B));
+ MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_B));
+ MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_B));
+ MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_B));
+ MMIO_D(PIPE_CSC_COEFF_RY_GY(PIPE_C));
+ MMIO_D(PIPE_CSC_COEFF_BY(PIPE_C));
+ MMIO_D(PIPE_CSC_COEFF_RU_GU(PIPE_C));
+ MMIO_D(PIPE_CSC_COEFF_BU(PIPE_C));
+ MMIO_D(PIPE_CSC_COEFF_RV_GV(PIPE_C));
+ MMIO_D(PIPE_CSC_COEFF_BV(PIPE_C));
+ MMIO_D(PIPE_CSC_MODE(PIPE_C));
+ MMIO_D(PIPE_CSC_PREOFF_HI(PIPE_C));
+ MMIO_D(PIPE_CSC_PREOFF_ME(PIPE_C));
+ MMIO_D(PIPE_CSC_PREOFF_LO(PIPE_C));
+ MMIO_D(PIPE_CSC_POSTOFF_HI(PIPE_C));
+ MMIO_D(PIPE_CSC_POSTOFF_ME(PIPE_C));
+ MMIO_D(PIPE_CSC_POSTOFF_LO(PIPE_C));
+ MMIO_D(PREC_PAL_INDEX(PIPE_A));
+ MMIO_D(PREC_PAL_DATA(PIPE_A));
+ MMIO_F(PREC_PAL_GC_MAX(PIPE_A, 0), 4 * 3);
+ MMIO_D(PREC_PAL_INDEX(PIPE_B));
+ MMIO_D(PREC_PAL_DATA(PIPE_B));
+ MMIO_F(PREC_PAL_GC_MAX(PIPE_B, 0), 4 * 3);
+ MMIO_D(PREC_PAL_INDEX(PIPE_C));
+ MMIO_D(PREC_PAL_DATA(PIPE_C));
+ MMIO_F(PREC_PAL_GC_MAX(PIPE_C, 0), 4 * 3);
+ MMIO_D(_MMIO(0x60110));
+ MMIO_D(_MMIO(0x61110));
+ MMIO_F(_MMIO(0x70400), 0x40);
+ MMIO_F(_MMIO(0x71400), 0x40);
+ MMIO_F(_MMIO(0x72400), 0x40);
+ MMIO_D(WM_LINETIME(PIPE_A));
+ MMIO_D(WM_LINETIME(PIPE_B));
+ MMIO_D(WM_LINETIME(PIPE_C));
+ MMIO_D(SPLL_CTL);
+ MMIO_D(_MMIO(_WRPLL_CTL1));
+ MMIO_D(_MMIO(_WRPLL_CTL2));
+ MMIO_D(PORT_CLK_SEL(PORT_A));
+ MMIO_D(PORT_CLK_SEL(PORT_B));
+ MMIO_D(PORT_CLK_SEL(PORT_C));
+ MMIO_D(PORT_CLK_SEL(PORT_D));
+ MMIO_D(PORT_CLK_SEL(PORT_E));
+ MMIO_D(TRANS_CLK_SEL(TRANSCODER_A));
+ MMIO_D(TRANS_CLK_SEL(TRANSCODER_B));
+ MMIO_D(TRANS_CLK_SEL(TRANSCODER_C));
+ MMIO_D(HSW_NDE_RSTWRN_OPT);
+ MMIO_D(_MMIO(0x46508));
+ MMIO_D(_MMIO(0x49080));
+ MMIO_D(_MMIO(0x49180));
+ MMIO_D(_MMIO(0x49280));
+ MMIO_F(_MMIO(0x49090), 0x14);
+ MMIO_F(_MMIO(0x49190), 0x14);
+ MMIO_F(_MMIO(0x49290), 0x14);
+ MMIO_D(GAMMA_MODE(PIPE_A));
+ MMIO_D(GAMMA_MODE(PIPE_B));
+ MMIO_D(GAMMA_MODE(PIPE_C));
+ MMIO_D(PIPE_MULT(PIPE_A));
+ MMIO_D(PIPE_MULT(PIPE_B));
+ MMIO_D(PIPE_MULT(PIPE_C));
+ MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_A));
+ MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_B));
+ MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_C));
+ MMIO_D(SFUSE_STRAP);
+ MMIO_D(SBI_ADDR);
+ MMIO_D(SBI_DATA);
+ MMIO_D(SBI_CTL_STAT);
+ MMIO_D(PIXCLK_GATE);
+ MMIO_F(_MMIO(_DPA_AUX_CH_CTL), 6 * 4);
+ MMIO_D(DDI_BUF_CTL(PORT_A));
+ MMIO_D(DDI_BUF_CTL(PORT_B));
+ MMIO_D(DDI_BUF_CTL(PORT_C));
+ MMIO_D(DDI_BUF_CTL(PORT_D));
+ MMIO_D(DDI_BUF_CTL(PORT_E));
+ MMIO_D(DP_TP_CTL(PORT_A));
+ MMIO_D(DP_TP_CTL(PORT_B));
+ MMIO_D(DP_TP_CTL(PORT_C));
+ MMIO_D(DP_TP_CTL(PORT_D));
+ MMIO_D(DP_TP_CTL(PORT_E));
+ MMIO_D(DP_TP_STATUS(PORT_A));
+ MMIO_D(DP_TP_STATUS(PORT_B));
+ MMIO_D(DP_TP_STATUS(PORT_C));
+ MMIO_D(DP_TP_STATUS(PORT_D));
+ MMIO_D(DP_TP_STATUS(PORT_E));
+ MMIO_F(_MMIO(_DDI_BUF_TRANS_A), 0x50);
+ MMIO_F(_MMIO(0x64e60), 0x50);
+ MMIO_F(_MMIO(0x64eC0), 0x50);
+ MMIO_F(_MMIO(0x64f20), 0x50);
+ MMIO_F(_MMIO(0x64f80), 0x50);
+ MMIO_D(HSW_AUD_CFG(PIPE_A));
+ MMIO_D(HSW_AUD_PIN_ELD_CP_VLD);
+ MMIO_D(HSW_AUD_MISC_CTRL(PIPE_A));
+ MMIO_D(_MMIO(_TRANS_DDI_FUNC_CTL_A));
+ MMIO_D(_MMIO(_TRANS_DDI_FUNC_CTL_B));
+ MMIO_D(_MMIO(_TRANS_DDI_FUNC_CTL_C));
+ MMIO_D(_MMIO(_TRANS_DDI_FUNC_CTL_EDP));
+ MMIO_D(_MMIO(_TRANSA_MSA_MISC));
+ MMIO_D(_MMIO(_TRANSB_MSA_MISC));
+ MMIO_D(_MMIO(_TRANSC_MSA_MISC));
+ MMIO_D(_MMIO(_TRANS_EDP_MSA_MISC));
+ MMIO_D(FORCEWAKE);
+ MMIO_D(FORCEWAKE_ACK);
+ MMIO_D(GEN6_GT_CORE_STATUS);
+ MMIO_D(GEN6_GT_THREAD_STATUS_REG);
+ MMIO_D(GTFIFODBG);
+ MMIO_D(GTFIFOCTL);
+ MMIO_D(ECOBUS);
+ MMIO_D(GEN6_RC_CONTROL);
+ MMIO_D(GEN6_RC_STATE);
+ MMIO_D(GEN6_RPNSWREQ);
+ MMIO_D(GEN6_RC_VIDEO_FREQ);
+ MMIO_D(GEN6_RP_DOWN_TIMEOUT);
+ MMIO_D(GEN6_RP_INTERRUPT_LIMITS);
+ MMIO_D(GEN6_RPSTAT1);
+ MMIO_D(GEN6_RP_CONTROL);
+ MMIO_D(GEN6_RP_UP_THRESHOLD);
+ MMIO_D(GEN6_RP_DOWN_THRESHOLD);
+ MMIO_D(GEN6_RP_CUR_UP_EI);
+ MMIO_D(GEN6_RP_CUR_UP);
+ MMIO_D(GEN6_RP_PREV_UP);
+ MMIO_D(GEN6_RP_CUR_DOWN_EI);
+ MMIO_D(GEN6_RP_CUR_DOWN);
+ MMIO_D(GEN6_RP_PREV_DOWN);
+ MMIO_D(GEN6_RP_UP_EI);
+ MMIO_D(GEN6_RP_DOWN_EI);
+ MMIO_D(GEN6_RP_IDLE_HYSTERSIS);
+ MMIO_D(GEN6_RC1_WAKE_RATE_LIMIT);
+ MMIO_D(GEN6_RC6_WAKE_RATE_LIMIT);
+ MMIO_D(GEN6_RC6pp_WAKE_RATE_LIMIT);
+ MMIO_D(GEN6_RC_EVALUATION_INTERVAL);
+ MMIO_D(GEN6_RC_IDLE_HYSTERSIS);
+ MMIO_D(GEN6_RC_SLEEP);
+ MMIO_D(GEN6_RC1e_THRESHOLD);
+ MMIO_D(GEN6_RC6_THRESHOLD);
+ MMIO_D(GEN6_RC6p_THRESHOLD);
+ MMIO_D(GEN6_RC6pp_THRESHOLD);
+ MMIO_D(GEN6_PMINTRMSK);
+
+ MMIO_D(RSTDBYCTL);
+ MMIO_D(GEN6_GDRST);
+ MMIO_F(FENCE_REG_GEN6_LO(0), 0x80);
+ MMIO_D(CPU_VGACNTRL);
+ MMIO_D(TILECTL);
+ MMIO_D(GEN6_UCGCTL1);
+ MMIO_D(GEN6_UCGCTL2);
+ MMIO_F(_MMIO(0x4f000), 0x90);
+ MMIO_D(GEN6_PCODE_DATA);
+ MMIO_D(_MMIO(0x13812c));
+ MMIO_D(GEN7_ERR_INT);
+ MMIO_D(HSW_EDRAM_CAP);
+ MMIO_D(HSW_IDICR);
+ MMIO_D(GFX_FLSH_CNTL_GEN6);
+ MMIO_D(_MMIO(0x3c));
+ MMIO_D(_MMIO(0x860));
+ MMIO_D(ECOSKPD(RENDER_RING_BASE));
+ MMIO_D(_MMIO(0x121d0));
+ MMIO_D(ECOSKPD(BLT_RING_BASE));
+ MMIO_D(_MMIO(0x41d0));
+ MMIO_D(GAC_ECO_BITS);
+ MMIO_D(_MMIO(0x6200));
+ MMIO_D(_MMIO(0x6204));
+ MMIO_D(_MMIO(0x6208));
+ MMIO_D(_MMIO(0x7118));
+ MMIO_D(_MMIO(0x7180));
+ MMIO_D(_MMIO(0x7408));
+ MMIO_D(_MMIO(0x7c00));
+ MMIO_D(GEN6_MBCTL);
+ MMIO_D(_MMIO(0x911c));
+ MMIO_D(_MMIO(0x9120));
+ MMIO_D(GEN7_UCGCTL4);
+ MMIO_D(GAB_CTL);
+ MMIO_D(_MMIO(0x48800));
+ MMIO_D(_MMIO(0xce044));
+ MMIO_D(_MMIO(0xe6500));
+ MMIO_D(_MMIO(0xe6504));
+ MMIO_D(_MMIO(0xe6600));
+ MMIO_D(_MMIO(0xe6604));
+ MMIO_D(_MMIO(0xe6700));
+ MMIO_D(_MMIO(0xe6704));
+ MMIO_D(_MMIO(0xe6800));
+ MMIO_D(_MMIO(0xe6804));
+ MMIO_D(PCH_GMBUS4);
+ MMIO_D(PCH_GMBUS5);
+ MMIO_D(_MMIO(0x902c));
+ MMIO_D(_MMIO(0xec008));
+ MMIO_D(_MMIO(0xec00c));
+ MMIO_D(_MMIO(0xec008 + 0x18));
+ MMIO_D(_MMIO(0xec00c + 0x18));
+ MMIO_D(_MMIO(0xec008 + 0x18 * 2));
+ MMIO_D(_MMIO(0xec00c + 0x18 * 2));
+ MMIO_D(_MMIO(0xec008 + 0x18 * 3));
+ MMIO_D(_MMIO(0xec00c + 0x18 * 3));
+ MMIO_D(_MMIO(0xec408));
+ MMIO_D(_MMIO(0xec40c));
+ MMIO_D(_MMIO(0xec408 + 0x18));
+ MMIO_D(_MMIO(0xec40c + 0x18));
+ MMIO_D(_MMIO(0xec408 + 0x18 * 2));
+ MMIO_D(_MMIO(0xec40c + 0x18 * 2));
+ MMIO_D(_MMIO(0xec408 + 0x18 * 3));
+ MMIO_D(_MMIO(0xec40c + 0x18 * 3));
+ MMIO_D(_MMIO(0xfc810));
+ MMIO_D(_MMIO(0xfc81c));
+ MMIO_D(_MMIO(0xfc828));
+ MMIO_D(_MMIO(0xfc834));
+ MMIO_D(_MMIO(0xfcc00));
+ MMIO_D(_MMIO(0xfcc0c));
+ MMIO_D(_MMIO(0xfcc18));
+ MMIO_D(_MMIO(0xfcc24));
+ MMIO_D(_MMIO(0xfd000));
+ MMIO_D(_MMIO(0xfd00c));
+ MMIO_D(_MMIO(0xfd018));
+ MMIO_D(_MMIO(0xfd024));
+ MMIO_D(_MMIO(0xfd034));
+ MMIO_D(FPGA_DBG);
+ MMIO_D(_MMIO(0x2054));
+ MMIO_D(_MMIO(0x12054));
+ MMIO_D(_MMIO(0x22054));
+ MMIO_D(_MMIO(0x1a054));
+ MMIO_D(_MMIO(0x44070));
+ MMIO_D(_MMIO(0x2178));
+ MMIO_D(_MMIO(0x217c));
+ MMIO_D(_MMIO(0x12178));
+ MMIO_D(_MMIO(0x1217c));
+ MMIO_F(_MMIO(0x5200), 32);
+ MMIO_F(_MMIO(0x5240), 32);
+ MMIO_F(_MMIO(0x5280), 16);
+ MMIO_D(BCS_SWCTRL);
+ MMIO_F(HS_INVOCATION_COUNT, 8);
+ MMIO_F(DS_INVOCATION_COUNT, 8);
+ MMIO_F(IA_VERTICES_COUNT, 8);
+ MMIO_F(IA_PRIMITIVES_COUNT, 8);
+ MMIO_F(VS_INVOCATION_COUNT, 8);
+ MMIO_F(GS_INVOCATION_COUNT, 8);
+ MMIO_F(GS_PRIMITIVES_COUNT, 8);
+ MMIO_F(CL_INVOCATION_COUNT, 8);
+ MMIO_F(CL_PRIMITIVES_COUNT, 8);
+ MMIO_F(PS_INVOCATION_COUNT, 8);
+ MMIO_F(PS_DEPTH_COUNT, 8);
+ MMIO_D(ARB_MODE);
+ MMIO_RING_D(RING_BBADDR);
+ MMIO_D(_MMIO(0x2220));
+ MMIO_D(_MMIO(0x12220));
+ MMIO_D(_MMIO(0x22220));
+ MMIO_RING_D(RING_SYNC_1);
+ MMIO_RING_D(RING_SYNC_0);
+ MMIO_D(GUC_STATUS);
+
+ MMIO_F(_MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000);
+ MMIO_F(_MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE);
+ MMIO_F(LGC_PALETTE(PIPE_A, 0), 1024);
+ MMIO_F(LGC_PALETTE(PIPE_B, 0), 1024);
+ MMIO_F(LGC_PALETTE(PIPE_C, 0), 1024);
+
+ return 0;
+}
+
+static int iterate_bdw_only_mmio(struct intel_gvt_mmio_table_iter *iter)
+{
+ MMIO_D(HSW_PWR_WELL_CTL1);
+ MMIO_D(HSW_PWR_WELL_CTL2);
+ MMIO_D(HSW_PWR_WELL_CTL3);
+ MMIO_D(HSW_PWR_WELL_CTL4);
+ MMIO_D(HSW_PWR_WELL_CTL5);
+ MMIO_D(HSW_PWR_WELL_CTL6);
+
+ MMIO_D(WM_MISC);
+ MMIO_D(_MMIO(_SRD_CTL_EDP));
+
+ MMIO_D(_MMIO(0xb1f0));
+ MMIO_D(_MMIO(0xb1c0));
+ MMIO_D(_MMIO(0xb100));
+ MMIO_D(_MMIO(0xb10c));
+ MMIO_D(_MMIO(0xb110));
+ MMIO_D(_MMIO(0x83a4));
+ MMIO_D(_MMIO(0x8430));
+ MMIO_D(_MMIO(0x2248));
+ MMIO_D(FORCEWAKE_ACK_HSW);
+
+ return 0;
+}
+
+static int iterate_bdw_plus_mmio(struct intel_gvt_mmio_table_iter *iter)
+{
+ struct drm_i915_private *dev_priv = iter->i915;
+
+ MMIO_D(GEN8_GT_IMR(0));
+ MMIO_D(GEN8_GT_IER(0));
+ MMIO_D(GEN8_GT_IIR(0));
+ MMIO_D(GEN8_GT_ISR(0));
+ MMIO_D(GEN8_GT_IMR(1));
+ MMIO_D(GEN8_GT_IER(1));
+ MMIO_D(GEN8_GT_IIR(1));
+ MMIO_D(GEN8_GT_ISR(1));
+ MMIO_D(GEN8_GT_IMR(2));
+ MMIO_D(GEN8_GT_IER(2));
+ MMIO_D(GEN8_GT_IIR(2));
+ MMIO_D(GEN8_GT_ISR(2));
+ MMIO_D(GEN8_GT_IMR(3));
+ MMIO_D(GEN8_GT_IER(3));
+ MMIO_D(GEN8_GT_IIR(3));
+ MMIO_D(GEN8_GT_ISR(3));
+ MMIO_D(GEN8_DE_PIPE_IMR(PIPE_A));
+ MMIO_D(GEN8_DE_PIPE_IER(PIPE_A));
+ MMIO_D(GEN8_DE_PIPE_IIR(PIPE_A));
+ MMIO_D(GEN8_DE_PIPE_ISR(PIPE_A));
+ MMIO_D(GEN8_DE_PIPE_IMR(PIPE_B));
+ MMIO_D(GEN8_DE_PIPE_IER(PIPE_B));
+ MMIO_D(GEN8_DE_PIPE_IIR(PIPE_B));
+ MMIO_D(GEN8_DE_PIPE_ISR(PIPE_B));
+ MMIO_D(GEN8_DE_PIPE_IMR(PIPE_C));
+ MMIO_D(GEN8_DE_PIPE_IER(PIPE_C));
+ MMIO_D(GEN8_DE_PIPE_IIR(PIPE_C));
+ MMIO_D(GEN8_DE_PIPE_ISR(PIPE_C));
+ MMIO_D(GEN8_DE_PORT_IMR);
+ MMIO_D(GEN8_DE_PORT_IER);
+ MMIO_D(GEN8_DE_PORT_IIR);
+ MMIO_D(GEN8_DE_PORT_ISR);
+ MMIO_D(GEN8_DE_MISC_IMR);
+ MMIO_D(GEN8_DE_MISC_IER);
+ MMIO_D(GEN8_DE_MISC_IIR);
+ MMIO_D(GEN8_DE_MISC_ISR);
+ MMIO_D(GEN8_PCU_IMR);
+ MMIO_D(GEN8_PCU_IER);
+ MMIO_D(GEN8_PCU_IIR);
+ MMIO_D(GEN8_PCU_ISR);
+ MMIO_D(GEN8_MASTER_IRQ);
+ MMIO_RING_D(RING_ACTHD_UDW);
+
+#define RING_REG(base) _MMIO((base) + 0xd0)
+ MMIO_RING_D(RING_REG);
+#undef RING_REG
+
+#define RING_REG(base) _MMIO((base) + 0x230)
+ MMIO_RING_D(RING_REG);
+#undef RING_REG
+
+#define RING_REG(base) _MMIO((base) + 0x234)
+ MMIO_RING_F(RING_REG, 8);
+#undef RING_REG
+
+#define RING_REG(base) _MMIO((base) + 0x244)
+ MMIO_RING_D(RING_REG);
+#undef RING_REG
+
+#define RING_REG(base) _MMIO((base) + 0x370)
+ MMIO_RING_F(RING_REG, 48);
+#undef RING_REG
+
+#define RING_REG(base) _MMIO((base) + 0x3a0)
+ MMIO_RING_D(RING_REG);
+#undef RING_REG
+
+ MMIO_D(PIPEMISC(PIPE_A));
+ MMIO_D(PIPEMISC(PIPE_B));
+ MMIO_D(PIPEMISC(PIPE_C));
+ MMIO_D(_MMIO(0x1c1d0));
+ MMIO_D(GEN6_MBCUNIT_SNPCR);
+ MMIO_D(GEN7_MISCCPCTL);
+ MMIO_D(_MMIO(0x1c054));
+ MMIO_D(GEN6_PCODE_MAILBOX);
+ if (!IS_BROXTON(dev_priv))
+ MMIO_D(GEN8_PRIVATE_PAT_LO);
+ MMIO_D(GEN8_PRIVATE_PAT_HI);
+ MMIO_D(GAMTARBMODE);
+
+#define RING_REG(base) _MMIO((base) + 0x270)
+ MMIO_RING_F(RING_REG, 32);
+#undef RING_REG
+
+ MMIO_RING_D(RING_HWS_PGA);
+ MMIO_D(HDC_CHICKEN0);
+ MMIO_D(CHICKEN_PIPESL_1(PIPE_A));
+ MMIO_D(CHICKEN_PIPESL_1(PIPE_B));
+ MMIO_D(CHICKEN_PIPESL_1(PIPE_C));
+ MMIO_D(_MMIO(0x6671c));
+ MMIO_D(_MMIO(0x66c00));
+ MMIO_D(_MMIO(0x66c04));
+ MMIO_D(HSW_GTT_CACHE_EN);
+ MMIO_D(GEN8_EU_DISABLE0);
+ MMIO_D(GEN8_EU_DISABLE1);
+ MMIO_D(GEN8_EU_DISABLE2);
+ MMIO_D(_MMIO(0xfdc));
+ MMIO_D(GEN8_ROW_CHICKEN);
+ MMIO_D(GEN7_ROW_CHICKEN2);
+ MMIO_D(GEN8_UCGCTL6);
+ MMIO_D(GEN8_L3SQCREG4);
+ MMIO_D(GEN9_SCRATCH_LNCF1);
+ MMIO_F(_MMIO(0x24d0), 48);
+ MMIO_D(_MMIO(0x44484));
+ MMIO_D(_MMIO(0x4448c));
+ MMIO_D(GEN8_L3_LRA_1_GPGPU);
+ MMIO_D(_MMIO(0x110000));
+ MMIO_D(_MMIO(0x48400));
+ MMIO_D(_MMIO(0x6e570));
+ MMIO_D(_MMIO(0x65f10));
+ MMIO_D(_MMIO(0xe194));
+ MMIO_D(_MMIO(0xe188));
+ MMIO_D(HALF_SLICE_CHICKEN2);
+ MMIO_D(_MMIO(0x2580));
+ MMIO_D(_MMIO(0xe220));
+ MMIO_D(_MMIO(0xe230));
+ MMIO_D(_MMIO(0xe240));
+ MMIO_D(_MMIO(0xe260));
+ MMIO_D(_MMIO(0xe270));
+ MMIO_D(_MMIO(0xe280));
+ MMIO_D(_MMIO(0xe2a0));
+ MMIO_D(_MMIO(0xe2b0));
+ MMIO_D(_MMIO(0xe2c0));
+ MMIO_D(_MMIO(0x21f0));
+ MMIO_D(GEN8_GAMW_ECO_DEV_RW_IA);
+ MMIO_D(_MMIO(0x215c));
+ MMIO_F(_MMIO(0x2290), 8);
+ MMIO_D(_MMIO(0x2b00));
+ MMIO_D(_MMIO(0x2360));
+ MMIO_D(_MMIO(0x1c17c));
+ MMIO_D(_MMIO(0x1c178));
+ MMIO_D(_MMIO(0x4260));
+ MMIO_D(_MMIO(0x4264));
+ MMIO_D(_MMIO(0x4268));
+ MMIO_D(_MMIO(0x426c));
+ MMIO_D(_MMIO(0x4270));
+ MMIO_D(_MMIO(0x4094));
+ MMIO_D(_MMIO(0x22178));
+ MMIO_D(_MMIO(0x1a178));
+ MMIO_D(_MMIO(0x1a17c));
+ MMIO_D(_MMIO(0x2217c));
+ MMIO_D(EDP_PSR_IMR);
+ MMIO_D(EDP_PSR_IIR);
+ MMIO_D(_MMIO(0xe4cc));
+ MMIO_D(GEN7_SC_INSTDONE);
+
+ return 0;
+}
+
+static int iterate_pre_skl_mmio(struct intel_gvt_mmio_table_iter *iter)
+{
+ MMIO_D(FORCEWAKE_MT);
+
+ MMIO_D(PCH_ADPA);
+ MMIO_F(_MMIO(_PCH_DPB_AUX_CH_CTL), 6 * 4);
+ MMIO_F(_MMIO(_PCH_DPC_AUX_CH_CTL), 6 * 4);
+ MMIO_F(_MMIO(_PCH_DPD_AUX_CH_CTL), 6 * 4);
+
+ MMIO_F(_MMIO(0x70440), 0xc);
+ MMIO_F(_MMIO(0x71440), 0xc);
+ MMIO_F(_MMIO(0x72440), 0xc);
+ MMIO_F(_MMIO(0x7044c), 0xc);
+ MMIO_F(_MMIO(0x7144c), 0xc);
+ MMIO_F(_MMIO(0x7244c), 0xc);
+
+ return 0;
+}
+
+static int iterate_skl_plus_mmio(struct intel_gvt_mmio_table_iter *iter)
+{
+ struct drm_i915_private *dev_priv = iter->i915;
+
+ MMIO_D(FORCEWAKE_RENDER_GEN9);
+ MMIO_D(FORCEWAKE_ACK_RENDER_GEN9);
+ MMIO_D(FORCEWAKE_GT_GEN9);
+ MMIO_D(FORCEWAKE_ACK_GT_GEN9);
+ MMIO_D(FORCEWAKE_MEDIA_GEN9);
+ MMIO_D(FORCEWAKE_ACK_MEDIA_GEN9);
+ MMIO_F(DP_AUX_CH_CTL(AUX_CH_B), 6 * 4);
+ MMIO_F(DP_AUX_CH_CTL(AUX_CH_C), 6 * 4);
+ MMIO_F(DP_AUX_CH_CTL(AUX_CH_D), 6 * 4);
+ MMIO_D(HSW_PWR_WELL_CTL1);
+ MMIO_D(HSW_PWR_WELL_CTL2);
+ MMIO_D(DBUF_CTL_S(0));
+ MMIO_D(GEN9_PG_ENABLE);
+ MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS);
+ MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS);
+ MMIO_D(GEN9_GAMT_ECO_REG_RW_IA);
+ MMIO_D(MMCD_MISC_CTRL);
+ MMIO_D(CHICKEN_PAR1_1);
+ MMIO_D(DC_STATE_EN);
+ MMIO_D(DC_STATE_DEBUG);
+ MMIO_D(CDCLK_CTL);
+ MMIO_D(LCPLL1_CTL);
+ MMIO_D(LCPLL2_CTL);
+ MMIO_D(_MMIO(_DPLL1_CFGCR1));
+ MMIO_D(_MMIO(_DPLL2_CFGCR1));
+ MMIO_D(_MMIO(_DPLL3_CFGCR1));
+ MMIO_D(_MMIO(_DPLL1_CFGCR2));
+ MMIO_D(_MMIO(_DPLL2_CFGCR2));
+ MMIO_D(_MMIO(_DPLL3_CFGCR2));
+ MMIO_D(DPLL_CTRL1);
+ MMIO_D(DPLL_CTRL2);
+ MMIO_D(DPLL_STATUS);
+ MMIO_D(SKL_PS_WIN_POS(PIPE_A, 0));
+ MMIO_D(SKL_PS_WIN_POS(PIPE_A, 1));
+ MMIO_D(SKL_PS_WIN_POS(PIPE_B, 0));
+ MMIO_D(SKL_PS_WIN_POS(PIPE_B, 1));
+ MMIO_D(SKL_PS_WIN_POS(PIPE_C, 0));
+ MMIO_D(SKL_PS_WIN_POS(PIPE_C, 1));
+ MMIO_D(SKL_PS_WIN_SZ(PIPE_A, 0));
+ MMIO_D(SKL_PS_WIN_SZ(PIPE_A, 1));
+ MMIO_D(SKL_PS_WIN_SZ(PIPE_B, 0));
+ MMIO_D(SKL_PS_WIN_SZ(PIPE_B, 1));
+ MMIO_D(SKL_PS_WIN_SZ(PIPE_C, 0));
+ MMIO_D(SKL_PS_WIN_SZ(PIPE_C, 1));
+ MMIO_D(SKL_PS_CTRL(PIPE_A, 0));
+ MMIO_D(SKL_PS_CTRL(PIPE_A, 1));
+ MMIO_D(SKL_PS_CTRL(PIPE_B, 0));
+ MMIO_D(SKL_PS_CTRL(PIPE_B, 1));
+ MMIO_D(SKL_PS_CTRL(PIPE_C, 0));
+ MMIO_D(SKL_PS_CTRL(PIPE_C, 1));
+ MMIO_D(PLANE_BUF_CFG(PIPE_A, 0));
+ MMIO_D(PLANE_BUF_CFG(PIPE_A, 1));
+ MMIO_D(PLANE_BUF_CFG(PIPE_A, 2));
+ MMIO_D(PLANE_BUF_CFG(PIPE_A, 3));
+ MMIO_D(PLANE_BUF_CFG(PIPE_B, 0));
+ MMIO_D(PLANE_BUF_CFG(PIPE_B, 1));
+ MMIO_D(PLANE_BUF_CFG(PIPE_B, 2));
+ MMIO_D(PLANE_BUF_CFG(PIPE_B, 3));
+ MMIO_D(PLANE_BUF_CFG(PIPE_C, 0));
+ MMIO_D(PLANE_BUF_CFG(PIPE_C, 1));
+ MMIO_D(PLANE_BUF_CFG(PIPE_C, 2));
+ MMIO_D(PLANE_BUF_CFG(PIPE_C, 3));
+ MMIO_D(CUR_BUF_CFG(PIPE_A));
+ MMIO_D(CUR_BUF_CFG(PIPE_B));
+ MMIO_D(CUR_BUF_CFG(PIPE_C));
+ MMIO_F(PLANE_WM(PIPE_A, 0, 0), 4 * 8);
+ MMIO_F(PLANE_WM(PIPE_A, 1, 0), 4 * 8);
+ MMIO_F(PLANE_WM(PIPE_A, 2, 0), 4 * 8);
+ MMIO_F(PLANE_WM(PIPE_B, 0, 0), 4 * 8);
+ MMIO_F(PLANE_WM(PIPE_B, 1, 0), 4 * 8);
+ MMIO_F(PLANE_WM(PIPE_B, 2, 0), 4 * 8);
+ MMIO_F(PLANE_WM(PIPE_C, 0, 0), 4 * 8);
+ MMIO_F(PLANE_WM(PIPE_C, 1, 0), 4 * 8);
+ MMIO_F(PLANE_WM(PIPE_C, 2, 0), 4 * 8);
+ MMIO_F(CUR_WM(PIPE_A, 0), 4 * 8);
+ MMIO_F(CUR_WM(PIPE_B, 0), 4 * 8);
+ MMIO_F(CUR_WM(PIPE_C, 0), 4 * 8);
+ MMIO_D(PLANE_WM_TRANS(PIPE_A, 0));
+ MMIO_D(PLANE_WM_TRANS(PIPE_A, 1));
+ MMIO_D(PLANE_WM_TRANS(PIPE_A, 2));
+ MMIO_D(PLANE_WM_TRANS(PIPE_B, 0));
+ MMIO_D(PLANE_WM_TRANS(PIPE_B, 1));
+ MMIO_D(PLANE_WM_TRANS(PIPE_B, 2));
+ MMIO_D(PLANE_WM_TRANS(PIPE_C, 0));
+ MMIO_D(PLANE_WM_TRANS(PIPE_C, 1));
+ MMIO_D(PLANE_WM_TRANS(PIPE_C, 2));
+ MMIO_D(CUR_WM_TRANS(PIPE_A));
+ MMIO_D(CUR_WM_TRANS(PIPE_B));
+ MMIO_D(CUR_WM_TRANS(PIPE_C));
+ MMIO_D(PLANE_NV12_BUF_CFG(PIPE_A, 0));
+ MMIO_D(PLANE_NV12_BUF_CFG(PIPE_A, 1));
+ MMIO_D(PLANE_NV12_BUF_CFG(PIPE_A, 2));
+ MMIO_D(PLANE_NV12_BUF_CFG(PIPE_A, 3));
+ MMIO_D(PLANE_NV12_BUF_CFG(PIPE_B, 0));
+ MMIO_D(PLANE_NV12_BUF_CFG(PIPE_B, 1));
+ MMIO_D(PLANE_NV12_BUF_CFG(PIPE_B, 2));
+ MMIO_D(PLANE_NV12_BUF_CFG(PIPE_B, 3));
+ MMIO_D(PLANE_NV12_BUF_CFG(PIPE_C, 0));
+ MMIO_D(PLANE_NV12_BUF_CFG(PIPE_C, 1));
+ MMIO_D(PLANE_NV12_BUF_CFG(PIPE_C, 2));
+ MMIO_D(PLANE_NV12_BUF_CFG(PIPE_C, 3));
+ MMIO_D(_MMIO(_REG_701C0(PIPE_A, 1)));
+ MMIO_D(_MMIO(_REG_701C0(PIPE_A, 2)));
+ MMIO_D(_MMIO(_REG_701C0(PIPE_A, 3)));
+ MMIO_D(_MMIO(_REG_701C0(PIPE_A, 4)));
+ MMIO_D(_MMIO(_REG_701C0(PIPE_B, 1)));
+ MMIO_D(_MMIO(_REG_701C0(PIPE_B, 2)));
+ MMIO_D(_MMIO(_REG_701C0(PIPE_B, 3)));
+ MMIO_D(_MMIO(_REG_701C0(PIPE_B, 4)));
+ MMIO_D(_MMIO(_REG_701C0(PIPE_C, 1)));
+ MMIO_D(_MMIO(_REG_701C0(PIPE_C, 2)));
+ MMIO_D(_MMIO(_REG_701C0(PIPE_C, 3)));
+ MMIO_D(_MMIO(_REG_701C0(PIPE_C, 4)));
+ MMIO_D(_MMIO(_REG_701C4(PIPE_A, 1)));
+ MMIO_D(_MMIO(_REG_701C4(PIPE_A, 2)));
+ MMIO_D(_MMIO(_REG_701C4(PIPE_A, 3)));
+ MMIO_D(_MMIO(_REG_701C4(PIPE_A, 4)));
+ MMIO_D(_MMIO(_REG_701C4(PIPE_B, 1)));
+ MMIO_D(_MMIO(_REG_701C4(PIPE_B, 2)));
+ MMIO_D(_MMIO(_REG_701C4(PIPE_B, 3)));
+ MMIO_D(_MMIO(_REG_701C4(PIPE_B, 4)));
+ MMIO_D(_MMIO(_REG_701C4(PIPE_C, 1)));
+ MMIO_D(_MMIO(_REG_701C4(PIPE_C, 2)));
+ MMIO_D(_MMIO(_REG_701C4(PIPE_C, 3)));
+ MMIO_D(_MMIO(_REG_701C4(PIPE_C, 4)));
+ MMIO_D(_MMIO(_PLANE_CTL_3_A));
+ MMIO_D(_MMIO(_PLANE_CTL_3_B));
+ MMIO_D(_MMIO(0x72380));
+ MMIO_D(_MMIO(0x7239c));
+ MMIO_D(_MMIO(_PLANE_SURF_3_A));
+ MMIO_D(_MMIO(_PLANE_SURF_3_B));
+ MMIO_D(DMC_SSP_BASE);
+ MMIO_D(DMC_HTP_SKL);
+ MMIO_D(DMC_LAST_WRITE);
+ MMIO_D(BDW_SCRATCH1);
+ MMIO_D(SKL_DFSM);
+ MMIO_D(DISPIO_CR_TX_BMU_CR0);
+ MMIO_F(GEN9_GFX_MOCS(0), 0x7f8);
+ MMIO_F(GEN7_L3CNTLREG2, 0x80);
+ MMIO_D(RPM_CONFIG0);
+ MMIO_D(_MMIO(0xd08));
+ MMIO_D(RC6_LOCATION);
+ MMIO_D(GEN7_FF_SLICE_CS_CHICKEN1);
+ MMIO_D(GEN9_CS_DEBUG_MODE1);
+ /* TRTT */
+ MMIO_D(TRVATTL3PTRDW(0));
+ MMIO_D(TRVATTL3PTRDW(1));
+ MMIO_D(TRVATTL3PTRDW(2));
+ MMIO_D(TRVATTL3PTRDW(3));
+ MMIO_D(TRVADR);
+ MMIO_D(TRTTE);
+ MMIO_D(_MMIO(0x4dfc));
+ MMIO_D(_MMIO(0x46430));
+ MMIO_D(_MMIO(0x46520));
+ MMIO_D(_MMIO(0xc403c));
+ MMIO_D(GEN8_GARBCNTL);
+ MMIO_D(DMA_CTRL);
+ MMIO_D(_MMIO(0x65900));
+ MMIO_D(GEN6_STOLEN_RESERVED);
+ MMIO_D(_MMIO(0x4068));
+ MMIO_D(_MMIO(0x67054));
+ MMIO_D(_MMIO(0x6e560));
+ MMIO_D(_MMIO(0x6e554));
+ MMIO_D(_MMIO(0x2b20));
+ MMIO_D(_MMIO(0x65f00));
+ MMIO_D(_MMIO(0x65f08));
+ MMIO_D(_MMIO(0x320f0));
+ MMIO_D(_MMIO(0x70034));
+ MMIO_D(_MMIO(0x71034));
+ MMIO_D(_MMIO(0x72034));
+ MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_A)));
+ MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_B)));
+ MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_C)));
+ MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_A)));
+ MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_B)));
+ MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_C)));
+ MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_A)));
+ MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_B)));
+ MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_C)));
+ MMIO_D(_MMIO(0x44500));
+#define CSFE_CHICKEN1_REG(base) _MMIO((base) + 0xD4)
+ MMIO_RING_D(CSFE_CHICKEN1_REG);
+#undef CSFE_CHICKEN1_REG
+ MMIO_D(GEN8_HDC_CHICKEN1);
+ MMIO_D(GEN9_WM_CHICKEN3);
+
+ if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
+ MMIO_D(GAMT_CHKN_BIT_REG);
+ if (!IS_BROXTON(dev_priv))
+ MMIO_D(GEN9_CTX_PREEMPT_REG);
+ MMIO_F(_MMIO(DMC_MMIO_START_RANGE), 0x3000);
+ return 0;
+}
+
+static int iterate_bxt_mmio(struct intel_gvt_mmio_table_iter *iter)
+{
+ struct drm_i915_private *dev_priv = iter->i915;
+
+ MMIO_F(_MMIO(0x80000), 0x3000);
+ MMIO_D(GEN7_SAMPLER_INSTDONE);
+ MMIO_D(GEN7_ROW_INSTDONE);
+ MMIO_D(GEN8_FAULT_TLB_DATA0);
+ MMIO_D(GEN8_FAULT_TLB_DATA1);
+ MMIO_D(ERROR_GEN6);
+ MMIO_D(DONE_REG);
+ MMIO_D(EIR);
+ MMIO_D(PGTBL_ER);
+ MMIO_D(_MMIO(0x4194));
+ MMIO_D(_MMIO(0x4294));
+ MMIO_D(_MMIO(0x4494));
+ MMIO_RING_D(RING_PSMI_CTL);
+ MMIO_RING_D(RING_DMA_FADD);
+ MMIO_RING_D(RING_DMA_FADD_UDW);
+ MMIO_RING_D(RING_IPEHR);
+ MMIO_RING_D(RING_INSTPS);
+ MMIO_RING_D(RING_BBADDR_UDW);
+ MMIO_RING_D(RING_BBSTATE);
+ MMIO_RING_D(RING_IPEIR);
+ MMIO_F(SOFT_SCRATCH(0), 16 * 4);
+ MMIO_D(BXT_P_CR_GT_DISP_PWRON);
+ MMIO_D(BXT_RP_STATE_CAP);
+ MMIO_D(BXT_PHY_CTL_FAMILY(DPIO_PHY0));
+ MMIO_D(BXT_PHY_CTL_FAMILY(DPIO_PHY1));
+ MMIO_D(BXT_PHY_CTL(PORT_A));
+ MMIO_D(BXT_PHY_CTL(PORT_B));
+ MMIO_D(BXT_PHY_CTL(PORT_C));
+ MMIO_D(BXT_PORT_PLL_ENABLE(PORT_A));
+ MMIO_D(BXT_PORT_PLL_ENABLE(PORT_B));
+ MMIO_D(BXT_PORT_PLL_ENABLE(PORT_C));
+ MMIO_D(BXT_PORT_CL1CM_DW0(DPIO_PHY0));
+ MMIO_D(BXT_PORT_CL1CM_DW9(DPIO_PHY0));
+ MMIO_D(BXT_PORT_CL1CM_DW10(DPIO_PHY0));
+ MMIO_D(BXT_PORT_CL1CM_DW28(DPIO_PHY0));
+ MMIO_D(BXT_PORT_CL1CM_DW30(DPIO_PHY0));
+ MMIO_D(BXT_PORT_CL2CM_DW6(DPIO_PHY0));
+ MMIO_D(BXT_PORT_REF_DW3(DPIO_PHY0));
+ MMIO_D(BXT_PORT_REF_DW6(DPIO_PHY0));
+ MMIO_D(BXT_PORT_REF_DW8(DPIO_PHY0));
+ MMIO_D(BXT_PORT_CL1CM_DW0(DPIO_PHY1));
+ MMIO_D(BXT_PORT_CL1CM_DW9(DPIO_PHY1));
+ MMIO_D(BXT_PORT_CL1CM_DW10(DPIO_PHY1));
+ MMIO_D(BXT_PORT_CL1CM_DW28(DPIO_PHY1));
+ MMIO_D(BXT_PORT_CL1CM_DW30(DPIO_PHY1));
+ MMIO_D(BXT_PORT_CL2CM_DW6(DPIO_PHY1));
+ MMIO_D(BXT_PORT_REF_DW3(DPIO_PHY1));
+ MMIO_D(BXT_PORT_REF_DW6(DPIO_PHY1));
+ MMIO_D(BXT_PORT_REF_DW8(DPIO_PHY1));
+ MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY0, DPIO_CH0));
+ MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY0, DPIO_CH0));
+ MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY0, DPIO_CH0));
+ MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY0, DPIO_CH0));
+ MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY0, DPIO_CH0));
+ MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY0, DPIO_CH0));
+ MMIO_D(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH0));
+ MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY0, DPIO_CH0));
+ MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY0, DPIO_CH0));
+ MMIO_D(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH0));
+ MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY0, DPIO_CH0));
+ MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY0, DPIO_CH0));
+ MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY0, DPIO_CH0));
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 0));
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 1));
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 2));
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 3));
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 0));
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 1));
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 2));
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 3));
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 6));
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 8));
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 9));
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 10));
+ MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY0, DPIO_CH1));
+ MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY0, DPIO_CH1));
+ MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY0, DPIO_CH1));
+ MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY0, DPIO_CH1));
+ MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY0, DPIO_CH1));
+ MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY0, DPIO_CH1));
+ MMIO_D(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH1));
+ MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY0, DPIO_CH1));
+ MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY0, DPIO_CH1));
+ MMIO_D(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH1));
+ MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY0, DPIO_CH1));
+ MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY0, DPIO_CH1));
+ MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY0, DPIO_CH1));
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 0));
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 1));
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 2));
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 3));
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 0));
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 1));
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 2));
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 3));
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 6));
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 8));
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 9));
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 10));
+ MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY1, DPIO_CH0));
+ MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY1, DPIO_CH0));
+ MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY1, DPIO_CH0));
+ MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY1, DPIO_CH0));
+ MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY1, DPIO_CH0));
+ MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY1, DPIO_CH0));
+ MMIO_D(BXT_PORT_PCS_DW12_GRP(DPIO_PHY1, DPIO_CH0));
+ MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY1, DPIO_CH0));
+ MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY1, DPIO_CH0));
+ MMIO_D(BXT_PORT_TX_DW3_LN0(DPIO_PHY1, DPIO_CH0));
+ MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY1, DPIO_CH0));
+ MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY1, DPIO_CH0));
+ MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY1, DPIO_CH0));
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 0));
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 1));
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 2));
+ MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 3));
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 0));
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 1));
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 2));
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 3));
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 6));
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 8));
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 9));
+ MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 10));
+ MMIO_D(BXT_DE_PLL_CTL);
+ MMIO_D(BXT_DE_PLL_ENABLE);
+ MMIO_D(BXT_DSI_PLL_CTL);
+ MMIO_D(BXT_DSI_PLL_ENABLE);
+ MMIO_D(GEN9_CLKGATE_DIS_0);
+ MMIO_D(GEN9_CLKGATE_DIS_4);
+ MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_A));
+ MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B));
+ MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_C));
+ MMIO_D(RC6_CTX_BASE);
+ MMIO_D(GEN8_PUSHBUS_CONTROL);
+ MMIO_D(GEN8_PUSHBUS_ENABLE);
+ MMIO_D(GEN8_PUSHBUS_SHIFT);
+ MMIO_D(GEN6_GFXPAUSE);
+ MMIO_D(GEN8_L3SQCREG1);
+ MMIO_D(GEN8_L3CNTLREG);
+ MMIO_D(_MMIO(0x20D8));
+ MMIO_F(GEN8_RING_CS_GPR(RENDER_RING_BASE, 0), 0x40);
+ MMIO_F(GEN8_RING_CS_GPR(GEN6_BSD_RING_BASE, 0), 0x40);
+ MMIO_F(GEN8_RING_CS_GPR(BLT_RING_BASE, 0), 0x40);
+ MMIO_F(GEN8_RING_CS_GPR(VEBOX_RING_BASE, 0), 0x40);
+ MMIO_D(GEN9_CTX_PREEMPT_REG);
+ MMIO_D(GEN8_PRIVATE_PAT_LO);
+
+ return 0;
+}
+
+/**
+ * intel_gvt_iterate_mmio_table - Iterate the GVT MMIO table
+ * @iter: the iterator
+ *
+ * This function is called to iterate the GVT MMIO table when i915 is
+ * taking a snapshot of the HW and GVT is building the MMIO tracking table.
+ */
+int intel_gvt_iterate_mmio_table(struct intel_gvt_mmio_table_iter *iter)
+{
+ struct drm_i915_private *i915 = iter->i915;
+ int ret;
+
+ ret = iterate_generic_mmio(iter);
+ if (ret)
+ goto err;
+
+ if (IS_BROADWELL(i915)) {
+ ret = iterate_bdw_only_mmio(iter);
+ if (ret)
+ goto err;
+ ret = iterate_bdw_plus_mmio(iter);
+ if (ret)
+ goto err;
+ ret = iterate_pre_skl_mmio(iter);
+ if (ret)
+ goto err;
+ } else if (IS_SKYLAKE(i915) ||
+ IS_KABYLAKE(i915) ||
+ IS_COFFEELAKE(i915) ||
+ IS_COMETLAKE(i915)) {
+ ret = iterate_bdw_plus_mmio(iter);
+ if (ret)
+ goto err;
+ ret = iterate_skl_plus_mmio(iter);
+ if (ret)
+ goto err;
+ } else if (IS_BROXTON(i915)) {
+ ret = iterate_bdw_plus_mmio(iter);
+ if (ret)
+ goto err;
+ ret = iterate_skl_plus_mmio(iter);
+ if (ret)
+ goto err;
+ ret = iterate_bxt_mmio(iter);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+err:
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(intel_gvt_iterate_mmio_table, I915_GVT);
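A minimal sketch of how a GVT-side consumer might drive this iterator, for
orientation only. The ->handle_mmio and ->data members are assumptions about
struct intel_gvt_mmio_table_iter; only ->i915 is visible in the hunks above,
so treat the callback signature as hypothetical.

	/* Hypothetical callback: count one entry per dword in each range. */
	static int count_mmio_cb(struct intel_gvt_mmio_table_iter *iter,
				 u32 offset, u32 size)
	{
		u32 *count = iter->data;

		*count += size / 4;
		return 0;
	}

	static u32 count_tracked_mmio(struct drm_i915_private *i915)
	{
		u32 count = 0;
		struct intel_gvt_mmio_table_iter iter = {
			.i915 = i915,
			.data = &count,
			.handle_mmio = count_mmio_cb,	/* assumed member */
		};

		if (intel_gvt_iterate_mmio_table(&iter))
			return 0;

		return count;
	}

In-tree, GVT would register one tracking entry per register rather than
counting; the control flow is the same.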
diff --git a/drivers/gpu/drm/i915/intel_pcode.c b/drivers/gpu/drm/i915/intel_pcode.c
index ac727546868e..a234d9b4ed14 100644
--- a/drivers/gpu/drm/i915/intel_pcode.c
+++ b/drivers/gpu/drm/i915/intel_pcode.c
@@ -52,14 +52,12 @@ static int gen7_check_mailbox_status(u32 mbox)
}
}
-static int __snb_pcode_rw(struct drm_i915_private *i915, u32 mbox,
+static int __snb_pcode_rw(struct intel_uncore *uncore, u32 mbox,
u32 *val, u32 *val1,
int fast_timeout_us, int slow_timeout_ms,
bool is_read)
{
- struct intel_uncore *uncore = &i915->uncore;
-
- lockdep_assert_held(&i915->sb_lock);
+ lockdep_assert_held(&uncore->i915->sb_lock);
/*
* GEN6_PCODE_* are outside of the forcewake domain, we can use
@@ -88,22 +86,22 @@ static int __snb_pcode_rw(struct drm_i915_private *i915, u32 mbox,
if (is_read && val1)
*val1 = intel_uncore_read_fw(uncore, GEN6_PCODE_DATA1);
- if (GRAPHICS_VER(i915) > 6)
+ if (GRAPHICS_VER(uncore->i915) > 6)
return gen7_check_mailbox_status(mbox);
else
return gen6_check_mailbox_status(mbox);
}
-int snb_pcode_read(struct drm_i915_private *i915, u32 mbox, u32 *val, u32 *val1)
+int snb_pcode_read(struct intel_uncore *uncore, u32 mbox, u32 *val, u32 *val1)
{
int err;
- mutex_lock(&i915->sb_lock);
- err = __snb_pcode_rw(i915, mbox, val, val1, 500, 20, true);
- mutex_unlock(&i915->sb_lock);
+ mutex_lock(&uncore->i915->sb_lock);
+ err = __snb_pcode_rw(uncore, mbox, val, val1, 500, 20, true);
+ mutex_unlock(&uncore->i915->sb_lock);
if (err) {
- drm_dbg(&i915->drm,
+ drm_dbg(&uncore->i915->drm,
"warning: pcode (read from mbox %x) mailbox access failed for %ps: %d\n",
mbox, __builtin_return_address(0), err);
}
@@ -111,18 +109,18 @@ int snb_pcode_read(struct drm_i915_private *i915, u32 mbox, u32 *val, u32 *val1)
return err;
}
-int snb_pcode_write_timeout(struct drm_i915_private *i915, u32 mbox, u32 val,
+int snb_pcode_write_timeout(struct intel_uncore *uncore, u32 mbox, u32 val,
int fast_timeout_us, int slow_timeout_ms)
{
int err;
- mutex_lock(&i915->sb_lock);
- err = __snb_pcode_rw(i915, mbox, &val, NULL,
+ mutex_lock(&uncore->i915->sb_lock);
+ err = __snb_pcode_rw(uncore, mbox, &val, NULL,
fast_timeout_us, slow_timeout_ms, false);
- mutex_unlock(&i915->sb_lock);
+ mutex_unlock(&uncore->i915->sb_lock);
if (err) {
- drm_dbg(&i915->drm,
+ drm_dbg(&uncore->i915->drm,
"warning: pcode (write of 0x%08x to mbox %x) mailbox access failed for %ps: %d\n",
val, mbox, __builtin_return_address(0), err);
}
@@ -130,18 +128,18 @@ int snb_pcode_write_timeout(struct drm_i915_private *i915, u32 mbox, u32 val,
return err;
}
-static bool skl_pcode_try_request(struct drm_i915_private *i915, u32 mbox,
+static bool skl_pcode_try_request(struct intel_uncore *uncore, u32 mbox,
u32 request, u32 reply_mask, u32 reply,
u32 *status)
{
- *status = __snb_pcode_rw(i915, mbox, &request, NULL, 500, 0, true);
+ *status = __snb_pcode_rw(uncore, mbox, &request, NULL, 500, 0, true);
return (*status == 0) && ((request & reply_mask) == reply);
}
/**
* skl_pcode_request - send PCODE request until acknowledgment
- * @i915: device private
+ * @uncore: uncore
* @mbox: PCODE mailbox ID the request is targeted for
* @request: request ID
* @reply_mask: mask used to check for request acknowledgment
@@ -158,16 +156,16 @@ static bool skl_pcode_try_request(struct drm_i915_private *i915, u32 mbox,
* Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
* other error as reported by PCODE.
*/
-int skl_pcode_request(struct drm_i915_private *i915, u32 mbox, u32 request,
+int skl_pcode_request(struct intel_uncore *uncore, u32 mbox, u32 request,
u32 reply_mask, u32 reply, int timeout_base_ms)
{
u32 status;
int ret;
- mutex_lock(&i915->sb_lock);
+ mutex_lock(&uncore->i915->sb_lock);
#define COND \
- skl_pcode_try_request(i915, mbox, request, reply_mask, reply, &status)
+ skl_pcode_try_request(uncore, mbox, request, reply_mask, reply, &status)
/*
* Prime the PCODE by doing a request first. Normally it guarantees
@@ -193,35 +191,58 @@ int skl_pcode_request(struct drm_i915_private *i915, u32 mbox, u32 request,
* requests, and for any quirks of the PCODE firmware that delay
* the request completion.
*/
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(&uncore->i915->drm,
"PCODE timeout, retrying with preemption disabled\n");
- drm_WARN_ON_ONCE(&i915->drm, timeout_base_ms > 3);
+ drm_WARN_ON_ONCE(&uncore->i915->drm, timeout_base_ms > 3);
preempt_disable();
ret = wait_for_atomic(COND, 50);
preempt_enable();
out:
- mutex_unlock(&i915->sb_lock);
+ mutex_unlock(&uncore->i915->sb_lock);
return status ? status : ret;
#undef COND
}
-int intel_pcode_init(struct drm_i915_private *i915)
+int intel_pcode_init(struct intel_uncore *uncore)
{
- int ret = 0;
+ if (!IS_DGFX(uncore->i915))
+ return 0;
+
+ return skl_pcode_request(uncore, DG1_PCODE_STATUS,
+ DG1_UNCORE_GET_INIT_STATUS,
+ DG1_UNCORE_INIT_STATUS_COMPLETE,
+ DG1_UNCORE_INIT_STATUS_COMPLETE, 180000);
+}
+
+int snb_pcode_read_p(struct intel_uncore *uncore, u32 mbcmd, u32 p1, u32 p2, u32 *val)
+{
+ intel_wakeref_t wakeref;
+ u32 mbox;
+ int err;
- if (!IS_DGFX(i915))
- return ret;
+ mbox = REG_FIELD_PREP(GEN6_PCODE_MB_COMMAND, mbcmd)
+ | REG_FIELD_PREP(GEN6_PCODE_MB_PARAM1, p1)
+ | REG_FIELD_PREP(GEN6_PCODE_MB_PARAM2, p2);
- ret = skl_pcode_request(i915, DG1_PCODE_STATUS,
- DG1_UNCORE_GET_INIT_STATUS,
- DG1_UNCORE_INIT_STATUS_COMPLETE,
- DG1_UNCORE_INIT_STATUS_COMPLETE, 180000);
+ with_intel_runtime_pm(uncore->rpm, wakeref)
+ err = snb_pcode_read(uncore, mbox, val, NULL);
- drm_dbg(&i915->drm, "PCODE init status %d\n", ret);
+ return err;
+}
- if (ret)
- drm_err(&i915->drm, "Pcode did not report uncore initialization completion!\n");
+int snb_pcode_write_p(struct intel_uncore *uncore, u32 mbcmd, u32 p1, u32 p2, u32 val)
+{
+ intel_wakeref_t wakeref;
+ u32 mbox;
+ int err;
- return ret;
+ mbox = REG_FIELD_PREP(GEN6_PCODE_MB_COMMAND, mbcmd)
+ | REG_FIELD_PREP(GEN6_PCODE_MB_PARAM1, p1)
+ | REG_FIELD_PREP(GEN6_PCODE_MB_PARAM2, p2);
+
+ with_intel_runtime_pm(uncore->rpm, wakeref)
+ err = snb_pcode_write(uncore, mbox, val);
+
+ return err;
}
diff --git a/drivers/gpu/drm/i915/intel_pcode.h b/drivers/gpu/drm/i915/intel_pcode.h
index 0962a17fac48..8d2198e29422 100644
--- a/drivers/gpu/drm/i915/intel_pcode.h
+++ b/drivers/gpu/drm/i915/intel_pcode.h
@@ -8,17 +8,23 @@
#include <linux/types.h>
-struct drm_i915_private;
+struct intel_uncore;
-int snb_pcode_read(struct drm_i915_private *i915, u32 mbox, u32 *val, u32 *val1);
-int snb_pcode_write_timeout(struct drm_i915_private *i915, u32 mbox, u32 val,
+int snb_pcode_read(struct intel_uncore *uncore, u32 mbox, u32 *val, u32 *val1);
+int snb_pcode_write_timeout(struct intel_uncore *uncore, u32 mbox, u32 val,
int fast_timeout_us, int slow_timeout_ms);
-#define snb_pcode_write(i915, mbox, val) \
- snb_pcode_write_timeout(i915, mbox, val, 500, 0)
+#define snb_pcode_write(uncore, mbox, val) \
+ snb_pcode_write_timeout(uncore, mbox, val, 500, 0)
-int skl_pcode_request(struct drm_i915_private *i915, u32 mbox, u32 request,
+int skl_pcode_request(struct intel_uncore *uncore, u32 mbox, u32 request,
u32 reply_mask, u32 reply, int timeout_base_ms);
-int intel_pcode_init(struct drm_i915_private *i915);
+int intel_pcode_init(struct intel_uncore *uncore);
+
+/*
+ * Helpers for dGfx PCODE mailbox command formatting
+ */
+int snb_pcode_read_p(struct intel_uncore *uncore, u32 mbcmd, u32 p1, u32 p2, u32 *val);
+int snb_pcode_write_p(struct intel_uncore *uncore, u32 mbcmd, u32 p1, u32 p2, u32 val);
#endif /* _INTEL_PCODE_H */
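Every pcode caller now passes an intel_uncore instead of a drm_i915_private,
so GT-only code no longer needs the full device private. A hedged sketch of
the migration pattern (wrapper name hypothetical); the intel_pm.c hunks below
show the same conversion in-tree:

	static int read_mem_latency(struct drm_i915_private *dev_priv, u32 *val)
	{
		/* Pre-patch form, for comparison:
		 *   return snb_pcode_read(dev_priv, GEN9_PCODE_READ_MEM_LATENCY,
		 *                         val, NULL);
		 */
		return snb_pcode_read(&dev_priv->uncore,
				      GEN9_PCODE_READ_MEM_LATENCY, val, NULL);
	}

The new snb_pcode_read_p()/snb_pcode_write_p() helpers additionally compose
the mailbox word from (mbcmd, p1, p2) via REG_FIELD_PREP and take a runtime-PM
wakeref around the access, as shown in the intel_pcode.c hunk above.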
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index b3a5c3f5ce14..42db41c8e3b3 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2874,7 +2874,7 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
/* read the first set of memory latencies[0:3] */
val = 0; /* data0 to be programmed to 0 for first set */
- ret = snb_pcode_read(dev_priv, GEN9_PCODE_READ_MEM_LATENCY,
+ ret = snb_pcode_read(&dev_priv->uncore, GEN9_PCODE_READ_MEM_LATENCY,
&val, NULL);
if (ret) {
@@ -2893,7 +2893,7 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
/* read the second set of memory latencies[4:7] */
val = 1; /* data0 to be programmed to 1 for second set */
- ret = snb_pcode_read(dev_priv, GEN9_PCODE_READ_MEM_LATENCY,
+ ret = snb_pcode_read(&dev_priv->uncore, GEN9_PCODE_READ_MEM_LATENCY,
&val, NULL);
if (ret) {
drm_err(&dev_priv->drm,
@@ -3679,7 +3679,7 @@ intel_sagv_block_time(struct drm_i915_private *dev_priv)
u32 val = 0;
int ret;
- ret = snb_pcode_read(dev_priv,
+ ret = snb_pcode_read(&dev_priv->uncore,
GEN12_PCODE_READ_SAGV_BLOCK_TIME_US,
&val, NULL);
if (ret) {
@@ -3748,7 +3748,7 @@ static void skl_sagv_enable(struct drm_i915_private *dev_priv)
return;
drm_dbg_kms(&dev_priv->drm, "Enabling SAGV\n");
- ret = snb_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL,
+ ret = snb_pcode_write(&dev_priv->uncore, GEN9_PCODE_SAGV_CONTROL,
GEN9_SAGV_ENABLE);
/* We don't need to wait for SAGV when enabling */
@@ -3781,7 +3781,7 @@ static void skl_sagv_disable(struct drm_i915_private *dev_priv)
drm_dbg_kms(&dev_priv->drm, "Disabling SAGV\n");
/* bspec says to keep retrying for at least 1 ms */
- ret = skl_pcode_request(dev_priv, GEN9_PCODE_SAGV_CONTROL,
+ ret = skl_pcode_request(&dev_priv->uncore, GEN9_PCODE_SAGV_CONTROL,
GEN9_SAGV_DISABLE,
GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED,
1);
@@ -5475,6 +5475,25 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
}
blocks = fixed16_to_u32_round_up(selected_result) + 1;
+ /*
+ * Let's have blocks at minimum equivalent to plane_blocks_per_line,
+ * as there will be at least one line for the lines configuration. This
+ * is a workaround for FIFO underruns observed with resolutions like
+ * 4k 60 Hz in single-channel DRAM configurations.
+ *
+ * As per Bspec 49325, if the ddb allocation can hold at least
+ * one plane_blocks_per_line, we should have selected method2 in
+ * the above logic. Assuming that modern versions have enough dbuf
+ * and method2 guarantees blocks equivalent to at least 1 line,
+ * select the blocks as plane_blocks_per_line.
+ *
+ * TODO: Revisit the logic when we have a better understanding of DRAM
+ * channels' impact on the level 0 memory latency and the relevant
+ * wm calculations.
+ */
+ if (skl_wm_has_lines(dev_priv, level))
+ blocks = max(blocks,
+ fixed16_to_u32_round_up(wp->plane_blocks_per_line));
lines = div_round_up_fixed16(selected_result,
wp->plane_blocks_per_line);
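To make the clamp concrete, a minimal standalone sketch of the arithmetic.
The 16.16 fixed-point layout mirrors i915's uint_fixed_16_16_t (an assumption
here), and the input values are hypothetical:

	#include <stdint.h>
	#include <stdio.h>

	/* 16.16 fixed point, assumed to match i915's uint_fixed_16_16_t. */
	typedef uint32_t fix16;

	static uint32_t fix16_to_u32_round_up(fix16 v)
	{
		return (v + 0xffffu) >> 16;	/* DIV_ROUND_UP(v, 1 << 16) */
	}

	int main(void)
	{
		fix16 plane_blocks_per_line = (33u << 16) | 0x8000u; /* 33.5 */
		uint32_t blocks = 20;	/* hypothetical method-1 result */
		uint32_t min_blocks = fix16_to_u32_round_up(plane_blocks_per_line);

		/* The clamp from the hunk above: never allocate fewer
		 * blocks than one full line. */
		if (blocks < min_blocks)
			blocks = min_blocks;

		printf("blocks = %u\n", blocks);	/* prints 34 */
		return 0;
	}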
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index 6114e013092b..73eb53edb8de 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -1056,7 +1056,8 @@ static int igt_lmem_write_cpu(void *arg)
obj->mm.pages->sgl, I915_CACHE_NONE,
true, 0xdeadbeaf, &rq);
if (rq) {
- dma_resv_add_excl_fence(obj->base.resv, &rq->fence);
+ dma_resv_add_fence(obj->base.resv, &rq->fence,
+ DMA_RESV_USAGE_WRITE);
i915_request_put(rq);
}
diff --git a/drivers/gpu/drm/ingenic/Kconfig b/drivers/gpu/drm/ingenic/Kconfig
index 001f59fb06d5..090830bcbde7 100644
--- a/drivers/gpu/drm/ingenic/Kconfig
+++ b/drivers/gpu/drm/ingenic/Kconfig
@@ -24,4 +24,13 @@ config DRM_INGENIC_IPU
The Image Processing Unit (IPU) will appear as a second primary plane.
+config DRM_INGENIC_DW_HDMI
+ tristate "Ingenic specific support for Synopsys DW HDMI"
+ depends on MACH_JZ4780
+ select DRM_DW_HDMI
+ help
+ Choose this option to enable the Synopsys DesignWare HDMI-based driver.
+ If you want to enable HDMI on an Ingenic JZ4780-based SoC, you should
+ select this option.
+
endif
diff --git a/drivers/gpu/drm/ingenic/Makefile b/drivers/gpu/drm/ingenic/Makefile
index d313326bdddb..f10cc1c5a5f2 100644
--- a/drivers/gpu/drm/ingenic/Makefile
+++ b/drivers/gpu/drm/ingenic/Makefile
@@ -1,3 +1,4 @@
obj-$(CONFIG_DRM_INGENIC) += ingenic-drm.o
ingenic-drm-y = ingenic-drm-drv.o
ingenic-drm-$(CONFIG_DRM_INGENIC_IPU) += ingenic-ipu.o
+obj-$(CONFIG_DRM_INGENIC_DW_HDMI) += ingenic-dw-hdmi.o
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
index a4f5a323f490..8eb0ad501a7b 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
+++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
@@ -833,6 +833,32 @@ static int ingenic_drm_bridge_atomic_check(struct drm_bridge *bridge,
}
}
+static u32 *
+ingenic_drm_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts)
+{
+ switch (output_fmt) {
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ case MEDIA_BUS_FMT_RGB666_1X18:
+ case MEDIA_BUS_FMT_RGB565_1X16:
+ case MEDIA_BUS_FMT_RGB888_3X8:
+ case MEDIA_BUS_FMT_RGB888_3X8_DELTA:
+ break;
+ default:
+ *num_input_fmts = 0;
+ return NULL;
+ }
+
+ return drm_atomic_helper_bridge_propagate_bus_fmt(bridge, bridge_state,
+ crtc_state, conn_state,
+ output_fmt,
+ num_input_fmts);
+}
+
static irqreturn_t ingenic_drm_irq_handler(int irq, void *arg)
{
struct ingenic_drm *priv = drm_device_get_priv(arg);
@@ -984,7 +1010,7 @@ static const struct drm_bridge_funcs ingenic_drm_bridge_funcs = {
.atomic_reset = drm_atomic_helper_bridge_reset,
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
- .atomic_get_input_bus_fmts = drm_atomic_helper_bridge_propagate_bus_fmt,
+ .atomic_get_input_bus_fmts = ingenic_drm_bridge_atomic_get_input_bus_fmts,
};
static const struct drm_mode_config_funcs ingenic_drm_mode_config_funcs = {
diff --git a/drivers/gpu/drm/ingenic/ingenic-dw-hdmi.c b/drivers/gpu/drm/ingenic/ingenic-dw-hdmi.c
new file mode 100644
index 000000000000..72f8b44998a5
--- /dev/null
+++ b/drivers/gpu/drm/ingenic/ingenic-dw-hdmi.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2011-2013 Freescale Semiconductor, Inc.
+ * Copyright (C) 2019, 2020 Paul Boddie <paul@boddie.org.uk>
+ *
+ * Derived from dw_hdmi-imx.c with i.MX portions removed.
+ */
+
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#include <drm/bridge/dw_hdmi.h>
+#include <drm/drm_of.h>
+#include <drm/drm_print.h>
+
+static const struct dw_hdmi_mpll_config ingenic_mpll_cfg[] = {
+ { 45250000, { { 0x01e0, 0x0000 }, { 0x21e1, 0x0000 }, { 0x41e2, 0x0000 } } },
+ { 92500000, { { 0x0140, 0x0005 }, { 0x2141, 0x0005 }, { 0x4142, 0x0005 } } },
+ { 148500000, { { 0x00a0, 0x000a }, { 0x20a1, 0x000a }, { 0x40a2, 0x000a } } },
+ { 216000000, { { 0x00a0, 0x000a }, { 0x2001, 0x000f }, { 0x4002, 0x000f } } },
+ { ~0UL, { { 0x0000, 0x0000 }, { 0x0000, 0x0000 }, { 0x0000, 0x0000 } } }
+};
+
+static const struct dw_hdmi_curr_ctrl ingenic_cur_ctr[] = {
+ /*pixelclk bpp8 bpp10 bpp12 */
+ { 54000000, { 0x091c, 0x091c, 0x06dc } },
+ { 58400000, { 0x091c, 0x06dc, 0x06dc } },
+ { 72000000, { 0x06dc, 0x06dc, 0x091c } },
+ { 74250000, { 0x06dc, 0x0b5c, 0x091c } },
+ { 118800000, { 0x091c, 0x091c, 0x06dc } },
+ { 216000000, { 0x06dc, 0x0b5c, 0x091c } },
+ { ~0UL, { 0x0000, 0x0000, 0x0000 } },
+};
+
+/*
+ * Resistance term 133Ohm Cfg
+ * PREEMP config 0.00
+ * TX/CK level 10
+ */
+static const struct dw_hdmi_phy_config ingenic_phy_config[] = {
+ /*pixelclk symbol term vlev */
+ { 216000000, 0x800d, 0x0005, 0x01ad},
+ { ~0UL, 0x0000, 0x0000, 0x0000}
+};
+
+static enum drm_mode_status
+ingenic_dw_hdmi_mode_valid(struct dw_hdmi *hdmi, void *data,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode)
+{
+ if (mode->clock < 13500)
+ return MODE_CLOCK_LOW;
+ /* FIXME: Hardware is capable of 270MHz, but setup data is missing. */
+ if (mode->clock > 216000)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+static struct dw_hdmi_plat_data ingenic_dw_hdmi_plat_data = {
+ .mpll_cfg = ingenic_mpll_cfg,
+ .cur_ctr = ingenic_cur_ctr,
+ .phy_config = ingenic_phy_config,
+ .mode_valid = ingenic_dw_hdmi_mode_valid,
+ .output_port = 1,
+};
+
+static const struct of_device_id ingenic_dw_hdmi_dt_ids[] = {
+ { .compatible = "ingenic,jz4780-dw-hdmi" },
+ { /* Sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, ingenic_dw_hdmi_dt_ids);
+
+static void ingenic_dw_hdmi_cleanup(void *data)
+{
+ struct dw_hdmi *hdmi = (struct dw_hdmi *)data;
+
+ dw_hdmi_remove(hdmi);
+}
+
+static int ingenic_dw_hdmi_probe(struct platform_device *pdev)
+{
+ struct dw_hdmi *hdmi;
+
+ hdmi = dw_hdmi_probe(pdev, &ingenic_dw_hdmi_plat_data);
+ if (IS_ERR(hdmi))
+ return PTR_ERR(hdmi);
+
+ return devm_add_action_or_reset(&pdev->dev, ingenic_dw_hdmi_cleanup, hdmi);
+}
+
+static struct platform_driver ingenic_dw_hdmi_driver = {
+ .probe = ingenic_dw_hdmi_probe,
+ .driver = {
+ .name = "dw-hdmi-ingenic",
+ .of_match_table = ingenic_dw_hdmi_dt_ids,
+ },
+};
+module_platform_driver(ingenic_dw_hdmi_driver);
+
+MODULE_DESCRIPTION("JZ4780 Specific DW-HDMI Driver Extension");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:dw-hdmi-ingenic");
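The probe above leans on devm_add_action_or_reset() for teardown. A hedged
generic sketch of that pattern (all names hypothetical, not from this driver):

	static void my_cleanup(void *data)
	{
		my_state_destroy(data);	/* hypothetical teardown helper */
	}

	static int my_probe(struct platform_device *pdev)
	{
		struct my_state *st = my_state_create(pdev); /* hypothetical */

		if (IS_ERR(st))
			return PTR_ERR(st);

		/* Runs my_cleanup(st) on driver detach; if registering the
		 * action itself fails, it is invoked immediately and the
		 * error returned, so probe needs no manual unwind path. */
		return devm_add_action_or_reset(&pdev->dev, my_cleanup, st);
	}

This is why ingenic_dw_hdmi_probe() can return the helper's result directly:
dw_hdmi_remove() runs exactly once on any exit path after a successful
dw_hdmi_probe().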
diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
index e0a11ee0e86d..0f1ca0b0db49 100644
--- a/drivers/gpu/drm/lima/lima_gem.c
+++ b/drivers/gpu/drm/lima/lima_gem.c
@@ -364,10 +364,9 @@ int lima_gem_submit(struct drm_file *file, struct lima_submit *submit)
fence = lima_sched_context_queue_task(submit->task);
for (i = 0; i < submit->nr_bos; i++) {
- if (submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE)
- dma_resv_add_excl_fence(lima_bo_resv(bos[i]), fence);
- else
- dma_resv_add_shared_fence(lima_bo_resv(bos[i]), fence);
+ dma_resv_add_fence(lima_bo_resv(bos[i]), fence,
+ submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE ?
+ DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ);
}
drm_gem_unlock_reservations((struct drm_gem_object **)bos,
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 02b9ae65a96a..01bbb5f2d462 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -848,7 +848,8 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
long ret;
- ret = dma_resv_wait_timeout(obj->resv, write, true, remain);
+ ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(write),
+ true, remain);
if (ret == 0)
return remain == 0 ? -EBUSY : -ETIMEDOUT;
else if (ret < 0)
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 3164db8be893..8d1eef914ba8 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -395,9 +395,11 @@ static void submit_attach_object_fences(struct msm_gem_submit *submit)
struct drm_gem_object *obj = &submit->bos[i].obj->base;
if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
- dma_resv_add_excl_fence(obj->resv, submit->user_fence);
+ dma_resv_add_fence(obj->resv, submit->user_fence,
+ DMA_RESV_USAGE_WRITE);
else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
- dma_resv_add_shared_fence(obj->resv, submit->user_fence);
+ dma_resv_add_fence(obj->resv, submit->user_fence,
+ DMA_RESV_USAGE_READ);
}
}
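The lima and msm hunks above are two instances of the tree-wide conversion in
this merge: the separate exclusive/shared fence slots give way to a single
dma_resv_add_fence() that takes an explicit dma_resv_usage. A hedged sketch of
the publish/wait pattern (function and variable names hypothetical; callers
still hold the reservation lock and have reserved a fence slot, elided here):

	static long bo_sync_sketch(struct dma_resv *resv,
				   struct dma_fence *fence,
				   bool is_write, unsigned long timeout)
	{
		/* One entry point replaces dma_resv_add_excl_fence()
		 * (writers) and dma_resv_add_shared_fence() (readers). */
		dma_resv_add_fence(resv, fence,
				   is_write ? DMA_RESV_USAGE_WRITE :
					      DMA_RESV_USAGE_READ);

		/* dma_resv_usage_rw() picks what to wait on: a write must
		 * wait on readers and writers (DMA_RESV_USAGE_READ), a read
		 * only on prior writers (DMA_RESV_USAGE_WRITE). */
		return dma_resv_wait_timeout(resv, dma_resv_usage_rw(is_write),
					     true, timeout);
	}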
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base917c.c b/drivers/gpu/drm/nouveau/dispnv50/base917c.c
index a1baed4fe0e9..ca260509a4f1 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base917c.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/base917c.c
@@ -22,7 +22,7 @@
#include "base.h"
#include "atom.h"
-const u32
+static const u32
base917c_format[] = {
DRM_FORMAT_C8,
DRM_FORMAT_XRGB8888,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index e2faf92e4831..8642b84ea20c 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -558,7 +558,8 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
asyw->image.handle[0] = ctxdma->object.handle;
}
- ret = dma_resv_get_singleton(nvbo->bo.base.resv, false,
+ ret = dma_resv_get_singleton(nvbo->bo.base.resv,
+ DMA_RESV_USAGE_WRITE,
&asyw->state.fence);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 74f8652d2bd3..05076e530e7d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -962,11 +962,11 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
struct dma_fence *fence;
int ret;
- /* TODO: This is actually a memory management dependency */
- ret = dma_resv_get_singleton(bo->base.resv, false, &fence);
+ ret = dma_resv_get_singleton(bo->base.resv, DMA_RESV_USAGE_WRITE,
+ &fence);
if (ret)
- dma_resv_wait_timeout(bo->base.resv, false, false,
- MAX_SCHEDULE_TIMEOUT);
+ dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_WRITE,
+ false, MAX_SCHEDULE_TIMEOUT);
nv10_bo_put_tile_region(dev, *old_tile, fence);
*old_tile = new_tile;
@@ -1308,10 +1308,11 @@ nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool excl
{
struct dma_resv *resv = nvbo->bo.base.resv;
- if (exclusive)
- dma_resv_add_excl_fence(resv, &fence->base);
- else if (fence)
- dma_resv_add_shared_fence(resv, &fence->base);
+ if (!fence)
+ return;
+
+ dma_resv_add_fence(resv, &fence->base, exclusive ?
+ DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ);
}
static void
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 0268259e97eb..7f01dcf81fab 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -350,17 +350,21 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
if (ret)
return ret;
- /* Waiting for the exclusive fence first causes performance regressions
- * under some circumstances. So manually wait for the shared ones first.
+ /* Waiting for the writes first causes performance regressions
+ * under some circumstances. So manually wait for the reads first.
*/
for (i = 0; i < 2; ++i) {
struct dma_resv_iter cursor;
struct dma_fence *fence;
- dma_resv_for_each_fence(&cursor, resv, exclusive, fence) {
+ dma_resv_for_each_fence(&cursor, resv,
+ dma_resv_usage_rw(exclusive),
+ fence) {
+ enum dma_resv_usage usage;
struct nouveau_fence *f;
- if (i == 0 && dma_resv_iter_is_exclusive(&cursor))
+ usage = dma_resv_iter_usage(&cursor);
+ if (i == 0 && usage == DMA_RESV_USAGE_WRITE)
continue;
f = nouveau_local_fence(fence, chan->drm);
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 9416bee92141..fab542a758ff 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -962,7 +962,8 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
return -ENOENT;
nvbo = nouveau_gem_object(gem);
- lret = dma_resv_wait_timeout(nvbo->bo.base.resv, write, true,
+ lret = dma_resv_wait_timeout(nvbo->bo.base.resv,
+ dma_resv_usage_rw(write), true,
no_wait ? 0 : 30 * HZ);
if (!lret)
ret = -EBUSY;
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index 60019d0532fc..347488685f74 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -93,22 +93,7 @@ int nouveau_gem_prime_pin(struct drm_gem_object *obj)
if (ret)
return -EINVAL;
- ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
- if (ret)
- goto error;
-
- if (nvbo->bo.moving)
- ret = dma_fence_wait(nvbo->bo.moving, true);
-
- ttm_bo_unreserve(&nvbo->bo);
- if (ret)
- goto error;
-
- return ret;
-
-error:
- nouveau_bo_unpin(nvbo);
- return ret;
+ return 0;
}
void nouveau_gem_prime_unpin(struct drm_gem_object *obj)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
index 030640bb3dca..ab3760e804b8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
@@ -143,7 +143,7 @@ gf108_gr = {
}
};
-const struct gf100_gr_fwif
+static const struct gf100_gr_fwif
gf108_gr_fwif[] = {
{ -1, gf100_gr_load, &gf108_gr },
{ -1, gf100_gr_nofw, &gf108_gr },
diff --git a/drivers/gpu/drm/panel/panel-lvds.c b/drivers/gpu/drm/panel/panel-lvds.c
index 27a1c9923b09..eca067e78579 100644
--- a/drivers/gpu/drm/panel/panel-lvds.c
+++ b/drivers/gpu/drm/panel/panel-lvds.c
@@ -30,9 +30,9 @@ struct panel_lvds {
const char *label;
unsigned int width;
unsigned int height;
- struct videomode video_mode;
+ struct drm_display_mode dmode;
+ u32 bus_flags;
unsigned int bus_format;
- bool data_mirror;
struct regulator *supply;
@@ -87,21 +87,18 @@ static int panel_lvds_get_modes(struct drm_panel *panel,
struct panel_lvds *lvds = to_panel_lvds(panel);
struct drm_display_mode *mode;
- mode = drm_mode_create(connector->dev);
+ mode = drm_mode_duplicate(connector->dev, &lvds->dmode);
if (!mode)
return 0;
- drm_display_mode_from_videomode(&lvds->video_mode, mode);
mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
drm_mode_probed_add(connector, mode);
- connector->display_info.width_mm = lvds->width;
- connector->display_info.height_mm = lvds->height;
+ connector->display_info.width_mm = lvds->dmode.width_mm;
+ connector->display_info.height_mm = lvds->dmode.height_mm;
drm_display_info_set_bus_formats(&connector->display_info,
&lvds->bus_format, 1);
- connector->display_info.bus_flags = lvds->data_mirror
- ? DRM_BUS_FLAG_DATA_LSB_TO_MSB
- : DRM_BUS_FLAG_DATA_MSB_TO_LSB;
+ connector->display_info.bus_flags = lvds->bus_flags;
drm_connector_set_panel_orientation(connector, lvds->orientation);
return 1;
@@ -116,7 +113,6 @@ static const struct drm_panel_funcs panel_lvds_funcs = {
static int panel_lvds_parse_dt(struct panel_lvds *lvds)
{
struct device_node *np = lvds->dev->of_node;
- struct display_timing timing;
int ret;
ret = of_drm_get_panel_orientation(np, &lvds->orientation);
@@ -125,23 +121,20 @@ static int panel_lvds_parse_dt(struct panel_lvds *lvds)
return ret;
}
- ret = of_get_display_timing(np, "panel-timing", &timing);
+ ret = of_get_drm_panel_display_mode(np, &lvds->dmode, &lvds->bus_flags);
if (ret < 0) {
dev_err(lvds->dev, "%pOF: problems parsing panel-timing (%d)\n",
np, ret);
return ret;
}
- videomode_from_timing(&timing, &lvds->video_mode);
-
- ret = of_property_read_u32(np, "width-mm", &lvds->width);
- if (ret < 0) {
+ if (lvds->dmode.width_mm == 0) {
dev_err(lvds->dev, "%pOF: invalid or missing %s DT property\n",
np, "width-mm");
return -ENODEV;
}
- ret = of_property_read_u32(np, "height-mm", &lvds->height);
- if (ret < 0) {
+
+ if (lvds->dmode.height_mm == 0) {
dev_err(lvds->dev, "%pOF: invalid or missing %s DT property\n",
np, "height-mm");
return -ENODEV;
@@ -158,7 +151,9 @@ static int panel_lvds_parse_dt(struct panel_lvds *lvds)
lvds->bus_format = ret;
- lvds->data_mirror = of_property_read_bool(np, "data-mirror");
+ lvds->bus_flags |= of_property_read_bool(np, "data-mirror") ?
+ DRM_BUS_FLAG_DATA_LSB_TO_MSB :
+ DRM_BUS_FLAG_DATA_MSB_TO_LSB;
return 0;
}
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index 94b6f0a19c83..7fcbc2a5b6cd 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -316,7 +316,8 @@ panfrost_ioctl_wait_bo(struct drm_device *dev, void *data,
if (!gem_obj)
return -ENOENT;
- ret = dma_resv_wait_timeout(gem_obj->resv, true, true, timeout);
+ ret = dma_resv_wait_timeout(gem_obj->resv, DMA_RESV_USAGE_READ,
+ true, timeout);
if (!ret)
ret = timeout ? -ETIMEDOUT : -EBUSY;
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index c34114560e49..fda5871aebe3 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -268,7 +268,7 @@ static void panfrost_attach_object_fences(struct drm_gem_object **bos,
int i;
for (i = 0; i < bo_count; i++)
- dma_resv_add_excl_fence(bos[i]->resv, fence);
+ dma_resv_add_fence(bos[i]->resv, fence, DMA_RESV_USAGE_WRITE);
}
int panfrost_job_push(struct panfrost_job *job)
diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
index 6a36b0fd845c..2d9ed3b94574 100644
--- a/drivers/gpu/drm/qxl/qxl_debugfs.c
+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
@@ -61,7 +61,8 @@ qxl_debugfs_buffers_info(struct seq_file *m, void *data)
struct dma_fence *fence;
int rel = 0;
- dma_resv_iter_begin(&cursor, bo->tbo.base.resv, true);
+ dma_resv_iter_begin(&cursor, bo->tbo.base.resv,
+ DMA_RESV_USAGE_BOOKKEEP);
dma_resv_for_each_fence_unlocked(&cursor, fence) {
if (dma_resv_iter_is_restarted(&cursor))
rel = 0;
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index cde1e8ddaeaa..368d26da0d6a 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -429,7 +429,8 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
list_for_each_entry(entry, &release->bos, head) {
bo = entry->bo;
- dma_resv_add_shared_fence(bo->base.resv, &release->base);
+ dma_resv_add_fence(bo->base.resv, &release->base,
+ DMA_RESV_USAGE_READ);
ttm_bo_move_to_lru_tail_unlocked(bo);
dma_resv_unlock(bo->base.resv);
}
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 9ba871bd19b1..ee95001e6b5e 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -222,41 +222,14 @@ void qxl_ttm_fini(struct qxl_device *qdev)
DRM_INFO("qxl: ttm finalized\n");
}
-#define QXL_DEBUGFS_MEM_TYPES 2
-
-#if defined(CONFIG_DEBUG_FS)
-static int qxl_mm_dump_table(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct ttm_resource_manager *man = (struct ttm_resource_manager *)node->info_ent->data;
- struct drm_printer p = drm_seq_file_printer(m);
-
- ttm_resource_manager_debug(man, &p);
- return 0;
-}
-#endif
-
void qxl_ttm_debugfs_init(struct qxl_device *qdev)
{
#if defined(CONFIG_DEBUG_FS)
- static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
- static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32];
- unsigned int i;
-
- for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) {
- if (i == 0)
- sprintf(qxl_mem_types_names[i], "qxl_mem_mm");
- else
- sprintf(qxl_mem_types_names[i], "qxl_surf_mm");
- qxl_mem_types_list[i].name = qxl_mem_types_names[i];
- qxl_mem_types_list[i].show = &qxl_mm_dump_table;
- qxl_mem_types_list[i].driver_features = 0;
- if (i == 0)
- qxl_mem_types_list[i].data = ttm_manager_type(&qdev->mman.bdev, TTM_PL_VRAM);
- else
- qxl_mem_types_list[i].data = ttm_manager_type(&qdev->mman.bdev, TTM_PL_PRIV);
-
- }
- qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
+ ttm_resource_manager_create_debugfs(ttm_manager_type(&qdev->mman.bdev,
+ TTM_PL_VRAM),
+ qdev->ddev.primary->debugfs_root, "qxl_mem_mm");
+ ttm_resource_manager_create_debugfs(ttm_manager_type(&qdev->mman.bdev,
+ TTM_PL_PRIV),
+ qdev->ddev.primary->debugfs_root, "qxl_surf_mm");
#endif
}
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index f60e826cd292..57ff2b723c87 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -533,7 +533,8 @@ static int radeon_crtc_page_flip_target(struct drm_crtc *crtc,
DRM_ERROR("failed to pin new rbo buffer before flip\n");
goto cleanup;
}
- r = dma_resv_get_singleton(new_rbo->tbo.base.resv, false, &work->fence);
+ r = dma_resv_get_singleton(new_rbo->tbo.base.resv, DMA_RESV_USAGE_WRITE,
+ &work->fence);
if (r) {
radeon_bo_unreserve(new_rbo);
DRM_ERROR("failed to get new rbo buffer fences\n");
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index f563284a7fac..8c01a7f0e027 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -162,7 +162,9 @@ static int radeon_gem_set_domain(struct drm_gem_object *gobj,
}
if (domain == RADEON_GEM_DOMAIN_CPU) {
/* Asking for cpu access, wait for object idle */
- r = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, 30 * HZ);
+ r = dma_resv_wait_timeout(robj->tbo.base.resv,
+ DMA_RESV_USAGE_BOOKKEEP,
+ true, 30 * HZ);
if (!r)
r = -EBUSY;
@@ -524,7 +526,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
}
robj = gem_to_radeon_bo(gobj);
- r = dma_resv_test_signaled(robj->tbo.base.resv, true);
+ r = dma_resv_test_signaled(robj->tbo.base.resv, DMA_RESV_USAGE_READ);
if (r == 0)
r = -EBUSY;
else
@@ -553,7 +555,8 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
}
robj = gem_to_radeon_bo(gobj);
- ret = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, 30 * HZ);
+ ret = dma_resv_wait_timeout(robj->tbo.base.resv, DMA_RESV_USAGE_READ,
+ true, 30 * HZ);
if (ret == 0)
r = -EBUSY;
else if (ret < 0)
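
dma_resv_wait_timeout() returns a long that the callers above decode by hand. A sketch of the convention, assuming a hypothetical buffer object bo:

    long ret;

    ret = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_READ,
                                true /* interruptible */, 30 * HZ);
    if (ret == 0)           /* timed out, fences still unsignaled */
            return -EBUSY;
    if (ret < 0)            /* error, e.g. -ERESTARTSYS on a signal */
            return ret;
    /* ret > 0: all fences signaled, remaining jiffies are returned */
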
diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c
index 9fa88549c89e..29fe8423bd90 100644
--- a/drivers/gpu/drm/radeon/radeon_mn.c
+++ b/drivers/gpu/drm/radeon/radeon_mn.c
@@ -66,8 +66,8 @@ static bool radeon_mn_invalidate(struct mmu_interval_notifier *mn,
return true;
}
- r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false,
- MAX_SCHEDULE_TIMEOUT);
+ r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP,
+ false, MAX_SCHEDULE_TIMEOUT);
if (r <= 0)
DRM_ERROR("(%ld) failed to wait for user bo\n", r);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 7ffd2e90f325..6c4a6802ca96 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -219,7 +219,12 @@ int radeon_bo_create(struct radeon_device *rdev,
int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
{
bool is_iomem;
- int r;
+ long r;
+
+ r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_KERNEL,
+ false, MAX_SCHEDULE_TIMEOUT);
+ if (r < 0)
+ return r;
if (bo->kptr) {
if (ptr) {
@@ -791,8 +796,6 @@ void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
return;
}
- if (shared)
- dma_resv_add_shared_fence(resv, &fence->base);
- else
- dma_resv_add_excl_fence(resv, &fence->base);
+ dma_resv_add_fence(resv, &fence->base, shared ?
+ DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE);
}
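
dma_resv_add_fence() with an explicit usage replaces the old shared/exclusive pair, but a fence slot still has to be reserved first. A minimal sketch under that assumption; add_job_fence() is a hypothetical helper:

    static int add_job_fence(struct dma_resv *resv, struct dma_fence *fence,
                             bool writing)
    {
            int ret;

            ret = dma_resv_reserve_fences(resv, 1);
            if (ret)
                    return ret;

            /* READ roughly replaces the old shared slot, WRITE the exclusive one */
            dma_resv_add_fence(resv, fence, writing ?
                               DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ);
            return 0;
    }
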
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index 4a90807351e7..42a87948e28c 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -77,19 +77,9 @@ int radeon_gem_prime_pin(struct drm_gem_object *obj)
/* pin buffer into GTT */
ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
- if (unlikely(ret))
- goto error;
-
- if (bo->tbo.moving) {
- ret = dma_fence_wait(bo->tbo.moving, false);
- if (unlikely(ret)) {
- radeon_bo_unpin(bo);
- goto error;
- }
- }
-
- bo->prime_shared_count++;
-error:
+ if (likely(ret == 0))
+ bo->prime_shared_count++;
+
radeon_bo_unreserve(bo);
return ret;
}
diff --git a/drivers/gpu/drm/radeon/radeon_sync.c b/drivers/gpu/drm/radeon/radeon_sync.c
index b991ba1bcd51..49bbb2266c0f 100644
--- a/drivers/gpu/drm/radeon/radeon_sync.c
+++ b/drivers/gpu/drm/radeon/radeon_sync.c
@@ -96,7 +96,7 @@ int radeon_sync_resv(struct radeon_device *rdev,
struct dma_fence *f;
int r = 0;
- dma_resv_for_each_fence(&cursor, resv, shared, f) {
+ dma_resv_for_each_fence(&cursor, resv, dma_resv_usage_rw(shared), f) {
fence = to_radeon_fence(f);
if (fence && fence->rdev == rdev)
radeon_sync_fence(sync, fence);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 44594d16611f..d33fec488713 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -781,17 +781,6 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
#if defined(CONFIG_DEBUG_FS)
-static int radeon_mm_vram_dump_table_show(struct seq_file *m, void *unused)
-{
- struct radeon_device *rdev = (struct radeon_device *)m->private;
- struct ttm_resource_manager *man = ttm_manager_type(&rdev->mman.bdev,
- TTM_PL_VRAM);
- struct drm_printer p = drm_seq_file_printer(m);
-
- ttm_resource_manager_debug(man, &p);
- return 0;
-}
-
static int radeon_ttm_page_pool_show(struct seq_file *m, void *data)
{
struct radeon_device *rdev = (struct radeon_device *)m->private;
@@ -799,19 +788,6 @@ static int radeon_ttm_page_pool_show(struct seq_file *m, void *data)
return ttm_pool_debugfs(&rdev->mman.bdev.pool, m);
}
-static int radeon_mm_gtt_dump_table_show(struct seq_file *m, void *unused)
-{
- struct radeon_device *rdev = (struct radeon_device *)m->private;
- struct ttm_resource_manager *man = ttm_manager_type(&rdev->mman.bdev,
- TTM_PL_TT);
- struct drm_printer p = drm_seq_file_printer(m);
-
- ttm_resource_manager_debug(man, &p);
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(radeon_mm_vram_dump_table);
-DEFINE_SHOW_ATTRIBUTE(radeon_mm_gtt_dump_table);
DEFINE_SHOW_ATTRIBUTE(radeon_ttm_page_pool);
static int radeon_ttm_vram_open(struct inode *inode, struct file *filep)
@@ -930,15 +906,15 @@ static void radeon_ttm_debugfs_init(struct radeon_device *rdev)
debugfs_create_file("radeon_vram", 0444, root, rdev,
&radeon_ttm_vram_fops);
-
debugfs_create_file("radeon_gtt", 0444, root, rdev,
&radeon_ttm_gtt_fops);
-
- debugfs_create_file("radeon_vram_mm", 0444, root, rdev,
- &radeon_mm_vram_dump_table_fops);
- debugfs_create_file("radeon_gtt_mm", 0444, root, rdev,
- &radeon_mm_gtt_dump_table_fops);
debugfs_create_file("ttm_page_pool", 0444, root, rdev,
&radeon_ttm_page_pool_fops);
+ ttm_resource_manager_create_debugfs(ttm_manager_type(&rdev->mman.bdev,
+ TTM_PL_VRAM),
+ root, "radeon_vram_mm");
+ ttm_resource_manager_create_debugfs(ttm_manager_type(&rdev->mman.bdev,
+ TTM_PL_TT),
+ root, "radeon_gtt_mm");
#endif
}
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index bc0f44299bb9..a2cda184b2b2 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -470,24 +470,16 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
int32_t *msg, msg_type, handle;
unsigned img_size = 0;
void *ptr;
- long r;
- int i;
+ int i, r;
if (offset & 0x3F) {
DRM_ERROR("UVD messages must be 64 byte aligned!\n");
return -EINVAL;
}
- r = dma_resv_wait_timeout(bo->tbo.base.resv, false, false,
- MAX_SCHEDULE_TIMEOUT);
- if (r <= 0) {
- DRM_ERROR("Failed waiting for UVD message (%ld)!\n", r);
- return r ? r : -ETIME;
- }
-
r = radeon_bo_kmap(bo, &ptr);
if (r) {
- DRM_ERROR("Failed mapping the UVD message (%ld)!\n", r);
+ DRM_ERROR("Failed mapping the UVD message (%d)!\n", r);
return r;
}
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index c5660b066554..76fd2904c7c6 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -705,7 +705,8 @@ int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
dma_resv_assert_held(obj->resv);
- dma_resv_for_each_fence(&cursor, obj->resv, write, fence) {
+ dma_resv_for_each_fence(&cursor, obj->resv, dma_resv_usage_rw(write),
+ fence) {
/* Make sure to grab an additional ref on the added fence */
dma_fence_get(fence);
ret = drm_sched_job_add_dependency(job, fence);
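
dma_resv_usage_rw() maps the old boolean onto a usage level, and the mapping is deliberately inverted: a writer must wait for all readers, while a reader only needs to wait for writers. A paraphrased sketch of what the helper evaluates to:

    static inline enum dma_resv_usage dma_resv_usage_rw(bool write)
    {
            /* a write must sync to READ (and below); a read only to WRITE */
            return write ? DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE;
    }
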
diff --git a/drivers/gpu/drm/solomon/Kconfig b/drivers/gpu/drm/solomon/Kconfig
index 6230369505c9..e170716d976b 100644
--- a/drivers/gpu/drm/solomon/Kconfig
+++ b/drivers/gpu/drm/solomon/Kconfig
@@ -5,9 +5,9 @@ config DRM_SSD130X
select DRM_GEM_SHMEM_HELPER
select DRM_KMS_HELPER
help
- DRM driver for the SSD1305, SSD1306, SSD1307 and SSD1309 Solomon
- OLED controllers. This is only for the core driver, a driver for
- the appropriate bus transport in your chip also must be selected.
+ DRM driver for the SSD130x Solomon and SINO WEALTH SH110x OLED
+ controllers. This is only the core driver; a driver for the
+ appropriate bus transport in your chip must also be selected.
If M is selected the module will be called ssd130x.
@@ -16,6 +16,16 @@ config DRM_SSD130X_I2C
depends on DRM_SSD130X && I2C
select REGMAP_I2C
help
- Say Y here if the SSD130x OLED display is connected via I2C bus.
+ Say Y here if the SSD130x or SH110x OLED display is connected via
+ I2C bus.
If M is selected the module will be called ssd130x-i2c.
+
+config DRM_SSD130X_SPI
+ tristate "DRM support for Solomon SSD130X OLED displays (SPI bus)"
+ depends on DRM_SSD130X && SPI
+ select REGMAP
+ help
+ Say Y here if the SSD130x OLED display is connected via SPI bus.
+
+ If M is selected the module will be called ssd130x-spi.
diff --git a/drivers/gpu/drm/solomon/Makefile b/drivers/gpu/drm/solomon/Makefile
index 4bfc5acb0447..b5fc792257d7 100644
--- a/drivers/gpu/drm/solomon/Makefile
+++ b/drivers/gpu/drm/solomon/Makefile
@@ -1,2 +1,3 @@
obj-$(CONFIG_DRM_SSD130X) += ssd130x.o
obj-$(CONFIG_DRM_SSD130X_I2C) += ssd130x-i2c.o
+obj-$(CONFIG_DRM_SSD130X_SPI) += ssd130x-spi.o
diff --git a/drivers/gpu/drm/solomon/ssd130x-i2c.c b/drivers/gpu/drm/solomon/ssd130x-i2c.c
index 3126aeda4ced..d6835ec71c39 100644
--- a/drivers/gpu/drm/solomon/ssd130x-i2c.c
+++ b/drivers/gpu/drm/solomon/ssd130x-i2c.c
@@ -53,48 +53,43 @@ static void ssd130x_i2c_shutdown(struct i2c_client *client)
ssd130x_shutdown(ssd130x);
}
-static struct ssd130x_deviceinfo ssd130x_ssd1305_deviceinfo = {
- .default_vcomh = 0x34,
- .default_dclk_div = 1,
- .default_dclk_frq = 7,
-};
-
-static struct ssd130x_deviceinfo ssd130x_ssd1306_deviceinfo = {
- .default_vcomh = 0x20,
- .default_dclk_div = 1,
- .default_dclk_frq = 8,
- .need_chargepump = 1,
-};
-
-static struct ssd130x_deviceinfo ssd130x_ssd1307_deviceinfo = {
- .default_vcomh = 0x20,
- .default_dclk_div = 2,
- .default_dclk_frq = 12,
- .need_pwm = 1,
-};
-
-static struct ssd130x_deviceinfo ssd130x_ssd1309_deviceinfo = {
- .default_vcomh = 0x34,
- .default_dclk_div = 1,
- .default_dclk_frq = 10,
-};
-
static const struct of_device_id ssd130x_of_match[] = {
{
+ .compatible = "sinowealth,sh1106",
+ .data = &ssd130x_variants[SH1106_ID],
+ },
+ {
+ .compatible = "solomon,ssd1305",
+ .data = &ssd130x_variants[SSD1305_ID],
+ },
+ {
+ .compatible = "solomon,ssd1306",
+ .data = &ssd130x_variants[SSD1306_ID],
+ },
+ {
+ .compatible = "solomon,ssd1307",
+ .data = &ssd130x_variants[SSD1307_ID],
+ },
+ {
+ .compatible = "solomon,ssd1309",
+ .data = &ssd130x_variants[SSD1309_ID],
+ },
+ /* Deprecated but kept for backward compatibility */
+ {
.compatible = "solomon,ssd1305fb-i2c",
- .data = &ssd130x_ssd1305_deviceinfo,
+ .data = &ssd130x_variants[SSD1305_ID],
},
{
.compatible = "solomon,ssd1306fb-i2c",
- .data = &ssd130x_ssd1306_deviceinfo,
+ .data = &ssd130x_variants[SSD1306_ID],
},
{
.compatible = "solomon,ssd1307fb-i2c",
- .data = &ssd130x_ssd1307_deviceinfo,
+ .data = &ssd130x_variants[SSD1307_ID],
},
{
.compatible = "solomon,ssd1309fb-i2c",
- .data = &ssd130x_ssd1309_deviceinfo,
+ .data = &ssd130x_variants[SSD1309_ID],
},
{ /* sentinel */ }
};
@@ -114,3 +109,4 @@ module_i2c_driver(ssd130x_i2c_driver);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Javier Martinez Canillas <javierm@redhat.com>");
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(DRM_SSD130X);
diff --git a/drivers/gpu/drm/solomon/ssd130x-spi.c b/drivers/gpu/drm/solomon/ssd130x-spi.c
new file mode 100644
index 000000000000..43722adab1f8
--- /dev/null
+++ b/drivers/gpu/drm/solomon/ssd130x-spi.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * DRM driver for Solomon SSD130X OLED displays (SPI bus)
+ *
+ * Copyright 2022 Red Hat Inc.
+ * Authors: Javier Martinez Canillas <javierm@redhat.com>
+ */
+#include <linux/spi/spi.h>
+#include <linux/module.h>
+
+#include "ssd130x.h"
+
+#define DRIVER_NAME "ssd130x-spi"
+#define DRIVER_DESC "DRM driver for Solomon SSD130X OLED displays (SPI)"
+
+struct ssd130x_spi_transport {
+ struct spi_device *spi;
+ struct gpio_desc *dc;
+};
+
+static const struct regmap_config ssd130x_spi_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+/*
+ * The regmap bus .write handler is just a wrapper around spi_write() that
+ * also toggles the Data/Command control pin (D/C#). A 4-wire SPI interface
+ * uses a D/C# pin, in contrast with I2C, where a control byte containing
+ * the D/C# bit is sent prior to every data byte.
+ *
+ * These control bytes are considered registers by the ssd130x core driver
+ * and can be used by the ssd130x SPI driver to determine if the data sent
+ * is for a command register or for the Graphic Display Data RAM (GDDRAM).
+ */
+static int ssd130x_spi_write(void *context, const void *data, size_t count)
+{
+ struct ssd130x_spi_transport *t = context;
+ struct spi_device *spi = t->spi;
+ const u8 *reg = data;
+
+ if (*reg == SSD130X_COMMAND)
+ gpiod_set_value_cansleep(t->dc, 0);
+
+ if (*reg == SSD130X_DATA)
+ gpiod_set_value_cansleep(t->dc, 1);
+
+ /* Remove the control byte since it is not used in a 4-wire SPI interface */
+ return spi_write(spi, reg + 1, count - 1);
+}
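
As a sketch of how this plays out, assuming the core driver issues a plain regmap_write() for the address-mode command: the two regmap-formatted bytes { SSD130X_COMMAND, SSD130X_SET_ADDRESS_MODE } reach the handler above, the 0x80 control byte drives D/C# low, and only the 0x20 opcode goes out on the wire:

    regmap_write(ssd130x->regmap, SSD130X_COMMAND, SSD130X_SET_ADDRESS_MODE);
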
+
+/* The ssd130x driver does not read registers but regmap expects a .read */
+static int ssd130x_spi_read(void *context, const void *reg, size_t reg_size,
+ void *val, size_t val_size)
+{
+ return -EOPNOTSUPP;
+}
+
+/*
+ * A custom bus is needed due to the special write that toggles a D/C# pin;
+ * another option would be to just have a .reg_write() callback, but that
+ * would prevent doing data writes in bulk.
+ *
+ * Once the regmap API is extended to support defining a bulk write handler
+ * in the struct regmap_config, this can be simplified and the bus dropped.
+ */
+static struct regmap_bus regmap_ssd130x_spi_bus = {
+ .write = ssd130x_spi_write,
+ .read = ssd130x_spi_read,
+};
+
+static int ssd130x_spi_probe(struct spi_device *spi)
+{
+ struct ssd130x_spi_transport *t;
+ struct ssd130x_device *ssd130x;
+ struct regmap *regmap;
+ struct gpio_desc *dc;
+ struct device *dev = &spi->dev;
+
+ dc = devm_gpiod_get(dev, "dc", GPIOD_OUT_LOW);
+ if (IS_ERR(dc))
+ return dev_err_probe(dev, PTR_ERR(dc),
+ "Failed to get dc gpio\n");
+
+ t = devm_kzalloc(dev, sizeof(*t), GFP_KERNEL);
+ if (!t)
+ return dev_err_probe(dev, -ENOMEM,
+ "Failed to allocate SPI transport data\n");
+
+ t->spi = spi;
+ t->dc = dc;
+
+ regmap = devm_regmap_init(dev, &regmap_ssd130x_spi_bus, t,
+ &ssd130x_spi_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ ssd130x = ssd130x_probe(dev, regmap);
+ if (IS_ERR(ssd130x))
+ return PTR_ERR(ssd130x);
+
+ spi_set_drvdata(spi, ssd130x);
+
+ return 0;
+}
+
+static void ssd130x_spi_remove(struct spi_device *spi)
+{
+ struct ssd130x_device *ssd130x = spi_get_drvdata(spi);
+
+ ssd130x_remove(ssd130x);
+}
+
+static void ssd130x_spi_shutdown(struct spi_device *spi)
+{
+ struct ssd130x_device *ssd130x = spi_get_drvdata(spi);
+
+ ssd130x_shutdown(ssd130x);
+}
+
+static const struct of_device_id ssd130x_of_match[] = {
+ {
+ .compatible = "sinowealth,sh1106",
+ .data = &ssd130x_variants[SH1106_ID],
+ },
+ {
+ .compatible = "solomon,ssd1305",
+ .data = &ssd130x_variants[SSD1305_ID],
+ },
+ {
+ .compatible = "solomon,ssd1306",
+ .data = &ssd130x_variants[SSD1306_ID],
+ },
+ {
+ .compatible = "solomon,ssd1307",
+ .data = &ssd130x_variants[SSD1307_ID],
+ },
+ {
+ .compatible = "solomon,ssd1309",
+ .data = &ssd130x_variants[SSD1309_ID],
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ssd130x_of_match);
+
+/*
+ * The SPI core always reports a MODALIAS uevent of the form "spi:<dev>", even
+ * if the device was registered via OF. This means that the module will not be
+ * auto-loaded unless it contains an alias that matches the reported MODALIAS.
+ *
+ * To work around this issue, add an SPI device ID table, even though it should
+ * not be needed for this driver to match the registered SPI devices.
+ */
+static const struct spi_device_id ssd130x_spi_table[] = {
+ { "sh1106", SH1106_ID },
+ { "ssd1305", SSD1305_ID },
+ { "ssd1306", SSD1306_ID },
+ { "ssd1307", SSD1307_ID },
+ { "ssd1309", SSD1309_ID },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(spi, ssd130x_spi_table);
+
+static struct spi_driver ssd130x_spi_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = ssd130x_of_match,
+ },
+ .probe = ssd130x_spi_probe,
+ .remove = ssd130x_spi_remove,
+ .shutdown = ssd130x_spi_shutdown,
+};
+module_spi_driver(ssd130x_spi_driver);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR("Javier Martinez Canillas <javierm@redhat.com>");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(DRM_SSD130X);
diff --git a/drivers/gpu/drm/solomon/ssd130x.c b/drivers/gpu/drm/solomon/ssd130x.c
index 38b6c2c14f53..ba2de93d00f0 100644
--- a/drivers/gpu/drm/solomon/ssd130x.c
+++ b/drivers/gpu/drm/solomon/ssd130x.c
@@ -39,9 +39,8 @@
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
-#define SSD130X_DATA 0x40
-#define SSD130X_COMMAND 0x80
-
+#define SSD130X_PAGE_COL_START_LOW 0x00
+#define SSD130X_PAGE_COL_START_HIGH 0x10
#define SSD130X_SET_ADDRESS_MODE 0x20
#define SSD130X_SET_COL_RANGE 0x21
#define SSD130X_SET_PAGE_RANGE 0x22
@@ -61,6 +60,11 @@
#define SSD130X_SET_COM_PINS_CONFIG 0xda
#define SSD130X_SET_VCOMH 0xdb
+#define SSD130X_PAGE_COL_START_MASK GENMASK(3, 0)
+#define SSD130X_PAGE_COL_START_HIGH_SET(val) FIELD_PREP(SSD130X_PAGE_COL_START_MASK, (val) >> 4)
+#define SSD130X_PAGE_COL_START_LOW_SET(val) FIELD_PREP(SSD130X_PAGE_COL_START_MASK, (val))
+#define SSD130X_START_PAGE_ADDRESS_MASK GENMASK(2, 0)
+#define SSD130X_START_PAGE_ADDRESS_SET(val) FIELD_PREP(SSD130X_START_PAGE_ADDRESS_MASK, (val))
#define SSD130X_SET_SEG_REMAP_MASK GENMASK(0, 0)
#define SSD130X_SET_SEG_REMAP_SET(val) FIELD_PREP(SSD130X_SET_SEG_REMAP_MASK, (val))
#define SSD130X_SET_COM_SCAN_DIR_MASK GENMASK(3, 3)
@@ -87,6 +91,38 @@
#define MAX_CONTRAST 255
+const struct ssd130x_deviceinfo ssd130x_variants[] = {
+ [SH1106_ID] = {
+ .default_vcomh = 0x40,
+ .default_dclk_div = 1,
+ .default_dclk_frq = 5,
+ .page_mode_only = 1,
+ },
+ [SSD1305_ID] = {
+ .default_vcomh = 0x34,
+ .default_dclk_div = 1,
+ .default_dclk_frq = 7,
+ },
+ [SSD1306_ID] = {
+ .default_vcomh = 0x20,
+ .default_dclk_div = 1,
+ .default_dclk_frq = 8,
+ .need_chargepump = 1,
+ },
+ [SSD1307_ID] = {
+ .default_vcomh = 0x20,
+ .default_dclk_div = 2,
+ .default_dclk_frq = 12,
+ .need_pwm = 1,
+ },
+ [SSD1309_ID] = {
+ .default_vcomh = 0x34,
+ .default_dclk_div = 1,
+ .default_dclk_frq = 10,
+ }
+};
+EXPORT_SYMBOL_NS_GPL(ssd130x_variants, DRM_SSD130X);
+
static inline struct ssd130x_device *drm_to_ssd130x(struct drm_device *drm)
{
return container_of(drm, struct ssd130x_device, drm);
@@ -130,6 +166,7 @@ out_end:
return ret;
}
+/* Set address range for horizontal/vertical addressing modes */
static int ssd130x_set_col_range(struct ssd130x_device *ssd130x,
u8 col_start, u8 cols)
{
@@ -166,6 +203,26 @@ static int ssd130x_set_page_range(struct ssd130x_device *ssd130x,
return 0;
}
+/* Set page and column start address for page addressing mode */
+static int ssd130x_set_page_pos(struct ssd130x_device *ssd130x,
+ u8 page_start, u8 col_start)
+{
+ int ret;
+ u32 page, col_low, col_high;
+
+ page = SSD130X_START_PAGE_ADDRESS |
+ SSD130X_START_PAGE_ADDRESS_SET(page_start);
+ col_low = SSD130X_PAGE_COL_START_LOW |
+ SSD130X_PAGE_COL_START_LOW_SET(col_start);
+ col_high = SSD130X_PAGE_COL_START_HIGH |
+ SSD130X_PAGE_COL_START_HIGH_SET(col_start);
+ ret = ssd130x_write_cmd(ssd130x, 3, page, col_low, col_high);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
static int ssd130x_pwm_enable(struct ssd130x_device *ssd130x)
{
struct device *dev = ssd130x->dev;
@@ -342,6 +399,11 @@ static int ssd130x_init(struct ssd130x_device *ssd130x)
}
}
+ /* Switch to page addressing mode */
+ if (ssd130x->page_address_mode)
+ return ssd130x_write_cmd(ssd130x, 2, SSD130X_SET_ADDRESS_MODE,
+ SSD130X_SET_ADDRESS_MODE_PAGE);
+
/* Switch to horizontal addressing mode */
return ssd130x_write_cmd(ssd130x, 2, SSD130X_SET_ADDRESS_MODE,
SSD130X_SET_ADDRESS_MODE_HORIZONTAL);
@@ -396,13 +458,16 @@ static int ssd130x_update_rect(struct ssd130x_device *ssd130x, u8 *buf,
* (5) A4 B4 C4 D4 E4 F4 G4 H4
*/
- ret = ssd130x_set_col_range(ssd130x, ssd130x->col_offset + x, width);
- if (ret < 0)
- goto out_free;
+ if (!ssd130x->page_address_mode) {
+ /* Set address range for horizontal addressing mode */
+ ret = ssd130x_set_col_range(ssd130x, ssd130x->col_offset + x, width);
+ if (ret < 0)
+ goto out_free;
- ret = ssd130x_set_page_range(ssd130x, ssd130x->page_offset + y / 8, pages);
- if (ret < 0)
- goto out_free;
+ ret = ssd130x_set_page_range(ssd130x, ssd130x->page_offset + y / 8, pages);
+ if (ret < 0)
+ goto out_free;
+ }
for (i = 0; i < pages; i++) {
int m = 8;
@@ -421,9 +486,29 @@ static int ssd130x_update_rect(struct ssd130x_device *ssd130x, u8 *buf,
}
data_array[array_idx++] = data;
}
+
+ /*
+ * In page addressing mode, the start address needs to be reset,
+ * and each page then needs to be written out separately.
+ */
+ if (ssd130x->page_address_mode) {
+ ret = ssd130x_set_page_pos(ssd130x,
+ ssd130x->page_offset + i,
+ ssd130x->col_offset + x);
+ if (ret < 0)
+ goto out_free;
+
+ ret = ssd130x_write_data(ssd130x, data_array, width);
+ if (ret < 0)
+ goto out_free;
+
+ array_idx = 0;
+ }
}
- ret = ssd130x_write_data(ssd130x, data_array, width * pages);
+ /* Write out the update in one go if we aren't using page addressing mode */
+ if (!ssd130x->page_address_mode)
+ ret = ssd130x_write_data(ssd130x, data_array, width * pages);
out_free:
kfree(data_array);
@@ -806,6 +891,9 @@ struct ssd130x_device *ssd130x_probe(struct device *dev, struct regmap *regmap)
ssd130x->regmap = regmap;
ssd130x->device_info = device_get_match_data(dev);
+ if (ssd130x->device_info->page_mode_only)
+ ssd130x->page_address_mode = 1;
+
ssd130x_parse_properties(ssd130x);
ret = ssd130x_get_resources(ssd130x);
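
A worked example of the page addressing helpers added above: for a hypothetical column start of 0x2a, the two column commands come out as 0x0a and 0x12:

    u8 col_start = 0x2a;
    u32 col_low, col_high;

    col_low = SSD130X_PAGE_COL_START_LOW |
              SSD130X_PAGE_COL_START_LOW_SET(col_start);   /* 0x00 | 0x0a */
    col_high = SSD130X_PAGE_COL_START_HIGH |
               SSD130X_PAGE_COL_START_HIGH_SET(col_start); /* 0x10 | 0x02 */
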
diff --git a/drivers/gpu/drm/solomon/ssd130x.h b/drivers/gpu/drm/solomon/ssd130x.h
index cd21cdccb566..d14f78c2eb07 100644
--- a/drivers/gpu/drm/solomon/ssd130x.h
+++ b/drivers/gpu/drm/solomon/ssd130x.h
@@ -18,12 +18,25 @@
#include <linux/regmap.h>
+#define SSD130X_DATA 0x40
+#define SSD130X_COMMAND 0x80
+
+enum ssd130x_variants {
+ SH1106_ID,
+ SSD1305_ID,
+ SSD1306_ID,
+ SSD1307_ID,
+ SSD1309_ID,
+ NR_SSD130X_VARIANTS
+};
+
struct ssd130x_deviceinfo {
u32 default_vcomh;
u32 default_dclk_div;
u32 default_dclk_frq;
int need_pwm;
int need_chargepump;
+ bool page_mode_only;
};
struct ssd130x_device {
@@ -38,6 +51,7 @@ struct ssd130x_device {
const struct ssd130x_deviceinfo *device_info;
+ unsigned page_address_mode : 1;
unsigned area_color_enable : 1;
unsigned com_invdir : 1;
unsigned com_lrremap : 1;
@@ -69,6 +83,8 @@ struct ssd130x_device {
u8 page_end;
};
+extern const struct ssd130x_deviceinfo ssd130x_variants[];
+
struct ssd130x_device *ssd130x_probe(struct device *dev, struct regmap *regmap);
int ssd130x_remove(struct ssd130x_device *ssd130x);
void ssd130x_shutdown(struct ssd130x_device *ssd130x);
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
index 3db3768a3241..b58415f2e4d8 100644
--- a/drivers/gpu/drm/sti/sti_gdp.c
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -406,7 +406,7 @@ static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_gdp *gdp)
(hw_nvn != gdp->node_list[i].top_field_paddr))
return &gdp->node_list[i];
- /* in hazardious cases restart with the first node */
+ /* in hazardous cases restart with the first node */
DRM_ERROR("inconsistent NVN for %s: 0x%08X\n",
sti_plane_to_str(&gdp->plane), hw_nvn);
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index f3ace11209dd..b3fbee7eac11 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -183,7 +183,7 @@ void hdmi_write(struct sti_hdmi *hdmi, u32 val, int offset)
writel(val, hdmi->regs + offset);
}
-/**
+/*
* HDMI interrupt handler threaded
*
* @irq: irq number
@@ -215,7 +215,7 @@ static irqreturn_t hdmi_irq_thread(int irq, void *arg)
return IRQ_HANDLED;
}
-/**
+/*
* HDMI interrupt handler
*
* @irq: irq number
@@ -237,7 +237,7 @@ static irqreturn_t hdmi_irq(int irq, void *arg)
return IRQ_WAKE_THREAD;
}
-/**
+/*
* Set hdmi active area depending on the drm display mode selected
*
* @hdmi: pointer on the hdmi internal structure
@@ -258,7 +258,7 @@ static void hdmi_active_area(struct sti_hdmi *hdmi)
hdmi_write(hdmi, ymax, HDMI_ACTIVE_VID_YMAX);
}
-/**
+/*
* Overall hdmi configuration
*
* @hdmi: pointer on the hdmi internal structure
@@ -336,7 +336,7 @@ static void hdmi_infoframe_reset(struct sti_hdmi *hdmi,
hdmi_write(hdmi, 0x0, pack_offset + i);
}
-/**
+/*
* Helper to concatenate infoframe in 32 bits word
*
* @ptr: pointer on the hdmi internal structure
@@ -353,7 +353,7 @@ static inline unsigned int hdmi_infoframe_subpack(const u8 *ptr, size_t size)
return value;
}
-/**
+/*
* Helper to write info frame
*
* @hdmi: pointer on the hdmi internal structure
@@ -423,7 +423,7 @@ static void hdmi_infoframe_write_infopack(struct sti_hdmi *hdmi,
hdmi_write(hdmi, val, HDMI_SW_DI_CFG);
}
-/**
+/*
* Prepare and configure the AVI infoframe
*
* AVI infoframe are transmitted at least once per two video field and
@@ -466,7 +466,7 @@ static int hdmi_avi_infoframe_config(struct sti_hdmi *hdmi)
return 0;
}
-/**
+/*
* Prepare and configure the AUDIO infoframe
*
* AUDIO infoframe are transmitted once per frame and
@@ -551,7 +551,7 @@ static int hdmi_vendor_infoframe_config(struct sti_hdmi *hdmi)
#define HDMI_TIMEOUT_SWRESET 100 /*milliseconds */
-/**
+/*
* Software reset of the hdmi subsystem
*
* @hdmi: pointer on the hdmi internal structure
@@ -785,7 +785,7 @@ static void sti_hdmi_disable(struct drm_bridge *bridge)
cec_notifier_set_phys_addr(hdmi->notifier, CEC_PHYS_ADDR_INVALID);
}
-/**
+/*
* sti_hdmi_audio_get_non_coherent_n() - get N parameter for non-coherent
* clocks. Non-coherent clocks means that audio and TMDS clocks do not have the
* same source (drifts between clocks). In this case the assumption is that CTS is
@@ -892,7 +892,7 @@ static void sti_hdmi_pre_enable(struct drm_bridge *bridge)
if (clk_prepare_enable(hdmi->clk_tmds))
DRM_ERROR("Failed to prepare/enable hdmi_tmds clk\n");
if (clk_prepare_enable(hdmi->clk_phy))
- DRM_ERROR("Failed to prepare/enable hdmi_rejec_pll clk\n");
+ DRM_ERROR("Failed to prepare/enable hdmi_rejection_pll clk\n");
hdmi->enabled = true;
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index 17cc050207f4..6bd45df8f5a7 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -869,8 +869,8 @@ static void ltdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
struct drm_device *ddev = crtc->dev;
struct drm_connector_list_iter iter;
struct drm_connector *connector = NULL;
- struct drm_encoder *encoder = NULL;
- struct drm_bridge *bridge = NULL;
+ struct drm_encoder *encoder = NULL, *en_iter;
+ struct drm_bridge *bridge = NULL, *br_iter;
struct drm_display_mode *mode = &crtc->state->adjusted_mode;
u32 hsync, vsync, accum_hbp, accum_vbp, accum_act_w, accum_act_h;
u32 total_width, total_height;
@@ -880,15 +880,19 @@ static void ltdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
int ret;
/* get encoder from crtc */
- drm_for_each_encoder(encoder, ddev)
- if (encoder->crtc == crtc)
+ drm_for_each_encoder(en_iter, ddev)
+ if (en_iter->crtc == crtc) {
+ encoder = en_iter;
break;
+ }
if (encoder) {
/* get bridge from encoder */
- list_for_each_entry(bridge, &encoder->bridge_chain, chain_node)
- if (bridge->encoder == encoder)
+ list_for_each_entry(br_iter, &encoder->bridge_chain, chain_node)
+ if (br_iter->encoder == encoder) {
+ bridge = br_iter;
break;
+ }
/* Get the connector from encoder */
drm_connector_list_iter_begin(ddev, &iter);
diff --git a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
index 5e2b0175df36..2860e6bff8b7 100644
--- a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
+++ b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
@@ -135,7 +135,7 @@ static int sun8i_hdmi_phy_config_a83t(struct dw_hdmi *hdmi,
dw_hdmi_phy_gen2_txpwron(hdmi, 0);
dw_hdmi_phy_gen2_pddq(hdmi, 1);
- dw_hdmi_phy_reset(hdmi);
+ dw_hdmi_phy_gen2_reset(hdmi);
dw_hdmi_phy_gen2_pddq(hdmi, 0);
diff --git a/drivers/gpu/drm/tidss/tidss_dispc.c b/drivers/gpu/drm/tidss/tidss_dispc.c
index 60b92df615aa..dae47853b728 100644
--- a/drivers/gpu/drm/tidss/tidss_dispc.c
+++ b/drivers/gpu/drm/tidss/tidss_dispc.c
@@ -2650,6 +2650,20 @@ static void dispc_init_errata(struct dispc_device *dispc)
}
}
+static void dispc_softreset(struct dispc_device *dispc)
+{
+ u32 val;
+ int ret = 0;
+
+ /* Soft reset */
+ REG_FLD_MOD(dispc, DSS_SYSCONFIG, 1, 1, 1);
+ /* Wait for reset to complete */
+ ret = readl_poll_timeout(dispc->base_common + DSS_SYSSTATUS,
+ val, val & 1, 100, 5000);
+ if (ret)
+ dev_warn(dispc->dev, "failed to reset dispc\n");
+}
+
int dispc_init(struct tidss_device *tidss)
{
struct device *dev = tidss->dev;
@@ -2709,6 +2723,10 @@ int dispc_init(struct tidss_device *tidss)
return r;
}
+ /* K2G display controller does not support soft reset */
+ if (feat->subrev != DISPC_K2G)
+ dispc_softreset(dispc);
+
for (i = 0; i < dispc->feat->num_vps; i++) {
u32 gamma_size = dispc->feat->vp_feat.color.gamma_size;
u32 *gamma_table;
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index c49996cf25d0..75d308ec173d 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -223,7 +223,7 @@ static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
struct dma_resv_iter cursor;
struct dma_fence *fence;
- dma_resv_iter_begin(&cursor, resv, true);
+ dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP);
dma_resv_for_each_fence_unlocked(&cursor, fence) {
if (!fence->ops->signaled)
dma_fence_enable_sw_signaling(fence);
@@ -252,7 +252,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
struct dma_resv *resv = &bo->base._resv;
int ret;
- if (dma_resv_test_signaled(resv, true))
+ if (dma_resv_test_signaled(resv, DMA_RESV_USAGE_BOOKKEEP))
ret = 0;
else
ret = -EBUSY;
@@ -264,7 +264,8 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
dma_resv_unlock(bo->base.resv);
spin_unlock(&bo->bdev->lru_lock);
- lret = dma_resv_wait_timeout(resv, true, interruptible,
+ lret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP,
+ interruptible,
30 * HZ);
if (lret < 0)
@@ -367,7 +368,8 @@ static void ttm_bo_release(struct kref *kref)
/* Last resort, if we fail to allocate memory for the
* fences block for the BO to become idle
*/
- dma_resv_wait_timeout(bo->base.resv, true, false,
+ dma_resv_wait_timeout(bo->base.resv,
+ DMA_RESV_USAGE_BOOKKEEP, false,
30 * HZ);
}
@@ -378,7 +380,7 @@ static void ttm_bo_release(struct kref *kref)
ttm_mem_io_free(bdev, bo->resource);
}
- if (!dma_resv_test_signaled(bo->base.resv, true) ||
+ if (!dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP) ||
!dma_resv_trylock(bo->base.resv)) {
/* The BO is not idle, resurrect it for delayed destroy */
ttm_bo_flush_all_fences(bo);
@@ -416,7 +418,6 @@ static void ttm_bo_release(struct kref *kref)
dma_resv_unlock(bo->base.resv);
atomic_dec(&ttm_glob.bo_count);
- dma_fence_put(bo->moving);
bo->destroy(bo);
}
@@ -712,9 +713,8 @@ void ttm_bo_unpin(struct ttm_buffer_object *bo)
EXPORT_SYMBOL(ttm_bo_unpin);
/*
- * Add the last move fence to the BO and reserve a new shared slot. We only use
- * a shared slot to avoid unecessary sync and rely on the subsequent bo move to
- * either stall or use an exclusive fence respectively set bo->moving.
+ * Add the last move fence to the BO as a kernel dependency and reserve a new
+ * fence slot.
*/
static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
struct ttm_resource_manager *man,
@@ -737,17 +737,11 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
return ret;
}
- dma_resv_add_shared_fence(bo->base.resv, fence);
+ dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL);
ret = dma_resv_reserve_fences(bo->base.resv, 1);
- if (unlikely(ret)) {
- dma_fence_put(fence);
- return ret;
- }
-
- dma_fence_put(bo->moving);
- bo->moving = fence;
- return 0;
+ dma_fence_put(fence);
+ return ret;
}
/*
@@ -949,7 +943,6 @@ int ttm_bo_init_reserved(struct ttm_device *bdev,
bo->bdev = bdev;
bo->type = type;
bo->page_alignment = page_alignment;
- bo->moving = NULL;
bo->pin_count = 0;
bo->sg = sg;
bo->bulk_move = NULL;
@@ -1044,14 +1037,14 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
long timeout = 15 * HZ;
if (no_wait) {
- if (dma_resv_test_signaled(bo->base.resv, true))
+ if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP))
return 0;
else
return -EBUSY;
}
- timeout = dma_resv_wait_timeout(bo->base.resv, true, interruptible,
- timeout);
+ timeout = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
+ interruptible, timeout);
if (timeout < 0)
return timeout;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 1b96b91bf81b..1cbfb00c1d65 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -228,7 +228,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
atomic_inc(&ttm_glob.bo_count);
INIT_LIST_HEAD(&fbo->base.ddestroy);
- fbo->base.moving = NULL;
drm_vma_node_reset(&fbo->base.base.vma_node);
kref_init(&fbo->base.kref);
@@ -500,14 +499,12 @@ static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo,
* operation has completed.
*/
- dma_fence_put(bo->moving);
- bo->moving = dma_fence_get(fence);
-
ret = ttm_buffer_object_transfer(bo, &ghost_obj);
if (ret)
return ret;
- dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);
+ dma_resv_add_fence(&ghost_obj->base._resv, fence,
+ DMA_RESV_USAGE_KERNEL);
/**
* If we're not moving to fixed memory, the TTM object
@@ -545,9 +542,6 @@ static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
spin_unlock(&from->move_lock);
ttm_resource_free(bo, &bo->resource);
-
- dma_fence_put(bo->moving);
- bo->moving = dma_fence_get(fence);
}
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
@@ -561,7 +555,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
int ret = 0;
- dma_resv_add_excl_fence(bo->base.resv, fence);
+ dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL);
if (!evict)
ret = ttm_bo_move_to_ghost(bo, fence, man->use_tt);
else if (!from->use_tt && pipeline)
@@ -578,6 +572,21 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
+void ttm_bo_move_sync_cleanup(struct ttm_buffer_object *bo,
+ struct ttm_resource *new_mem)
+{
+ struct ttm_device *bdev = bo->bdev;
+ struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
+ int ret;
+
+ ret = ttm_bo_wait_free_node(bo, man->use_tt);
+ if (WARN_ON(ret))
+ return;
+
+ ttm_bo_assign_mem(bo, new_mem);
+}
+EXPORT_SYMBOL(ttm_bo_move_sync_cleanup);
+
/**
* ttm_bo_pipeline_gutting - purge the contents of a bo
* @bo: The buffer object
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 08ba083a80d2..5b324f245265 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -46,17 +46,13 @@
static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
struct vm_fault *vmf)
{
- vm_fault_t ret = 0;
- int err = 0;
-
- if (likely(!bo->moving))
- goto out_unlock;
+ long err = 0;
/*
* Quick non-stalling check for idle.
*/
- if (dma_fence_is_signaled(bo->moving))
- goto out_clear;
+ if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_KERNEL))
+ return 0;
/*
* If possible, avoid waiting for GPU with mmap_lock
@@ -64,34 +60,30 @@ static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
* is the first attempt.
*/
if (fault_flag_allow_retry_first(vmf->flags)) {
- ret = VM_FAULT_RETRY;
if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
- goto out_unlock;
+ return VM_FAULT_RETRY;
ttm_bo_get(bo);
mmap_read_unlock(vmf->vma->vm_mm);
- (void) dma_fence_wait(bo->moving, true);
+ (void)dma_resv_wait_timeout(bo->base.resv,
+ DMA_RESV_USAGE_KERNEL, true,
+ MAX_SCHEDULE_TIMEOUT);
dma_resv_unlock(bo->base.resv);
ttm_bo_put(bo);
- goto out_unlock;
+ return VM_FAULT_RETRY;
}
/*
* Ordinary wait.
*/
- err = dma_fence_wait(bo->moving, true);
- if (unlikely(err != 0)) {
- ret = (err != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
+ err = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_KERNEL, true,
+ MAX_SCHEDULE_TIMEOUT);
+ if (unlikely(err < 0)) {
+ return (err != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
VM_FAULT_NOPAGE;
- goto out_unlock;
}
-out_clear:
- dma_fence_put(bo->moving);
- bo->moving = NULL;
-
-out_unlock:
- return ret;
+ return 0;
}
static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 789c645f004e..dbee34a058df 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -101,7 +101,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
continue;
}
- num_fences = min(entry->num_shared, 1u);
+ num_fences = max(entry->num_shared, 1u);
if (!ret) {
ret = dma_resv_reserve_fences(bo->base.resv,
num_fences);
@@ -154,10 +154,8 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
- if (entry->num_shared)
- dma_resv_add_shared_fence(bo->base.resv, fence);
- else
- dma_resv_add_excl_fence(bo->base.resv, fence);
+ dma_resv_add_fence(bo->base.resv, fence, entry->num_shared ?
+ DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE);
ttm_bo_move_to_lru_tail_unlocked(bo);
dma_resv_unlock(bo->base.resv);
}
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index 7d5a438180e9..65889b3caf50 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -644,3 +644,37 @@ ttm_kmap_iter_linear_io_fini(struct ttm_kmap_iter_linear_io *iter_io,
ttm_mem_io_free(bdev, mem);
}
+
+#if defined(CONFIG_DEBUG_FS)
+
+static int ttm_resource_manager_show(struct seq_file *m, void *unused)
+{
+ struct ttm_resource_manager *man =
+ (struct ttm_resource_manager *)m->private;
+ struct drm_printer p = drm_seq_file_printer(m);
+ ttm_resource_manager_debug(man, &p);
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(ttm_resource_manager);
+
+#endif
+
+/**
+ * ttm_resource_manager_create_debugfs - Create debugfs entry for specified
+ * resource manager.
+ * @man: The TTM resource manager for which the debugfs stats file will be created
+ * @parent: debugfs directory in which the file will reside
+ * @name: The filename to create.
+ *
+ * This function sets up a debugfs file that can be used to look
+ * at debug statistics of the specified ttm_resource_manager.
+ */
+void ttm_resource_manager_create_debugfs(struct ttm_resource_manager *man,
+ struct dentry *parent,
+ const char *name)
+{
+#if defined(CONFIG_DEBUG_FS)
+ debugfs_create_file(name, 0444, parent, man, &ttm_resource_manager_fops);
+#endif
+}
+EXPORT_SYMBOL(ttm_resource_manager_create_debugfs);
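
With this in place, a driver can replace a hand-rolled dump table (as qxl and radeon do earlier in this diff) with one call per resource manager; a minimal sketch for a hypothetical driver private priv and DRM minor minor:

    ttm_resource_manager_create_debugfs(ttm_manager_type(&priv->bdev, TTM_PL_VRAM),
                                        minor->debugfs_root, "vram_mm");
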
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index 961812d33827..2352e9640922 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -550,8 +550,8 @@ v3d_attach_fences_and_unlock_reservation(struct drm_file *file_priv,
for (i = 0; i < job->bo_count; i++) {
/* XXX: Use shared fences for read-only objects. */
- dma_resv_add_excl_fence(job->bo[i]->resv,
- job->done_fence);
+ dma_resv_add_fence(job->bo[i]->resv, job->done_fence,
+ DMA_RESV_USAGE_WRITE);
}
drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx);
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 594bd6bb00d2..9eaf304fc20d 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -546,7 +546,8 @@ vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
bo = to_vc4_bo(&exec->bo[i]->base);
bo->seqno = seqno;
- dma_resv_add_shared_fence(bo->base.base.resv, exec->fence);
+ dma_resv_add_fence(bo->base.base.resv, exec->fence,
+ DMA_RESV_USAGE_READ);
}
list_for_each_entry(bo, &exec->unref_list, unref_head) {
@@ -557,7 +558,8 @@ vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
bo = to_vc4_bo(&exec->rcl_write_bo[i]->base);
bo->write_seqno = seqno;
- dma_resv_add_excl_fence(bo->base.base.resv, exec->fence);
+ dma_resv_add_fence(bo->base.base.resv, exec->fence,
+ DMA_RESV_USAGE_WRITE);
}
}
diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c
index 9194cb52e706..2a58fc421cf6 100644
--- a/drivers/gpu/drm/vc4/vc4_hvs.c
+++ b/drivers/gpu/drm/vc4/vc4_hvs.c
@@ -611,6 +611,7 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
struct vc4_hvs *hvs = NULL;
int ret;
u32 dispctrl;
+ u32 reg;
hvs = devm_kzalloc(&pdev->dev, sizeof(*hvs), GFP_KERNEL);
if (!hvs)
@@ -682,6 +683,26 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
vc4->hvs = hvs;
+ reg = HVS_READ(SCALER_DISPECTRL);
+ reg &= ~SCALER_DISPECTRL_DSP2_MUX_MASK;
+ HVS_WRITE(SCALER_DISPECTRL,
+ reg | VC4_SET_FIELD(0, SCALER_DISPECTRL_DSP2_MUX));
+
+ reg = HVS_READ(SCALER_DISPCTRL);
+ reg &= ~SCALER_DISPCTRL_DSP3_MUX_MASK;
+ HVS_WRITE(SCALER_DISPCTRL,
+ reg | VC4_SET_FIELD(3, SCALER_DISPCTRL_DSP3_MUX));
+
+ reg = HVS_READ(SCALER_DISPEOLN);
+ reg &= ~SCALER_DISPEOLN_DSP4_MUX_MASK;
+ HVS_WRITE(SCALER_DISPEOLN,
+ reg | VC4_SET_FIELD(3, SCALER_DISPEOLN_DSP4_MUX));
+
+ reg = HVS_READ(SCALER_DISPDITHER);
+ reg &= ~SCALER_DISPDITHER_DSP5_MUX_MASK;
+ HVS_WRITE(SCALER_DISPDITHER,
+ reg | VC4_SET_FIELD(3, SCALER_DISPDITHER_DSP5_MUX));
+
dispctrl = HVS_READ(SCALER_DISPCTRL);
dispctrl |= SCALER_DISPCTRL_ENABLE;
@@ -689,10 +710,6 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
SCALER_DISPCTRL_DISPEIRQ(1) |
SCALER_DISPCTRL_DISPEIRQ(2);
- /* Set DSP3 (PV1) to use HVS channel 2, which would otherwise
- * be unused.
- */
- dispctrl &= ~SCALER_DISPCTRL_DSP3_MUX_MASK;
dispctrl &= ~(SCALER_DISPCTRL_DMAEIRQ |
SCALER_DISPCTRL_SLVWREIRQ |
SCALER_DISPCTRL_SLVRDEIRQ |
@@ -706,7 +723,6 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
SCALER_DISPCTRL_DSPEISLUR(1) |
SCALER_DISPCTRL_DSPEISLUR(2) |
SCALER_DISPCTRL_SCLEIRQ);
- dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_DSP3_MUX);
HVS_WRITE(SCALER_DISPCTRL, dispctrl);
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index 747a2d199eca..c169bd72e53b 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -283,13 +283,18 @@ static void vc5_hvs_pv_muxing_commit(struct vc4_dev *vc4,
for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+ unsigned int channel = vc4_state->assigned_channel;
if (!vc4_state->update_muxing)
continue;
switch (vc4_crtc->data->hvs_output) {
case 2:
- mux = (vc4_state->assigned_channel == 2) ? 0 : 1;
+ drm_WARN_ON(&vc4->base,
+ VC4_GET_FIELD(HVS_READ(SCALER_DISPCTRL),
+ SCALER_DISPCTRL_DSP3_MUX) == channel);
+
+ mux = (channel == 2) ? 0 : 1;
reg = HVS_READ(SCALER_DISPECTRL);
HVS_WRITE(SCALER_DISPECTRL,
(reg & ~SCALER_DISPECTRL_DSP2_MUX_MASK) |
@@ -297,10 +302,10 @@ static void vc5_hvs_pv_muxing_commit(struct vc4_dev *vc4,
break;
case 3:
- if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED)
+ if (channel == VC4_HVS_CHANNEL_DISABLED)
mux = 3;
else
- mux = vc4_state->assigned_channel;
+ mux = channel;
reg = HVS_READ(SCALER_DISPCTRL);
HVS_WRITE(SCALER_DISPCTRL,
@@ -309,10 +314,10 @@ static void vc5_hvs_pv_muxing_commit(struct vc4_dev *vc4,
break;
case 4:
- if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED)
+ if (channel == VC4_HVS_CHANNEL_DISABLED)
mux = 3;
else
- mux = vc4_state->assigned_channel;
+ mux = channel;
reg = HVS_READ(SCALER_DISPEOLN);
HVS_WRITE(SCALER_DISPEOLN,
@@ -322,10 +327,10 @@ static void vc5_hvs_pv_muxing_commit(struct vc4_dev *vc4,
break;
case 5:
- if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED)
+ if (channel == VC4_HVS_CHANNEL_DISABLED)
mux = 3;
else
- mux = vc4_state->assigned_channel;
+ mux = channel;
reg = HVS_READ(SCALER_DISPDITHER);
HVS_WRITE(SCALER_DISPDITHER,
@@ -434,6 +439,9 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
* requirements.
*/
clk_set_min_rate(hvs->core_clk, new_hvs_state->core_clock_rate);
+
+ drm_dbg(dev, "Core clock actual rate: %lu Hz\n",
+ clk_get_rate(hvs->core_clk));
}
}
@@ -817,9 +825,18 @@ static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
unsigned int matching_channels;
unsigned int channel;
+ drm_dbg(dev, "%s: Trying to find a channel.\n", crtc->name);
+
/* Nothing to do here, let's skip it */
- if (old_crtc_state->enable == new_crtc_state->enable)
+ if (old_crtc_state->enable == new_crtc_state->enable) {
+ if (new_crtc_state->enable)
+ drm_dbg(dev, "%s: Already enabled, reusing channel %d.\n",
+ crtc->name, new_vc4_crtc_state->assigned_channel);
+ else
+ drm_dbg(dev, "%s: Disabled, ignoring.\n", crtc->name);
+
continue;
+ }
/* Muxing will need to be modified, mark it as such */
new_vc4_crtc_state->update_muxing = true;
@@ -827,6 +844,10 @@ static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
/* If we're disabling our CRTC, we put back our channel */
if (!new_crtc_state->enable) {
channel = old_vc4_crtc_state->assigned_channel;
+
+ drm_dbg(dev, "%s: Disabling, Freeing channel %d\n",
+ crtc->name, channel);
+
hvs_new_state->fifo_state[channel].in_use = false;
new_vc4_crtc_state->assigned_channel = VC4_HVS_CHANNEL_DISABLED;
continue;
@@ -861,6 +882,8 @@ static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
return -EINVAL;
channel = ffs(matching_channels) - 1;
+
+ drm_dbg(dev, "Assigned HVS channel %d to CRTC %s\n", channel, crtc->name);
new_vc4_crtc_state->assigned_channel = channel;
unassigned_channels &= ~BIT(channel);
hvs_new_state->fifo_state[channel].in_use = true;
diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c
index 9809ca3e2945..82beb8c159f2 100644
--- a/drivers/gpu/drm/vc4/vc4_txp.c
+++ b/drivers/gpu/drm/vc4/vc4_txp.c
@@ -298,12 +298,18 @@ static void vc4_txp_connector_atomic_commit(struct drm_connector *conn,
if (WARN_ON(i == ARRAY_SIZE(drm_fmts)))
return;
- ctrl = TXP_GO | TXP_VSTART_AT_EOF | TXP_EI |
+ ctrl = TXP_GO | TXP_EI |
VC4_SET_FIELD(0xf, TXP_BYTE_ENABLE) |
VC4_SET_FIELD(txp_fmts[i], TXP_FORMAT);
if (fb->format->has_alpha)
ctrl |= TXP_ALPHA_ENABLE;
+ else
+ /*
+ * If TXP_ALPHA_ENABLE isn't set and TXP_ALPHA_INVERT is, the
+ * hardware will force the output padding to be 0xff.
+ */
+ ctrl |= TXP_ALPHA_INVERT;
gem = drm_fb_cma_get_gem_obj(fb, 0);
TXP_WRITE(TXP_DST_PTR, gem->paddr + fb->offsets[0]);
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
index 2ddbebca87d9..c2a879734d40 100644
--- a/drivers/gpu/drm/vgem/vgem_fence.c
+++ b/drivers/gpu/drm/vgem/vgem_fence.c
@@ -130,6 +130,7 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
struct vgem_file *vfile = file->driver_priv;
struct dma_resv *resv;
struct drm_gem_object *obj;
+ enum dma_resv_usage usage;
struct dma_fence *fence;
int ret;
@@ -151,7 +152,8 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
/* Check for a conflicting fence */
resv = obj->resv;
- if (!dma_resv_test_signaled(resv, arg->flags & VGEM_FENCE_WRITE)) {
+ usage = dma_resv_usage_rw(arg->flags & VGEM_FENCE_WRITE);
+ if (!dma_resv_test_signaled(resv, usage)) {
ret = -EBUSY;
goto err_fence;
}
@@ -159,12 +161,9 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
/* Expose the fence via the dma-buf */
dma_resv_lock(resv, NULL);
ret = dma_resv_reserve_fences(resv, 1);
- if (!ret) {
- if (arg->flags & VGEM_FENCE_WRITE)
- dma_resv_add_excl_fence(resv, fence);
- else
- dma_resv_add_shared_fence(resv, fence);
- }
+ if (!ret)
+ dma_resv_add_fence(resv, fence, arg->flags & VGEM_FENCE_WRITE ?
+ DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ);
dma_resv_unlock(resv);
/* Record the fence in our idr for later signaling */
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index 1820ca6cf673..580a78809836 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -250,7 +250,8 @@ void virtio_gpu_array_add_fence(struct virtio_gpu_object_array *objs,
int i;
for (i = 0; i < objs->nents; i++)
- dma_resv_add_excl_fence(objs->objs[i]->resv, fence);
+ dma_resv_add_fence(objs->objs[i]->resv, fence,
+ DMA_RESV_USAGE_WRITE);
}
void virtio_gpu_array_put_free(struct virtio_gpu_object_array *objs)
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 77743fd2c61a..f8d83358d2a0 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -518,9 +518,10 @@ static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
if (args->flags & VIRTGPU_WAIT_NOWAIT) {
- ret = dma_resv_test_signaled(obj->resv, true);
+ ret = dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ);
} else {
- ret = dma_resv_wait_timeout(obj->resv, true, true, timeout);
+ ret = dma_resv_wait_timeout(obj->resv, DMA_RESV_USAGE_READ,
+ true, timeout);
}
if (ret == 0)
ret = -EBUSY;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index fe13aa8b4a64..408ede1f967f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -528,8 +528,8 @@ static int vmw_user_bo_synccpu_grab(struct vmw_buffer_object *vmw_bo,
if (flags & drm_vmw_synccpu_allow_cs) {
long lret;
- lret = dma_resv_wait_timeout(bo->base.resv, true, true,
- nonblock ? 0 :
+ lret = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_READ,
+ true, nonblock ? 0 :
MAX_SCHEDULE_TIMEOUT);
if (!lret)
return -EBUSY;
@@ -758,7 +758,8 @@ void vmw_bo_fence_single(struct ttm_buffer_object *bo,
ret = dma_resv_reserve_fences(bo->base.resv, 1);
if (!ret)
- dma_resv_add_excl_fence(bo->base.resv, &fence->base);
+ dma_resv_add_fence(bo->base.resv, &fence->base,
+ DMA_RESV_USAGE_KERNEL);
else
/* Last resort fallback when we are OOM */
dma_fence_wait(&fence->base, false);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 791f9a5f3868..ffd975c14136 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1385,6 +1385,23 @@ static void vmw_remove(struct pci_dev *pdev)
vmw_driver_unload(dev);
}
+static void vmw_debugfs_resource_managers_init(struct vmw_private *vmw)
+{
+ struct drm_minor *minor = vmw->drm.primary;
+ struct dentry *root = minor->debugfs_root;
+
+ ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, TTM_PL_SYSTEM),
+ root, "system_ttm");
+ ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, TTM_PL_VRAM),
+ root, "vram_ttm");
+ ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_GMR),
+ root, "gmr_ttm");
+ ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_MOB),
+ root, "mob_ttm");
+ ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_SYSTEM),
+ root, "system_mob_ttm");
+}
+
static unsigned long
vmw_get_unmapped_area(struct file *file, unsigned long uaddr,
unsigned long len, unsigned long pgoff,
@@ -1632,6 +1649,7 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_unload;
vmw_debugfs_gem_init(vmw);
+ vmw_debugfs_resource_managers_init(vmw);
return 0;
out_unload:
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 626067104751..a7d62a4eb47b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -1161,11 +1161,6 @@ int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
*num_prefault = __KERNEL_DIV_ROUND_UP(last_cleaned - res_start,
PAGE_SIZE);
vmw_bo_fence_single(bo, NULL);
- if (bo->moving)
- dma_fence_put(bo->moving);
-
- return dma_resv_get_singleton(bo->base.resv, false,
- &bo->moving);
}
return 0;
diff --git a/drivers/infiniband/core/umem_dmabuf.c b/drivers/infiniband/core/umem_dmabuf.c
index d32cd7538835..fce80a4a5147 100644
--- a/drivers/infiniband/core/umem_dmabuf.c
+++ b/drivers/infiniband/core/umem_dmabuf.c
@@ -67,7 +67,8 @@ wait_fence:
* may be not up-to-date. Wait for the exporter to finish
* the migration.
*/
- return dma_resv_wait_timeout(umem_dmabuf->attach->dmabuf->resv, false,
+ return dma_resv_wait_timeout(umem_dmabuf->attach->dmabuf->resv,
+ DMA_RESV_USAGE_KERNEL,
false, MAX_SCHEDULE_TIMEOUT);
}
EXPORT_SYMBOL(ib_umem_dmabuf_map_pages);
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index a16b74f32aa9..176ed71d20c0 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -507,7 +507,6 @@ int of_platform_default_populate(struct device_node *root,
}
EXPORT_SYMBOL_GPL(of_platform_default_populate);
-#ifndef CONFIG_PPC
static const struct of_device_id reserved_mem_matches[] = {
{ .compatible = "qcom,rmtfs-mem" },
{ .compatible = "qcom,cmd-db" },
@@ -527,26 +526,73 @@ static int __init of_platform_default_populate_init(void)
if (!of_have_populated_dt())
return -ENODEV;
- /*
- * Handle certain compatibles explicitly, since we don't want to create
- * platform_devices for every node in /reserved-memory with a
- * "compatible",
- */
- for_each_matching_node(node, reserved_mem_matches)
- of_platform_device_create(node, NULL, NULL);
+ if (IS_ENABLED(CONFIG_PPC)) {
+ struct device_node *boot_display = NULL;
+ struct platform_device *dev;
+ int ret;
+
+ /* Check if we have a MacOS display without a node spec */
+ if (of_get_property(of_chosen, "linux,bootx-noscreen", NULL)) {
+ /*
+ * The old code tried to work out which node was the MacOS
+ * display based on the address. I'm dropping that since the
+ * lack of a node spec only happens with old BootX versions
+ * (users can update) and with this code, they'll still get
+ * a display (just not the palette hacks).
+ */
+ dev = platform_device_alloc("bootx-noscreen", 0);
+ if (WARN_ON(!dev))
+ return -ENOMEM;
+ ret = platform_device_add(dev);
+ if (WARN_ON(ret)) {
+ platform_device_put(dev);
+ return ret;
+ }
+ }
- node = of_find_node_by_path("/firmware");
- if (node) {
- of_platform_populate(node, NULL, NULL, NULL);
- of_node_put(node);
- }
+ /*
+ * For OF framebuffers, first create the device for the boot display,
+ * then for the other framebuffers. Only fail for the boot display;
+ * ignore errors for the rest.
+ */
+ for_each_node_by_type(node, "display") {
+ if (!of_get_property(node, "linux,opened", NULL) ||
+ !of_get_property(node, "linux,boot-display", NULL))
+ continue;
+ dev = of_platform_device_create(node, "of-display", NULL);
+ if (WARN_ON(!dev))
+ return -ENOMEM;
+ boot_display = node;
+ break;
+ }
+ for_each_node_by_type(node, "display") {
+ if (!of_get_property(node, "linux,opened", NULL) || node == boot_display)
+ continue;
+ of_platform_device_create(node, "of-display", NULL);
+ }
- node = of_get_compatible_child(of_chosen, "simple-framebuffer");
- of_platform_device_create(node, NULL, NULL);
- of_node_put(node);
+ } else {
+ /*
+ * Handle certain compatibles explicitly, since we don't want to create
+ * platform_devices for every node in /reserved-memory with a
+ * "compatible",
+ */
+ for_each_matching_node(node, reserved_mem_matches)
+ of_platform_device_create(node, NULL, NULL);
+
+ node = of_find_node_by_path("/firmware");
+ if (node) {
+ of_platform_populate(node, NULL, NULL, NULL);
+ of_node_put(node);
+ }
- /* Populate everything else. */
- of_platform_default_populate(NULL, NULL, NULL);
+ node = of_get_compatible_child(of_chosen, "simple-framebuffer");
+ of_platform_device_create(node, NULL, NULL);
+ of_node_put(node);
+
+ /* Populate everything else. */
+ of_platform_default_populate(NULL, NULL, NULL);
+ }
return 0;
}
@@ -558,7 +604,6 @@ static int __init of_platform_sync_state_init(void)
return 0;
}
late_initcall_sync(of_platform_sync_state_init);
-#endif
int of_platform_device_destroy(struct device *dev, void *data)
{
diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c
index d8589afac272..c4d60cdbf247 100644
--- a/drivers/s390/cio/vfio_ccw_ops.c
+++ b/drivers/s390/cio/vfio_ccw_ops.c
@@ -656,17 +656,12 @@ struct mdev_driver vfio_ccw_mdev_driver = {
},
.probe = vfio_ccw_mdev_probe,
.remove = vfio_ccw_mdev_remove,
-};
-
-static const struct mdev_parent_ops vfio_ccw_mdev_ops = {
- .owner = THIS_MODULE,
- .device_driver = &vfio_ccw_mdev_driver,
.supported_type_groups = mdev_type_groups,
};
int vfio_ccw_mdev_reg(struct subchannel *sch)
{
- return mdev_register_device(&sch->dev, &vfio_ccw_mdev_ops);
+ return mdev_register_device(&sch->dev, &vfio_ccw_mdev_driver);
}
void vfio_ccw_mdev_unreg(struct subchannel *sch)
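
vfio_ap below gets the identical conversion: the mdev_parent_ops indirection disappears, the mdev_driver itself carries supported_type_groups, and it is handed straight to mdev_register_device(). As a hedged sketch of the resulting pattern for any vendor driver (all "my_*" names are hypothetical):

    static struct mdev_driver my_mdev_driver = {
            .driver = {
                    .name  = "my-mdev",
                    .owner = THIS_MODULE,
            },
            .probe  = my_mdev_probe,
            .remove = my_mdev_remove,
            .supported_type_groups = my_type_groups,
    };

    static int my_parent_setup(struct device *parent_dev)
    {
            int ret;

            ret = mdev_register_driver(&my_mdev_driver);
            if (ret)
                    return ret;
            ret = mdev_register_device(parent_dev, &my_mdev_driver);
            if (ret)
                    mdev_unregister_driver(&my_mdev_driver);
            return ret;
    }

This mirrors the vfio_ap_mdev_register() flow in the next hunk: register the driver first, then the parent device, unwinding the driver on failure.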
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
index 6e08d04b605d..ee0a3bf8f476 100644
--- a/drivers/s390/crypto/vfio_ap_ops.c
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -1496,12 +1496,7 @@ static struct mdev_driver vfio_ap_matrix_driver = {
},
.probe = vfio_ap_mdev_probe,
.remove = vfio_ap_mdev_remove,
-};
-
-static const struct mdev_parent_ops vfio_ap_matrix_ops = {
- .owner = THIS_MODULE,
- .device_driver = &vfio_ap_matrix_driver,
- .supported_type_groups = vfio_ap_mdev_type_groups,
+ .supported_type_groups = vfio_ap_mdev_type_groups,
};
int vfio_ap_mdev_register(void)
@@ -1514,7 +1509,7 @@ int vfio_ap_mdev_register(void)
if (ret)
return ret;
- ret = mdev_register_device(&matrix_dev->device, &vfio_ap_matrix_ops);
+ ret = mdev_register_device(&matrix_dev->device, &vfio_ap_matrix_driver);
if (ret)
goto err_driver;
return 0;
diff --git a/drivers/vfio/mdev/Makefile b/drivers/vfio/mdev/Makefile
index ff9ecd802125..7c236ba1b90e 100644
--- a/drivers/vfio/mdev/Makefile
+++ b/drivers/vfio/mdev/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
-mdev-y := mdev_core.o mdev_sysfs.o mdev_driver.o vfio_mdev.o
+mdev-y := mdev_core.o mdev_sysfs.o mdev_driver.o
obj-$(CONFIG_VFIO_MDEV) += mdev.o
diff --git a/drivers/vfio/mdev/mdev_core.c b/drivers/vfio/mdev/mdev_core.c
index b314101237fe..b8b9e7911e55 100644
--- a/drivers/vfio/mdev/mdev_core.c
+++ b/drivers/vfio/mdev/mdev_core.c
@@ -89,17 +89,10 @@ void mdev_release_parent(struct kref *kref)
static void mdev_device_remove_common(struct mdev_device *mdev)
{
struct mdev_parent *parent = mdev->type->parent;
- int ret;
mdev_remove_sysfs_files(mdev);
device_del(&mdev->dev);
lockdep_assert_held(&parent->unreg_sem);
- if (parent->ops->remove) {
- ret = parent->ops->remove(mdev);
- if (ret)
- dev_err(&mdev->dev, "Remove failed: err=%d\n", ret);
- }
-
/* Balances with device_initialize() */
put_device(&mdev->dev);
}
@@ -116,12 +109,12 @@ static int mdev_device_remove_cb(struct device *dev, void *data)
/*
* mdev_register_device : Register a device
* @dev: device structure representing parent device.
- * @ops: Parent device operation structure to be registered.
+ * @mdev_driver: Device driver to bind to the newly created mdev
*
* Add device to list of registered parent devices.
* Returns a negative value on error, otherwise 0.
*/
-int mdev_register_device(struct device *dev, const struct mdev_parent_ops *ops)
+int mdev_register_device(struct device *dev, struct mdev_driver *mdev_driver)
{
int ret;
struct mdev_parent *parent;
@@ -129,9 +122,7 @@ int mdev_register_device(struct device *dev, const struct mdev_parent_ops *ops)
char *envp[] = { env_string, NULL };
/* check for mandatory ops */
- if (!ops || !ops->supported_type_groups)
- return -EINVAL;
- if (!ops->device_driver && (!ops->create || !ops->remove))
+ if (!mdev_driver->supported_type_groups)
return -EINVAL;
dev = get_device(dev);
@@ -158,7 +149,7 @@ int mdev_register_device(struct device *dev, const struct mdev_parent_ops *ops)
init_rwsem(&parent->unreg_sem);
parent->dev = dev;
- parent->ops = ops;
+ parent->mdev_driver = mdev_driver;
if (!mdev_bus_compat_class) {
mdev_bus_compat_class = class_compat_register("mdev_bus");
@@ -256,7 +247,7 @@ int mdev_device_create(struct mdev_type *type, const guid_t *uuid)
int ret;
struct mdev_device *mdev, *tmp;
struct mdev_parent *parent = type->parent;
- struct mdev_driver *drv = parent->ops->device_driver;
+ struct mdev_driver *drv = parent->mdev_driver;
mutex_lock(&mdev_list_lock);
@@ -278,7 +269,7 @@ int mdev_device_create(struct mdev_type *type, const guid_t *uuid)
mdev->dev.parent = parent->dev;
mdev->dev.bus = &mdev_bus_type;
mdev->dev.release = mdev_device_release;
- mdev->dev.groups = parent->ops->mdev_attr_groups;
+ mdev->dev.groups = mdev_device_groups;
mdev->type = type;
/* Pairs with the put in mdev_device_release() */
kobject_get(&type->kobj);
@@ -297,18 +288,10 @@ int mdev_device_create(struct mdev_type *type, const guid_t *uuid)
goto out_put_device;
}
- if (parent->ops->create) {
- ret = parent->ops->create(mdev);
- if (ret)
- goto out_unlock;
- }
-
ret = device_add(&mdev->dev);
if (ret)
- goto out_remove;
+ goto out_unlock;
- if (!drv)
- drv = &vfio_mdev_driver;
ret = device_driver_attach(&drv->driver, &mdev->dev);
if (ret)
goto out_del;
@@ -325,9 +308,6 @@ int mdev_device_create(struct mdev_type *type, const guid_t *uuid)
out_del:
device_del(&mdev->dev);
-out_remove:
- if (parent->ops->remove)
- parent->ops->remove(mdev);
out_unlock:
up_read(&parent->unreg_sem);
out_put_device:
@@ -370,28 +350,14 @@ int mdev_device_remove(struct mdev_device *mdev)
static int __init mdev_init(void)
{
- int rc;
-
- rc = mdev_bus_register();
- if (rc)
- return rc;
- rc = mdev_register_driver(&vfio_mdev_driver);
- if (rc)
- goto err_bus;
- return 0;
-err_bus:
- mdev_bus_unregister();
- return rc;
+ return bus_register(&mdev_bus_type);
}
static void __exit mdev_exit(void)
{
- mdev_unregister_driver(&vfio_mdev_driver);
-
if (mdev_bus_compat_class)
class_compat_unregister(mdev_bus_compat_class);
-
- mdev_bus_unregister();
+ bus_unregister(&mdev_bus_type);
}
subsys_initcall(mdev_init)
diff --git a/drivers/vfio/mdev/mdev_driver.c b/drivers/vfio/mdev/mdev_driver.c
index 7927ed4f1711..9c2af59809e2 100644
--- a/drivers/vfio/mdev/mdev_driver.c
+++ b/drivers/vfio/mdev/mdev_driver.c
@@ -74,13 +74,3 @@ void mdev_unregister_driver(struct mdev_driver *drv)
driver_unregister(&drv->driver);
}
EXPORT_SYMBOL(mdev_unregister_driver);
-
-int mdev_bus_register(void)
-{
- return bus_register(&mdev_bus_type);
-}
-
-void mdev_bus_unregister(void)
-{
- bus_unregister(&mdev_bus_type);
-}
diff --git a/drivers/vfio/mdev/mdev_private.h b/drivers/vfio/mdev/mdev_private.h
index afbad7b0a14a..7c9fc79f3d83 100644
--- a/drivers/vfio/mdev/mdev_private.h
+++ b/drivers/vfio/mdev/mdev_private.h
@@ -15,7 +15,7 @@ void mdev_bus_unregister(void);
struct mdev_parent {
struct device *dev;
- const struct mdev_parent_ops *ops;
+ struct mdev_driver *mdev_driver;
struct kref ref;
struct list_head next;
struct kset *mdev_types_kset;
@@ -32,13 +32,13 @@ struct mdev_type {
unsigned int type_group_id;
};
+extern const struct attribute_group *mdev_device_groups[];
+
#define to_mdev_type_attr(_attr) \
container_of(_attr, struct mdev_type_attribute, attr)
#define to_mdev_type(_kobj) \
container_of(_kobj, struct mdev_type, kobj)
-extern struct mdev_driver vfio_mdev_driver;
-
int parent_create_sysfs_files(struct mdev_parent *parent);
void parent_remove_sysfs_files(struct mdev_parent *parent);
diff --git a/drivers/vfio/mdev/mdev_sysfs.c b/drivers/vfio/mdev/mdev_sysfs.c
index f5cf1931c54e..0ccfeb3dda24 100644
--- a/drivers/vfio/mdev/mdev_sysfs.c
+++ b/drivers/vfio/mdev/mdev_sysfs.c
@@ -97,7 +97,7 @@ static struct mdev_type *add_mdev_supported_type(struct mdev_parent *parent,
{
struct mdev_type *type;
struct attribute_group *group =
- parent->ops->supported_type_groups[type_group_id];
+ parent->mdev_driver->supported_type_groups[type_group_id];
int ret;
if (!group->name) {
@@ -154,7 +154,7 @@ attr_create_failed:
static void remove_mdev_supported_type(struct mdev_type *type)
{
struct attribute_group *group =
- type->parent->ops->supported_type_groups[type->type_group_id];
+ type->parent->mdev_driver->supported_type_groups[type->type_group_id];
sysfs_remove_files(&type->kobj,
(const struct attribute **)group->attrs);
@@ -168,7 +168,7 @@ static int add_mdev_supported_type_groups(struct mdev_parent *parent)
{
int i;
- for (i = 0; parent->ops->supported_type_groups[i]; i++) {
+ for (i = 0; parent->mdev_driver->supported_type_groups[i]; i++) {
struct mdev_type *type;
type = add_mdev_supported_type(parent, i);
@@ -197,7 +197,6 @@ void parent_remove_sysfs_files(struct mdev_parent *parent)
remove_mdev_supported_type(type);
}
- sysfs_remove_groups(&parent->dev->kobj, parent->ops->dev_attr_groups);
kset_unregister(parent->mdev_types_kset);
}
@@ -213,17 +212,10 @@ int parent_create_sysfs_files(struct mdev_parent *parent)
INIT_LIST_HEAD(&parent->type_list);
- ret = sysfs_create_groups(&parent->dev->kobj,
- parent->ops->dev_attr_groups);
- if (ret)
- goto create_err;
-
ret = add_mdev_supported_type_groups(parent);
if (ret)
- sysfs_remove_groups(&parent->dev->kobj,
- parent->ops->dev_attr_groups);
- else
- return ret;
+ goto create_err;
+ return 0;
create_err:
kset_unregister(parent->mdev_types_kset);
@@ -252,11 +244,20 @@ static ssize_t remove_store(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_WO(remove);
-static const struct attribute *mdev_device_attrs[] = {
+static struct attribute *mdev_device_attrs[] = {
&dev_attr_remove.attr,
NULL,
};
+static const struct attribute_group mdev_device_group = {
+ .attrs = mdev_device_attrs,
+};
+
+const struct attribute_group *mdev_device_groups[] = {
+ &mdev_device_group,
+ NULL
+};
+
int mdev_create_sysfs_files(struct mdev_device *mdev)
{
struct mdev_type *type = mdev->type;
@@ -270,15 +271,8 @@ int mdev_create_sysfs_files(struct mdev_device *mdev)
ret = sysfs_create_link(kobj, &type->kobj, "mdev_type");
if (ret)
goto type_link_failed;
-
- ret = sysfs_create_files(kobj, mdev_device_attrs);
- if (ret)
- goto create_files_failed;
-
return ret;
-create_files_failed:
- sysfs_remove_link(kobj, "mdev_type");
type_link_failed:
sysfs_remove_link(mdev->type->devices_kobj, dev_name(&mdev->dev));
return ret;
@@ -288,7 +282,6 @@ void mdev_remove_sysfs_files(struct mdev_device *mdev)
{
struct kobject *kobj = &mdev->dev.kobj;
- sysfs_remove_files(kobj, mdev_device_attrs);
sysfs_remove_link(kobj, "mdev_type");
sysfs_remove_link(mdev->type->devices_kobj, dev_name(&mdev->dev));
}
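
The "remove" attribute used to be created with sysfs_create_files() after device_add() and torn down by hand; now it rides in mdev_device_groups, which mdev_device_create() assigns to dev->groups before device_add(), so the driver core creates and removes the file atomically with the device. The open-coded arrays above are almost the standard shorthand; a sketch of the equivalent:

    static struct attribute *mdev_device_attrs[] = {
            &dev_attr_remove.attr,
            NULL,
    };
    ATTRIBUTE_GROUPS(mdev_device);  /* emits mdev_device_group and mdev_device_groups[] */

The reason the patch spells it out instead: ATTRIBUTE_GROUPS() defines the array static, while mdev_device_groups must stay visible to mdev_core.c through the extern added to mdev_private.h above.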
diff --git a/drivers/vfio/mdev/vfio_mdev.c b/drivers/vfio/mdev/vfio_mdev.c
deleted file mode 100644
index a90e24b0c851..000000000000
--- a/drivers/vfio/mdev/vfio_mdev.c
+++ /dev/null
@@ -1,152 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * VFIO based driver for Mediated device
- *
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
- * Author: Neo Jia <cjia@nvidia.com>
- * Kirti Wankhede <kwankhede@nvidia.com>
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/vfio.h>
-#include <linux/mdev.h>
-
-#include "mdev_private.h"
-
-static int vfio_mdev_open_device(struct vfio_device *core_vdev)
-{
- struct mdev_device *mdev = to_mdev_device(core_vdev->dev);
- struct mdev_parent *parent = mdev->type->parent;
-
- if (unlikely(!parent->ops->open_device))
- return 0;
-
- return parent->ops->open_device(mdev);
-}
-
-static void vfio_mdev_close_device(struct vfio_device *core_vdev)
-{
- struct mdev_device *mdev = to_mdev_device(core_vdev->dev);
- struct mdev_parent *parent = mdev->type->parent;
-
- if (likely(parent->ops->close_device))
- parent->ops->close_device(mdev);
-}
-
-static long vfio_mdev_unlocked_ioctl(struct vfio_device *core_vdev,
- unsigned int cmd, unsigned long arg)
-{
- struct mdev_device *mdev = to_mdev_device(core_vdev->dev);
- struct mdev_parent *parent = mdev->type->parent;
-
- if (unlikely(!parent->ops->ioctl))
- return 0;
-
- return parent->ops->ioctl(mdev, cmd, arg);
-}
-
-static ssize_t vfio_mdev_read(struct vfio_device *core_vdev, char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct mdev_device *mdev = to_mdev_device(core_vdev->dev);
- struct mdev_parent *parent = mdev->type->parent;
-
- if (unlikely(!parent->ops->read))
- return -EINVAL;
-
- return parent->ops->read(mdev, buf, count, ppos);
-}
-
-static ssize_t vfio_mdev_write(struct vfio_device *core_vdev,
- const char __user *buf, size_t count,
- loff_t *ppos)
-{
- struct mdev_device *mdev = to_mdev_device(core_vdev->dev);
- struct mdev_parent *parent = mdev->type->parent;
-
- if (unlikely(!parent->ops->write))
- return -EINVAL;
-
- return parent->ops->write(mdev, buf, count, ppos);
-}
-
-static int vfio_mdev_mmap(struct vfio_device *core_vdev,
- struct vm_area_struct *vma)
-{
- struct mdev_device *mdev = to_mdev_device(core_vdev->dev);
- struct mdev_parent *parent = mdev->type->parent;
-
- if (unlikely(!parent->ops->mmap))
- return -EINVAL;
-
- return parent->ops->mmap(mdev, vma);
-}
-
-static void vfio_mdev_request(struct vfio_device *core_vdev, unsigned int count)
-{
- struct mdev_device *mdev = to_mdev_device(core_vdev->dev);
- struct mdev_parent *parent = mdev->type->parent;
-
- if (parent->ops->request)
- parent->ops->request(mdev, count);
- else if (count == 0)
- dev_notice(mdev_dev(mdev),
- "No mdev vendor driver request callback support, blocked until released by user\n");
-}
-
-static const struct vfio_device_ops vfio_mdev_dev_ops = {
- .name = "vfio-mdev",
- .open_device = vfio_mdev_open_device,
- .close_device = vfio_mdev_close_device,
- .ioctl = vfio_mdev_unlocked_ioctl,
- .read = vfio_mdev_read,
- .write = vfio_mdev_write,
- .mmap = vfio_mdev_mmap,
- .request = vfio_mdev_request,
-};
-
-static int vfio_mdev_probe(struct mdev_device *mdev)
-{
- struct vfio_device *vdev;
- int ret;
-
- vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
- if (!vdev)
- return -ENOMEM;
-
- vfio_init_group_dev(vdev, &mdev->dev, &vfio_mdev_dev_ops);
- ret = vfio_register_emulated_iommu_dev(vdev);
- if (ret)
- goto out_uninit;
-
- dev_set_drvdata(&mdev->dev, vdev);
- return 0;
-
-out_uninit:
- vfio_uninit_group_dev(vdev);
- kfree(vdev);
- return ret;
-}
-
-static void vfio_mdev_remove(struct mdev_device *mdev)
-{
- struct vfio_device *vdev = dev_get_drvdata(&mdev->dev);
-
- vfio_unregister_group_dev(vdev);
- vfio_uninit_group_dev(vdev);
- kfree(vdev);
-}
-
-struct mdev_driver vfio_mdev_driver = {
- .driver = {
- .name = "vfio_mdev",
- .owner = THIS_MODULE,
- .mod_name = KBUILD_MODNAME,
- },
- .probe = vfio_mdev_probe,
- .remove = vfio_mdev_remove,
-};
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index 2fc1b80a26ad..c4e91715ef00 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -86,10 +86,6 @@
* - fbcon state itself is protected by the console_lock, and the code does a
* pretty good job at making sure that lock is held everywhere it's needed.
*
- * - access to the registered_fb array is entirely unprotected. This should use
- * proper object lifetime handling, i.e. get/put_fb_info. This also means
- * switching from indices to proper pointers for fb_info everywhere.
- *
* - fbcon doesn't bother with fb_lock/unlock at all. This is buggy, since it
* means concurrent access to the same fbdev from both fbcon and userspace
* will blow up. To fix this all fbcon calls from fbmem.c need to be moved out
@@ -107,9 +103,23 @@ enum {
static struct fbcon_display fb_display[MAX_NR_CONSOLES];
+struct fb_info *fbcon_registered_fb[FB_MAX];
+int fbcon_num_registered_fb;
+
+#define fbcon_for_each_registered_fb(i) \
+ for (i = 0; WARN_CONSOLE_UNLOCKED(), i < FB_MAX; i++) \
+ if (!fbcon_registered_fb[i]) {} else
+
static signed char con2fb_map[MAX_NR_CONSOLES];
static signed char con2fb_map_boot[MAX_NR_CONSOLES];
+static struct fb_info *fbcon_info_from_console(int console)
+{
+ WARN_CONSOLE_UNLOCKED();
+
+ return fbcon_registered_fb[con2fb_map[console]];
+}
+
static int logo_lines;
/* logo_shown is an index to vc_cons when >= 0; otherwise follows FBCON_LOGO
enums. */
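
fbcon now keeps its own registry instead of reaching into fbmem's registered_fb[], with console_lock as the guard; note how the comma operator in the macro re-asserts the lock on every iteration. Illustrative use, assuming the caller holds console_lock:

    int i;

    fbcon_for_each_registered_fb(i)
            pr_info("fbcon: fb%d -> %s\n", i,
                    fbcon_registered_fb[i]->fix.id);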
@@ -163,39 +173,19 @@ static int fbcon_cursor_noblink;
* Interface used by the world
*/
-static const char *fbcon_startup(void);
-static void fbcon_init(struct vc_data *vc, int init);
-static void fbcon_deinit(struct vc_data *vc);
-static void fbcon_clear(struct vc_data *vc, int sy, int sx, int height,
- int width);
-static void fbcon_putc(struct vc_data *vc, int c, int ypos, int xpos);
-static void fbcon_putcs(struct vc_data *vc, const unsigned short *s,
- int count, int ypos, int xpos);
static void fbcon_clear_margins(struct vc_data *vc, int bottom_only);
-static void fbcon_cursor(struct vc_data *vc, int mode);
-static void fbcon_bmove(struct vc_data *vc, int sy, int sx, int dy, int dx,
- int height, int width);
-static int fbcon_switch(struct vc_data *vc);
-static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch);
static void fbcon_set_palette(struct vc_data *vc, const unsigned char *table);
/*
* Internal routines
*/
-static __inline__ void ywrap_up(struct vc_data *vc, int count);
-static __inline__ void ywrap_down(struct vc_data *vc, int count);
-static __inline__ void ypan_up(struct vc_data *vc, int count);
-static __inline__ void ypan_down(struct vc_data *vc, int count);
-static void fbcon_bmove_rec(struct vc_data *vc, struct fbcon_display *p, int sy, int sx,
- int dy, int dx, int height, int width, u_int y_break);
static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var,
int unit);
static void fbcon_redraw_move(struct vc_data *vc, struct fbcon_display *p,
int line, int count, int dy);
static void fbcon_modechanged(struct fb_info *info);
static void fbcon_set_all_vcs(struct fb_info *info);
-static void fbcon_start(void);
-static void fbcon_exit(void);
+
static struct device *fbcon_device;
#ifdef CONFIG_FRAMEBUFFER_CONSOLE_ROTATION
@@ -218,7 +208,7 @@ static void fbcon_rotate(struct fb_info *info, u32 rotate)
if (!ops || ops->currcon == -1)
return;
- fb_info = registered_fb[con2fb_map[ops->currcon]];
+ fb_info = fbcon_info_from_console(ops->currcon);
if (info == fb_info) {
struct fbcon_display *p = &fb_display[ops->currcon];
@@ -245,7 +235,7 @@ static void fbcon_rotate_all(struct fb_info *info, u32 rotate)
for (i = first_fb_vc; i <= last_fb_vc; i++) {
vc = vc_cons[i].d;
if (!vc || vc->vc_mode != KD_TEXT ||
- registered_fb[con2fb_map[i]] != info)
+ fbcon_info_from_console(i) != info)
continue;
p = &fb_display[vc->vc_num];
@@ -357,8 +347,8 @@ static int get_color(struct vc_data *vc, struct fb_info *info,
static void fb_flashcursor(struct work_struct *work)
{
- struct fb_info *info = container_of(work, struct fb_info, queue);
- struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_ops *ops = container_of(work, struct fbcon_ops, cursor_work.work);
+ struct fb_info *info;
struct vc_data *vc = NULL;
int c;
int mode;
@@ -371,11 +361,14 @@ static void fb_flashcursor(struct work_struct *work)
if (ret == 0)
return;
- if (ops && ops->currcon != -1)
+ /* protected by console_lock */
+ info = ops->info;
+
+ if (ops->currcon != -1)
vc = vc_cons[ops->currcon].d;
if (!vc || !con_is_visible(vc) ||
- registered_fb[con2fb_map[vc->vc_num]] != info ||
+ fbcon_info_from_console(vc->vc_num) != info ||
vc->vc_deccm != 1) {
console_unlock();
return;
@@ -387,42 +380,25 @@ static void fb_flashcursor(struct work_struct *work)
ops->cursor(vc, info, mode, get_color(vc, info, c, 1),
get_color(vc, info, c, 0));
console_unlock();
-}
-static void cursor_timer_handler(struct timer_list *t)
-{
- struct fbcon_ops *ops = from_timer(ops, t, cursor_timer);
- struct fb_info *info = ops->info;
-
- queue_work(system_power_efficient_wq, &info->queue);
- mod_timer(&ops->cursor_timer, jiffies + ops->cur_blink_jiffies);
+ queue_delayed_work(system_power_efficient_wq, &ops->cursor_work,
+ ops->cur_blink_jiffies);
}
-static void fbcon_add_cursor_timer(struct fb_info *info)
+static void fbcon_add_cursor_work(struct fb_info *info)
{
struct fbcon_ops *ops = info->fbcon_par;
- if ((!info->queue.func || info->queue.func == fb_flashcursor) &&
- !(ops->flags & FBCON_FLAGS_CURSOR_TIMER) &&
- !fbcon_cursor_noblink) {
- if (!info->queue.func)
- INIT_WORK(&info->queue, fb_flashcursor);
-
- timer_setup(&ops->cursor_timer, cursor_timer_handler, 0);
- mod_timer(&ops->cursor_timer, jiffies + ops->cur_blink_jiffies);
- ops->flags |= FBCON_FLAGS_CURSOR_TIMER;
- }
+ if (!fbcon_cursor_noblink)
+ queue_delayed_work(system_power_efficient_wq, &ops->cursor_work,
+ ops->cur_blink_jiffies);
}
-static void fbcon_del_cursor_timer(struct fb_info *info)
+static void fbcon_del_cursor_work(struct fb_info *info)
{
struct fbcon_ops *ops = info->fbcon_par;
- if (info->queue.func == fb_flashcursor &&
- ops->flags & FBCON_FLAGS_CURSOR_TIMER) {
- del_timer_sync(&ops->cursor_timer);
- ops->flags &= ~FBCON_FLAGS_CURSOR_TIMER;
- }
+ cancel_delayed_work_sync(&ops->cursor_work);
}
#ifndef MODULE
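
The old timer-plus-work pair (timer fires, queues the work, re-arms itself) collapses into one self-rearming delayed_work: fb_flashcursor() re-queues itself as its last step, fbcon_add_cursor_work() kicks the first run, and cancel_delayed_work_sync() stops both a pending delay and a run already in flight. Distilled to a sketch:

    static void my_blink(struct work_struct *work)
    {
            struct fbcon_ops *ops =
                    container_of(work, struct fbcon_ops, cursor_work.work);

            /* ... toggle the cursor under console_lock ... */

            /* re-arm: replaces the mod_timer() in the deleted handler */
            queue_delayed_work(system_power_efficient_wq, &ops->cursor_work,
                               ops->cur_blink_jiffies);
    }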
@@ -540,7 +516,7 @@ static int do_fbcon_takeover(int show_logo)
{
int err, i;
- if (!num_registered_fb)
+ if (!fbcon_num_registered_fb)
return -ENODEV;
if (!show_logo)
@@ -602,7 +578,7 @@ static void fbcon_prepare_logo(struct vc_data *vc, struct fb_info *info,
save = kmalloc(array3_size(logo_lines, new_cols, 2),
GFP_KERNEL);
if (save) {
- int i = cols < new_cols ? cols : new_cols;
+ int i = min(cols, new_cols);
scr_memsetw(save, erase, array3_size(logo_lines, new_cols, 2));
r = q - step;
for (cnt = 0; cnt < logo_lines; cnt++, r += i)
@@ -703,87 +679,95 @@ static int fbcon_invalid_charcount(struct fb_info *info, unsigned charcount)
#endif /* CONFIG_MISC_TILEBLITTING */
-
-static int con2fb_acquire_newinfo(struct vc_data *vc, struct fb_info *info,
- int unit, int oldidx)
+static void fbcon_release(struct fb_info *info)
{
- struct fbcon_ops *ops = NULL;
- int err = 0;
+ lock_fb_info(info);
+ if (info->fbops->fb_release)
+ info->fbops->fb_release(info, 0);
+ unlock_fb_info(info);
- if (!try_module_get(info->fbops->owner))
- err = -ENODEV;
+ module_put(info->fbops->owner);
- if (!err && info->fbops->fb_open &&
- info->fbops->fb_open(info, 0))
- err = -ENODEV;
+ if (info->fbcon_par) {
+ struct fbcon_ops *ops = info->fbcon_par;
- if (!err) {
- ops = kzalloc(sizeof(struct fbcon_ops), GFP_KERNEL);
- if (!ops)
- err = -ENOMEM;
+ fbcon_del_cursor_work(info);
+ kfree(ops->cursor_state.mask);
+ kfree(ops->cursor_data);
+ kfree(ops->cursor_src);
+ kfree(ops->fontbuffer);
+ kfree(info->fbcon_par);
+ info->fbcon_par = NULL;
}
+}
- if (!err) {
- ops->cur_blink_jiffies = HZ / 5;
- ops->info = info;
- info->fbcon_par = ops;
+static int fbcon_open(struct fb_info *info)
+{
+ struct fbcon_ops *ops;
- if (vc)
- set_blitting_type(vc, info);
- }
+ if (!try_module_get(info->fbops->owner))
+ return -ENODEV;
- if (err) {
- con2fb_map[unit] = oldidx;
+ lock_fb_info(info);
+ if (info->fbops->fb_open &&
+ info->fbops->fb_open(info, 0)) {
+ unlock_fb_info(info);
module_put(info->fbops->owner);
+ return -ENODEV;
+ }
+ unlock_fb_info(info);
+
+ ops = kzalloc(sizeof(struct fbcon_ops), GFP_KERNEL);
+ if (!ops) {
+ fbcon_release(info);
+ return -ENOMEM;
}
+ INIT_DELAYED_WORK(&ops->cursor_work, fb_flashcursor);
+ ops->info = info;
+ info->fbcon_par = ops;
+ ops->cur_blink_jiffies = HZ / 5;
+
+ return 0;
+}
+
+static int con2fb_acquire_newinfo(struct vc_data *vc, struct fb_info *info,
+ int unit)
+{
+ int err;
+
+ err = fbcon_open(info);
+ if (err)
+ return err;
+
+ if (vc)
+ set_blitting_type(vc, info);
+
return err;
}
-static int con2fb_release_oldinfo(struct vc_data *vc, struct fb_info *oldinfo,
- struct fb_info *newinfo, int unit,
- int oldidx, int found)
+static void con2fb_release_oldinfo(struct vc_data *vc, struct fb_info *oldinfo,
+ struct fb_info *newinfo)
{
- struct fbcon_ops *ops = oldinfo->fbcon_par;
- int err = 0, ret;
+ int ret;
- if (oldinfo->fbops->fb_release &&
- oldinfo->fbops->fb_release(oldinfo, 0)) {
- con2fb_map[unit] = oldidx;
- if (!found && newinfo->fbops->fb_release)
- newinfo->fbops->fb_release(newinfo, 0);
- if (!found)
- module_put(newinfo->fbops->owner);
- err = -ENODEV;
- }
+ fbcon_release(oldinfo);
- if (!err) {
- fbcon_del_cursor_timer(oldinfo);
- kfree(ops->cursor_state.mask);
- kfree(ops->cursor_data);
- kfree(ops->cursor_src);
- kfree(ops->fontbuffer);
- kfree(oldinfo->fbcon_par);
- oldinfo->fbcon_par = NULL;
- module_put(oldinfo->fbops->owner);
- /*
- If oldinfo and newinfo are driving the same hardware,
- the fb_release() method of oldinfo may attempt to
- restore the hardware state. This will leave the
- newinfo in an undefined state. Thus, a call to
- fb_set_par() may be needed for the newinfo.
- */
- if (newinfo && newinfo->fbops->fb_set_par) {
- ret = newinfo->fbops->fb_set_par(newinfo);
+ /*
+ * If oldinfo and newinfo are driving the same hardware, the
+ * fb_release() method of oldinfo may attempt to restore the
+ * hardware state. This will leave the newinfo in an undefined
+ * state. Thus, a call to fb_set_par() may be needed for the
+ * newinfo.
+ */
+ if (newinfo && newinfo->fbops->fb_set_par) {
+ ret = newinfo->fbops->fb_set_par(newinfo);
- if (ret)
- printk(KERN_ERR "con2fb_release_oldinfo: "
- "detected unhandled fb_set_par error, "
- "error code %d\n", ret);
- }
+ if (ret)
+ printk(KERN_ERR "con2fb_release_oldinfo: "
+ "detected unhandled fb_set_par error, "
+ "error code %d\n", ret);
}
-
- return err;
}
static void con2fb_init_display(struct vc_data *vc, struct fb_info *info,
@@ -794,7 +778,7 @@ static void con2fb_init_display(struct vc_data *vc, struct fb_info *info,
ops->currcon = fg_console;
- if (info->fbops->fb_set_par && !(ops->flags & FBCON_FLAGS_INIT)) {
+ if (info->fbops->fb_set_par && !ops->initialized) {
ret = info->fbops->fb_set_par(info);
if (ret)
@@ -803,14 +787,14 @@ static void con2fb_init_display(struct vc_data *vc, struct fb_info *info,
"error code %d\n", ret);
}
- ops->flags |= FBCON_FLAGS_INIT;
+ ops->initialized = true;
ops->graphics = 0;
fbcon_set_disp(info, &info->var, unit);
if (show_logo) {
struct vc_data *fg_vc = vc_cons[fg_console].d;
struct fb_info *fg_info =
- registered_fb[con2fb_map[fg_console]];
+ fbcon_info_from_console(fg_console);
fbcon_prepare_logo(fg_vc, fg_info, fg_vc->vc_cols,
fg_vc->vc_rows, fg_vc->vc_cols,
@@ -835,9 +819,9 @@ static int set_con2fb_map(int unit, int newidx, int user)
{
struct vc_data *vc = vc_cons[unit].d;
int oldidx = con2fb_map[unit];
- struct fb_info *info = registered_fb[newidx];
+ struct fb_info *info = fbcon_registered_fb[newidx];
struct fb_info *oldinfo = NULL;
- int found, err = 0;
+ int found, err = 0, show_logo;
WARN_CONSOLE_UNLOCKED();
@@ -853,31 +837,30 @@ static int set_con2fb_map(int unit, int newidx, int user)
}
if (oldidx != -1)
- oldinfo = registered_fb[oldidx];
+ oldinfo = fbcon_registered_fb[oldidx];
found = search_fb_in_map(newidx);
- con2fb_map[unit] = newidx;
- if (!err && !found)
- err = con2fb_acquire_newinfo(vc, info, unit, oldidx);
+ if (!err && !found) {
+ err = con2fb_acquire_newinfo(vc, info, unit);
+ if (!err)
+ con2fb_map[unit] = newidx;
+ }
/*
* If old fb is not mapped to any of the consoles,
* fbcon should release it.
*/
if (!err && oldinfo && !search_fb_in_map(oldidx))
- err = con2fb_release_oldinfo(vc, oldinfo, info, unit, oldidx,
- found);
+ con2fb_release_oldinfo(vc, oldinfo, info);
- if (!err) {
- int show_logo = (fg_console == 0 && !user &&
- logo_shown != FBCON_LOGO_DONTSHOW);
+ show_logo = (fg_console == 0 && !user &&
+ logo_shown != FBCON_LOGO_DONTSHOW);
- if (!found)
- fbcon_add_cursor_timer(info);
- con2fb_map_boot[unit] = newidx;
- con2fb_init_display(vc, info, unit, show_logo);
- }
+ if (!found)
+ fbcon_add_cursor_work(info);
+ con2fb_map_boot[unit] = newidx;
+ con2fb_init_display(vc, info, unit, show_logo);
if (!search_fb_in_map(info_idx))
info_idx = newidx;
@@ -938,7 +921,6 @@ static const char *fbcon_startup(void)
struct fbcon_display *p = &fb_display[fg_console];
struct vc_data *vc = vc_cons[fg_console].d;
const struct font_desc *font = NULL;
- struct module *owner;
struct fb_info *info = NULL;
struct fbcon_ops *ops;
int rows, cols;
@@ -947,36 +929,23 @@ static const char *fbcon_startup(void)
* If num_registered_fb is zero, this is a call for the dummy part.
* The frame buffer devices weren't initialized yet.
*/
- if (!num_registered_fb || info_idx == -1)
+ if (!fbcon_num_registered_fb || info_idx == -1)
return display_desc;
/*
* Instead of blindly using registered_fb[0], we use info_idx, set by
- * fb_console_init();
+ * fbcon_fb_registered();
*/
- info = registered_fb[info_idx];
+ info = fbcon_registered_fb[info_idx];
if (!info)
return NULL;
- owner = info->fbops->owner;
- if (!try_module_get(owner))
- return NULL;
- if (info->fbops->fb_open && info->fbops->fb_open(info, 0)) {
- module_put(owner);
+ if (fbcon_open(info))
return NULL;
- }
-
- ops = kzalloc(sizeof(struct fbcon_ops), GFP_KERNEL);
- if (!ops) {
- module_put(owner);
- return NULL;
- }
+ ops = info->fbcon_par;
ops->currcon = -1;
ops->graphics = 1;
ops->cur_rotate = -1;
- ops->cur_blink_jiffies = HZ / 5;
- ops->info = info;
- info->fbcon_par = ops;
p->con_rotate = initial_rotation;
if (p->con_rotate == -1)
@@ -1013,7 +982,7 @@ static const char *fbcon_startup(void)
info->var.yres,
info->var.bits_per_pixel);
- fbcon_add_cursor_timer(info);
+ fbcon_add_cursor_work(info);
return display_desc;
}
@@ -1033,7 +1002,7 @@ static void fbcon_init(struct vc_data *vc, int init)
if (con2fb_map[vc->vc_num] == -1)
con2fb_map[vc->vc_num] = info_idx;
- info = registered_fb[con2fb_map[vc->vc_num]];
+ info = fbcon_info_from_console(vc->vc_num);
if (logo_shown < 0 && console_loglevel <= CONSOLE_LOGLEVEL_QUIET)
logo_shown = FBCON_LOGO_DONTSHOW;
@@ -1046,7 +1015,7 @@ static void fbcon_init(struct vc_data *vc, int init)
return;
if (!info->fbcon_par)
- con2fb_acquire_newinfo(vc, info, vc->vc_num, -1);
+ con2fb_acquire_newinfo(vc, info, vc->vc_num);
/* If we are not the first console on this
fb, copy the font from that console */
@@ -1120,8 +1089,7 @@ static void fbcon_init(struct vc_data *vc, int init)
* We need to do it in fbcon_init() to prevent screen corruption.
*/
if (con_is_visible(vc) && vc->vc_mode == KD_TEXT) {
- if (info->fbops->fb_set_par &&
- !(ops->flags & FBCON_FLAGS_INIT)) {
+ if (info->fbops->fb_set_par && !ops->initialized) {
ret = info->fbops->fb_set_par(info);
if (ret)
@@ -1130,7 +1098,7 @@ static void fbcon_init(struct vc_data *vc, int init)
"error code %d\n", ret);
}
- ops->flags |= FBCON_FLAGS_INIT;
+ ops->initialized = true;
}
ops->graphics = 0;
@@ -1175,6 +1143,27 @@ static void fbcon_free_font(struct fbcon_display *p, bool freefont)
static void set_vc_hi_font(struct vc_data *vc, bool set);
+static void fbcon_release_all(void)
+{
+ struct fb_info *info;
+ int i, j, mapped;
+
+ fbcon_for_each_registered_fb(i) {
+ mapped = 0;
+ info = fbcon_registered_fb[i];
+
+ for (j = first_fb_vc; j <= last_fb_vc; j++) {
+ if (con2fb_map[j] == i) {
+ mapped = 1;
+ con2fb_map[j] = -1;
+ }
+ }
+
+ if (mapped)
+ fbcon_release(info);
+ }
+}
+
static void fbcon_deinit(struct vc_data *vc)
{
struct fbcon_display *p = &fb_display[vc->vc_num];
@@ -1188,7 +1177,7 @@ static void fbcon_deinit(struct vc_data *vc)
if (idx == -1)
goto finished;
- info = registered_fb[idx];
+ info = fbcon_registered_fb[idx];
if (!info)
goto finished;
@@ -1201,9 +1190,9 @@ static void fbcon_deinit(struct vc_data *vc)
goto finished;
if (con_is_visible(vc))
- fbcon_del_cursor_timer(info);
+ fbcon_del_cursor_work(info);
- ops->flags &= ~FBCON_FLAGS_INIT;
+ ops->initialized = false;
finished:
fbcon_free_font(p, free_font);
@@ -1214,7 +1203,7 @@ finished:
set_vc_hi_font(vc, false);
if (!con_is_bound(&fb_con))
- fbcon_exit();
+ fbcon_release_all();
if (vc->vc_num == logo_shown)
logo_shown = FBCON_LOGO_CANSHOW;
@@ -1250,7 +1239,7 @@ finished:
static void fbcon_clear(struct vc_data *vc, int sy, int sx, int height,
int width)
{
- struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fb_info *info = fbcon_info_from_console(vc->vc_num);
struct fbcon_ops *ops = info->fbcon_par;
struct fbcon_display *p = &fb_display[vc->vc_num];
@@ -1288,7 +1277,7 @@ static void fbcon_clear(struct vc_data *vc, int sy, int sx, int height,
static void fbcon_putcs(struct vc_data *vc, const unsigned short *s,
int count, int ypos, int xpos)
{
- struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fb_info *info = fbcon_info_from_console(vc->vc_num);
struct fbcon_display *p = &fb_display[vc->vc_num];
struct fbcon_ops *ops = info->fbcon_par;
@@ -1308,7 +1297,7 @@ static void fbcon_putc(struct vc_data *vc, int c, int ypos, int xpos)
static void fbcon_clear_margins(struct vc_data *vc, int bottom_only)
{
- struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fb_info *info = fbcon_info_from_console(vc->vc_num);
struct fbcon_ops *ops = info->fbcon_par;
if (!fbcon_is_inactive(vc, info))
@@ -1317,7 +1306,7 @@ static void fbcon_clear_margins(struct vc_data *vc, int bottom_only)
static void fbcon_cursor(struct vc_data *vc, int mode)
{
- struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fb_info *info = fbcon_info_from_console(vc->vc_num);
struct fbcon_ops *ops = info->fbcon_par;
int c = scr_readw((u16 *) vc->vc_pos);
@@ -1327,9 +1316,9 @@ static void fbcon_cursor(struct vc_data *vc, int mode)
return;
if (vc->vc_cursor_type & CUR_SW)
- fbcon_del_cursor_timer(info);
+ fbcon_del_cursor_work(info);
else
- fbcon_add_cursor_timer(info);
+ fbcon_add_cursor_work(info);
ops->cursor_flash = (mode == CM_ERASE) ? 0 : 1;
@@ -1411,7 +1400,7 @@ static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var,
static __inline__ void ywrap_up(struct vc_data *vc, int count)
{
- struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fb_info *info = fbcon_info_from_console(vc->vc_num);
struct fbcon_ops *ops = info->fbcon_par;
struct fbcon_display *p = &fb_display[vc->vc_num];
@@ -1430,7 +1419,7 @@ static __inline__ void ywrap_up(struct vc_data *vc, int count)
static __inline__ void ywrap_down(struct vc_data *vc, int count)
{
- struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fb_info *info = fbcon_info_from_console(vc->vc_num);
struct fbcon_ops *ops = info->fbcon_par;
struct fbcon_display *p = &fb_display[vc->vc_num];
@@ -1449,7 +1438,7 @@ static __inline__ void ywrap_down(struct vc_data *vc, int count)
static __inline__ void ypan_up(struct vc_data *vc, int count)
{
- struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fb_info *info = fbcon_info_from_console(vc->vc_num);
struct fbcon_display *p = &fb_display[vc->vc_num];
struct fbcon_ops *ops = info->fbcon_par;
@@ -1473,7 +1462,7 @@ static __inline__ void ypan_up(struct vc_data *vc, int count)
static __inline__ void ypan_up_redraw(struct vc_data *vc, int t, int count)
{
- struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fb_info *info = fbcon_info_from_console(vc->vc_num);
struct fbcon_ops *ops = info->fbcon_par;
struct fbcon_display *p = &fb_display[vc->vc_num];
@@ -1497,7 +1486,7 @@ static __inline__ void ypan_up_redraw(struct vc_data *vc, int t, int count)
static __inline__ void ypan_down(struct vc_data *vc, int count)
{
- struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fb_info *info = fbcon_info_from_console(vc->vc_num);
struct fbcon_display *p = &fb_display[vc->vc_num];
struct fbcon_ops *ops = info->fbcon_par;
@@ -1521,7 +1510,7 @@ static __inline__ void ypan_down(struct vc_data *vc, int count)
static __inline__ void ypan_down_redraw(struct vc_data *vc, int t, int count)
{
- struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fb_info *info = fbcon_info_from_console(vc->vc_num);
struct fbcon_ops *ops = info->fbcon_par;
struct fbcon_display *p = &fb_display[vc->vc_num];
@@ -1682,10 +1671,75 @@ static void fbcon_redraw(struct vc_data *vc, struct fbcon_display *p,
}
}
+static void fbcon_bmove_rec(struct vc_data *vc, struct fbcon_display *p, int sy, int sx,
+ int dy, int dx, int height, int width, u_int y_break)
+{
+ struct fb_info *info = fbcon_info_from_console(vc->vc_num);
+ struct fbcon_ops *ops = info->fbcon_par;
+ u_int b;
+
+ if (sy < y_break && sy + height > y_break) {
+ b = y_break - sy;
+ if (dy < sy) { /* Avoid trashing self */
+ fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width,
+ y_break);
+ fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx,
+ height - b, width, y_break);
+ } else {
+ fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx,
+ height - b, width, y_break);
+ fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width,
+ y_break);
+ }
+ return;
+ }
+
+ if (dy < y_break && dy + height > y_break) {
+ b = y_break - dy;
+ if (dy < sy) { /* Avoid trashing self */
+ fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width,
+ y_break);
+ fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx,
+ height - b, width, y_break);
+ } else {
+ fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx,
+ height - b, width, y_break);
+ fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width,
+ y_break);
+ }
+ return;
+ }
+ ops->bmove(vc, info, real_y(p, sy), sx, real_y(p, dy), dx,
+ height, width);
+}
+
+static void fbcon_bmove(struct vc_data *vc, int sy, int sx, int dy, int dx,
+ int height, int width)
+{
+ struct fb_info *info = fbcon_info_from_console(vc->vc_num);
+ struct fbcon_display *p = &fb_display[vc->vc_num];
+
+ if (fbcon_is_inactive(vc, info))
+ return;
+
+ if (!width || !height)
+ return;
+
+ /*
+ * Split blits that cross the physical y_wrap boundary. The
+ * pathological case involves 4 blits; better to use recursive
+ * code than an unrolled case.
+ *
+ * Recursive invocations don't need to erase the cursor over and
+ * over again, so we use fbcon_bmove_rec().
+ */
+ fbcon_bmove_rec(vc, p, sy, sx, dy, dx, height, width,
+ p->vrows - p->yscroll);
+}
+
static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
enum con_scroll dir, unsigned int count)
{
- struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fb_info *info = fbcon_info_from_console(vc->vc_num);
struct fbcon_display *p = &fb_display[vc->vc_num];
int scroll_partial = info->flags & FBINFO_PARTIAL_PAN_OK;
@@ -1882,71 +1936,6 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
}
-static void fbcon_bmove(struct vc_data *vc, int sy, int sx, int dy, int dx,
- int height, int width)
-{
- struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
- struct fbcon_display *p = &fb_display[vc->vc_num];
-
- if (fbcon_is_inactive(vc, info))
- return;
-
- if (!width || !height)
- return;
-
- /* Split blits that cross physical y_wrap case.
- * Pathological case involves 4 blits, better to use recursive
- * code rather than unrolled case
- *
- * Recursive invocations don't need to erase the cursor over and
- * over again, so we use fbcon_bmove_rec()
- */
- fbcon_bmove_rec(vc, p, sy, sx, dy, dx, height, width,
- p->vrows - p->yscroll);
-}
-
-static void fbcon_bmove_rec(struct vc_data *vc, struct fbcon_display *p, int sy, int sx,
- int dy, int dx, int height, int width, u_int y_break)
-{
- struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
- struct fbcon_ops *ops = info->fbcon_par;
- u_int b;
-
- if (sy < y_break && sy + height > y_break) {
- b = y_break - sy;
- if (dy < sy) { /* Avoid trashing self */
- fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width,
- y_break);
- fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx,
- height - b, width, y_break);
- } else {
- fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx,
- height - b, width, y_break);
- fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width,
- y_break);
- }
- return;
- }
-
- if (dy < y_break && dy + height > y_break) {
- b = y_break - dy;
- if (dy < sy) { /* Avoid trashing self */
- fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width,
- y_break);
- fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx,
- height - b, width, y_break);
- } else {
- fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx,
- height - b, width, y_break);
- fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width,
- y_break);
- }
- return;
- }
- ops->bmove(vc, info, real_y(p, sy), sx, real_y(p, dy), dx,
- height, width);
-}
-
static void updatescrollmode_accel(struct fbcon_display *p,
struct fb_info *info,
struct vc_data *vc)
@@ -2015,7 +2004,7 @@ static void updatescrollmode(struct fbcon_display *p,
static int fbcon_resize(struct vc_data *vc, unsigned int width,
unsigned int height, unsigned int user)
{
- struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fb_info *info = fbcon_info_from_console(vc->vc_num);
struct fbcon_ops *ops = info->fbcon_par;
struct fbcon_display *p = &fb_display[vc->vc_num];
struct fb_var_screeninfo var = info->var;
@@ -2084,7 +2073,7 @@ static int fbcon_switch(struct vc_data *vc)
struct fb_var_screeninfo var;
int i, ret, prev_console;
- info = registered_fb[con2fb_map[vc->vc_num]];
+ info = fbcon_info_from_console(vc->vc_num);
ops = info->fbcon_par;
if (logo_shown >= 0) {
@@ -2098,7 +2087,7 @@ static int fbcon_switch(struct vc_data *vc)
prev_console = ops->currcon;
if (prev_console != -1)
- old_info = registered_fb[con2fb_map[prev_console]];
+ old_info = fbcon_info_from_console(prev_console);
/*
* FIXME: If we have multiple fbdev's loaded, we need to
* update all info->currcon. Perhaps, we can place this
@@ -2107,9 +2096,9 @@ static int fbcon_switch(struct vc_data *vc)
*
* info->currcon = vc->vc_num;
*/
- for_each_registered_fb(i) {
- if (registered_fb[i]->fbcon_par) {
- struct fbcon_ops *o = registered_fb[i]->fbcon_par;
+ fbcon_for_each_registered_fb(i) {
+ if (fbcon_registered_fb[i]->fbcon_par) {
+ struct fbcon_ops *o = fbcon_registered_fb[i]->fbcon_par;
o->currcon = vc->vc_num;
}
@@ -2139,14 +2128,14 @@ static int fbcon_switch(struct vc_data *vc)
}
if (old_info != info)
- fbcon_del_cursor_timer(old_info);
+ fbcon_del_cursor_work(old_info);
}
if (fbcon_is_inactive(vc, info) ||
ops->blank_state != FB_BLANK_UNBLANK)
- fbcon_del_cursor_timer(info);
+ fbcon_del_cursor_work(info);
else
- fbcon_add_cursor_timer(info);
+ fbcon_add_cursor_work(info);
set_blitting_type(vc, info);
ops->cursor_reset = 1;
@@ -2221,7 +2210,7 @@ static void fbcon_generic_blank(struct vc_data *vc, struct fb_info *info,
static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch)
{
- struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fb_info *info = fbcon_info_from_console(vc->vc_num);
struct fbcon_ops *ops = info->fbcon_par;
if (mode_switch) {
@@ -2254,16 +2243,16 @@ static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch)
if (mode_switch || fbcon_is_inactive(vc, info) ||
ops->blank_state != FB_BLANK_UNBLANK)
- fbcon_del_cursor_timer(info);
+ fbcon_del_cursor_work(info);
else
- fbcon_add_cursor_timer(info);
+ fbcon_add_cursor_work(info);
return 0;
}
static int fbcon_debug_enter(struct vc_data *vc)
{
- struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fb_info *info = fbcon_info_from_console(vc->vc_num);
struct fbcon_ops *ops = info->fbcon_par;
ops->save_graphics = ops->graphics;
@@ -2276,7 +2265,7 @@ static int fbcon_debug_enter(struct vc_data *vc)
static int fbcon_debug_leave(struct vc_data *vc)
{
- struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fb_info *info = fbcon_info_from_console(vc->vc_num);
struct fbcon_ops *ops = info->fbcon_par;
ops->graphics = ops->save_graphics;
@@ -2412,7 +2401,7 @@ static void set_vc_hi_font(struct vc_data *vc, bool set)
static int fbcon_do_set_font(struct vc_data *vc, int w, int h, int charcount,
const u8 * data, int userfont)
{
- struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fb_info *info = fbcon_info_from_console(vc->vc_num);
struct fbcon_ops *ops = info->fbcon_par;
struct fbcon_display *p = &fb_display[vc->vc_num];
int resize;
@@ -2466,7 +2455,7 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, int charcount,
static int fbcon_set_font(struct vc_data *vc, struct console_font *font,
unsigned int flags)
{
- struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fb_info *info = fbcon_info_from_console(vc->vc_num);
unsigned charcount = font->charcount;
int w = font->width;
int h = font->height;
@@ -2530,7 +2519,7 @@ static int fbcon_set_font(struct vc_data *vc, struct console_font *font,
static int fbcon_set_def_font(struct vc_data *vc, struct console_font *font, char *name)
{
- struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fb_info *info = fbcon_info_from_console(vc->vc_num);
const struct font_desc *f;
if (!name)
@@ -2554,7 +2543,7 @@ static struct fb_cmap palette_cmap = {
static void fbcon_set_palette(struct vc_data *vc, const unsigned char *table)
{
- struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fb_info *info = fbcon_info_from_console(vc->vc_num);
int i, j, k, depth;
u8 val;
@@ -2670,7 +2659,7 @@ static void fbcon_modechanged(struct fb_info *info)
return;
vc = vc_cons[ops->currcon].d;
if (vc->vc_mode != KD_TEXT ||
- registered_fb[con2fb_map[ops->currcon]] != info)
+ fbcon_info_from_console(ops->currcon) != info)
return;
p = &fb_display[vc->vc_num];
@@ -2710,7 +2699,7 @@ static void fbcon_set_all_vcs(struct fb_info *info)
for (i = first_fb_vc; i <= last_fb_vc; i++) {
vc = vc_cons[i].d;
if (!vc || vc->vc_mode != KD_TEXT ||
- registered_fb[con2fb_map[i]] != info)
+ fbcon_info_from_console(i) != info)
continue;
if (con_is_visible(vc)) {
@@ -2754,7 +2743,7 @@ int fbcon_mode_deleted(struct fb_info *info,
j = con2fb_map[i];
if (j == -1)
continue;
- fb_info = registered_fb[j];
+ fb_info = fbcon_registered_fb[j];
if (fb_info != info)
continue;
p = &fb_display[i];
@@ -2783,16 +2772,17 @@ static void fbcon_unbind(void)
static inline void fbcon_unbind(void) {}
#endif /* CONFIG_VT_HW_CONSOLE_BINDING */
-/* called with console_lock held */
void fbcon_fb_unbind(struct fb_info *info)
{
- int i, new_idx = -1, ret = 0;
+ int i, new_idx = -1;
int idx = info->node;
- WARN_CONSOLE_UNLOCKED();
+ console_lock();
- if (!fbcon_has_console_bind)
+ if (!fbcon_has_console_bind) {
+ console_unlock();
return;
+ }
for (i = first_fb_vc; i <= last_fb_vc; i++) {
if (con2fb_map[i] != idx &&
@@ -2808,7 +2798,7 @@ void fbcon_fb_unbind(struct fb_info *info)
set_con2fb_map(i, new_idx, 0);
}
} else {
- struct fb_info *info = registered_fb[idx];
+ struct fb_info *info = fbcon_registered_fb[idx];
/* This is sort of like set_con2fb_map, except it maps
* the consoles to no device and then releases the
@@ -2820,29 +2810,30 @@ void fbcon_fb_unbind(struct fb_info *info)
if (con2fb_map[i] == idx) {
con2fb_map[i] = -1;
if (!search_fb_in_map(idx)) {
- ret = con2fb_release_oldinfo(vc_cons[i].d,
- info, NULL, i,
- idx, 0);
- if (ret) {
- con2fb_map[i] = idx;
- return;
- }
+ con2fb_release_oldinfo(vc_cons[i].d,
+ info, NULL);
}
}
}
fbcon_unbind();
}
+
+ console_unlock();
}
-/* called with console_lock held */
void fbcon_fb_unregistered(struct fb_info *info)
{
int i, idx;
- WARN_CONSOLE_UNLOCKED();
+ console_lock();
- if (deferred_takeover)
+ fbcon_registered_fb[info->node] = NULL;
+ fbcon_num_registered_fb--;
+
+ if (deferred_takeover) {
+ console_unlock();
return;
+ }
idx = info->node;
for (i = first_fb_vc; i <= last_fb_vc; i++) {
@@ -2853,7 +2844,7 @@ void fbcon_fb_unregistered(struct fb_info *info)
if (idx == info_idx) {
info_idx = -1;
- for_each_registered_fb(i) {
+ fbcon_for_each_registered_fb(i) {
info_idx = i;
break;
}
@@ -2869,8 +2860,9 @@ void fbcon_fb_unregistered(struct fb_info *info)
if (primary_device == idx)
primary_device = -1;
- if (!num_registered_fb)
+ if (!fbcon_num_registered_fb)
do_unregister_con_driver(&fb_con);
+ console_unlock();
}
void fbcon_remap_all(struct fb_info *info)
@@ -2928,13 +2920,21 @@ static inline void fbcon_select_primary(struct fb_info *info)
}
#endif /* CONFIG_FRAMEBUFFER_DETECT_PRIMARY */
+static bool lockless_register_fb;
+module_param_named_unsafe(lockless_register_fb, lockless_register_fb, bool, 0400);
+MODULE_PARM_DESC(lockless_register_fb,
+ "Lockless framebuffer registration for debugging [default=off]");
+
/* called with console_lock held */
-int fbcon_fb_registered(struct fb_info *info)
+static int do_fb_registered(struct fb_info *info)
{
int ret = 0, i, idx;
WARN_CONSOLE_UNLOCKED();
+ fbcon_registered_fb[info->node] = info;
+ fbcon_num_registered_fb++;
+
idx = info->node;
fbcon_select_primary(info);
@@ -2963,6 +2963,25 @@ int fbcon_fb_registered(struct fb_info *info)
return ret;
}
+int fbcon_fb_registered(struct fb_info *info)
+{
+ int ret;
+
+ if (!lockless_register_fb)
+ console_lock();
+ else
+ atomic_inc(&ignore_console_lock_warning);
+
+ ret = do_fb_registered(info);
+
+ if (!lockless_register_fb)
+ console_unlock();
+ else
+ atomic_dec(&ignore_console_lock_warning);
+
+ return ret;
+}
+
void fbcon_fb_blanked(struct fb_info *info, int blank)
{
struct fbcon_ops *ops = info->fbcon_par;
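
The lockless_register_fb debug knob migrates here from fbmem.c (removed further down) because the console_lock for registration is now taken inside fbcon rather than by its caller; when set, the lock is skipped and ignore_console_lock_warning is bumped so the WARN_CONSOLE_UNLOCKED() assertions stay quiet. Roughly what that assertion checks (a sketch, not the exact kernel definition):

    #define WARN_CONSOLE_UNLOCKED()                                  \
            WARN_ON(!atomic_read(&ignore_console_lock_warning) &&    \
                    !is_console_locked())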
@@ -2973,7 +2992,7 @@ void fbcon_fb_blanked(struct fb_info *info, int blank)
vc = vc_cons[ops->currcon].d;
if (vc->vc_mode != KD_TEXT ||
- registered_fb[con2fb_map[ops->currcon]] != info)
+ fbcon_info_from_console(ops->currcon) != info)
return;
if (con_is_visible(vc)) {
@@ -2993,7 +3012,7 @@ void fbcon_new_modelist(struct fb_info *info)
const struct fb_videomode *mode;
for (i = first_fb_vc; i <= last_fb_vc; i++) {
- if (registered_fb[con2fb_map[i]] != info)
+ if (fbcon_info_from_console(i) != info)
continue;
if (!fb_display[i].mode)
continue;
@@ -3048,9 +3067,9 @@ int fbcon_set_con2fb_map_ioctl(void __user *argp)
return -EINVAL;
if (con2fb.framebuffer >= FB_MAX)
return -EINVAL;
- if (!registered_fb[con2fb.framebuffer])
+ if (!fbcon_registered_fb[con2fb.framebuffer])
request_module("fb%d", con2fb.framebuffer);
- if (!registered_fb[con2fb.framebuffer]) {
+ if (!fbcon_registered_fb[con2fb.framebuffer]) {
return -EINVAL;
}
@@ -3117,10 +3136,10 @@ static ssize_t store_rotate(struct device *device,
console_lock();
idx = con2fb_map[fg_console];
- if (idx == -1 || registered_fb[idx] == NULL)
+ if (idx == -1 || fbcon_registered_fb[idx] == NULL)
goto err;
- info = registered_fb[idx];
+ info = fbcon_registered_fb[idx];
rotate = simple_strtoul(buf, last, 0);
fbcon_rotate(info, rotate);
err:
@@ -3139,10 +3158,10 @@ static ssize_t store_rotate_all(struct device *device,
console_lock();
idx = con2fb_map[fg_console];
- if (idx == -1 || registered_fb[idx] == NULL)
+ if (idx == -1 || fbcon_registered_fb[idx] == NULL)
goto err;
- info = registered_fb[idx];
+ info = fbcon_registered_fb[idx];
rotate = simple_strtoul(buf, last, 0);
fbcon_rotate_all(info, rotate);
err:
@@ -3159,14 +3178,14 @@ static ssize_t show_rotate(struct device *device,
console_lock();
idx = con2fb_map[fg_console];
- if (idx == -1 || registered_fb[idx] == NULL)
+ if (idx == -1 || fbcon_registered_fb[idx] == NULL)
goto err;
- info = registered_fb[idx];
+ info = fbcon_registered_fb[idx];
rotate = fbcon_get_rotate(info);
err:
console_unlock();
- return snprintf(buf, PAGE_SIZE, "%d\n", rotate);
+ return sysfs_emit(buf, "%d\n", rotate);
}
static ssize_t show_cursor_blink(struct device *device,
@@ -3179,19 +3198,19 @@ static ssize_t show_cursor_blink(struct device *device,
console_lock();
idx = con2fb_map[fg_console];
- if (idx == -1 || registered_fb[idx] == NULL)
+ if (idx == -1 || fbcon_registered_fb[idx] == NULL)
goto err;
- info = registered_fb[idx];
+ info = fbcon_registered_fb[idx];
ops = info->fbcon_par;
if (!ops)
goto err;
- blink = (ops->flags & FBCON_FLAGS_CURSOR_TIMER) ? 1 : 0;
+ blink = delayed_work_pending(&ops->cursor_work);
err:
console_unlock();
- return snprintf(buf, PAGE_SIZE, "%d\n", blink);
+ return sysfs_emit(buf, "%d\n", blink);
}
static ssize_t store_cursor_blink(struct device *device,
@@ -3205,10 +3224,10 @@ static ssize_t store_cursor_blink(struct device *device,
console_lock();
idx = con2fb_map[fg_console];
- if (idx == -1 || registered_fb[idx] == NULL)
+ if (idx == -1 || fbcon_registered_fb[idx] == NULL)
goto err;
- info = registered_fb[idx];
+ info = fbcon_registered_fb[idx];
if (!info->fbcon_par)
goto err;
@@ -3217,10 +3236,10 @@ static ssize_t store_cursor_blink(struct device *device,
if (blink) {
fbcon_cursor_noblink = 0;
- fbcon_add_cursor_timer(info);
+ fbcon_add_cursor_work(info);
} else {
fbcon_cursor_noblink = 1;
- fbcon_del_cursor_timer(info);
+ fbcon_del_cursor_work(info);
}
err:
@@ -3265,8 +3284,11 @@ static void fbcon_register_existing_fbs(struct work_struct *work)
console_lock();
- for_each_registered_fb(i)
- fbcon_fb_registered(registered_fb[i]);
+ deferred_takeover = false;
+ logo_shown = FBCON_LOGO_DONTSHOW;
+
+ fbcon_for_each_registered_fb(i)
+ do_fb_registered(fbcon_registered_fb[i]);
console_unlock();
}
@@ -3282,8 +3304,6 @@ static int fbcon_output_notifier(struct notifier_block *nb,
pr_info("fbcon: Taking over console\n");
dummycon_unregister_output_notifier(&fbcon_output_nb);
- deferred_takeover = false;
- logo_shown = FBCON_LOGO_DONTSHOW;
/* We may get called in atomic context */
schedule_work(&fbcon_deferred_takeover_work);
@@ -3306,67 +3326,6 @@ static void fbcon_start(void)
return;
}
#endif
-
- if (num_registered_fb) {
- int i;
-
- for_each_registered_fb(i) {
- info_idx = i;
- break;
- }
-
- do_fbcon_takeover(0);
- }
-}
-
-static void fbcon_exit(void)
-{
- struct fb_info *info;
- int i, j, mapped;
-
-#ifdef CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER
- if (deferred_takeover) {
- dummycon_unregister_output_notifier(&fbcon_output_nb);
- deferred_takeover = false;
- }
-#endif
-
- for_each_registered_fb(i) {
- int pending = 0;
-
- mapped = 0;
- info = registered_fb[i];
-
- if (info->queue.func)
- pending = cancel_work_sync(&info->queue);
- pr_debug("fbcon: %s pending work\n", (pending ? "canceled" : "no"));
-
- for (j = first_fb_vc; j <= last_fb_vc; j++) {
- if (con2fb_map[j] == i) {
- mapped = 1;
- con2fb_map[j] = -1;
- }
- }
-
- if (mapped) {
- if (info->fbops->fb_release)
- info->fbops->fb_release(info, 0);
- module_put(info->fbops->owner);
-
- if (info->fbcon_par) {
- struct fbcon_ops *ops = info->fbcon_par;
-
- fbcon_del_cursor_timer(info);
- kfree(ops->cursor_src);
- kfree(ops->cursor_state.mask);
- kfree(info->fbcon_par);
- info->fbcon_par = NULL;
- }
-
- if (info->queue.func == fb_flashcursor)
- info->queue.func = NULL;
- }
- }
}
void __init fb_console_init(void)
@@ -3408,10 +3367,19 @@ static void __exit fbcon_deinit_device(void)
void __exit fb_console_exit(void)
{
+#ifdef CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER
+ console_lock();
+ if (deferred_takeover)
+ dummycon_unregister_output_notifier(&fbcon_output_nb);
+ console_unlock();
+
+ cancel_work_sync(&fbcon_deferred_takeover_work);
+#endif
+
console_lock();
fbcon_deinit_device();
device_destroy(fb_class, MKDEV(0, 0));
- fbcon_exit();
+
do_unregister_con_driver(&fb_con);
console_unlock();
}
diff --git a/drivers/video/fbdev/core/fbcon.h b/drivers/video/fbdev/core/fbcon.h
index 969d41ecede5..0eaf54a21151 100644
--- a/drivers/video/fbdev/core/fbcon.h
+++ b/drivers/video/fbdev/core/fbcon.h
@@ -14,12 +14,10 @@
#include <linux/types.h>
#include <linux/vt_buffer.h>
#include <linux/vt_kern.h>
+#include <linux/workqueue.h>
#include <asm/io.h>
-#define FBCON_FLAGS_INIT 1
-#define FBCON_FLAGS_CURSOR_TIMER 2
-
/*
* This is the interface between the low-level console driver and the
* low-level frame buffer device
@@ -68,7 +66,7 @@ struct fbcon_ops {
int (*update_start)(struct fb_info *info);
int (*rotate_font)(struct fb_info *info, struct vc_data *vc);
struct fb_var_screeninfo var; /* copy of the current fb_var_screeninfo */
- struct timer_list cursor_timer; /* Cursor timer */
+ struct delayed_work cursor_work; /* Cursor timer */
struct fb_cursor cursor_state;
struct fbcon_display *p;
struct fb_info *info;
@@ -79,7 +77,7 @@ struct fbcon_ops {
int blank_state;
int graphics;
int save_graphics; /* for debug enter/leave */
- int flags;
+ bool initialized;
int rotate;
int cur_rotate;
char *cursor_data;
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index a6bb0e438216..84427470367b 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -1577,14 +1577,12 @@ static void do_remove_conflicting_framebuffers(struct apertures_struct *a,
* allocate the memory range.
*
* If it's not a platform device, at least print a warning. A
- * fix would add code to remove the device from the system.
+ * fix would add code to remove the device from the system. For
+ * framebuffers without any Linux device, print a warning as
+ * well.
*/
if (!device) {
- /* TODO: Represent each OF framebuffer as its own
- * device in the device hierarchy. For now, offb
- * doesn't have such a device, so unregister the
- * framebuffer as before without warning.
- */
+ pr_warn("fb%d: no device set\n", i);
do_unregister_framebuffer(registered_fb[i]);
} else if (dev_is_platform(device)) {
registered_fb[i]->forced_out = true;
@@ -1597,14 +1595,9 @@ static void do_remove_conflicting_framebuffers(struct apertures_struct *a,
}
}
-static bool lockless_register_fb;
-module_param_named_unsafe(lockless_register_fb, lockless_register_fb, bool, 0400);
-MODULE_PARM_DESC(lockless_register_fb,
- "Lockless framebuffer registration for debugging [default=off]");
-
static int do_register_framebuffer(struct fb_info *fb_info)
{
- int i, ret;
+ int i;
struct fb_videomode mode;
if (fb_check_foreignness(fb_info))
@@ -1673,19 +1666,7 @@ static int do_register_framebuffer(struct fb_info *fb_info)
}
#endif
- if (!lockless_register_fb)
- console_lock();
- else
- atomic_inc(&ignore_console_lock_warning);
- lock_fb_info(fb_info);
- ret = fbcon_fb_registered(fb_info);
- unlock_fb_info(fb_info);
-
- if (!lockless_register_fb)
- console_unlock();
- else
- atomic_dec(&ignore_console_lock_warning);
- return ret;
+ return fbcon_fb_registered(fb_info);
}
static void unbind_console(struct fb_info *fb_info)
@@ -1695,11 +1676,7 @@ static void unbind_console(struct fb_info *fb_info)
if (WARN_ON(i < 0 || i >= FB_MAX || registered_fb[i] != fb_info))
return;
- console_lock();
- lock_fb_info(fb_info);
fbcon_fb_unbind(fb_info);
- unlock_fb_info(fb_info);
- console_unlock();
}

static void unlink_framebuffer(struct fb_info *fb_info)
@@ -1742,9 +1719,7 @@ static void do_unregister_framebuffer(struct fb_info *fb_info)
fb_notifier_call_chain(FB_EVENT_FB_UNREGISTERED, &event);
}
#endif
- console_lock();
fbcon_fb_unregistered(fb_info);
-	console_unlock();

	/* this may free fb info */
put_fb_info(fb_info);
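
The dropped lockless_register_fb parameter existed solely to exercise framebuffer registration without console_lock() held when hunting lock inversions. With the hunks above, do_register_framebuffer(), unbind_console() and do_unregister_framebuffer() take no console or fb_info locks at all; fbcon is expected to lock for itself. A sketch of what the callee plausibly looks like after this change (do_fb_registered() is a hypothetical unlocked helper, not part of this patch):

	static int fbcon_fb_registered(struct fb_info *fb_info)
	{
		int ret;

		console_lock();
		ret = do_fb_registered(fb_info);	/* unlocked body */
		console_unlock();

		return ret;
	}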
diff --git a/drivers/video/fbdev/core/fbsysfs.c b/drivers/video/fbdev/core/fbsysfs.c
index 26892940c213..8c1ee9ecec3d 100644
--- a/drivers/video/fbdev/core/fbsysfs.c
+++ b/drivers/video/fbdev/core/fbsysfs.c
@@ -91,9 +91,11 @@ static int activate(struct fb_info *fb_info, struct fb_var_screeninfo *var)
var->activate |= FB_ACTIVATE_FORCE;
console_lock();
+ lock_fb_info(fb_info);
err = fb_set_var(fb_info, var);
if (!err)
fbcon_update_vcs(fb_info, var->activate & FB_ACTIVATE_ALL);
+ unlock_fb_info(fb_info);
console_unlock();
if (err)
return err;
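
activate() now nests lock_fb_info() inside console_lock(). Keeping that order identical at every call site is what rules out an ABBA deadlock; schematically, the inversion that must not exist anywhere is:

	/*
	 *   CPU0                      CPU1 (inverted order - forbidden)
	 *   console_lock();           lock_fb_info(info);
	 *   lock_fb_info(info);       console_lock();
	 *
	 *   (each CPU waits forever on the lock the other holds)
	 */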
diff --git a/drivers/video/fbdev/offb.c b/drivers/video/fbdev/offb.c
index afdb6aa48add..b1acb1ebebe9 100644
--- a/drivers/video/fbdev/offb.c
+++ b/drivers/video/fbdev/offb.c
@@ -386,10 +386,10 @@ static void offb_init_palette_hacks(struct fb_info *info, struct device_node *dp
FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_STATIC_PSEUDOCOLOR;
}

-static void __init offb_init_fb(const char *name,
- int width, int height, int depth,
- int pitch, unsigned long address,
- int foreign_endian, struct device_node *dp)
+static void offb_init_fb(struct platform_device *parent, const char *name,
+ int width, int height, int depth,
+ int pitch, unsigned long address,
+ int foreign_endian, struct device_node *dp)
{
unsigned long res_size = pitch * height;
struct offb_par *par = &default_par;
@@ -410,12 +410,13 @@ static void __init offb_init_fb(const char *name,
return;
	}

-	info = framebuffer_alloc(sizeof(u32) * 16, NULL);
+ info = framebuffer_alloc(sizeof(u32) * 16, &parent->dev);
if (!info) {
release_mem_region(res_start, res_size);
return;
}
+	platform_set_drvdata(parent, info);

	fix = &info->fix;
var = &info->var;
@@ -535,7 +536,8 @@ out_aper:
}

-static void __init offb_init_nodriver(struct device_node *dp, int no_real_node)
+static void offb_init_nodriver(struct platform_device *parent, struct device_node *dp,
+ int no_real_node)
{
unsigned int len;
int i, width = 640, height = 480, depth = 8, pitch = 640;
@@ -650,46 +652,76 @@ static void __init offb_init_nodriver(struct device_node *dp, int no_real_node)
/* kludge for valkyrie */
if (of_node_name_eq(dp, "valkyrie"))
address += 0x1000;
- offb_init_fb(no_real_node ? "bootx" : NULL,
+ offb_init_fb(parent, no_real_node ? "bootx" : NULL,
width, height, depth, pitch, address,
foreign_endian, no_real_node ? NULL : dp);
}
}

-static int __init offb_init(void)
+static int offb_remove(struct platform_device *pdev)
{
- struct device_node *dp = NULL, *boot_disp = NULL;
+ struct fb_info *info = platform_get_drvdata(pdev);
- if (fb_get_options("offb", NULL))
- return -ENODEV;
+ if (info)
+ unregister_framebuffer(info);
- /* Check if we have a MacOS display without a node spec */
- if (of_get_property(of_chosen, "linux,bootx-noscreen", NULL) != NULL) {
- /* The old code tried to work out which node was the MacOS
- * display based on the address. I'm dropping that since the
- * lack of a node spec only happens with old BootX versions
- * (users can update) and with this code, they'll still get
- * a display (just not the palette hacks).
- */
- offb_init_nodriver(of_chosen, 1);
- }
+ return 0;
+}
- for_each_node_by_type(dp, "display") {
- if (of_get_property(dp, "linux,opened", NULL) &&
- of_get_property(dp, "linux,boot-display", NULL)) {
- boot_disp = dp;
- offb_init_nodriver(dp, 0);
- }
- }
- for_each_node_by_type(dp, "display") {
- if (of_get_property(dp, "linux,opened", NULL) &&
- dp != boot_disp)
- offb_init_nodriver(dp, 0);
- }
+static int offb_probe_bootx_noscreen(struct platform_device *pdev)
+{
+ offb_init_nodriver(pdev, of_chosen, 1);
return 0;
}

+static struct platform_driver offb_driver_bootx_noscreen = {
+ .driver = {
+ .name = "bootx-noscreen",
+ },
+ .probe = offb_probe_bootx_noscreen,
+ .remove = offb_remove,
+};
+
+static int offb_probe_display(struct platform_device *pdev)
+{
+ offb_init_nodriver(pdev, pdev->dev.of_node, 0);
+
+ return 0;
+}
+
+static const struct of_device_id offb_of_match_display[] = {
+ { .compatible = "display", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, offb_of_match_display);
+
+static struct platform_driver offb_driver_display = {
+ .driver = {
+ .name = "of-display",
+ .of_match_table = offb_of_match_display,
+ },
+ .probe = offb_probe_display,
+ .remove = offb_remove,
+};
+
+static int __init offb_init(void)
+{
+ if (fb_get_options("offb", NULL))
+ return -ENODEV;
+
+ platform_driver_register(&offb_driver_bootx_noscreen);
+ platform_driver_register(&offb_driver_display);
+
+ return 0;
+}
module_init(offb_init);
+
+static void __exit offb_exit(void)
+{
+ platform_driver_unregister(&offb_driver_display);
+ platform_driver_unregister(&offb_driver_bootx_noscreen);
+}
+module_exit(offb_exit);
+
MODULE_LICENSE("GPL");