From 5c27de1df85066f2801f1ec406e641fdde178151 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Fri, 24 May 2019 18:35:18 +0200 Subject: drm/i915/dsi: Call drm_connector_cleanup on vlv_dsi_init error exit path MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If we exit vlv_dsi_init() because we failed to find a fixed_mode, then we've already called drm_connector_init() and we should call drm_connector_cleanup() to unregister the connector object. Reviewed-by: Ville Syrjälä Signed-off-by: Hans de Goede Link: https://patchwork.freedesktop.org/patch/msgid/20190524163518.17545-1-hdegoede@redhat.com --- drivers/gpu/drm/i915/vlv_dsi.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/vlv_dsi.c b/drivers/gpu/drm/i915/vlv_dsi.c index 895ea1a72a69..bfe2891eac37 100644 --- a/drivers/gpu/drm/i915/vlv_dsi.c +++ b/drivers/gpu/drm/i915/vlv_dsi.c @@ -1793,7 +1793,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv) if (!fixed_mode) { DRM_DEBUG_KMS("no fixed mode\n"); - goto err; + goto err_cleanup_connector; } intel_panel_init(&intel_connector->panel, fixed_mode, NULL); @@ -1803,6 +1803,8 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv) return; +err_cleanup_connector: + drm_connector_cleanup(&intel_connector->base); err: drm_encoder_cleanup(&intel_encoder->base); kfree(intel_dsi); -- cgit v1.2.3 From c2df2201b6933e927de24742a314ba2e0d13efd2 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Fri, 24 May 2019 22:26:27 +0100 Subject: drm/i915/gtt: set err to -ENOMEM on memory allocation failure Currently when the allocation of ppgtt->work fails the error return path via err_free returns an uninitialized value in err. Fix this by setting err to the appropriate error return of -ENOMEM. Addresses-Coverity: ("Uninitialized scalar variable") Fixes: d3622099c76f ("drm/i915/gtt: Always acquire struct_mutex for gen6_ppgtt_cleanup") Signed-off-by: Colin Ian King Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190524212627.24256-1-colin.king@canonical.com --- drivers/gpu/drm/i915/i915_gem_gtt.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 266baa11df64..bc5ddeb845b0 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -2063,8 +2063,10 @@ static struct i915_hw_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915) ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode; ppgtt->work = kmalloc(sizeof(*ppgtt->work), GFP_KERNEL); - if (!ppgtt->work) + if (!ppgtt->work) { + err = -ENOMEM; goto err_free; + } err = gen6_ppgtt_init_scratch(ppgtt); if (err) -- cgit v1.2.3 From 591d4dc4729004e98509efd1d23b8d8bf0111182 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Fri, 24 May 2019 21:52:53 +0300 Subject: drm/i915: make REG_BIT() and REG_GENMASK() work with variables REG_BIT() and REG_GENMASK() were intended to work with both constant expressions and otherwise, with the former having extra compile time checks for the bit ranges. Incredibly, the result of __builtin_constant_p() is not an integer constant expression when given a non-constant expression, leading to errors in BUILD_BUG_ON_ZERO(). Replace __builtin_constant_p() with the __is_constexpr() magic spell. 
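For illustration only (not part of the patch): __is_constexpr(), as defined in the kernel headers, is itself an integer constant expression for any argument, which is what keeps the BUILD_BUG_ON_ZERO() operand valid even when __n is a variable. A minimal sketch, assuming the standard kernel definition:

/*
 * Sketch of the kernel's __is_constexpr(): (void *)((long)(x) * 0l) is a
 * null pointer constant only when x is an integer constant expression, so
 * the conditional operator yields "int *" (sizeof(int)) in that case and
 * "void *" otherwise. The comparison of sizeofs is therefore always a
 * compile-time constant 0 or 1.
 */
#define __is_constexpr(x) \
	(sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8)))

/*
 * With a variable bit number __is_constexpr(bit) is 0, the && inside
 * REG_BIT() short-circuits to a constant 0 and BUILD_BUG_ON_ZERO(0)
 * compiles; with a constant out-of-range bit, e.g. REG_BIT(42), the
 * build still fails as intended.
 */
static inline u32 variable_reg_bit(int bit)
{
	return REG_BIT(bit);	/* hypothetical caller, compiles after this patch */
}
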
Reported-by: Ville Syrjala Reviewed-by: Chris Wilson Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190524185253.1088-1-jani.nikula@intel.com --- drivers/gpu/drm/i915/i915_reg.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 49dce04dd688..019c48748dc9 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -126,7 +126,7 @@ */ #define REG_BIT(__n) \ ((u32)(BIT(__n) + \ - BUILD_BUG_ON_ZERO(__builtin_constant_p(__n) && \ + BUILD_BUG_ON_ZERO(__is_constexpr(__n) && \ ((__n) < 0 || (__n) > 31)))) /** @@ -140,8 +140,8 @@ */ #define REG_GENMASK(__high, __low) \ ((u32)(GENMASK(__high, __low) + \ - BUILD_BUG_ON_ZERO(__builtin_constant_p(__high) && \ - __builtin_constant_p(__low) && \ + BUILD_BUG_ON_ZERO(__is_constexpr(__high) && \ + __is_constexpr(__low) && \ ((__low) < 0 || (__high) > 31 || (__low) > (__high))))) /* -- cgit v1.2.3 From 4361ccac2810553d6cfaa6860bd4bd65f4742ed2 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Fri, 24 May 2019 20:35:32 +0300 Subject: drm/i915/icl: Fix AUX-B HW not done issue w/o AUX-A MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Atm AUX-B transfers can fail with the following error if AUX-A is not enabled: [ 594.594108] [drm:intel_dp_aux_xfer [i915]] dp_aux_ch timeout status 0x7c2003ff [ 594.615854] [drm:intel_dp_aux_xfer [i915]] *ERROR* dp aux hw did not signal timeout! [ 594.632851] [drm:intel_dp_aux_xfer [i915]] *ERROR* dp aux hw did not signal timeout! [ 594.632915] [drm:intel_dp_aux_xfer [i915]] *ERROR* dp_aux_ch not done status 0xac2003ff [ 594.641786] ------------[ cut here ]------------ [ 594.641790] dp_aux_ch not started status 0xac2003ff [ 594.641874] WARNING: CPU: 4 PID: 1366 at drivers/gpu/drm/i915/intel_dp.c:1268 intel_dp_aux_xfer+0x232/0x890 [i915] Ville noticed this issue already earlier and managed to work around it by keeping AUX-A always powered whenever AUX-B was used. He also reported the issue to HW folks and they have now root caused the problem and updated BSpec with a fix (see internal BSpec/Index/21257, HSD/1607152412). I noticed the same error - even with the WA being applied - while doing AUX transfers with Chamelium being connected with a DP cable to the source but letting Chamelium imitate an unplug. This is probably some unstandard way on Chamelium's behalf of disconnecting itself from the AUX pins. For instance it could still pull on the AUX pins which would prevent the source from detecting AUX timeouts in the proper way, leading to the ERRORs or WARNs seen in the logs in the Reference: bug below. In case I disconnect the sink properly (the cable itself, not via the Chamelium unplug xmlrpc command) then the AUX timeout signaling works properly and so there won't be any ERRORs/WARNs emitted. 
Reference: https://bugs.freedesktop.org/show_bug.cgi?id=110718 Cc: Ville Syrjälä Reported-by: Ville Syrjälä Signed-off-by: Imre Deak Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190524173532.6444-1-imre.deak@intel.com --- drivers/gpu/drm/i915/i915_reg.h | 3 +++ drivers/gpu/drm/i915/intel_combo_phy.c | 6 ++++++ 2 files changed, 9 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 019c48748dc9..844ae4529a20 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1846,6 +1846,9 @@ enum i915_power_well_id { #define VOLTAGE_INFO_MASK (3 << 24) #define VOLTAGE_INFO_SHIFT 24 +#define ICL_PORT_COMP_DW8(port) _MMIO(_ICL_PORT_COMP_DW(8, port)) +#define IREFGEN (1 << 24) + #define CNL_PORT_COMP_DW9 _MMIO(0x162124) #define ICL_PORT_COMP_DW9(port) _MMIO(_ICL_PORT_COMP_DW(9, port)) diff --git a/drivers/gpu/drm/i915/intel_combo_phy.c b/drivers/gpu/drm/i915/intel_combo_phy.c index 19a9333b727a..98213cc58736 100644 --- a/drivers/gpu/drm/i915/intel_combo_phy.c +++ b/drivers/gpu/drm/i915/intel_combo_phy.c @@ -275,6 +275,12 @@ static void icl_combo_phys_init(struct drm_i915_private *dev_priv) cnl_set_procmon_ref_values(dev_priv, port); + if (port == PORT_A) { + val = I915_READ(ICL_PORT_COMP_DW8(port)); + val |= IREFGEN; + I915_WRITE(ICL_PORT_COMP_DW8(port), val); + } + val = I915_READ(ICL_PORT_COMP_DW0(port)); val |= COMP_INIT; I915_WRITE(ICL_PORT_COMP_DW0(port), val); -- cgit v1.2.3 From d284d5145eb8760b592e64249c1cd85c440fe90f Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Tue, 21 May 2019 19:40:24 +0300 Subject: drm/i915: Make sandybridge_pcode_read() deal with the second data register MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The pcode mailbox has two data registers. So far we've only ever used the one, but that's about to change. Expose the second data register to the callers of sandybridge_pcode_read(). 
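A hedged usage sketch, mirroring the callers updated below: code that only needs the primary data register passes NULL for the new argument, while code that needs both mailbox dwords (such as the ICL QGV point readout added in a later patch of this series) supplies two pointers:

	u32 val = 0, val2 = 0;
	int ret;

	/* single data register: the new second argument is simply NULL */
	ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
				     &val, NULL);

	/* both GEN6_PCODE_DATA and GEN6_PCODE_DATA1 are returned */
	ret = sandybridge_pcode_read(dev_priv, SOME_TWO_DWORD_MBOX_CMD,
				     &val, &val2);

SOME_TWO_DWORD_MBOX_CMD is a placeholder for any mailbox command that fills both data registers; it is not a real define.
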
Signed-off-by: Ville Syrjälä Reviewed-by: Clint Taylor Reviewed-by: Radhakrishna Sripada Reviewed-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/20190521164025.30225-1-ville.syrjala@linux.intel.com --- drivers/gpu/drm/i915/i915_debugfs.c | 4 ++-- drivers/gpu/drm/i915/intel_pm.c | 12 +++++++----- drivers/gpu/drm/i915/intel_sideband.c | 15 +++++++++------ drivers/gpu/drm/i915/intel_sideband.h | 3 ++- 4 files changed, 20 insertions(+), 14 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 633a08c0f907..344beab229a0 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1500,7 +1500,7 @@ static int gen6_drpc_info(struct seq_file *m) if (INTEL_GEN(dev_priv) <= 7) sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, - &rc6vids); + &rc6vids, NULL); seq_printf(m, "RC1e Enabled: %s\n", yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE)); @@ -1783,7 +1783,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused) ia_freq = gpu_freq; sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_MIN_FREQ_TABLE, - &ia_freq); + &ia_freq, NULL); seq_printf(m, "%d\t\t%d\t\t\t\t%d\n", intel_gpu_freq(dev_priv, (gpu_freq * (IS_GEN9_BC(dev_priv) || diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index decdd79c3805..8f82cb72d3a6 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -2822,7 +2822,7 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv, val = 0; /* data0 to be programmed to 0 for first set */ ret = sandybridge_pcode_read(dev_priv, GEN9_PCODE_READ_MEM_LATENCY, - &val); + &val, NULL); if (ret) { DRM_ERROR("SKL Mailbox read error = %d\n", ret); @@ -2841,7 +2841,7 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv, val = 1; /* data0 to be programmed to 1 for second set */ ret = sandybridge_pcode_read(dev_priv, GEN9_PCODE_READ_MEM_LATENCY, - &val); + &val, NULL); if (ret) { DRM_ERROR("SKL Mailbox read error = %d\n", ret); return; @@ -7072,7 +7072,7 @@ static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv) if (sandybridge_pcode_read(dev_priv, HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL, - &ddcc_status) == 0) + &ddcc_status, NULL) == 0) rps->efficient_freq = clamp_t(u8, ((ddcc_status >> 8) & 0xff), @@ -7419,7 +7419,8 @@ static void gen6_enable_rc6(struct drm_i915_private *dev_priv) GEN6_RC_CTL_HW_ENABLE); rc6vids = 0; - ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); + ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, + &rc6vids, NULL); if (IS_GEN(dev_priv, 6) && ret) { DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n"); } else if (IS_GEN(dev_priv, 6) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) { @@ -8566,7 +8567,8 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv) IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) { u32 params = 0; - sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, ¶ms); + sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, + ¶ms, NULL); if (params & BIT(31)) { /* OC supported */ DRM_DEBUG_DRIVER("Overclocking supported, max: %dMHz, overclock: %dMHz\n", (rps->max_freq & 0xff) * 50, diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c index 87b5a14c7ca8..a115625e980c 100644 --- a/drivers/gpu/drm/i915/intel_sideband.c +++ b/drivers/gpu/drm/i915/intel_sideband.c @@ -374,7 +374,7 @@ static inline int gen7_check_mailbox_status(u32 mbox) 
} static int __sandybridge_pcode_rw(struct drm_i915_private *i915, - u32 mbox, u32 *val, + u32 mbox, u32 *val, u32 *val1, int fast_timeout_us, int slow_timeout_ms, bool is_read) @@ -393,7 +393,7 @@ static int __sandybridge_pcode_rw(struct drm_i915_private *i915, return -EAGAIN; intel_uncore_write_fw(uncore, GEN6_PCODE_DATA, *val); - intel_uncore_write_fw(uncore, GEN6_PCODE_DATA1, 0); + intel_uncore_write_fw(uncore, GEN6_PCODE_DATA1, val1 ? *val1 : 0); intel_uncore_write_fw(uncore, GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox); @@ -407,6 +407,8 @@ static int __sandybridge_pcode_rw(struct drm_i915_private *i915, if (is_read) *val = intel_uncore_read_fw(uncore, GEN6_PCODE_DATA); + if (is_read && val1) + *val1 = intel_uncore_read_fw(uncore, GEN6_PCODE_DATA1); if (INTEL_GEN(i915) > 6) return gen7_check_mailbox_status(mbox); @@ -414,12 +416,13 @@ static int __sandybridge_pcode_rw(struct drm_i915_private *i915, return gen6_check_mailbox_status(mbox); } -int sandybridge_pcode_read(struct drm_i915_private *i915, u32 mbox, u32 *val) +int sandybridge_pcode_read(struct drm_i915_private *i915, u32 mbox, + u32 *val, u32 *val1) { int err; mutex_lock(&i915->sb_lock); - err = __sandybridge_pcode_rw(i915, mbox, val, + err = __sandybridge_pcode_rw(i915, mbox, val, val1, 500, 0, true); mutex_unlock(&i915->sb_lock); @@ -440,7 +443,7 @@ int sandybridge_pcode_write_timeout(struct drm_i915_private *i915, int err; mutex_lock(&i915->sb_lock); - err = __sandybridge_pcode_rw(i915, mbox, &val, + err = __sandybridge_pcode_rw(i915, mbox, &val, NULL, fast_timeout_us, slow_timeout_ms, false); mutex_unlock(&i915->sb_lock); @@ -457,7 +460,7 @@ static bool skl_pcode_try_request(struct drm_i915_private *i915, u32 mbox, u32 request, u32 reply_mask, u32 reply, u32 *status) { - *status = __sandybridge_pcode_rw(i915, mbox, &request, + *status = __sandybridge_pcode_rw(i915, mbox, &request, NULL, 500, 0, true); diff --git a/drivers/gpu/drm/i915/intel_sideband.h b/drivers/gpu/drm/i915/intel_sideband.h index a0907e2c4992..7fb95745a444 100644 --- a/drivers/gpu/drm/i915/intel_sideband.h +++ b/drivers/gpu/drm/i915/intel_sideband.h @@ -127,7 +127,8 @@ u32 intel_sbi_read(struct drm_i915_private *i915, u16 reg, void intel_sbi_write(struct drm_i915_private *i915, u16 reg, u32 value, enum intel_sbi_destination destination); -int sandybridge_pcode_read(struct drm_i915_private *i915, u32 mbox, u32 *val); +int sandybridge_pcode_read(struct drm_i915_private *i915, u32 mbox, + u32 *val, u32 *val1); int sandybridge_pcode_write_timeout(struct drm_i915_private *i915, u32 mbox, u32 val, int fast_timeout_us, int slow_timeout_ms); -- cgit v1.2.3 From c457d9cf256e942138a54a2e80349ee7fe20c391 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Fri, 24 May 2019 18:36:14 +0300 Subject: drm/i915: Make sure we have enough memory bandwidth on ICL MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ICL has so many planes that it can easily exceed the maximum effective memory bandwidth of the system. We must therefore check that we don't exceed that limit. The algorithm is very magic number heavy and lacks sufficient explanation for now. We also have no sane way to query the memory clock and timings, so we must rely on a combination of raw readout from the memory controller and hardcoded assumptions. The memory controller values obviously change as the system jumps between the different SAGV points, so we try to stabilize it first by disabling SAGV for the duration of the readout. 
The utilized bandwidth is tracked via a device wide atomic private object. That is actually not robust because we can't afford to enforce strict global ordering between the pipes. Thus I think I'll need to change this to simply chop up the available bandwidth between all the active pipes. Each pipe can then do whatever it wants as long as it doesn't exceed its budget. That scheme will also require that we assume that any number of planes could be active at any time. TODO: make it robust and deal with all the open questions v2: Sleep longer after disabling SAGV v3: Poll for the dclk to get raised (seen it take 250ms!) If the system has 2133MT/s memory then we pointlessly wait one full second :( v4: Use the new pcode interface to get the qgv points rather that using hardcoded numbers v5: Move the pcode stuff into intel_bw.c (Matt) s/intel_sagv_info/intel_qgv_info/ Do the NV12/P010 as per spec for now (Matt) s/IS_ICELAKE/IS_GEN11/ v6: Ignore bandwidth limits if the pcode query fails Signed-off-by: Ville Syrjälä Reviewed-by: Matt Roper Acked-by: Clint Taylor Link: https://patchwork.freedesktop.org/patch/msgid/20190524153614.32410-1-ville.syrjala@linux.intel.com --- drivers/gpu/drm/i915/Makefile | 1 + drivers/gpu/drm/i915/i915_drv.c | 2 + drivers/gpu/drm/i915/i915_drv.h | 8 + drivers/gpu/drm/i915/i915_reg.h | 3 + drivers/gpu/drm/i915/intel_atomic_plane.c | 27 ++ drivers/gpu/drm/i915/intel_atomic_plane.h | 2 + drivers/gpu/drm/i915/intel_bw.c | 421 ++++++++++++++++++++++++++++++ drivers/gpu/drm/i915/intel_bw.h | 47 ++++ drivers/gpu/drm/i915/intel_display.c | 40 ++- drivers/gpu/drm/i915/intel_drv.h | 2 + 10 files changed, 552 insertions(+), 1 deletion(-) create mode 100644 drivers/gpu/drm/i915/intel_bw.c create mode 100644 drivers/gpu/drm/i915/intel_bw.h (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 68106fe35a04..139a0fc19390 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -138,6 +138,7 @@ i915-y += intel_audio.o \ intel_atomic.o \ intel_atomic_plane.o \ intel_bios.o \ + intel_bw.o \ intel_cdclk.o \ intel_color.o \ intel_combo_phy.o \ diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 83d2eb9e74cb..6699e3a94272 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -60,6 +60,7 @@ #include "i915_vgpu.h" #include "intel_acpi.h" #include "intel_audio.h" +#include "intel_bw.h" #include "intel_cdclk.h" #include "intel_csr.h" #include "intel_dp.h" @@ -1657,6 +1658,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) */ intel_get_dram_info(dev_priv); + intel_bw_init_hw(dev_priv); return 0; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index a2664ea1395b..764347e7cf2d 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -54,6 +54,7 @@ #include #include #include +#include #include #include @@ -1841,6 +1842,13 @@ struct drm_i915_private { } type; } dram_info; + struct intel_bw_info { + int num_planes; + int deratedbw[3]; + } max_bw[6]; + + struct drm_private_obj bw_obj; + struct i915_runtime_pm runtime_pm; struct { diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 844ae4529a20..edae92f5a45e 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -8780,6 +8780,9 @@ enum { #define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x8 #define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9 #define GEN6_READ_OC_PARAMS 0xc +#define 
ICL_PCODE_MEM_SUBSYSYSTEM_INFO 0xd +#define ICL_PCODE_MEM_SS_READ_GLOBAL_INFO (0x0 << 8) +#define ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point) (((point) << 16) | (0x1 << 8)) #define GEN6_PCODE_READ_D_COMP 0x10 #define GEN6_PCODE_WRITE_D_COMP 0x11 #define HSW_PCODE_DE_WRITE_FREQ_REQ 0x17 diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c index d11681d71add..58ea1b672a1a 100644 --- a/drivers/gpu/drm/i915/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/intel_atomic_plane.c @@ -114,6 +114,29 @@ intel_plane_destroy_state(struct drm_plane *plane, drm_atomic_helper_plane_destroy_state(plane, state); } +unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + const struct drm_framebuffer *fb = plane_state->base.fb; + unsigned int cpp; + + if (!plane_state->base.visible) + return 0; + + cpp = fb->format->cpp[0]; + + /* + * Based on HSD#:1408715493 + * NV12 cpp == 4, P010 cpp == 8 + * + * FIXME what is the logic behind this? + */ + if (fb->format->is_yuv && fb->format->num_planes > 1) + cpp *= 4; + + return cpp * crtc_state->pixel_rate; +} + int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state, struct intel_crtc_state *new_crtc_state, const struct intel_plane_state *old_plane_state, @@ -125,6 +148,7 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_ new_crtc_state->active_planes &= ~BIT(plane->id); new_crtc_state->nv12_planes &= ~BIT(plane->id); new_crtc_state->c8_planes &= ~BIT(plane->id); + new_crtc_state->data_rate[plane->id] = 0; new_plane_state->base.visible = false; if (!new_plane_state->base.crtc && !old_plane_state->base.crtc) @@ -149,6 +173,9 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_ if (new_plane_state->base.visible || old_plane_state->base.visible) new_crtc_state->update_planes |= BIT(plane->id); + new_crtc_state->data_rate[plane->id] = + intel_plane_data_rate(new_crtc_state, new_plane_state); + return intel_plane_atomic_calc_changes(old_crtc_state, &new_crtc_state->base, old_plane_state, diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.h b/drivers/gpu/drm/i915/intel_atomic_plane.h index 14678620440f..0a9651376d0e 100644 --- a/drivers/gpu/drm/i915/intel_atomic_plane.h +++ b/drivers/gpu/drm/i915/intel_atomic_plane.h @@ -15,6 +15,8 @@ struct intel_plane_state; extern const struct drm_plane_helper_funcs intel_plane_helper_funcs; +unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state); void intel_update_plane(struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state); diff --git a/drivers/gpu/drm/i915/intel_bw.c b/drivers/gpu/drm/i915/intel_bw.c new file mode 100644 index 000000000000..753ac3165061 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_bw.c @@ -0,0 +1,421 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2019 Intel Corporation + */ + +#include + +#include "intel_bw.h" +#include "intel_drv.h" +#include "intel_sideband.h" + +/* Parameters for Qclk Geyserville (QGV) */ +struct intel_qgv_point { + u16 dclk, t_rp, t_rdpre, t_rc, t_ras, t_rcd; +}; + +struct intel_qgv_info { + struct intel_qgv_point points[3]; + u8 num_points; + u8 num_channels; + u8 t_bl; + enum intel_dram_type dram_type; +}; + +static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv, + struct intel_qgv_info *qi) +{ + u32 val = 0; + int ret; + + 
ret = sandybridge_pcode_read(dev_priv, + ICL_PCODE_MEM_SUBSYSYSTEM_INFO | + ICL_PCODE_MEM_SS_READ_GLOBAL_INFO, + &val, NULL); + if (ret) + return ret; + + switch (val & 0xf) { + case 0: + qi->dram_type = INTEL_DRAM_DDR4; + break; + case 1: + qi->dram_type = INTEL_DRAM_DDR3; + break; + case 2: + qi->dram_type = INTEL_DRAM_LPDDR3; + break; + case 3: + qi->dram_type = INTEL_DRAM_LPDDR3; + break; + default: + MISSING_CASE(val & 0xf); + break; + } + + qi->num_channels = (val & 0xf0) >> 4; + qi->num_points = (val & 0xf00) >> 8; + + qi->t_bl = qi->dram_type == INTEL_DRAM_DDR4 ? 4 : 8; + + return 0; +} + +static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv, + struct intel_qgv_point *sp, + int point) +{ + u32 val = 0, val2; + int ret; + + ret = sandybridge_pcode_read(dev_priv, + ICL_PCODE_MEM_SUBSYSYSTEM_INFO | + ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point), + &val, &val2); + if (ret) + return ret; + + sp->dclk = val & 0xffff; + sp->t_rp = (val & 0xff0000) >> 16; + sp->t_rcd = (val & 0xff000000) >> 24; + + sp->t_rdpre = val2 & 0xff; + sp->t_ras = (val2 & 0xff00) >> 8; + + sp->t_rc = sp->t_rp + sp->t_ras; + + return 0; +} + +static int icl_get_qgv_points(struct drm_i915_private *dev_priv, + struct intel_qgv_info *qi) +{ + int i, ret; + + ret = icl_pcode_read_mem_global_info(dev_priv, qi); + if (ret) + return ret; + + if (WARN_ON(qi->num_points > ARRAY_SIZE(qi->points))) + qi->num_points = ARRAY_SIZE(qi->points); + + for (i = 0; i < qi->num_points; i++) { + struct intel_qgv_point *sp = &qi->points[i]; + + ret = icl_pcode_read_qgv_point_info(dev_priv, sp, i); + if (ret) + return ret; + + DRM_DEBUG_KMS("QGV %d: DCLK=%d tRP=%d tRDPRE=%d tRAS=%d tRCD=%d tRC=%d\n", + i, sp->dclk, sp->t_rp, sp->t_rdpre, sp->t_ras, + sp->t_rcd, sp->t_rc); + } + + return 0; +} + +static int icl_calc_bw(int dclk, int num, int den) +{ + /* multiples of 16.666MHz (100/6) */ + return DIV_ROUND_CLOSEST(num * dclk * 100, den * 6); +} + +static int icl_sagv_max_dclk(const struct intel_qgv_info *qi) +{ + u16 dclk = 0; + int i; + + for (i = 0; i < qi->num_points; i++) + dclk = max(dclk, qi->points[i].dclk); + + return dclk; +} + +struct intel_sa_info { + u8 deburst, mpagesize, deprogbwlimit, displayrtids; +}; + +static const struct intel_sa_info icl_sa_info = { + .deburst = 8, + .mpagesize = 16, + .deprogbwlimit = 25, /* GB/s */ + .displayrtids = 128, +}; + +static int icl_get_bw_info(struct drm_i915_private *dev_priv) +{ + struct intel_qgv_info qi = {}; + const struct intel_sa_info *sa = &icl_sa_info; + bool is_y_tile = true; /* assume y tile may be used */ + int num_channels; + int deinterleave; + int ipqdepth, ipqdepthpch; + int dclk_max; + int maxdebw; + int i, ret; + + ret = icl_get_qgv_points(dev_priv, &qi); + if (ret) { + DRM_DEBUG_KMS("Failed to get memory subsystem information, ignoring bandwidth limits"); + return ret; + } + num_channels = qi.num_channels; + + deinterleave = DIV_ROUND_UP(num_channels, is_y_tile ? 
4 : 2); + dclk_max = icl_sagv_max_dclk(&qi); + + ipqdepthpch = 16; + + maxdebw = min(sa->deprogbwlimit * 1000, + icl_calc_bw(dclk_max, 16, 1) * 6 / 10); /* 60% */ + ipqdepth = min(ipqdepthpch, sa->displayrtids / num_channels); + + for (i = 0; i < ARRAY_SIZE(dev_priv->max_bw); i++) { + struct intel_bw_info *bi = &dev_priv->max_bw[i]; + int clpchgroup; + int j; + + clpchgroup = (sa->deburst * deinterleave / num_channels) << i; + bi->num_planes = (ipqdepth - clpchgroup) / clpchgroup + 1; + + for (j = 0; j < qi.num_points; j++) { + const struct intel_qgv_point *sp = &qi.points[j]; + int ct, bw; + + /* + * Max row cycle time + * + * FIXME what is the logic behind the + * assumed burst length? + */ + ct = max_t(int, sp->t_rc, sp->t_rp + sp->t_rcd + + (clpchgroup - 1) * qi.t_bl + sp->t_rdpre); + bw = icl_calc_bw(sp->dclk, clpchgroup * 32 * num_channels, ct); + + bi->deratedbw[j] = min(maxdebw, + bw * 9 / 10); /* 90% */ + + DRM_DEBUG_KMS("BW%d / QGV %d: num_planes=%d deratedbw=%d\n", + i, j, bi->num_planes, bi->deratedbw[j]); + } + + if (bi->num_planes == 1) + break; + } + + return 0; +} + +static unsigned int icl_max_bw(struct drm_i915_private *dev_priv, + int num_planes, int qgv_point) +{ + int i; + + /* Did we initialize the bw limits successfully? */ + if (dev_priv->max_bw[0].num_planes == 0) + return UINT_MAX; + + for (i = 0; i < ARRAY_SIZE(dev_priv->max_bw); i++) { + const struct intel_bw_info *bi = + &dev_priv->max_bw[i]; + + if (num_planes >= bi->num_planes) + return bi->deratedbw[qgv_point]; + } + + return 0; +} + +void intel_bw_init_hw(struct drm_i915_private *dev_priv) +{ + if (IS_GEN(dev_priv, 11)) + icl_get_bw_info(dev_priv); +} + +static unsigned int intel_max_data_rate(struct drm_i915_private *dev_priv, + int num_planes) +{ + if (IS_GEN(dev_priv, 11)) + /* + * FIXME with SAGV disabled maybe we can assume + * point 1 will always be used? Seems to match + * the behaviour observed in the wild. + */ + return min3(icl_max_bw(dev_priv, num_planes, 0), + icl_max_bw(dev_priv, num_planes, 1), + icl_max_bw(dev_priv, num_planes, 2)); + else + return UINT_MAX; +} + +static unsigned int intel_bw_crtc_num_active_planes(const struct intel_crtc_state *crtc_state) +{ + /* + * We assume cursors are small enough + * to not not cause bandwidth problems. + */ + return hweight8(crtc_state->active_planes & ~BIT(PLANE_CURSOR)); +} + +static unsigned int intel_bw_crtc_data_rate(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + unsigned int data_rate = 0; + enum plane_id plane_id; + + for_each_plane_id_on_crtc(crtc, plane_id) { + /* + * We assume cursors are small enough + * to not not cause bandwidth problems. 
+ */ + if (plane_id == PLANE_CURSOR) + continue; + + data_rate += crtc_state->data_rate[plane_id]; + } + + return data_rate; +} + +void intel_bw_crtc_update(struct intel_bw_state *bw_state, + const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + + bw_state->data_rate[crtc->pipe] = + intel_bw_crtc_data_rate(crtc_state); + bw_state->num_active_planes[crtc->pipe] = + intel_bw_crtc_num_active_planes(crtc_state); + + DRM_DEBUG_KMS("pipe %c data rate %u num active planes %u\n", + pipe_name(crtc->pipe), + bw_state->data_rate[crtc->pipe], + bw_state->num_active_planes[crtc->pipe]); +} + +static unsigned int intel_bw_num_active_planes(struct drm_i915_private *dev_priv, + const struct intel_bw_state *bw_state) +{ + unsigned int num_active_planes = 0; + enum pipe pipe; + + for_each_pipe(dev_priv, pipe) + num_active_planes += bw_state->num_active_planes[pipe]; + + return num_active_planes; +} + +static unsigned int intel_bw_data_rate(struct drm_i915_private *dev_priv, + const struct intel_bw_state *bw_state) +{ + unsigned int data_rate = 0; + enum pipe pipe; + + for_each_pipe(dev_priv, pipe) + data_rate += bw_state->data_rate[pipe]; + + return data_rate; +} + +int intel_bw_atomic_check(struct intel_atomic_state *state) +{ + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_crtc_state *new_crtc_state, *old_crtc_state; + struct intel_bw_state *bw_state = NULL; + unsigned int data_rate, max_data_rate; + unsigned int num_active_planes; + struct intel_crtc *crtc; + int i; + + /* FIXME earlier gens need some checks too */ + if (INTEL_GEN(dev_priv) < 11) + return 0; + + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, + new_crtc_state, i) { + unsigned int old_data_rate = + intel_bw_crtc_data_rate(old_crtc_state); + unsigned int new_data_rate = + intel_bw_crtc_data_rate(new_crtc_state); + unsigned int old_active_planes = + intel_bw_crtc_num_active_planes(old_crtc_state); + unsigned int new_active_planes = + intel_bw_crtc_num_active_planes(new_crtc_state); + + /* + * Avoid locking the bw state when + * nothing significant has changed. 
+ */ + if (old_data_rate == new_data_rate && + old_active_planes == new_active_planes) + continue; + + bw_state = intel_atomic_get_bw_state(state); + if (IS_ERR(bw_state)) + return PTR_ERR(bw_state); + + bw_state->data_rate[crtc->pipe] = new_data_rate; + bw_state->num_active_planes[crtc->pipe] = new_active_planes; + + DRM_DEBUG_KMS("pipe %c data rate %u num active planes %u\n", + pipe_name(crtc->pipe), + bw_state->data_rate[crtc->pipe], + bw_state->num_active_planes[crtc->pipe]); + } + + if (!bw_state) + return 0; + + data_rate = intel_bw_data_rate(dev_priv, bw_state); + num_active_planes = intel_bw_num_active_planes(dev_priv, bw_state); + + max_data_rate = intel_max_data_rate(dev_priv, num_active_planes); + + data_rate = DIV_ROUND_UP(data_rate, 1000); + + if (data_rate > max_data_rate) { + DRM_DEBUG_KMS("Bandwidth %u MB/s exceeds max available %d MB/s (%d active planes)\n", + data_rate, max_data_rate, num_active_planes); + return -EINVAL; + } + + return 0; +} + +static struct drm_private_state *intel_bw_duplicate_state(struct drm_private_obj *obj) +{ + struct intel_bw_state *state; + + state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL); + if (!state) + return NULL; + + __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base); + + return &state->base; +} + +static void intel_bw_destroy_state(struct drm_private_obj *obj, + struct drm_private_state *state) +{ + kfree(state); +} + +static const struct drm_private_state_funcs intel_bw_funcs = { + .atomic_duplicate_state = intel_bw_duplicate_state, + .atomic_destroy_state = intel_bw_destroy_state, +}; + +int intel_bw_init(struct drm_i915_private *dev_priv) +{ + struct intel_bw_state *state; + + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (!state) + return -ENOMEM; + + drm_atomic_private_obj_init(&dev_priv->drm, &dev_priv->bw_obj, + &state->base, &intel_bw_funcs); + + return 0; +} diff --git a/drivers/gpu/drm/i915/intel_bw.h b/drivers/gpu/drm/i915/intel_bw.h new file mode 100644 index 000000000000..e9d9c6d63bc3 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_bw.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2019 Intel Corporation + */ + +#ifndef __INTEL_BW_H__ +#define __INTEL_BW_H__ + +#include + +#include "i915_drv.h" +#include "intel_display.h" + +struct drm_i915_private; +struct intel_atomic_state; +struct intel_crtc_state; + +struct intel_bw_state { + struct drm_private_state base; + + unsigned int data_rate[I915_MAX_PIPES]; + u8 num_active_planes[I915_MAX_PIPES]; +}; + +#define to_intel_bw_state(x) container_of((x), struct intel_bw_state, base) + +static inline struct intel_bw_state * +intel_atomic_get_bw_state(struct intel_atomic_state *state) +{ + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct drm_private_state *bw_state; + + bw_state = drm_atomic_get_private_obj_state(&state->base, + &dev_priv->bw_obj); + if (IS_ERR(bw_state)) + return ERR_CAST(bw_state); + + return to_intel_bw_state(bw_state); +} + +void intel_bw_init_hw(struct drm_i915_private *dev_priv); +int intel_bw_init(struct drm_i915_private *dev_priv); +int intel_bw_atomic_check(struct intel_atomic_state *state); +void intel_bw_crtc_update(struct intel_bw_state *bw_state, + const struct intel_crtc_state *crtc_state); + +#endif /* __INTEL_BW_H__ */ diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index d3b2f51e2dc2..c686f0074949 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -50,6 +50,7 @@ #include "intel_acpi.h" 
#include "intel_atomic.h" #include "intel_atomic_plane.h" +#include "intel_bw.h" #include "intel_color.h" #include "intel_cdclk.h" #include "intel_crt.h" @@ -3155,6 +3156,7 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc, intel_set_plane_visible(crtc_state, plane_state, false); fixup_active_planes(crtc_state); + crtc_state->data_rate[plane->id] = 0; if (plane->id == PLANE_PRIMARY) intel_pre_disable_primary_noatomic(&crtc->base); @@ -6879,6 +6881,8 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc, struct intel_encoder *encoder; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct drm_i915_private *dev_priv = to_i915(crtc->dev); + struct intel_bw_state *bw_state = + to_intel_bw_state(dev_priv->bw_obj.state); enum intel_display_power_domain domain; struct intel_plane *plane; u64 domains; @@ -6941,6 +6945,9 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc, dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe); dev_priv->min_cdclk[intel_crtc->pipe] = 0; dev_priv->min_voltage_level[intel_crtc->pipe] = 0; + + bw_state->data_rate[intel_crtc->pipe] = 0; + bw_state->num_active_planes[intel_crtc->pipe] = 0; } /* @@ -11280,6 +11287,7 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat if (!is_crtc_enabled) { plane_state->visible = visible = false; to_intel_crtc_state(crtc_state)->active_planes &= ~BIT(plane->id); + to_intel_crtc_state(crtc_state)->data_rate[plane->id] = 0; } if (!was_visible && !visible) @@ -13406,7 +13414,15 @@ static int intel_atomic_check(struct drm_device *dev, return ret; intel_fbc_choose_crtc(dev_priv, intel_state); - return calc_watermark_data(intel_state); + ret = calc_watermark_data(intel_state); + if (ret) + return ret; + + ret = intel_bw_atomic_check(intel_state); + if (ret) + return ret; + + return 0; } static int intel_atomic_prepare_commit(struct drm_device *dev, @@ -15789,6 +15805,10 @@ int intel_modeset_init(struct drm_device *dev) drm_mode_config_init(dev); + ret = intel_bw_init(dev_priv); + if (ret) + return ret; + dev->mode_config.min_width = 0; dev->mode_config.min_height = 0; @@ -16417,8 +16437,11 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) drm_connector_list_iter_end(&conn_iter); for_each_intel_crtc(dev, crtc) { + struct intel_bw_state *bw_state = + to_intel_bw_state(dev_priv->bw_obj.state); struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); + struct intel_plane *plane; int min_cdclk = 0; memset(&crtc->base.mode, 0, sizeof(crtc->base.mode)); @@ -16457,6 +16480,21 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) dev_priv->min_voltage_level[crtc->pipe] = crtc_state->min_voltage_level; + for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { + const struct intel_plane_state *plane_state = + to_intel_plane_state(plane->base.state); + + /* + * FIXME don't have the fb yet, so can't + * use intel_plane_data_rate() :( + */ + if (plane_state->base.visible) + crtc_state->data_rate[plane->id] = + 4 * crtc_state->pixel_rate; + } + + intel_bw_crtc_update(bw_state, crtc_state); + intel_pipe_config_sanity_check(dev_priv, crtc_state); } } diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 0fbdbe559b92..f341042b6c79 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -885,6 +885,8 @@ struct intel_crtc_state { struct intel_crtc_wm_state wm; + u32 data_rate[I915_MAX_PLANES]; + /* Gamma mode programmed on the pipe */ u32 gamma_mode; -- cgit v1.2.3 
From b27e35ae5b1804078cfd177082429eea9de01885 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 27 May 2019 12:51:14 +0100 Subject: drm/i915: Keep user GGTT alive for a minimum of 250ms Do not allow runtime pm autosuspend to remove userspace GGTT mmaps too quickly. For example, igt sets the autosuspend delay to 0, and so we immediately attempt to perform runtime suspend upon releasing the wakeref. Unfortunately, that involves tearing down GGTT mmaps as they require an active device. Override the autosuspend for GGTT mmaps, by keeping the wakeref around for 250ms after populating the PTE for a fresh mmap. v2: Prefer refcount_t for its under/overflow error detection v3: Flush the user runtime autosuspend prior to system system. Signed-off-by: Chris Wilson Cc: Mika Kuoppala Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190527115114.13448-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/Kconfig.profile | 14 ++++++++ drivers/gpu/drm/i915/i915_drv.h | 3 ++ drivers/gpu/drm/i915/i915_gem.c | 7 ++++ drivers/gpu/drm/i915/i915_gem_pm.c | 1 + drivers/gpu/drm/i915/intel_wakeref.c | 63 ++++++++++++++++++++++++++++++++++++ drivers/gpu/drm/i915/intel_wakeref.h | 31 ++++++++++++++++++ 6 files changed, 119 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/Kconfig.profile b/drivers/gpu/drm/i915/Kconfig.profile index 0e5db98da8f3..4fd1ea639d0f 100644 --- a/drivers/gpu/drm/i915/Kconfig.profile +++ b/drivers/gpu/drm/i915/Kconfig.profile @@ -1,3 +1,17 @@ +config DRM_I915_USERFAULT_AUTOSUSPEND + int "Runtime autosuspend delay for userspace GGTT mmaps (ms)" + default 250 # milliseconds + help + On runtime suspend, as we suspend the device, we have to revoke + userspace GGTT mmaps and force userspace to take a pagefault on + their next access. The revocation and subsequent recreation of + the GGTT mmap can be very slow and so we impose a small hysteris + that complements the runtime-pm autosuspend and provides a lower + floor on the autosuspend delay. + + May be 0 to disable the extra delay and solely use the device level + runtime pm autosuspend delay tunable. + config DRM_I915_SPIN_REQUEST int default 5 # microseconds diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 764347e7cf2d..87cc80c7b626 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -875,6 +875,9 @@ struct i915_gem_mm { */ struct list_head userfault_list; + /* Manual runtime pm autosuspend delay for user GGTT mmaps */ + struct intel_wakeref_auto userfault_wakeref; + /** * List of objects which are pending destruction. 
*/ diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 7cafd5612f71..50f72ae39ad8 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1834,6 +1834,9 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf) assert_rpm_wakelock_held(dev_priv); if (!i915_vma_set_userfault(vma) && !obj->userfault_count++) list_add(&obj->userfault_link, &dev_priv->mm.userfault_list); + if (CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND) + intel_wakeref_auto(&dev_priv->mm.userfault_wakeref, + msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)); GEM_BUG_ON(!obj->userfault_count); i915_vma_set_ggtt_write(vma); @@ -4671,6 +4674,8 @@ void i915_gem_fini(struct drm_i915_private *dev_priv) { GEM_BUG_ON(dev_priv->gt.awake); + intel_wakeref_auto_fini(&dev_priv->mm.userfault_wakeref); + i915_gem_suspend_late(dev_priv); intel_disable_gt_powersave(dev_priv); @@ -4746,7 +4751,9 @@ static void i915_gem_init__mm(struct drm_i915_private *i915) INIT_LIST_HEAD(&i915->mm.unbound_list); INIT_LIST_HEAD(&i915->mm.bound_list); INIT_LIST_HEAD(&i915->mm.fence_list); + INIT_LIST_HEAD(&i915->mm.userfault_list); + intel_wakeref_auto_init(&i915->mm.userfault_wakeref, i915); INIT_WORK(&i915->mm.free_work, __i915_gem_free_work); } diff --git a/drivers/gpu/drm/i915/i915_gem_pm.c b/drivers/gpu/drm/i915/i915_gem_pm.c index fa9c2ebd966a..c0ad19605297 100644 --- a/drivers/gpu/drm/i915/i915_gem_pm.c +++ b/drivers/gpu/drm/i915/i915_gem_pm.c @@ -126,6 +126,7 @@ void i915_gem_suspend(struct drm_i915_private *i915) { GEM_TRACE("\n"); + intel_wakeref_auto(&i915->mm.userfault_wakeref, 0); flush_workqueue(i915->wq); mutex_lock(&i915->drm.struct_mutex); diff --git a/drivers/gpu/drm/i915/intel_wakeref.c b/drivers/gpu/drm/i915/intel_wakeref.c index 91196d9612bb..c2dda5a375f0 100644 --- a/drivers/gpu/drm/i915/intel_wakeref.c +++ b/drivers/gpu/drm/i915/intel_wakeref.c @@ -73,3 +73,66 @@ void __intel_wakeref_init(struct intel_wakeref *wf, struct lock_class_key *key) atomic_set(&wf->count, 0); wf->wakeref = 0; } + +static void wakeref_auto_timeout(struct timer_list *t) +{ + struct intel_wakeref_auto *wf = from_timer(wf, t, timer); + intel_wakeref_t wakeref; + unsigned long flags; + + if (!refcount_dec_and_lock_irqsave(&wf->count, &wf->lock, &flags)) + return; + + wakeref = fetch_and_zero(&wf->wakeref); + spin_unlock_irqrestore(&wf->lock, flags); + + intel_runtime_pm_put(wf->i915, wakeref); +} + +void intel_wakeref_auto_init(struct intel_wakeref_auto *wf, + struct drm_i915_private *i915) +{ + spin_lock_init(&wf->lock); + timer_setup(&wf->timer, wakeref_auto_timeout, 0); + refcount_set(&wf->count, 0); + wf->wakeref = 0; + wf->i915 = i915; +} + +void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout) +{ + unsigned long flags; + + if (!timeout) { + if (del_timer_sync(&wf->timer)) + wakeref_auto_timeout(&wf->timer); + return; + } + + /* Our mission is that we only extend an already active wakeref */ + assert_rpm_wakelock_held(wf->i915); + + if (!refcount_inc_not_zero(&wf->count)) { + spin_lock_irqsave(&wf->lock, flags); + if (!refcount_read(&wf->count)) { + GEM_BUG_ON(wf->wakeref); + wf->wakeref = intel_runtime_pm_get_if_in_use(wf->i915); + } + refcount_inc(&wf->count); + spin_unlock_irqrestore(&wf->lock, flags); + } + + /* + * If we extend a pending timer, we will only get a single timer + * callback and so need to cancel the local inc by running the + * elided callback to keep the wf->count balanced. 
+ */ + if (mod_timer(&wf->timer, jiffies + timeout)) + wakeref_auto_timeout(&wf->timer); +} + +void intel_wakeref_auto_fini(struct intel_wakeref_auto *wf) +{ + intel_wakeref_auto(wf, 0); + GEM_BUG_ON(wf->wakeref); +} diff --git a/drivers/gpu/drm/i915/intel_wakeref.h b/drivers/gpu/drm/i915/intel_wakeref.h index db742291211c..8a5f85c000ce 100644 --- a/drivers/gpu/drm/i915/intel_wakeref.h +++ b/drivers/gpu/drm/i915/intel_wakeref.h @@ -9,7 +9,9 @@ #include #include +#include #include +#include struct drm_i915_private; @@ -130,4 +132,33 @@ intel_wakeref_active(struct intel_wakeref *wf) return READ_ONCE(wf->wakeref); } +struct intel_wakeref_auto { + struct drm_i915_private *i915; + struct timer_list timer; + intel_wakeref_t wakeref; + spinlock_t lock; + refcount_t count; +}; + +/** + * intel_wakeref_auto: Delay the runtime-pm autosuspend + * @wf: the wakeref + * @timeout: relative timeout in jiffies + * + * The runtime-pm core uses a suspend delay after the last wakeref + * is released before triggering runtime suspend of the device. That + * delay is configurable via sysfs with little regard to the device + * characteristics. Instead, we want to tune the autosuspend based on our + * HW knowledge. intel_wakeref_auto() delays the sleep by the supplied + * timeout. + * + * Pass @timeout = 0 to cancel a previous autosuspend by executing the + * suspend immediately. + */ +void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout); + +void intel_wakeref_auto_init(struct intel_wakeref_auto *wf, + struct drm_i915_private *i915); +void intel_wakeref_auto_fini(struct intel_wakeref_auto *wf); + #endif /* INTEL_WAKEREF_H */ -- cgit v1.2.3 From cc80b2ef24898dc62242e194270200b01ca758d1 Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Mon, 27 May 2019 18:35:57 +0000 Subject: drm/i915/guc: Change platform default GuC mode Today our most desired GuC configuration is to only enable HuC if it is available (as we need authenticated HuC firmware to enable all media codecs on the hardware) and we really don't care about having GuC submission enabled. Change platform default GuC mode to match our goal, but note that we still don't change default modparam value (GuC/HuC disabled). 
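For context, a hedged sketch of how the enable_guc bits are interpreted (bit values assumed from intel_uc.h of this era, not restated in the patch): bit 0 requests GuC submission and bit 1 requests loading and authenticating HuC, so the new platform default amounts to only the HuC bit on parts where both firmwares are known:

/* Assumed modparam bit layout (see intel_uc.h / the i915.enable_guc docs) */
#define ENABLE_GUC_SUBMISSION		BIT(0)	/* enable_guc=1 */
#define ENABLE_GUC_LOAD_HUC		BIT(1)	/* enable_guc=2 */

/*
 * With this patch a negative i915.enable_guc ("use platform default")
 * resolves to ENABLE_GUC_LOAD_HUC when both GuC and HuC firmwares are
 * selected, i.e. HuC authentication without GuC submission; the built-in
 * modparam default (0, everything disabled) is unchanged.
 */
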
v2: add why HuC is so important (Joonas) Signed-off-by: Michal Wajdeczko Cc: Joonas Lahtinen Cc: Chris Wilson Cc: Rodrigo Vivi Cc: Daniele Ceraolo Spurio Cc: John Spotswood Cc: Vinay Belgaumkar Cc: Tony Ye Cc: Anusha Srivatsa Cc: Jeff Mcgee Cc: Antonio Argenziano Cc: Sujaritha Sundaresan Acked-by: Tony Ye Reviewed-by: Sujaritha Sundaresan Reviewed-by: Joonas Lahtinen Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190527183613.17076-2-michal.wajdeczko@intel.com --- drivers/gpu/drm/i915/intel_uc.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c index 63fc12cbc25d..1a265fbd95c7 100644 --- a/drivers/gpu/drm/i915/intel_uc.c +++ b/drivers/gpu/drm/i915/intel_uc.c @@ -57,10 +57,8 @@ static int __get_platform_enable_guc(struct drm_i915_private *i915) struct intel_uc_fw *huc_fw = &i915->huc.fw; int enable_guc = 0; - /* Default is to enable GuC/HuC if we know their firmwares */ - if (intel_uc_fw_is_selected(guc_fw)) - enable_guc |= ENABLE_GUC_SUBMISSION; - if (intel_uc_fw_is_selected(huc_fw)) + /* Default is to use HuC if we know GuC and HuC firmwares */ + if (intel_uc_fw_is_selected(guc_fw) && intel_uc_fw_is_selected(huc_fw)) enable_guc |= ENABLE_GUC_LOAD_HUC; /* Any platform specific fine-tuning can be done here */ -- cgit v1.2.3 From a2904ade3dc28cf1a1b7deded41f4369f75e664c Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Mon, 27 May 2019 18:35:58 +0000 Subject: drm/i915/guc: Don't allow GuC submission Due to the upcoming changes to the GuC ABI interface, we must disable GuC submission mode until final ABI will be available on all GuC firmwares. To avoid regressions on systems configured to run with no longer supported configuration "enable_guc=3" or "enable_guc=1" clear GuC submission bit. 
v2: force switch to non-GuC submission mode v3: use GEM_BUG_ON (Joonas) Signed-off-by: Michal Wajdeczko Cc: Joonas Lahtinen Cc: Chris Wilson Cc: Rodrigo Vivi Cc: Daniele Ceraolo Spurio Cc: John Spotswood Cc: Vinay Belgaumkar Cc: Tony Ye Cc: Anusha Srivatsa Cc: Jeff Mcgee Cc: Antonio Argenziano Cc: Sujaritha Sundaresan Cc: Martin Peres Acked-by: Martin Peres Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190527183613.17076-3-michal.wajdeczko@intel.com --- drivers/gpu/drm/i915/intel_uc.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c index 1a265fbd95c7..75943ea4e65d 100644 --- a/drivers/gpu/drm/i915/intel_uc.c +++ b/drivers/gpu/drm/i915/intel_uc.c @@ -130,6 +130,15 @@ static void sanitize_options_early(struct drm_i915_private *i915) "no HuC firmware"); } + /* XXX: GuC submission is unavailable for now */ + if (intel_uc_is_using_guc_submission(i915)) { + DRM_INFO("Incompatible option detected: %s=%d, %s!\n", + "enable_guc", i915_modparams.enable_guc, + "GuC submission not supported"); + DRM_INFO("Switching to non-GuC submission mode!\n"); + i915_modparams.enable_guc &= ~ENABLE_GUC_SUBMISSION; + } + /* A negative value means "use platform/config default" */ if (i915_modparams.guc_log_level < 0) i915_modparams.guc_log_level = @@ -298,6 +307,9 @@ int intel_uc_init(struct drm_i915_private *i915) if (!HAS_GUC(i915)) return -ENODEV; + /* XXX: GuC submission is unavailable for now */ + GEM_BUG_ON(USES_GUC_SUBMISSION(i915)); + ret = intel_guc_init(guc); if (ret) return ret; -- cgit v1.2.3 From ffd5ce22faa4d07a07085b497717d7650f72fd5f Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Mon, 27 May 2019 18:35:59 +0000 Subject: drm/i915/guc: Updates for GuC 32.0.3 firmware New GuC 32.0.3 firmware made many changes around its ABI that require driver updates: * FW release version numbering schema now includes patch number * FW release version encoding in CSS header * Boot parameters * Suspend/resume protocol * Sample-forcewake command * Additional Data Structures (ADS) This commit is a squash of patches 3-8 from series [1]. 
[1] https://patchwork.freedesktop.org/series/58760/ Signed-off-by: Michal Wajdeczko Cc: Joonas Lahtinen Cc: Daniele Ceraolo Spurio Cc: Rodrigo Vivi Cc: Anusha Srivatsa Cc: Jeff Mcgee Cc: John Spotswood Cc: Tvrtko Ursulin Cc: Tomasz Lis Acked-by: Daniele Ceraolo Spurio # numbering schema Acked-by: Daniele Ceraolo Spurio # ccs heaser Acked-by: Daniele Ceraolo Spurio # boot params Acked-by: John Spotswood # suspend/resume Acked-by: Daniele Ceraolo Spurio # sample-forcewake Acked-by: John Spotswood # sample-forcewake Acked-by: Daniele Ceraolo Spurio # ADS Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190527183613.17076-4-michal.wajdeczko@intel.com --- drivers/gpu/drm/i915/gt/intel_engine.h | 2 + drivers/gpu/drm/i915/gt/intel_engine_cs.c | 9 +- drivers/gpu/drm/i915/intel_guc.c | 88 ++++++-------- drivers/gpu/drm/i915/intel_guc_ads.c | 95 +++++++++------ drivers/gpu/drm/i915/intel_guc_fw.c | 75 ++++++------ drivers/gpu/drm/i915/intel_guc_fwif.h | 191 ++++++++++++++---------------- drivers/gpu/drm/i915/intel_uc_fw.c | 20 ++-- 7 files changed, 237 insertions(+), 243 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h index 9359b3a7ad9c..1c0db151f0b1 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine.h +++ b/drivers/gpu/drm/i915/gt/intel_engine.h @@ -526,6 +526,8 @@ ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine); struct i915_request * intel_engine_find_active_request(struct intel_engine_cs *engine); +u32 intel_engine_context_size(struct drm_i915_private *i915, u8 class); + #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists) diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index 2590f5904b67..1c83ea9adac0 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -156,7 +156,7 @@ static const struct engine_info intel_engines[] = { }; /** - * ___intel_engine_context_size() - return the size of the context for an engine + * intel_engine_context_size() - return the size of the context for an engine * @dev_priv: i915 device private * @class: engine class * @@ -169,8 +169,7 @@ static const struct engine_info intel_engines[] = { * in LRC mode, but does not include the "shared data page" used with * GuC submission. The caller should account for this if using the GuC. 
*/ -static u32 -__intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class) +u32 intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class) { u32 cxt_size; @@ -327,8 +326,8 @@ intel_engine_setup(struct drm_i915_private *dev_priv, engine->uabi_class = intel_engine_classes[info->class].uabi_class; - engine->context_size = __intel_engine_context_size(dev_priv, - engine->class); + engine->context_size = intel_engine_context_size(dev_priv, + engine->class); if (WARN_ON(engine->context_size > BIT(20))) engine->context_size = 0; if (engine->context_size) diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c index c4ac29309fcc..60e6463a3aac 100644 --- a/drivers/gpu/drm/i915/intel_guc.c +++ b/drivers/gpu/drm/i915/intel_guc.c @@ -250,14 +250,7 @@ void intel_guc_fini(struct intel_guc *guc) static u32 guc_ctl_debug_flags(struct intel_guc *guc) { u32 level = intel_guc_log_get_level(&guc->log); - u32 flags; - u32 ads; - - ads = intel_guc_ggtt_offset(guc, guc->ads_vma) >> PAGE_SHIFT; - flags = ads << GUC_ADS_ADDR_SHIFT | GUC_ADS_ENABLED; - - if (!GUC_LOG_LEVEL_IS_ENABLED(level)) - flags |= GUC_LOG_DEFAULT_DISABLED; + u32 flags = 0; if (!GUC_LOG_LEVEL_IS_VERBOSE(level)) flags |= GUC_LOG_DISABLED; @@ -272,11 +265,7 @@ static u32 guc_ctl_feature_flags(struct intel_guc *guc) { u32 flags = 0; - flags |= GUC_CTL_VCS2_ENABLED; - - if (USES_GUC_SUBMISSION(guc_to_i915(guc))) - flags |= GUC_CTL_KERNEL_SUBMISSIONS; - else + if (!USES_GUC_SUBMISSION(guc_to_i915(guc))) flags |= GUC_CTL_DISABLE_SCHEDULER; return flags; @@ -340,6 +329,14 @@ static u32 guc_ctl_log_params_flags(struct intel_guc *guc) return flags; } +static u32 guc_ctl_ads_flags(struct intel_guc *guc) +{ + u32 ads = intel_guc_ggtt_offset(guc, guc->ads_vma) >> PAGE_SHIFT; + u32 flags = ads << GUC_ADS_ADDR_SHIFT; + + return flags; +} + /* * Initialise the GuC parameter block before starting the firmware * transfer. These parameters are read by the firmware on startup @@ -353,20 +350,11 @@ void intel_guc_init_params(struct intel_guc *guc) memset(params, 0, sizeof(params)); - /* - * GuC ARAT increment is 10 ns. GuC default scheduler quantum is one - * second. This ARAR is calculated by: - * Scheduler-Quantum-in-ns / ARAT-increment-in-ns = 1000000000 / 10 - */ - params[GUC_CTL_ARAT_HIGH] = 0; - params[GUC_CTL_ARAT_LOW] = 100000000; - - params[GUC_CTL_WA] |= GUC_CTL_WA_UK_BY_DRIVER; - + params[GUC_CTL_CTXINFO] = guc_ctl_ctxinfo_flags(guc); + params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc); params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc); - params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc); params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc); - params[GUC_CTL_CTXINFO] = guc_ctl_ctxinfo_flags(guc); + params[GUC_CTL_ADS] = guc_ctl_ads_flags(guc); for (i = 0; i < GUC_CTL_MAX_DWORDS; i++) DRM_DEBUG_DRIVER("param[%2d] = %#x\n", i, params[i]); @@ -550,25 +538,33 @@ int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset) return intel_guc_send(guc, action, ARRAY_SIZE(action)); } -/* - * The ENTER/EXIT_S_STATE actions queue the save/restore operation in GuC FW and - * then return, so waiting on the H2G is not enough to guarantee GuC is done. - * When all the processing is done, GuC writes INTEL_GUC_SLEEP_STATE_SUCCESS to - * scratch register 14, so we can poll on that. Note that GuC does not ensure - * that the value in the register is different from - * INTEL_GUC_SLEEP_STATE_SUCCESS while the action is in progress so we need to - * take care of that ourselves as well. 
+/** + * intel_guc_suspend() - notify GuC entering suspend state + * @guc: the guc */ -static int guc_sleep_state_action(struct intel_guc *guc, - const u32 *action, u32 len) +int intel_guc_suspend(struct intel_guc *guc) { struct drm_i915_private *dev_priv = guc_to_i915(guc); int ret; u32 status; + u32 action[] = { + INTEL_GUC_ACTION_ENTER_S_STATE, + GUC_POWER_D1, /* any value greater than GUC_POWER_D0 */ + }; + + /* + * The ENTER_S_STATE action queues the save/restore operation in GuC FW + * and then returns, so waiting on the H2G is not enough to guarantee + * GuC is done. When all the processing is done, GuC writes + * INTEL_GUC_SLEEP_STATE_SUCCESS to scratch register 14, so we can poll + * on that. Note that GuC does not ensure that the value in the register + * is different from INTEL_GUC_SLEEP_STATE_SUCCESS while the action is + * in progress so we need to take care of that ourselves as well. + */ I915_WRITE(SOFT_SCRATCH(14), INTEL_GUC_SLEEP_STATE_INVALID_MASK); - ret = intel_guc_send(guc, action, len); + ret = intel_guc_send(guc, action, ARRAY_SIZE(action)); if (ret) return ret; @@ -588,21 +584,6 @@ static int guc_sleep_state_action(struct intel_guc *guc, return 0; } -/** - * intel_guc_suspend() - notify GuC entering suspend state - * @guc: the guc - */ -int intel_guc_suspend(struct intel_guc *guc) -{ - u32 data[] = { - INTEL_GUC_ACTION_ENTER_S_STATE, - GUC_POWER_D1, /* any value greater than GUC_POWER_D0 */ - intel_guc_ggtt_offset(guc, guc->shared_data) - }; - - return guc_sleep_state_action(guc, data, ARRAY_SIZE(data)); -} - /** * intel_guc_reset_engine() - ask GuC to reset an engine * @guc: intel_guc structure @@ -632,13 +613,12 @@ int intel_guc_reset_engine(struct intel_guc *guc, */ int intel_guc_resume(struct intel_guc *guc) { - u32 data[] = { + u32 action[] = { INTEL_GUC_ACTION_EXIT_S_STATE, GUC_POWER_D0, - intel_guc_ggtt_offset(guc, guc->shared_data) }; - return guc_sleep_state_action(guc, data, ARRAY_SIZE(data)); + return intel_guc_send(guc, action, ARRAY_SIZE(action)); } /** diff --git a/drivers/gpu/drm/i915/intel_guc_ads.c b/drivers/gpu/drm/i915/intel_guc_ads.c index bec62f34b15a..1aa1ec0ff4a1 100644 --- a/drivers/gpu/drm/i915/intel_guc_ads.c +++ b/drivers/gpu/drm/i915/intel_guc_ads.c @@ -51,7 +51,7 @@ static void guc_policies_init(struct guc_policies *policies) policies->max_num_work_items = POLICY_MAX_NUM_WI; for (p = 0; p < GUC_CLIENT_PRIORITY_NUM; p++) { - for (i = GUC_RENDER_ENGINE; i < GUC_MAX_ENGINES_NUM; i++) { + for (i = 0; i < GUC_MAX_ENGINE_CLASSES; i++) { policy = &policies->policy[p][i]; guc_policy_init(policy); @@ -61,6 +61,11 @@ static void guc_policies_init(struct guc_policies *policies) policies->is_valid = 1; } +static void guc_ct_pool_entries_init(struct guc_ct_pool_entry *pool, u32 num) +{ + memset(pool, 0, num * sizeof(*pool)); +} + /* * The first 80 dwords of the register state context, containing the * execlists and ppgtt registers. 
@@ -75,20 +80,21 @@ static void guc_policies_init(struct guc_policies *policies) int intel_guc_ads_create(struct intel_guc *guc) { struct drm_i915_private *dev_priv = guc_to_i915(guc); - struct i915_vma *vma, *kernel_ctx_vma; - struct page *page; + struct i915_vma *vma; /* The ads obj includes the struct itself and buffers passed to GuC */ struct { struct guc_ads ads; struct guc_policies policies; struct guc_mmio_reg_state reg_state; + struct guc_gt_system_info system_info; + struct guc_clients_info clients_info; + struct guc_ct_pool_entry ct_pool[GUC_CT_POOL_SIZE]; u8 reg_state_buffer[GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE]; } __packed *blob; - struct intel_engine_cs *engine; - enum intel_engine_id id; - const u32 skipped_offset = LRC_HEADER_PAGES * PAGE_SIZE; const u32 skipped_size = LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SIZE; u32 base; + u8 engine_class; + int ret; GEM_BUG_ON(guc->ads_vma); @@ -98,51 +104,68 @@ int intel_guc_ads_create(struct intel_guc *guc) guc->ads_vma = vma; - page = i915_vma_first_page(vma); - blob = kmap(page); + blob = i915_gem_object_pin_map(guc->ads_vma->obj, I915_MAP_WB); + if (IS_ERR(blob)) { + ret = PTR_ERR(blob); + goto err_vma; + } /* GuC scheduling policies */ guc_policies_init(&blob->policies); - /* MMIO reg state */ - for_each_engine(engine, dev_priv, id) { - blob->reg_state.white_list[engine->guc_id].mmio_start = - engine->mmio_base + GUC_MMIO_WHITE_LIST_START; - - /* Nothing to be saved or restored for now. */ - blob->reg_state.white_list[engine->guc_id].count = 0; - } - /* - * The GuC requires a "Golden Context" when it reinitialises - * engines after a reset. Here we use the Render ring default - * context, which must already exist and be pinned in the GGTT, - * so its address won't change after we've told the GuC where - * to find it. Note that we have to skip our header (1 page), - * because our GuC shared data is there. + * GuC expects a per-engine-class context image and size + * (minus hwsp and ring context). The context image will be + * used to reinitialize engines after a reset. It must exist + * and be pinned in the GGTT, so that the address won't change after + * we have told GuC where to find it. The context size will be used + * to validate that the LRC base + size fall within allowed GGTT. */ - kernel_ctx_vma = dev_priv->engine[RCS0]->kernel_context->state; - blob->ads.golden_context_lrca = - intel_guc_ggtt_offset(guc, kernel_ctx_vma) + skipped_offset; + for (engine_class = 0; engine_class <= MAX_ENGINE_CLASS; ++engine_class) { + if (engine_class == OTHER_CLASS) + continue; + /* + * TODO: Set context pointer to default state to allow + * GuC to re-init guilty contexts after internal reset. + */ + blob->ads.golden_context_lrca[engine_class] = 0; + blob->ads.eng_state_size[engine_class] = + intel_engine_context_size(dev_priv, engine_class) - + skipped_size; + } - /* - * The GuC expects us to exclude the portion of the context image that - * it skips from the size it is to read. It starts reading from after - * the execlist context (so skipping the first page [PPHWSP] and 80 - * dwords). Weird guc is weird. 
- */ - for_each_engine(engine, dev_priv, id) - blob->ads.eng_state_size[engine->guc_id] = - engine->context_size - skipped_size; + /* System info */ + blob->system_info.slice_enabled = hweight8(RUNTIME_INFO(dev_priv)->sseu.slice_mask); + blob->system_info.rcs_enabled = 1; + blob->system_info.bcs_enabled = 1; + + blob->system_info.vdbox_enable_mask = VDBOX_MASK(dev_priv); + blob->system_info.vebox_enable_mask = VEBOX_MASK(dev_priv); + blob->system_info.vdbox_sfc_support_mask = RUNTIME_INFO(dev_priv)->vdbox_sfc_access; base = intel_guc_ggtt_offset(guc, vma); + + /* Clients info */ + guc_ct_pool_entries_init(blob->ct_pool, ARRAY_SIZE(blob->ct_pool)); + + blob->clients_info.clients_num = 1; + blob->clients_info.ct_pool_addr = base + ptr_offset(blob, ct_pool); + blob->clients_info.ct_pool_count = ARRAY_SIZE(blob->ct_pool); + + /* ADS */ blob->ads.scheduler_policies = base + ptr_offset(blob, policies); blob->ads.reg_state_buffer = base + ptr_offset(blob, reg_state_buffer); blob->ads.reg_state_addr = base + ptr_offset(blob, reg_state); + blob->ads.gt_system_info = base + ptr_offset(blob, system_info); + blob->ads.clients_info = base + ptr_offset(blob, clients_info); - kunmap(page); + i915_gem_object_unpin_map(guc->ads_vma->obj); return 0; + +err_vma: + i915_vma_unpin_and_release(&guc->ads_vma, 0); + return ret; } void intel_guc_ads_destroy(struct intel_guc *guc) diff --git a/drivers/gpu/drm/i915/intel_guc_fw.c b/drivers/gpu/drm/i915/intel_guc_fw.c index 8b2dcc70b956..c740bf3731de 100644 --- a/drivers/gpu/drm/i915/intel_guc_fw.c +++ b/drivers/gpu/drm/i915/intel_guc_fw.c @@ -30,53 +30,60 @@ #include "intel_guc_fw.h" #include "i915_drv.h" -#define SKL_FW_MAJOR 9 -#define SKL_FW_MINOR 33 - -#define BXT_FW_MAJOR 9 -#define BXT_FW_MINOR 29 - -#define KBL_FW_MAJOR 9 -#define KBL_FW_MINOR 39 - -#define GUC_FW_PATH(platform, major, minor) \ - "i915/" __stringify(platform) "_guc_ver" __stringify(major) "_" __stringify(minor) ".bin" - -#define I915_SKL_GUC_UCODE GUC_FW_PATH(skl, SKL_FW_MAJOR, SKL_FW_MINOR) -MODULE_FIRMWARE(I915_SKL_GUC_UCODE); - -#define I915_BXT_GUC_UCODE GUC_FW_PATH(bxt, BXT_FW_MAJOR, BXT_FW_MINOR) -MODULE_FIRMWARE(I915_BXT_GUC_UCODE); - -#define I915_KBL_GUC_UCODE GUC_FW_PATH(kbl, KBL_FW_MAJOR, KBL_FW_MINOR) -MODULE_FIRMWARE(I915_KBL_GUC_UCODE); +#define __MAKE_GUC_FW_PATH(KEY) \ + "i915/" \ + __stringify(KEY##_GUC_FW_PREFIX) "_guc_" \ + __stringify(KEY##_GUC_FW_MAJOR) "." \ + __stringify(KEY##_GUC_FW_MINOR) "." 
\ + __stringify(KEY##_GUC_FW_PATCH) ".bin" + +#define SKL_GUC_FW_PREFIX skl +#define SKL_GUC_FW_MAJOR 32 +#define SKL_GUC_FW_MINOR 0 +#define SKL_GUC_FW_PATCH 3 +#define SKL_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(SKL) +MODULE_FIRMWARE(SKL_GUC_FIRMWARE_PATH); + +#define BXT_GUC_FW_PREFIX bxt +#define BXT_GUC_FW_MAJOR 32 +#define BXT_GUC_FW_MINOR 0 +#define BXT_GUC_FW_PATCH 3 +#define BXT_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(BXT) +MODULE_FIRMWARE(BXT_GUC_FIRMWARE_PATH); + +#define KBL_GUC_FW_PREFIX kbl +#define KBL_GUC_FW_MAJOR 32 +#define KBL_GUC_FW_MINOR 0 +#define KBL_GUC_FW_PATCH 3 +#define KBL_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(KBL) +MODULE_FIRMWARE(KBL_GUC_FIRMWARE_PATH); static void guc_fw_select(struct intel_uc_fw *guc_fw) { struct intel_guc *guc = container_of(guc_fw, struct intel_guc, fw); - struct drm_i915_private *dev_priv = guc_to_i915(guc); + struct drm_i915_private *i915 = guc_to_i915(guc); GEM_BUG_ON(guc_fw->type != INTEL_UC_FW_TYPE_GUC); - if (!HAS_GUC(dev_priv)) + if (!HAS_GUC(i915)) return; if (i915_modparams.guc_firmware_path) { guc_fw->path = i915_modparams.guc_firmware_path; guc_fw->major_ver_wanted = 0; guc_fw->minor_ver_wanted = 0; - } else if (IS_SKYLAKE(dev_priv)) { - guc_fw->path = I915_SKL_GUC_UCODE; - guc_fw->major_ver_wanted = SKL_FW_MAJOR; - guc_fw->minor_ver_wanted = SKL_FW_MINOR; - } else if (IS_BROXTON(dev_priv)) { - guc_fw->path = I915_BXT_GUC_UCODE; - guc_fw->major_ver_wanted = BXT_FW_MAJOR; - guc_fw->minor_ver_wanted = BXT_FW_MINOR; - } else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) { - guc_fw->path = I915_KBL_GUC_UCODE; - guc_fw->major_ver_wanted = KBL_FW_MAJOR; - guc_fw->minor_ver_wanted = KBL_FW_MINOR; + } else if (IS_KABYLAKE(i915) || IS_COFFEELAKE(i915)) { + guc_fw->path = KBL_GUC_FIRMWARE_PATH; + guc_fw->major_ver_wanted = KBL_GUC_FW_MAJOR; + guc_fw->minor_ver_wanted = KBL_GUC_FW_MINOR; + } else if (IS_BROXTON(i915)) { + guc_fw->path = BXT_GUC_FIRMWARE_PATH; + guc_fw->major_ver_wanted = BXT_GUC_FW_MAJOR; + guc_fw->minor_ver_wanted = BXT_GUC_FW_MINOR; + } else if (IS_SKYLAKE(i915)) { + guc_fw->path = SKL_GUC_FIRMWARE_PATH; + guc_fw->major_ver_wanted = SKL_GUC_FW_MAJOR; + guc_fw->minor_ver_wanted = SKL_GUC_FW_MINOR; } } diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h index b2f5148f4f17..fa745a58d38d 100644 --- a/drivers/gpu/drm/i915/intel_guc_fwif.h +++ b/drivers/gpu/drm/i915/intel_guc_fwif.h @@ -39,6 +39,14 @@ #define GUC_VIDEO_ENGINE2 4 #define GUC_MAX_ENGINES_NUM (GUC_VIDEO_ENGINE2 + 1) +/* + * XXX: Beware that Gen9 firmware 32.x uses wrong definition for + * GUC_MAX_INSTANCES_PER_CLASS (1) but this is harmless for us now + * as we are not enabling GuC submission mode where this will be used + */ +#define GUC_MAX_ENGINE_CLASSES 5 +#define GUC_MAX_INSTANCES_PER_CLASS 4 + #define GUC_DOORBELL_INVALID 256 #define GUC_DB_SIZE (PAGE_SIZE) @@ -73,44 +81,28 @@ #define GUC_STAGE_DESC_ATTR_PCH BIT(6) #define GUC_STAGE_DESC_ATTR_TERMINATED BIT(7) -/* The guc control data is 10 DWORDs */ +/* New GuC control data */ #define GUC_CTL_CTXINFO 0 #define GUC_CTL_CTXNUM_IN16_SHIFT 0 #define GUC_CTL_BASE_ADDR_SHIFT 12 -#define GUC_CTL_ARAT_HIGH 1 -#define GUC_CTL_ARAT_LOW 2 - -#define GUC_CTL_DEVICE_INFO 3 - -#define GUC_CTL_LOG_PARAMS 4 +#define GUC_CTL_LOG_PARAMS 1 #define GUC_LOG_VALID (1 << 0) #define GUC_LOG_NOTIFY_ON_HALF_FULL (1 << 1) #define GUC_LOG_ALLOC_IN_MEGABYTE (1 << 3) #define GUC_LOG_CRASH_SHIFT 4 -#define GUC_LOG_CRASH_MASK (0x1 << GUC_LOG_CRASH_SHIFT) +#define GUC_LOG_CRASH_MASK (0x3 << 
GUC_LOG_CRASH_SHIFT) #define GUC_LOG_DPC_SHIFT 6 #define GUC_LOG_DPC_MASK (0x7 << GUC_LOG_DPC_SHIFT) #define GUC_LOG_ISR_SHIFT 9 #define GUC_LOG_ISR_MASK (0x7 << GUC_LOG_ISR_SHIFT) #define GUC_LOG_BUF_ADDR_SHIFT 12 -#define GUC_CTL_PAGE_FAULT_CONTROL 5 +#define GUC_CTL_WA 2 +#define GUC_CTL_FEATURE 3 +#define GUC_CTL_DISABLE_SCHEDULER (1 << 14) -#define GUC_CTL_WA 6 -#define GUC_CTL_WA_UK_BY_DRIVER (1 << 3) - -#define GUC_CTL_FEATURE 7 -#define GUC_CTL_VCS2_ENABLED (1 << 0) -#define GUC_CTL_KERNEL_SUBMISSIONS (1 << 1) -#define GUC_CTL_FEATURE2 (1 << 2) -#define GUC_CTL_POWER_GATING (1 << 3) -#define GUC_CTL_DISABLE_SCHEDULER (1 << 4) -#define GUC_CTL_PREEMPTION_LOG (1 << 5) -#define GUC_CTL_ENABLE_SLPC (1 << 7) -#define GUC_CTL_RESET_ON_PREMPT_FAILURE (1 << 8) - -#define GUC_CTL_DEBUG 8 +#define GUC_CTL_DEBUG 4 #define GUC_LOG_VERBOSITY_SHIFT 0 #define GUC_LOG_VERBOSITY_LOW (0 << GUC_LOG_VERBOSITY_SHIFT) #define GUC_LOG_VERBOSITY_MED (1 << GUC_LOG_VERBOSITY_SHIFT) @@ -123,13 +115,10 @@ #define GUC_LOG_DESTINATION_MASK (3 << 4) #define GUC_LOG_DISABLED (1 << 6) #define GUC_PROFILE_ENABLED (1 << 7) -#define GUC_WQ_TRACK_ENABLED (1 << 8) -#define GUC_ADS_ENABLED (1 << 9) -#define GUC_LOG_DEFAULT_DISABLED (1 << 10) -#define GUC_ADS_ADDR_SHIFT 11 -#define GUC_ADS_ADDR_MASK 0xfffff800 -#define GUC_CTL_RSRVD 9 +#define GUC_CTL_ADS 5 +#define GUC_ADS_ADDR_SHIFT 1 +#define GUC_ADS_ADDR_MASK (0xFFFFF << GUC_ADS_ADDR_SHIFT) #define GUC_CTL_MAX_DWORDS (SOFT_SCRATCH_COUNT - 2) /* [1..14] */ @@ -168,11 +157,7 @@ * in fw. So driver will load a truncated firmware in this case. * * HuC firmware layout is same as GuC firmware. - * - * HuC firmware css header is different. However, the only difference is where - * the version information is saved. The uc_css_header is unified to support - * both. Driver should get HuC version from uc_css_header.huc_sw_version, while - * uc_css_header.guc_sw_version for GuC. + * Only HuC version information is saved in a different way. */ struct uc_css_header { @@ -183,41 +168,27 @@ struct uc_css_header { u32 header_version; u32 module_id; u32 module_vendor; - union { - struct { - u8 day; - u8 month; - u16 year; - }; - u32 date; - }; + u32 date; +#define CSS_DATE_DAY (0xFF << 0) +#define CSS_DATE_MONTH (0xFF << 8) +#define CSS_DATE_YEAR (0xFFFF << 16) u32 size_dw; /* uCode plus header_size_dw */ u32 key_size_dw; u32 modulus_size_dw; u32 exponent_size_dw; - union { - struct { - u8 hour; - u8 min; - u16 sec; - }; - u32 time; - }; - + u32 time; +#define CSS_TIME_HOUR (0xFF << 0) +#define CSS_DATE_MIN (0xFF << 8) +#define CSS_DATE_SEC (0xFFFF << 16) char username[8]; char buildnumber[12]; - union { - struct { - u32 branch_client_version; - u32 sw_version; - } guc; - struct { - u32 sw_version; - u32 reserved; - } huc; - }; - u32 prod_preprod_fw; - u32 reserved[12]; + u32 sw_version; +#define CSS_SW_VERSION_GUC_MAJOR (0xFF << 16) +#define CSS_SW_VERSION_GUC_MINOR (0xFF << 8) +#define CSS_SW_VERSION_GUC_PATCH (0xFF << 0) +#define CSS_SW_VERSION_HUC_MAJOR (0xFFFF << 16) +#define CSS_SW_VERSION_HUC_MINOR (0xFFFF << 0) + u32 reserved[14]; u32 header_info; } __packed; @@ -423,23 +394,19 @@ struct guc_ct_buffer_desc { struct guc_policy { /* Time for one workload to execute. (in micro seconds) */ u32 execution_quantum; - u32 reserved1; - /* Time to wait for a preemption request to completed before issuing a * reset. (in micro seconds). */ u32 preemption_time; - /* How much time to allow to run after the first fault is observed. * Then preempt afterwards. 
(in micro seconds) */ u32 fault_time; - u32 policy_flags; - u32 reserved[2]; + u32 reserved[8]; } __packed; struct guc_policies { - struct guc_policy policy[GUC_CLIENT_PRIORITY_NUM][GUC_MAX_ENGINES_NUM]; - + struct guc_policy policy[GUC_CLIENT_PRIORITY_NUM][GUC_MAX_ENGINE_CLASSES]; + u32 submission_queue_depth[GUC_MAX_ENGINE_CLASSES]; /* In micro seconds. How much time to allow before DPC processing is * called back via interrupt (to prevent DPC queue drain starving). * Typically 1000s of micro seconds (example only, not granularity). */ @@ -452,57 +419,73 @@ struct guc_policies { * idle. */ u32 max_num_work_items; - u32 reserved[19]; + u32 reserved[4]; } __packed; /* GuC MMIO reg state struct */ -#define GUC_REGSET_FLAGS_NONE 0x0 -#define GUC_REGSET_POWERCYCLE 0x1 -#define GUC_REGSET_MASKED 0x2 -#define GUC_REGSET_ENGINERESET 0x4 -#define GUC_REGSET_SAVE_DEFAULT_VALUE 0x8 -#define GUC_REGSET_SAVE_CURRENT_VALUE 0x10 -#define GUC_REGSET_MAX_REGISTERS 25 -#define GUC_MMIO_WHITE_LIST_START 0x24d0 -#define GUC_MMIO_WHITE_LIST_MAX 12 +#define GUC_REGSET_MAX_REGISTERS 64 #define GUC_S3_SAVE_SPACE_PAGES 10 -struct guc_mmio_regset { - struct __packed { - u32 offset; - u32 value; - u32 flags; - } registers[GUC_REGSET_MAX_REGISTERS]; +struct guc_mmio_reg { + u32 offset; + u32 value; + u32 flags; +#define GUC_REGSET_MASKED (1 << 0) +} __packed; +struct guc_mmio_regset { + struct guc_mmio_reg registers[GUC_REGSET_MAX_REGISTERS]; u32 values_valid; u32 number_of_registers; } __packed; -/* MMIO registers that are set as non privileged */ -struct mmio_white_list { - u32 mmio_start; - u32 offsets[GUC_MMIO_WHITE_LIST_MAX]; - u32 count; +/* GuC register sets */ +struct guc_mmio_reg_state { + struct guc_mmio_regset engine_reg[GUC_MAX_ENGINE_CLASSES][GUC_MAX_INSTANCES_PER_CLASS]; + u32 reserved[98]; } __packed; -struct guc_mmio_reg_state { - struct guc_mmio_regset global_reg; - struct guc_mmio_regset engine_reg[GUC_MAX_ENGINES_NUM]; - struct mmio_white_list white_list[GUC_MAX_ENGINES_NUM]; +/* HW info */ +struct guc_gt_system_info { + u32 slice_enabled; + u32 rcs_enabled; + u32 reserved0; + u32 bcs_enabled; + u32 vdbox_enable_mask; + u32 vdbox_sfc_support_mask; + u32 vebox_enable_mask; + u32 reserved[9]; } __packed; -/* GuC Additional Data Struct */ +/* Clients info */ +struct guc_ct_pool_entry { + struct guc_ct_buffer_desc desc; + u32 reserved[7]; +} __packed; +#define GUC_CT_POOL_SIZE 2 + +struct guc_clients_info { + u32 clients_num; + u32 reserved0[13]; + u32 ct_pool_addr; + u32 ct_pool_count; + u32 reserved[4]; +} __packed; + +/* GuC Additional Data Struct */ struct guc_ads { u32 reg_state_addr; u32 reg_state_buffer; - u32 golden_context_lrca; u32 scheduler_policies; - u32 reserved0[3]; - u32 eng_state_size[GUC_MAX_ENGINES_NUM]; - u32 reserved2[4]; + u32 gt_system_info; + u32 clients_info; + u32 control_data; + u32 golden_context_lrca[GUC_MAX_ENGINE_CLASSES]; + u32 eng_state_size[GUC_MAX_ENGINE_CLASSES]; + u32 reserved[16]; } __packed; /* GuC logging structures */ @@ -646,7 +629,6 @@ enum intel_guc_action { INTEL_GUC_ACTION_DEFAULT = 0x0, INTEL_GUC_ACTION_REQUEST_PREEMPTION = 0x2, INTEL_GUC_ACTION_REQUEST_ENGINE_RESET = 0x3, - INTEL_GUC_ACTION_SAMPLE_FORCEWAKE = 0x6, INTEL_GUC_ACTION_ALLOCATE_DOORBELL = 0x10, INTEL_GUC_ACTION_DEALLOCATE_DOORBELL = 0x20, INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE = 0x30, @@ -654,6 +636,7 @@ enum intel_guc_action { INTEL_GUC_ACTION_ENTER_S_STATE = 0x501, INTEL_GUC_ACTION_EXIT_S_STATE = 0x502, INTEL_GUC_ACTION_SLPC_REQUEST = 0x3003, + INTEL_GUC_ACTION_SAMPLE_FORCEWAKE = 
0x3005, INTEL_GUC_ACTION_AUTHENTICATE_HUC = 0x4000, INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER = 0x4505, INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER = 0x4506, @@ -674,9 +657,9 @@ enum intel_guc_report_status { }; enum intel_guc_sleep_state_status { - INTEL_GUC_SLEEP_STATE_SUCCESS = 0x0, - INTEL_GUC_SLEEP_STATE_PREEMPT_TO_IDLE_FAILED = 0x1, - INTEL_GUC_SLEEP_STATE_ENGINE_RESET_FAILED = 0x2 + INTEL_GUC_SLEEP_STATE_SUCCESS = 0x1, + INTEL_GUC_SLEEP_STATE_PREEMPT_TO_IDLE_FAILED = 0x2, + INTEL_GUC_SLEEP_STATE_ENGINE_RESET_FAILED = 0x3 #define INTEL_GUC_SLEEP_STATE_INVALID_MASK 0x80000000 }; diff --git a/drivers/gpu/drm/i915/intel_uc_fw.c b/drivers/gpu/drm/i915/intel_uc_fw.c index b9cb6fea9332..eca741a857a5 100644 --- a/drivers/gpu/drm/i915/intel_uc_fw.c +++ b/drivers/gpu/drm/i915/intel_uc_fw.c @@ -22,6 +22,7 @@ * */ +#include #include #include @@ -119,21 +120,20 @@ void intel_uc_fw_fetch(struct drm_i915_private *dev_priv, goto fail; } - /* - * The GuC firmware image has the version number embedded at a - * well-known offset within the firmware blob; note that major / minor - * version are TWO bytes each (i.e. u16), although all pointers and - * offsets are defined in terms of bytes (u8). - */ + /* Get version numbers from the CSS header */ switch (uc_fw->type) { case INTEL_UC_FW_TYPE_GUC: - uc_fw->major_ver_found = css->guc.sw_version >> 16; - uc_fw->minor_ver_found = css->guc.sw_version & 0xFFFF; + uc_fw->major_ver_found = FIELD_GET(CSS_SW_VERSION_GUC_MAJOR, + css->sw_version); + uc_fw->minor_ver_found = FIELD_GET(CSS_SW_VERSION_GUC_MINOR, + css->sw_version); break; case INTEL_UC_FW_TYPE_HUC: - uc_fw->major_ver_found = css->huc.sw_version >> 16; - uc_fw->minor_ver_found = css->huc.sw_version & 0xFFFF; + uc_fw->major_ver_found = FIELD_GET(CSS_SW_VERSION_HUC_MAJOR, + css->sw_version); + uc_fw->minor_ver_found = FIELD_GET(CSS_SW_VERSION_HUC_MINOR, + css->sw_version); break; default: -- cgit v1.2.3 From 386e300fe9fae7e6a15014a1dba28e44f55a8e73 Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Mon, 27 May 2019 18:36:00 +0000 Subject: drm/i915/guc: Reset GuC ADS during sanitize MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit GuC stores some data in there, which might be stale after a reset. Reinitialize whole ADS in case any part of it was corrupted during previous GuC run. 
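As a rough illustration (call order taken from the intel_uc.c hunk below), the firmware (re)load path in intel_uc_init_hw() then runs:

  intel_guc_ads_reset(guc);       /* rewrite ADS contents if already allocated */
  intel_guc_init_params(guc);     /* set up firmware boot parameters */
  ret = intel_guc_fw_upload(guc); /* DMA the firmware image */

so a freshly loaded firmware never sees stale ADS data from a previous run.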
v2: s/reinit/init, update functions descriptions (Tomek/Michal) v3: reset ADS right before fw upload Signed-off-by: Michal Wajdeczko Cc: Daniele Ceraolo Spurio Cc: MichaĹ Winiarski Cc: Tomasz Lis Reviewed-by: Tomasz Lis #v2 Reviewed-by: MichaĹ Winiarski #v2 Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190527183613.17076-5-michal.wajdeczko@intel.com --- drivers/gpu/drm/i915/intel_guc_ads.c | 90 +++++++++++++++++++++++------------- drivers/gpu/drm/i915/intel_guc_ads.h | 1 + drivers/gpu/drm/i915/intel_uc.c | 4 +- 3 files changed, 63 insertions(+), 32 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_guc_ads.c b/drivers/gpu/drm/i915/intel_guc_ads.c index 1aa1ec0ff4a1..ecb69fc94218 100644 --- a/drivers/gpu/drm/i915/intel_guc_ads.c +++ b/drivers/gpu/drm/i915/intel_guc_ads.c @@ -72,43 +72,28 @@ static void guc_ct_pool_entries_init(struct guc_ct_pool_entry *pool, u32 num) */ #define LR_HW_CONTEXT_SIZE (80 * sizeof(u32)) -/** - * intel_guc_ads_create() - creates GuC ADS - * @guc: intel_guc struct - * - */ -int intel_guc_ads_create(struct intel_guc *guc) +/* The ads obj includes the struct itself and buffers passed to GuC */ +struct __guc_ads_blob { + struct guc_ads ads; + struct guc_policies policies; + struct guc_mmio_reg_state reg_state; + struct guc_gt_system_info system_info; + struct guc_clients_info clients_info; + struct guc_ct_pool_entry ct_pool[GUC_CT_POOL_SIZE]; + u8 reg_state_buffer[GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE]; +} __packed; + +static int __guc_ads_init(struct intel_guc *guc) { struct drm_i915_private *dev_priv = guc_to_i915(guc); - struct i915_vma *vma; - /* The ads obj includes the struct itself and buffers passed to GuC */ - struct { - struct guc_ads ads; - struct guc_policies policies; - struct guc_mmio_reg_state reg_state; - struct guc_gt_system_info system_info; - struct guc_clients_info clients_info; - struct guc_ct_pool_entry ct_pool[GUC_CT_POOL_SIZE]; - u8 reg_state_buffer[GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE]; - } __packed *blob; + struct __guc_ads_blob *blob; const u32 skipped_size = LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SIZE; u32 base; u8 engine_class; - int ret; - - GEM_BUG_ON(guc->ads_vma); - - vma = intel_guc_allocate_vma(guc, PAGE_ALIGN(sizeof(*blob))); - if (IS_ERR(vma)) - return PTR_ERR(vma); - - guc->ads_vma = vma; blob = i915_gem_object_pin_map(guc->ads_vma->obj, I915_MAP_WB); - if (IS_ERR(blob)) { - ret = PTR_ERR(blob); - goto err_vma; - } + if (IS_ERR(blob)) + return PTR_ERR(blob); /* GuC scheduling policies */ guc_policies_init(&blob->policies); @@ -143,7 +128,7 @@ int intel_guc_ads_create(struct intel_guc *guc) blob->system_info.vebox_enable_mask = VEBOX_MASK(dev_priv); blob->system_info.vdbox_sfc_support_mask = RUNTIME_INFO(dev_priv)->vdbox_sfc_access; - base = intel_guc_ggtt_offset(guc, vma); + base = intel_guc_ggtt_offset(guc, guc->ads_vma); /* Clients info */ guc_ct_pool_entries_init(blob->ct_pool, ARRAY_SIZE(blob->ct_pool)); @@ -162,6 +147,34 @@ int intel_guc_ads_create(struct intel_guc *guc) i915_gem_object_unpin_map(guc->ads_vma->obj); return 0; +} + +/** + * intel_guc_ads_create() - allocates and initializes GuC ADS. + * @guc: intel_guc struct + * + * GuC needs memory block (Additional Data Struct), where it will store + * some data. Allocate and initialize such memory block for GuC use. 
+ */ +int intel_guc_ads_create(struct intel_guc *guc) +{ + const u32 size = PAGE_ALIGN(sizeof(struct __guc_ads_blob)); + struct i915_vma *vma; + int ret; + + GEM_BUG_ON(guc->ads_vma); + + vma = intel_guc_allocate_vma(guc, size); + if (IS_ERR(vma)) + return PTR_ERR(vma); + + guc->ads_vma = vma; + + ret = __guc_ads_init(guc); + if (ret) + goto err_vma; + + return 0; err_vma: i915_vma_unpin_and_release(&guc->ads_vma, 0); @@ -172,3 +185,18 @@ void intel_guc_ads_destroy(struct intel_guc *guc) { i915_vma_unpin_and_release(&guc->ads_vma, 0); } + +/** + * intel_guc_ads_reset() - prepares GuC Additional Data Struct for reuse + * @guc: intel_guc struct + * + * GuC stores some data in ADS, which might be stale after a reset. + * Reinitialize whole ADS in case any part of it was corrupted during + * previous GuC run. + */ +void intel_guc_ads_reset(struct intel_guc *guc) +{ + if (!guc->ads_vma) + return; + __guc_ads_init(guc); +} diff --git a/drivers/gpu/drm/i915/intel_guc_ads.h b/drivers/gpu/drm/i915/intel_guc_ads.h index c4735742c564..7f40f9cd5fb9 100644 --- a/drivers/gpu/drm/i915/intel_guc_ads.h +++ b/drivers/gpu/drm/i915/intel_guc_ads.h @@ -29,5 +29,6 @@ struct intel_guc; int intel_guc_ads_create(struct intel_guc *guc); void intel_guc_ads_destroy(struct intel_guc *guc); +void intel_guc_ads_reset(struct intel_guc *guc); #endif diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c index 75943ea4e65d..082036164c0c 100644 --- a/drivers/gpu/drm/i915/intel_uc.c +++ b/drivers/gpu/drm/i915/intel_uc.c @@ -24,8 +24,9 @@ #include "gt/intel_reset.h" #include "intel_uc.h" -#include "intel_guc_submission.h" #include "intel_guc.h" +#include "intel_guc_ads.h" +#include "intel_guc_submission.h" #include "i915_drv.h" static void guc_free_load_err_log(struct intel_guc *guc); @@ -414,6 +415,7 @@ int intel_uc_init_hw(struct drm_i915_private *i915) goto err_out; } + intel_guc_ads_reset(guc); intel_guc_init_params(guc); ret = intel_guc_fw_upload(guc); if (ret == 0) -- cgit v1.2.3 From e958cc908792d0a433c9abf9bf47e97648e4c204 Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Mon, 27 May 2019 18:36:01 +0000 Subject: drm/i915/guc: Always ask GuC to update power domain states With newer GuC firmware it is always ok to ask GuC to update power domain states. Make it an unconditional initialization step. 
Signed-off-by: Michal Wajdeczko Cc: Daniele Ceraolo Spurio Cc: John Spotswood Reviewed-by: Daniele Ceraolo Spurio Reviewed-by: John Spotswood Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190527183613.17076-6-michal.wajdeczko@intel.com --- drivers/gpu/drm/i915/intel_guc_submission.c | 4 ---- drivers/gpu/drm/i915/intel_uc.c | 8 ++++---- 2 files changed, 4 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c index 987ff586d7f9..ffdab22db2b0 100644 --- a/drivers/gpu/drm/i915/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/intel_guc_submission.c @@ -1426,10 +1426,6 @@ int intel_guc_submission_enable(struct intel_guc *guc) GEM_BUG_ON(!guc->execbuf_client); - err = intel_guc_sample_forcewake(guc); - if (err) - return err; - err = guc_clients_enable(guc); if (err) return err; diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c index 082036164c0c..3eb4f4320667 100644 --- a/drivers/gpu/drm/i915/intel_uc.c +++ b/drivers/gpu/drm/i915/intel_uc.c @@ -439,14 +439,14 @@ int intel_uc_init_hw(struct drm_i915_private *i915) goto err_communication; } + ret = intel_guc_sample_forcewake(guc); + if (ret) + goto err_communication; + if (USES_GUC_SUBMISSION(i915)) { ret = intel_guc_submission_enable(guc); if (ret) goto err_communication; - } else if (INTEL_GEN(i915) < 11) { - ret = intel_guc_sample_forcewake(guc); - if (ret) - goto err_communication; } dev_info(i915->drm.dev, "GuC firmware version %u.%u\n", -- cgit v1.2.3 From 415e7f0a7b89758c752da3f61794614e25939e1b Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Mon, 27 May 2019 18:36:02 +0000 Subject: drm/i915/guc: Define GuC firmware version for Geminilake Define GuC firmware version for Geminilake. 
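For reference, with the __MAKE_GUC_FW_PATH() helper added earlier in this series, the definitions below should expand to:

  #define GLK_GUC_FIRMWARE_PATH "i915/glk_guc_32.0.3.bin"

i.e. the driver will request i915/glk_guc_32.0.3.bin from the firmware loader.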
Signed-off-by: Michal Wajdeczko Cc: Daniele Ceraolo Spurio Cc: Anusha Srivatsa Reviewed-by: Anusha Srivatsa Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190527183613.17076-7-michal.wajdeczko@intel.com --- drivers/gpu/drm/i915/intel_guc_fw.c | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_guc_fw.c b/drivers/gpu/drm/i915/intel_guc_fw.c index c740bf3731de..c1e9bb4e04fd 100644 --- a/drivers/gpu/drm/i915/intel_guc_fw.c +++ b/drivers/gpu/drm/i915/intel_guc_fw.c @@ -58,6 +58,13 @@ MODULE_FIRMWARE(BXT_GUC_FIRMWARE_PATH); #define KBL_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(KBL) MODULE_FIRMWARE(KBL_GUC_FIRMWARE_PATH); +#define GLK_GUC_FW_PREFIX glk +#define GLK_GUC_FW_MAJOR 32 +#define GLK_GUC_FW_MINOR 0 +#define GLK_GUC_FW_PATCH 3 +#define GLK_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(GLK) +MODULE_FIRMWARE(GLK_GUC_FIRMWARE_PATH); + static void guc_fw_select(struct intel_uc_fw *guc_fw) { struct intel_guc *guc = container_of(guc_fw, struct intel_guc, fw); @@ -72,6 +79,10 @@ static void guc_fw_select(struct intel_uc_fw *guc_fw) guc_fw->path = i915_modparams.guc_firmware_path; guc_fw->major_ver_wanted = 0; guc_fw->minor_ver_wanted = 0; + } else if (IS_GEMINILAKE(i915)) { + guc_fw->path = GLK_GUC_FIRMWARE_PATH; + guc_fw->major_ver_wanted = GLK_GUC_FW_MAJOR; + guc_fw->minor_ver_wanted = GLK_GUC_FW_MINOR; } else if (IS_KABYLAKE(i915) || IS_COFFEELAKE(i915)) { guc_fw->path = KBL_GUC_FIRMWARE_PATH; guc_fw->major_ver_wanted = KBL_GUC_FW_MAJOR; -- cgit v1.2.3 From afac50928403360f481a965b9bbc97bbd06f789f Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Mon, 27 May 2019 18:36:03 +0000 Subject: drm/i915/huc: Define HuC firmware version for Geminilake Define HuC firmware version for Geminilake. 
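HuC still uses the older HUC_FW_PATH() naming scheme, so the definitions below should resolve to:

  #define I915_GLK_HUC_UCODE "i915/glk_huc_ver03_01_2893.bin"

with the version and build number encoded in the filename.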
Signed-off-by: Michal Wajdeczko Cc: Daniele Ceraolo Spurio Cc: Joonas Lahtinen Cc: Anusha Srivatsa Cc: Tony Ye Reviewed-by: Anusha Srivatsa Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190527183613.17076-8-michal.wajdeczko@intel.com --- drivers/gpu/drm/i915/intel_huc_fw.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_huc_fw.c b/drivers/gpu/drm/i915/intel_huc_fw.c index 44c559526072..8bac6a051c18 100644 --- a/drivers/gpu/drm/i915/intel_huc_fw.c +++ b/drivers/gpu/drm/i915/intel_huc_fw.c @@ -34,6 +34,10 @@ #define KBL_HUC_FW_MINOR 00 #define KBL_BLD_NUM 1810 +#define GLK_HUC_FW_MAJOR 03 +#define GLK_HUC_FW_MINOR 01 +#define GLK_BLD_NUM 2893 + #define HUC_FW_PATH(platform, major, minor, bld_num) \ "i915/" __stringify(platform) "_huc_ver" __stringify(major) "_" \ __stringify(minor) "_" __stringify(bld_num) ".bin" @@ -50,6 +54,10 @@ MODULE_FIRMWARE(I915_BXT_HUC_UCODE); KBL_HUC_FW_MINOR, KBL_BLD_NUM) MODULE_FIRMWARE(I915_KBL_HUC_UCODE); +#define I915_GLK_HUC_UCODE HUC_FW_PATH(glk, GLK_HUC_FW_MAJOR, \ + GLK_HUC_FW_MINOR, GLK_BLD_NUM) +MODULE_FIRMWARE(I915_GLK_HUC_UCODE); + static void huc_fw_select(struct intel_uc_fw *huc_fw) { struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw); @@ -76,6 +84,10 @@ static void huc_fw_select(struct intel_uc_fw *huc_fw) huc_fw->path = I915_KBL_HUC_UCODE; huc_fw->major_ver_wanted = KBL_HUC_FW_MAJOR; huc_fw->minor_ver_wanted = KBL_HUC_FW_MINOR; + } else if (IS_GEMINILAKE(dev_priv)) { + huc_fw->path = I915_GLK_HUC_UCODE; + huc_fw->major_ver_wanted = GLK_HUC_FW_MAJOR; + huc_fw->minor_ver_wanted = GLK_HUC_FW_MINOR; } } -- cgit v1.2.3 From 4a1f9dc119163677427fe03231af3bb0bf5adb4b Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Mon, 27 May 2019 18:36:04 +0000 Subject: drm/i915/guc: New GuC interrupt register for Gen11 Gen11 defines new more flexible Host-to-GuC interrupt register. Now the host can write any 32-bit payload to trigger an interrupt and GuC can additionally read this payload from the register. Current GuC firmware ignores the payload so we just write 0. 
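For comparison, the legacy trigger sets a bit in a dedicated register, while the Gen11 path writes the (for now ignored) payload; both lines correspond to the raise_irq implementations in the hunk below:

  I915_WRITE(GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER);   /* gen8+ */
  I915_WRITE(GEN11_GUC_HOST_INTERRUPT, 0);            /* gen11+ */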
Bspec: 21043 Signed-off-by: Michal Wajdeczko Cc: Daniele Ceraolo Spurio Cc: Joonas Lahtinen Cc: Rodrigo Vivi Reviewed-by: Daniele Ceraolo Spurio Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190527183613.17076-9-michal.wajdeczko@intel.com --- drivers/gpu/drm/i915/intel_guc.c | 14 +++++++++++++- drivers/gpu/drm/i915/intel_guc_reg.h | 1 + 2 files changed, 14 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c index 60e6463a3aac..888a1e999c8b 100644 --- a/drivers/gpu/drm/i915/intel_guc.c +++ b/drivers/gpu/drm/i915/intel_guc.c @@ -34,6 +34,13 @@ static void gen8_guc_raise_irq(struct intel_guc *guc) I915_WRITE(GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER); } +static void gen11_guc_raise_irq(struct intel_guc *guc) +{ + struct drm_i915_private *dev_priv = guc_to_i915(guc); + + I915_WRITE(GEN11_GUC_HOST_INTERRUPT, 0); +} + static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i) { GEM_BUG_ON(!guc->send_regs.base); @@ -63,6 +70,8 @@ void intel_guc_init_send_regs(struct intel_guc *guc) void intel_guc_init_early(struct intel_guc *guc) { + struct drm_i915_private *i915 = guc_to_i915(guc); + intel_guc_fw_init_early(guc); intel_guc_ct_init_early(&guc->ct); intel_guc_log_init_early(&guc->log); @@ -71,7 +80,10 @@ void intel_guc_init_early(struct intel_guc *guc) spin_lock_init(&guc->irq_lock); guc->send = intel_guc_send_nop; guc->handler = intel_guc_to_host_event_handler_nop; - guc->notify = gen8_guc_raise_irq; + if (INTEL_GEN(i915) >= 11) + guc->notify = gen11_guc_raise_irq; + else + guc->notify = gen8_guc_raise_irq; } static int guc_init_wq(struct intel_guc *guc) diff --git a/drivers/gpu/drm/i915/intel_guc_reg.h b/drivers/gpu/drm/i915/intel_guc_reg.h index 57e7ad522c2f..aec02eddbaed 100644 --- a/drivers/gpu/drm/i915/intel_guc_reg.h +++ b/drivers/gpu/drm/i915/intel_guc_reg.h @@ -103,6 +103,7 @@ #define GUC_SEND_INTERRUPT _MMIO(0xc4c8) #define GUC_SEND_TRIGGER (1<<0) +#define GEN11_GUC_HOST_INTERRUPT _MMIO(0x1901f0) #define GUC_NUM_DOORBELLS 256 -- cgit v1.2.3 From 2d4ed3a988e6b1ff9729d0edd74bf4890571253e Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Mon, 27 May 2019 18:36:05 +0000 Subject: drm/i915/guc: New GuC scratch registers for Gen11 Gen11 adds new set of scratch registers that can be used for MMIO based Host-to-Guc communication. Due to limited number of these registers it is expected that host will use them only for command transport buffers (CTB) communication setup if one is available. 
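For scale (values taken from the register definitions below), the existing bank offers 16 dwords while the new Gen11 bank offers only 4:

  #define SOFT_SCRATCH(n)        _MMIO(0xc180 + (n) * 4)    /* 16 dwords */
  #define GEN11_SOFT_SCRATCH(n)  _MMIO(0x190240 + (n) * 4)  /*  4 dwords */

which is why MMIO-based messaging on Gen11 is expected to be limited to bootstrapping the CT buffers.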
Bspec: 21044 Signed-off-by: Michal Wajdeczko Cc: Daniele Ceraolo Spurio Cc: Joonas Lahtinen Cc: Rodrigo Vivi Reviewed-by: Daniele Ceraolo Spurio Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190527183613.17076-10-michal.wajdeczko@intel.com --- drivers/gpu/drm/i915/intel_guc.c | 12 +++++++++--- drivers/gpu/drm/i915/intel_guc_reg.h | 3 +++ 2 files changed, 12 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c index 888a1e999c8b..538868a10168 100644 --- a/drivers/gpu/drm/i915/intel_guc.c +++ b/drivers/gpu/drm/i915/intel_guc.c @@ -56,9 +56,15 @@ void intel_guc_init_send_regs(struct intel_guc *guc) enum forcewake_domains fw_domains = 0; unsigned int i; - guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0)); - guc->send_regs.count = GUC_MAX_MMIO_MSG_LEN; - BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT); + if (HAS_GUC_CT(dev_priv) && INTEL_GEN(dev_priv) >= 11) { + guc->send_regs.base = + i915_mmio_reg_offset(GEN11_SOFT_SCRATCH(0)); + guc->send_regs.count = GEN11_SOFT_SCRATCH_COUNT; + } else { + guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0)); + guc->send_regs.count = GUC_MAX_MMIO_MSG_LEN; + BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT); + } for (i = 0; i < guc->send_regs.count; i++) { fw_domains |= intel_uncore_forcewake_for_reg(&dev_priv->uncore, diff --git a/drivers/gpu/drm/i915/intel_guc_reg.h b/drivers/gpu/drm/i915/intel_guc_reg.h index aec02eddbaed..d26de5193568 100644 --- a/drivers/gpu/drm/i915/intel_guc_reg.h +++ b/drivers/gpu/drm/i915/intel_guc_reg.h @@ -51,6 +51,9 @@ #define SOFT_SCRATCH(n) _MMIO(0xc180 + (n) * 4) #define SOFT_SCRATCH_COUNT 16 +#define GEN11_SOFT_SCRATCH(n) _MMIO(0x190240 + (n) * 4) +#define GEN11_SOFT_SCRATCH_COUNT 4 + #define UOS_RSA_SCRATCH(i) _MMIO(0xc200 + (i) * 4) #define UOS_RSA_SCRATCH_COUNT 64 -- cgit v1.2.3 From 7c5ae251b048edc540a1dd245654fcc79f63a531 Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Mon, 27 May 2019 18:36:06 +0000 Subject: drm/i915/huc: New HuC status register for Gen11 Gen11 defines new register for checking HuC authentication status. Look into the right register and bit. 
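With the per-platform reg/mask/value triplet introduced here, the authentication check reduces to one generic test. A minimal sketch (the helper name is illustrative only; the patch open-codes the same test in intel_huc_auth() and intel_huc_check_status()):

  static bool huc_is_authenticated(struct intel_huc *huc)
  {
          struct drm_i915_private *i915 = huc_to_i915(huc);

          return (I915_READ(huc->status.reg) & huc->status.mask) ==
                  huc->status.value;
  }

On Gen11 the triplet points at GEN11_HUC_KERNEL_LOAD_INFO / HUC_LOAD_SUCCESSFUL, on earlier platforms at HUC_STATUS2 / HUC_FW_VERIFIED.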
v2: use reg/mask/value instead of dedicated functions (Daniele) BSpec: 19686 Signed-off-by: Michal Wajdeczko Cc: Joonas Lahtinen Cc: Rodrigo Vivi Cc: Tony Ye Cc: Vinay Belgaumkar Cc: John Spotswood Cc: Anusha Srivatsa Reviewed-by: Daniele Ceraolo Spurio Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190527183613.17076-11-michal.wajdeczko@intel.com --- drivers/gpu/drm/i915/intel_guc_reg.h | 3 +++ drivers/gpu/drm/i915/intel_huc.c | 26 +++++++++++++++++++------- drivers/gpu/drm/i915/intel_huc.h | 7 +++++++ 3 files changed, 29 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_guc_reg.h b/drivers/gpu/drm/i915/intel_guc_reg.h index d26de5193568..7eba65795b58 100644 --- a/drivers/gpu/drm/i915/intel_guc_reg.h +++ b/drivers/gpu/drm/i915/intel_guc_reg.h @@ -79,6 +79,9 @@ #define HUC_STATUS2 _MMIO(0xD3B0) #define HUC_FW_VERIFIED (1<<7) +#define GEN11_HUC_KERNEL_LOAD_INFO _MMIO(0xC1DC) +#define HUC_LOAD_SUCCESSFUL (1 << 0) + #define GUC_WOPCM_SIZE _MMIO(0xc050) #define GUC_WOPCM_SIZE_LOCKED (1<<0) #define GUC_WOPCM_SIZE_SHIFT 12 diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c index 1ff1fb015e58..8572a0588efc 100644 --- a/drivers/gpu/drm/i915/intel_huc.c +++ b/drivers/gpu/drm/i915/intel_huc.c @@ -29,7 +29,19 @@ void intel_huc_init_early(struct intel_huc *huc) { + struct drm_i915_private *i915 = huc_to_i915(huc); + intel_huc_fw_init_early(huc); + + if (INTEL_GEN(i915) >= 11) { + huc->status.reg = GEN11_HUC_KERNEL_LOAD_INFO; + huc->status.mask = HUC_LOAD_SUCCESSFUL; + huc->status.value = HUC_LOAD_SUCCESSFUL; + } else { + huc->status.reg = HUC_STATUS2; + huc->status.mask = HUC_FW_VERIFIED; + huc->status.value = HUC_FW_VERIFIED; + } } int intel_huc_init_misc(struct intel_huc *huc) @@ -110,7 +122,6 @@ int intel_huc_auth(struct intel_huc *huc) { struct drm_i915_private *i915 = huc_to_i915(huc); struct intel_guc *guc = &i915->guc; - u32 status; int ret; if (huc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS) @@ -125,12 +136,12 @@ int intel_huc_auth(struct intel_huc *huc) /* Check authentication status, it should be done by now */ ret = __intel_wait_for_register(&i915->uncore, - HUC_STATUS2, - HUC_FW_VERIFIED, - HUC_FW_VERIFIED, - 2, 50, &status); + huc->status.reg, + huc->status.mask, + huc->status.value, + 2, 50, NULL); if (ret) { - DRM_ERROR("HuC: Firmware not verified %#x\n", status); + DRM_ERROR("HuC: Firmware not verified %d\n", ret); goto fail; } @@ -164,7 +175,8 @@ int intel_huc_check_status(struct intel_huc *huc) return -ENODEV; with_intel_runtime_pm(dev_priv, wakeref) - status = I915_READ(HUC_STATUS2) & HUC_FW_VERIFIED; + status = (I915_READ(huc->status.reg) & huc->status.mask) == + huc->status.value; return status; } diff --git a/drivers/gpu/drm/i915/intel_huc.h b/drivers/gpu/drm/i915/intel_huc.h index a0c21ae02a99..2a6c94e79f17 100644 --- a/drivers/gpu/drm/i915/intel_huc.h +++ b/drivers/gpu/drm/i915/intel_huc.h @@ -25,6 +25,7 @@ #ifndef _INTEL_HUC_H_ #define _INTEL_HUC_H_ +#include "i915_reg.h" #include "intel_uc_fw.h" #include "intel_huc_fw.h" @@ -35,6 +36,12 @@ struct intel_huc { /* HuC-specific additions */ struct i915_vma *rsa_data; void *rsa_data_vaddr; + + struct { + i915_reg_t reg; + u32 mask; + u32 value; + } status; }; void intel_huc_init_early(struct intel_huc *huc); -- cgit v1.2.3 From 1e83e7a66d139e0568a395d9b4bbdee97c5e881b Mon Sep 17 00:00:00 2001 From: Oscar Mateo Date: Mon, 27 May 2019 18:36:07 +0000 Subject: drm/i915/guc: Create vfuncs for the GuC interrupts control 
functions Controlling and handling of the GuC interrupts is Gen specific. Create virtual functions to avoid redundant runtime Gen checks. Gen-specific versions of these functions will follow. v2: move vfuncs to struct guc (Daniele) v3: rebased Signed-off-by: Oscar Mateo Signed-off-by: Michal Wajdeczko Cc: Rodrigo Vivi Cc: Tvrtko Ursulin Cc: Daniele Ceraolo Spurio Cc: Joonas Lahtinen Reviewed-by: Daniele Ceraolo Spurio Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190527183613.17076-12-michal.wajdeczko@intel.com --- drivers/gpu/drm/i915/i915_irq.c | 6 +++--- drivers/gpu/drm/i915/intel_guc.c | 8 ++++++-- drivers/gpu/drm/i915/intel_guc.h | 8 +++++++- drivers/gpu/drm/i915/intel_uc.c | 21 ++++++++++++++++++--- 4 files changed, 34 insertions(+), 9 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 233211fde0ea..607709a8c229 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -600,10 +600,10 @@ void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv) assert_rpm_wakelock_held(dev_priv); spin_lock_irq(&dev_priv->irq_lock); - if (!dev_priv->guc.interrupts_enabled) { + if (!dev_priv->guc.interrupts.enabled) { WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_guc_events); - dev_priv->guc.interrupts_enabled = true; + dev_priv->guc.interrupts.enabled = true; gen6_enable_pm_irq(dev_priv, dev_priv->pm_guc_events); } spin_unlock_irq(&dev_priv->irq_lock); @@ -614,7 +614,7 @@ void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv) assert_rpm_wakelock_held(dev_priv); spin_lock_irq(&dev_priv->irq_lock); - dev_priv->guc.interrupts_enabled = false; + dev_priv->guc.interrupts.enabled = false; gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events); diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c index 538868a10168..28642bf977bd 100644 --- a/drivers/gpu/drm/i915/intel_guc.c +++ b/drivers/gpu/drm/i915/intel_guc.c @@ -86,10 +86,14 @@ void intel_guc_init_early(struct intel_guc *guc) spin_lock_init(&guc->irq_lock); guc->send = intel_guc_send_nop; guc->handler = intel_guc_to_host_event_handler_nop; - if (INTEL_GEN(i915) >= 11) + if (INTEL_GEN(i915) >= 11) { guc->notify = gen11_guc_raise_irq; - else + } else { guc->notify = gen8_guc_raise_irq; + guc->interrupts.reset = gen9_reset_guc_interrupts; + guc->interrupts.enable = gen9_enable_guc_interrupts; + guc->interrupts.disable = gen9_disable_guc_interrupts; + } } static int guc_init_wq(struct intel_guc *guc) diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h index d4b015ab8a36..cbfed7a77c8b 100644 --- a/drivers/gpu/drm/i915/intel_guc.h +++ b/drivers/gpu/drm/i915/intel_guc.h @@ -55,9 +55,15 @@ struct intel_guc { /* intel_guc_recv interrupt related state */ spinlock_t irq_lock; - bool interrupts_enabled; unsigned int msg_enabled_mask; + struct { + bool enabled; + void (*reset)(struct drm_i915_private *i915); + void (*enable)(struct drm_i915_private *i915); + void (*disable)(struct drm_i915_private *i915); + } interrupts; + struct i915_vma *ads_vma; struct i915_vma *stage_desc_pool; void *stage_desc_pool_vaddr; diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c index 3eb4f4320667..a5ba0f007959 100644 --- a/drivers/gpu/drm/i915/intel_uc.c +++ b/drivers/gpu/drm/i915/intel_uc.c @@ -218,11 +218,26 @@ static void guc_free_load_err_log(struct intel_guc *guc) i915_gem_object_put(guc->load_err_log); } +static void 
guc_reset_interrupts(struct intel_guc *guc) +{ + guc->interrupts.reset(guc_to_i915(guc)); +} + +static void guc_enable_interrupts(struct intel_guc *guc) +{ + guc->interrupts.enable(guc_to_i915(guc)); +} + +static void guc_disable_interrupts(struct intel_guc *guc) +{ + guc->interrupts.disable(guc_to_i915(guc)); +} + static int guc_enable_communication(struct intel_guc *guc) { struct drm_i915_private *i915 = guc_to_i915(guc); - gen9_enable_guc_interrupts(i915); + guc_enable_interrupts(guc); if (HAS_GUC_CT(i915)) return intel_guc_ct_enable(&guc->ct); @@ -250,7 +265,7 @@ static void guc_disable_communication(struct intel_guc *guc) if (HAS_GUC_CT(i915)) intel_guc_ct_disable(&guc->ct); - gen9_disable_guc_interrupts(i915); + guc_disable_interrupts(guc); guc->send = intel_guc_send_nop; guc->handler = intel_guc_to_host_event_handler_nop; @@ -391,7 +406,7 @@ int intel_uc_init_hw(struct drm_i915_private *i915) GEM_BUG_ON(!HAS_GUC(i915)); - gen9_reset_guc_interrupts(i915); + guc_reset_interrupts(guc); /* WaEnableuKernelHeaderValidFix:skl */ /* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */ -- cgit v1.2.3 From 54c52a8412501fe84bccc28bd443a29cdd3f84a1 Mon Sep 17 00:00:00 2001 From: Oscar Mateo Date: Mon, 27 May 2019 18:36:08 +0000 Subject: drm/i915/guc: Correctly handle GuC interrupts on Gen11 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Starting Gen11 GuC shares interrupt registers with SG unit instead of PM. But for now we don't care about SG interrupts. v2: (Chris) v3: rebased (Michal) v4: more bspec pages, use macros, update commit msg (Michal Wi) Bspec: 19820, 19840, 19841, 20176 Signed-off-by: Oscar Mateo Signed-off-by: Michal Wajdeczko Cc: Tvrtko Ursulin Cc: Daniele Ceraolo Spurio Cc: Joonas Lahtinen Reviewed-by: Michał Winiarski Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190527183613.17076-13-michal.wajdeczko@intel.com --- drivers/gpu/drm/i915/i915_irq.c | 53 +++++++++++++++++++++++++++++++++++- drivers/gpu/drm/i915/i915_irq.h | 3 ++ drivers/gpu/drm/i915/i915_reg.h | 4 +++ drivers/gpu/drm/i915/intel_guc.c | 3 ++ drivers/gpu/drm/i915/intel_guc_reg.h | 18 ++++++++++++ 5 files changed, 80 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 607709a8c229..ca8f4226e598 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -624,6 +624,42 @@ void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv) gen9_reset_guc_interrupts(dev_priv); } +void gen11_reset_guc_interrupts(struct drm_i915_private *i915) +{ + spin_lock_irq(&i915->irq_lock); + gen11_reset_one_iir(i915, 0, GEN11_GUC); + spin_unlock_irq(&i915->irq_lock); +} + +void gen11_enable_guc_interrupts(struct drm_i915_private *dev_priv) +{ + spin_lock_irq(&dev_priv->irq_lock); + if (!dev_priv->guc.interrupts.enabled) { + u32 events = REG_FIELD_PREP(ENGINE1_MASK, + GEN11_GUC_INTR_GUC2HOST); + + WARN_ON_ONCE(gen11_reset_one_iir(dev_priv, 0, GEN11_GUC)); + I915_WRITE(GEN11_GUC_SG_INTR_ENABLE, events); + I915_WRITE(GEN11_GUC_SG_INTR_MASK, ~events); + dev_priv->guc.interrupts.enabled = true; + } + spin_unlock_irq(&dev_priv->irq_lock); +} + +void gen11_disable_guc_interrupts(struct drm_i915_private *dev_priv) +{ + spin_lock_irq(&dev_priv->irq_lock); + dev_priv->guc.interrupts.enabled = false; + + I915_WRITE(GEN11_GUC_SG_INTR_MASK, ~0); + I915_WRITE(GEN11_GUC_SG_INTR_ENABLE, 0); + + spin_unlock_irq(&dev_priv->irq_lock); + 
synchronize_irq(dev_priv->drm.irq); + + gen11_reset_guc_interrupts(dev_priv); +} + /** * bdw_update_port_irq - update DE port interrupt * @dev_priv: driver private @@ -1893,6 +1929,12 @@ static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir) intel_guc_to_host_event_handler(&dev_priv->guc); } +static void gen11_guc_irq_handler(struct drm_i915_private *i915, u16 iir) +{ + if (iir & GEN11_GUC_INTR_GUC2HOST) + intel_guc_to_host_event_handler(&i915->guc); +} + static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv) { enum pipe pipe; @@ -3015,6 +3057,9 @@ static void gen11_other_irq_handler(struct drm_i915_private * const i915, const u8 instance, const u16 iir) { + if (instance == OTHER_GUC_INSTANCE) + return gen11_guc_irq_handler(i915, iir); + if (instance == OTHER_GTPM_INSTANCE) return gen11_rps_irq_handler(i915, iir); @@ -3545,6 +3590,8 @@ static void gen11_gt_irq_reset(struct drm_i915_private *dev_priv) I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0); I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK, ~0); + I915_WRITE(GEN11_GUC_SG_INTR_ENABLE, 0); + I915_WRITE(GEN11_GUC_SG_INTR_MASK, ~0); } static void gen11_irq_reset(struct drm_device *dev) @@ -4200,6 +4247,10 @@ static void gen11_gt_irq_postinstall(struct drm_i915_private *dev_priv) dev_priv->pm_imr = ~dev_priv->pm_ier; I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0); I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK, ~0); + + /* Same thing for GuC interrupts */ + I915_WRITE(GEN11_GUC_SG_INTR_ENABLE, 0); + I915_WRITE(GEN11_GUC_SG_INTR_MASK, ~0); } static void icp_irq_postinstall(struct drm_device *dev) @@ -4707,7 +4758,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv) for (i = 0; i < MAX_L3_SLICES; ++i) dev_priv->l3_parity.remap_info[i] = NULL; - if (HAS_GUC_SCHED(dev_priv)) + if (HAS_GUC_SCHED(dev_priv) && INTEL_GEN(dev_priv) < 11) dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT; /* Let's track the enabled rps events */ diff --git a/drivers/gpu/drm/i915/i915_irq.h b/drivers/gpu/drm/i915/i915_irq.h index 0ccd0d90919d..cb25dd213308 100644 --- a/drivers/gpu/drm/i915/i915_irq.h +++ b/drivers/gpu/drm/i915/i915_irq.h @@ -110,5 +110,8 @@ void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv, void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv); void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv); void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv); +void gen11_reset_guc_interrupts(struct drm_i915_private *i915); +void gen11_enable_guc_interrupts(struct drm_i915_private *i915); +void gen11_disable_guc_interrupts(struct drm_i915_private *i915); #endif /* __I915_IRQ_H__ */ diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index edae92f5a45e..73990295d627 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -290,6 +290,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define OTHER_CLASS 4 #define MAX_ENGINE_CLASS 4 +#define OTHER_GUC_INSTANCE 0 #define OTHER_GTPM_INSTANCE 1 #define MAX_ENGINE_INSTANCE 3 @@ -7493,6 +7494,9 @@ enum { #define GEN11_CRYPTO_RSVD_INTR_MASK _MMIO(0x1900f0) #define GEN11_GUNIT_CSME_INTR_MASK _MMIO(0x1900f4) +#define ENGINE1_MASK REG_GENMASK(31, 16) +#define ENGINE0_MASK REG_GENMASK(15, 0) + #define ILK_DISPLAY_CHICKEN2 _MMIO(0x42004) /* Required on all Ironlake and Sandybridge according to the B-Spec. 
*/ #define ILK_ELPIN_409_SELECT (1 << 25) diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c index 28642bf977bd..cbe4b8df15fd 100644 --- a/drivers/gpu/drm/i915/intel_guc.c +++ b/drivers/gpu/drm/i915/intel_guc.c @@ -88,6 +88,9 @@ void intel_guc_init_early(struct intel_guc *guc) guc->handler = intel_guc_to_host_event_handler_nop; if (INTEL_GEN(i915) >= 11) { guc->notify = gen11_guc_raise_irq; + guc->interrupts.reset = gen11_reset_guc_interrupts; + guc->interrupts.enable = gen11_enable_guc_interrupts; + guc->interrupts.disable = gen11_disable_guc_interrupts; } else { guc->notify = gen8_guc_raise_irq; guc->interrupts.reset = gen9_reset_guc_interrupts; diff --git a/drivers/gpu/drm/i915/intel_guc_reg.h b/drivers/gpu/drm/i915/intel_guc_reg.h index 7eba65795b58..a214f8b71929 100644 --- a/drivers/gpu/drm/i915/intel_guc_reg.h +++ b/drivers/gpu/drm/i915/intel_guc_reg.h @@ -134,4 +134,22 @@ struct guc_doorbell_info { #define GUC_WD_VECS_IER _MMIO(0xC558) #define GUC_PM_P24C_IER _MMIO(0xC55C) +/* GuC Interrupt Vector */ +#define GEN11_GUC_INTR_GUC2HOST (1 << 15) +#define GEN11_GUC_INTR_EXEC_ERROR (1 << 14) +#define GEN11_GUC_INTR_DISPLAY_EVENT (1 << 13) +#define GEN11_GUC_INTR_SEM_SIG (1 << 12) +#define GEN11_GUC_INTR_IOMMU2GUC (1 << 11) +#define GEN11_GUC_INTR_DOORBELL_RANG (1 << 10) +#define GEN11_GUC_INTR_DMA_DONE (1 << 9) +#define GEN11_GUC_INTR_FATAL_ERROR (1 << 8) +#define GEN11_GUC_INTR_NOTIF_ERROR (1 << 7) +#define GEN11_GUC_INTR_SW_INT_6 (1 << 6) +#define GEN11_GUC_INTR_SW_INT_5 (1 << 5) +#define GEN11_GUC_INTR_SW_INT_4 (1 << 4) +#define GEN11_GUC_INTR_SW_INT_3 (1 << 3) +#define GEN11_GUC_INTR_SW_INT_2 (1 << 2) +#define GEN11_GUC_INTR_SW_INT_1 (1 << 1) +#define GEN11_GUC_INTR_SW_INT_0 (1 << 0) + #endif -- cgit v1.2.3 From 440f136bd3b7b30a27254d810179a9853e8a8ee6 Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Mon, 27 May 2019 18:36:09 +0000 Subject: drm/i915/guc: Update GuC CTB response definition Current GuC firmwares identify response message in a different way. 
v2: update comments for other H2G bits (Daniele) Signed-off-by: Michal Wajdeczko Cc: Daniele Ceraolo Spurio Cc: Kelvin Gardiner Cc: John Spotswood Reviewed-by: Daniele Ceraolo Spurio Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190527183613.17076-14-michal.wajdeczko@intel.com --- drivers/gpu/drm/i915/intel_guc_ct.c | 2 +- drivers/gpu/drm/i915/intel_guc_fwif.h | 8 +++++--- 2 files changed, 6 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_guc_ct.c b/drivers/gpu/drm/i915/intel_guc_ct.c index dde1dc0d6e69..2d5dc2aa22a7 100644 --- a/drivers/gpu/drm/i915/intel_guc_ct.c +++ b/drivers/gpu/drm/i915/intel_guc_ct.c @@ -565,7 +565,7 @@ static inline unsigned int ct_header_get_action(u32 header) static inline bool ct_header_is_response(u32 header) { - return ct_header_get_action(header) == INTEL_GUC_ACTION_DEFAULT; + return !!(header & GUC_CT_MSG_IS_RESPONSE); } static int ctb_read(struct intel_guc_ct_buffer *ctb, u32 *data) diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h index fa745a58d38d..3d1de288d96c 100644 --- a/drivers/gpu/drm/i915/intel_guc_fwif.h +++ b/drivers/gpu/drm/i915/intel_guc_fwif.h @@ -355,14 +355,16 @@ struct guc_ct_buffer_desc { * * bit[4..0] message len (in dwords) * bit[7..5] reserved - * bit[8] write fence to desc - * bit[9] write status to H2G buff - * bit[10] send status (via G2H) + * bit[8] response (G2H only) + * bit[8] write fence to desc (H2G only) + * bit[9] write status to H2G buff (H2G only) + * bit[10] send status back via G2H (H2G only) * bit[15..11] reserved * bit[31..16] action code */ #define GUC_CT_MSG_LEN_SHIFT 0 #define GUC_CT_MSG_LEN_MASK 0x1F +#define GUC_CT_MSG_IS_RESPONSE (1 << 8) #define GUC_CT_MSG_WRITE_FENCE_TO_DESC (1 << 8) #define GUC_CT_MSG_WRITE_STATUS_TO_BUFF (1 << 9) #define GUC_CT_MSG_SEND_STATUS (1 << 10) -- cgit v1.2.3 From a18c3d5e4e420ebb45083e5d0761cca321c80f01 Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Mon, 27 May 2019 18:36:10 +0000 Subject: drm/i915/guc: Enable GuC CTB communication on Gen11 Gen11 GuC firmware expects H2G command messages to be sent over CTB (command transport buffers). Signed-off-by: Michal Wajdeczko Cc: Daniele Ceraolo Spurio Cc: Joonas Lahtinen Cc: John Spotswood Reviewed-by: Daniele Ceraolo Spurio Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190527183613.17076-15-michal.wajdeczko@intel.com --- drivers/gpu/drm/i915/i915_pci.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index d7c07a947497..fc66d7f348fc 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -746,6 +746,7 @@ static const struct intel_device_info intel_cannonlake_info = { }, \ GEN(11), \ .ddb_size = 2048, \ + .has_guc_ct = 1, \ .has_logical_ring_elsq = 1, \ .color = { .degamma_lut_size = 33, .gamma_lut_size = 1024 } -- cgit v1.2.3 From f4cc89992056f1a09bb9de5fb91f277aeb326641 Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Mon, 27 May 2019 18:36:11 +0000 Subject: drm/i915/guc: Define GuC firmware version for Icelake Define GuC firmware version for Icelake. 
Signed-off-by: Michal Wajdeczko Cc: Daniele Ceraolo Spurio Cc: Joonas Lahtinen Cc: Rodrigo Vivi Cc: Anusha Srivatsa Reviewed-by: Anusha Srivatsa Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190527183613.17076-16-michal.wajdeczko@intel.com --- drivers/gpu/drm/i915/intel_guc_fw.c | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_guc_fw.c b/drivers/gpu/drm/i915/intel_guc_fw.c index c1e9bb4e04fd..72cdafd9636a 100644 --- a/drivers/gpu/drm/i915/intel_guc_fw.c +++ b/drivers/gpu/drm/i915/intel_guc_fw.c @@ -65,6 +65,13 @@ MODULE_FIRMWARE(KBL_GUC_FIRMWARE_PATH); #define GLK_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(GLK) MODULE_FIRMWARE(GLK_GUC_FIRMWARE_PATH); +#define ICL_GUC_FW_PREFIX icl +#define ICL_GUC_FW_MAJOR 32 +#define ICL_GUC_FW_MINOR 0 +#define ICL_GUC_FW_PATCH 3 +#define ICL_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(ICL) +MODULE_FIRMWARE(ICL_GUC_FIRMWARE_PATH); + static void guc_fw_select(struct intel_uc_fw *guc_fw) { struct intel_guc *guc = container_of(guc_fw, struct intel_guc, fw); @@ -79,6 +86,10 @@ static void guc_fw_select(struct intel_uc_fw *guc_fw) guc_fw->path = i915_modparams.guc_firmware_path; guc_fw->major_ver_wanted = 0; guc_fw->minor_ver_wanted = 0; + } else if (IS_ICELAKE(i915)) { + guc_fw->path = ICL_GUC_FIRMWARE_PATH; + guc_fw->major_ver_wanted = ICL_GUC_FW_MAJOR; + guc_fw->minor_ver_wanted = ICL_GUC_FW_MINOR; } else if (IS_GEMINILAKE(i915)) { guc_fw->path = GLK_GUC_FIRMWARE_PATH; guc_fw->major_ver_wanted = GLK_GUC_FW_MAJOR; -- cgit v1.2.3 From c9e0c8d91ead25b073193b5478deeaf3fd60a10a Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Mon, 27 May 2019 18:36:12 +0000 Subject: drm/i915/huc: Define HuC firmware version for Icelake Define HuC firmware version for Icelake. 
v2: 8.4.3238 is now available Signed-off-by: Michal Wajdeczko Cc: Daniele Ceraolo Spurio Cc: Joonas Lahtinen Cc: Anusha Srivatsa Cc: Tony Ye Reviewed-by: Tony Ye Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190527183613.17076-17-michal.wajdeczko@intel.com --- drivers/gpu/drm/i915/intel_huc_fw.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_huc_fw.c b/drivers/gpu/drm/i915/intel_huc_fw.c index 8bac6a051c18..05cbf8338f53 100644 --- a/drivers/gpu/drm/i915/intel_huc_fw.c +++ b/drivers/gpu/drm/i915/intel_huc_fw.c @@ -38,6 +38,10 @@ #define GLK_HUC_FW_MINOR 01 #define GLK_BLD_NUM 2893 +#define ICL_HUC_FW_MAJOR 8 +#define ICL_HUC_FW_MINOR 4 +#define ICL_BLD_NUM 3238 + #define HUC_FW_PATH(platform, major, minor, bld_num) \ "i915/" __stringify(platform) "_huc_ver" __stringify(major) "_" \ __stringify(minor) "_" __stringify(bld_num) ".bin" @@ -58,6 +62,10 @@ MODULE_FIRMWARE(I915_KBL_HUC_UCODE); GLK_HUC_FW_MINOR, GLK_BLD_NUM) MODULE_FIRMWARE(I915_GLK_HUC_UCODE); +#define I915_ICL_HUC_UCODE HUC_FW_PATH(icl, ICL_HUC_FW_MAJOR, \ + ICL_HUC_FW_MINOR, ICL_BLD_NUM) +MODULE_FIRMWARE(I915_ICL_HUC_UCODE); + static void huc_fw_select(struct intel_uc_fw *huc_fw) { struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw); @@ -88,6 +96,10 @@ static void huc_fw_select(struct intel_uc_fw *huc_fw) huc_fw->path = I915_GLK_HUC_UCODE; huc_fw->major_ver_wanted = GLK_HUC_FW_MAJOR; huc_fw->minor_ver_wanted = GLK_HUC_FW_MINOR; + } else if (IS_ICELAKE(dev_priv)) { + huc_fw->path = I915_ICL_HUC_UCODE; + huc_fw->major_ver_wanted = ICL_HUC_FW_MAJOR; + huc_fw->minor_ver_wanted = ICL_HUC_FW_MINOR; } } -- cgit v1.2.3 From 86beaea131009a6c0473dad83a87bbb3ec803f9d Mon Sep 17 00:00:00 2001 From: Uma Shankar Date: Thu, 16 May 2019 19:40:14 +0530 Subject: drm/i915: Enabled Modeset when HDR Infoframe changes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch enables modeset whenever HDR metadata needs to be updated to sink. v2: Addressed Shashank's review comments. v3: Added Shashank's RB. v4: Addressed Ville's review comments. v5: Addressed Ville's review comments. 
Signed-off-by: Ville Syrjälä Signed-off-by: Uma Shankar Reviewed-by: Shashank Sharma [mlankhorst: Fix up commit message, reorder] Reviewed-by: Maarten Lankhorst Signed-off-by: Maarten Lankhorst Link: https://patchwork.freedesktop.org/patch/msgid/1558015817-12025-10-git-send-email-uma.shankar@intel.com --- drivers/gpu/drm/i915/intel_atomic.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c index 58b8049649a0..6b985e895a97 100644 --- a/drivers/gpu/drm/i915/intel_atomic.c +++ b/drivers/gpu/drm/i915/intel_atomic.c @@ -105,6 +105,16 @@ int intel_digital_connector_atomic_set_property(struct drm_connector *connector, return -EINVAL; } +static bool blob_equal(const struct drm_property_blob *a, + const struct drm_property_blob *b) +{ + if (a && b) + return a->length == b->length && + !memcmp(a->data, b->data, a->length); + + return !a == !b; +} + int intel_digital_connector_atomic_check(struct drm_connector *conn, struct drm_connector_state *new_state) { @@ -132,7 +142,9 @@ int intel_digital_connector_atomic_check(struct drm_connector *conn, new_conn_state->base.colorspace != old_conn_state->base.colorspace || new_conn_state->base.picture_aspect_ratio != old_conn_state->base.picture_aspect_ratio || new_conn_state->base.content_type != old_conn_state->base.content_type || - new_conn_state->base.scaling_mode != old_conn_state->base.scaling_mode) + new_conn_state->base.scaling_mode != old_conn_state->base.scaling_mode || + !blob_equal(new_conn_state->base.hdr_output_metadata, + old_conn_state->base.hdr_output_metadata)) crtc_state->mode_changed = true; return 0; -- cgit v1.2.3 From c0560fab6d3798cb4a54529cdce0e91a64e978fb Mon Sep 17 00:00:00 2001 From: Uma Shankar Date: Thu, 16 May 2019 19:40:15 +0530 Subject: drm/i915: Add DRM Infoframe handling for BYT/CHT BYT/CHT doesn't support the DRM Infoframe. This caused a WARN_ON due to a missing case while executing the intel_hdmi_infoframes_enabled() function. This patch fixes that. Signed-off-by: Uma Shankar Reviewed-by: Maarten Lankhorst Signed-off-by: Maarten Lankhorst Link: https://patchwork.freedesktop.org/patch/msgid/1558015817-12025-11-git-send-email-uma.shankar@intel.com --- drivers/gpu/drm/i915/intel_hdmi.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index a0b98a0178f6..d571f537501d 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -129,6 +129,8 @@ static u32 g4x_infoframe_enable(unsigned int type) return VIDEO_DIP_ENABLE_SPD; case HDMI_INFOFRAME_TYPE_VENDOR: return VIDEO_DIP_ENABLE_VENDOR; + case HDMI_INFOFRAME_TYPE_DRM: + return 0; default: MISSING_CASE(type); return 0; -- cgit v1.2.3 From 44b42ebfccfd9d6ef377a25e99ae0085b071e868 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Fri, 17 May 2019 21:52:25 +0530 Subject: drm/i915: Enable infoframes on GLK+ for HDR MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch enables infoframes on GLK+ to be used to send HDR metadata to the HDMI sink. v2: Addressed Shashank's review comment. v3: Addressed Shashank's review comment. v4: Added Shashank's RB. v5: Dropped the hdr_metadata_change check during modeset, as per Ville's suggestion. v6: Removed an unused and duplicate bit definition, as per Ville's comment. 
Signed-off-by: Ville Syrjälä Signed-off-by: Uma Shankar Reviewed-by: Shashank Sharma [mlankhorst: Reorder patch series] Reviewed-by: Maarten Lankhorst Signed-off-by: Maarten Lankhorst Link: https://patchwork.freedesktop.org/patch/msgid/1558110145-3422-1-git-send-email-uma.shankar@intel.com --- drivers/gpu/drm/i915/i915_reg.h | 5 ++++- drivers/gpu/drm/i915/intel_hdmi.c | 19 +++++++++++++++---- 2 files changed, 19 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 73990295d627..07e3f861a92e 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -4697,7 +4697,7 @@ enum { #define VIDEO_DIP_FREQ_2VSYNC (2 << 16) #define VIDEO_DIP_FREQ_MASK (3 << 16) /* HSW and later: */ -#define DRM_DIP_ENABLE (1 << 28) +#define VIDEO_DIP_ENABLE_DRM_GLK (1 << 28) #define PSR_VSC_BIT_7_SET (1 << 27) #define VSC_SELECT_MASK (0x3 << 25) #define VSC_SELECT_SHIFT 25 @@ -8156,6 +8156,7 @@ enum { #define _HSW_VIDEO_DIP_SPD_DATA_A 0x602A0 #define _HSW_VIDEO_DIP_GMP_DATA_A 0x602E0 #define _HSW_VIDEO_DIP_VSC_DATA_A 0x60320 +#define _GLK_VIDEO_DIP_DRM_DATA_A 0x60440 #define _HSW_VIDEO_DIP_AVI_ECC_A 0x60240 #define _HSW_VIDEO_DIP_VS_ECC_A 0x60280 #define _HSW_VIDEO_DIP_SPD_ECC_A 0x602C0 @@ -8169,6 +8170,7 @@ enum { #define _HSW_VIDEO_DIP_SPD_DATA_B 0x612A0 #define _HSW_VIDEO_DIP_GMP_DATA_B 0x612E0 #define _HSW_VIDEO_DIP_VSC_DATA_B 0x61320 +#define _GLK_VIDEO_DIP_DRM_DATA_B 0x61440 #define _HSW_VIDEO_DIP_BVI_ECC_B 0x61240 #define _HSW_VIDEO_DIP_VS_ECC_B 0x61280 #define _HSW_VIDEO_DIP_SPD_ECC_B 0x612C0 @@ -8194,6 +8196,7 @@ enum { #define HSW_TVIDEO_DIP_SPD_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_SPD_DATA_A + (i) * 4) #define HSW_TVIDEO_DIP_GMP_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_GMP_DATA_A + (i) * 4) #define HSW_TVIDEO_DIP_VSC_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_VSC_DATA_A + (i) * 4) +#define GLK_TVIDEO_DIP_DRM_DATA(trans, i) _MMIO_TRANS2(trans, _GLK_VIDEO_DIP_DRM_DATA_A + (i) * 4) #define ICL_VIDEO_DIP_PPS_DATA(trans, i) _MMIO_TRANS2(trans, _ICL_VIDEO_DIP_PPS_DATA_A + (i) * 4) #define ICL_VIDEO_DIP_PPS_ECC(trans, i) _MMIO_TRANS2(trans, _ICL_VIDEO_DIP_PPS_ECC_A + (i) * 4) diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index d571f537501d..aba9d48f45f5 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -154,6 +154,8 @@ static u32 hsw_infoframe_enable(unsigned int type) return VIDEO_DIP_ENABLE_SPD_HSW; case HDMI_INFOFRAME_TYPE_VENDOR: return VIDEO_DIP_ENABLE_VS_HSW; + case HDMI_INFOFRAME_TYPE_DRM: + return VIDEO_DIP_ENABLE_DRM_GLK; default: MISSING_CASE(type); return 0; @@ -179,6 +181,8 @@ hsw_dip_data_reg(struct drm_i915_private *dev_priv, return HSW_TVIDEO_DIP_SPD_DATA(cpu_transcoder, i); case HDMI_INFOFRAME_TYPE_VENDOR: return HSW_TVIDEO_DIP_VS_DATA(cpu_transcoder, i); + case HDMI_INFOFRAME_TYPE_DRM: + return GLK_TVIDEO_DIP_DRM_DATA(cpu_transcoder, i); default: MISSING_CASE(type); return INVALID_MMIO_REG; @@ -552,10 +556,16 @@ static u32 hsw_infoframes_enabled(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); u32 val = I915_READ(HSW_TVIDEO_DIP_CTL(pipe_config->cpu_transcoder)); + u32 mask; + + mask = (VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW | + VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW | + VIDEO_DIP_ENABLE_GMP_HSW | VIDEO_DIP_ENABLE_SPD_HSW); + + if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) + mask |= 
VIDEO_DIP_ENABLE_DRM_GLK; - return val & (VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW | - VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW | - VIDEO_DIP_ENABLE_GMP_HSW | VIDEO_DIP_ENABLE_SPD_HSW); + return val & mask; } static const u8 infoframe_type_to_idx[] = { @@ -1149,7 +1159,8 @@ static void hsw_set_infoframes(struct intel_encoder *encoder, val &= ~(VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW | - VIDEO_DIP_ENABLE_GMP_HSW | VIDEO_DIP_ENABLE_SPD_HSW); + VIDEO_DIP_ENABLE_GMP_HSW | VIDEO_DIP_ENABLE_SPD_HSW | + VIDEO_DIP_ENABLE_DRM_GLK); if (!enable) { I915_WRITE(reg, val); -- cgit v1.2.3 From 5a0200f69dfd36e8316c4fab7a5036f35329b22c Mon Sep 17 00:00:00 2001 From: Uma Shankar Date: Sat, 18 May 2019 16:09:27 +0530 Subject: drm/i915: Write HDR infoframe and send to panel Enable writing of the HDR metadata infoframe to the panel. The data will be provided by userspace compositors, based on blending policies, and passed to the driver through a blob property. v2: Rebase v3: Fixed a warning message v4: Addressed Shashank's review comments v5: Rebase. Added infoframe calculation in compute config. v6: Addressed Shashank's review comment. Added HDR metadata support from GEN10 onwards as per Shashank's recommendation. v7: Addressed Shashank's review comments v8: Added Shashank's RB. v9: Addressed Ville's review comments. v10: Removed a redundant check as core already handles it, as per Ville's comment. v11: Added the metadata available check to avoid failure in compute_config. Signed-off-by: Uma Shankar Reviewed-by: Shashank Sharma Reviewed-by: Maarten Lankhorst Signed-off-by: Maarten Lankhorst Link: https://patchwork.freedesktop.org/patch/msgid/1558175967-22068-1-git-send-email-uma.shankar@intel.com --- drivers/gpu/drm/i915/intel_drv.h | 1 + drivers/gpu/drm/i915/intel_hdmi.c | 43 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 44 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index f341042b6c79..952eabb1919a 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -912,6 +912,7 @@ struct intel_crtc_state { union hdmi_infoframe avi; union hdmi_infoframe spd; union hdmi_infoframe hdmi; + union hdmi_infoframe drm; } infoframes; /* HDMI scrambling status */ diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index aba9d48f45f5..5293bcfaff74 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -575,6 +575,7 @@ static const u8 infoframe_type_to_idx[] = { HDMI_INFOFRAME_TYPE_AVI, HDMI_INFOFRAME_TYPE_SPD, HDMI_INFOFRAME_TYPE_VENDOR, + HDMI_INFOFRAME_TYPE_DRM, }; u32 intel_hdmi_infoframe_enable(unsigned int type) @@ -797,6 +798,40 @@ intel_hdmi_compute_hdmi_infoframe(struct intel_encoder *encoder, return true; } +static bool +intel_hdmi_compute_drm_infoframe(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + struct hdmi_drm_infoframe *frame = &crtc_state->infoframes.drm.drm; + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + int ret; + + if (!(INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))) + return true; + + if (!crtc_state->has_infoframe) + return true; + + if (!conn_state->hdr_output_metadata) + return true; + + crtc_state->infoframes.enable |= + intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_DRM); + + ret = drm_hdmi_infoframe_set_hdr_metadata(frame, conn_state); + if (ret 
< 0) { + DRM_DEBUG_KMS("couldn't set HDR metadata in infoframe\n"); + return false; + } + + ret = hdmi_drm_infoframe_check(frame); + if (WARN_ON(ret)) + return false; + + return true; +} + static void g4x_set_infoframes(struct intel_encoder *encoder, bool enable, const struct intel_crtc_state *crtc_state, @@ -1183,6 +1218,9 @@ static void hsw_set_infoframes(struct intel_encoder *encoder, intel_write_infoframe(encoder, crtc_state, HDMI_INFOFRAME_TYPE_VENDOR, &crtc_state->infoframes.hdmi); + intel_write_infoframe(encoder, crtc_state, + HDMI_INFOFRAME_TYPE_DRM, + &crtc_state->infoframes.drm); } void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable) @@ -2389,6 +2427,11 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder, return -EINVAL; } + if (!intel_hdmi_compute_drm_infoframe(encoder, pipe_config, conn_state)) { + DRM_DEBUG_KMS("bad DRM infoframe\n"); + return -EINVAL; + } + return 0; } -- cgit v1.2.3 From b37f588e4f689d186675575227020e4dfbcafec8 Mon Sep 17 00:00:00 2001 From: Uma Shankar Date: Thu, 16 May 2019 19:40:17 +0530 Subject: drm/i915: Add state readout for DRM infoframe Added state readout for DRM infoframe and enabled state validation for DRM infoframe. v2: Addressed Ville's review comments and dropped the unused drm infoframe read at intel_hdmi_init. v3: Removed a redundant platform check as per Ville's comment. Signed-off-by: Uma Shankar Reviewed-by: Maarten Lankhorst Signed-off-by: Maarten Lankhorst Link: https://patchwork.freedesktop.org/patch/msgid/1558015817-12025-13-git-send-email-uma.shankar@intel.com --- drivers/gpu/drm/i915/intel_ddi.c | 3 +++ drivers/gpu/drm/i915/intel_display.c | 1 + 2 files changed, 4 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index df06e5bb4764..350eaf54f01f 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -3844,6 +3844,9 @@ void intel_ddi_get_config(struct intel_encoder *encoder, intel_read_infoframe(encoder, pipe_config, HDMI_INFOFRAME_TYPE_VENDOR, &pipe_config->infoframes.hdmi); + intel_read_infoframe(encoder, pipe_config, + HDMI_INFOFRAME_TYPE_DRM, + &pipe_config->infoframes.drm); } static enum intel_output_type diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 909171d3ec25..6a063d3fccee 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -12569,6 +12569,7 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv, PIPE_CONF_CHECK_INFOFRAME(avi); PIPE_CONF_CHECK_INFOFRAME(spd); PIPE_CONF_CHECK_INFOFRAME(hdmi); + PIPE_CONF_CHECK_INFOFRAME(drm); #undef PIPE_CONF_CHECK_X #undef PIPE_CONF_CHECK_I -- cgit v1.2.3 From b7bedf31252a94fb741691d2b4a3220bcdcaf9ba Mon Sep 17 00:00:00 2001 From: Uma Shankar Date: Fri, 17 May 2019 21:49:09 +0530 Subject: drm/i915: Attach HDR metadata property to connector Attach HDR metadata property to connector object. v2: Rebase v3: Updated the property name as per updated name while creating hdr metadata property v4: Added platform check as suggested by Ville. 
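As context for how the property attached by this patch is exercised, here is a minimal userspace sketch (illustrative only, not part of the series): the connector and property IDs are assumed to have been looked up already (the property by its "HDR_OUTPUT_METADATA" name), the luminance numbers are placeholders, and struct hdr_output_metadata is the uAPI struct added earlier in this series. It wraps the metadata in a blob and attaches it to the connector in an atomic commit, which is the path that feeds the conn_state->hdr_output_metadata read in the compute_config patch above.

/*
 * Hypothetical libdrm sketch: hand HDR static metadata to the kernel via
 * the HDR_OUTPUT_METADATA connector property. Placeholder values throughout.
 */
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <xf86drmMode.h>
#include <drm_mode.h>	/* uAPI struct hdr_output_metadata */

static int set_hdr_metadata(int fd, uint32_t connector_id, uint32_t prop_id)
{
	struct hdr_output_metadata meta;
	drmModeAtomicReq *req;
	uint32_t blob_id = 0;
	int ret;

	memset(&meta, 0, sizeof(meta));
	meta.metadata_type = 0;				/* HDMI_STATIC_METADATA_TYPE1 */
	meta.hdmi_metadata_type1.eotf = 2;		/* SMPTE ST 2084 (PQ), per CTA-861-G */
	meta.hdmi_metadata_type1.metadata_type = 0;
	meta.hdmi_metadata_type1.max_cll = 1000;	/* placeholder, nits */
	meta.hdmi_metadata_type1.max_fall = 400;	/* placeholder, nits */

	/* The kernel copies the struct out of the blob at atomic_check time. */
	ret = drmModeCreatePropertyBlob(fd, &meta, sizeof(meta), &blob_id);
	if (ret)
		return ret;

	req = drmModeAtomicAlloc();
	if (!req)
		return -ENOMEM;

	drmModeAtomicAddProperty(req, connector_id, prop_id, blob_id);
	/* A changed blob may force a full modeset (see the blob_equal() check). */
	ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_ALLOW_MODESET, NULL);
	drmModeAtomicFree(req);
	return ret;
}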
Signed-off-by: Uma Shankar Reviewed-by: Shashank Sharma Reviewed-by: Maarten Lankhorst Signed-off-by: Maarten Lankhorst Link: https://patchwork.freedesktop.org/patch/msgid/1558109949-3309-1-git-send-email-uma.shankar@intel.com --- drivers/gpu/drm/i915/intel_hdmi.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 5293bcfaff74..1b4f5528cbd0 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -2771,6 +2771,10 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c drm_connector_attach_content_type_property(connector); connector->state->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE; + if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) + drm_object_attach_property(&connector->base, + connector->dev->mode_config.hdr_output_metadata_property, 0); + if (!HAS_GMCH(dev_priv)) drm_connector_attach_max_bpc_property(connector, 8, 12); } -- cgit v1.2.3 From 7f6cafb959fc4994b1c65b4433ff14aa4b14802a Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 28 May 2019 10:00:01 +0100 Subject: drm/i915: Kill the undead intel_context.c zombie It was moved over to gt/ but the backmerge brought it back from the dead. Signed-off-by: Chris Wilson Cc: Jani Nikula Reviewed-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190528090001.17248-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/intel_context.c | 270 ----------------------------------- 1 file changed, 270 deletions(-) delete mode 100644 drivers/gpu/drm/i915/intel_context.c (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_context.c b/drivers/gpu/drm/i915/intel_context.c deleted file mode 100644 index 924cc556223a..000000000000 --- a/drivers/gpu/drm/i915/intel_context.c +++ /dev/null @@ -1,270 +0,0 @@ -/* - * SPDX-License-Identifier: MIT - * - * Copyright © 2019 Intel Corporation - */ - -#include "i915_drv.h" -#include "i915_gem_context.h" -#include "i915_globals.h" -#include "intel_context.h" -#include "intel_ringbuffer.h" - -static struct i915_global_context { - struct i915_global base; - struct kmem_cache *slab_ce; -} global; - -struct intel_context *intel_context_alloc(void) -{ - return kmem_cache_zalloc(global.slab_ce, GFP_KERNEL); -} - -void intel_context_free(struct intel_context *ce) -{ - kmem_cache_free(global.slab_ce, ce); -} - -struct intel_context * -intel_context_lookup(struct i915_gem_context *ctx, - struct intel_engine_cs *engine) -{ - struct intel_context *ce = NULL; - struct rb_node *p; - - spin_lock(&ctx->hw_contexts_lock); - p = ctx->hw_contexts.rb_node; - while (p) { - struct intel_context *this = - rb_entry(p, struct intel_context, node); - - if (this->engine == engine) { - GEM_BUG_ON(this->gem_context != ctx); - ce = this; - break; - } - - if (this->engine < engine) - p = p->rb_right; - else - p = p->rb_left; - } - spin_unlock(&ctx->hw_contexts_lock); - - return ce; -} - -struct intel_context * -__intel_context_insert(struct i915_gem_context *ctx, - struct intel_engine_cs *engine, - struct intel_context *ce) -{ - struct rb_node **p, *parent; - int err = 0; - - spin_lock(&ctx->hw_contexts_lock); - - parent = NULL; - p = &ctx->hw_contexts.rb_node; - while (*p) { - struct intel_context *this; - - parent = *p; - this = rb_entry(parent, struct intel_context, node); - - if (this->engine == engine) { - err = -EEXIST; - ce = this; - break; - } - - if (this->engine < engine) - p = &parent->rb_right; - else - p = 
&parent->rb_left; - } - if (!err) { - rb_link_node(&ce->node, parent, p); - rb_insert_color(&ce->node, &ctx->hw_contexts); - } - - spin_unlock(&ctx->hw_contexts_lock); - - return ce; -} - -void __intel_context_remove(struct intel_context *ce) -{ - struct i915_gem_context *ctx = ce->gem_context; - - spin_lock(&ctx->hw_contexts_lock); - rb_erase(&ce->node, &ctx->hw_contexts); - spin_unlock(&ctx->hw_contexts_lock); -} - -static struct intel_context * -intel_context_instance(struct i915_gem_context *ctx, - struct intel_engine_cs *engine) -{ - struct intel_context *ce, *pos; - - ce = intel_context_lookup(ctx, engine); - if (likely(ce)) - return ce; - - ce = intel_context_alloc(); - if (!ce) - return ERR_PTR(-ENOMEM); - - intel_context_init(ce, ctx, engine); - - pos = __intel_context_insert(ctx, engine, ce); - if (unlikely(pos != ce)) /* Beaten! Use their HW context instead */ - intel_context_free(ce); - - GEM_BUG_ON(intel_context_lookup(ctx, engine) != pos); - return pos; -} - -struct intel_context * -intel_context_pin_lock(struct i915_gem_context *ctx, - struct intel_engine_cs *engine) - __acquires(ce->pin_mutex) -{ - struct intel_context *ce; - - ce = intel_context_instance(ctx, engine); - if (IS_ERR(ce)) - return ce; - - if (mutex_lock_interruptible(&ce->pin_mutex)) - return ERR_PTR(-EINTR); - - return ce; -} - -struct intel_context * -intel_context_pin(struct i915_gem_context *ctx, - struct intel_engine_cs *engine) -{ - struct intel_context *ce; - int err; - - ce = intel_context_instance(ctx, engine); - if (IS_ERR(ce)) - return ce; - - if (likely(atomic_inc_not_zero(&ce->pin_count))) - return ce; - - if (mutex_lock_interruptible(&ce->pin_mutex)) - return ERR_PTR(-EINTR); - - if (likely(!atomic_read(&ce->pin_count))) { - err = ce->ops->pin(ce); - if (err) - goto err; - - i915_gem_context_get(ctx); - GEM_BUG_ON(ce->gem_context != ctx); - - mutex_lock(&ctx->mutex); - list_add(&ce->active_link, &ctx->active_engines); - mutex_unlock(&ctx->mutex); - - intel_context_get(ce); - smp_mb__before_atomic(); /* flush pin before it is visible */ - } - - atomic_inc(&ce->pin_count); - GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! 
*/ - - mutex_unlock(&ce->pin_mutex); - return ce; - -err: - mutex_unlock(&ce->pin_mutex); - return ERR_PTR(err); -} - -void intel_context_unpin(struct intel_context *ce) -{ - if (likely(atomic_add_unless(&ce->pin_count, -1, 1))) - return; - - /* We may be called from inside intel_context_pin() to evict another */ - intel_context_get(ce); - mutex_lock_nested(&ce->pin_mutex, SINGLE_DEPTH_NESTING); - - if (likely(atomic_dec_and_test(&ce->pin_count))) { - ce->ops->unpin(ce); - - mutex_lock(&ce->gem_context->mutex); - list_del(&ce->active_link); - mutex_unlock(&ce->gem_context->mutex); - - i915_gem_context_put(ce->gem_context); - intel_context_put(ce); - } - - mutex_unlock(&ce->pin_mutex); - intel_context_put(ce); -} - -static void intel_context_retire(struct i915_active_request *active, - struct i915_request *rq) -{ - struct intel_context *ce = - container_of(active, typeof(*ce), active_tracker); - - intel_context_unpin(ce); -} - -void -intel_context_init(struct intel_context *ce, - struct i915_gem_context *ctx, - struct intel_engine_cs *engine) -{ - kref_init(&ce->ref); - - ce->gem_context = ctx; - ce->engine = engine; - ce->ops = engine->cops; - ce->saturated = 0; - - INIT_LIST_HEAD(&ce->signal_link); - INIT_LIST_HEAD(&ce->signals); - - mutex_init(&ce->pin_mutex); - - /* Use the whole device by default */ - ce->sseu = intel_device_default_sseu(ctx->i915); - - i915_active_request_init(&ce->active_tracker, - NULL, intel_context_retire); -} - -static void i915_global_context_shrink(void) -{ - kmem_cache_shrink(global.slab_ce); -} - -static void i915_global_context_exit(void) -{ - kmem_cache_destroy(global.slab_ce); -} - -static struct i915_global_context global = { { - .shrink = i915_global_context_shrink, - .exit = i915_global_context_exit, -} }; - -int __init i915_global_context_init(void) -{ - global.slab_ce = KMEM_CACHE(intel_context, SLAB_HWCACHE_ALIGN); - if (!global.slab_ce) - return -ENOMEM; - - i915_global_register(&global.base); - return 0; -} -- cgit v1.2.3 From 5e5d2e209e085be73a83f342798eae68f58e7674 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 28 May 2019 10:29:42 +0100 Subject: drm/i915: Split GEM object type definition to its own header For convenience in avoiding inline spaghetti, keep the type definition as a separate header. 
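The split follows the familiar *_types.h layering: the types header carries only the struct layout and minimal includes, while the main header layers the inline helpers and API declarations on top. A small hypothetical sketch of the shape (the "foo" names are illustrative, not i915 code):

/* foo_types.h - layout only; cheap for other headers to include. */
#ifndef FOO_TYPES_H
#define FOO_TYPES_H

#include <linux/list.h>
#include <linux/types.h>

struct foo {
	struct list_head link;
	u32 flags;
#define FOO_READY	0x1
};

#endif /* FOO_TYPES_H */

/* foo.h - full API; only callers of the helpers pay for these includes. */
#ifndef FOO_H
#define FOO_H

#include "foo_types.h"

static inline bool foo_is_ready(const struct foo *f)
{
	return f->flags & FOO_READY;
}

int foo_init(struct foo *f);

#endif /* FOO_H */

Headers that only need the type can then forward-declare it or pull in just the lean types header, which is what the struct drm_i915_gem_object forward declarations added below to intel_engine_types.h, i915_drv.h, i915_gem_batch_pool.h and i915_gem_gtt.h rely on.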
Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Acked-by: Rodrigo Vivi Acked-by: Jani Nikula Acked-by: Joonas Lahtinen Link: https://patchwork.freedesktop.org/patch/msgid/20190528092956.14910-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/Makefile | 1 + drivers/gpu/drm/i915/gem/Makefile | 1 + drivers/gpu/drm/i915/gem/Makefile.header-test | 16 ++ drivers/gpu/drm/i915/gem/i915_gem_object_types.h | 285 ++++++++++++++++++++++ drivers/gpu/drm/i915/gt/intel_engine_types.h | 1 + drivers/gpu/drm/i915/i915_drv.h | 3 +- drivers/gpu/drm/i915/i915_gem_batch_pool.h | 3 +- drivers/gpu/drm/i915/i915_gem_gtt.h | 1 + drivers/gpu/drm/i915/i915_gem_object.h | 295 +---------------------- 9 files changed, 312 insertions(+), 294 deletions(-) create mode 100644 drivers/gpu/drm/i915/gem/Makefile create mode 100644 drivers/gpu/drm/i915/gem/Makefile.header-test create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_object_types.h (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 139a0fc19390..23a45d7baabb 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -85,6 +85,7 @@ gt-$(CONFIG_DRM_I915_SELFTEST) += \ i915-y += $(gt-y) # GEM (Graphics Execution Management) code +obj-y += gem/ i915-y += \ i915_active.o \ i915_cmd_parser.o \ diff --git a/drivers/gpu/drm/i915/gem/Makefile b/drivers/gpu/drm/i915/gem/Makefile new file mode 100644 index 000000000000..07e7b8b840ea --- /dev/null +++ b/drivers/gpu/drm/i915/gem/Makefile @@ -0,0 +1 @@ +include $(src)/Makefile.header-test # Extra header tests diff --git a/drivers/gpu/drm/i915/gem/Makefile.header-test b/drivers/gpu/drm/i915/gem/Makefile.header-test new file mode 100644 index 000000000000..61e06cbb4b32 --- /dev/null +++ b/drivers/gpu/drm/i915/gem/Makefile.header-test @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: MIT +# Copyright © 2019 Intel Corporation + +# Test the headers are compilable as standalone units +header_test := $(notdir $(wildcard $(src)/*.h)) + +quiet_cmd_header_test = HDRTEST $@ + cmd_header_test = echo "\#include \"$( $@ + +header_test_%.c: %.h + $(call cmd,header_test) + +extra-$(CONFIG_DRM_I915_WERROR) += \ + $(foreach h,$(header_test),$(patsubst %.h,header_test_%.o,$(h))) + +clean-files += $(foreach h,$(header_test),$(patsubst %.h,header_test_%.c,$(h))) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h new file mode 100644 index 000000000000..fe3b2a2775f7 --- /dev/null +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h @@ -0,0 +1,285 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2016 Intel Corporation + */ + +#ifndef __I915_GEM_OBJECT_TYPES_H__ +#define __I915_GEM_OBJECT_TYPES_H__ + +#include + +#include + +#include "i915_active.h" +#include "i915_selftest.h" + +struct drm_i915_gem_object; + +/* + * struct i915_lut_handle tracks the fast lookups from handle to vma used + * for execbuf. Although we use a radixtree for that mapping, in order to + * remove them as the object or context is closed, we need a secondary list + * and a translation entry (i915_lut_handle). 
+ */ +struct i915_lut_handle { + struct list_head obj_link; + struct list_head ctx_link; + struct i915_gem_context *ctx; + u32 handle; +}; + +struct drm_i915_gem_object_ops { + unsigned int flags; +#define I915_GEM_OBJECT_HAS_STRUCT_PAGE BIT(0) +#define I915_GEM_OBJECT_IS_SHRINKABLE BIT(1) +#define I915_GEM_OBJECT_IS_PROXY BIT(2) +#define I915_GEM_OBJECT_ASYNC_CANCEL BIT(3) + + /* Interface between the GEM object and its backing storage. + * get_pages() is called once prior to the use of the associated set + * of pages before to binding them into the GTT, and put_pages() is + * called after we no longer need them. As we expect there to be + * associated cost with migrating pages between the backing storage + * and making them available for the GPU (e.g. clflush), we may hold + * onto the pages after they are no longer referenced by the GPU + * in case they may be used again shortly (for example migrating the + * pages to a different memory domain within the GTT). put_pages() + * will therefore most likely be called when the object itself is + * being released or under memory pressure (where we attempt to + * reap pages for the shrinker). + */ + int (*get_pages)(struct drm_i915_gem_object *obj); + void (*put_pages)(struct drm_i915_gem_object *obj, + struct sg_table *pages); + + int (*pwrite)(struct drm_i915_gem_object *obj, + const struct drm_i915_gem_pwrite *arg); + + int (*dmabuf_export)(struct drm_i915_gem_object *obj); + void (*release)(struct drm_i915_gem_object *obj); +}; + +struct drm_i915_gem_object { + struct drm_gem_object base; + + const struct drm_i915_gem_object_ops *ops; + + struct { + /** + * @vma.lock: protect the list/tree of vmas + */ + spinlock_t lock; + + /** + * @vma.list: List of VMAs backed by this object + * + * The VMA on this list are ordered by type, all GGTT vma are + * placed at the head and all ppGTT vma are placed at the tail. + * The different types of GGTT vma are unordered between + * themselves, use the @vma.tree (which has a defined order + * between all VMA) to quickly find an exact match. + */ + struct list_head list; + + /** + * @vma.tree: Ordered tree of VMAs backed by this object + * + * All VMA created for this object are placed in the @vma.tree + * for fast retrieval via a binary search in + * i915_vma_instance(). They are also added to @vma.list for + * easy iteration. + */ + struct rb_root tree; + } vma; + + /** + * @lut_list: List of vma lookup entries in use for this object. + * + * If this object is closed, we need to remove all of its VMA from + * the fast lookup index in associated contexts; @lut_list provides + * this translation from object to context->handles_vma. + */ + struct list_head lut_list; + + /** Stolen memory for this object, instead of being backed by shmem. */ + struct drm_mm_node *stolen; + union { + struct rcu_head rcu; + struct llist_node freed; + }; + + /** + * Whether the object is currently in the GGTT mmap. + */ + unsigned int userfault_count; + struct list_head userfault_link; + + struct list_head batch_pool_link; + I915_SELFTEST_DECLARE(struct list_head st_link); + + unsigned long flags; + + /** + * Have we taken a reference for the object for incomplete GPU + * activity? 
+ */ +#define I915_BO_ACTIVE_REF 0 + + /* + * Is the object to be mapped as read-only to the GPU + * Only honoured if hardware has relevant pte bit + */ + unsigned int cache_level:3; + unsigned int cache_coherent:2; +#define I915_BO_CACHE_COHERENT_FOR_READ BIT(0) +#define I915_BO_CACHE_COHERENT_FOR_WRITE BIT(1) + unsigned int cache_dirty:1; + + /** + * @read_domains: Read memory domains. + * + * These monitor which caches contain read/write data related to the + * object. When transitioning from one set of domains to another, + * the driver is called to ensure that caches are suitably flushed and + * invalidated. + */ + u16 read_domains; + + /** + * @write_domain: Corresponding unique write memory domain. + */ + u16 write_domain; + + atomic_t frontbuffer_bits; + unsigned int frontbuffer_ggtt_origin; /* write once */ + struct i915_active_request frontbuffer_write; + + /** Current tiling stride for the object, if it's tiled. */ + unsigned int tiling_and_stride; +#define FENCE_MINIMUM_STRIDE 128 /* See i915_tiling_ok() */ +#define TILING_MASK (FENCE_MINIMUM_STRIDE - 1) +#define STRIDE_MASK (~TILING_MASK) + + /** Count of VMA actually bound by this object */ + unsigned int bind_count; + unsigned int active_count; + /** Count of how many global VMA are currently pinned for use by HW */ + unsigned int pin_global; + + struct { + struct mutex lock; /* protects the pages and their use */ + atomic_t pages_pin_count; + + struct sg_table *pages; + void *mapping; + + /* TODO: whack some of this into the error state */ + struct i915_page_sizes { + /** + * The sg mask of the pages sg_table. i.e the mask of + * of the lengths for each sg entry. + */ + unsigned int phys; + + /** + * The gtt page sizes we are allowed to use given the + * sg mask and the supported page sizes. This will + * express the smallest unit we can use for the whole + * object, as well as the larger sizes we may be able + * to use opportunistically. + */ + unsigned int sg; + + /** + * The actual gtt page size usage. Since we can have + * multiple vma associated with this object we need to + * prevent any trampling of state, hence a copy of this + * struct also lives in each vma, therefore the gtt + * value here should only be read/write through the vma. + */ + unsigned int gtt; + } page_sizes; + + I915_SELFTEST_DECLARE(unsigned int page_mask); + + struct i915_gem_object_page_iter { + struct scatterlist *sg_pos; + unsigned int sg_idx; /* in pages, but 32bit eek! */ + + struct radix_tree_root radix; + struct mutex lock; /* protects this cache */ + } get_page; + + /** + * Element within i915->mm.unbound_list or i915->mm.bound_list, + * locked by i915->mm.obj_lock. + */ + struct list_head link; + + /** + * Advice: are the backing pages purgeable? + */ + unsigned int madv:2; + + /** + * This is set if the object has been written to since the + * pages were last acquired. + */ + bool dirty:1; + + /** + * This is set if the object has been pinned due to unknown + * swizzling. + */ + bool quirked:1; + } mm; + + /** Breadcrumb of last rendering to the buffer. + * There can only be one writer, but we allow for multiple readers. + * If there is a writer that necessarily implies that all other + * read requests are complete - but we may only be lazily clearing + * the read requests. A read request is naturally the most recent + * request on a ring, so we may have two different write and read + * requests on one ring where the write request is older than the + * read request. 
This allows for the CPU to read from an active + * buffer by only waiting for the write to complete. + */ + struct reservation_object *resv; + + /** References from framebuffers, locks out tiling changes. */ + unsigned int framebuffer_references; + + /** Record of address bit 17 of each page at last unbind. */ + unsigned long *bit_17; + + union { + struct i915_gem_userptr { + uintptr_t ptr; + + struct i915_mm_struct *mm; + struct i915_mmu_object *mmu_object; + struct work_struct *work; + } userptr; + + unsigned long scratch; + + void *gvt_info; + }; + + /** for phys allocated objects */ + struct drm_dma_handle *phys_handle; + + struct reservation_object __builtin_resv; +}; + +static inline struct drm_i915_gem_object * +to_intel_bo(struct drm_gem_object *gem) +{ + /* Assert that to_intel_bo(NULL) == NULL */ + BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base)); + + return container_of(gem, struct drm_i915_gem_object, base); +} + +#endif diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h index 40e774acc2cd..01223864237a 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h @@ -29,6 +29,7 @@ #define I915_CMD_HASH_ORDER 9 struct dma_fence; +struct drm_i915_gem_object; struct drm_i915_reg_table; struct i915_gem_context; struct i915_request; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 87cc80c7b626..9a744d39d1cd 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -82,7 +82,6 @@ #include "i915_gem.h" #include "i915_gem_context.h" #include "i915_gem_fence_reg.h" -#include "i915_gem_object.h" #include "i915_gem_gtt.h" #include "i915_gpu_error.h" #include "i915_request.h" @@ -137,6 +136,8 @@ bool i915_error_injected(void); __i915_printk(i915, i915_error_injected() ? 
KERN_DEBUG : KERN_ERR, \ fmt, ##__VA_ARGS__) +struct drm_i915_gem_object; + enum hpd_pin { HPD_NONE = 0, HPD_TV = HPD_NONE, /* TV is known to be unreliable */ diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.h b/drivers/gpu/drm/i915/i915_gem_batch_pool.h index 56947daaaf65..feeeeeaa54d8 100644 --- a/drivers/gpu/drm/i915/i915_gem_batch_pool.h +++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.h @@ -9,6 +9,7 @@ #include +struct drm_i915_gem_object; struct intel_engine_cs; struct i915_gem_batch_pool { @@ -19,7 +20,7 @@ struct i915_gem_batch_pool { void i915_gem_batch_pool_init(struct i915_gem_batch_pool *pool, struct intel_engine_cs *engine); void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool); -struct drm_i915_gem_object* +struct drm_i915_gem_object * i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool, size_t size); #endif /* I915_GEM_BATCH_POOL_H */ diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index 38496039456b..811fa05c0322 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -61,6 +61,7 @@ struct drm_i915_file_private; struct drm_i915_fence_reg; +struct drm_i915_gem_object; struct i915_vma; typedef u32 gen6_pte_t; diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h index ca93a40c0c87..3666b0c5f6ca 100644 --- a/drivers/gpu/drm/i915/i915_gem_object.h +++ b/drivers/gpu/drm/i915/i915_gem_object.h @@ -1,308 +1,19 @@ /* - * Copyright © 2016 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. + * SPDX-License-Identifier: MIT * + * Copyright © 2016 Intel Corporation */ #ifndef __I915_GEM_OBJECT_H__ #define __I915_GEM_OBJECT_H__ -#include - -#include #include #include #include #include -#include "i915_request.h" -#include "i915_selftest.h" - -struct drm_i915_gem_object; - -/* - * struct i915_lut_handle tracks the fast lookups from handle to vma used - * for execbuf. Although we use a radixtree for that mapping, in order to - * remove them as the object or context is closed, we need a secondary list - * and a translation entry (i915_lut_handle). 
- */ -struct i915_lut_handle { - struct list_head obj_link; - struct list_head ctx_link; - struct i915_gem_context *ctx; - u32 handle; -}; - -struct drm_i915_gem_object_ops { - unsigned int flags; -#define I915_GEM_OBJECT_HAS_STRUCT_PAGE BIT(0) -#define I915_GEM_OBJECT_IS_SHRINKABLE BIT(1) -#define I915_GEM_OBJECT_IS_PROXY BIT(2) -#define I915_GEM_OBJECT_ASYNC_CANCEL BIT(3) - - /* Interface between the GEM object and its backing storage. - * get_pages() is called once prior to the use of the associated set - * of pages before to binding them into the GTT, and put_pages() is - * called after we no longer need them. As we expect there to be - * associated cost with migrating pages between the backing storage - * and making them available for the GPU (e.g. clflush), we may hold - * onto the pages after they are no longer referenced by the GPU - * in case they may be used again shortly (for example migrating the - * pages to a different memory domain within the GTT). put_pages() - * will therefore most likely be called when the object itself is - * being released or under memory pressure (where we attempt to - * reap pages for the shrinker). - */ - int (*get_pages)(struct drm_i915_gem_object *); - void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *); - - int (*pwrite)(struct drm_i915_gem_object *, - const struct drm_i915_gem_pwrite *); - - int (*dmabuf_export)(struct drm_i915_gem_object *); - void (*release)(struct drm_i915_gem_object *); -}; - -struct drm_i915_gem_object { - struct drm_gem_object base; - - const struct drm_i915_gem_object_ops *ops; - - struct { - /** - * @vma.lock: protect the list/tree of vmas - */ - spinlock_t lock; - - /** - * @vma.list: List of VMAs backed by this object - * - * The VMA on this list are ordered by type, all GGTT vma are - * placed at the head and all ppGTT vma are placed at the tail. - * The different types of GGTT vma are unordered between - * themselves, use the @vma.tree (which has a defined order - * between all VMA) to quickly find an exact match. - */ - struct list_head list; - - /** - * @vma.tree: Ordered tree of VMAs backed by this object - * - * All VMA created for this object are placed in the @vma.tree - * for fast retrieval via a binary search in - * i915_vma_instance(). They are also added to @vma.list for - * easy iteration. - */ - struct rb_root tree; - } vma; - - /** - * @lut_list: List of vma lookup entries in use for this object. - * - * If this object is closed, we need to remove all of its VMA from - * the fast lookup index in associated contexts; @lut_list provides - * this translation from object to context->handles_vma. - */ - struct list_head lut_list; - - /** Stolen memory for this object, instead of being backed by shmem. */ - struct drm_mm_node *stolen; - union { - struct rcu_head rcu; - struct llist_node freed; - }; - - /** - * Whether the object is currently in the GGTT mmap. - */ - unsigned int userfault_count; - struct list_head userfault_link; - - struct list_head batch_pool_link; - I915_SELFTEST_DECLARE(struct list_head st_link); - - unsigned long flags; - - /** - * Have we taken a reference for the object for incomplete GPU - * activity? 
- */ -#define I915_BO_ACTIVE_REF 0 - - /* - * Is the object to be mapped as read-only to the GPU - * Only honoured if hardware has relevant pte bit - */ - unsigned int cache_level:3; - unsigned int cache_coherent:2; -#define I915_BO_CACHE_COHERENT_FOR_READ BIT(0) -#define I915_BO_CACHE_COHERENT_FOR_WRITE BIT(1) - unsigned int cache_dirty:1; - - /** - * @read_domains: Read memory domains. - * - * These monitor which caches contain read/write data related to the - * object. When transitioning from one set of domains to another, - * the driver is called to ensure that caches are suitably flushed and - * invalidated. - */ - u16 read_domains; - - /** - * @write_domain: Corresponding unique write memory domain. - */ - u16 write_domain; - - atomic_t frontbuffer_bits; - unsigned int frontbuffer_ggtt_origin; /* write once */ - struct i915_active_request frontbuffer_write; - - /** Current tiling stride for the object, if it's tiled. */ - unsigned int tiling_and_stride; -#define FENCE_MINIMUM_STRIDE 128 /* See i915_tiling_ok() */ -#define TILING_MASK (FENCE_MINIMUM_STRIDE-1) -#define STRIDE_MASK (~TILING_MASK) - - /** Count of VMA actually bound by this object */ - unsigned int bind_count; - unsigned int active_count; - /** Count of how many global VMA are currently pinned for use by HW */ - unsigned int pin_global; - - struct { - struct mutex lock; /* protects the pages and their use */ - atomic_t pages_pin_count; - - struct sg_table *pages; - void *mapping; - - /* TODO: whack some of this into the error state */ - struct i915_page_sizes { - /** - * The sg mask of the pages sg_table. i.e the mask of - * of the lengths for each sg entry. - */ - unsigned int phys; - - /** - * The gtt page sizes we are allowed to use given the - * sg mask and the supported page sizes. This will - * express the smallest unit we can use for the whole - * object, as well as the larger sizes we may be able - * to use opportunistically. - */ - unsigned int sg; - - /** - * The actual gtt page size usage. Since we can have - * multiple vma associated with this object we need to - * prevent any trampling of state, hence a copy of this - * struct also lives in each vma, therefore the gtt - * value here should only be read/write through the vma. - */ - unsigned int gtt; - } page_sizes; - - I915_SELFTEST_DECLARE(unsigned int page_mask); - - struct i915_gem_object_page_iter { - struct scatterlist *sg_pos; - unsigned int sg_idx; /* in pages, but 32bit eek! */ - - struct radix_tree_root radix; - struct mutex lock; /* protects this cache */ - } get_page; - - /** - * Element within i915->mm.unbound_list or i915->mm.bound_list, - * locked by i915->mm.obj_lock. - */ - struct list_head link; - - /** - * Advice: are the backing pages purgeable? - */ - unsigned int madv:2; - - /** - * This is set if the object has been written to since the - * pages were last acquired. - */ - bool dirty:1; - - /** - * This is set if the object has been pinned due to unknown - * swizzling. - */ - bool quirked:1; - } mm; - - /** Breadcrumb of last rendering to the buffer. - * There can only be one writer, but we allow for multiple readers. - * If there is a writer that necessarily implies that all other - * read requests are complete - but we may only be lazily clearing - * the read requests. A read request is naturally the most recent - * request on a ring, so we may have two different write and read - * requests on one ring where the write request is older than the - * read request. 
This allows for the CPU to read from an active - * buffer by only waiting for the write to complete. - */ - struct reservation_object *resv; - - /** References from framebuffers, locks out tiling changes. */ - unsigned int framebuffer_references; - - /** Record of address bit 17 of each page at last unbind. */ - unsigned long *bit_17; - - union { - struct i915_gem_userptr { - uintptr_t ptr; - - struct i915_mm_struct *mm; - struct i915_mmu_object *mmu_object; - struct work_struct *work; - } userptr; - - unsigned long scratch; - - void *gvt_info; - }; - - /** for phys allocated objects */ - struct drm_dma_handle *phys_handle; - - struct reservation_object __builtin_resv; -}; - -static inline struct drm_i915_gem_object * -to_intel_bo(struct drm_gem_object *gem) -{ - /* Assert that to_intel_bo(NULL) == NULL */ - BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base)); - - return container_of(gem, struct drm_i915_gem_object, base); -} +#include "gem/i915_gem_object_types.h" struct drm_i915_gem_object *i915_gem_object_alloc(void); void i915_gem_object_free(struct drm_i915_gem_object *obj); -- cgit v1.2.3 From afa1308596c9dbb8526efd21335ea45aaaafdbe4 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 28 May 2019 10:29:43 +0100 Subject: drm/i915: Pull GEM ioctls interface to its own file Declutter i915_drv/gem.h by moving the ioctl API into its own header. Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190528092956.14910-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_ioctls.h | 52 ++++++++++++++++++++++++++++++ drivers/gpu/drm/i915/i915_drv.c | 1 + drivers/gpu/drm/i915/i915_drv.h | 38 ---------------------- drivers/gpu/drm/i915/i915_gem.c | 1 + drivers/gpu/drm/i915/i915_gem_execbuffer.c | 1 + drivers/gpu/drm/i915/i915_gem_tiling.c | 3 ++ drivers/gpu/drm/i915/i915_gem_userptr.c | 12 ++++--- 7 files changed, 66 insertions(+), 42 deletions(-) create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_ioctls.h (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h b/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h new file mode 100644 index 000000000000..ddc7f2a52b3e --- /dev/null +++ b/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h @@ -0,0 +1,52 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#ifndef I915_GEM_IOCTLS_H +#define I915_GEM_IOCTLS_H + +struct drm_device; +struct drm_file; + +int i915_gem_busy_ioctl(struct drm_device *dev, void *data, + struct drm_file *file); +int i915_gem_create_ioctl(struct drm_device *dev, void *data, + struct drm_file *file); +int i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data, + struct drm_file *file); +int i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data, + struct drm_file *file); +int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, + struct drm_file *file); +int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data, + struct drm_file *file); +int i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data, + struct drm_file *file); +int i915_gem_madvise_ioctl(struct drm_device *dev, void *data, + struct drm_file *file); +int i915_gem_mmap_ioctl(struct drm_device *dev, void *data, + struct drm_file *file); +int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, + struct drm_file *file); +int i915_gem_pread_ioctl(struct drm_device *dev, void *data, + struct drm_file *file); +int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, + struct drm_file *file); +int 
i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, + struct drm_file *file); +int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, + struct drm_file *file); +int i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data, + struct drm_file *file); +int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, + struct drm_file *file); +int i915_gem_throttle_ioctl(struct drm_device *dev, void *data, + struct drm_file *file); +int i915_gem_userptr_ioctl(struct drm_device *dev, void *data, + struct drm_file *file); +int i915_gem_wait_ioctl(struct drm_device *dev, void *data, + struct drm_file *file); + +#endif diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 6699e3a94272..a1f43dc5a8b5 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -47,6 +47,7 @@ #include #include +#include "gem/i915_gem_ioctls.h" #include "gt/intel_gt_pm.h" #include "gt/intel_reset.h" #include "gt/intel_workarounds.h" diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 9a744d39d1cd..189357a9c8c3 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2752,46 +2752,8 @@ static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv) } /* i915_gem.c */ -int i915_gem_create_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -int i915_gem_pread_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -int i915_gem_mmap_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -int i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -int i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -int i915_gem_busy_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data, - struct drm_file *file); -int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, - struct drm_file *file); -int i915_gem_throttle_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -int i915_gem_madvise_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -int i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -int i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); int i915_gem_init_userptr(struct drm_i915_private *dev_priv); void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv); -int i915_gem_userptr_ioctl(struct drm_device *dev, void *data, - struct drm_file *file); -int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -int i915_gem_wait_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); void i915_gem_sanitize(struct drm_i915_private *i915); int i915_gem_init_early(struct drm_i915_private *dev_priv); void i915_gem_cleanup_early(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 902162c04d35..aed9f36db803 100644 --- 
a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -39,6 +39,7 @@ #include #include +#include "gem/i915_gem_ioctls.h" #include "gt/intel_engine_pm.h" #include "gt/intel_gt_pm.h" #include "gt/intel_mocs.h" diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 8b85c91c3ea4..908fddcc57c3 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -34,6 +34,7 @@ #include #include +#include "gem/i915_gem_ioctls.h" #include "gt/intel_gt_pm.h" #include "i915_drv.h" diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index a9b5329dae3b..7d5cc9ccf6dd 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -28,6 +28,9 @@ #include #include #include + +#include "gem/i915_gem_ioctls.h" + #include "i915_drv.h" /** diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c index 8079ea3af103..2c1b6bb7a040 100644 --- a/drivers/gpu/drm/i915/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c @@ -22,16 +22,20 @@ * */ -#include -#include "i915_drv.h" -#include "i915_trace.h" -#include "intel_drv.h" #include #include #include #include #include +#include + +#include "gem/i915_gem_ioctls.h" + +#include "i915_drv.h" +#include "i915_trace.h" +#include "intel_drv.h" + struct i915_mm_struct { struct mm_struct *mm; struct drm_i915_private *i915; -- cgit v1.2.3 From 98932149aeb992398a58f6361a86a91f9bfc0b04 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 28 May 2019 10:29:44 +0100 Subject: drm/i915: Move object->pages API to i915_gem_object.[ch] Currently the code for manipulating the pages on an object is still residing in i915_gem.c, move it to i915_gem_object.c Signed-off-by: Chris Wilson Cc: Joonas Lahtinen Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190528092956.14910-3-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/Makefile | 4 +- drivers/gpu/drm/i915/gem/i915_gem_object.c | 90 ++++++++ drivers/gpu/drm/i915/gem/i915_gem_object.h | 350 +++++++++++++++++++++++++++++ drivers/gpu/drm/i915/i915_drv.h | 141 +----------- drivers/gpu/drm/i915/i915_gem_object.c | 90 -------- drivers/gpu/drm/i915/i915_gem_object.h | 220 ------------------ drivers/gpu/drm/i915/i915_globals.c | 2 +- drivers/gpu/drm/i915/i915_vma.h | 2 +- drivers/gpu/drm/i915/intel_frontbuffer.h | 2 +- 9 files changed, 452 insertions(+), 449 deletions(-) create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_object.c create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_object.h delete mode 100644 drivers/gpu/drm/i915/i915_gem_object.c delete mode 100644 drivers/gpu/drm/i915/i915_gem_object.h (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 23a45d7baabb..f96846846cba 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -86,7 +86,10 @@ i915-y += $(gt-y) # GEM (Graphics Execution Management) code obj-y += gem/ +gem-y += \ + gem/i915_gem_object.o i915-y += \ + $(gem-y) \ i915_active.o \ i915_cmd_parser.o \ i915_gem_batch_pool.o \ @@ -99,7 +102,6 @@ i915-y += \ i915_gem_gtt.o \ i915_gem_internal.o \ i915_gem.o \ - i915_gem_object.o \ i915_gem_pm.o \ i915_gem_render_state.o \ i915_gem_shrinker.o \ diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c new file mode 100644 index 000000000000..ac6a5ab84586 --- /dev/null +++ 
b/drivers/gpu/drm/i915/gem/i915_gem_object.c @@ -0,0 +1,90 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#include "i915_drv.h" +#include "i915_gem_object.h" +#include "i915_globals.h" + +static struct i915_global_object { + struct i915_global base; + struct kmem_cache *slab_objects; +} global; + +struct drm_i915_gem_object *i915_gem_object_alloc(void) +{ + return kmem_cache_zalloc(global.slab_objects, GFP_KERNEL); +} + +void i915_gem_object_free(struct drm_i915_gem_object *obj) +{ + return kmem_cache_free(global.slab_objects, obj); +} + +/** + * Mark up the object's coherency levels for a given cache_level + * @obj: #drm_i915_gem_object + * @cache_level: cache level + */ +void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj, + unsigned int cache_level) +{ + obj->cache_level = cache_level; + + if (cache_level != I915_CACHE_NONE) + obj->cache_coherent = (I915_BO_CACHE_COHERENT_FOR_READ | + I915_BO_CACHE_COHERENT_FOR_WRITE); + else if (HAS_LLC(to_i915(obj->base.dev))) + obj->cache_coherent = I915_BO_CACHE_COHERENT_FOR_READ; + else + obj->cache_coherent = 0; + + obj->cache_dirty = + !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE); +} + +static void i915_global_objects_shrink(void) +{ + kmem_cache_shrink(global.slab_objects); +} + +static void i915_global_objects_exit(void) +{ + kmem_cache_destroy(global.slab_objects); +} + +static struct i915_global_object global = { { + .shrink = i915_global_objects_shrink, + .exit = i915_global_objects_exit, +} }; + +int __init i915_global_objects_init(void) +{ + global.slab_objects = + KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN); + if (!global.slab_objects) + return -ENOMEM; + + i915_global_register(&global.base); + return 0; +} diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h new file mode 100644 index 000000000000..9dfd8b0db725 --- /dev/null +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h @@ -0,0 +1,350 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2016 Intel Corporation + */ + +#ifndef __I915_GEM_OBJECT_H__ +#define __I915_GEM_OBJECT_H__ + +#include +#include +#include + +#include + +#include "i915_gem_object_types.h" + +struct drm_i915_gem_object *i915_gem_object_alloc(void); +void i915_gem_object_free(struct drm_i915_gem_object *obj); + +/** + * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle + * @filp: 
DRM file private date + * @handle: userspace handle + * + * Returns: + * + * A pointer to the object named by the handle if such exists on @filp, NULL + * otherwise. This object is only valid whilst under the RCU read lock, and + * note carefully the object may be in the process of being destroyed. + */ +static inline struct drm_i915_gem_object * +i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle) +{ +#ifdef CONFIG_LOCKDEP + WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map)); +#endif + return idr_find(&file->object_idr, handle); +} + +static inline struct drm_i915_gem_object * +i915_gem_object_lookup(struct drm_file *file, u32 handle) +{ + struct drm_i915_gem_object *obj; + + rcu_read_lock(); + obj = i915_gem_object_lookup_rcu(file, handle); + if (obj && !kref_get_unless_zero(&obj->base.refcount)) + obj = NULL; + rcu_read_unlock(); + + return obj; +} + +__deprecated +extern struct drm_gem_object * +drm_gem_object_lookup(struct drm_file *file, u32 handle); + +__attribute__((nonnull)) +static inline struct drm_i915_gem_object * +i915_gem_object_get(struct drm_i915_gem_object *obj) +{ + drm_gem_object_get(&obj->base); + return obj; +} + +__attribute__((nonnull)) +static inline void +i915_gem_object_put(struct drm_i915_gem_object *obj) +{ + __drm_gem_object_put(&obj->base); +} + +static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj) +{ + reservation_object_lock(obj->resv, NULL); +} + +static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj) +{ + reservation_object_unlock(obj->resv); +} + +static inline void +i915_gem_object_set_readonly(struct drm_i915_gem_object *obj) +{ + obj->base.vma_node.readonly = true; +} + +static inline bool +i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj) +{ + return obj->base.vma_node.readonly; +} + +static inline bool +i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj) +{ + return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE; +} + +static inline bool +i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj) +{ + return obj->ops->flags & I915_GEM_OBJECT_IS_SHRINKABLE; +} + +static inline bool +i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj) +{ + return obj->ops->flags & I915_GEM_OBJECT_IS_PROXY; +} + +static inline bool +i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj) +{ + return obj->ops->flags & I915_GEM_OBJECT_ASYNC_CANCEL; +} + +static inline bool +i915_gem_object_is_active(const struct drm_i915_gem_object *obj) +{ + return obj->active_count; +} + +static inline bool +i915_gem_object_has_active_reference(const struct drm_i915_gem_object *obj) +{ + return test_bit(I915_BO_ACTIVE_REF, &obj->flags); +} + +static inline void +i915_gem_object_set_active_reference(struct drm_i915_gem_object *obj) +{ + lockdep_assert_held(&obj->base.dev->struct_mutex); + __set_bit(I915_BO_ACTIVE_REF, &obj->flags); +} + +static inline void +i915_gem_object_clear_active_reference(struct drm_i915_gem_object *obj) +{ + lockdep_assert_held(&obj->base.dev->struct_mutex); + __clear_bit(I915_BO_ACTIVE_REF, &obj->flags); +} + +void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj); + +static inline bool +i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj) +{ + return READ_ONCE(obj->framebuffer_references); +} + +static inline unsigned int +i915_gem_object_get_tiling(const struct drm_i915_gem_object *obj) +{ + return obj->tiling_and_stride & TILING_MASK; +} + +static inline bool +i915_gem_object_is_tiled(const 
struct drm_i915_gem_object *obj) +{ + return i915_gem_object_get_tiling(obj) != I915_TILING_NONE; +} + +static inline unsigned int +i915_gem_object_get_stride(const struct drm_i915_gem_object *obj) +{ + return obj->tiling_and_stride & STRIDE_MASK; +} + +static inline unsigned int +i915_gem_tile_height(unsigned int tiling) +{ + GEM_BUG_ON(!tiling); + return tiling == I915_TILING_Y ? 32 : 8; +} + +static inline unsigned int +i915_gem_object_get_tile_height(const struct drm_i915_gem_object *obj) +{ + return i915_gem_tile_height(i915_gem_object_get_tiling(obj)); +} + +static inline unsigned int +i915_gem_object_get_tile_row_size(const struct drm_i915_gem_object *obj) +{ + return (i915_gem_object_get_stride(obj) * + i915_gem_object_get_tile_height(obj)); +} + +int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj, + unsigned int tiling, unsigned int stride); + +struct scatterlist * +i915_gem_object_get_sg(struct drm_i915_gem_object *obj, + unsigned int n, unsigned int *offset); + +struct page * +i915_gem_object_get_page(struct drm_i915_gem_object *obj, + unsigned int n); + +struct page * +i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, + unsigned int n); + +dma_addr_t +i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj, + unsigned long n, + unsigned int *len); + +dma_addr_t +i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, + unsigned long n); + +void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, + struct sg_table *pages, + unsigned int sg_page_sizes); +int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj); + +static inline int __must_check +i915_gem_object_pin_pages(struct drm_i915_gem_object *obj) +{ + might_lock(&obj->mm.lock); + + if (atomic_inc_not_zero(&obj->mm.pages_pin_count)) + return 0; + + return __i915_gem_object_get_pages(obj); +} + +static inline bool +i915_gem_object_has_pages(struct drm_i915_gem_object *obj) +{ + return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages)); +} + +static inline void +__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj) +{ + GEM_BUG_ON(!i915_gem_object_has_pages(obj)); + + atomic_inc(&obj->mm.pages_pin_count); +} + +static inline bool +i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj) +{ + return atomic_read(&obj->mm.pages_pin_count); +} + +static inline void +__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) +{ + GEM_BUG_ON(!i915_gem_object_has_pages(obj)); + GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); + + atomic_dec(&obj->mm.pages_pin_count); +} + +static inline void +i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) +{ + __i915_gem_object_unpin_pages(obj); +} + +enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */ + I915_MM_NORMAL = 0, + I915_MM_SHRINKER /* called "recursively" from direct-reclaim-esque */ +}; + +int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj, + enum i915_mm_subclass subclass); +void __i915_gem_object_truncate(struct drm_i915_gem_object *obj); + +enum i915_map_type { + I915_MAP_WB = 0, + I915_MAP_WC, +#define I915_MAP_OVERRIDE BIT(31) + I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE, + I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE, +}; + +/** + * i915_gem_object_pin_map - return a contiguous mapping of the entire object + * @obj: the object to map into kernel address space + * @type: the type of mapping, used to select pgprot_t + * + * Calls i915_gem_object_pin_pages() to prevent reaping of the object's + * pages and then returns a contiguous mapping 
of the backing storage into + * the kernel address space. Based on the @type of mapping, the PTE will be + * set to either WriteBack or WriteCombine (via pgprot_t). + * + * The caller is responsible for calling i915_gem_object_unpin_map() when the + * mapping is no longer required. + * + * Returns the pointer through which to access the mapped object, or an + * ERR_PTR() on error. + */ +void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj, + enum i915_map_type type); + +void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj, + unsigned long offset, + unsigned long size); +static inline void i915_gem_object_flush_map(struct drm_i915_gem_object *obj) +{ + __i915_gem_object_flush_map(obj, 0, obj->base.size); +} + +/** + * i915_gem_object_unpin_map - releases an earlier mapping + * @obj: the object to unmap + * + * After pinning the object and mapping its pages, once you are finished + * with your access, call i915_gem_object_unpin_map() to release the pin + * upon the mapping. Once the pin count reaches zero, that mapping may be + * removed. + */ +static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj) +{ + i915_gem_object_unpin_pages(obj); +} + +static inline struct intel_engine_cs * +i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj) +{ + struct intel_engine_cs *engine = NULL; + struct dma_fence *fence; + + rcu_read_lock(); + fence = reservation_object_get_excl_rcu(obj->resv); + rcu_read_unlock(); + + if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence)) + engine = to_request(fence)->engine; + dma_fence_put(fence); + + return engine; +} + +void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj, + unsigned int cache_level); +void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj); + +void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj, + struct sg_table *pages, + bool needs_clflush); + +#endif diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 189357a9c8c3..0b853643b5de 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2825,141 +2825,6 @@ static inline int __sg_page_count(const struct scatterlist *sg) return sg->length >> PAGE_SHIFT; } -struct scatterlist * -i915_gem_object_get_sg(struct drm_i915_gem_object *obj, - unsigned int n, unsigned int *offset); - -struct page * -i915_gem_object_get_page(struct drm_i915_gem_object *obj, - unsigned int n); - -struct page * -i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, - unsigned int n); - -dma_addr_t -i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj, - unsigned long n, - unsigned int *len); -dma_addr_t -i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, - unsigned long n); - -void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, - struct sg_table *pages, - unsigned int sg_page_sizes); -int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj); - -static inline int __must_check -i915_gem_object_pin_pages(struct drm_i915_gem_object *obj) -{ - might_lock(&obj->mm.lock); - - if (atomic_inc_not_zero(&obj->mm.pages_pin_count)) - return 0; - - return __i915_gem_object_get_pages(obj); -} - -static inline bool -i915_gem_object_has_pages(struct drm_i915_gem_object *obj) -{ - return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages)); -} - -static inline void -__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj) -{ - GEM_BUG_ON(!i915_gem_object_has_pages(obj)); - - 
atomic_inc(&obj->mm.pages_pin_count); -} - -static inline bool -i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj) -{ - return atomic_read(&obj->mm.pages_pin_count); -} - -static inline void -__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) -{ - GEM_BUG_ON(!i915_gem_object_has_pages(obj)); - GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); - - atomic_dec(&obj->mm.pages_pin_count); -} - -static inline void -i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) -{ - __i915_gem_object_unpin_pages(obj); -} - -enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */ - I915_MM_NORMAL = 0, - I915_MM_SHRINKER /* called "recursively" from direct-reclaim-esque */ -}; - -int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj, - enum i915_mm_subclass subclass); -void __i915_gem_object_truncate(struct drm_i915_gem_object *obj); - -enum i915_map_type { - I915_MAP_WB = 0, - I915_MAP_WC, -#define I915_MAP_OVERRIDE BIT(31) - I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE, - I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE, -}; - -static inline enum i915_map_type -i915_coherent_map_type(struct drm_i915_private *i915) -{ - return HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC; -} - -/** - * i915_gem_object_pin_map - return a contiguous mapping of the entire object - * @obj: the object to map into kernel address space - * @type: the type of mapping, used to select pgprot_t - * - * Calls i915_gem_object_pin_pages() to prevent reaping of the object's - * pages and then returns a contiguous mapping of the backing storage into - * the kernel address space. Based on the @type of mapping, the PTE will be - * set to either WriteBack or WriteCombine (via pgprot_t). - * - * The caller is responsible for calling i915_gem_object_unpin_map() when the - * mapping is no longer required. - * - * Returns the pointer through which to access the mapped object, or an - * ERR_PTR() on error. - */ -void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj, - enum i915_map_type type); - -void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj, - unsigned long offset, - unsigned long size); -static inline void i915_gem_object_flush_map(struct drm_i915_gem_object *obj) -{ - __i915_gem_object_flush_map(obj, 0, obj->base.size); -} - -/** - * i915_gem_object_unpin_map - releases an earlier mapping - * @obj: the object to unmap - * - * After pinning the object and mapping its pages, once you are finished - * with your access, call i915_gem_object_unpin_map() to release the pin - * upon the mapping. Once the pin count reaches zero, that mapping may be - * removed. - */ -static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj) -{ - i915_gem_object_unpin_pages(obj); -} - int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj, unsigned int *needs_clflush); int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj, @@ -3358,6 +3223,12 @@ static inline u32 i915_scratch_offset(const struct drm_i915_private *i915) return i915_ggtt_offset(i915->gt.scratch); } +static inline enum i915_map_type +i915_coherent_map_type(struct drm_i915_private *i915) +{ + return HAS_LLC(i915) ? 
I915_MAP_WB : I915_MAP_WC; +} + static inline void add_taint_for_CI(unsigned int taint) { /* diff --git a/drivers/gpu/drm/i915/i915_gem_object.c b/drivers/gpu/drm/i915/i915_gem_object.c deleted file mode 100644 index ac6a5ab84586..000000000000 --- a/drivers/gpu/drm/i915/i915_gem_object.c +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Copyright © 2017 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - * - */ - -#include "i915_drv.h" -#include "i915_gem_object.h" -#include "i915_globals.h" - -static struct i915_global_object { - struct i915_global base; - struct kmem_cache *slab_objects; -} global; - -struct drm_i915_gem_object *i915_gem_object_alloc(void) -{ - return kmem_cache_zalloc(global.slab_objects, GFP_KERNEL); -} - -void i915_gem_object_free(struct drm_i915_gem_object *obj) -{ - return kmem_cache_free(global.slab_objects, obj); -} - -/** - * Mark up the object's coherency levels for a given cache_level - * @obj: #drm_i915_gem_object - * @cache_level: cache level - */ -void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj, - unsigned int cache_level) -{ - obj->cache_level = cache_level; - - if (cache_level != I915_CACHE_NONE) - obj->cache_coherent = (I915_BO_CACHE_COHERENT_FOR_READ | - I915_BO_CACHE_COHERENT_FOR_WRITE); - else if (HAS_LLC(to_i915(obj->base.dev))) - obj->cache_coherent = I915_BO_CACHE_COHERENT_FOR_READ; - else - obj->cache_coherent = 0; - - obj->cache_dirty = - !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE); -} - -static void i915_global_objects_shrink(void) -{ - kmem_cache_shrink(global.slab_objects); -} - -static void i915_global_objects_exit(void) -{ - kmem_cache_destroy(global.slab_objects); -} - -static struct i915_global_object global = { { - .shrink = i915_global_objects_shrink, - .exit = i915_global_objects_exit, -} }; - -int __init i915_global_objects_init(void) -{ - global.slab_objects = - KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN); - if (!global.slab_objects) - return -ENOMEM; - - i915_global_register(&global.base); - return 0; -} diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h deleted file mode 100644 index 3666b0c5f6ca..000000000000 --- a/drivers/gpu/drm/i915/i915_gem_object.h +++ /dev/null @@ -1,220 +0,0 @@ -/* - * SPDX-License-Identifier: MIT - * - * Copyright © 2016 Intel Corporation - */ - -#ifndef __I915_GEM_OBJECT_H__ -#define __I915_GEM_OBJECT_H__ - -#include -#include -#include - -#include - 
-#include "gem/i915_gem_object_types.h" - -struct drm_i915_gem_object *i915_gem_object_alloc(void); -void i915_gem_object_free(struct drm_i915_gem_object *obj); - -/** - * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle - * @filp: DRM file private date - * @handle: userspace handle - * - * Returns: - * - * A pointer to the object named by the handle if such exists on @filp, NULL - * otherwise. This object is only valid whilst under the RCU read lock, and - * note carefully the object may be in the process of being destroyed. - */ -static inline struct drm_i915_gem_object * -i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle) -{ -#ifdef CONFIG_LOCKDEP - WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map)); -#endif - return idr_find(&file->object_idr, handle); -} - -static inline struct drm_i915_gem_object * -i915_gem_object_lookup(struct drm_file *file, u32 handle) -{ - struct drm_i915_gem_object *obj; - - rcu_read_lock(); - obj = i915_gem_object_lookup_rcu(file, handle); - if (obj && !kref_get_unless_zero(&obj->base.refcount)) - obj = NULL; - rcu_read_unlock(); - - return obj; -} - -__deprecated -extern struct drm_gem_object * -drm_gem_object_lookup(struct drm_file *file, u32 handle); - -__attribute__((nonnull)) -static inline struct drm_i915_gem_object * -i915_gem_object_get(struct drm_i915_gem_object *obj) -{ - drm_gem_object_get(&obj->base); - return obj; -} - -__attribute__((nonnull)) -static inline void -i915_gem_object_put(struct drm_i915_gem_object *obj) -{ - __drm_gem_object_put(&obj->base); -} - -static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj) -{ - reservation_object_lock(obj->resv, NULL); -} - -static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj) -{ - reservation_object_unlock(obj->resv); -} - -static inline void -i915_gem_object_set_readonly(struct drm_i915_gem_object *obj) -{ - obj->base.vma_node.readonly = true; -} - -static inline bool -i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj) -{ - return obj->base.vma_node.readonly; -} - -static inline bool -i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj) -{ - return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE; -} - -static inline bool -i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj) -{ - return obj->ops->flags & I915_GEM_OBJECT_IS_SHRINKABLE; -} - -static inline bool -i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj) -{ - return obj->ops->flags & I915_GEM_OBJECT_IS_PROXY; -} - -static inline bool -i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj) -{ - return obj->ops->flags & I915_GEM_OBJECT_ASYNC_CANCEL; -} - -static inline bool -i915_gem_object_is_active(const struct drm_i915_gem_object *obj) -{ - return obj->active_count; -} - -static inline bool -i915_gem_object_has_active_reference(const struct drm_i915_gem_object *obj) -{ - return test_bit(I915_BO_ACTIVE_REF, &obj->flags); -} - -static inline void -i915_gem_object_set_active_reference(struct drm_i915_gem_object *obj) -{ - lockdep_assert_held(&obj->base.dev->struct_mutex); - __set_bit(I915_BO_ACTIVE_REF, &obj->flags); -} - -static inline void -i915_gem_object_clear_active_reference(struct drm_i915_gem_object *obj) -{ - lockdep_assert_held(&obj->base.dev->struct_mutex); - __clear_bit(I915_BO_ACTIVE_REF, &obj->flags); -} - -void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj); - -static inline bool -i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj) 
-{ - return READ_ONCE(obj->framebuffer_references); -} - -static inline unsigned int -i915_gem_object_get_tiling(const struct drm_i915_gem_object *obj) -{ - return obj->tiling_and_stride & TILING_MASK; -} - -static inline bool -i915_gem_object_is_tiled(const struct drm_i915_gem_object *obj) -{ - return i915_gem_object_get_tiling(obj) != I915_TILING_NONE; -} - -static inline unsigned int -i915_gem_object_get_stride(const struct drm_i915_gem_object *obj) -{ - return obj->tiling_and_stride & STRIDE_MASK; -} - -static inline unsigned int -i915_gem_tile_height(unsigned int tiling) -{ - GEM_BUG_ON(!tiling); - return tiling == I915_TILING_Y ? 32 : 8; -} - -static inline unsigned int -i915_gem_object_get_tile_height(const struct drm_i915_gem_object *obj) -{ - return i915_gem_tile_height(i915_gem_object_get_tiling(obj)); -} - -static inline unsigned int -i915_gem_object_get_tile_row_size(const struct drm_i915_gem_object *obj) -{ - return (i915_gem_object_get_stride(obj) * - i915_gem_object_get_tile_height(obj)); -} - -int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj, - unsigned int tiling, unsigned int stride); - -static inline struct intel_engine_cs * -i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj) -{ - struct intel_engine_cs *engine = NULL; - struct dma_fence *fence; - - rcu_read_lock(); - fence = reservation_object_get_excl_rcu(obj->resv); - rcu_read_unlock(); - - if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence)) - engine = to_request(fence)->engine; - dma_fence_put(fence); - - return engine; -} - -void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj, - unsigned int cache_level); -void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj); - -void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj, - struct sg_table *pages, - bool needs_clflush); - -#endif diff --git a/drivers/gpu/drm/i915/i915_globals.c b/drivers/gpu/drm/i915/i915_globals.c index 81e5c2ce336b..db52a58eadcc 100644 --- a/drivers/gpu/drm/i915/i915_globals.c +++ b/drivers/gpu/drm/i915/i915_globals.c @@ -9,7 +9,7 @@ #include "i915_active.h" #include "i915_gem_context.h" -#include "i915_gem_object.h" +#include "gem/i915_gem_object.h" #include "i915_globals.h" #include "i915_request.h" #include "i915_scheduler.h" diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h index 8543d2953cd1..754a762d90b4 100644 --- a/drivers/gpu/drm/i915/i915_vma.h +++ b/drivers/gpu/drm/i915/i915_vma.h @@ -32,7 +32,7 @@ #include "i915_gem_gtt.h" #include "i915_gem_fence_reg.h" -#include "i915_gem_object.h" +#include "gem/i915_gem_object.h" #include "i915_active.h" #include "i915_request.h" diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.h b/drivers/gpu/drm/i915/intel_frontbuffer.h index d5894666f658..5727320c8084 100644 --- a/drivers/gpu/drm/i915/intel_frontbuffer.h +++ b/drivers/gpu/drm/i915/intel_frontbuffer.h @@ -24,7 +24,7 @@ #ifndef __INTEL_FRONTBUFFER_H__ #define __INTEL_FRONTBUFFER_H__ -#include "i915_gem_object.h" +#include "gem/i915_gem_object.h" struct drm_i915_private; struct drm_i915_gem_object; -- cgit v1.2.3 From 8475355f7a2645a022288301c03555c31fb4de17 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 28 May 2019 10:29:45 +0100 Subject: drm/i915: Move shmem object setup to its own file Split the plain old shmem object into its own file to start decluttering i915_gem.c v2: Lose the confusing, hysterical raisins, suffix of _gtt. 
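As an illustrative sketch (not part of the patch itself): after this change a caller that previously allocated a plain GEM object asks for shmem backing explicitly; the intel_lrc.c, intel_ringbuffer.c and gvt/cmd_parser.c hunks below make exactly this substitution, for example:

	struct drm_i915_gem_object *obj;

	/* was: obj = i915_gem_object_create(i915, engine->context_size); */
	obj = i915_gem_object_create_shmem(i915, engine->context_size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);
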
Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190528092956.14910-4-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/Makefile | 3 +- drivers/gpu/drm/i915/gem/i915_gem_object.c | 298 ++++++++ drivers/gpu/drm/i915/gem/i915_gem_object.h | 41 +- drivers/gpu/drm/i915/gem/i915_gem_shmem.c | 511 ++++++++++++++ drivers/gpu/drm/i915/gt/intel_lrc.c | 4 +- drivers/gpu/drm/i915/gt/intel_ringbuffer.c | 2 +- drivers/gpu/drm/i915/gvt/cmd_parser.c | 21 +- drivers/gpu/drm/i915/i915_drv.h | 10 - drivers/gpu/drm/i915/i915_gem.c | 826 +---------------------- drivers/gpu/drm/i915/i915_perf.c | 2 +- drivers/gpu/drm/i915/intel_fbdev.c | 2 +- drivers/gpu/drm/i915/intel_guc.c | 2 +- drivers/gpu/drm/i915/intel_uc_fw.c | 3 +- drivers/gpu/drm/i915/selftests/huge_pages.c | 6 +- drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c | 8 +- drivers/gpu/drm/i915/selftests/i915_gem_object.c | 4 +- 16 files changed, 882 insertions(+), 861 deletions(-) create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_shmem.c (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index f96846846cba..ccf0520e8ad4 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -87,7 +87,8 @@ i915-y += $(gt-y) # GEM (Graphics Execution Management) code obj-y += gem/ gem-y += \ - gem/i915_gem_object.o + gem/i915_gem_object.o \ + gem/i915_gem_shmem.o i915-y += \ $(gem-y) \ i915_active.o \ diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c index ac6a5ab84586..3000b26b4bbf 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c @@ -25,6 +25,7 @@ #include "i915_drv.h" #include "i915_gem_object.h" #include "i915_globals.h" +#include "intel_frontbuffer.h" static struct i915_global_object { struct i915_global base; @@ -41,6 +42,64 @@ void i915_gem_object_free(struct drm_i915_gem_object *obj) return kmem_cache_free(global.slab_objects, obj); } +/* some bookkeeping */ +static void i915_gem_info_add_obj(struct drm_i915_private *i915, + u64 size) +{ + spin_lock(&i915->mm.object_stat_lock); + i915->mm.object_count++; + i915->mm.object_memory += size; + spin_unlock(&i915->mm.object_stat_lock); +} + +static void i915_gem_info_remove_obj(struct drm_i915_private *i915, + u64 size) +{ + spin_lock(&i915->mm.object_stat_lock); + i915->mm.object_count--; + i915->mm.object_memory -= size; + spin_unlock(&i915->mm.object_stat_lock); +} + +static void +frontbuffer_retire(struct i915_active_request *active, + struct i915_request *request) +{ + struct drm_i915_gem_object *obj = + container_of(active, typeof(*obj), frontbuffer_write); + + intel_fb_obj_flush(obj, ORIGIN_CS); +} + +void i915_gem_object_init(struct drm_i915_gem_object *obj, + const struct drm_i915_gem_object_ops *ops) +{ + mutex_init(&obj->mm.lock); + + spin_lock_init(&obj->vma.lock); + INIT_LIST_HEAD(&obj->vma.list); + + INIT_LIST_HEAD(&obj->lut_list); + INIT_LIST_HEAD(&obj->batch_pool_link); + + init_rcu_head(&obj->rcu); + + obj->ops = ops; + + reservation_object_init(&obj->__builtin_resv); + obj->resv = &obj->__builtin_resv; + + obj->frontbuffer_ggtt_origin = ORIGIN_GTT; + i915_active_request_init(&obj->frontbuffer_write, + NULL, frontbuffer_retire); + + obj->mm.madv = I915_MADV_WILLNEED; + INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN); + mutex_init(&obj->mm.get_page.lock); + + i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size); +} + /** * Mark up 
the object's coherency levels for a given cache_level * @obj: #drm_i915_gem_object @@ -63,6 +122,245 @@ void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj, !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE); } +void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file) +{ + struct drm_i915_private *i915 = to_i915(gem->dev); + struct drm_i915_gem_object *obj = to_intel_bo(gem); + struct drm_i915_file_private *fpriv = file->driver_priv; + struct i915_lut_handle *lut, *ln; + + mutex_lock(&i915->drm.struct_mutex); + + list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) { + struct i915_gem_context *ctx = lut->ctx; + struct i915_vma *vma; + + GEM_BUG_ON(ctx->file_priv == ERR_PTR(-EBADF)); + if (ctx->file_priv != fpriv) + continue; + + vma = radix_tree_delete(&ctx->handles_vma, lut->handle); + GEM_BUG_ON(vma->obj != obj); + + /* We allow the process to have multiple handles to the same + * vma, in the same fd namespace, by virtue of flink/open. + */ + GEM_BUG_ON(!vma->open_count); + if (!--vma->open_count && !i915_vma_is_ggtt(vma)) + i915_vma_close(vma); + + list_del(&lut->obj_link); + list_del(&lut->ctx_link); + + i915_lut_handle_free(lut); + __i915_gem_object_release_unless_active(obj); + } + + mutex_unlock(&i915->drm.struct_mutex); +} + +static bool discard_backing_storage(struct drm_i915_gem_object *obj) +{ + /* If we are the last user of the backing storage (be it shmemfs + * pages or stolen etc), we know that the pages are going to be + * immediately released. In this case, we can then skip copying + * back the contents from the GPU. + */ + + if (obj->mm.madv != I915_MADV_WILLNEED) + return false; + + if (!obj->base.filp) + return true; + + /* At first glance, this looks racy, but then again so would be + * userspace racing mmap against close. However, the first external + * reference to the filp can only be obtained through the + * i915_gem_mmap_ioctl() which safeguards us against the user + * acquiring such a reference whilst we are in the middle of + * freeing the object. + */ + return file_count(obj->base.filp) == 1; +} + +static void __i915_gem_free_objects(struct drm_i915_private *i915, + struct llist_node *freed) +{ + struct drm_i915_gem_object *obj, *on; + intel_wakeref_t wakeref; + + wakeref = intel_runtime_pm_get(i915); + llist_for_each_entry_safe(obj, on, freed, freed) { + struct i915_vma *vma, *vn; + + trace_i915_gem_object_destroy(obj); + + mutex_lock(&i915->drm.struct_mutex); + + GEM_BUG_ON(i915_gem_object_is_active(obj)); + list_for_each_entry_safe(vma, vn, &obj->vma.list, obj_link) { + GEM_BUG_ON(i915_vma_is_active(vma)); + vma->flags &= ~I915_VMA_PIN_MASK; + i915_vma_destroy(vma); + } + GEM_BUG_ON(!list_empty(&obj->vma.list)); + GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma.tree)); + + /* This serializes freeing with the shrinker. Since the free + * is delayed, first by RCU then by the workqueue, we want the + * shrinker to be able to free pages of unreferenced objects, + * or else we may oom whilst there are plenty of deferred + * freed objects. 
+ */ + if (i915_gem_object_has_pages(obj)) { + spin_lock(&i915->mm.obj_lock); + list_del_init(&obj->mm.link); + spin_unlock(&i915->mm.obj_lock); + } + + mutex_unlock(&i915->drm.struct_mutex); + + GEM_BUG_ON(obj->bind_count); + GEM_BUG_ON(obj->userfault_count); + GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits)); + GEM_BUG_ON(!list_empty(&obj->lut_list)); + + if (obj->ops->release) + obj->ops->release(obj); + + if (WARN_ON(i915_gem_object_has_pinned_pages(obj))) + atomic_set(&obj->mm.pages_pin_count, 0); + __i915_gem_object_put_pages(obj, I915_MM_NORMAL); + GEM_BUG_ON(i915_gem_object_has_pages(obj)); + + if (obj->base.import_attach) + drm_prime_gem_destroy(&obj->base, NULL); + + reservation_object_fini(&obj->__builtin_resv); + drm_gem_object_release(&obj->base); + i915_gem_info_remove_obj(i915, obj->base.size); + + bitmap_free(obj->bit_17); + i915_gem_object_free(obj); + + GEM_BUG_ON(!atomic_read(&i915->mm.free_count)); + atomic_dec(&i915->mm.free_count); + + if (on) + cond_resched(); + } + intel_runtime_pm_put(i915, wakeref); +} + +void i915_gem_flush_free_objects(struct drm_i915_private *i915) +{ + struct llist_node *freed; + + /* Free the oldest, most stale object to keep the free_list short */ + freed = NULL; + if (!llist_empty(&i915->mm.free_list)) { /* quick test for hotpath */ + /* Only one consumer of llist_del_first() allowed */ + spin_lock(&i915->mm.free_lock); + freed = llist_del_first(&i915->mm.free_list); + spin_unlock(&i915->mm.free_lock); + } + if (unlikely(freed)) { + freed->next = NULL; + __i915_gem_free_objects(i915, freed); + } +} + +static void __i915_gem_free_work(struct work_struct *work) +{ + struct drm_i915_private *i915 = + container_of(work, struct drm_i915_private, mm.free_work); + struct llist_node *freed; + + /* + * All file-owned VMA should have been released by this point through + * i915_gem_close_object(), or earlier by i915_gem_context_close(). + * However, the object may also be bound into the global GTT (e.g. + * older GPUs without per-process support, or for direct access through + * the GTT either for the user or for scanout). Those VMA still need to + * unbound now. + */ + + spin_lock(&i915->mm.free_lock); + while ((freed = llist_del_all(&i915->mm.free_list))) { + spin_unlock(&i915->mm.free_lock); + + __i915_gem_free_objects(i915, freed); + if (need_resched()) + return; + + spin_lock(&i915->mm.free_lock); + } + spin_unlock(&i915->mm.free_lock); +} + +static void __i915_gem_free_object_rcu(struct rcu_head *head) +{ + struct drm_i915_gem_object *obj = + container_of(head, typeof(*obj), rcu); + struct drm_i915_private *i915 = to_i915(obj->base.dev); + + /* + * We reuse obj->rcu for the freed list, so we had better not treat + * it like a rcu_head from this point forwards. And we expect all + * objects to be freed via this path. + */ + destroy_rcu_head(&obj->rcu); + + /* + * Since we require blocking on struct_mutex to unbind the freed + * object from the GPU before releasing resources back to the + * system, we can not do that directly from the RCU callback (which may + * be a softirq context), but must instead then defer that work onto a + * kthread. We use the RCU callback rather than move the freed object + * directly onto the work queue so that we can mix between using the + * worker and performing frees directly from subsequent allocations for + * crude but effective memory throttling. 
+ */ + if (llist_add(&obj->freed, &i915->mm.free_list)) + queue_work(i915->wq, &i915->mm.free_work); +} + +void i915_gem_free_object(struct drm_gem_object *gem_obj) +{ + struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); + + if (obj->mm.quirked) + __i915_gem_object_unpin_pages(obj); + + if (discard_backing_storage(obj)) + obj->mm.madv = I915_MADV_DONTNEED; + + /* + * Before we free the object, make sure any pure RCU-only + * read-side critical sections are complete, e.g. + * i915_gem_busy_ioctl(). For the corresponding synchronized + * lookup see i915_gem_object_lookup_rcu(). + */ + atomic_inc(&to_i915(obj->base.dev)->mm.free_count); + call_rcu(&obj->rcu, __i915_gem_free_object_rcu); +} + +void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj) +{ + lockdep_assert_held(&obj->base.dev->struct_mutex); + + if (!i915_gem_object_has_active_reference(obj) && + i915_gem_object_is_active(obj)) + i915_gem_object_set_active_reference(obj); + else + i915_gem_object_put(obj); +} + +void i915_gem_init__objects(struct drm_i915_private *i915) +{ + INIT_WORK(&i915->mm.free_work, __i915_gem_free_work); +} + static void i915_global_objects_shrink(void) { kmem_cache_shrink(global.slab_objects); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h index 9dfd8b0db725..05ef6b0076ae 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h @@ -15,9 +15,29 @@ #include "i915_gem_object_types.h" +void i915_gem_init__objects(struct drm_i915_private *i915); + struct drm_i915_gem_object *i915_gem_object_alloc(void); void i915_gem_object_free(struct drm_i915_gem_object *obj); +void i915_gem_object_init(struct drm_i915_gem_object *obj, + const struct drm_i915_gem_object_ops *ops); +struct drm_i915_gem_object * +i915_gem_object_create_shmem(struct drm_i915_private *i915, u64 size); +struct drm_i915_gem_object * +i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915, + const void *data, size_t size); + +extern const struct drm_i915_gem_object_ops i915_gem_shmem_ops; +void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj, + struct sg_table *pages, + bool needs_clflush); + +void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file); +void i915_gem_free_object(struct drm_gem_object *obj); + +void i915_gem_flush_free_objects(struct drm_i915_private *i915); + /** * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle * @filp: DRM file private date @@ -343,8 +363,23 @@ void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj, unsigned int cache_level); void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj); -void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj, - struct sg_table *pages, - bool needs_clflush); +static inline bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj) +{ + if (obj->cache_dirty) + return false; + + if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)) + return true; + + return obj->pin_global; /* currently in use by HW, keep flushed */ +} + +static inline void __start_cpu_write(struct drm_i915_gem_object *obj) +{ + obj->read_domains = I915_GEM_DOMAIN_CPU; + obj->write_domain = I915_GEM_DOMAIN_CPU; + if (cpu_write_needs_clflush(obj)) + obj->cache_dirty = true; +} #endif diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c new file mode 100644 index 000000000000..e2721bd9ab44 --- /dev/null +++ 
b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c @@ -0,0 +1,511 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2014-2016 Intel Corporation + */ + +#include +#include + +#include "i915_drv.h" +#include "i915_gem_object.h" + +/* + * Move pages to appropriate lru and release the pagevec, decrementing the + * ref count of those pages. + */ +static void check_release_pagevec(struct pagevec *pvec) +{ + check_move_unevictable_pages(pvec); + __pagevec_release(pvec); + cond_resched(); +} + +static int shmem_get_pages(struct drm_i915_gem_object *obj) +{ + struct drm_i915_private *i915 = to_i915(obj->base.dev); + const unsigned long page_count = obj->base.size / PAGE_SIZE; + unsigned long i; + struct address_space *mapping; + struct sg_table *st; + struct scatterlist *sg; + struct sgt_iter sgt_iter; + struct page *page; + unsigned long last_pfn = 0; /* suppress gcc warning */ + unsigned int max_segment = i915_sg_segment_size(); + unsigned int sg_page_sizes; + struct pagevec pvec; + gfp_t noreclaim; + int ret; + + /* + * Assert that the object is not currently in any GPU domain. As it + * wasn't in the GTT, there shouldn't be any way it could have been in + * a GPU cache + */ + GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS); + GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS); + + /* + * If there's no chance of allocating enough pages for the whole + * object, bail early. + */ + if (page_count > totalram_pages()) + return -ENOMEM; + + st = kmalloc(sizeof(*st), GFP_KERNEL); + if (!st) + return -ENOMEM; + +rebuild_st: + if (sg_alloc_table(st, page_count, GFP_KERNEL)) { + kfree(st); + return -ENOMEM; + } + + /* + * Get the list of pages out of our struct file. They'll be pinned + * at this point until we release them. + * + * Fail silently without starting the shrinker + */ + mapping = obj->base.filp->f_mapping; + mapping_set_unevictable(mapping); + noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM); + noreclaim |= __GFP_NORETRY | __GFP_NOWARN; + + sg = st->sgl; + st->nents = 0; + sg_page_sizes = 0; + for (i = 0; i < page_count; i++) { + const unsigned int shrink[] = { + (I915_SHRINK_BOUND | + I915_SHRINK_UNBOUND | + I915_SHRINK_PURGEABLE), + 0, + }, *s = shrink; + gfp_t gfp = noreclaim; + + do { + cond_resched(); + page = shmem_read_mapping_page_gfp(mapping, i, gfp); + if (!IS_ERR(page)) + break; + + if (!*s) { + ret = PTR_ERR(page); + goto err_sg; + } + + i915_gem_shrink(i915, 2 * page_count, NULL, *s++); + + /* + * We've tried hard to allocate the memory by reaping + * our own buffer, now let the real VM do its job and + * go down in flames if truly OOM. + * + * However, since graphics tend to be disposable, + * defer the oom here by reporting the ENOMEM back + * to userspace. + */ + if (!*s) { + /* reclaim and warn, but no oom */ + gfp = mapping_gfp_mask(mapping); + + /* + * Our bo are always dirty and so we require + * kswapd to reclaim our pages (direct reclaim + * does not effectively begin pageout of our + * buffers on its own). However, direct reclaim + * only waits for kswapd when under allocation + * congestion. So as a result __GFP_RECLAIM is + * unreliable and fails to actually reclaim our + * dirty pages -- unless you try over and over + * again with !__GFP_NORETRY. However, we still + * want to fail this allocation rather than + * trigger the out-of-memory killer and for + * this we want __GFP_RETRY_MAYFAIL. 
+ */ + gfp |= __GFP_RETRY_MAYFAIL; + } + } while (1); + + if (!i || + sg->length >= max_segment || + page_to_pfn(page) != last_pfn + 1) { + if (i) { + sg_page_sizes |= sg->length; + sg = sg_next(sg); + } + st->nents++; + sg_set_page(sg, page, PAGE_SIZE, 0); + } else { + sg->length += PAGE_SIZE; + } + last_pfn = page_to_pfn(page); + + /* Check that the i965g/gm workaround works. */ + WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL)); + } + if (sg) { /* loop terminated early; short sg table */ + sg_page_sizes |= sg->length; + sg_mark_end(sg); + } + + /* Trim unused sg entries to avoid wasting memory. */ + i915_sg_trim(st); + + ret = i915_gem_gtt_prepare_pages(obj, st); + if (ret) { + /* + * DMA remapping failed? One possible cause is that + * it could not reserve enough large entries, asking + * for PAGE_SIZE chunks instead may be helpful. + */ + if (max_segment > PAGE_SIZE) { + for_each_sgt_page(page, sgt_iter, st) + put_page(page); + sg_free_table(st); + + max_segment = PAGE_SIZE; + goto rebuild_st; + } else { + dev_warn(&i915->drm.pdev->dev, + "Failed to DMA remap %lu pages\n", + page_count); + goto err_pages; + } + } + + if (i915_gem_object_needs_bit17_swizzle(obj)) + i915_gem_object_do_bit_17_swizzle(obj, st); + + __i915_gem_object_set_pages(obj, st, sg_page_sizes); + + return 0; + +err_sg: + sg_mark_end(sg); +err_pages: + mapping_clear_unevictable(mapping); + pagevec_init(&pvec); + for_each_sgt_page(page, sgt_iter, st) { + if (!pagevec_add(&pvec, page)) + check_release_pagevec(&pvec); + } + if (pagevec_count(&pvec)) + check_release_pagevec(&pvec); + sg_free_table(st); + kfree(st); + + /* + * shmemfs first checks if there is enough memory to allocate the page + * and reports ENOSPC should there be insufficient, along with the usual + * ENOMEM for a genuine allocation failure. + * + * We use ENOSPC in our driver to mean that we have run out of aperture + * space and so want to translate the error from shmemfs back to our + * usual understanding of ENOMEM. 
+ */ + if (ret == -ENOSPC) + ret = -ENOMEM; + + return ret; +} + +void +__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj, + struct sg_table *pages, + bool needs_clflush) +{ + GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED); + + if (obj->mm.madv == I915_MADV_DONTNEED) + obj->mm.dirty = false; + + if (needs_clflush && + (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0 && + !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)) + drm_clflush_sg(pages); + + __start_cpu_write(obj); +} + +static void +shmem_put_pages(struct drm_i915_gem_object *obj, struct sg_table *pages) +{ + struct sgt_iter sgt_iter; + struct pagevec pvec; + struct page *page; + + __i915_gem_object_release_shmem(obj, pages, true); + + i915_gem_gtt_finish_pages(obj, pages); + + if (i915_gem_object_needs_bit17_swizzle(obj)) + i915_gem_object_save_bit_17_swizzle(obj, pages); + + mapping_clear_unevictable(file_inode(obj->base.filp)->i_mapping); + + pagevec_init(&pvec); + for_each_sgt_page(page, sgt_iter, pages) { + if (obj->mm.dirty) + set_page_dirty(page); + + if (obj->mm.madv == I915_MADV_WILLNEED) + mark_page_accessed(page); + + if (!pagevec_add(&pvec, page)) + check_release_pagevec(&pvec); + } + if (pagevec_count(&pvec)) + check_release_pagevec(&pvec); + obj->mm.dirty = false; + + sg_free_table(pages); + kfree(pages); +} + +static int +shmem_pwrite(struct drm_i915_gem_object *obj, + const struct drm_i915_gem_pwrite *arg) +{ + struct address_space *mapping = obj->base.filp->f_mapping; + char __user *user_data = u64_to_user_ptr(arg->data_ptr); + u64 remain, offset; + unsigned int pg; + + /* Caller already validated user args */ + GEM_BUG_ON(!access_ok(user_data, arg->size)); + + /* + * Before we instantiate/pin the backing store for our use, we + * can prepopulate the shmemfs filp efficiently using a write into + * the pagecache. We avoid the penalty of instantiating all the + * pages, important if the user is just writing to a few and never + * uses the object on the GPU, and using a direct write into shmemfs + * allows it to avoid the cost of retrieving a page (either swapin + * or clearing-before-use) before it is overwritten. + */ + if (i915_gem_object_has_pages(obj)) + return -ENODEV; + + if (obj->mm.madv != I915_MADV_WILLNEED) + return -EFAULT; + + /* + * Before the pages are instantiated the object is treated as being + * in the CPU domain. The pages will be clflushed as required before + * use, and we can freely write into the pages directly. If userspace + * races pwrite with any other operation; corruption will ensue - + * that is userspace's prerogative! 
+ */ + + remain = arg->size; + offset = arg->offset; + pg = offset_in_page(offset); + + do { + unsigned int len, unwritten; + struct page *page; + void *data, *vaddr; + int err; + char c; + + len = PAGE_SIZE - pg; + if (len > remain) + len = remain; + + /* Prefault the user page to reduce potential recursion */ + err = __get_user(c, user_data); + if (err) + return err; + + err = __get_user(c, user_data + len - 1); + if (err) + return err; + + err = pagecache_write_begin(obj->base.filp, mapping, + offset, len, 0, + &page, &data); + if (err < 0) + return err; + + vaddr = kmap_atomic(page); + unwritten = __copy_from_user_inatomic(vaddr + pg, + user_data, + len); + kunmap_atomic(vaddr); + + err = pagecache_write_end(obj->base.filp, mapping, + offset, len, len - unwritten, + page, data); + if (err < 0) + return err; + + /* We don't handle -EFAULT, leave it to the caller to check */ + if (unwritten) + return -ENODEV; + + remain -= len; + user_data += len; + offset += len; + pg = 0; + } while (remain); + + return 0; +} + +const struct drm_i915_gem_object_ops i915_gem_shmem_ops = { + .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | + I915_GEM_OBJECT_IS_SHRINKABLE, + + .get_pages = shmem_get_pages, + .put_pages = shmem_put_pages, + + .pwrite = shmem_pwrite, +}; + +static int create_shmem(struct drm_i915_private *i915, + struct drm_gem_object *obj, + size_t size) +{ + unsigned long flags = VM_NORESERVE; + struct file *filp; + + drm_gem_private_object_init(&i915->drm, obj, size); + + if (i915->mm.gemfs) + filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size, + flags); + else + filp = shmem_file_setup("i915", size, flags); + if (IS_ERR(filp)) + return PTR_ERR(filp); + + obj->filp = filp; + return 0; +} + +struct drm_i915_gem_object * +i915_gem_object_create_shmem(struct drm_i915_private *i915, u64 size) +{ + struct drm_i915_gem_object *obj; + struct address_space *mapping; + unsigned int cache_level; + gfp_t mask; + int ret; + + /* There is a prevalence of the assumption that we fit the object's + * page count inside a 32bit _signed_ variable. Let's document this and + * catch if we ever need to fix it. In the meantime, if you do spot + * such a local variable, please consider fixing! + */ + if (size >> PAGE_SHIFT > INT_MAX) + return ERR_PTR(-E2BIG); + + if (overflows_type(size, obj->base.size)) + return ERR_PTR(-E2BIG); + + obj = i915_gem_object_alloc(); + if (!obj) + return ERR_PTR(-ENOMEM); + + ret = create_shmem(i915, &obj->base, size); + if (ret) + goto fail; + + mask = GFP_HIGHUSER | __GFP_RECLAIMABLE; + if (IS_I965GM(i915) || IS_I965G(i915)) { + /* 965gm cannot relocate objects above 4GiB. */ + mask &= ~__GFP_HIGHMEM; + mask |= __GFP_DMA32; + } + + mapping = obj->base.filp->f_mapping; + mapping_set_gfp_mask(mapping, mask); + GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM)); + + i915_gem_object_init(obj, &i915_gem_shmem_ops); + + obj->write_domain = I915_GEM_DOMAIN_CPU; + obj->read_domains = I915_GEM_DOMAIN_CPU; + + if (HAS_LLC(i915)) + /* On some devices, we can have the GPU use the LLC (the CPU + * cache) for about a 10% performance improvement + * compared to uncached. Graphics requests other than + * display scanout are coherent with the CPU in + * accessing this cache. This means in this mode we + * don't need to clflush on the CPU side, and on the + * GPU side we only need to flush internal caches to + * get data visible to the CPU. + * + * However, we maintain the display planes as UC, and so + * need to rebind when first used as such. 
+ */ + cache_level = I915_CACHE_LLC; + else + cache_level = I915_CACHE_NONE; + + i915_gem_object_set_cache_coherency(obj, cache_level); + + trace_i915_gem_object_create(obj); + + return obj; + +fail: + i915_gem_object_free(obj); + return ERR_PTR(ret); +} + +/* Allocate a new GEM object and fill it with the supplied data */ +struct drm_i915_gem_object * +i915_gem_object_create_shmem_from_data(struct drm_i915_private *dev_priv, + const void *data, size_t size) +{ + struct drm_i915_gem_object *obj; + struct file *file; + size_t offset; + int err; + + obj = i915_gem_object_create_shmem(dev_priv, round_up(size, PAGE_SIZE)); + if (IS_ERR(obj)) + return obj; + + GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU); + + file = obj->base.filp; + offset = 0; + do { + unsigned int len = min_t(typeof(size), size, PAGE_SIZE); + struct page *page; + void *pgdata, *vaddr; + + err = pagecache_write_begin(file, file->f_mapping, + offset, len, 0, + &page, &pgdata); + if (err < 0) + goto fail; + + vaddr = kmap(page); + memcpy(vaddr, data, len); + kunmap(page); + + err = pagecache_write_end(file, file->f_mapping, + offset, len, len, + page, pgdata); + if (err < 0) + goto fail; + + size -= len; + data += len; + offset += len; + } while (size); + + return obj; + +fail: + i915_gem_object_put(obj); + return ERR_PTR(err); +} diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 1f7bee0cae0c..38a8e55a7c85 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -1919,7 +1919,7 @@ static int lrc_setup_wa_ctx(struct intel_engine_cs *engine) struct i915_vma *vma; int err; - obj = i915_gem_object_create(engine->i915, CTX_WA_BB_OBJ_SIZE); + obj = i915_gem_object_create_shmem(engine->i915, CTX_WA_BB_OBJ_SIZE); if (IS_ERR(obj)) return PTR_ERR(obj); @@ -3010,7 +3010,7 @@ static int execlists_context_deferred_alloc(struct intel_context *ce, */ context_size += LRC_HEADER_PAGES * PAGE_SIZE; - ctx_obj = i915_gem_object_create(engine->i915, context_size); + ctx_obj = i915_gem_object_create_shmem(engine->i915, context_size); if (IS_ERR(ctx_obj)) return PTR_ERR(ctx_obj); diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c index f0d60affdba3..ac93080bd863 100644 --- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c @@ -1396,7 +1396,7 @@ alloc_context_vma(struct intel_engine_cs *engine) struct i915_vma *vma; int err; - obj = i915_gem_object_create(i915, engine->context_size); + obj = i915_gem_object_create_shmem(i915, engine->context_size); if (IS_ERR(obj)) return ERR_CAST(obj); diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index 5cb59c0b4bbe..7584bf0aeaa4 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -1725,7 +1725,7 @@ static int perform_bb_shadow(struct parser_exec_state *s) int ret = 0; struct intel_vgpu_mm *mm = (s->buf_addr_type == GTT_BUFFER) ? s->vgpu->gtt.ggtt_mm : s->workload->shadow_mm; - unsigned long gma_start_offset = 0; + unsigned long start_offset = 0; /* get the start gm address of the batch buffer */ gma = get_gma_bb_from_cmd(s, 1); @@ -1742,7 +1742,7 @@ static int perform_bb_shadow(struct parser_exec_state *s) bb->ppgtt = (s->buf_addr_type == GTT_BUFFER) ? false : true; - /* the gma_start_offset stores the batch buffer's start gma's + /* the start_offset stores the batch buffer's start gma's * offset relative to page boundary. 
so for non-privileged batch * buffer, the shadowed gem object holds exactly the same page * layout as original gem object. This is for the convience of @@ -1754,10 +1754,11 @@ static int perform_bb_shadow(struct parser_exec_state *s) * that of shadowed page. */ if (bb->ppgtt) - gma_start_offset = gma & ~I915_GTT_PAGE_MASK; + start_offset = gma & ~I915_GTT_PAGE_MASK; - bb->obj = i915_gem_object_create(s->vgpu->gvt->dev_priv, - roundup(bb_size + gma_start_offset, PAGE_SIZE)); + bb->obj = i915_gem_object_create_shmem(s->vgpu->gvt->dev_priv, + round_up(bb_size + start_offset, + PAGE_SIZE)); if (IS_ERR(bb->obj)) { ret = PTR_ERR(bb->obj); goto err_free_bb; @@ -1780,7 +1781,7 @@ static int perform_bb_shadow(struct parser_exec_state *s) ret = copy_gma_to_hva(s->vgpu, mm, gma, gma + bb_size, - bb->va + gma_start_offset); + bb->va + start_offset); if (ret < 0) { gvt_vgpu_err("fail to copy guest ring buffer\n"); ret = -EFAULT; @@ -1806,7 +1807,7 @@ static int perform_bb_shadow(struct parser_exec_state *s) * buffer's gma in pair. After all, we don't want to pin the shadow * buffer here (too early). */ - s->ip_va = bb->va + gma_start_offset; + s->ip_va = bb->va + start_offset; s->ip_gma = gma; return 0; err_unmap: @@ -2829,9 +2830,9 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx) int ret = 0; void *map; - obj = i915_gem_object_create(workload->vgpu->gvt->dev_priv, - roundup(ctx_size + CACHELINE_BYTES, - PAGE_SIZE)); + obj = i915_gem_object_create_shmem(workload->vgpu->gvt->dev_priv, + roundup(ctx_size + CACHELINE_BYTES, + PAGE_SIZE)); if (IS_ERR(obj)) return PTR_ERR(obj); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 0b853643b5de..507dba5dae9a 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2761,16 +2761,6 @@ void i915_gem_load_init_fences(struct drm_i915_private *dev_priv); int i915_gem_freeze(struct drm_i915_private *dev_priv); int i915_gem_freeze_late(struct drm_i915_private *dev_priv); -void i915_gem_object_init(struct drm_i915_gem_object *obj, - const struct drm_i915_gem_object_ops *ops); -struct drm_i915_gem_object * -i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size); -struct drm_i915_gem_object * -i915_gem_object_create_from_data(struct drm_i915_private *dev_priv, - const void *data, size_t size); -void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file); -void i915_gem_free_object(struct drm_gem_object *obj); - static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915) { if (!atomic_read(&i915->mm.free_count)) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index aed9f36db803..98ac2bbb6534 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -58,19 +58,6 @@ #include "intel_frontbuffer.h" #include "intel_pm.h" -static void i915_gem_flush_free_objects(struct drm_i915_private *i915); - -static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj) -{ - if (obj->cache_dirty) - return false; - - if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)) - return true; - - return obj->pin_global; /* currently in use by HW, keep flushed */ -} - static int insert_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node, u32 size) @@ -88,25 +75,6 @@ remove_mappable_node(struct drm_mm_node *node) drm_mm_remove_node(node); } -/* some bookkeeping */ -static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv, - u64 size) -{ - 
spin_lock(&dev_priv->mm.object_stat_lock); - dev_priv->mm.object_count++; - dev_priv->mm.object_memory += size; - spin_unlock(&dev_priv->mm.object_stat_lock); -} - -static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv, - u64 size) -{ - spin_lock(&dev_priv->mm.object_stat_lock); - dev_priv->mm.object_count--; - dev_priv->mm.object_memory -= size; - spin_unlock(&dev_priv->mm.object_stat_lock); -} - int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, struct drm_file *file) @@ -207,32 +175,6 @@ err_phys: return err; } -static void __start_cpu_write(struct drm_i915_gem_object *obj) -{ - obj->read_domains = I915_GEM_DOMAIN_CPU; - obj->write_domain = I915_GEM_DOMAIN_CPU; - if (cpu_write_needs_clflush(obj)) - obj->cache_dirty = true; -} - -void -__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj, - struct sg_table *pages, - bool needs_clflush) -{ - GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED); - - if (obj->mm.madv == I915_MADV_DONTNEED) - obj->mm.dirty = false; - - if (needs_clflush && - (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0 && - !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)) - drm_clflush_sg(pages); - - __start_cpu_write(obj); -} - static void i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj, struct sg_table *pages) @@ -284,8 +226,6 @@ static const struct drm_i915_gem_object_ops i915_gem_phys_ops = { .release = i915_gem_object_release_phys, }; -static const struct drm_i915_gem_object_ops i915_gem_object_ops; - int i915_gem_object_unbind(struct drm_i915_gem_object *obj) { struct i915_vma *vma; @@ -542,7 +482,7 @@ i915_gem_create(struct drm_file *file, return -EINVAL; /* Allocate the new object */ - obj = i915_gem_object_create(dev_priv, size); + obj = i915_gem_object_create_shmem(dev_priv, size); if (IS_ERR(obj)) return PTR_ERR(obj); @@ -2092,52 +2032,6 @@ void __i915_gem_object_truncate(struct drm_i915_gem_object *obj) obj->mm.pages = ERR_PTR(-EFAULT); } -/* - * Move pages to appropriate lru and release the pagevec, decrementing the - * ref count of those pages. 
- */ -static void check_release_pagevec(struct pagevec *pvec) -{ - check_move_unevictable_pages(pvec); - __pagevec_release(pvec); - cond_resched(); -} - -static void -i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj, - struct sg_table *pages) -{ - struct sgt_iter sgt_iter; - struct pagevec pvec; - struct page *page; - - __i915_gem_object_release_shmem(obj, pages, true); - i915_gem_gtt_finish_pages(obj, pages); - - if (i915_gem_object_needs_bit17_swizzle(obj)) - i915_gem_object_save_bit_17_swizzle(obj, pages); - - mapping_clear_unevictable(file_inode(obj->base.filp)->i_mapping); - - pagevec_init(&pvec); - for_each_sgt_page(page, sgt_iter, pages) { - if (obj->mm.dirty) - set_page_dirty(page); - - if (obj->mm.madv == I915_MADV_WILLNEED) - mark_page_accessed(page); - - if (!pagevec_add(&pvec, page)) - check_release_pagevec(&pvec); - } - if (pagevec_count(&pvec)) - check_release_pagevec(&pvec); - obj->mm.dirty = false; - - sg_free_table(pages); - kfree(pages); -} - static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj) { struct radix_tree_iter iter; @@ -2253,196 +2147,6 @@ bool i915_sg_trim(struct sg_table *orig_st) return true; } -static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) -{ - struct drm_i915_private *dev_priv = to_i915(obj->base.dev); - const unsigned long page_count = obj->base.size / PAGE_SIZE; - unsigned long i; - struct address_space *mapping; - struct sg_table *st; - struct scatterlist *sg; - struct sgt_iter sgt_iter; - struct page *page; - unsigned long last_pfn = 0; /* suppress gcc warning */ - unsigned int max_segment = i915_sg_segment_size(); - unsigned int sg_page_sizes; - struct pagevec pvec; - gfp_t noreclaim; - int ret; - - /* - * Assert that the object is not currently in any GPU domain. As it - * wasn't in the GTT, there shouldn't be any way it could have been in - * a GPU cache - */ - GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS); - GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS); - - /* - * If there's no chance of allocating enough pages for the whole - * object, bail early. - */ - if (page_count > totalram_pages()) - return -ENOMEM; - - st = kmalloc(sizeof(*st), GFP_KERNEL); - if (st == NULL) - return -ENOMEM; - -rebuild_st: - if (sg_alloc_table(st, page_count, GFP_KERNEL)) { - kfree(st); - return -ENOMEM; - } - - /* - * Get the list of pages out of our struct file. They'll be pinned - * at this point until we release them. - * - * Fail silently without starting the shrinker - */ - mapping = obj->base.filp->f_mapping; - mapping_set_unevictable(mapping); - noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM); - noreclaim |= __GFP_NORETRY | __GFP_NOWARN; - - sg = st->sgl; - st->nents = 0; - sg_page_sizes = 0; - for (i = 0; i < page_count; i++) { - const unsigned int shrink[] = { - I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE, - 0, - }, *s = shrink; - gfp_t gfp = noreclaim; - - do { - cond_resched(); - page = shmem_read_mapping_page_gfp(mapping, i, gfp); - if (!IS_ERR(page)) - break; - - if (!*s) { - ret = PTR_ERR(page); - goto err_sg; - } - - i915_gem_shrink(dev_priv, 2 * page_count, NULL, *s++); - - /* - * We've tried hard to allocate the memory by reaping - * our own buffer, now let the real VM do its job and - * go down in flames if truly OOM. - * - * However, since graphics tend to be disposable, - * defer the oom here by reporting the ENOMEM back - * to userspace. 
- */ - if (!*s) { - /* reclaim and warn, but no oom */ - gfp = mapping_gfp_mask(mapping); - - /* - * Our bo are always dirty and so we require - * kswapd to reclaim our pages (direct reclaim - * does not effectively begin pageout of our - * buffers on its own). However, direct reclaim - * only waits for kswapd when under allocation - * congestion. So as a result __GFP_RECLAIM is - * unreliable and fails to actually reclaim our - * dirty pages -- unless you try over and over - * again with !__GFP_NORETRY. However, we still - * want to fail this allocation rather than - * trigger the out-of-memory killer and for - * this we want __GFP_RETRY_MAYFAIL. - */ - gfp |= __GFP_RETRY_MAYFAIL; - } - } while (1); - - if (!i || - sg->length >= max_segment || - page_to_pfn(page) != last_pfn + 1) { - if (i) { - sg_page_sizes |= sg->length; - sg = sg_next(sg); - } - st->nents++; - sg_set_page(sg, page, PAGE_SIZE, 0); - } else { - sg->length += PAGE_SIZE; - } - last_pfn = page_to_pfn(page); - - /* Check that the i965g/gm workaround works. */ - WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL)); - } - if (sg) { /* loop terminated early; short sg table */ - sg_page_sizes |= sg->length; - sg_mark_end(sg); - } - - /* Trim unused sg entries to avoid wasting memory. */ - i915_sg_trim(st); - - ret = i915_gem_gtt_prepare_pages(obj, st); - if (ret) { - /* - * DMA remapping failed? One possible cause is that - * it could not reserve enough large entries, asking - * for PAGE_SIZE chunks instead may be helpful. - */ - if (max_segment > PAGE_SIZE) { - for_each_sgt_page(page, sgt_iter, st) - put_page(page); - sg_free_table(st); - - max_segment = PAGE_SIZE; - goto rebuild_st; - } else { - dev_warn(&dev_priv->drm.pdev->dev, - "Failed to DMA remap %lu pages\n", - page_count); - goto err_pages; - } - } - - if (i915_gem_object_needs_bit17_swizzle(obj)) - i915_gem_object_do_bit_17_swizzle(obj, st); - - __i915_gem_object_set_pages(obj, st, sg_page_sizes); - - return 0; - -err_sg: - sg_mark_end(sg); -err_pages: - mapping_clear_unevictable(mapping); - pagevec_init(&pvec); - for_each_sgt_page(page, sgt_iter, st) { - if (!pagevec_add(&pvec, page)) - check_release_pagevec(&pvec); - } - if (pagevec_count(&pvec)) - check_release_pagevec(&pvec); - sg_free_table(st); - kfree(st); - - /* - * shmemfs first checks if there is enough memory to allocate the page - * and reports ENOSPC should there be insufficient, along with the usual - * ENOMEM for a genuine allocation failure. - * - * We use ENOSPC in our driver to mean that we have run out of aperture - * space and so want to translate the error from shmemfs back to our - * usual understanding of ENOMEM. - */ - if (ret == -ENOSPC) - ret = -ENOMEM; - - return ret; -} - void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, struct sg_table *pages, unsigned int sg_page_sizes) @@ -2689,133 +2393,6 @@ void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj, } } -static int -i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj, - const struct drm_i915_gem_pwrite *arg) -{ - struct address_space *mapping = obj->base.filp->f_mapping; - char __user *user_data = u64_to_user_ptr(arg->data_ptr); - u64 remain, offset; - unsigned int pg; - - /* Caller already validated user args */ - GEM_BUG_ON(!access_ok(user_data, arg->size)); - - /* - * Before we instantiate/pin the backing store for our use, we - * can prepopulate the shmemfs filp efficiently using a write into - * the pagecache. 
We avoid the penalty of instantiating all the - * pages, important if the user is just writing to a few and never - * uses the object on the GPU, and using a direct write into shmemfs - * allows it to avoid the cost of retrieving a page (either swapin - * or clearing-before-use) before it is overwritten. - */ - if (i915_gem_object_has_pages(obj)) - return -ENODEV; - - if (obj->mm.madv != I915_MADV_WILLNEED) - return -EFAULT; - - /* - * Before the pages are instantiated the object is treated as being - * in the CPU domain. The pages will be clflushed as required before - * use, and we can freely write into the pages directly. If userspace - * races pwrite with any other operation; corruption will ensue - - * that is userspace's prerogative! - */ - - remain = arg->size; - offset = arg->offset; - pg = offset_in_page(offset); - - do { - unsigned int len, unwritten; - struct page *page; - void *data, *vaddr; - int err; - char c; - - len = PAGE_SIZE - pg; - if (len > remain) - len = remain; - - /* Prefault the user page to reduce potential recursion */ - err = __get_user(c, user_data); - if (err) - return err; - - err = __get_user(c, user_data + len - 1); - if (err) - return err; - - err = pagecache_write_begin(obj->base.filp, mapping, - offset, len, 0, - &page, &data); - if (err < 0) - return err; - - vaddr = kmap_atomic(page); - unwritten = __copy_from_user_inatomic(vaddr + pg, - user_data, - len); - kunmap_atomic(vaddr); - - err = pagecache_write_end(obj->base.filp, mapping, - offset, len, len - unwritten, - page, data); - if (err < 0) - return err; - - /* We don't handle -EFAULT, leave it to the caller to check */ - if (unwritten) - return -ENODEV; - - remain -= len; - user_data += len; - offset += len; - pg = 0; - } while (remain); - - return 0; -} - -void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file) -{ - struct drm_i915_private *i915 = to_i915(gem->dev); - struct drm_i915_gem_object *obj = to_intel_bo(gem); - struct drm_i915_file_private *fpriv = file->driver_priv; - struct i915_lut_handle *lut, *ln; - - mutex_lock(&i915->drm.struct_mutex); - - list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) { - struct i915_gem_context *ctx = lut->ctx; - struct i915_vma *vma; - - GEM_BUG_ON(ctx->file_priv == ERR_PTR(-EBADF)); - if (ctx->file_priv != fpriv) - continue; - - vma = radix_tree_delete(&ctx->handles_vma, lut->handle); - GEM_BUG_ON(vma->obj != obj); - - /* We allow the process to have multiple handles to the same - * vma, in the same fd namespace, by virtue of flink/open. 
- */ - GEM_BUG_ON(!vma->open_count); - if (!--vma->open_count && !i915_vma_is_ggtt(vma)) - i915_vma_close(vma); - - list_del(&lut->obj_link); - list_del(&lut->ctx_link); - - i915_lut_handle_free(lut); - __i915_gem_object_release_unless_active(obj); - } - - mutex_unlock(&i915->drm.struct_mutex); -} - static unsigned long to_wait_timeout(s64 timeout_ns) { if (timeout_ns < 0) @@ -3814,348 +3391,6 @@ out: return err; } -static void -frontbuffer_retire(struct i915_active_request *active, - struct i915_request *request) -{ - struct drm_i915_gem_object *obj = - container_of(active, typeof(*obj), frontbuffer_write); - - intel_fb_obj_flush(obj, ORIGIN_CS); -} - -void i915_gem_object_init(struct drm_i915_gem_object *obj, - const struct drm_i915_gem_object_ops *ops) -{ - mutex_init(&obj->mm.lock); - - spin_lock_init(&obj->vma.lock); - INIT_LIST_HEAD(&obj->vma.list); - - INIT_LIST_HEAD(&obj->lut_list); - INIT_LIST_HEAD(&obj->batch_pool_link); - - init_rcu_head(&obj->rcu); - - obj->ops = ops; - - reservation_object_init(&obj->__builtin_resv); - obj->resv = &obj->__builtin_resv; - - obj->frontbuffer_ggtt_origin = ORIGIN_GTT; - i915_active_request_init(&obj->frontbuffer_write, - NULL, frontbuffer_retire); - - obj->mm.madv = I915_MADV_WILLNEED; - INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN); - mutex_init(&obj->mm.get_page.lock); - - i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size); -} - -static const struct drm_i915_gem_object_ops i915_gem_object_ops = { - .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | - I915_GEM_OBJECT_IS_SHRINKABLE, - - .get_pages = i915_gem_object_get_pages_gtt, - .put_pages = i915_gem_object_put_pages_gtt, - - .pwrite = i915_gem_object_pwrite_gtt, -}; - -static int i915_gem_object_create_shmem(struct drm_device *dev, - struct drm_gem_object *obj, - size_t size) -{ - struct drm_i915_private *i915 = to_i915(dev); - unsigned long flags = VM_NORESERVE; - struct file *filp; - - drm_gem_private_object_init(dev, obj, size); - - if (i915->mm.gemfs) - filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size, - flags); - else - filp = shmem_file_setup("i915", size, flags); - - if (IS_ERR(filp)) - return PTR_ERR(filp); - - obj->filp = filp; - - return 0; -} - -struct drm_i915_gem_object * -i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size) -{ - struct drm_i915_gem_object *obj; - struct address_space *mapping; - unsigned int cache_level; - gfp_t mask; - int ret; - - /* There is a prevalence of the assumption that we fit the object's - * page count inside a 32bit _signed_ variable. Let's document this and - * catch if we ever need to fix it. In the meantime, if you do spot - * such a local variable, please consider fixing! - */ - if (size >> PAGE_SHIFT > INT_MAX) - return ERR_PTR(-E2BIG); - - if (overflows_type(size, obj->base.size)) - return ERR_PTR(-E2BIG); - - obj = i915_gem_object_alloc(); - if (obj == NULL) - return ERR_PTR(-ENOMEM); - - ret = i915_gem_object_create_shmem(&dev_priv->drm, &obj->base, size); - if (ret) - goto fail; - - mask = GFP_HIGHUSER | __GFP_RECLAIMABLE; - if (IS_I965GM(dev_priv) || IS_I965G(dev_priv)) { - /* 965gm cannot relocate objects above 4GiB. 
*/ - mask &= ~__GFP_HIGHMEM; - mask |= __GFP_DMA32; - } - - mapping = obj->base.filp->f_mapping; - mapping_set_gfp_mask(mapping, mask); - GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM)); - - i915_gem_object_init(obj, &i915_gem_object_ops); - - obj->write_domain = I915_GEM_DOMAIN_CPU; - obj->read_domains = I915_GEM_DOMAIN_CPU; - - if (HAS_LLC(dev_priv)) - /* On some devices, we can have the GPU use the LLC (the CPU - * cache) for about a 10% performance improvement - * compared to uncached. Graphics requests other than - * display scanout are coherent with the CPU in - * accessing this cache. This means in this mode we - * don't need to clflush on the CPU side, and on the - * GPU side we only need to flush internal caches to - * get data visible to the CPU. - * - * However, we maintain the display planes as UC, and so - * need to rebind when first used as such. - */ - cache_level = I915_CACHE_LLC; - else - cache_level = I915_CACHE_NONE; - - i915_gem_object_set_cache_coherency(obj, cache_level); - - trace_i915_gem_object_create(obj); - - return obj; - -fail: - i915_gem_object_free(obj); - return ERR_PTR(ret); -} - -static bool discard_backing_storage(struct drm_i915_gem_object *obj) -{ - /* If we are the last user of the backing storage (be it shmemfs - * pages or stolen etc), we know that the pages are going to be - * immediately released. In this case, we can then skip copying - * back the contents from the GPU. - */ - - if (obj->mm.madv != I915_MADV_WILLNEED) - return false; - - if (obj->base.filp == NULL) - return true; - - /* At first glance, this looks racy, but then again so would be - * userspace racing mmap against close. However, the first external - * reference to the filp can only be obtained through the - * i915_gem_mmap_ioctl() which safeguards us against the user - * acquiring such a reference whilst we are in the middle of - * freeing the object. - */ - return file_count(obj->base.filp) == 1; -} - -static void __i915_gem_free_objects(struct drm_i915_private *i915, - struct llist_node *freed) -{ - struct drm_i915_gem_object *obj, *on; - intel_wakeref_t wakeref; - - wakeref = intel_runtime_pm_get(i915); - llist_for_each_entry_safe(obj, on, freed, freed) { - struct i915_vma *vma, *vn; - - trace_i915_gem_object_destroy(obj); - - mutex_lock(&i915->drm.struct_mutex); - - GEM_BUG_ON(i915_gem_object_is_active(obj)); - list_for_each_entry_safe(vma, vn, &obj->vma.list, obj_link) { - GEM_BUG_ON(i915_vma_is_active(vma)); - vma->flags &= ~I915_VMA_PIN_MASK; - i915_vma_destroy(vma); - } - GEM_BUG_ON(!list_empty(&obj->vma.list)); - GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma.tree)); - - /* This serializes freeing with the shrinker. Since the free - * is delayed, first by RCU then by the workqueue, we want the - * shrinker to be able to free pages of unreferenced objects, - * or else we may oom whilst there are plenty of deferred - * freed objects. 
- */ - if (i915_gem_object_has_pages(obj)) { - spin_lock(&i915->mm.obj_lock); - list_del_init(&obj->mm.link); - spin_unlock(&i915->mm.obj_lock); - } - - mutex_unlock(&i915->drm.struct_mutex); - - GEM_BUG_ON(obj->bind_count); - GEM_BUG_ON(obj->userfault_count); - GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits)); - GEM_BUG_ON(!list_empty(&obj->lut_list)); - - if (obj->ops->release) - obj->ops->release(obj); - - if (WARN_ON(i915_gem_object_has_pinned_pages(obj))) - atomic_set(&obj->mm.pages_pin_count, 0); - __i915_gem_object_put_pages(obj, I915_MM_NORMAL); - GEM_BUG_ON(i915_gem_object_has_pages(obj)); - - if (obj->base.import_attach) - drm_prime_gem_destroy(&obj->base, NULL); - - reservation_object_fini(&obj->__builtin_resv); - drm_gem_object_release(&obj->base); - i915_gem_info_remove_obj(i915, obj->base.size); - - bitmap_free(obj->bit_17); - i915_gem_object_free(obj); - - GEM_BUG_ON(!atomic_read(&i915->mm.free_count)); - atomic_dec(&i915->mm.free_count); - - if (on) - cond_resched(); - } - intel_runtime_pm_put(i915, wakeref); -} - -static void i915_gem_flush_free_objects(struct drm_i915_private *i915) -{ - struct llist_node *freed; - - /* Free the oldest, most stale object to keep the free_list short */ - freed = NULL; - if (!llist_empty(&i915->mm.free_list)) { /* quick test for hotpath */ - /* Only one consumer of llist_del_first() allowed */ - spin_lock(&i915->mm.free_lock); - freed = llist_del_first(&i915->mm.free_list); - spin_unlock(&i915->mm.free_lock); - } - if (unlikely(freed)) { - freed->next = NULL; - __i915_gem_free_objects(i915, freed); - } -} - -static void __i915_gem_free_work(struct work_struct *work) -{ - struct drm_i915_private *i915 = - container_of(work, struct drm_i915_private, mm.free_work); - struct llist_node *freed; - - /* - * All file-owned VMA should have been released by this point through - * i915_gem_close_object(), or earlier by i915_gem_context_close(). - * However, the object may also be bound into the global GTT (e.g. - * older GPUs without per-process support, or for direct access through - * the GTT either for the user or for scanout). Those VMA still need to - * unbound now. - */ - - spin_lock(&i915->mm.free_lock); - while ((freed = llist_del_all(&i915->mm.free_list))) { - spin_unlock(&i915->mm.free_lock); - - __i915_gem_free_objects(i915, freed); - if (need_resched()) - return; - - spin_lock(&i915->mm.free_lock); - } - spin_unlock(&i915->mm.free_lock); -} - -static void __i915_gem_free_object_rcu(struct rcu_head *head) -{ - struct drm_i915_gem_object *obj = - container_of(head, typeof(*obj), rcu); - struct drm_i915_private *i915 = to_i915(obj->base.dev); - - /* - * We reuse obj->rcu for the freed list, so we had better not treat - * it like a rcu_head from this point forwards. And we expect all - * objects to be freed via this path. - */ - destroy_rcu_head(&obj->rcu); - - /* - * Since we require blocking on struct_mutex to unbind the freed - * object from the GPU before releasing resources back to the - * system, we can not do that directly from the RCU callback (which may - * be a softirq context), but must instead then defer that work onto a - * kthread. We use the RCU callback rather than move the freed object - * directly onto the work queue so that we can mix between using the - * worker and performing frees directly from subsequent allocations for - * crude but effective memory throttling. 
- */ - if (llist_add(&obj->freed, &i915->mm.free_list)) - queue_work(i915->wq, &i915->mm.free_work); -} - -void i915_gem_free_object(struct drm_gem_object *gem_obj) -{ - struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); - - if (obj->mm.quirked) - __i915_gem_object_unpin_pages(obj); - - if (discard_backing_storage(obj)) - obj->mm.madv = I915_MADV_DONTNEED; - - /* - * Before we free the object, make sure any pure RCU-only - * read-side critical sections are complete, e.g. - * i915_gem_busy_ioctl(). For the corresponding synchronized - * lookup see i915_gem_object_lookup_rcu(). - */ - atomic_inc(&to_i915(obj->base.dev)->mm.free_count); - call_rcu(&obj->rcu, __i915_gem_free_object_rcu); -} - -void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj) -{ - lockdep_assert_held(&obj->base.dev->struct_mutex); - - if (!i915_gem_object_has_active_reference(obj) && - i915_gem_object_is_active(obj)) - i915_gem_object_set_active_reference(obj); - else - i915_gem_object_put(obj); -} - void i915_gem_sanitize(struct drm_i915_private *i915) { intel_wakeref_t wakeref; @@ -4756,7 +3991,7 @@ static void i915_gem_init__mm(struct drm_i915_private *i915) INIT_LIST_HEAD(&i915->mm.userfault_list); intel_wakeref_auto_init(&i915->mm.userfault_wakeref, i915); - INIT_WORK(&i915->mm.free_work, __i915_gem_free_work); + i915_gem_init__objects(i915); } int i915_gem_init_early(struct drm_i915_private *dev_priv) @@ -4922,57 +4157,6 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old, } } -/* Allocate a new GEM object and fill it with the supplied data */ -struct drm_i915_gem_object * -i915_gem_object_create_from_data(struct drm_i915_private *dev_priv, - const void *data, size_t size) -{ - struct drm_i915_gem_object *obj; - struct file *file; - size_t offset; - int err; - - obj = i915_gem_object_create(dev_priv, round_up(size, PAGE_SIZE)); - if (IS_ERR(obj)) - return obj; - - GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU); - - file = obj->base.filp; - offset = 0; - do { - unsigned int len = min_t(typeof(size), size, PAGE_SIZE); - struct page *page; - void *pgdata, *vaddr; - - err = pagecache_write_begin(file, file->f_mapping, - offset, len, 0, - &page, &pgdata); - if (err < 0) - goto fail; - - vaddr = kmap(page); - memcpy(vaddr, data, len); - kunmap(page); - - err = pagecache_write_end(file, file->f_mapping, - offset, len, len, - page, pgdata); - if (err < 0) - goto fail; - - size -= len; - data += len; - offset += len; - } while (size); - - return obj; - -fail: - i915_gem_object_put(obj); - return ERR_PTR(err); -} - struct scatterlist * i915_gem_object_get_sg(struct drm_i915_gem_object *obj, unsigned int n, @@ -5147,7 +4331,7 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align) if (obj->ops == &i915_gem_phys_ops) return 0; - if (obj->ops != &i915_gem_object_ops) + if (obj->ops != &i915_gem_shmem_ops) return -EINVAL; err = i915_gem_object_unbind(obj); @@ -5183,12 +4367,12 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align) __i915_gem_object_pin_pages(obj); if (!IS_ERR_OR_NULL(pages)) - i915_gem_object_ops.put_pages(obj, pages); + i915_gem_shmem_ops.put_pages(obj, pages); mutex_unlock(&obj->mm.lock); return 0; err_xfer: - obj->ops = &i915_gem_object_ops; + obj->ops = &i915_gem_shmem_ops; if (!IS_ERR_OR_NULL(pages)) { unsigned int sg_page_sizes = i915_sg_page_sizes(pages->sgl); diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index c4995d5a16d2..379fd89a180f 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ 
b/drivers/gpu/drm/i915/i915_perf.c @@ -1510,7 +1510,7 @@ static int alloc_oa_buffer(struct drm_i915_private *dev_priv) BUILD_BUG_ON_NOT_POWER_OF_2(OA_BUFFER_SIZE); BUILD_BUG_ON(OA_BUFFER_SIZE < SZ_128K || OA_BUFFER_SIZE > SZ_16M); - bo = i915_gem_object_create(dev_priv, OA_BUFFER_SIZE); + bo = i915_gem_object_create_shmem(dev_priv, OA_BUFFER_SIZE); if (IS_ERR(bo)) { DRM_ERROR("Failed to allocate OA buffer\n"); ret = PTR_ERR(bo); diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index 89db71996148..0d3a6fa674e6 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c @@ -144,7 +144,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper, if (size * 2 < dev_priv->stolen_usable_size) obj = i915_gem_object_create_stolen(dev_priv, size); if (obj == NULL) - obj = i915_gem_object_create(dev_priv, size); + obj = i915_gem_object_create_shmem(dev_priv, size); if (IS_ERR(obj)) { DRM_ERROR("failed to allocate framebuffer\n"); ret = PTR_ERR(obj); diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c index cbe4b8df15fd..b88c349c4fa6 100644 --- a/drivers/gpu/drm/i915/intel_guc.c +++ b/drivers/gpu/drm/i915/intel_guc.c @@ -695,7 +695,7 @@ struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size) u64 flags; int ret; - obj = i915_gem_object_create(dev_priv, size); + obj = i915_gem_object_create_shmem(dev_priv, size); if (IS_ERR(obj)) return ERR_CAST(obj); diff --git a/drivers/gpu/drm/i915/intel_uc_fw.c b/drivers/gpu/drm/i915/intel_uc_fw.c index eca741a857a5..ec1e8c4deb4d 100644 --- a/drivers/gpu/drm/i915/intel_uc_fw.c +++ b/drivers/gpu/drm/i915/intel_uc_fw.c @@ -159,7 +159,8 @@ void intel_uc_fw_fetch(struct drm_i915_private *dev_priv, goto fail; } - obj = i915_gem_object_create_from_data(dev_priv, fw->data, fw->size); + obj = i915_gem_object_create_shmem_from_data(dev_priv, + fw->data, fw->size); if (IS_ERR(obj)) { err = PTR_ERR(obj); DRM_DEBUG_DRIVER("%s fw object_create err=%d\n", diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c index 1e1f83326a96..ce4ec87698f6 100644 --- a/drivers/gpu/drm/i915/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/selftests/huge_pages.c @@ -1390,7 +1390,7 @@ static int igt_ppgtt_gemfs_huge(void *arg) for (i = 0; i < ARRAY_SIZE(sizes); ++i) { unsigned int size = sizes[i]; - obj = i915_gem_object_create(i915, size); + obj = i915_gem_object_create_shmem(i915, size); if (IS_ERR(obj)) return PTR_ERR(obj); @@ -1568,7 +1568,7 @@ static int igt_tmpfs_fallback(void *arg) i915->mm.gemfs = NULL; - obj = i915_gem_object_create(i915, PAGE_SIZE); + obj = i915_gem_object_create_shmem(i915, PAGE_SIZE); if (IS_ERR(obj)) { err = PTR_ERR(obj); goto out_restore; @@ -1628,7 +1628,7 @@ static int igt_shrink_thp(void *arg) return 0; } - obj = i915_gem_object_create(i915, SZ_2M); + obj = i915_gem_object_create_shmem(i915, SZ_2M); if (IS_ERR(obj)) return PTR_ERR(obj); diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c index 2b943ee246c9..cc65a503e2f0 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c @@ -33,7 +33,7 @@ static int igt_dmabuf_export(void *arg) struct drm_i915_gem_object *obj; struct dma_buf *dmabuf; - obj = i915_gem_object_create(i915, PAGE_SIZE); + obj = i915_gem_object_create_shmem(i915, PAGE_SIZE); if (IS_ERR(obj)) return PTR_ERR(obj); @@ -57,7 +57,7 @@ static int igt_dmabuf_import_self(void 
*arg) struct dma_buf *dmabuf; int err; - obj = i915_gem_object_create(i915, PAGE_SIZE); + obj = i915_gem_object_create_shmem(i915, PAGE_SIZE); if (IS_ERR(obj)) return PTR_ERR(obj); @@ -232,7 +232,7 @@ static int igt_dmabuf_export_vmap(void *arg) void *ptr; int err; - obj = i915_gem_object_create(i915, PAGE_SIZE); + obj = i915_gem_object_create_shmem(i915, PAGE_SIZE); if (IS_ERR(obj)) return PTR_ERR(obj); @@ -279,7 +279,7 @@ static int igt_dmabuf_export_kmap(void *arg) void *ptr; int err; - obj = i915_gem_object_create(i915, 2*PAGE_SIZE); + obj = i915_gem_object_create_shmem(i915, 2 * PAGE_SIZE); if (IS_ERR(obj)) return PTR_ERR(obj); diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/selftests/i915_gem_object.c index b926d1cd165d..a0f59fb0d701 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_object.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_object.c @@ -36,7 +36,7 @@ static int igt_gem_object(void *arg) /* Basic test to ensure we can create an object */ - obj = i915_gem_object_create(i915, PAGE_SIZE); + obj = i915_gem_object_create_shmem(i915, PAGE_SIZE); if (IS_ERR(obj)) { err = PTR_ERR(obj); pr_err("i915_gem_object_create failed, err=%d\n", err); @@ -59,7 +59,7 @@ static int igt_phys_object(void *arg) * i.e. exercise the i915_gem_object_phys API. */ - obj = i915_gem_object_create(i915, PAGE_SIZE); + obj = i915_gem_object_create_shmem(i915, PAGE_SIZE); if (IS_ERR(obj)) { err = PTR_ERR(obj); pr_err("i915_gem_object_create failed, err=%d\n", err); -- cgit v1.2.3 From f033428db28bdff19105e6050de77f857dabf5b8 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 28 May 2019 10:29:46 +0100 Subject: drm/i915: Move phys objects to its own file Continuing the decluttering of i915_gem.c, this time the legacy physical object. 
Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190528092956.14910-5-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/Makefile | 2 + drivers/gpu/drm/i915/gem/i915_gem_object.h | 11 +- drivers/gpu/drm/i915/gem/i915_gem_object_types.h | 2 + drivers/gpu/drm/i915/gem/i915_gem_pages.c | 521 +++++++++++++++ drivers/gpu/drm/i915/gem/i915_gem_phys.c | 211 ++++++ drivers/gpu/drm/i915/gem/i915_gem_shmem.c | 61 ++ drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c | 80 +++ drivers/gpu/drm/i915/i915_drv.h | 2 - drivers/gpu/drm/i915/i915_gem.c | 712 +-------------------- drivers/gpu/drm/i915/i915_gem_shrinker.c | 59 +- drivers/gpu/drm/i915/selftests/i915_gem_object.c | 54 -- .../gpu/drm/i915/selftests/i915_mock_selftests.h | 1 + 12 files changed, 895 insertions(+), 821 deletions(-) create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_pages.c create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_phys.c create mode 100644 drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index ccf0520e8ad4..a584fff53e63 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -88,6 +88,8 @@ i915-y += $(gt-y) obj-y += gem/ gem-y += \ gem/i915_gem_object.o \ + gem/i915_gem_pages.o \ + gem/i915_gem_phys.o \ gem/i915_gem_shmem.o i915-y += \ $(gem-y) \ diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h index 05ef6b0076ae..88495530ebd9 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h @@ -33,11 +33,17 @@ void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj, struct sg_table *pages, bool needs_clflush); +int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align); + void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file); void i915_gem_free_object(struct drm_gem_object *obj); void i915_gem_flush_free_objects(struct drm_i915_private *i915); +struct sg_table * +__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj); +void i915_gem_object_truncate(struct drm_i915_gem_object *obj); + /** * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle * @filp: DRM file private date @@ -236,6 +242,8 @@ i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, struct sg_table *pages, unsigned int sg_page_sizes); + +int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj); int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj); static inline int __must_check @@ -291,7 +299,8 @@ enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */ int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj, enum i915_mm_subclass subclass); -void __i915_gem_object_truncate(struct drm_i915_gem_object *obj); +void i915_gem_object_truncate(struct drm_i915_gem_object *obj); +void i915_gem_object_writeback(struct drm_i915_gem_object *obj); enum i915_map_type { I915_MAP_WB = 0, diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h index fe3b2a2775f7..df8e29ee3943 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h @@ -52,6 +52,8 @@ struct drm_i915_gem_object_ops { int (*get_pages)(struct drm_i915_gem_object *obj); void (*put_pages)(struct 
drm_i915_gem_object *obj, struct sg_table *pages); + void (*truncate)(struct drm_i915_gem_object *obj); + void (*writeback)(struct drm_i915_gem_object *obj); int (*pwrite)(struct drm_i915_gem_object *obj, const struct drm_i915_gem_pwrite *arg); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c new file mode 100644 index 000000000000..3879b3669dea --- /dev/null +++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c @@ -0,0 +1,521 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2014-2016 Intel Corporation + */ + +#include "i915_drv.h" +#include "i915_gem_object.h" + +void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, + struct sg_table *pages, + unsigned int sg_page_sizes) +{ + struct drm_i915_private *i915 = to_i915(obj->base.dev); + unsigned long supported = INTEL_INFO(i915)->page_sizes; + int i; + + lockdep_assert_held(&obj->mm.lock); + + /* Make the pages coherent with the GPU (flushing any swapin). */ + if (obj->cache_dirty) { + obj->write_domain = 0; + if (i915_gem_object_has_struct_page(obj)) + drm_clflush_sg(pages); + obj->cache_dirty = false; + } + + obj->mm.get_page.sg_pos = pages->sgl; + obj->mm.get_page.sg_idx = 0; + + obj->mm.pages = pages; + + if (i915_gem_object_is_tiled(obj) && + i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) { + GEM_BUG_ON(obj->mm.quirked); + __i915_gem_object_pin_pages(obj); + obj->mm.quirked = true; + } + + GEM_BUG_ON(!sg_page_sizes); + obj->mm.page_sizes.phys = sg_page_sizes; + + /* + * Calculate the supported page-sizes which fit into the given + * sg_page_sizes. This will give us the page-sizes which we may be able + * to use opportunistically when later inserting into the GTT. For + * example if phys=2G, then in theory we should be able to use 1G, 2M, + * 64K or 4K pages, although in practice this will depend on a number of + * other factors. + */ + obj->mm.page_sizes.sg = 0; + for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) { + if (obj->mm.page_sizes.phys & ~0u << i) + obj->mm.page_sizes.sg |= BIT(i); + } + GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg)); + + spin_lock(&i915->mm.obj_lock); + list_add(&obj->mm.link, &i915->mm.unbound_list); + spin_unlock(&i915->mm.obj_lock); +} + +int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj) +{ + int err; + + if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) { + DRM_DEBUG("Attempting to obtain a purgeable object\n"); + return -EFAULT; + } + + err = obj->ops->get_pages(obj); + GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj)); + + return err; +} + +/* Ensure that the associated pages are gathered from the backing storage + * and pinned into our object. i915_gem_object_pin_pages() may be called + * multiple times before they are released by a single call to + * i915_gem_object_unpin_pages() - once the pages are no longer referenced + * either as a result of memory pressure (reaping pages under the shrinker) + * or as the object is itself released. 
+ */ +int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj) +{ + int err; + + err = mutex_lock_interruptible(&obj->mm.lock); + if (err) + return err; + + if (unlikely(!i915_gem_object_has_pages(obj))) { + GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj)); + + err = ____i915_gem_object_get_pages(obj); + if (err) + goto unlock; + + smp_mb__before_atomic(); + } + atomic_inc(&obj->mm.pages_pin_count); + +unlock: + mutex_unlock(&obj->mm.lock); + return err; +} + +/* Immediately discard the backing storage */ +void i915_gem_object_truncate(struct drm_i915_gem_object *obj) +{ + drm_gem_free_mmap_offset(&obj->base); + if (obj->ops->truncate) + obj->ops->truncate(obj); +} + +/* Try to discard unwanted pages */ +void i915_gem_object_writeback(struct drm_i915_gem_object *obj) +{ + lockdep_assert_held(&obj->mm.lock); + GEM_BUG_ON(i915_gem_object_has_pages(obj)); + + if (obj->ops->writeback) + obj->ops->writeback(obj); +} + +static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj) +{ + struct radix_tree_iter iter; + void __rcu **slot; + + rcu_read_lock(); + radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0) + radix_tree_delete(&obj->mm.get_page.radix, iter.index); + rcu_read_unlock(); +} + +struct sg_table * +__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj) +{ + struct drm_i915_private *i915 = to_i915(obj->base.dev); + struct sg_table *pages; + + pages = fetch_and_zero(&obj->mm.pages); + if (IS_ERR_OR_NULL(pages)) + return pages; + + spin_lock(&i915->mm.obj_lock); + list_del(&obj->mm.link); + spin_unlock(&i915->mm.obj_lock); + + if (obj->mm.mapping) { + void *ptr; + + ptr = page_mask_bits(obj->mm.mapping); + if (is_vmalloc_addr(ptr)) + vunmap(ptr); + else + kunmap(kmap_to_page(ptr)); + + obj->mm.mapping = NULL; + } + + __i915_gem_object_reset_page_iter(obj); + obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0; + + return pages; +} + +int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj, + enum i915_mm_subclass subclass) +{ + struct sg_table *pages; + int err; + + if (i915_gem_object_has_pinned_pages(obj)) + return -EBUSY; + + GEM_BUG_ON(obj->bind_count); + + /* May be called by shrinker from within get_pages() (on another bo) */ + mutex_lock_nested(&obj->mm.lock, subclass); + if (unlikely(atomic_read(&obj->mm.pages_pin_count))) { + err = -EBUSY; + goto unlock; + } + + /* + * ->put_pages might need to allocate memory for the bit17 swizzle + * array, hence protect them from being reaped by removing them from gtt + * lists early. + */ + pages = __i915_gem_object_unset_pages(obj); + + /* + * XXX Temporary hijinx to avoid updating all backends to handle + * NULL pages. In the future, when we have more asynchronous + * get_pages backends we should be better able to handle the + * cancellation of the async task in a more uniform manner. 
+ */ + if (!pages && !i915_gem_object_needs_async_cancel(obj)) + pages = ERR_PTR(-EINVAL); + + if (!IS_ERR(pages)) + obj->ops->put_pages(obj, pages); + + err = 0; +unlock: + mutex_unlock(&obj->mm.lock); + + return err; +} + +/* The 'mapping' part of i915_gem_object_pin_map() below */ +static void *i915_gem_object_map(const struct drm_i915_gem_object *obj, + enum i915_map_type type) +{ + unsigned long n_pages = obj->base.size >> PAGE_SHIFT; + struct sg_table *sgt = obj->mm.pages; + struct sgt_iter sgt_iter; + struct page *page; + struct page *stack_pages[32]; + struct page **pages = stack_pages; + unsigned long i = 0; + pgprot_t pgprot; + void *addr; + + /* A single page can always be kmapped */ + if (n_pages == 1 && type == I915_MAP_WB) + return kmap(sg_page(sgt->sgl)); + + if (n_pages > ARRAY_SIZE(stack_pages)) { + /* Too big for stack -- allocate temporary array instead */ + pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL); + if (!pages) + return NULL; + } + + for_each_sgt_page(page, sgt_iter, sgt) + pages[i++] = page; + + /* Check that we have the expected number of pages */ + GEM_BUG_ON(i != n_pages); + + switch (type) { + default: + MISSING_CASE(type); + /* fallthrough to use PAGE_KERNEL anyway */ + case I915_MAP_WB: + pgprot = PAGE_KERNEL; + break; + case I915_MAP_WC: + pgprot = pgprot_writecombine(PAGE_KERNEL_IO); + break; + } + addr = vmap(pages, n_pages, 0, pgprot); + + if (pages != stack_pages) + kvfree(pages); + + return addr; +} + +/* get, pin, and map the pages of the object into kernel space */ +void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj, + enum i915_map_type type) +{ + enum i915_map_type has_type; + bool pinned; + void *ptr; + int err; + + if (unlikely(!i915_gem_object_has_struct_page(obj))) + return ERR_PTR(-ENXIO); + + err = mutex_lock_interruptible(&obj->mm.lock); + if (err) + return ERR_PTR(err); + + pinned = !(type & I915_MAP_OVERRIDE); + type &= ~I915_MAP_OVERRIDE; + + if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) { + if (unlikely(!i915_gem_object_has_pages(obj))) { + GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj)); + + err = ____i915_gem_object_get_pages(obj); + if (err) + goto err_unlock; + + smp_mb__before_atomic(); + } + atomic_inc(&obj->mm.pages_pin_count); + pinned = false; + } + GEM_BUG_ON(!i915_gem_object_has_pages(obj)); + + ptr = page_unpack_bits(obj->mm.mapping, &has_type); + if (ptr && has_type != type) { + if (pinned) { + err = -EBUSY; + goto err_unpin; + } + + if (is_vmalloc_addr(ptr)) + vunmap(ptr); + else + kunmap(kmap_to_page(ptr)); + + ptr = obj->mm.mapping = NULL; + } + + if (!ptr) { + ptr = i915_gem_object_map(obj, type); + if (!ptr) { + err = -ENOMEM; + goto err_unpin; + } + + obj->mm.mapping = page_pack_bits(ptr, type); + } + +out_unlock: + mutex_unlock(&obj->mm.lock); + return ptr; + +err_unpin: + atomic_dec(&obj->mm.pages_pin_count); +err_unlock: + ptr = ERR_PTR(err); + goto out_unlock; +} + +void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj, + unsigned long offset, + unsigned long size) +{ + enum i915_map_type has_type; + void *ptr; + + GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); + GEM_BUG_ON(range_overflows_t(typeof(obj->base.size), + offset, size, obj->base.size)); + + obj->mm.dirty = true; + + if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE) + return; + + ptr = page_unpack_bits(obj->mm.mapping, &has_type); + if (has_type == I915_MAP_WC) + return; + + drm_clflush_virt_range(ptr + offset, size); + if (size == obj->base.size) { + obj->write_domain &= ~I915_GEM_DOMAIN_CPU; + 
obj->cache_dirty = false; + } +} + +struct scatterlist * +i915_gem_object_get_sg(struct drm_i915_gem_object *obj, + unsigned int n, + unsigned int *offset) +{ + struct i915_gem_object_page_iter *iter = &obj->mm.get_page; + struct scatterlist *sg; + unsigned int idx, count; + + might_sleep(); + GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT); + GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); + + /* As we iterate forward through the sg, we record each entry in a + * radixtree for quick repeated (backwards) lookups. If we have seen + * this index previously, we will have an entry for it. + * + * Initial lookup is O(N), but this is amortized to O(1) for + * sequential page access (where each new request is consecutive + * to the previous one). Repeated lookups are O(lg(obj->base.size)), + * i.e. O(1) with a large constant! + */ + if (n < READ_ONCE(iter->sg_idx)) + goto lookup; + + mutex_lock(&iter->lock); + + /* We prefer to reuse the last sg so that repeated lookup of this + * (or the subsequent) sg are fast - comparing against the last + * sg is faster than going through the radixtree. + */ + + sg = iter->sg_pos; + idx = iter->sg_idx; + count = __sg_page_count(sg); + + while (idx + count <= n) { + void *entry; + unsigned long i; + int ret; + + /* If we cannot allocate and insert this entry, or the + * individual pages from this range, cancel updating the + * sg_idx so that on this lookup we are forced to linearly + * scan onwards, but on future lookups we will try the + * insertion again (in which case we need to be careful of + * the error return reporting that we have already inserted + * this index). + */ + ret = radix_tree_insert(&iter->radix, idx, sg); + if (ret && ret != -EEXIST) + goto scan; + + entry = xa_mk_value(idx); + for (i = 1; i < count; i++) { + ret = radix_tree_insert(&iter->radix, idx + i, entry); + if (ret && ret != -EEXIST) + goto scan; + } + + idx += count; + sg = ____sg_next(sg); + count = __sg_page_count(sg); + } + +scan: + iter->sg_pos = sg; + iter->sg_idx = idx; + + mutex_unlock(&iter->lock); + + if (unlikely(n < idx)) /* insertion completed by another thread */ + goto lookup; + + /* In case we failed to insert the entry into the radixtree, we need + * to look beyond the current sg. + */ + while (idx + count <= n) { + idx += count; + sg = ____sg_next(sg); + count = __sg_page_count(sg); + } + + *offset = n - idx; + return sg; + +lookup: + rcu_read_lock(); + + sg = radix_tree_lookup(&iter->radix, n); + GEM_BUG_ON(!sg); + + /* If this index is in the middle of multi-page sg entry, + * the radix tree will contain a value entry that points + * to the start of that range. We will return the pointer to + * the base page and the offset of this page within the + * sg entry's range. 
+ */ + *offset = 0; + if (unlikely(xa_is_value(sg))) { + unsigned long base = xa_to_value(sg); + + sg = radix_tree_lookup(&iter->radix, base); + GEM_BUG_ON(!sg); + + *offset = n - base; + } + + rcu_read_unlock(); + + return sg; +} + +struct page * +i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n) +{ + struct scatterlist *sg; + unsigned int offset; + + GEM_BUG_ON(!i915_gem_object_has_struct_page(obj)); + + sg = i915_gem_object_get_sg(obj, n, &offset); + return nth_page(sg_page(sg), offset); +} + +/* Like i915_gem_object_get_page(), but mark the returned page dirty */ +struct page * +i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, + unsigned int n) +{ + struct page *page; + + page = i915_gem_object_get_page(obj, n); + if (!obj->mm.dirty) + set_page_dirty(page); + + return page; +} + +dma_addr_t +i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj, + unsigned long n, + unsigned int *len) +{ + struct scatterlist *sg; + unsigned int offset; + + sg = i915_gem_object_get_sg(obj, n, &offset); + + if (len) + *len = sg_dma_len(sg) - (offset << PAGE_SHIFT); + + return sg_dma_address(sg) + (offset << PAGE_SHIFT); +} + +dma_addr_t +i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, + unsigned long n) +{ + return i915_gem_object_get_dma_address_len(obj, n, NULL); +} diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c new file mode 100644 index 000000000000..1c0ce69f765b --- /dev/null +++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c @@ -0,0 +1,211 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2014-2016 Intel Corporation + */ + +#include +#include +#include + +#include /* for drm_legacy.h! */ +#include +#include /* for drm_pci.h! */ +#include + +#include "i915_drv.h" +#include "i915_gem_object.h" + +static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj) +{ + struct address_space *mapping = obj->base.filp->f_mapping; + struct drm_dma_handle *phys; + struct sg_table *st; + struct scatterlist *sg; + char *vaddr; + int i; + int err; + + if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj))) + return -EINVAL; + + /* Always aligning to the object size, allows a single allocation + * to handle all possible callers, and given typical object sizes, + * the alignment of the buddy allocation will naturally match. 
+ */ + phys = drm_pci_alloc(obj->base.dev, + roundup_pow_of_two(obj->base.size), + roundup_pow_of_two(obj->base.size)); + if (!phys) + return -ENOMEM; + + vaddr = phys->vaddr; + for (i = 0; i < obj->base.size / PAGE_SIZE; i++) { + struct page *page; + char *src; + + page = shmem_read_mapping_page(mapping, i); + if (IS_ERR(page)) { + err = PTR_ERR(page); + goto err_phys; + } + + src = kmap_atomic(page); + memcpy(vaddr, src, PAGE_SIZE); + drm_clflush_virt_range(vaddr, PAGE_SIZE); + kunmap_atomic(src); + + put_page(page); + vaddr += PAGE_SIZE; + } + + i915_gem_chipset_flush(to_i915(obj->base.dev)); + + st = kmalloc(sizeof(*st), GFP_KERNEL); + if (!st) { + err = -ENOMEM; + goto err_phys; + } + + if (sg_alloc_table(st, 1, GFP_KERNEL)) { + kfree(st); + err = -ENOMEM; + goto err_phys; + } + + sg = st->sgl; + sg->offset = 0; + sg->length = obj->base.size; + + sg_dma_address(sg) = phys->busaddr; + sg_dma_len(sg) = obj->base.size; + + obj->phys_handle = phys; + + __i915_gem_object_set_pages(obj, st, sg->length); + + return 0; + +err_phys: + drm_pci_free(obj->base.dev, phys); + + return err; +} + +static void +i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj, + struct sg_table *pages) +{ + __i915_gem_object_release_shmem(obj, pages, false); + + if (obj->mm.dirty) { + struct address_space *mapping = obj->base.filp->f_mapping; + char *vaddr = obj->phys_handle->vaddr; + int i; + + for (i = 0; i < obj->base.size / PAGE_SIZE; i++) { + struct page *page; + char *dst; + + page = shmem_read_mapping_page(mapping, i); + if (IS_ERR(page)) + continue; + + dst = kmap_atomic(page); + drm_clflush_virt_range(vaddr, PAGE_SIZE); + memcpy(dst, vaddr, PAGE_SIZE); + kunmap_atomic(dst); + + set_page_dirty(page); + if (obj->mm.madv == I915_MADV_WILLNEED) + mark_page_accessed(page); + put_page(page); + vaddr += PAGE_SIZE; + } + obj->mm.dirty = false; + } + + sg_free_table(pages); + kfree(pages); + + drm_pci_free(obj->base.dev, obj->phys_handle); +} + +static void +i915_gem_object_release_phys(struct drm_i915_gem_object *obj) +{ + i915_gem_object_unpin_pages(obj); +} + +static const struct drm_i915_gem_object_ops i915_gem_phys_ops = { + .get_pages = i915_gem_object_get_pages_phys, + .put_pages = i915_gem_object_put_pages_phys, + .release = i915_gem_object_release_phys, +}; + +int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align) +{ + struct sg_table *pages; + int err; + + if (align > obj->base.size) + return -EINVAL; + + if (obj->ops == &i915_gem_phys_ops) + return 0; + + if (obj->ops != &i915_gem_shmem_ops) + return -EINVAL; + + err = i915_gem_object_unbind(obj); + if (err) + return err; + + mutex_lock(&obj->mm.lock); + + if (obj->mm.madv != I915_MADV_WILLNEED) { + err = -EFAULT; + goto err_unlock; + } + + if (obj->mm.quirked) { + err = -EFAULT; + goto err_unlock; + } + + if (obj->mm.mapping) { + err = -EBUSY; + goto err_unlock; + } + + pages = __i915_gem_object_unset_pages(obj); + + obj->ops = &i915_gem_phys_ops; + + err = ____i915_gem_object_get_pages(obj); + if (err) + goto err_xfer; + + /* Perma-pin (until release) the physical set of pages */ + __i915_gem_object_pin_pages(obj); + + if (!IS_ERR_OR_NULL(pages)) + i915_gem_shmem_ops.put_pages(obj, pages); + mutex_unlock(&obj->mm.lock); + return 0; + +err_xfer: + obj->ops = &i915_gem_shmem_ops; + if (!IS_ERR_OR_NULL(pages)) { + unsigned int sg_page_sizes = i915_sg_page_sizes(pages->sgl); + + __i915_gem_object_set_pages(obj, pages, sg_page_sizes); + } +err_unlock: + mutex_unlock(&obj->mm.lock); + return err; +} + +#if 
IS_ENABLED(CONFIG_DRM_I915_SELFTEST) +#include "selftests/i915_gem_phys.c" +#endif diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c index e2721bd9ab44..568164ca66fd 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c @@ -213,6 +213,65 @@ err_pages: return ret; } +static void +shmem_truncate(struct drm_i915_gem_object *obj) +{ + /* + * Our goal here is to return as much of the memory as + * is possible back to the system as we are called from OOM. + * To do this we must instruct the shmfs to drop all of its + * backing pages, *now*. + */ + shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1); + obj->mm.madv = __I915_MADV_PURGED; + obj->mm.pages = ERR_PTR(-EFAULT); +} + +static void +shmem_writeback(struct drm_i915_gem_object *obj) +{ + struct address_space *mapping; + struct writeback_control wbc = { + .sync_mode = WB_SYNC_NONE, + .nr_to_write = SWAP_CLUSTER_MAX, + .range_start = 0, + .range_end = LLONG_MAX, + .for_reclaim = 1, + }; + unsigned long i; + + /* + * Leave mmapings intact (GTT will have been revoked on unbinding, + * leaving only CPU mmapings around) and add those pages to the LRU + * instead of invoking writeback so they are aged and paged out + * as normal. + */ + mapping = obj->base.filp->f_mapping; + + /* Begin writeback on each dirty page */ + for (i = 0; i < obj->base.size >> PAGE_SHIFT; i++) { + struct page *page; + + page = find_lock_entry(mapping, i); + if (!page || xa_is_value(page)) + continue; + + if (!page_mapped(page) && clear_page_dirty_for_io(page)) { + int ret; + + SetPageReclaim(page); + ret = mapping->a_ops->writepage(page, &wbc); + if (!PageWriteback(page)) + ClearPageReclaim(page); + if (!ret) + goto put; + } + unlock_page(page); +put: + put_page(page); + } +} + void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj, struct sg_table *pages, @@ -362,6 +421,8 @@ const struct drm_i915_gem_object_ops i915_gem_shmem_ops = { .get_pages = shmem_get_pages, .put_pages = shmem_put_pages, + .truncate = shmem_truncate, + .writeback = shmem_writeback, .pwrite = shmem_pwrite, }; diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c new file mode 100644 index 000000000000..ed64012e5d24 --- /dev/null +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c @@ -0,0 +1,80 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2016 Intel Corporation + */ + +#include "i915_selftest.h" + +#include "selftests/mock_gem_device.h" + +static int mock_phys_object(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct drm_i915_gem_object *obj; + int err; + + /* Create an object and bind it to a contiguous set of physical pages, + * i.e. exercise the i915_gem_object_phys API. 
+ */ + + obj = i915_gem_object_create_shmem(i915, PAGE_SIZE); + if (IS_ERR(obj)) { + err = PTR_ERR(obj); + pr_err("i915_gem_object_create failed, err=%d\n", err); + goto out; + } + + mutex_lock(&i915->drm.struct_mutex); + err = i915_gem_object_attach_phys(obj, PAGE_SIZE); + mutex_unlock(&i915->drm.struct_mutex); + if (err) { + pr_err("i915_gem_object_attach_phys failed, err=%d\n", err); + goto out_obj; + } + + if (obj->ops != &i915_gem_phys_ops) { + pr_err("i915_gem_object_attach_phys did not create a phys object\n"); + err = -EINVAL; + goto out_obj; + } + + if (!atomic_read(&obj->mm.pages_pin_count)) { + pr_err("i915_gem_object_attach_phys did not pin its phys pages\n"); + err = -EINVAL; + goto out_obj; + } + + /* Make the object dirty so that put_pages must do copy back the data */ + mutex_lock(&i915->drm.struct_mutex); + err = i915_gem_object_set_to_gtt_domain(obj, true); + mutex_unlock(&i915->drm.struct_mutex); + if (err) { + pr_err("i915_gem_object_set_to_gtt_domain failed with err=%d\n", + err); + goto out_obj; + } + +out_obj: + i915_gem_object_put(obj); +out: + return err; +} + +int i915_gem_phys_mock_selftests(void) +{ + static const struct i915_subtest tests[] = { + SUBTEST(mock_phys_object), + }; + struct drm_i915_private *i915; + int err; + + i915 = mock_gem_device(); + if (!i915) + return -ENOMEM; + + err = i915_subtests(tests, i915); + + drm_dev_put(&i915->drm); + return err; +} diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 507dba5dae9a..0f018fd58a60 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2903,8 +2903,6 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, const struct i915_ggtt_view *view, unsigned int flags); void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma); -int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, - int align); int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file); void i915_gem_release(struct drm_device *dev, struct drm_file *file); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 98ac2bbb6534..6f3e9394e806 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -26,7 +26,6 @@ */ #include -#include #include #include #include @@ -99,133 +98,6 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, return 0; } -static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj) -{ - struct address_space *mapping = obj->base.filp->f_mapping; - drm_dma_handle_t *phys; - struct sg_table *st; - struct scatterlist *sg; - char *vaddr; - int i; - int err; - - if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj))) - return -EINVAL; - - /* Always aligning to the object size, allows a single allocation - * to handle all possible callers, and given typical object sizes, - * the alignment of the buddy allocation will naturally match. 
- */ - phys = drm_pci_alloc(obj->base.dev, - roundup_pow_of_two(obj->base.size), - roundup_pow_of_two(obj->base.size)); - if (!phys) - return -ENOMEM; - - vaddr = phys->vaddr; - for (i = 0; i < obj->base.size / PAGE_SIZE; i++) { - struct page *page; - char *src; - - page = shmem_read_mapping_page(mapping, i); - if (IS_ERR(page)) { - err = PTR_ERR(page); - goto err_phys; - } - - src = kmap_atomic(page); - memcpy(vaddr, src, PAGE_SIZE); - drm_clflush_virt_range(vaddr, PAGE_SIZE); - kunmap_atomic(src); - - put_page(page); - vaddr += PAGE_SIZE; - } - - i915_gem_chipset_flush(to_i915(obj->base.dev)); - - st = kmalloc(sizeof(*st), GFP_KERNEL); - if (!st) { - err = -ENOMEM; - goto err_phys; - } - - if (sg_alloc_table(st, 1, GFP_KERNEL)) { - kfree(st); - err = -ENOMEM; - goto err_phys; - } - - sg = st->sgl; - sg->offset = 0; - sg->length = obj->base.size; - - sg_dma_address(sg) = phys->busaddr; - sg_dma_len(sg) = obj->base.size; - - obj->phys_handle = phys; - - __i915_gem_object_set_pages(obj, st, sg->length); - - return 0; - -err_phys: - drm_pci_free(obj->base.dev, phys); - - return err; -} - -static void -i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj, - struct sg_table *pages) -{ - __i915_gem_object_release_shmem(obj, pages, false); - - if (obj->mm.dirty) { - struct address_space *mapping = obj->base.filp->f_mapping; - char *vaddr = obj->phys_handle->vaddr; - int i; - - for (i = 0; i < obj->base.size / PAGE_SIZE; i++) { - struct page *page; - char *dst; - - page = shmem_read_mapping_page(mapping, i); - if (IS_ERR(page)) - continue; - - dst = kmap_atomic(page); - drm_clflush_virt_range(vaddr, PAGE_SIZE); - memcpy(dst, vaddr, PAGE_SIZE); - kunmap_atomic(dst); - - set_page_dirty(page); - if (obj->mm.madv == I915_MADV_WILLNEED) - mark_page_accessed(page); - put_page(page); - vaddr += PAGE_SIZE; - } - obj->mm.dirty = false; - } - - sg_free_table(pages); - kfree(pages); - - drm_pci_free(obj->base.dev, obj->phys_handle); -} - -static void -i915_gem_object_release_phys(struct drm_i915_gem_object *obj) -{ - i915_gem_object_unpin_pages(obj); -} - -static const struct drm_i915_gem_object_ops i915_gem_phys_ops = { - .get_pages = i915_gem_object_get_pages_phys, - .put_pages = i915_gem_object_put_pages_phys, - .release = i915_gem_object_release_phys, -}; - int i915_gem_object_unbind(struct drm_i915_gem_object *obj) { struct i915_vma *vma; @@ -1964,11 +1836,6 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj) return err; } -static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj) -{ - drm_gem_free_mmap_offset(&obj->base); -} - int i915_gem_mmap_gtt(struct drm_file *file, struct drm_device *dev, @@ -2014,111 +1881,6 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset); } -/* Immediately discard the backing storage */ -void __i915_gem_object_truncate(struct drm_i915_gem_object *obj) -{ - i915_gem_object_free_mmap_offset(obj); - - if (obj->base.filp == NULL) - return; - - /* Our goal here is to return as much of the memory as - * is possible back to the system as we are called from OOM. - * To do this we must instruct the shmfs to drop all of its - * backing pages, *now*. 
- */ - shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1); - obj->mm.madv = __I915_MADV_PURGED; - obj->mm.pages = ERR_PTR(-EFAULT); -} - -static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj) -{ - struct radix_tree_iter iter; - void __rcu **slot; - - rcu_read_lock(); - radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0) - radix_tree_delete(&obj->mm.get_page.radix, iter.index); - rcu_read_unlock(); -} - -static struct sg_table * -__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj) -{ - struct drm_i915_private *i915 = to_i915(obj->base.dev); - struct sg_table *pages; - - pages = fetch_and_zero(&obj->mm.pages); - if (IS_ERR_OR_NULL(pages)) - return pages; - - spin_lock(&i915->mm.obj_lock); - list_del(&obj->mm.link); - spin_unlock(&i915->mm.obj_lock); - - if (obj->mm.mapping) { - void *ptr; - - ptr = page_mask_bits(obj->mm.mapping); - if (is_vmalloc_addr(ptr)) - vunmap(ptr); - else - kunmap(kmap_to_page(ptr)); - - obj->mm.mapping = NULL; - } - - __i915_gem_object_reset_page_iter(obj); - obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0; - - return pages; -} - -int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj, - enum i915_mm_subclass subclass) -{ - struct sg_table *pages; - int ret; - - if (i915_gem_object_has_pinned_pages(obj)) - return -EBUSY; - - GEM_BUG_ON(obj->bind_count); - - /* May be called by shrinker from within get_pages() (on another bo) */ - mutex_lock_nested(&obj->mm.lock, subclass); - if (unlikely(atomic_read(&obj->mm.pages_pin_count))) { - ret = -EBUSY; - goto unlock; - } - - /* - * ->put_pages might need to allocate memory for the bit17 swizzle - * array, hence protect them from being reaped by removing them from gtt - * lists early. - */ - pages = __i915_gem_object_unset_pages(obj); - - /* - * XXX Temporary hijinx to avoid updating all backends to handle - * NULL pages. In the future, when we have more asynchronous - * get_pages backends we should be better able to handle the - * cancellation of the async task in a more uniform manner. - */ - if (!pages && !i915_gem_object_needs_async_cancel(obj)) - pages = ERR_PTR(-EINVAL); - - if (!IS_ERR(pages)) - obj->ops->put_pages(obj, pages); - - ret = 0; -unlock: - mutex_unlock(&obj->mm.lock); - - return ret; -} - bool i915_sg_trim(struct sg_table *orig_st) { struct sg_table new_st; @@ -2147,252 +1909,6 @@ bool i915_sg_trim(struct sg_table *orig_st) return true; } -void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, - struct sg_table *pages, - unsigned int sg_page_sizes) -{ - struct drm_i915_private *i915 = to_i915(obj->base.dev); - unsigned long supported = INTEL_INFO(i915)->page_sizes; - int i; - - lockdep_assert_held(&obj->mm.lock); - - /* Make the pages coherent with the GPU (flushing any swapin). */ - if (obj->cache_dirty) { - obj->write_domain = 0; - if (i915_gem_object_has_struct_page(obj)) - drm_clflush_sg(pages); - obj->cache_dirty = false; - } - - obj->mm.get_page.sg_pos = pages->sgl; - obj->mm.get_page.sg_idx = 0; - - obj->mm.pages = pages; - - if (i915_gem_object_is_tiled(obj) && - i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) { - GEM_BUG_ON(obj->mm.quirked); - __i915_gem_object_pin_pages(obj); - obj->mm.quirked = true; - } - - GEM_BUG_ON(!sg_page_sizes); - obj->mm.page_sizes.phys = sg_page_sizes; - - /* - * Calculate the supported page-sizes which fit into the given - * sg_page_sizes. This will give us the page-sizes which we may be able - * to use opportunistically when later inserting into the GTT. 
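/*
 * [Editor's sketch, not part of the patch] The loop just below derives which
 * of the platform's supported GTT page sizes can actually be used: a page
 * size 2^i is usable when the sg list contains at least one chunk of that
 * size or larger.  The same bit trick, stand-alone with made-up masks:
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint64_t SZ_4K = 1ull << 12, SZ_64K = 1ull << 16, SZ_2M = 1ull << 21;
	uint64_t supported = SZ_4K | SZ_64K | SZ_2M; /* what the GTT can map */
	uint64_t phys = SZ_4K | SZ_64K;              /* chunk sizes in the sg list */
	uint64_t sg = 0;

	for (unsigned int i = 0; i < 64; i++) {
		if (!(supported & (1ull << i)))
			continue;
		if (phys & (~0ull << i))	/* some chunk >= 2^i exists */
			sg |= 1ull << i;
	}
	printf("usable page sizes: %#llx\n", (unsigned long long)sg); /* 4K | 64K */
	return 0;
}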
For - * example if phys=2G, then in theory we should be able to use 1G, 2M, - * 64K or 4K pages, although in practice this will depend on a number of - * other factors. - */ - obj->mm.page_sizes.sg = 0; - for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) { - if (obj->mm.page_sizes.phys & ~0u << i) - obj->mm.page_sizes.sg |= BIT(i); - } - GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg)); - - spin_lock(&i915->mm.obj_lock); - list_add(&obj->mm.link, &i915->mm.unbound_list); - spin_unlock(&i915->mm.obj_lock); -} - -static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj) -{ - int err; - - if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) { - DRM_DEBUG("Attempting to obtain a purgeable object\n"); - return -EFAULT; - } - - err = obj->ops->get_pages(obj); - GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj)); - - return err; -} - -/* Ensure that the associated pages are gathered from the backing storage - * and pinned into our object. i915_gem_object_pin_pages() may be called - * multiple times before they are released by a single call to - * i915_gem_object_unpin_pages() - once the pages are no longer referenced - * either as a result of memory pressure (reaping pages under the shrinker) - * or as the object is itself released. - */ -int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj) -{ - int err; - - err = mutex_lock_interruptible(&obj->mm.lock); - if (err) - return err; - - if (unlikely(!i915_gem_object_has_pages(obj))) { - GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj)); - - err = ____i915_gem_object_get_pages(obj); - if (err) - goto unlock; - - smp_mb__before_atomic(); - } - atomic_inc(&obj->mm.pages_pin_count); - -unlock: - mutex_unlock(&obj->mm.lock); - return err; -} - -/* The 'mapping' part of i915_gem_object_pin_map() below */ -static void *i915_gem_object_map(const struct drm_i915_gem_object *obj, - enum i915_map_type type) -{ - unsigned long n_pages = obj->base.size >> PAGE_SHIFT; - struct sg_table *sgt = obj->mm.pages; - struct sgt_iter sgt_iter; - struct page *page; - struct page *stack_pages[32]; - struct page **pages = stack_pages; - unsigned long i = 0; - pgprot_t pgprot; - void *addr; - - /* A single page can always be kmapped */ - if (n_pages == 1 && type == I915_MAP_WB) - return kmap(sg_page(sgt->sgl)); - - if (n_pages > ARRAY_SIZE(stack_pages)) { - /* Too big for stack -- allocate temporary array instead */ - pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL); - if (!pages) - return NULL; - } - - for_each_sgt_page(page, sgt_iter, sgt) - pages[i++] = page; - - /* Check that we have the expected number of pages */ - GEM_BUG_ON(i != n_pages); - - switch (type) { - default: - MISSING_CASE(type); - /* fallthrough to use PAGE_KERNEL anyway */ - case I915_MAP_WB: - pgprot = PAGE_KERNEL; - break; - case I915_MAP_WC: - pgprot = pgprot_writecombine(PAGE_KERNEL_IO); - break; - } - addr = vmap(pages, n_pages, 0, pgprot); - - if (pages != stack_pages) - kvfree(pages); - - return addr; -} - -/* get, pin, and map the pages of the object into kernel space */ -void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj, - enum i915_map_type type) -{ - enum i915_map_type has_type; - bool pinned; - void *ptr; - int ret; - - if (unlikely(!i915_gem_object_has_struct_page(obj))) - return ERR_PTR(-ENXIO); - - ret = mutex_lock_interruptible(&obj->mm.lock); - if (ret) - return ERR_PTR(ret); - - pinned = !(type & I915_MAP_OVERRIDE); - type &= ~I915_MAP_OVERRIDE; - - if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) { - if 
(unlikely(!i915_gem_object_has_pages(obj))) { - GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj)); - - ret = ____i915_gem_object_get_pages(obj); - if (ret) - goto err_unlock; - - smp_mb__before_atomic(); - } - atomic_inc(&obj->mm.pages_pin_count); - pinned = false; - } - GEM_BUG_ON(!i915_gem_object_has_pages(obj)); - - ptr = page_unpack_bits(obj->mm.mapping, &has_type); - if (ptr && has_type != type) { - if (pinned) { - ret = -EBUSY; - goto err_unpin; - } - - if (is_vmalloc_addr(ptr)) - vunmap(ptr); - else - kunmap(kmap_to_page(ptr)); - - ptr = obj->mm.mapping = NULL; - } - - if (!ptr) { - ptr = i915_gem_object_map(obj, type); - if (!ptr) { - ret = -ENOMEM; - goto err_unpin; - } - - obj->mm.mapping = page_pack_bits(ptr, type); - } - -out_unlock: - mutex_unlock(&obj->mm.lock); - return ptr; - -err_unpin: - atomic_dec(&obj->mm.pages_pin_count); -err_unlock: - ptr = ERR_PTR(ret); - goto out_unlock; -} - -void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj, - unsigned long offset, - unsigned long size) -{ - enum i915_map_type has_type; - void *ptr; - - GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); - GEM_BUG_ON(range_overflows_t(typeof(obj->base.size), - offset, size, obj->base.size)); - - obj->mm.dirty = true; - - if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE) - return; - - ptr = page_unpack_bits(obj->mm.mapping, &has_type); - if (has_type == I915_MAP_WC) - return; - - drm_clflush_virt_range(ptr + offset, size); - if (size == obj->base.size) { - obj->write_domain &= ~I915_GEM_DOMAIN_CPU; - obj->cache_dirty = false; - } -} - static unsigned long to_wait_timeout(s64 timeout_ns) { if (timeout_ns < 0) @@ -3381,7 +2897,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, /* if the object is no longer attached, discard its backing storage */ if (obj->mm.madv == I915_MADV_DONTNEED && !i915_gem_object_has_pages(obj)) - __i915_gem_object_truncate(obj); + i915_gem_object_truncate(obj); args->retained = obj->mm.madv != __I915_MADV_PURGED; mutex_unlock(&obj->mm.lock); @@ -4157,232 +3673,6 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old, } } -struct scatterlist * -i915_gem_object_get_sg(struct drm_i915_gem_object *obj, - unsigned int n, - unsigned int *offset) -{ - struct i915_gem_object_page_iter *iter = &obj->mm.get_page; - struct scatterlist *sg; - unsigned int idx, count; - - might_sleep(); - GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT); - GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); - - /* As we iterate forward through the sg, we record each entry in a - * radixtree for quick repeated (backwards) lookups. If we have seen - * this index previously, we will have an entry for it. - * - * Initial lookup is O(N), but this is amortized to O(1) for - * sequential page access (where each new request is consecutive - * to the previous one). Repeated lookups are O(lg(obj->base.size)), - * i.e. O(1) with a large constant! - */ - if (n < READ_ONCE(iter->sg_idx)) - goto lookup; - - mutex_lock(&iter->lock); - - /* We prefer to reuse the last sg so that repeated lookup of this - * (or the subsequent) sg are fast - comparing against the last - * sg is faster than going through the radixtree. 
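/*
 * [Editor's sketch, illustrative only] The scan below keeps a cursor
 * (sg_pos/sg_idx) that remembers the last scatterlist position reached, so
 * the common sequential lookup only steps forward from where it left off.
 * A stand-alone model of that cursor over an array of variable-sized chunks;
 * the radix tree used above for random/backwards lookups is left out:
 */
#include <stdio.h>

struct cursor { unsigned int chunk; unsigned int base; };

/* page counts of each "sg" chunk */
static const unsigned int chunk_pages[] = { 4, 1, 8, 2, 16 };

static unsigned int lookup(struct cursor *c, unsigned int n)
{
	if (n < c->base) {		/* going backwards: restart the scan */
		c->chunk = 0;
		c->base = 0;
	}
	while (c->base + chunk_pages[c->chunk] <= n) {
		c->base += chunk_pages[c->chunk];
		c->chunk++;
	}
	return c->chunk;		/* page n lives in this chunk */
}

int main(void)
{
	struct cursor c = { 0, 0 };

	for (unsigned int n = 0; n < 31; n += 5)
		printf("page %2u -> chunk %u\n", n, lookup(&c, n));
	return 0;
}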
- */ - - sg = iter->sg_pos; - idx = iter->sg_idx; - count = __sg_page_count(sg); - - while (idx + count <= n) { - void *entry; - unsigned long i; - int ret; - - /* If we cannot allocate and insert this entry, or the - * individual pages from this range, cancel updating the - * sg_idx so that on this lookup we are forced to linearly - * scan onwards, but on future lookups we will try the - * insertion again (in which case we need to be careful of - * the error return reporting that we have already inserted - * this index). - */ - ret = radix_tree_insert(&iter->radix, idx, sg); - if (ret && ret != -EEXIST) - goto scan; - - entry = xa_mk_value(idx); - for (i = 1; i < count; i++) { - ret = radix_tree_insert(&iter->radix, idx + i, entry); - if (ret && ret != -EEXIST) - goto scan; - } - - idx += count; - sg = ____sg_next(sg); - count = __sg_page_count(sg); - } - -scan: - iter->sg_pos = sg; - iter->sg_idx = idx; - - mutex_unlock(&iter->lock); - - if (unlikely(n < idx)) /* insertion completed by another thread */ - goto lookup; - - /* In case we failed to insert the entry into the radixtree, we need - * to look beyond the current sg. - */ - while (idx + count <= n) { - idx += count; - sg = ____sg_next(sg); - count = __sg_page_count(sg); - } - - *offset = n - idx; - return sg; - -lookup: - rcu_read_lock(); - - sg = radix_tree_lookup(&iter->radix, n); - GEM_BUG_ON(!sg); - - /* If this index is in the middle of multi-page sg entry, - * the radix tree will contain a value entry that points - * to the start of that range. We will return the pointer to - * the base page and the offset of this page within the - * sg entry's range. - */ - *offset = 0; - if (unlikely(xa_is_value(sg))) { - unsigned long base = xa_to_value(sg); - - sg = radix_tree_lookup(&iter->radix, base); - GEM_BUG_ON(!sg); - - *offset = n - base; - } - - rcu_read_unlock(); - - return sg; -} - -struct page * -i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n) -{ - struct scatterlist *sg; - unsigned int offset; - - GEM_BUG_ON(!i915_gem_object_has_struct_page(obj)); - - sg = i915_gem_object_get_sg(obj, n, &offset); - return nth_page(sg_page(sg), offset); -} - -/* Like i915_gem_object_get_page(), but mark the returned page dirty */ -struct page * -i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, - unsigned int n) -{ - struct page *page; - - page = i915_gem_object_get_page(obj, n); - if (!obj->mm.dirty) - set_page_dirty(page); - - return page; -} - -dma_addr_t -i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj, - unsigned long n, - unsigned int *len) -{ - struct scatterlist *sg; - unsigned int offset; - - sg = i915_gem_object_get_sg(obj, n, &offset); - - if (len) - *len = sg_dma_len(sg) - (offset << PAGE_SHIFT); - - return sg_dma_address(sg) + (offset << PAGE_SHIFT); -} - -dma_addr_t -i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, - unsigned long n) -{ - return i915_gem_object_get_dma_address_len(obj, n, NULL); -} - - -int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align) -{ - struct sg_table *pages; - int err; - - if (align > obj->base.size) - return -EINVAL; - - if (obj->ops == &i915_gem_phys_ops) - return 0; - - if (obj->ops != &i915_gem_shmem_ops) - return -EINVAL; - - err = i915_gem_object_unbind(obj); - if (err) - return err; - - mutex_lock(&obj->mm.lock); - - if (obj->mm.madv != I915_MADV_WILLNEED) { - err = -EFAULT; - goto err_unlock; - } - - if (obj->mm.quirked) { - err = -EFAULT; - goto err_unlock; - } - - if (obj->mm.mapping) { - err = 
-EBUSY; - goto err_unlock; - } - - pages = __i915_gem_object_unset_pages(obj); - - obj->ops = &i915_gem_phys_ops; - - err = ____i915_gem_object_get_pages(obj); - if (err) - goto err_xfer; - - /* Perma-pin (until release) the physical set of pages */ - __i915_gem_object_pin_pages(obj); - - if (!IS_ERR_OR_NULL(pages)) - i915_gem_shmem_ops.put_pages(obj, pages); - mutex_unlock(&obj->mm.lock); - return 0; - -err_xfer: - obj->ops = &i915_gem_shmem_ops; - if (!IS_ERR_OR_NULL(pages)) { - unsigned int sg_page_sizes = i915_sg_page_sizes(pages->sgl); - - __i915_gem_object_set_pages(obj, pages, sg_page_sizes); - } -err_unlock: - mutex_unlock(&obj->mm.lock); - return err; -} - #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) #include "selftests/scatterlist.c" #include "selftests/mock_gem_device.c" diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c index 588e3898b120..2c7aefb3e101 100644 --- a/drivers/gpu/drm/i915/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c @@ -114,65 +114,18 @@ static bool unsafe_drop_pages(struct drm_i915_gem_object *obj) return !i915_gem_object_has_pages(obj); } -static void __start_writeback(struct drm_i915_gem_object *obj, - unsigned int flags) +static void try_to_writeback(struct drm_i915_gem_object *obj, + unsigned int flags) { - struct address_space *mapping; - struct writeback_control wbc = { - .sync_mode = WB_SYNC_NONE, - .nr_to_write = SWAP_CLUSTER_MAX, - .range_start = 0, - .range_end = LLONG_MAX, - .for_reclaim = 1, - }; - unsigned long i; - - lockdep_assert_held(&obj->mm.lock); - GEM_BUG_ON(i915_gem_object_has_pages(obj)); - switch (obj->mm.madv) { case I915_MADV_DONTNEED: - __i915_gem_object_truncate(obj); + i915_gem_object_truncate(obj); case __I915_MADV_PURGED: return; } - if (!obj->base.filp) - return; - - if (!(flags & I915_SHRINK_WRITEBACK)) - return; - - /* - * Leave mmapings intact (GTT will have been revoked on unbinding, - * leaving only CPU mmapings around) and add those pages to the LRU - * instead of invoking writeback so they are aged and paged out - * as normal. - */ - mapping = obj->base.filp->f_mapping; - - /* Begin writeback on each dirty page */ - for (i = 0; i < obj->base.size >> PAGE_SHIFT; i++) { - struct page *page; - - page = find_lock_entry(mapping, i); - if (!page || xa_is_value(page)) - continue; - - if (!page_mapped(page) && clear_page_dirty_for_io(page)) { - int ret; - - SetPageReclaim(page); - ret = mapping->a_ops->writepage(page, &wbc); - if (!PageWriteback(page)) - ClearPageReclaim(page); - if (!ret) - goto put; - } - unlock_page(page); -put: - put_page(page); - } + if (flags & I915_SHRINK_WRITEBACK) + i915_gem_object_writeback(obj); } /** @@ -315,7 +268,7 @@ i915_gem_shrink(struct drm_i915_private *i915, mutex_lock_nested(&obj->mm.lock, I915_MM_SHRINKER); if (!i915_gem_object_has_pages(obj)) { - __start_writeback(obj, flags); + try_to_writeback(obj, flags); count += obj->base.size >> PAGE_SHIFT; } mutex_unlock(&obj->mm.lock); diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/selftests/i915_gem_object.c index a0f59fb0d701..b98a286a8be5 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_object.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_object.c @@ -49,59 +49,6 @@ out: return err; } -static int igt_phys_object(void *arg) -{ - struct drm_i915_private *i915 = arg; - struct drm_i915_gem_object *obj; - int err; - - /* Create an object and bind it to a contiguous set of physical pages, - * i.e. exercise the i915_gem_object_phys API. 
- */ - - obj = i915_gem_object_create_shmem(i915, PAGE_SIZE); - if (IS_ERR(obj)) { - err = PTR_ERR(obj); - pr_err("i915_gem_object_create failed, err=%d\n", err); - goto out; - } - - mutex_lock(&i915->drm.struct_mutex); - err = i915_gem_object_attach_phys(obj, PAGE_SIZE); - mutex_unlock(&i915->drm.struct_mutex); - if (err) { - pr_err("i915_gem_object_attach_phys failed, err=%d\n", err); - goto out_obj; - } - - if (obj->ops != &i915_gem_phys_ops) { - pr_err("i915_gem_object_attach_phys did not create a phys object\n"); - err = -EINVAL; - goto out_obj; - } - - if (!atomic_read(&obj->mm.pages_pin_count)) { - pr_err("i915_gem_object_attach_phys did not pin its phys pages\n"); - err = -EINVAL; - goto out_obj; - } - - /* Make the object dirty so that put_pages must do copy back the data */ - mutex_lock(&i915->drm.struct_mutex); - err = i915_gem_object_set_to_gtt_domain(obj, true); - mutex_unlock(&i915->drm.struct_mutex); - if (err) { - pr_err("i915_gem_object_set_to_gtt_domain failed with err=%d\n", - err); - goto out_obj; - } - -out_obj: - i915_gem_object_put(obj); -out: - return err; -} - static int igt_gem_huge(void *arg) { const unsigned int nreal = 509; /* just to be awkward */ @@ -631,7 +578,6 @@ int i915_gem_object_mock_selftests(void) { static const struct i915_subtest tests[] = { SUBTEST(igt_gem_object), - SUBTEST(igt_phys_object), }; struct drm_i915_private *i915; int err; diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h index 88e5ab586337..510eb176bb2c 100644 --- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h +++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h @@ -18,6 +18,7 @@ selftest(engine, intel_engine_cs_mock_selftests) selftest(timelines, i915_timeline_mock_selftests) selftest(requests, i915_request_mock_selftests) selftest(objects, i915_gem_object_mock_selftests) +selftest(phys, i915_gem_phys_mock_selftests) selftest(dmabuf, i915_gem_dmabuf_mock_selftests) selftest(vma, i915_vma_mock_selftests) selftest(evict, i915_gem_evict_mock_selftests) -- cgit v1.2.3 From b414fcd5be0b0045635ba18efbe04ed662d86fe8 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 28 May 2019 10:29:47 +0100 Subject: drm/i915: Move mmap and friends to its own file Continuing the decluttering of i915_gem.c, now the turn of do_mmap and the faulthandlers Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190528092956.14910-6-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/Makefile | 1 + drivers/gpu/drm/i915/gem/i915_gem_mman.c | 507 ++++++++++++++++++ drivers/gpu/drm/i915/gem/i915_gem_object.c | 56 ++ drivers/gpu/drm/i915/gem/i915_gem_object.h | 7 + drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c | 503 ++++++++++++++++++ drivers/gpu/drm/i915/i915_drv.h | 1 - drivers/gpu/drm/i915/i915_gem.c | 564 +-------------------- drivers/gpu/drm/i915/i915_gem_tiling.c | 2 +- drivers/gpu/drm/i915/selftests/i915_gem_object.c | 487 ------------------ .../gpu/drm/i915/selftests/i915_live_selftests.h | 1 + 10 files changed, 1090 insertions(+), 1039 deletions(-) create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_mman.c create mode 100644 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index a584fff53e63..4c14628dc943 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -88,6 +88,7 @@ i915-y += $(gt-y) obj-y += gem/ gem-y += \ 
gem/i915_gem_object.o \ + gem/i915_gem_mman.o \ gem/i915_gem_pages.o \ gem/i915_gem_phys.o \ gem/i915_gem_shmem.o diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c new file mode 100644 index 000000000000..c7b9b34de01b --- /dev/null +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c @@ -0,0 +1,507 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2014-2016 Intel Corporation + */ + +#include +#include + +#include "i915_drv.h" +#include "i915_gem_gtt.h" +#include "i915_gem_ioctls.h" +#include "i915_gem_object.h" +#include "i915_vma.h" +#include "intel_drv.h" + +static inline bool +__vma_matches(struct vm_area_struct *vma, struct file *filp, + unsigned long addr, unsigned long size) +{ + if (vma->vm_file != filp) + return false; + + return vma->vm_start == addr && + (vma->vm_end - vma->vm_start) == PAGE_ALIGN(size); +} + +/** + * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address + * it is mapped to. + * @dev: drm device + * @data: ioctl data blob + * @file: drm file + * + * While the mapping holds a reference on the contents of the object, it doesn't + * imply a ref on the object itself. + * + * IMPORTANT: + * + * DRM driver writers who look a this function as an example for how to do GEM + * mmap support, please don't implement mmap support like here. The modern way + * to implement DRM mmap support is with an mmap offset ioctl (like + * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly. + * That way debug tooling like valgrind will understand what's going on, hiding + * the mmap call in a driver private ioctl will break that. The i915 driver only + * does cpu mmaps this way because we didn't know better. + */ +int +i915_gem_mmap_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_i915_gem_mmap *args = data; + struct drm_i915_gem_object *obj; + unsigned long addr; + + if (args->flags & ~(I915_MMAP_WC)) + return -EINVAL; + + if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT)) + return -ENODEV; + + obj = i915_gem_object_lookup(file, args->handle); + if (!obj) + return -ENOENT; + + /* prime objects have no backing filp to GEM mmap + * pages from. + */ + if (!obj->base.filp) { + addr = -ENXIO; + goto err; + } + + if (range_overflows(args->offset, args->size, (u64)obj->base.size)) { + addr = -EINVAL; + goto err; + } + + addr = vm_mmap(obj->base.filp, 0, args->size, + PROT_READ | PROT_WRITE, MAP_SHARED, + args->offset); + if (IS_ERR_VALUE(addr)) + goto err; + + if (args->flags & I915_MMAP_WC) { + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + + if (down_write_killable(&mm->mmap_sem)) { + addr = -EINTR; + goto err; + } + vma = find_vma(mm, addr); + if (vma && __vma_matches(vma, obj->base.filp, addr, args->size)) + vma->vm_page_prot = + pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); + else + addr = -ENOMEM; + up_write(&mm->mmap_sem); + if (IS_ERR_VALUE(addr)) + goto err; + + /* This may race, but that's ok, it only gets set */ + WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU); + } + i915_gem_object_put(obj); + + args->addr_ptr = (u64)addr; + return 0; + +err: + i915_gem_object_put(obj); + return addr; +} + +static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj) +{ + return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT; +} + +/** + * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps + * + * A history of the GTT mmap interface: + * + * 0 - Everything had to fit into the GTT. 
Both parties of a memcpy had to + * aligned and suitable for fencing, and still fit into the available + * mappable space left by the pinned display objects. A classic problem + * we called the page-fault-of-doom where we would ping-pong between + * two objects that could not fit inside the GTT and so the memcpy + * would page one object in at the expense of the other between every + * single byte. + * + * 1 - Objects can be any size, and have any compatible fencing (X Y, or none + * as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the + * object is too large for the available space (or simply too large + * for the mappable aperture!), a view is created instead and faulted + * into userspace. (This view is aligned and sized appropriately for + * fenced access.) + * + * 2 - Recognise WC as a separate cache domain so that we can flush the + * delayed writes via GTT before performing direct access via WC. + * + * 3 - Remove implicit set-domain(GTT) and synchronisation on initial + * pagefault; swapin remains transparent. + * + * Restrictions: + * + * * snoopable objects cannot be accessed via the GTT. It can cause machine + * hangs on some architectures, corruption on others. An attempt to service + * a GTT page fault from a snoopable object will generate a SIGBUS. + * + * * the object must be able to fit into RAM (physical memory, though no + * limited to the mappable aperture). + * + * + * Caveats: + * + * * a new GTT page fault will synchronize rendering from the GPU and flush + * all data to system memory. Subsequent access will not be synchronized. + * + * * all mappings are revoked on runtime device suspend. + * + * * there are only 8, 16 or 32 fence registers to share between all users + * (older machines require fence register for display and blitter access + * as well). Contention of the fence registers will cause the previous users + * to be unmapped and any new access will generate new page faults. + * + * * running out of memory while servicing a fault may generate a SIGBUS, + * rather than the expected SIGSEGV. + */ +int i915_gem_mmap_gtt_version(void) +{ + return 3; +} + +static inline struct i915_ggtt_view +compute_partial_view(const struct drm_i915_gem_object *obj, + pgoff_t page_offset, + unsigned int chunk) +{ + struct i915_ggtt_view view; + + if (i915_gem_object_is_tiled(obj)) + chunk = roundup(chunk, tile_row_pages(obj)); + + view.type = I915_GGTT_VIEW_PARTIAL; + view.partial.offset = rounddown(page_offset, chunk); + view.partial.size = + min_t(unsigned int, chunk, + (obj->base.size >> PAGE_SHIFT) - view.partial.offset); + + /* If the partial covers the entire object, just create a normal VMA. */ + if (chunk >= obj->base.size >> PAGE_SHIFT) + view.type = I915_GGTT_VIEW_NORMAL; + + return view; +} + +/** + * i915_gem_fault - fault a page into the GTT + * @vmf: fault info + * + * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped + * from userspace. The fault handler takes care of binding the object to + * the GTT (if needed), allocating and programming a fence register (again, + * only if needed based on whether the old reg is still valid or the object + * is tiled) and inserting a new PTE into the faulting process. + * + * Note that the faulting process may involve evicting existing objects + * from the GTT and/or fence registers to make room. So performance may + * suffer if the GTT working set is large or there are few fence registers + * left. 
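/*
 * [Editor's sketch, not part of the patch] compute_partial_view() above picks
 * an aligned window of the object around the faulting page when the whole
 * object cannot be pinned into the mappable aperture by the fault handler
 * documented here.  The arithmetic, stand-alone, with MIN_CHUNK_PAGES taken
 * as 1 MiB worth of 4 KiB pages as in the fault handler:
 */
#include <stdio.h>

int main(void)
{
	const unsigned long total_pages = 4096;	/* a 16 MiB object */
	const unsigned long chunk = 256;	/* 1 MiB of 4 KiB pages */
	unsigned long fault_page = 1234;

	unsigned long offset = fault_page - (fault_page % chunk); /* rounddown */
	unsigned long size = total_pages - offset;

	if (size > chunk)
		size = chunk;

	printf("fault on page %lu -> view [%lu, %lu)\n",
	       fault_page, offset, offset + size);	/* [1024, 1280) */
	return 0;
}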
+ * + * The current feature set supported by i915_gem_fault() and thus GTT mmaps + * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version). + */ +vm_fault_t i915_gem_fault(struct vm_fault *vmf) +{ +#define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT) + struct vm_area_struct *area = vmf->vma; + struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data); + struct drm_device *dev = obj->base.dev; + struct drm_i915_private *i915 = to_i915(dev); + struct i915_ggtt *ggtt = &i915->ggtt; + bool write = area->vm_flags & VM_WRITE; + intel_wakeref_t wakeref; + struct i915_vma *vma; + pgoff_t page_offset; + int srcu; + int ret; + + /* Sanity check that we allow writing into this object */ + if (i915_gem_object_is_readonly(obj) && write) + return VM_FAULT_SIGBUS; + + /* We don't use vmf->pgoff since that has the fake offset */ + page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT; + + trace_i915_gem_object_fault(obj, page_offset, true, write); + + ret = i915_gem_object_pin_pages(obj); + if (ret) + goto err; + + wakeref = intel_runtime_pm_get(i915); + + srcu = i915_reset_trylock(i915); + if (srcu < 0) { + ret = srcu; + goto err_rpm; + } + + ret = i915_mutex_lock_interruptible(dev); + if (ret) + goto err_reset; + + /* Access to snoopable pages through the GTT is incoherent. */ + if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(i915)) { + ret = -EFAULT; + goto err_unlock; + } + + /* Now pin it into the GTT as needed */ + vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, + PIN_MAPPABLE | + PIN_NONBLOCK | + PIN_NONFAULT); + if (IS_ERR(vma)) { + /* Use a partial view if it is bigger than available space */ + struct i915_ggtt_view view = + compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES); + unsigned int flags; + + flags = PIN_MAPPABLE; + if (view.type == I915_GGTT_VIEW_NORMAL) + flags |= PIN_NONBLOCK; /* avoid warnings for pinned */ + + /* + * Userspace is now writing through an untracked VMA, abandon + * all hope that the hardware is able to track future writes. 
+ */ + obj->frontbuffer_ggtt_origin = ORIGIN_CPU; + + vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags); + if (IS_ERR(vma) && !view.type) { + flags = PIN_MAPPABLE; + view.type = I915_GGTT_VIEW_PARTIAL; + vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags); + } + } + if (IS_ERR(vma)) { + ret = PTR_ERR(vma); + goto err_unlock; + } + + ret = i915_vma_pin_fence(vma); + if (ret) + goto err_unpin; + + /* Finally, remap it using the new GTT offset */ + ret = remap_io_mapping(area, + area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT), + (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT, + min_t(u64, vma->size, area->vm_end - area->vm_start), + &ggtt->iomap); + if (ret) + goto err_fence; + + /* Mark as being mmapped into userspace for later revocation */ + assert_rpm_wakelock_held(i915); + if (!i915_vma_set_userfault(vma) && !obj->userfault_count++) + list_add(&obj->userfault_link, &i915->mm.userfault_list); + if (CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND) + intel_wakeref_auto(&i915->mm.userfault_wakeref, + msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)); + GEM_BUG_ON(!obj->userfault_count); + + i915_vma_set_ggtt_write(vma); + +err_fence: + i915_vma_unpin_fence(vma); +err_unpin: + __i915_vma_unpin(vma); +err_unlock: + mutex_unlock(&dev->struct_mutex); +err_reset: + i915_reset_unlock(i915, srcu); +err_rpm: + intel_runtime_pm_put(i915, wakeref); + i915_gem_object_unpin_pages(obj); +err: + switch (ret) { + case -EIO: + /* + * We eat errors when the gpu is terminally wedged to avoid + * userspace unduly crashing (gl has no provisions for mmaps to + * fail). But any other -EIO isn't ours (e.g. swap in failure) + * and so needs to be reported. + */ + if (!i915_terminally_wedged(i915)) + return VM_FAULT_SIGBUS; + /* else: fall through */ + case -EAGAIN: + /* + * EAGAIN means the gpu is hung and we'll wait for the error + * handler to reset everything when re-faulting in + * i915_mutex_lock_interruptible. + */ + case 0: + case -ERESTARTSYS: + case -EINTR: + case -EBUSY: + /* + * EBUSY is ok: this just means that another thread + * already did the job. + */ + return VM_FAULT_NOPAGE; + case -ENOMEM: + return VM_FAULT_OOM; + case -ENOSPC: + case -EFAULT: + return VM_FAULT_SIGBUS; + default: + WARN_ONCE(ret, "unhandled error in %s: %i\n", __func__, ret); + return VM_FAULT_SIGBUS; + } +} + +void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj) +{ + struct i915_vma *vma; + + GEM_BUG_ON(!obj->userfault_count); + + obj->userfault_count = 0; + list_del(&obj->userfault_link); + drm_vma_node_unmap(&obj->base.vma_node, + obj->base.dev->anon_inode->i_mapping); + + for_each_ggtt_vma(vma, obj) + i915_vma_unset_userfault(vma); +} + +/** + * i915_gem_object_release_mmap - remove physical page mappings + * @obj: obj in question + * + * Preserve the reservation of the mmapping with the DRM core code, but + * relinquish ownership of the pages back to the system. + * + * It is vital that we remove the page mapping if we have mapped a tiled + * object through the GTT and then lose the fence register due to + * resource pressure. Similarly if the object has been moved out of the + * aperture, than pages mapped into userspace must be revoked. Removing the + * mapping will then trigger a page fault on the next user access, allowing + * fixup by i915_gem_fault(). 
+ */ +void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj) +{ + struct drm_i915_private *i915 = to_i915(obj->base.dev); + intel_wakeref_t wakeref; + + /* Serialisation between user GTT access and our code depends upon + * revoking the CPU's PTE whilst the mutex is held. The next user + * pagefault then has to wait until we release the mutex. + * + * Note that RPM complicates somewhat by adding an additional + * requirement that operations to the GGTT be made holding the RPM + * wakeref. + */ + lockdep_assert_held(&i915->drm.struct_mutex); + wakeref = intel_runtime_pm_get(i915); + + if (!obj->userfault_count) + goto out; + + __i915_gem_object_release_mmap(obj); + + /* Ensure that the CPU's PTE are revoked and there are not outstanding + * memory transactions from userspace before we return. The TLB + * flushing implied above by changing the PTE above *should* be + * sufficient, an extra barrier here just provides us with a bit + * of paranoid documentation about our requirement to serialise + * memory writes before touching registers / GSM. + */ + wmb(); + +out: + intel_runtime_pm_put(i915, wakeref); +} + +static int create_mmap_offset(struct drm_i915_gem_object *obj) +{ + struct drm_i915_private *i915 = to_i915(obj->base.dev); + int err; + + err = drm_gem_create_mmap_offset(&obj->base); + if (likely(!err)) + return 0; + + /* Attempt to reap some mmap space from dead objects */ + do { + err = i915_gem_wait_for_idle(i915, + I915_WAIT_INTERRUPTIBLE, + MAX_SCHEDULE_TIMEOUT); + if (err) + break; + + i915_gem_drain_freed_objects(i915); + err = drm_gem_create_mmap_offset(&obj->base); + if (!err) + break; + + } while (flush_delayed_work(&i915->gem.retire_work)); + + return err; +} + +int +i915_gem_mmap_gtt(struct drm_file *file, + struct drm_device *dev, + u32 handle, + u64 *offset) +{ + struct drm_i915_gem_object *obj; + int ret; + + obj = i915_gem_object_lookup(file, handle); + if (!obj) + return -ENOENT; + + ret = create_mmap_offset(obj); + if (ret == 0) + *offset = drm_vma_node_offset_addr(&obj->base.vma_node); + + i915_gem_object_put(obj); + return ret; +} + +/** + * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing + * @dev: DRM device + * @data: GTT mapping ioctl data + * @file: GEM object info + * + * Simply returns the fake offset to userspace so it can mmap it. + * The mmap call will end up in drm_gem_mmap(), which will set things + * up so we can get faults in the handler above. + * + * The fault handler will take care of binding the object into the GTT + * (since it may have been evicted to make room for something), allocating + * a fence register, and mapping the appropriate aperture address into + * userspace. 
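/*
 * [Editor's usage sketch, not part of the patch] How user space consumes the
 * fake offset produced by the ioctl described above: ask the kernel for the
 * offset, then mmap() the DRM fd at that offset so i915_gem_fault() can
 * service the accesses.  Assumes the i915 uapi header is on the include path
 * (location varies by distribution) and a valid GEM handle obtained
 * elsewhere, e.g. via DRM_IOCTL_I915_GEM_CREATE.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/i915_drm.h>

static void *gtt_map(int drm_fd, uint32_t handle, size_t size)
{
	struct drm_i915_gem_mmap_gtt arg;

	memset(&arg, 0, sizeof(arg));
	arg.handle = handle;

	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg))
		return MAP_FAILED;

	/* arg.offset is only a token understood by the DRM fd's mmap */
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    drm_fd, (off_t)arg.offset);
}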
+ */ +int +i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_i915_gem_mmap_gtt *args = data; + + return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset); +} + +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) +#include "selftests/i915_gem_mman.c" +#endif diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c index 3000b26b4bbf..4ed28ac9ab3a 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c @@ -24,6 +24,7 @@ #include "i915_drv.h" #include "i915_gem_object.h" +#include "i915_gem_clflush.h" #include "i915_globals.h" #include "intel_frontbuffer.h" @@ -356,6 +357,61 @@ void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj) i915_gem_object_put(obj); } +static inline enum fb_op_origin +fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain) +{ + return (domain == I915_GEM_DOMAIN_GTT ? + obj->frontbuffer_ggtt_origin : ORIGIN_CPU); +} + +static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj) +{ + return !(obj->cache_level == I915_CACHE_NONE || + obj->cache_level == I915_CACHE_WT); +} + +void +i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj, + unsigned int flush_domains) +{ + struct drm_i915_private *dev_priv = to_i915(obj->base.dev); + struct i915_vma *vma; + + if (!(obj->write_domain & flush_domains)) + return; + + switch (obj->write_domain) { + case I915_GEM_DOMAIN_GTT: + i915_gem_flush_ggtt_writes(dev_priv); + + intel_fb_obj_flush(obj, + fb_write_origin(obj, I915_GEM_DOMAIN_GTT)); + + for_each_ggtt_vma(vma, obj) { + if (vma->iomap) + continue; + + i915_vma_unset_ggtt_write(vma); + } + break; + + case I915_GEM_DOMAIN_WC: + wmb(); + break; + + case I915_GEM_DOMAIN_CPU: + i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC); + break; + + case I915_GEM_DOMAIN_RENDER: + if (gpu_write_needs_clflush(obj)) + obj->cache_dirty = true; + break; + } + + obj->write_domain = 0; +} + void i915_gem_init__objects(struct drm_i915_private *i915) { INIT_WORK(&i915->mm.free_work, __i915_gem_free_work); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h index 88495530ebd9..07f487cbff79 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h @@ -351,6 +351,13 @@ static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj) i915_gem_object_unpin_pages(obj); } +void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj); +void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj); + +void +i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj, + unsigned int flush_domains); + static inline struct intel_engine_cs * i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj) { diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c new file mode 100644 index 000000000000..87da01230179 --- /dev/null +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c @@ -0,0 +1,503 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2016 Intel Corporation + */ + +#include + +#include "gt/intel_gt_pm.h" +#include "i915_selftest.h" +#include "selftests/huge_gem_object.h" +#include "selftests/igt_flush_test.h" + +struct tile { + unsigned int width; + unsigned int height; + unsigned int stride; + unsigned int size; + unsigned int tiling; + unsigned int swizzle; +}; + +static u64 swizzle_bit(unsigned int bit, 
u64 offset) +{ + return (offset & BIT_ULL(bit)) >> (bit - 6); +} + +static u64 tiled_offset(const struct tile *tile, u64 v) +{ + u64 x, y; + + if (tile->tiling == I915_TILING_NONE) + return v; + + y = div64_u64_rem(v, tile->stride, &x); + v = div64_u64_rem(y, tile->height, &y) * tile->stride * tile->height; + + if (tile->tiling == I915_TILING_X) { + v += y * tile->width; + v += div64_u64_rem(x, tile->width, &x) << tile->size; + v += x; + } else if (tile->width == 128) { + const unsigned int ytile_span = 16; + const unsigned int ytile_height = 512; + + v += y * ytile_span; + v += div64_u64_rem(x, ytile_span, &x) * ytile_height; + v += x; + } else { + const unsigned int ytile_span = 32; + const unsigned int ytile_height = 256; + + v += y * ytile_span; + v += div64_u64_rem(x, ytile_span, &x) * ytile_height; + v += x; + } + + switch (tile->swizzle) { + case I915_BIT_6_SWIZZLE_9: + v ^= swizzle_bit(9, v); + break; + case I915_BIT_6_SWIZZLE_9_10: + v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v); + break; + case I915_BIT_6_SWIZZLE_9_11: + v ^= swizzle_bit(9, v) ^ swizzle_bit(11, v); + break; + case I915_BIT_6_SWIZZLE_9_10_11: + v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v) ^ swizzle_bit(11, v); + break; + } + + return v; +} + +static int check_partial_mapping(struct drm_i915_gem_object *obj, + const struct tile *tile, + unsigned long end_time) +{ + const unsigned int nreal = obj->scratch / PAGE_SIZE; + const unsigned long npages = obj->base.size / PAGE_SIZE; + struct i915_vma *vma; + unsigned long page; + int err; + + if (igt_timeout(end_time, + "%s: timed out before tiling=%d stride=%d\n", + __func__, tile->tiling, tile->stride)) + return -EINTR; + + err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride); + if (err) { + pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n", + tile->tiling, tile->stride, err); + return err; + } + + GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling); + GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride); + + for_each_prime_number_from(page, 1, npages) { + struct i915_ggtt_view view = + compute_partial_view(obj, page, MIN_CHUNK_PAGES); + u32 __iomem *io; + struct page *p; + unsigned int n; + u64 offset; + u32 *cpu; + + GEM_BUG_ON(view.partial.size > nreal); + cond_resched(); + + err = i915_gem_object_set_to_gtt_domain(obj, true); + if (err) { + pr_err("Failed to flush to GTT write domain; err=%d\n", + err); + return err; + } + + vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE); + if (IS_ERR(vma)) { + pr_err("Failed to pin partial view: offset=%lu; err=%d\n", + page, (int)PTR_ERR(vma)); + return PTR_ERR(vma); + } + + n = page - view.partial.offset; + GEM_BUG_ON(n >= view.partial.size); + + io = i915_vma_pin_iomap(vma); + i915_vma_unpin(vma); + if (IS_ERR(io)) { + pr_err("Failed to iomap partial view: offset=%lu; err=%d\n", + page, (int)PTR_ERR(io)); + return PTR_ERR(io); + } + + iowrite32(page, io + n * PAGE_SIZE / sizeof(*io)); + i915_vma_unpin_iomap(vma); + + offset = tiled_offset(tile, page << PAGE_SHIFT); + if (offset >= obj->base.size) + continue; + + i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU); + + p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT); + cpu = kmap(p) + offset_in_page(offset); + drm_clflush_virt_range(cpu, sizeof(*cpu)); + if (*cpu != (u32)page) { + pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n", + page, n, + view.partial.offset, + 
view.partial.size, + vma->size >> PAGE_SHIFT, + tile->tiling ? tile_row_pages(obj) : 0, + vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride, + offset >> PAGE_SHIFT, + (unsigned int)offset_in_page(offset), + offset, + (u32)page, *cpu); + err = -EINVAL; + } + *cpu = 0; + drm_clflush_virt_range(cpu, sizeof(*cpu)); + kunmap(p); + if (err) + return err; + + i915_vma_destroy(vma); + } + + return 0; +} + +static int igt_partial_tiling(void *arg) +{ + const unsigned int nreal = 1 << 12; /* largest tile row x2 */ + struct drm_i915_private *i915 = arg; + struct drm_i915_gem_object *obj; + intel_wakeref_t wakeref; + int tiling; + int err; + + /* We want to check the page mapping and fencing of a large object + * mmapped through the GTT. The object we create is larger than can + * possibly be mmaped as a whole, and so we must use partial GGTT vma. + * We then check that a write through each partial GGTT vma ends up + * in the right set of pages within the object, and with the expected + * tiling, which we verify by manual swizzling. + */ + + obj = huge_gem_object(i915, + nreal << PAGE_SHIFT, + (1 + next_prime_number(i915->ggtt.vm.total >> PAGE_SHIFT)) << PAGE_SHIFT); + if (IS_ERR(obj)) + return PTR_ERR(obj); + + err = i915_gem_object_pin_pages(obj); + if (err) { + pr_err("Failed to allocate %u pages (%lu total), err=%d\n", + nreal, obj->base.size / PAGE_SIZE, err); + goto out; + } + + mutex_lock(&i915->drm.struct_mutex); + wakeref = intel_runtime_pm_get(i915); + + if (1) { + IGT_TIMEOUT(end); + struct tile tile; + + tile.height = 1; + tile.width = 1; + tile.size = 0; + tile.stride = 0; + tile.swizzle = I915_BIT_6_SWIZZLE_NONE; + tile.tiling = I915_TILING_NONE; + + err = check_partial_mapping(obj, &tile, end); + if (err && err != -EINTR) + goto out_unlock; + } + + for (tiling = I915_TILING_X; tiling <= I915_TILING_Y; tiling++) { + IGT_TIMEOUT(end); + unsigned int max_pitch; + unsigned int pitch; + struct tile tile; + + if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) + /* + * The swizzling pattern is actually unknown as it + * varies based on physical address of each page. + * See i915_gem_detect_bit_6_swizzle(). 
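/*
 * [Editor's sketch, illustrative only] swizzle_bit()/tiled_offset() above
 * model how the selftest expects addresses to be rearranged on swizzled
 * platforms: selected high address bits are XORed into bit 6, so two
 * locations 64 bytes apart may swap cachelines.  Stand-alone, for the
 * 9/10 swizzle mode:
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t swizzle_bit(unsigned int bit, uint64_t offset)
{
	return (offset & (1ull << bit)) >> (bit - 6);
}

int main(void)
{
	for (uint64_t addr = 0; addr < 4096; addr += 512) {
		uint64_t swizzled = addr ^ swizzle_bit(9, addr) ^ swizzle_bit(10, addr);

		printf("%#6llx -> %#6llx\n",
		       (unsigned long long)addr, (unsigned long long)swizzled);
	}
	return 0;
}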
+ */ + break; + + tile.tiling = tiling; + switch (tiling) { + case I915_TILING_X: + tile.swizzle = i915->mm.bit_6_swizzle_x; + break; + case I915_TILING_Y: + tile.swizzle = i915->mm.bit_6_swizzle_y; + break; + } + + GEM_BUG_ON(tile.swizzle == I915_BIT_6_SWIZZLE_UNKNOWN); + if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 || + tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17) + continue; + + if (INTEL_GEN(i915) <= 2) { + tile.height = 16; + tile.width = 128; + tile.size = 11; + } else if (tile.tiling == I915_TILING_Y && + HAS_128_BYTE_Y_TILING(i915)) { + tile.height = 32; + tile.width = 128; + tile.size = 12; + } else { + tile.height = 8; + tile.width = 512; + tile.size = 12; + } + + if (INTEL_GEN(i915) < 4) + max_pitch = 8192 / tile.width; + else if (INTEL_GEN(i915) < 7) + max_pitch = 128 * I965_FENCE_MAX_PITCH_VAL / tile.width; + else + max_pitch = 128 * GEN7_FENCE_MAX_PITCH_VAL / tile.width; + + for (pitch = max_pitch; pitch; pitch >>= 1) { + tile.stride = tile.width * pitch; + err = check_partial_mapping(obj, &tile, end); + if (err == -EINTR) + goto next_tiling; + if (err) + goto out_unlock; + + if (pitch > 2 && INTEL_GEN(i915) >= 4) { + tile.stride = tile.width * (pitch - 1); + err = check_partial_mapping(obj, &tile, end); + if (err == -EINTR) + goto next_tiling; + if (err) + goto out_unlock; + } + + if (pitch < max_pitch && INTEL_GEN(i915) >= 4) { + tile.stride = tile.width * (pitch + 1); + err = check_partial_mapping(obj, &tile, end); + if (err == -EINTR) + goto next_tiling; + if (err) + goto out_unlock; + } + } + + if (INTEL_GEN(i915) >= 4) { + for_each_prime_number(pitch, max_pitch) { + tile.stride = tile.width * pitch; + err = check_partial_mapping(obj, &tile, end); + if (err == -EINTR) + goto next_tiling; + if (err) + goto out_unlock; + } + } + +next_tiling: ; + } + +out_unlock: + intel_runtime_pm_put(i915, wakeref); + mutex_unlock(&i915->drm.struct_mutex); + i915_gem_object_unpin_pages(obj); +out: + i915_gem_object_put(obj); + return err; +} + +static int make_obj_busy(struct drm_i915_gem_object *obj) +{ + struct drm_i915_private *i915 = to_i915(obj->base.dev); + struct i915_request *rq; + struct i915_vma *vma; + int err; + + vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); + if (IS_ERR(vma)) + return PTR_ERR(vma); + + err = i915_vma_pin(vma, 0, 0, PIN_USER); + if (err) + return err; + + rq = i915_request_create(i915->engine[RCS0]->kernel_context); + if (IS_ERR(rq)) { + i915_vma_unpin(vma); + return PTR_ERR(rq); + } + + err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); + + i915_request_add(rq); + + __i915_gem_object_release_unless_active(obj); + i915_vma_unpin(vma); + + return err; +} + +static bool assert_mmap_offset(struct drm_i915_private *i915, + unsigned long size, + int expected) +{ + struct drm_i915_gem_object *obj; + int err; + + obj = i915_gem_object_create_internal(i915, size); + if (IS_ERR(obj)) + return PTR_ERR(obj); + + err = create_mmap_offset(obj); + i915_gem_object_put(obj); + + return err == expected; +} + +static void disable_retire_worker(struct drm_i915_private *i915) +{ + i915_gem_shrinker_unregister(i915); + + intel_gt_pm_get(i915); + + cancel_delayed_work_sync(&i915->gem.retire_work); + flush_work(&i915->gem.idle_work); +} + +static void restore_retire_worker(struct drm_i915_private *i915) +{ + intel_gt_pm_put(i915); + + mutex_lock(&i915->drm.struct_mutex); + igt_flush_test(i915, I915_WAIT_LOCKED); + mutex_unlock(&i915->drm.struct_mutex); + + i915_gem_shrinker_register(i915); +} + +static int igt_mmap_offset_exhaustion(void *arg) +{ + struct 
drm_i915_private *i915 = arg; + struct drm_mm *mm = &i915->drm.vma_offset_manager->vm_addr_space_mm; + struct drm_i915_gem_object *obj; + struct drm_mm_node resv, *hole; + u64 hole_start, hole_end; + int loop, err; + + /* Disable background reaper */ + disable_retire_worker(i915); + GEM_BUG_ON(!i915->gt.awake); + + /* Trim the device mmap space to only a page */ + memset(&resv, 0, sizeof(resv)); + drm_mm_for_each_hole(hole, mm, hole_start, hole_end) { + resv.start = hole_start; + resv.size = hole_end - hole_start - 1; /* PAGE_SIZE units */ + err = drm_mm_reserve_node(mm, &resv); + if (err) { + pr_err("Failed to trim VMA manager, err=%d\n", err); + goto out_park; + } + break; + } + + /* Just fits! */ + if (!assert_mmap_offset(i915, PAGE_SIZE, 0)) { + pr_err("Unable to insert object into single page hole\n"); + err = -EINVAL; + goto out; + } + + /* Too large */ + if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, -ENOSPC)) { + pr_err("Unexpectedly succeeded in inserting too large object into single page hole\n"); + err = -EINVAL; + goto out; + } + + /* Fill the hole, further allocation attempts should then fail */ + obj = i915_gem_object_create_internal(i915, PAGE_SIZE); + if (IS_ERR(obj)) { + err = PTR_ERR(obj); + goto out; + } + + err = create_mmap_offset(obj); + if (err) { + pr_err("Unable to insert object into reclaimed hole\n"); + goto err_obj; + } + + if (!assert_mmap_offset(i915, PAGE_SIZE, -ENOSPC)) { + pr_err("Unexpectedly succeeded in inserting object into no holes!\n"); + err = -EINVAL; + goto err_obj; + } + + i915_gem_object_put(obj); + + /* Now fill with busy dead objects that we expect to reap */ + for (loop = 0; loop < 3; loop++) { + if (i915_terminally_wedged(i915)) + break; + + obj = i915_gem_object_create_internal(i915, PAGE_SIZE); + if (IS_ERR(obj)) { + err = PTR_ERR(obj); + goto out; + } + + mutex_lock(&i915->drm.struct_mutex); + err = make_obj_busy(obj); + mutex_unlock(&i915->drm.struct_mutex); + if (err) { + pr_err("[loop %d] Failed to busy the object\n", loop); + goto err_obj; + } + + /* NB we rely on the _active_ reference to access obj now */ + GEM_BUG_ON(!i915_gem_object_is_active(obj)); + err = create_mmap_offset(obj); + if (err) { + pr_err("[loop %d] create_mmap_offset failed with err=%d\n", + loop, err); + goto out; + } + } + +out: + drm_mm_remove_node(&resv); +out_park: + restore_retire_worker(i915); + return err; +err_obj: + i915_gem_object_put(obj); + goto out; +} + +int i915_gem_mman_live_selftests(struct drm_i915_private *i915) +{ + static const struct i915_subtest tests[] = { + SUBTEST(igt_partial_tiling), + SUBTEST(igt_mmap_offset_exhaustion), + }; + + return i915_subtests(tests, i915); +} diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 0f018fd58a60..584ebb901e18 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2806,7 +2806,6 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, u64 flags); int i915_gem_object_unbind(struct drm_i915_gem_object *obj); -void i915_gem_release_mmap(struct drm_i915_gem_object *obj); void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 6f3e9394e806..bde25d5326ba 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -404,12 +404,6 @@ i915_gem_dumb_create(struct drm_file *file, &args->size, &args->handle); } -static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj) -{ - return !(obj->cache_level == I915_CACHE_NONE || 
- obj->cache_level == I915_CACHE_WT); -} - /** * Creates a new mm object and returns a handle to it. * @dev: drm device pointer @@ -429,13 +423,6 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data, &args->size, &args->handle); } -static inline enum fb_op_origin -fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain) -{ - return (domain == I915_GEM_DOMAIN_GTT ? - obj->frontbuffer_ggtt_origin : ORIGIN_CPU); -} - void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv) { intel_wakeref_t wakeref; @@ -475,47 +462,6 @@ void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv) } } -static void -flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains) -{ - struct drm_i915_private *dev_priv = to_i915(obj->base.dev); - struct i915_vma *vma; - - if (!(obj->write_domain & flush_domains)) - return; - - switch (obj->write_domain) { - case I915_GEM_DOMAIN_GTT: - i915_gem_flush_ggtt_writes(dev_priv); - - intel_fb_obj_flush(obj, - fb_write_origin(obj, I915_GEM_DOMAIN_GTT)); - - for_each_ggtt_vma(vma, obj) { - if (vma->iomap) - continue; - - i915_vma_unset_ggtt_write(vma); - } - break; - - case I915_GEM_DOMAIN_WC: - wmb(); - break; - - case I915_GEM_DOMAIN_CPU: - i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC); - break; - - case I915_GEM_DOMAIN_RENDER: - if (gpu_write_needs_clflush(obj)) - obj->cache_dirty = true; - break; - } - - obj->write_domain = 0; -} - /* * Pins the specified object's pages and synchronizes the object with * GPU accesses. Sets needs_clflush to non-zero if the caller should @@ -552,7 +498,7 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj, goto out; } - flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU); + i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU); /* If we're not in the cpu read domain, set ourself into the gtt * read domain and manually flush cachelines (if required). This @@ -604,7 +550,7 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj, goto out; } - flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU); + i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU); /* If we're not in the cpu write domain, set ourself into the * gtt write domain and manually flush cachelines (as required). @@ -1207,6 +1153,13 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj) spin_unlock(&i915->mm.obj_lock); } +static inline enum fb_op_origin +fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain) +{ + return (domain == I915_GEM_DOMAIN_GTT ? + obj->frontbuffer_ggtt_origin : ORIGIN_CPU); +} + /** * Called when user space prepares to use an object with the CPU, either * through the mmap ioctl's mapping or a GTT mapping. @@ -1350,423 +1303,6 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, return 0; } -static inline bool -__vma_matches(struct vm_area_struct *vma, struct file *filp, - unsigned long addr, unsigned long size) -{ - if (vma->vm_file != filp) - return false; - - return vma->vm_start == addr && - (vma->vm_end - vma->vm_start) == PAGE_ALIGN(size); -} - -/** - * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address - * it is mapped to. - * @dev: drm device - * @data: ioctl data blob - * @file: drm file - * - * While the mapping holds a reference on the contents of the object, it doesn't - * imply a ref on the object itself. - * - * IMPORTANT: - * - * DRM driver writers who look a this function as an example for how to do GEM - * mmap support, please don't implement mmap support like here. 
The modern way - * to implement DRM mmap support is with an mmap offset ioctl (like - * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly. - * That way debug tooling like valgrind will understand what's going on, hiding - * the mmap call in a driver private ioctl will break that. The i915 driver only - * does cpu mmaps this way because we didn't know better. - */ -int -i915_gem_mmap_ioctl(struct drm_device *dev, void *data, - struct drm_file *file) -{ - struct drm_i915_gem_mmap *args = data; - struct drm_i915_gem_object *obj; - unsigned long addr; - - if (args->flags & ~(I915_MMAP_WC)) - return -EINVAL; - - if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT)) - return -ENODEV; - - obj = i915_gem_object_lookup(file, args->handle); - if (!obj) - return -ENOENT; - - /* prime objects have no backing filp to GEM mmap - * pages from. - */ - if (!obj->base.filp) { - addr = -ENXIO; - goto err; - } - - if (range_overflows(args->offset, args->size, (u64)obj->base.size)) { - addr = -EINVAL; - goto err; - } - - addr = vm_mmap(obj->base.filp, 0, args->size, - PROT_READ | PROT_WRITE, MAP_SHARED, - args->offset); - if (IS_ERR_VALUE(addr)) - goto err; - - if (args->flags & I915_MMAP_WC) { - struct mm_struct *mm = current->mm; - struct vm_area_struct *vma; - - if (down_write_killable(&mm->mmap_sem)) { - addr = -EINTR; - goto err; - } - vma = find_vma(mm, addr); - if (vma && __vma_matches(vma, obj->base.filp, addr, args->size)) - vma->vm_page_prot = - pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); - else - addr = -ENOMEM; - up_write(&mm->mmap_sem); - if (IS_ERR_VALUE(addr)) - goto err; - - /* This may race, but that's ok, it only gets set */ - WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU); - } - i915_gem_object_put(obj); - - args->addr_ptr = (u64)addr; - return 0; - -err: - i915_gem_object_put(obj); - return addr; -} - -static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj) -{ - return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT; -} - -/** - * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps - * - * A history of the GTT mmap interface: - * - * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to - * aligned and suitable for fencing, and still fit into the available - * mappable space left by the pinned display objects. A classic problem - * we called the page-fault-of-doom where we would ping-pong between - * two objects that could not fit inside the GTT and so the memcpy - * would page one object in at the expense of the other between every - * single byte. - * - * 1 - Objects can be any size, and have any compatible fencing (X Y, or none - * as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the - * object is too large for the available space (or simply too large - * for the mappable aperture!), a view is created instead and faulted - * into userspace. (This view is aligned and sized appropriately for - * fenced access.) - * - * 2 - Recognise WC as a separate cache domain so that we can flush the - * delayed writes via GTT before performing direct access via WC. - * - * 3 - Remove implicit set-domain(GTT) and synchronisation on initial - * pagefault; swapin remains transparent. - * - * Restrictions: - * - * * snoopable objects cannot be accessed via the GTT. It can cause machine - * hangs on some architectures, corruption on others. An attempt to service - * a GTT page fault from a snoopable object will generate a SIGBUS. 
- * - * * the object must be able to fit into RAM (physical memory, though no - * limited to the mappable aperture). - * - * - * Caveats: - * - * * a new GTT page fault will synchronize rendering from the GPU and flush - * all data to system memory. Subsequent access will not be synchronized. - * - * * all mappings are revoked on runtime device suspend. - * - * * there are only 8, 16 or 32 fence registers to share between all users - * (older machines require fence register for display and blitter access - * as well). Contention of the fence registers will cause the previous users - * to be unmapped and any new access will generate new page faults. - * - * * running out of memory while servicing a fault may generate a SIGBUS, - * rather than the expected SIGSEGV. - */ -int i915_gem_mmap_gtt_version(void) -{ - return 3; -} - -static inline struct i915_ggtt_view -compute_partial_view(const struct drm_i915_gem_object *obj, - pgoff_t page_offset, - unsigned int chunk) -{ - struct i915_ggtt_view view; - - if (i915_gem_object_is_tiled(obj)) - chunk = roundup(chunk, tile_row_pages(obj)); - - view.type = I915_GGTT_VIEW_PARTIAL; - view.partial.offset = rounddown(page_offset, chunk); - view.partial.size = - min_t(unsigned int, chunk, - (obj->base.size >> PAGE_SHIFT) - view.partial.offset); - - /* If the partial covers the entire object, just create a normal VMA. */ - if (chunk >= obj->base.size >> PAGE_SHIFT) - view.type = I915_GGTT_VIEW_NORMAL; - - return view; -} - -/** - * i915_gem_fault - fault a page into the GTT - * @vmf: fault info - * - * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped - * from userspace. The fault handler takes care of binding the object to - * the GTT (if needed), allocating and programming a fence register (again, - * only if needed based on whether the old reg is still valid or the object - * is tiled) and inserting a new PTE into the faulting process. - * - * Note that the faulting process may involve evicting existing objects - * from the GTT and/or fence registers to make room. So performance may - * suffer if the GTT working set is large or there are few fence registers - * left. - * - * The current feature set supported by i915_gem_fault() and thus GTT mmaps - * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version). - */ -vm_fault_t i915_gem_fault(struct vm_fault *vmf) -{ -#define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT) - struct vm_area_struct *area = vmf->vma; - struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data); - struct drm_device *dev = obj->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - struct i915_ggtt *ggtt = &dev_priv->ggtt; - bool write = area->vm_flags & VM_WRITE; - intel_wakeref_t wakeref; - struct i915_vma *vma; - pgoff_t page_offset; - int srcu; - int ret; - - /* Sanity check that we allow writing into this object */ - if (i915_gem_object_is_readonly(obj) && write) - return VM_FAULT_SIGBUS; - - /* We don't use vmf->pgoff since that has the fake offset */ - page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT; - - trace_i915_gem_object_fault(obj, page_offset, true, write); - - ret = i915_gem_object_pin_pages(obj); - if (ret) - goto err; - - wakeref = intel_runtime_pm_get(dev_priv); - - srcu = i915_reset_trylock(dev_priv); - if (srcu < 0) { - ret = srcu; - goto err_rpm; - } - - ret = i915_mutex_lock_interruptible(dev); - if (ret) - goto err_reset; - - /* Access to snoopable pages through the GTT is incoherent. 
*/ - if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv)) { - ret = -EFAULT; - goto err_unlock; - } - - /* Now pin it into the GTT as needed */ - vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, - PIN_MAPPABLE | - PIN_NONBLOCK | - PIN_NONFAULT); - if (IS_ERR(vma)) { - /* Use a partial view if it is bigger than available space */ - struct i915_ggtt_view view = - compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES); - unsigned int flags; - - flags = PIN_MAPPABLE; - if (view.type == I915_GGTT_VIEW_NORMAL) - flags |= PIN_NONBLOCK; /* avoid warnings for pinned */ - - /* - * Userspace is now writing through an untracked VMA, abandon - * all hope that the hardware is able to track future writes. - */ - obj->frontbuffer_ggtt_origin = ORIGIN_CPU; - - vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags); - if (IS_ERR(vma) && !view.type) { - flags = PIN_MAPPABLE; - view.type = I915_GGTT_VIEW_PARTIAL; - vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags); - } - } - if (IS_ERR(vma)) { - ret = PTR_ERR(vma); - goto err_unlock; - } - - ret = i915_vma_pin_fence(vma); - if (ret) - goto err_unpin; - - /* Finally, remap it using the new GTT offset */ - ret = remap_io_mapping(area, - area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT), - (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT, - min_t(u64, vma->size, area->vm_end - area->vm_start), - &ggtt->iomap); - if (ret) - goto err_fence; - - /* Mark as being mmapped into userspace for later revocation */ - assert_rpm_wakelock_held(dev_priv); - if (!i915_vma_set_userfault(vma) && !obj->userfault_count++) - list_add(&obj->userfault_link, &dev_priv->mm.userfault_list); - if (CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND) - intel_wakeref_auto(&dev_priv->mm.userfault_wakeref, - msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)); - GEM_BUG_ON(!obj->userfault_count); - - i915_vma_set_ggtt_write(vma); - -err_fence: - i915_vma_unpin_fence(vma); -err_unpin: - __i915_vma_unpin(vma); -err_unlock: - mutex_unlock(&dev->struct_mutex); -err_reset: - i915_reset_unlock(dev_priv, srcu); -err_rpm: - intel_runtime_pm_put(dev_priv, wakeref); - i915_gem_object_unpin_pages(obj); -err: - switch (ret) { - case -EIO: - /* - * We eat errors when the gpu is terminally wedged to avoid - * userspace unduly crashing (gl has no provisions for mmaps to - * fail). But any other -EIO isn't ours (e.g. swap in failure) - * and so needs to be reported. - */ - if (!i915_terminally_wedged(dev_priv)) - return VM_FAULT_SIGBUS; - /* else: fall through */ - case -EAGAIN: - /* - * EAGAIN means the gpu is hung and we'll wait for the error - * handler to reset everything when re-faulting in - * i915_mutex_lock_interruptible. - */ - case 0: - case -ERESTARTSYS: - case -EINTR: - case -EBUSY: - /* - * EBUSY is ok: this just means that another thread - * already did the job. 
- */ - return VM_FAULT_NOPAGE; - case -ENOMEM: - return VM_FAULT_OOM; - case -ENOSPC: - case -EFAULT: - return VM_FAULT_SIGBUS; - default: - WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret); - return VM_FAULT_SIGBUS; - } -} - -static void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj) -{ - struct i915_vma *vma; - - GEM_BUG_ON(!obj->userfault_count); - - obj->userfault_count = 0; - list_del(&obj->userfault_link); - drm_vma_node_unmap(&obj->base.vma_node, - obj->base.dev->anon_inode->i_mapping); - - for_each_ggtt_vma(vma, obj) - i915_vma_unset_userfault(vma); -} - -/** - * i915_gem_release_mmap - remove physical page mappings - * @obj: obj in question - * - * Preserve the reservation of the mmapping with the DRM core code, but - * relinquish ownership of the pages back to the system. - * - * It is vital that we remove the page mapping if we have mapped a tiled - * object through the GTT and then lose the fence register due to - * resource pressure. Similarly if the object has been moved out of the - * aperture, than pages mapped into userspace must be revoked. Removing the - * mapping will then trigger a page fault on the next user access, allowing - * fixup by i915_gem_fault(). - */ -void -i915_gem_release_mmap(struct drm_i915_gem_object *obj) -{ - struct drm_i915_private *i915 = to_i915(obj->base.dev); - intel_wakeref_t wakeref; - - /* Serialisation between user GTT access and our code depends upon - * revoking the CPU's PTE whilst the mutex is held. The next user - * pagefault then has to wait until we release the mutex. - * - * Note that RPM complicates somewhat by adding an additional - * requirement that operations to the GGTT be made holding the RPM - * wakeref. - */ - lockdep_assert_held(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); - - if (!obj->userfault_count) - goto out; - - __i915_gem_object_release_mmap(obj); - - /* Ensure that the CPU's PTE are revoked and there are not outstanding - * memory transactions from userspace before we return. The TLB - * flushing implied above by changing the PTE above *should* be - * sufficient, an extra barrier here just provides us with a bit - * of paranoid documentation about our requirement to serialise - * memory writes before touching registers / GSM. 
- */ - wmb(); - -out: - intel_runtime_pm_put(i915, wakeref); -} - void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv) { struct drm_i915_gem_object *obj, *on; @@ -1809,78 +1345,6 @@ void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv) } } -static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj) -{ - struct drm_i915_private *dev_priv = to_i915(obj->base.dev); - int err; - - err = drm_gem_create_mmap_offset(&obj->base); - if (likely(!err)) - return 0; - - /* Attempt to reap some mmap space from dead objects */ - do { - err = i915_gem_wait_for_idle(dev_priv, - I915_WAIT_INTERRUPTIBLE, - MAX_SCHEDULE_TIMEOUT); - if (err) - break; - - i915_gem_drain_freed_objects(dev_priv); - err = drm_gem_create_mmap_offset(&obj->base); - if (!err) - break; - - } while (flush_delayed_work(&dev_priv->gem.retire_work)); - - return err; -} - -int -i915_gem_mmap_gtt(struct drm_file *file, - struct drm_device *dev, - u32 handle, - u64 *offset) -{ - struct drm_i915_gem_object *obj; - int ret; - - obj = i915_gem_object_lookup(file, handle); - if (!obj) - return -ENOENT; - - ret = i915_gem_object_create_mmap_offset(obj); - if (ret == 0) - *offset = drm_vma_node_offset_addr(&obj->base.vma_node); - - i915_gem_object_put(obj); - return ret; -} - -/** - * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing - * @dev: DRM device - * @data: GTT mapping ioctl data - * @file: GEM object info - * - * Simply returns the fake offset to userspace so it can mmap it. - * The mmap call will end up in drm_gem_mmap(), which will set things - * up so we can get faults in the handler above. - * - * The fault handler will take care of binding the object into the GTT - * (since it may have been evicted to make room for something), allocating - * a fence register, and mapping the appropriate aperture address into - * userspace. - */ -int -i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, - struct drm_file *file) -{ - struct drm_i915_gem_mmap_gtt *args = data; - - return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset); -} - bool i915_sg_trim(struct sg_table *orig_st) { struct sg_table new_st; @@ -2084,7 +1548,7 @@ static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj) * We manually flush the CPU domain so that we can override and * force the flush for the display, and perform it asyncrhonously. */ - flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU); + i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU); if (obj->cache_dirty) i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE); obj->write_domain = 0; @@ -2138,7 +1602,7 @@ i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write) if (ret) return ret; - flush_write_domain(obj, ~I915_GEM_DOMAIN_WC); + i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_WC); /* Serialise direct access to this object with the barriers for * coherent writes from the GPU, by effectively invalidating the @@ -2200,7 +1664,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) if (ret) return ret; - flush_write_domain(obj, ~I915_GEM_DOMAIN_GTT); + i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_GTT); /* Serialise direct access to this object with the barriers for * coherent writes from the GPU, by effectively invalidating the @@ -2309,7 +1773,7 @@ restart: * then double check if the GTT mapping is still * valid for that pointer access. 
*/ - i915_gem_release_mmap(obj); + i915_gem_object_release_mmap(obj); /* As we no longer need a fence for GTT access, * we can relinquish it now (and so prevent having @@ -2564,7 +2028,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write) if (ret) return ret; - flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU); + i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU); /* Flush the CPU cache if it's still invalid. */ if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) { diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 7d5cc9ccf6dd..86d6d92ccbc9 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -299,7 +299,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj, i915_gem_object_unlock(obj); /* Force the fence to be reacquired for GTT access */ - i915_gem_release_mmap(obj); + i915_gem_object_release_mmap(obj); /* Try to preallocate memory required to save swizzling on put-pages */ if (i915_gem_object_needs_bit17_swizzle(obj)) { diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/selftests/i915_gem_object.c index b98a286a8be5..a3dd2f1be95b 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_object.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_object.c @@ -89,491 +89,6 @@ out: return err; } -struct tile { - unsigned int width; - unsigned int height; - unsigned int stride; - unsigned int size; - unsigned int tiling; - unsigned int swizzle; -}; - -static u64 swizzle_bit(unsigned int bit, u64 offset) -{ - return (offset & BIT_ULL(bit)) >> (bit - 6); -} - -static u64 tiled_offset(const struct tile *tile, u64 v) -{ - u64 x, y; - - if (tile->tiling == I915_TILING_NONE) - return v; - - y = div64_u64_rem(v, tile->stride, &x); - v = div64_u64_rem(y, tile->height, &y) * tile->stride * tile->height; - - if (tile->tiling == I915_TILING_X) { - v += y * tile->width; - v += div64_u64_rem(x, tile->width, &x) << tile->size; - v += x; - } else if (tile->width == 128) { - const unsigned int ytile_span = 16; - const unsigned int ytile_height = 512; - - v += y * ytile_span; - v += div64_u64_rem(x, ytile_span, &x) * ytile_height; - v += x; - } else { - const unsigned int ytile_span = 32; - const unsigned int ytile_height = 256; - - v += y * ytile_span; - v += div64_u64_rem(x, ytile_span, &x) * ytile_height; - v += x; - } - - switch (tile->swizzle) { - case I915_BIT_6_SWIZZLE_9: - v ^= swizzle_bit(9, v); - break; - case I915_BIT_6_SWIZZLE_9_10: - v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v); - break; - case I915_BIT_6_SWIZZLE_9_11: - v ^= swizzle_bit(9, v) ^ swizzle_bit(11, v); - break; - case I915_BIT_6_SWIZZLE_9_10_11: - v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v) ^ swizzle_bit(11, v); - break; - } - - return v; -} - -static int check_partial_mapping(struct drm_i915_gem_object *obj, - const struct tile *tile, - unsigned long end_time) -{ - const unsigned int nreal = obj->scratch / PAGE_SIZE; - const unsigned long npages = obj->base.size / PAGE_SIZE; - struct i915_vma *vma; - unsigned long page; - int err; - - if (igt_timeout(end_time, - "%s: timed out before tiling=%d stride=%d\n", - __func__, tile->tiling, tile->stride)) - return -EINTR; - - err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride); - if (err) { - pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n", - tile->tiling, tile->stride, err); - return err; - } - - GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling); - GEM_BUG_ON(i915_gem_object_get_stride(obj) 
!= tile->stride); - - for_each_prime_number_from(page, 1, npages) { - struct i915_ggtt_view view = - compute_partial_view(obj, page, MIN_CHUNK_PAGES); - u32 __iomem *io; - struct page *p; - unsigned int n; - u64 offset; - u32 *cpu; - - GEM_BUG_ON(view.partial.size > nreal); - cond_resched(); - - err = i915_gem_object_set_to_gtt_domain(obj, true); - if (err) { - pr_err("Failed to flush to GTT write domain; err=%d\n", - err); - return err; - } - - vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE); - if (IS_ERR(vma)) { - pr_err("Failed to pin partial view: offset=%lu; err=%d\n", - page, (int)PTR_ERR(vma)); - return PTR_ERR(vma); - } - - n = page - view.partial.offset; - GEM_BUG_ON(n >= view.partial.size); - - io = i915_vma_pin_iomap(vma); - i915_vma_unpin(vma); - if (IS_ERR(io)) { - pr_err("Failed to iomap partial view: offset=%lu; err=%d\n", - page, (int)PTR_ERR(io)); - return PTR_ERR(io); - } - - iowrite32(page, io + n * PAGE_SIZE/sizeof(*io)); - i915_vma_unpin_iomap(vma); - - offset = tiled_offset(tile, page << PAGE_SHIFT); - if (offset >= obj->base.size) - continue; - - flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU); - - p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT); - cpu = kmap(p) + offset_in_page(offset); - drm_clflush_virt_range(cpu, sizeof(*cpu)); - if (*cpu != (u32)page) { - pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n", - page, n, - view.partial.offset, - view.partial.size, - vma->size >> PAGE_SHIFT, - tile->tiling ? tile_row_pages(obj) : 0, - vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride, - offset >> PAGE_SHIFT, - (unsigned int)offset_in_page(offset), - offset, - (u32)page, *cpu); - err = -EINVAL; - } - *cpu = 0; - drm_clflush_virt_range(cpu, sizeof(*cpu)); - kunmap(p); - if (err) - return err; - - i915_vma_destroy(vma); - } - - return 0; -} - -static int igt_partial_tiling(void *arg) -{ - const unsigned int nreal = 1 << 12; /* largest tile row x2 */ - struct drm_i915_private *i915 = arg; - struct drm_i915_gem_object *obj; - intel_wakeref_t wakeref; - int tiling; - int err; - - /* We want to check the page mapping and fencing of a large object - * mmapped through the GTT. The object we create is larger than can - * possibly be mmaped as a whole, and so we must use partial GGTT vma. - * We then check that a write through each partial GGTT vma ends up - * in the right set of pages within the object, and with the expected - * tiling, which we verify by manual swizzling. 
- */ - - obj = huge_gem_object(i915, - nreal << PAGE_SHIFT, - (1 + next_prime_number(i915->ggtt.vm.total >> PAGE_SHIFT)) << PAGE_SHIFT); - if (IS_ERR(obj)) - return PTR_ERR(obj); - - err = i915_gem_object_pin_pages(obj); - if (err) { - pr_err("Failed to allocate %u pages (%lu total), err=%d\n", - nreal, obj->base.size / PAGE_SIZE, err); - goto out; - } - - mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); - - if (1) { - IGT_TIMEOUT(end); - struct tile tile; - - tile.height = 1; - tile.width = 1; - tile.size = 0; - tile.stride = 0; - tile.swizzle = I915_BIT_6_SWIZZLE_NONE; - tile.tiling = I915_TILING_NONE; - - err = check_partial_mapping(obj, &tile, end); - if (err && err != -EINTR) - goto out_unlock; - } - - for (tiling = I915_TILING_X; tiling <= I915_TILING_Y; tiling++) { - IGT_TIMEOUT(end); - unsigned int max_pitch; - unsigned int pitch; - struct tile tile; - - if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) - /* - * The swizzling pattern is actually unknown as it - * varies based on physical address of each page. - * See i915_gem_detect_bit_6_swizzle(). - */ - break; - - tile.tiling = tiling; - switch (tiling) { - case I915_TILING_X: - tile.swizzle = i915->mm.bit_6_swizzle_x; - break; - case I915_TILING_Y: - tile.swizzle = i915->mm.bit_6_swizzle_y; - break; - } - - GEM_BUG_ON(tile.swizzle == I915_BIT_6_SWIZZLE_UNKNOWN); - if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 || - tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17) - continue; - - if (INTEL_GEN(i915) <= 2) { - tile.height = 16; - tile.width = 128; - tile.size = 11; - } else if (tile.tiling == I915_TILING_Y && - HAS_128_BYTE_Y_TILING(i915)) { - tile.height = 32; - tile.width = 128; - tile.size = 12; - } else { - tile.height = 8; - tile.width = 512; - tile.size = 12; - } - - if (INTEL_GEN(i915) < 4) - max_pitch = 8192 / tile.width; - else if (INTEL_GEN(i915) < 7) - max_pitch = 128 * I965_FENCE_MAX_PITCH_VAL / tile.width; - else - max_pitch = 128 * GEN7_FENCE_MAX_PITCH_VAL / tile.width; - - for (pitch = max_pitch; pitch; pitch >>= 1) { - tile.stride = tile.width * pitch; - err = check_partial_mapping(obj, &tile, end); - if (err == -EINTR) - goto next_tiling; - if (err) - goto out_unlock; - - if (pitch > 2 && INTEL_GEN(i915) >= 4) { - tile.stride = tile.width * (pitch - 1); - err = check_partial_mapping(obj, &tile, end); - if (err == -EINTR) - goto next_tiling; - if (err) - goto out_unlock; - } - - if (pitch < max_pitch && INTEL_GEN(i915) >= 4) { - tile.stride = tile.width * (pitch + 1); - err = check_partial_mapping(obj, &tile, end); - if (err == -EINTR) - goto next_tiling; - if (err) - goto out_unlock; - } - } - - if (INTEL_GEN(i915) >= 4) { - for_each_prime_number(pitch, max_pitch) { - tile.stride = tile.width * pitch; - err = check_partial_mapping(obj, &tile, end); - if (err == -EINTR) - goto next_tiling; - if (err) - goto out_unlock; - } - } - -next_tiling: ; - } - -out_unlock: - intel_runtime_pm_put(i915, wakeref); - mutex_unlock(&i915->drm.struct_mutex); - i915_gem_object_unpin_pages(obj); -out: - i915_gem_object_put(obj); - return err; -} - -static int make_obj_busy(struct drm_i915_gem_object *obj) -{ - struct drm_i915_private *i915 = to_i915(obj->base.dev); - struct i915_request *rq; - struct i915_vma *vma; - int err; - - vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); - if (IS_ERR(vma)) - return PTR_ERR(vma); - - err = i915_vma_pin(vma, 0, 0, PIN_USER); - if (err) - return err; - - rq = i915_request_create(i915->engine[RCS0]->kernel_context); - if (IS_ERR(rq)) { - i915_vma_unpin(vma); - return 
PTR_ERR(rq); - } - - err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); - - i915_request_add(rq); - - __i915_gem_object_release_unless_active(obj); - i915_vma_unpin(vma); - - return err; -} - -static bool assert_mmap_offset(struct drm_i915_private *i915, - unsigned long size, - int expected) -{ - struct drm_i915_gem_object *obj; - int err; - - obj = i915_gem_object_create_internal(i915, size); - if (IS_ERR(obj)) - return PTR_ERR(obj); - - err = i915_gem_object_create_mmap_offset(obj); - i915_gem_object_put(obj); - - return err == expected; -} - -static void disable_retire_worker(struct drm_i915_private *i915) -{ - i915_gem_shrinker_unregister(i915); - - intel_gt_pm_get(i915); - - cancel_delayed_work_sync(&i915->gem.retire_work); - flush_work(&i915->gem.idle_work); -} - -static void restore_retire_worker(struct drm_i915_private *i915) -{ - intel_gt_pm_put(i915); - - mutex_lock(&i915->drm.struct_mutex); - igt_flush_test(i915, I915_WAIT_LOCKED); - mutex_unlock(&i915->drm.struct_mutex); - - i915_gem_shrinker_register(i915); -} - -static int igt_mmap_offset_exhaustion(void *arg) -{ - struct drm_i915_private *i915 = arg; - struct drm_mm *mm = &i915->drm.vma_offset_manager->vm_addr_space_mm; - struct drm_i915_gem_object *obj; - struct drm_mm_node resv, *hole; - u64 hole_start, hole_end; - int loop, err; - - /* Disable background reaper */ - disable_retire_worker(i915); - GEM_BUG_ON(!i915->gt.awake); - - /* Trim the device mmap space to only a page */ - memset(&resv, 0, sizeof(resv)); - drm_mm_for_each_hole(hole, mm, hole_start, hole_end) { - resv.start = hole_start; - resv.size = hole_end - hole_start - 1; /* PAGE_SIZE units */ - err = drm_mm_reserve_node(mm, &resv); - if (err) { - pr_err("Failed to trim VMA manager, err=%d\n", err); - goto out_park; - } - break; - } - - /* Just fits! 
*/ - if (!assert_mmap_offset(i915, PAGE_SIZE, 0)) { - pr_err("Unable to insert object into single page hole\n"); - err = -EINVAL; - goto out; - } - - /* Too large */ - if (!assert_mmap_offset(i915, 2*PAGE_SIZE, -ENOSPC)) { - pr_err("Unexpectedly succeeded in inserting too large object into single page hole\n"); - err = -EINVAL; - goto out; - } - - /* Fill the hole, further allocation attempts should then fail */ - obj = i915_gem_object_create_internal(i915, PAGE_SIZE); - if (IS_ERR(obj)) { - err = PTR_ERR(obj); - goto out; - } - - err = i915_gem_object_create_mmap_offset(obj); - if (err) { - pr_err("Unable to insert object into reclaimed hole\n"); - goto err_obj; - } - - if (!assert_mmap_offset(i915, PAGE_SIZE, -ENOSPC)) { - pr_err("Unexpectedly succeeded in inserting object into no holes!\n"); - err = -EINVAL; - goto err_obj; - } - - i915_gem_object_put(obj); - - /* Now fill with busy dead objects that we expect to reap */ - for (loop = 0; loop < 3; loop++) { - intel_wakeref_t wakeref; - - if (i915_terminally_wedged(i915)) - break; - - obj = i915_gem_object_create_internal(i915, PAGE_SIZE); - if (IS_ERR(obj)) { - err = PTR_ERR(obj); - goto out; - } - - err = 0; - mutex_lock(&i915->drm.struct_mutex); - with_intel_runtime_pm(i915, wakeref) - err = make_obj_busy(obj); - mutex_unlock(&i915->drm.struct_mutex); - if (err) { - pr_err("[loop %d] Failed to busy the object\n", loop); - goto err_obj; - } - - /* NB we rely on the _active_ reference to access obj now */ - GEM_BUG_ON(!i915_gem_object_is_active(obj)); - err = i915_gem_object_create_mmap_offset(obj); - if (err) { - pr_err("[loop %d] i915_gem_object_create_mmap_offset failed with err=%d\n", - loop, err); - goto out; - } - } - -out: - drm_mm_remove_node(&resv); -out_park: - restore_retire_worker(i915); - return err; -err_obj: - i915_gem_object_put(obj); - goto out; -} - int i915_gem_object_mock_selftests(void) { static const struct i915_subtest tests[] = { @@ -596,8 +111,6 @@ int i915_gem_object_live_selftests(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { SUBTEST(igt_gem_huge), - SUBTEST(igt_partial_tiling), - SUBTEST(igt_mmap_offset_exhaustion), }; return i915_subtests(tests, i915); diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h index a953125b14c4..9bda36a598b3 100644 --- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h +++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h @@ -16,6 +16,7 @@ selftest(timelines, i915_timeline_live_selftests) selftest(requests, i915_request_live_selftests) selftest(active, i915_active_live_selftests) selftest(objects, i915_gem_object_live_selftests) +selftest(mman, i915_gem_mman_live_selftests) selftest(dmabuf, i915_gem_dmabuf_live_selftests) selftest(vma, i915_vma_live_selftests) selftest(coherency, i915_gem_coherency_live_selftests) -- cgit v1.2.3 From f0e4a06397526d3352a3c80b0575ac22ab24da94 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 28 May 2019 10:29:48 +0100 Subject: drm/i915: Move GEM domain management to its own file Continuing the decluttering of i915_gem.c, that of the read/write domains, perhaps the biggest of GEM's follies? 
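For reference, the code that moves here backs the set-domain uapi: i915_gem_set_domain_ioctl() is what userspace reaches through DRM_IOCTL_I915_GEM_SET_DOMAIN before touching a buffer with the CPU. A minimal userspace sketch of that entry point (the fd/handle plumbing and the set_cpu_domain() wrapper name are hypothetical, not part of this series):

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	/* Ask the kernel to move a GEM object into the CPU read/write domain;
	 * the driver performs any required waits and cache maintenance for us.
	 */
	static int set_cpu_domain(int drm_fd, uint32_t handle)
	{
		struct drm_i915_gem_set_domain sd = {
			.handle = handle,
			.read_domains = I915_GEM_DOMAIN_CPU,
			.write_domain = I915_GEM_DOMAIN_CPU,
		};

		return ioctl(drm_fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
	}

Everything the ioctl does beneath that entry point - the object wait, the page pinning and the set_to_{wc,gtt,cpu}_domain() transitions - is what this patch relocates from i915_gem.c into gem/i915_gem_domain.c.
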
Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190528092956.14910-7-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/Makefile | 1 + drivers/gpu/drm/i915/gem/i915_gem_domain.c | 782 +++++++++++++++++++++ drivers/gpu/drm/i915/gem/i915_gem_object.h | 29 + drivers/gpu/drm/i915/gvt/cmd_parser.c | 4 +- drivers/gpu/drm/i915/gvt/scheduler.c | 6 +- drivers/gpu/drm/i915/i915_cmd_parser.c | 8 +- drivers/gpu/drm/i915/i915_drv.h | 26 - drivers/gpu/drm/i915/i915_gem.c | 777 +------------------- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 4 +- drivers/gpu/drm/i915/i915_gem_render_state.c | 4 +- drivers/gpu/drm/i915/selftests/huge_pages.c | 4 +- .../gpu/drm/i915/selftests/i915_gem_coherency.c | 8 +- drivers/gpu/drm/i915/selftests/i915_gem_context.c | 8 +- 13 files changed, 839 insertions(+), 822 deletions(-) create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_domain.c (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 4c14628dc943..5ffd7e9b19ad 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -87,6 +87,7 @@ i915-y += $(gt-y) # GEM (Graphics Execution Management) code obj-y += gem/ gem-y += \ + gem/i915_gem_domain.o \ gem/i915_gem_object.o \ gem/i915_gem_mman.o \ gem/i915_gem_pages.o \ diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c new file mode 100644 index 000000000000..bbc7fb758186 --- /dev/null +++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c @@ -0,0 +1,782 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2014-2016 Intel Corporation + */ + +#include "i915_drv.h" +#include "i915_gem_clflush.h" +#include "i915_gem_gtt.h" +#include "i915_gem_ioctls.h" +#include "i915_gem_object.h" +#include "i915_vma.h" +#include "intel_frontbuffer.h" + +static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj) +{ + /* + * We manually flush the CPU domain so that we can override and + * force the flush for the display, and perform it asyncrhonously. + */ + i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU); + if (obj->cache_dirty) + i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE); + obj->write_domain = 0; +} + +void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj) +{ + if (!READ_ONCE(obj->pin_global)) + return; + + mutex_lock(&obj->base.dev->struct_mutex); + __i915_gem_object_flush_for_display(obj); + mutex_unlock(&obj->base.dev->struct_mutex); +} + +/** + * Moves a single object to the WC read, and possibly write domain. + * @obj: object to act on + * @write: ask for write access or read only + * + * This function returns when the move is complete, including waiting on + * flushes to occur. + */ +int +i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write) +{ + int ret; + + lockdep_assert_held(&obj->base.dev->struct_mutex); + + ret = i915_gem_object_wait(obj, + I915_WAIT_INTERRUPTIBLE | + I915_WAIT_LOCKED | + (write ? I915_WAIT_ALL : 0), + MAX_SCHEDULE_TIMEOUT); + if (ret) + return ret; + + if (obj->write_domain == I915_GEM_DOMAIN_WC) + return 0; + + /* Flush and acquire obj->pages so that we are coherent through + * direct access in memory with previous cached writes through + * shmemfs and that our cache domain tracking remains valid. 
+ * For example, if the obj->filp was moved to swap without us + * being notified and releasing the pages, we would mistakenly + * continue to assume that the obj remained out of the CPU cached + * domain. + */ + ret = i915_gem_object_pin_pages(obj); + if (ret) + return ret; + + i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_WC); + + /* Serialise direct access to this object with the barriers for + * coherent writes from the GPU, by effectively invalidating the + * WC domain upon first access. + */ + if ((obj->read_domains & I915_GEM_DOMAIN_WC) == 0) + mb(); + + /* It should now be out of any other write domains, and we can update + * the domain values for our changes. + */ + GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_WC) != 0); + obj->read_domains |= I915_GEM_DOMAIN_WC; + if (write) { + obj->read_domains = I915_GEM_DOMAIN_WC; + obj->write_domain = I915_GEM_DOMAIN_WC; + obj->mm.dirty = true; + } + + i915_gem_object_unpin_pages(obj); + return 0; +} + +/** + * Moves a single object to the GTT read, and possibly write domain. + * @obj: object to act on + * @write: ask for write access or read only + * + * This function returns when the move is complete, including waiting on + * flushes to occur. + */ +int +i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) +{ + int ret; + + lockdep_assert_held(&obj->base.dev->struct_mutex); + + ret = i915_gem_object_wait(obj, + I915_WAIT_INTERRUPTIBLE | + I915_WAIT_LOCKED | + (write ? I915_WAIT_ALL : 0), + MAX_SCHEDULE_TIMEOUT); + if (ret) + return ret; + + if (obj->write_domain == I915_GEM_DOMAIN_GTT) + return 0; + + /* Flush and acquire obj->pages so that we are coherent through + * direct access in memory with previous cached writes through + * shmemfs and that our cache domain tracking remains valid. + * For example, if the obj->filp was moved to swap without us + * being notified and releasing the pages, we would mistakenly + * continue to assume that the obj remained out of the CPU cached + * domain. + */ + ret = i915_gem_object_pin_pages(obj); + if (ret) + return ret; + + i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_GTT); + + /* Serialise direct access to this object with the barriers for + * coherent writes from the GPU, by effectively invalidating the + * GTT domain upon first access. + */ + if ((obj->read_domains & I915_GEM_DOMAIN_GTT) == 0) + mb(); + + /* It should now be out of any other write domains, and we can update + * the domain values for our changes. + */ + GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0); + obj->read_domains |= I915_GEM_DOMAIN_GTT; + if (write) { + obj->read_domains = I915_GEM_DOMAIN_GTT; + obj->write_domain = I915_GEM_DOMAIN_GTT; + obj->mm.dirty = true; + } + + i915_gem_object_unpin_pages(obj); + return 0; +} + +/** + * Changes the cache-level of an object across all VMA. + * @obj: object to act on + * @cache_level: new cache level to set for the object + * + * After this function returns, the object will be in the new cache-level + * across all GTT and the contents of the backing storage will be coherent, + * with respect to the new cache-level. In order to keep the backing storage + * coherent for all users, we only allow a single cache level to be set + * globally on the object and prevent it from being changed whilst the + * hardware is reading from the object. 
That is if the object is currently + * on the scanout it will be set to uncached (or equivalent display + * cache coherency) and all non-MOCS GPU access will also be uncached so + * that all direct access to the scanout remains coherent. + */ +int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, + enum i915_cache_level cache_level) +{ + struct i915_vma *vma; + int ret; + + lockdep_assert_held(&obj->base.dev->struct_mutex); + + if (obj->cache_level == cache_level) + return 0; + + /* Inspect the list of currently bound VMA and unbind any that would + * be invalid given the new cache-level. This is principally to + * catch the issue of the CS prefetch crossing page boundaries and + * reading an invalid PTE on older architectures. + */ +restart: + list_for_each_entry(vma, &obj->vma.list, obj_link) { + if (!drm_mm_node_allocated(&vma->node)) + continue; + + if (i915_vma_is_pinned(vma)) { + DRM_DEBUG("can not change the cache level of pinned objects\n"); + return -EBUSY; + } + + if (!i915_vma_is_closed(vma) && + i915_gem_valid_gtt_space(vma, cache_level)) + continue; + + ret = i915_vma_unbind(vma); + if (ret) + return ret; + + /* As unbinding may affect other elements in the + * obj->vma_list (due to side-effects from retiring + * an active vma), play safe and restart the iterator. + */ + goto restart; + } + + /* We can reuse the existing drm_mm nodes but need to change the + * cache-level on the PTE. We could simply unbind them all and + * rebind with the correct cache-level on next use. However since + * we already have a valid slot, dma mapping, pages etc, we may as + * rewrite the PTE in the belief that doing so tramples upon less + * state and so involves less work. + */ + if (obj->bind_count) { + /* Before we change the PTE, the GPU must not be accessing it. + * If we wait upon the object, we know that all the bound + * VMA are no longer active. + */ + ret = i915_gem_object_wait(obj, + I915_WAIT_INTERRUPTIBLE | + I915_WAIT_LOCKED | + I915_WAIT_ALL, + MAX_SCHEDULE_TIMEOUT); + if (ret) + return ret; + + if (!HAS_LLC(to_i915(obj->base.dev)) && + cache_level != I915_CACHE_NONE) { + /* Access to snoopable pages through the GTT is + * incoherent and on some machines causes a hard + * lockup. Relinquish the CPU mmaping to force + * userspace to refault in the pages and we can + * then double check if the GTT mapping is still + * valid for that pointer access. + */ + i915_gem_object_release_mmap(obj); + + /* As we no longer need a fence for GTT access, + * we can relinquish it now (and so prevent having + * to steal a fence from someone else on the next + * fence request). Note GPU activity would have + * dropped the fence as all snoopable access is + * supposed to be linear. + */ + for_each_ggtt_vma(vma, obj) { + ret = i915_vma_put_fence(vma); + if (ret) + return ret; + } + } else { + /* We either have incoherent backing store and + * so no GTT access or the architecture is fully + * coherent. In such cases, existing GTT mmaps + * ignore the cache bit in the PTE and we can + * rewrite it without confusing the GPU or having + * to force userspace to fault back in its mmaps. 
+ */ + } + + list_for_each_entry(vma, &obj->vma.list, obj_link) { + if (!drm_mm_node_allocated(&vma->node)) + continue; + + ret = i915_vma_bind(vma, cache_level, PIN_UPDATE); + if (ret) + return ret; + } + } + + list_for_each_entry(vma, &obj->vma.list, obj_link) + vma->node.color = cache_level; + i915_gem_object_set_cache_coherency(obj, cache_level); + obj->cache_dirty = true; /* Always invalidate stale cachelines */ + + return 0; +} + +int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_i915_gem_caching *args = data; + struct drm_i915_gem_object *obj; + int err = 0; + + rcu_read_lock(); + obj = i915_gem_object_lookup_rcu(file, args->handle); + if (!obj) { + err = -ENOENT; + goto out; + } + + switch (obj->cache_level) { + case I915_CACHE_LLC: + case I915_CACHE_L3_LLC: + args->caching = I915_CACHING_CACHED; + break; + + case I915_CACHE_WT: + args->caching = I915_CACHING_DISPLAY; + break; + + default: + args->caching = I915_CACHING_NONE; + break; + } +out: + rcu_read_unlock(); + return err; +} + +int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_i915_private *i915 = to_i915(dev); + struct drm_i915_gem_caching *args = data; + struct drm_i915_gem_object *obj; + enum i915_cache_level level; + int ret = 0; + + switch (args->caching) { + case I915_CACHING_NONE: + level = I915_CACHE_NONE; + break; + case I915_CACHING_CACHED: + /* + * Due to a HW issue on BXT A stepping, GPU stores via a + * snooped mapping may leave stale data in a corresponding CPU + * cacheline, whereas normally such cachelines would get + * invalidated. + */ + if (!HAS_LLC(i915) && !HAS_SNOOP(i915)) + return -ENODEV; + + level = I915_CACHE_LLC; + break; + case I915_CACHING_DISPLAY: + level = HAS_WT(i915) ? I915_CACHE_WT : I915_CACHE_NONE; + break; + default: + return -EINVAL; + } + + obj = i915_gem_object_lookup(file, args->handle); + if (!obj) + return -ENOENT; + + /* + * The caching mode of proxy object is handled by its generator, and + * not allowed to be changed by userspace. + */ + if (i915_gem_object_is_proxy(obj)) { + ret = -ENXIO; + goto out; + } + + if (obj->cache_level == level) + goto out; + + ret = i915_gem_object_wait(obj, + I915_WAIT_INTERRUPTIBLE, + MAX_SCHEDULE_TIMEOUT); + if (ret) + goto out; + + ret = i915_mutex_lock_interruptible(dev); + if (ret) + goto out; + + ret = i915_gem_object_set_cache_level(obj, level); + mutex_unlock(&dev->struct_mutex); + +out: + i915_gem_object_put(obj); + return ret; +} + +/* + * Prepare buffer for display plane (scanout, cursors, etc). Can be called from + * an uninterruptible phase (modesetting) and allows any flushes to be pipelined + * (for pageflips). We only flush the caches while preparing the buffer for + * display, the callers are responsible for frontbuffer flush. + */ +struct i915_vma * +i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, + u32 alignment, + const struct i915_ggtt_view *view, + unsigned int flags) +{ + struct i915_vma *vma; + int ret; + + lockdep_assert_held(&obj->base.dev->struct_mutex); + + /* Mark the global pin early so that we account for the + * display coherency whilst setting up the cache domains. + */ + obj->pin_global++; + + /* The display engine is not coherent with the LLC cache on gen6. As + * a result, we make sure that the pinning that is about to occur is + * done with uncached PTEs. This is lowest common denominator for all + * chipsets. 
+ * + * However for gen6+, we could do better by using the GFDT bit instead + * of uncaching, which would allow us to flush all the LLC-cached data + * with that bit in the PTE to main memory with just one PIPE_CONTROL. + */ + ret = i915_gem_object_set_cache_level(obj, + HAS_WT(to_i915(obj->base.dev)) ? + I915_CACHE_WT : I915_CACHE_NONE); + if (ret) { + vma = ERR_PTR(ret); + goto err_unpin_global; + } + + /* As the user may map the buffer once pinned in the display plane + * (e.g. libkms for the bootup splash), we have to ensure that we + * always use map_and_fenceable for all scanout buffers. However, + * it may simply be too big to fit into mappable, in which case + * put it anyway and hope that userspace can cope (but always first + * try to preserve the existing ABI). + */ + vma = ERR_PTR(-ENOSPC); + if ((flags & PIN_MAPPABLE) == 0 && + (!view || view->type == I915_GGTT_VIEW_NORMAL)) + vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, + flags | + PIN_MAPPABLE | + PIN_NONBLOCK); + if (IS_ERR(vma)) + vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags); + if (IS_ERR(vma)) + goto err_unpin_global; + + vma->display_alignment = max_t(u64, vma->display_alignment, alignment); + + __i915_gem_object_flush_for_display(obj); + + /* It should now be out of any other write domains, and we can update + * the domain values for our changes. + */ + obj->read_domains |= I915_GEM_DOMAIN_GTT; + + return vma; + +err_unpin_global: + obj->pin_global--; + return vma; +} + +static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj) +{ + struct drm_i915_private *i915 = to_i915(obj->base.dev); + struct list_head *list; + struct i915_vma *vma; + + GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); + + mutex_lock(&i915->ggtt.vm.mutex); + for_each_ggtt_vma(vma, obj) { + if (!drm_mm_node_allocated(&vma->node)) + continue; + + list_move_tail(&vma->vm_link, &vma->vm->bound_list); + } + mutex_unlock(&i915->ggtt.vm.mutex); + + spin_lock(&i915->mm.obj_lock); + list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list; + list_move_tail(&obj->mm.link, list); + spin_unlock(&i915->mm.obj_lock); +} + +void +i915_gem_object_unpin_from_display_plane(struct i915_vma *vma) +{ + lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); + + if (WARN_ON(vma->obj->pin_global == 0)) + return; + + if (--vma->obj->pin_global == 0) + vma->display_alignment = I915_GTT_MIN_ALIGNMENT; + + /* Bump the LRU to try and avoid premature eviction whilst flipping */ + i915_gem_object_bump_inactive_ggtt(vma->obj); + + i915_vma_unpin(vma); +} + +/** + * Moves a single object to the CPU read, and possibly write domain. + * @obj: object to act on + * @write: requesting write or read-only access + * + * This function returns when the move is complete, including waiting on + * flushes to occur. + */ +int +i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write) +{ + int ret; + + lockdep_assert_held(&obj->base.dev->struct_mutex); + + ret = i915_gem_object_wait(obj, + I915_WAIT_INTERRUPTIBLE | + I915_WAIT_LOCKED | + (write ? I915_WAIT_ALL : 0), + MAX_SCHEDULE_TIMEOUT); + if (ret) + return ret; + + i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU); + + /* Flush the CPU cache if it's still invalid. */ + if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) { + i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC); + obj->read_domains |= I915_GEM_DOMAIN_CPU; + } + + /* It should now be out of any other write domains, and we can update + * the domain values for our changes. 
+ */ + GEM_BUG_ON(obj->write_domain & ~I915_GEM_DOMAIN_CPU); + + /* If we're writing through the CPU, then the GPU read domains will + * need to be invalidated at next use. + */ + if (write) + __start_cpu_write(obj); + + return 0; +} + +static inline enum fb_op_origin +fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain) +{ + return (domain == I915_GEM_DOMAIN_GTT ? + obj->frontbuffer_ggtt_origin : ORIGIN_CPU); +} + +/** + * Called when user space prepares to use an object with the CPU, either + * through the mmap ioctl's mapping or a GTT mapping. + * @dev: drm device + * @data: ioctl data blob + * @file: drm file + */ +int +i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_i915_gem_set_domain *args = data; + struct drm_i915_gem_object *obj; + u32 read_domains = args->read_domains; + u32 write_domain = args->write_domain; + int err; + + /* Only handle setting domains to types used by the CPU. */ + if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS) + return -EINVAL; + + /* + * Having something in the write domain implies it's in the read + * domain, and only that read domain. Enforce that in the request. + */ + if (write_domain && read_domains != write_domain) + return -EINVAL; + + if (!read_domains) + return 0; + + obj = i915_gem_object_lookup(file, args->handle); + if (!obj) + return -ENOENT; + + /* + * Already in the desired write domain? Nothing for us to do! + * + * We apply a little bit of cunning here to catch a broader set of + * no-ops. If obj->write_domain is set, we must be in the same + * obj->read_domains, and only that domain. Therefore, if that + * obj->write_domain matches the request read_domains, we are + * already in the same read/write domain and can skip the operation, + * without having to further check the requested write_domain. + */ + if (READ_ONCE(obj->write_domain) == read_domains) { + err = 0; + goto out; + } + + /* + * Try to flush the object off the GPU without holding the lock. + * We will repeat the flush holding the lock in the normal manner + * to catch cases where we are gazumped. + */ + err = i915_gem_object_wait(obj, + I915_WAIT_INTERRUPTIBLE | + I915_WAIT_PRIORITY | + (write_domain ? I915_WAIT_ALL : 0), + MAX_SCHEDULE_TIMEOUT); + if (err) + goto out; + + /* + * Proxy objects do not control access to the backing storage, ergo + * they cannot be used as a means to manipulate the cache domain + * tracking for that backing storage. The proxy object is always + * considered to be outside of any cache domain. + */ + if (i915_gem_object_is_proxy(obj)) { + err = -ENXIO; + goto out; + } + + /* + * Flush and acquire obj->pages so that we are coherent through + * direct access in memory with previous cached writes through + * shmemfs and that our cache domain tracking remains valid. + * For example, if the obj->filp was moved to swap without us + * being notified and releasing the pages, we would mistakenly + * continue to assume that the obj remained out of the CPU cached + * domain. 
+ */ + err = i915_gem_object_pin_pages(obj); + if (err) + goto out; + + err = i915_mutex_lock_interruptible(dev); + if (err) + goto out_unpin; + + if (read_domains & I915_GEM_DOMAIN_WC) + err = i915_gem_object_set_to_wc_domain(obj, write_domain); + else if (read_domains & I915_GEM_DOMAIN_GTT) + err = i915_gem_object_set_to_gtt_domain(obj, write_domain); + else + err = i915_gem_object_set_to_cpu_domain(obj, write_domain); + + /* And bump the LRU for this access */ + i915_gem_object_bump_inactive_ggtt(obj); + + mutex_unlock(&dev->struct_mutex); + + if (write_domain != 0) + intel_fb_obj_invalidate(obj, + fb_write_origin(obj, write_domain)); + +out_unpin: + i915_gem_object_unpin_pages(obj); +out: + i915_gem_object_put(obj); + return err; +} + +/* + * Pins the specified object's pages and synchronizes the object with + * GPU accesses. Sets needs_clflush to non-zero if the caller should + * flush the object from the CPU cache. + */ +int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj, + unsigned int *needs_clflush) +{ + int ret; + + lockdep_assert_held(&obj->base.dev->struct_mutex); + + *needs_clflush = 0; + if (!i915_gem_object_has_struct_page(obj)) + return -ENODEV; + + ret = i915_gem_object_wait(obj, + I915_WAIT_INTERRUPTIBLE | + I915_WAIT_LOCKED, + MAX_SCHEDULE_TIMEOUT); + if (ret) + return ret; + + ret = i915_gem_object_pin_pages(obj); + if (ret) + return ret; + + if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ || + !static_cpu_has(X86_FEATURE_CLFLUSH)) { + ret = i915_gem_object_set_to_cpu_domain(obj, false); + if (ret) + goto err_unpin; + else + goto out; + } + + i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU); + + /* If we're not in the cpu read domain, set ourself into the gtt + * read domain and manually flush cachelines (if required). This + * optimizes for the case when the gpu will dirty the data + * anyway again before the next pread happens. + */ + if (!obj->cache_dirty && + !(obj->read_domains & I915_GEM_DOMAIN_CPU)) + *needs_clflush = CLFLUSH_BEFORE; + +out: + /* return with the pages pinned */ + return 0; + +err_unpin: + i915_gem_object_unpin_pages(obj); + return ret; +} + +int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj, + unsigned int *needs_clflush) +{ + int ret; + + lockdep_assert_held(&obj->base.dev->struct_mutex); + + *needs_clflush = 0; + if (!i915_gem_object_has_struct_page(obj)) + return -ENODEV; + + ret = i915_gem_object_wait(obj, + I915_WAIT_INTERRUPTIBLE | + I915_WAIT_LOCKED | + I915_WAIT_ALL, + MAX_SCHEDULE_TIMEOUT); + if (ret) + return ret; + + ret = i915_gem_object_pin_pages(obj); + if (ret) + return ret; + + if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE || + !static_cpu_has(X86_FEATURE_CLFLUSH)) { + ret = i915_gem_object_set_to_cpu_domain(obj, true); + if (ret) + goto err_unpin; + else + goto out; + } + + i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU); + + /* If we're not in the cpu write domain, set ourself into the + * gtt write domain and manually flush cachelines (as required). + * This optimizes for the case when the gpu will use the data + * right away and we therefore have to clflush anyway. + */ + if (!obj->cache_dirty) { + *needs_clflush |= CLFLUSH_AFTER; + + /* + * Same trick applies to invalidate partially written + * cachelines read before writing. 
+ */ + if (!(obj->read_domains & I915_GEM_DOMAIN_CPU)) + *needs_clflush |= CLFLUSH_BEFORE; + } + +out: + intel_fb_obj_invalidate(obj, ORIGIN_CPU); + obj->mm.dirty = true; + /* return with the pages pinned */ + return 0; + +err_unpin: + i915_gem_object_unpin_pages(obj); + return ret; +} diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h index 07f487cbff79..8cf082abb0ab 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h @@ -15,6 +15,8 @@ #include "i915_gem_object_types.h" +#include "i915_gem_gtt.h" + void i915_gem_init__objects(struct drm_i915_private *i915); struct drm_i915_gem_object *i915_gem_object_alloc(void); @@ -358,6 +360,20 @@ void i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains); +int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj, + unsigned int *needs_clflush); +int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj, + unsigned int *needs_clflush); +#define CLFLUSH_BEFORE BIT(0) +#define CLFLUSH_AFTER BIT(1) +#define CLFLUSH_FLAGS (CLFLUSH_BEFORE | CLFLUSH_AFTER) + +static inline void +i915_gem_object_finish_access(struct drm_i915_gem_object *obj) +{ + i915_gem_object_unpin_pages(obj); +} + static inline struct intel_engine_cs * i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj) { @@ -379,6 +395,19 @@ void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj, unsigned int cache_level); void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj); +int __must_check +i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write); +int __must_check +i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write); +int __must_check +i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write); +struct i915_vma * __must_check +i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, + u32 alignment, + const struct i915_ggtt_view *view, + unsigned int flags); +void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma); + static inline bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj) { if (obj->cache_dirty) diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index 7584bf0aeaa4..e3608b170105 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -1764,7 +1764,7 @@ static int perform_bb_shadow(struct parser_exec_state *s) goto err_free_bb; } - ret = i915_gem_obj_prepare_shmem_write(bb->obj, &bb->clflush); + ret = i915_gem_object_prepare_write(bb->obj, &bb->clflush); if (ret) goto err_free_obj; @@ -1813,7 +1813,7 @@ static int perform_bb_shadow(struct parser_exec_state *s) err_unmap: i915_gem_object_unpin_map(bb->obj); err_finish_shmem_access: - i915_gem_obj_finish_shmem_access(bb->obj); + i915_gem_object_finish_access(bb->obj); err_free_obj: i915_gem_object_put(bb->obj); err_free_bb: diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 38897d241f5f..3a691447f76c 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -482,7 +482,7 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload) bb->obj->base.size); bb->clflush &= ~CLFLUSH_AFTER; } - i915_gem_obj_finish_shmem_access(bb->obj); + i915_gem_object_finish_access(bb->obj); bb->accessing = false; } else { @@ -510,7 +510,7 @@ static int 
prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload) if (ret) goto err; - i915_gem_obj_finish_shmem_access(bb->obj); + i915_gem_object_finish_access(bb->obj); bb->accessing = false; ret = i915_vma_move_to_active(bb->vma, @@ -588,7 +588,7 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload) list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) { if (bb->obj) { if (bb->accessing) - i915_gem_obj_finish_shmem_access(bb->obj); + i915_gem_object_finish_access(bb->obj); if (bb->va && !IS_ERR(bb->va)) i915_gem_object_unpin_map(bb->obj); diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c index e9fadcb4d592..c893bd4eb2c8 100644 --- a/drivers/gpu/drm/i915/i915_cmd_parser.c +++ b/drivers/gpu/drm/i915/i915_cmd_parser.c @@ -1058,11 +1058,11 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj, void *dst, *src; int ret; - ret = i915_gem_obj_prepare_shmem_read(src_obj, &src_needs_clflush); + ret = i915_gem_object_prepare_read(src_obj, &src_needs_clflush); if (ret) return ERR_PTR(ret); - ret = i915_gem_obj_prepare_shmem_write(dst_obj, &dst_needs_clflush); + ret = i915_gem_object_prepare_write(dst_obj, &dst_needs_clflush); if (ret) { dst = ERR_PTR(ret); goto unpin_src; @@ -1120,9 +1120,9 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj, *needs_clflush_after = dst_needs_clflush & CLFLUSH_AFTER; unpin_dst: - i915_gem_obj_finish_shmem_access(dst_obj); + i915_gem_object_finish_access(dst_obj); unpin_src: - i915_gem_obj_finish_shmem_access(src_obj); + i915_gem_object_finish_access(src_obj); return dst; } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 584ebb901e18..596af542afea 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2814,20 +2814,6 @@ static inline int __sg_page_count(const struct scatterlist *sg) return sg->length >> PAGE_SHIFT; } -int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj, - unsigned int *needs_clflush); -int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj, - unsigned int *needs_clflush); -#define CLFLUSH_BEFORE BIT(0) -#define CLFLUSH_AFTER BIT(1) -#define CLFLUSH_FLAGS (CLFLUSH_BEFORE | CLFLUSH_AFTER) - -static inline void -i915_gem_obj_finish_shmem_access(struct drm_i915_gem_object *obj) -{ - i915_gem_object_unpin_pages(obj); -} - static inline int __must_check i915_mutex_lock_interruptible(struct drm_device *dev) { @@ -2890,18 +2876,6 @@ int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj, const struct i915_sched_attr *attr); #define I915_PRIORITY_DISPLAY I915_USER_PRIORITY(I915_PRIORITY_MAX) -int __must_check -i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write); -int __must_check -i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write); -int __must_check -i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write); -struct i915_vma * __must_check -i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, - u32 alignment, - const struct i915_ggtt_view *view, - unsigned int flags); -void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma); int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file); void i915_gem_release(struct drm_device *dev, struct drm_file *file); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index bde25d5326ba..0570907cc9d2 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ 
b/drivers/gpu/drm/i915/i915_gem.c @@ -462,123 +462,6 @@ void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv) } } -/* - * Pins the specified object's pages and synchronizes the object with - * GPU accesses. Sets needs_clflush to non-zero if the caller should - * flush the object from the CPU cache. - */ -int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj, - unsigned int *needs_clflush) -{ - int ret; - - lockdep_assert_held(&obj->base.dev->struct_mutex); - - *needs_clflush = 0; - if (!i915_gem_object_has_struct_page(obj)) - return -ENODEV; - - ret = i915_gem_object_wait(obj, - I915_WAIT_INTERRUPTIBLE | - I915_WAIT_LOCKED, - MAX_SCHEDULE_TIMEOUT); - if (ret) - return ret; - - ret = i915_gem_object_pin_pages(obj); - if (ret) - return ret; - - if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ || - !static_cpu_has(X86_FEATURE_CLFLUSH)) { - ret = i915_gem_object_set_to_cpu_domain(obj, false); - if (ret) - goto err_unpin; - else - goto out; - } - - i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU); - - /* If we're not in the cpu read domain, set ourself into the gtt - * read domain and manually flush cachelines (if required). This - * optimizes for the case when the gpu will dirty the data - * anyway again before the next pread happens. - */ - if (!obj->cache_dirty && - !(obj->read_domains & I915_GEM_DOMAIN_CPU)) - *needs_clflush = CLFLUSH_BEFORE; - -out: - /* return with the pages pinned */ - return 0; - -err_unpin: - i915_gem_object_unpin_pages(obj); - return ret; -} - -int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj, - unsigned int *needs_clflush) -{ - int ret; - - lockdep_assert_held(&obj->base.dev->struct_mutex); - - *needs_clflush = 0; - if (!i915_gem_object_has_struct_page(obj)) - return -ENODEV; - - ret = i915_gem_object_wait(obj, - I915_WAIT_INTERRUPTIBLE | - I915_WAIT_LOCKED | - I915_WAIT_ALL, - MAX_SCHEDULE_TIMEOUT); - if (ret) - return ret; - - ret = i915_gem_object_pin_pages(obj); - if (ret) - return ret; - - if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE || - !static_cpu_has(X86_FEATURE_CLFLUSH)) { - ret = i915_gem_object_set_to_cpu_domain(obj, true); - if (ret) - goto err_unpin; - else - goto out; - } - - i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU); - - /* If we're not in the cpu write domain, set ourself into the - * gtt write domain and manually flush cachelines (as required). - * This optimizes for the case when the gpu will use the data - * right away and we therefore have to clflush anyway. - */ - if (!obj->cache_dirty) { - *needs_clflush |= CLFLUSH_AFTER; - - /* - * Same trick applies to invalidate partially written - * cachelines read before writing. 
- */ - if (!(obj->read_domains & I915_GEM_DOMAIN_CPU)) - *needs_clflush |= CLFLUSH_BEFORE; - } - -out: - intel_fb_obj_invalidate(obj, ORIGIN_CPU); - obj->mm.dirty = true; - /* return with the pages pinned */ - return 0; - -err_unpin: - i915_gem_object_unpin_pages(obj); - return ret; -} - static int shmem_pread(struct page *page, int offset, int len, char __user *user_data, bool needs_clflush) @@ -612,7 +495,7 @@ i915_gem_shmem_pread(struct drm_i915_gem_object *obj, if (ret) return ret; - ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush); + ret = i915_gem_object_prepare_read(obj, &needs_clflush); mutex_unlock(&obj->base.dev->struct_mutex); if (ret) return ret; @@ -634,7 +517,7 @@ i915_gem_shmem_pread(struct drm_i915_gem_object *obj, offset = 0; } - i915_gem_obj_finish_shmem_access(obj); + i915_gem_object_finish_access(obj); return ret; } @@ -1009,7 +892,7 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj, if (ret) return ret; - ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush); + ret = i915_gem_object_prepare_write(obj, &needs_clflush); mutex_unlock(&i915->drm.struct_mutex); if (ret) return ret; @@ -1041,7 +924,7 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj, } intel_fb_obj_flush(obj, ORIGIN_CPU); - i915_gem_obj_finish_shmem_access(obj); + i915_gem_object_finish_access(obj); return ret; } @@ -1130,150 +1013,6 @@ err: return ret; } -static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj) -{ - struct drm_i915_private *i915 = to_i915(obj->base.dev); - struct list_head *list; - struct i915_vma *vma; - - GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); - - mutex_lock(&i915->ggtt.vm.mutex); - for_each_ggtt_vma(vma, obj) { - if (!drm_mm_node_allocated(&vma->node)) - continue; - - list_move_tail(&vma->vm_link, &vma->vm->bound_list); - } - mutex_unlock(&i915->ggtt.vm.mutex); - - spin_lock(&i915->mm.obj_lock); - list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list; - list_move_tail(&obj->mm.link, list); - spin_unlock(&i915->mm.obj_lock); -} - -static inline enum fb_op_origin -fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain) -{ - return (domain == I915_GEM_DOMAIN_GTT ? - obj->frontbuffer_ggtt_origin : ORIGIN_CPU); -} - -/** - * Called when user space prepares to use an object with the CPU, either - * through the mmap ioctl's mapping or a GTT mapping. - * @dev: drm device - * @data: ioctl data blob - * @file: drm file - */ -int -i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, - struct drm_file *file) -{ - struct drm_i915_gem_set_domain *args = data; - struct drm_i915_gem_object *obj; - u32 read_domains = args->read_domains; - u32 write_domain = args->write_domain; - int err; - - /* Only handle setting domains to types used by the CPU. */ - if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS) - return -EINVAL; - - /* - * Having something in the write domain implies it's in the read - * domain, and only that read domain. Enforce that in the request. - */ - if (write_domain && read_domains != write_domain) - return -EINVAL; - - if (!read_domains) - return 0; - - obj = i915_gem_object_lookup(file, args->handle); - if (!obj) - return -ENOENT; - - /* - * Already in the desired write domain? Nothing for us to do! - * - * We apply a little bit of cunning here to catch a broader set of - * no-ops. If obj->write_domain is set, we must be in the same - * obj->read_domains, and only that domain. 
Therefore, if that - * obj->write_domain matches the request read_domains, we are - * already in the same read/write domain and can skip the operation, - * without having to further check the requested write_domain. - */ - if (READ_ONCE(obj->write_domain) == read_domains) { - err = 0; - goto out; - } - - /* - * Try to flush the object off the GPU without holding the lock. - * We will repeat the flush holding the lock in the normal manner - * to catch cases where we are gazumped. - */ - err = i915_gem_object_wait(obj, - I915_WAIT_INTERRUPTIBLE | - I915_WAIT_PRIORITY | - (write_domain ? I915_WAIT_ALL : 0), - MAX_SCHEDULE_TIMEOUT); - if (err) - goto out; - - /* - * Proxy objects do not control access to the backing storage, ergo - * they cannot be used as a means to manipulate the cache domain - * tracking for that backing storage. The proxy object is always - * considered to be outside of any cache domain. - */ - if (i915_gem_object_is_proxy(obj)) { - err = -ENXIO; - goto out; - } - - /* - * Flush and acquire obj->pages so that we are coherent through - * direct access in memory with previous cached writes through - * shmemfs and that our cache domain tracking remains valid. - * For example, if the obj->filp was moved to swap without us - * being notified and releasing the pages, we would mistakenly - * continue to assume that the obj remained out of the CPU cached - * domain. - */ - err = i915_gem_object_pin_pages(obj); - if (err) - goto out; - - err = i915_mutex_lock_interruptible(dev); - if (err) - goto out_unpin; - - if (read_domains & I915_GEM_DOMAIN_WC) - err = i915_gem_object_set_to_wc_domain(obj, write_domain); - else if (read_domains & I915_GEM_DOMAIN_GTT) - err = i915_gem_object_set_to_gtt_domain(obj, write_domain); - else - err = i915_gem_object_set_to_cpu_domain(obj, write_domain); - - /* And bump the LRU for this access */ - i915_gem_object_bump_inactive_ggtt(obj); - - mutex_unlock(&dev->struct_mutex); - - if (write_domain != 0) - intel_fb_obj_invalidate(obj, - fb_write_origin(obj, write_domain)); - -out_unpin: - i915_gem_object_unpin_pages(obj); -out: - i915_gem_object_put(obj); - return err; -} - /** * Called when user space has done writes to this buffer * @dev: drm device @@ -1542,514 +1281,6 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915, return 0; } -static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj) -{ - /* - * We manually flush the CPU domain so that we can override and - * force the flush for the display, and perform it asyncrhonously. - */ - i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU); - if (obj->cache_dirty) - i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE); - obj->write_domain = 0; -} - -void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj) -{ - if (!READ_ONCE(obj->pin_global)) - return; - - mutex_lock(&obj->base.dev->struct_mutex); - __i915_gem_object_flush_for_display(obj); - mutex_unlock(&obj->base.dev->struct_mutex); -} - -/** - * Moves a single object to the WC read, and possibly write domain. - * @obj: object to act on - * @write: ask for write access or read only - * - * This function returns when the move is complete, including waiting on - * flushes to occur. - */ -int -i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write) -{ - int ret; - - lockdep_assert_held(&obj->base.dev->struct_mutex); - - ret = i915_gem_object_wait(obj, - I915_WAIT_INTERRUPTIBLE | - I915_WAIT_LOCKED | - (write ? 
I915_WAIT_ALL : 0), - MAX_SCHEDULE_TIMEOUT); - if (ret) - return ret; - - if (obj->write_domain == I915_GEM_DOMAIN_WC) - return 0; - - /* Flush and acquire obj->pages so that we are coherent through - * direct access in memory with previous cached writes through - * shmemfs and that our cache domain tracking remains valid. - * For example, if the obj->filp was moved to swap without us - * being notified and releasing the pages, we would mistakenly - * continue to assume that the obj remained out of the CPU cached - * domain. - */ - ret = i915_gem_object_pin_pages(obj); - if (ret) - return ret; - - i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_WC); - - /* Serialise direct access to this object with the barriers for - * coherent writes from the GPU, by effectively invalidating the - * WC domain upon first access. - */ - if ((obj->read_domains & I915_GEM_DOMAIN_WC) == 0) - mb(); - - /* It should now be out of any other write domains, and we can update - * the domain values for our changes. - */ - GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_WC) != 0); - obj->read_domains |= I915_GEM_DOMAIN_WC; - if (write) { - obj->read_domains = I915_GEM_DOMAIN_WC; - obj->write_domain = I915_GEM_DOMAIN_WC; - obj->mm.dirty = true; - } - - i915_gem_object_unpin_pages(obj); - return 0; -} - -/** - * Moves a single object to the GTT read, and possibly write domain. - * @obj: object to act on - * @write: ask for write access or read only - * - * This function returns when the move is complete, including waiting on - * flushes to occur. - */ -int -i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) -{ - int ret; - - lockdep_assert_held(&obj->base.dev->struct_mutex); - - ret = i915_gem_object_wait(obj, - I915_WAIT_INTERRUPTIBLE | - I915_WAIT_LOCKED | - (write ? I915_WAIT_ALL : 0), - MAX_SCHEDULE_TIMEOUT); - if (ret) - return ret; - - if (obj->write_domain == I915_GEM_DOMAIN_GTT) - return 0; - - /* Flush and acquire obj->pages so that we are coherent through - * direct access in memory with previous cached writes through - * shmemfs and that our cache domain tracking remains valid. - * For example, if the obj->filp was moved to swap without us - * being notified and releasing the pages, we would mistakenly - * continue to assume that the obj remained out of the CPU cached - * domain. - */ - ret = i915_gem_object_pin_pages(obj); - if (ret) - return ret; - - i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_GTT); - - /* Serialise direct access to this object with the barriers for - * coherent writes from the GPU, by effectively invalidating the - * GTT domain upon first access. - */ - if ((obj->read_domains & I915_GEM_DOMAIN_GTT) == 0) - mb(); - - /* It should now be out of any other write domains, and we can update - * the domain values for our changes. - */ - GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0); - obj->read_domains |= I915_GEM_DOMAIN_GTT; - if (write) { - obj->read_domains = I915_GEM_DOMAIN_GTT; - obj->write_domain = I915_GEM_DOMAIN_GTT; - obj->mm.dirty = true; - } - - i915_gem_object_unpin_pages(obj); - return 0; -} - -/** - * Changes the cache-level of an object across all VMA. - * @obj: object to act on - * @cache_level: new cache level to set for the object - * - * After this function returns, the object will be in the new cache-level - * across all GTT and the contents of the backing storage will be coherent, - * with respect to the new cache-level. 
In order to keep the backing storage - * coherent for all users, we only allow a single cache level to be set - * globally on the object and prevent it from being changed whilst the - * hardware is reading from the object. That is if the object is currently - * on the scanout it will be set to uncached (or equivalent display - * cache coherency) and all non-MOCS GPU access will also be uncached so - * that all direct access to the scanout remains coherent. - */ -int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, - enum i915_cache_level cache_level) -{ - struct i915_vma *vma; - int ret; - - lockdep_assert_held(&obj->base.dev->struct_mutex); - - if (obj->cache_level == cache_level) - return 0; - - /* Inspect the list of currently bound VMA and unbind any that would - * be invalid given the new cache-level. This is principally to - * catch the issue of the CS prefetch crossing page boundaries and - * reading an invalid PTE on older architectures. - */ -restart: - list_for_each_entry(vma, &obj->vma.list, obj_link) { - if (!drm_mm_node_allocated(&vma->node)) - continue; - - if (i915_vma_is_pinned(vma)) { - DRM_DEBUG("can not change the cache level of pinned objects\n"); - return -EBUSY; - } - - if (!i915_vma_is_closed(vma) && - i915_gem_valid_gtt_space(vma, cache_level)) - continue; - - ret = i915_vma_unbind(vma); - if (ret) - return ret; - - /* As unbinding may affect other elements in the - * obj->vma_list (due to side-effects from retiring - * an active vma), play safe and restart the iterator. - */ - goto restart; - } - - /* We can reuse the existing drm_mm nodes but need to change the - * cache-level on the PTE. We could simply unbind them all and - * rebind with the correct cache-level on next use. However since - * we already have a valid slot, dma mapping, pages etc, we may as - * rewrite the PTE in the belief that doing so tramples upon less - * state and so involves less work. - */ - if (obj->bind_count) { - /* Before we change the PTE, the GPU must not be accessing it. - * If we wait upon the object, we know that all the bound - * VMA are no longer active. - */ - ret = i915_gem_object_wait(obj, - I915_WAIT_INTERRUPTIBLE | - I915_WAIT_LOCKED | - I915_WAIT_ALL, - MAX_SCHEDULE_TIMEOUT); - if (ret) - return ret; - - if (!HAS_LLC(to_i915(obj->base.dev)) && - cache_level != I915_CACHE_NONE) { - /* Access to snoopable pages through the GTT is - * incoherent and on some machines causes a hard - * lockup. Relinquish the CPU mmaping to force - * userspace to refault in the pages and we can - * then double check if the GTT mapping is still - * valid for that pointer access. - */ - i915_gem_object_release_mmap(obj); - - /* As we no longer need a fence for GTT access, - * we can relinquish it now (and so prevent having - * to steal a fence from someone else on the next - * fence request). Note GPU activity would have - * dropped the fence as all snoopable access is - * supposed to be linear. - */ - for_each_ggtt_vma(vma, obj) { - ret = i915_vma_put_fence(vma); - if (ret) - return ret; - } - } else { - /* We either have incoherent backing store and - * so no GTT access or the architecture is fully - * coherent. In such cases, existing GTT mmaps - * ignore the cache bit in the PTE and we can - * rewrite it without confusing the GPU or having - * to force userspace to fault back in its mmaps. 
- */ - } - - list_for_each_entry(vma, &obj->vma.list, obj_link) { - if (!drm_mm_node_allocated(&vma->node)) - continue; - - ret = i915_vma_bind(vma, cache_level, PIN_UPDATE); - if (ret) - return ret; - } - } - - list_for_each_entry(vma, &obj->vma.list, obj_link) - vma->node.color = cache_level; - i915_gem_object_set_cache_coherency(obj, cache_level); - obj->cache_dirty = true; /* Always invalidate stale cachelines */ - - return 0; -} - -int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data, - struct drm_file *file) -{ - struct drm_i915_gem_caching *args = data; - struct drm_i915_gem_object *obj; - int err = 0; - - rcu_read_lock(); - obj = i915_gem_object_lookup_rcu(file, args->handle); - if (!obj) { - err = -ENOENT; - goto out; - } - - switch (obj->cache_level) { - case I915_CACHE_LLC: - case I915_CACHE_L3_LLC: - args->caching = I915_CACHING_CACHED; - break; - - case I915_CACHE_WT: - args->caching = I915_CACHING_DISPLAY; - break; - - default: - args->caching = I915_CACHING_NONE; - break; - } -out: - rcu_read_unlock(); - return err; -} - -int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, - struct drm_file *file) -{ - struct drm_i915_private *i915 = to_i915(dev); - struct drm_i915_gem_caching *args = data; - struct drm_i915_gem_object *obj; - enum i915_cache_level level; - int ret = 0; - - switch (args->caching) { - case I915_CACHING_NONE: - level = I915_CACHE_NONE; - break; - case I915_CACHING_CACHED: - /* - * Due to a HW issue on BXT A stepping, GPU stores via a - * snooped mapping may leave stale data in a corresponding CPU - * cacheline, whereas normally such cachelines would get - * invalidated. - */ - if (!HAS_LLC(i915) && !HAS_SNOOP(i915)) - return -ENODEV; - - level = I915_CACHE_LLC; - break; - case I915_CACHING_DISPLAY: - level = HAS_WT(i915) ? I915_CACHE_WT : I915_CACHE_NONE; - break; - default: - return -EINVAL; - } - - obj = i915_gem_object_lookup(file, args->handle); - if (!obj) - return -ENOENT; - - /* - * The caching mode of proxy object is handled by its generator, and - * not allowed to be changed by userspace. - */ - if (i915_gem_object_is_proxy(obj)) { - ret = -ENXIO; - goto out; - } - - if (obj->cache_level == level) - goto out; - - ret = i915_gem_object_wait(obj, - I915_WAIT_INTERRUPTIBLE, - MAX_SCHEDULE_TIMEOUT); - if (ret) - goto out; - - ret = i915_mutex_lock_interruptible(dev); - if (ret) - goto out; - - ret = i915_gem_object_set_cache_level(obj, level); - mutex_unlock(&dev->struct_mutex); - -out: - i915_gem_object_put(obj); - return ret; -} - -/* - * Prepare buffer for display plane (scanout, cursors, etc). Can be called from - * an uninterruptible phase (modesetting) and allows any flushes to be pipelined - * (for pageflips). We only flush the caches while preparing the buffer for - * display, the callers are responsible for frontbuffer flush. - */ -struct i915_vma * -i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, - u32 alignment, - const struct i915_ggtt_view *view, - unsigned int flags) -{ - struct i915_vma *vma; - int ret; - - lockdep_assert_held(&obj->base.dev->struct_mutex); - - /* Mark the global pin early so that we account for the - * display coherency whilst setting up the cache domains. - */ - obj->pin_global++; - - /* The display engine is not coherent with the LLC cache on gen6. As - * a result, we make sure that the pinning that is about to occur is - * done with uncached PTEs. This is lowest common denominator for all - * chipsets. 
- * - * However for gen6+, we could do better by using the GFDT bit instead - * of uncaching, which would allow us to flush all the LLC-cached data - * with that bit in the PTE to main memory with just one PIPE_CONTROL. - */ - ret = i915_gem_object_set_cache_level(obj, - HAS_WT(to_i915(obj->base.dev)) ? - I915_CACHE_WT : I915_CACHE_NONE); - if (ret) { - vma = ERR_PTR(ret); - goto err_unpin_global; - } - - /* As the user may map the buffer once pinned in the display plane - * (e.g. libkms for the bootup splash), we have to ensure that we - * always use map_and_fenceable for all scanout buffers. However, - * it may simply be too big to fit into mappable, in which case - * put it anyway and hope that userspace can cope (but always first - * try to preserve the existing ABI). - */ - vma = ERR_PTR(-ENOSPC); - if ((flags & PIN_MAPPABLE) == 0 && - (!view || view->type == I915_GGTT_VIEW_NORMAL)) - vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, - flags | - PIN_MAPPABLE | - PIN_NONBLOCK); - if (IS_ERR(vma)) - vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags); - if (IS_ERR(vma)) - goto err_unpin_global; - - vma->display_alignment = max_t(u64, vma->display_alignment, alignment); - - __i915_gem_object_flush_for_display(obj); - - /* It should now be out of any other write domains, and we can update - * the domain values for our changes. - */ - obj->read_domains |= I915_GEM_DOMAIN_GTT; - - return vma; - -err_unpin_global: - obj->pin_global--; - return vma; -} - -void -i915_gem_object_unpin_from_display_plane(struct i915_vma *vma) -{ - lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); - - if (WARN_ON(vma->obj->pin_global == 0)) - return; - - if (--vma->obj->pin_global == 0) - vma->display_alignment = I915_GTT_MIN_ALIGNMENT; - - /* Bump the LRU to try and avoid premature eviction whilst flipping */ - i915_gem_object_bump_inactive_ggtt(vma->obj); - - i915_vma_unpin(vma); -} - -/** - * Moves a single object to the CPU read, and possibly write domain. - * @obj: object to act on - * @write: requesting write or read-only access - * - * This function returns when the move is complete, including waiting on - * flushes to occur. - */ -int -i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write) -{ - int ret; - - lockdep_assert_held(&obj->base.dev->struct_mutex); - - ret = i915_gem_object_wait(obj, - I915_WAIT_INTERRUPTIBLE | - I915_WAIT_LOCKED | - (write ? I915_WAIT_ALL : 0), - MAX_SCHEDULE_TIMEOUT); - if (ret) - return ret; - - i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU); - - /* Flush the CPU cache if it's still invalid. */ - if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) { - i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC); - obj->read_domains |= I915_GEM_DOMAIN_CPU; - } - - /* It should now be out of any other write domains, and we can update - * the domain values for our changes. - */ - GEM_BUG_ON(obj->write_domain & ~I915_GEM_DOMAIN_CPU); - - /* If we're writing through the CPU, then the GPU read domains will - * need to be invalidated at next use. - */ - if (write) - __start_cpu_write(obj); - - return 0; -} - /* Throttle our rendering by waiting until the ring has completed our requests * emitted over 20 msec ago. 
* diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 908fddcc57c3..699f3f180d8a 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -1026,7 +1026,7 @@ static void reloc_cache_reset(struct reloc_cache *cache) mb(); kunmap_atomic(vaddr); - i915_gem_obj_finish_shmem_access((struct drm_i915_gem_object *)cache->node.mm); + i915_gem_object_finish_access((struct drm_i915_gem_object *)cache->node.mm); } else { wmb(); io_mapping_unmap_atomic((void __iomem *)vaddr); @@ -1058,7 +1058,7 @@ static void *reloc_kmap(struct drm_i915_gem_object *obj, unsigned int flushes; int err; - err = i915_gem_obj_prepare_shmem_write(obj, &flushes); + err = i915_gem_object_prepare_write(obj, &flushes); if (err) return ERR_PTR(err); diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c index 9440024c763f..f3b42b026fff 100644 --- a/drivers/gpu/drm/i915/i915_gem_render_state.c +++ b/drivers/gpu/drm/i915/i915_gem_render_state.c @@ -84,7 +84,7 @@ static int render_state_setup(struct intel_render_state *so, u32 *d; int ret; - ret = i915_gem_obj_prepare_shmem_write(so->obj, &needs_clflush); + ret = i915_gem_object_prepare_write(so->obj, &needs_clflush); if (ret) return ret; @@ -166,7 +166,7 @@ static int render_state_setup(struct intel_render_state *so, ret = 0; out: - i915_gem_obj_finish_shmem_access(so->obj); + i915_gem_object_finish_access(so->obj); return ret; err: diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c index ce4ec87698f6..b22b8249dfbd 100644 --- a/drivers/gpu/drm/i915/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/selftests/huge_pages.c @@ -1017,7 +1017,7 @@ static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val) unsigned long n; int err; - err = i915_gem_obj_prepare_shmem_read(obj, &needs_flush); + err = i915_gem_object_prepare_read(obj, &needs_flush); if (err) return err; @@ -1038,7 +1038,7 @@ static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val) kunmap_atomic(ptr); } - i915_gem_obj_finish_shmem_access(obj); + i915_gem_object_finish_access(obj); return err; } diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c index 046a38743152..cb25b5fc8027 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c @@ -37,7 +37,7 @@ static int cpu_set(struct drm_i915_gem_object *obj, u32 *cpu; int err; - err = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush); + err = i915_gem_object_prepare_write(obj, &needs_clflush); if (err) return err; @@ -54,7 +54,7 @@ static int cpu_set(struct drm_i915_gem_object *obj, drm_clflush_virt_range(cpu, sizeof(*cpu)); kunmap_atomic(map); - i915_gem_obj_finish_shmem_access(obj); + i915_gem_object_finish_access(obj); return 0; } @@ -69,7 +69,7 @@ static int cpu_get(struct drm_i915_gem_object *obj, u32 *cpu; int err; - err = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush); + err = i915_gem_object_prepare_read(obj, &needs_clflush); if (err) return err; @@ -83,7 +83,7 @@ static int cpu_get(struct drm_i915_gem_object *obj, *v = *cpu; kunmap_atomic(map); - i915_gem_obj_finish_shmem_access(obj); + i915_gem_object_finish_access(obj); return 0; } diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c index 34ac5cc6d59f..c69c6d9a998b 100644 --- 
a/drivers/gpu/drm/i915/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c @@ -354,7 +354,7 @@ static int cpu_fill(struct drm_i915_gem_object *obj, u32 value) unsigned int n, m, need_flush; int err; - err = i915_gem_obj_prepare_shmem_write(obj, &need_flush); + err = i915_gem_object_prepare_write(obj, &need_flush); if (err) return err; @@ -369,7 +369,7 @@ static int cpu_fill(struct drm_i915_gem_object *obj, u32 value) kunmap_atomic(map); } - i915_gem_obj_finish_shmem_access(obj); + i915_gem_object_finish_access(obj); obj->read_domains = I915_GEM_DOMAIN_GTT | I915_GEM_DOMAIN_CPU; obj->write_domain = 0; return 0; @@ -381,7 +381,7 @@ static noinline int cpu_check(struct drm_i915_gem_object *obj, unsigned int n, m, needs_flush; int err; - err = i915_gem_obj_prepare_shmem_read(obj, &needs_flush); + err = i915_gem_object_prepare_read(obj, &needs_flush); if (err) return err; @@ -419,7 +419,7 @@ out_unmap: break; } - i915_gem_obj_finish_shmem_access(obj); + i915_gem_object_finish_access(obj); return err; } -- cgit v1.2.3 From 10be98a77c558f8cfb823cd2777171fbb35040f6 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 28 May 2019 10:29:49 +0100 Subject: drm/i915: Move more GEM objects under gem/ Continuing the theme of separating out the GEM clutter. Signed-off-by: Chris Wilson Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190528092956.14910-8-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/Makefile | 26 +- drivers/gpu/drm/i915/Makefile.header-test | 2 - drivers/gpu/drm/i915/gem/i915_gem_clflush.c | 160 ++ drivers/gpu/drm/i915/gem/i915_gem_clflush.h | 20 + drivers/gpu/drm/i915/gem/i915_gem_context.c | 2453 +++++++++++++++++ drivers/gpu/drm/i915/gem/i915_gem_context.h | 240 ++ drivers/gpu/drm/i915/gem/i915_gem_context_types.h | 208 ++ drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c | 318 +++ drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 2768 +++++++++++++++++++ drivers/gpu/drm/i915/gem/i915_gem_internal.c | 197 ++ drivers/gpu/drm/i915/gem/i915_gem_object.c | 10 +- drivers/gpu/drm/i915/gem/i915_gem_pm.c | 251 ++ drivers/gpu/drm/i915/gem/i915_gem_pm.h | 25 + drivers/gpu/drm/i915/gem/i915_gem_shrinker.c | 555 ++++ drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 704 +++++ drivers/gpu/drm/i915/gem/i915_gem_tiling.c | 440 +++ drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 832 ++++++ drivers/gpu/drm/i915/gem/i915_gemfs.c | 57 + drivers/gpu/drm/i915/gem/i915_gemfs.h | 16 + .../gpu/drm/i915/gem/selftests/huge_gem_object.c | 121 + .../gpu/drm/i915/gem/selftests/huge_gem_object.h | 27 + drivers/gpu/drm/i915/gem/selftests/huge_pages.c | 1780 +++++++++++++ .../drm/i915/gem/selftests/i915_gem_coherency.c | 379 +++ .../gpu/drm/i915/gem/selftests/i915_gem_context.c | 1736 ++++++++++++ .../gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c | 386 +++ drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c | 2 +- .../gpu/drm/i915/gem/selftests/i915_gem_object.c | 99 + drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c | 34 + drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h | 17 + drivers/gpu/drm/i915/gem/selftests/mock_context.c | 111 + drivers/gpu/drm/i915/gem/selftests/mock_context.h | 24 + drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c | 144 + drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.h | 22 + .../gpu/drm/i915/gem/selftests/mock_gem_object.h | 14 + drivers/gpu/drm/i915/gt/intel_context.c | 4 +- drivers/gpu/drm/i915/gt/intel_engine_cs.c | 3 + drivers/gpu/drm/i915/gt/intel_lrc.c | 2 + drivers/gpu/drm/i915/gt/intel_lrc.h | 14 +- 
drivers/gpu/drm/i915/gt/intel_reset.c | 2 + drivers/gpu/drm/i915/gt/intel_ringbuffer.c | 3 + drivers/gpu/drm/i915/gt/intel_workarounds.c | 1 + drivers/gpu/drm/i915/gt/mock_engine.c | 3 +- drivers/gpu/drm/i915/gt/selftest_hangcheck.c | 6 +- drivers/gpu/drm/i915/gt/selftest_lrc.c | 7 +- drivers/gpu/drm/i915/gt/selftest_workarounds.c | 6 +- drivers/gpu/drm/i915/gvt/mmio_context.c | 1 + drivers/gpu/drm/i915/gvt/scheduler.c | 5 +- drivers/gpu/drm/i915/i915_debugfs.c | 2 +- drivers/gpu/drm/i915/i915_drv.c | 1 + drivers/gpu/drm/i915/i915_drv.h | 2 +- drivers/gpu/drm/i915/i915_gem.c | 11 +- drivers/gpu/drm/i915/i915_gem_clflush.c | 178 -- drivers/gpu/drm/i915/i915_gem_clflush.h | 36 - drivers/gpu/drm/i915/i915_gem_context.c | 2474 ----------------- drivers/gpu/drm/i915/i915_gem_context.h | 258 -- drivers/gpu/drm/i915/i915_gem_context_types.h | 208 -- drivers/gpu/drm/i915/i915_gem_dmabuf.c | 337 --- drivers/gpu/drm/i915/i915_gem_evict.c | 2 + drivers/gpu/drm/i915/i915_gem_execbuffer.c | 2788 -------------------- drivers/gpu/drm/i915/i915_gem_internal.c | 207 -- drivers/gpu/drm/i915/i915_gem_pm.c | 251 -- drivers/gpu/drm/i915/i915_gem_pm.h | 25 - drivers/gpu/drm/i915/i915_gem_shrinker.c | 574 ---- drivers/gpu/drm/i915/i915_gem_stolen.c | 721 ----- drivers/gpu/drm/i915/i915_gem_tiling.c | 460 ---- drivers/gpu/drm/i915/i915_gem_userptr.c | 851 ------ drivers/gpu/drm/i915/i915_gemfs.c | 75 - drivers/gpu/drm/i915/i915_gemfs.h | 34 - drivers/gpu/drm/i915/i915_globals.c | 2 +- drivers/gpu/drm/i915/i915_gpu_error.c | 2 + drivers/gpu/drm/i915/i915_perf.c | 2 + drivers/gpu/drm/i915/i915_request.c | 3 + drivers/gpu/drm/i915/intel_display.c | 1 - drivers/gpu/drm/i915/intel_guc_submission.c | 2 + drivers/gpu/drm/i915/intel_overlay.c | 2 + drivers/gpu/drm/i915/selftests/huge_gem_object.c | 139 - drivers/gpu/drm/i915/selftests/huge_gem_object.h | 45 - drivers/gpu/drm/i915/selftests/huge_pages.c | 1793 ------------- drivers/gpu/drm/i915/selftests/i915_active.c | 4 +- drivers/gpu/drm/i915/selftests/i915_gem.c | 8 +- .../gpu/drm/i915/selftests/i915_gem_coherency.c | 397 --- drivers/gpu/drm/i915/selftests/i915_gem_context.c | 1752 ------------ drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c | 404 --- drivers/gpu/drm/i915/selftests/i915_gem_evict.c | 8 +- drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 5 +- drivers/gpu/drm/i915/selftests/i915_gem_object.c | 117 - drivers/gpu/drm/i915/selftests/i915_request.c | 6 +- drivers/gpu/drm/i915/selftests/i915_timeline.c | 4 +- drivers/gpu/drm/i915/selftests/i915_vma.c | 5 +- drivers/gpu/drm/i915/selftests/igt_flush_test.c | 6 +- drivers/gpu/drm/i915/selftests/igt_gem_utils.c | 34 - drivers/gpu/drm/i915/selftests/igt_gem_utils.h | 17 - drivers/gpu/drm/i915/selftests/igt_spinner.c | 3 +- drivers/gpu/drm/i915/selftests/igt_spinner.h | 9 +- drivers/gpu/drm/i915/selftests/intel_guc.c | 3 +- drivers/gpu/drm/i915/selftests/mock_context.c | 129 - drivers/gpu/drm/i915/selftests/mock_context.h | 42 - drivers/gpu/drm/i915/selftests/mock_dmabuf.c | 162 -- drivers/gpu/drm/i915/selftests/mock_dmabuf.h | 41 - drivers/gpu/drm/i915/selftests/mock_gem_device.c | 5 +- drivers/gpu/drm/i915/selftests/mock_gem_object.h | 9 - drivers/gpu/drm/i915/selftests/mock_request.c | 2 +- 102 files changed, 14267 insertions(+), 14626 deletions(-) create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_clflush.c create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_clflush.h create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_context.c create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_context.h create mode 
100644 drivers/gpu/drm/i915/gem/i915_gem_context_types.h create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_internal.c create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_pm.c create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_pm.h create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_stolen.c create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_tiling.c create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_userptr.c create mode 100644 drivers/gpu/drm/i915/gem/i915_gemfs.c create mode 100644 drivers/gpu/drm/i915/gem/i915_gemfs.h create mode 100644 drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c create mode 100644 drivers/gpu/drm/i915/gem/selftests/huge_gem_object.h create mode 100644 drivers/gpu/drm/i915/gem/selftests/huge_pages.c create mode 100644 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c create mode 100644 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c create mode 100644 drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c create mode 100644 drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c create mode 100644 drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c create mode 100644 drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h create mode 100644 drivers/gpu/drm/i915/gem/selftests/mock_context.c create mode 100644 drivers/gpu/drm/i915/gem/selftests/mock_context.h create mode 100644 drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c create mode 100644 drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.h create mode 100644 drivers/gpu/drm/i915/gem/selftests/mock_gem_object.h delete mode 100644 drivers/gpu/drm/i915/i915_gem_clflush.c delete mode 100644 drivers/gpu/drm/i915/i915_gem_clflush.h delete mode 100644 drivers/gpu/drm/i915/i915_gem_context.c delete mode 100644 drivers/gpu/drm/i915/i915_gem_context.h delete mode 100644 drivers/gpu/drm/i915/i915_gem_context_types.h delete mode 100644 drivers/gpu/drm/i915/i915_gem_dmabuf.c delete mode 100644 drivers/gpu/drm/i915/i915_gem_execbuffer.c delete mode 100644 drivers/gpu/drm/i915/i915_gem_internal.c delete mode 100644 drivers/gpu/drm/i915/i915_gem_pm.c delete mode 100644 drivers/gpu/drm/i915/i915_gem_pm.h delete mode 100644 drivers/gpu/drm/i915/i915_gem_shrinker.c delete mode 100644 drivers/gpu/drm/i915/i915_gem_stolen.c delete mode 100644 drivers/gpu/drm/i915/i915_gem_tiling.c delete mode 100644 drivers/gpu/drm/i915/i915_gem_userptr.c delete mode 100644 drivers/gpu/drm/i915/i915_gemfs.c delete mode 100644 drivers/gpu/drm/i915/i915_gemfs.h delete mode 100644 drivers/gpu/drm/i915/selftests/huge_gem_object.c delete mode 100644 drivers/gpu/drm/i915/selftests/huge_gem_object.h delete mode 100644 drivers/gpu/drm/i915/selftests/huge_pages.c delete mode 100644 drivers/gpu/drm/i915/selftests/i915_gem_coherency.c delete mode 100644 drivers/gpu/drm/i915/selftests/i915_gem_context.c delete mode 100644 drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c delete mode 100644 drivers/gpu/drm/i915/selftests/i915_gem_object.c delete mode 100644 drivers/gpu/drm/i915/selftests/igt_gem_utils.c delete mode 100644 drivers/gpu/drm/i915/selftests/igt_gem_utils.h delete mode 100644 drivers/gpu/drm/i915/selftests/mock_context.c delete mode 100644 drivers/gpu/drm/i915/selftests/mock_context.h delete mode 100644 drivers/gpu/drm/i915/selftests/mock_dmabuf.c delete mode 100644 drivers/gpu/drm/i915/selftests/mock_dmabuf.h delete mode 100644 
drivers/gpu/drm/i915/selftests/mock_gem_object.h (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 5ffd7e9b19ad..3f3d378f467d 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -87,33 +87,33 @@ i915-y += $(gt-y) # GEM (Graphics Execution Management) code obj-y += gem/ gem-y += \ + gem/i915_gem_clflush.o \ + gem/i915_gem_context.o \ + gem/i915_gem_dmabuf.o \ gem/i915_gem_domain.o \ + gem/i915_gem_execbuffer.o \ + gem/i915_gem_internal.o \ gem/i915_gem_object.o \ gem/i915_gem_mman.o \ gem/i915_gem_pages.o \ gem/i915_gem_phys.o \ - gem/i915_gem_shmem.o + gem/i915_gem_pm.o \ + gem/i915_gem_shmem.o \ + gem/i915_gem_shrinker.o \ + gem/i915_gem_stolen.o \ + gem/i915_gem_tiling.o \ + gem/i915_gem_userptr.o \ + gem/i915_gemfs.o i915-y += \ $(gem-y) \ i915_active.o \ i915_cmd_parser.o \ i915_gem_batch_pool.o \ - i915_gem_clflush.o \ - i915_gem_context.o \ - i915_gem_dmabuf.o \ i915_gem_evict.o \ - i915_gem_execbuffer.o \ i915_gem_fence_reg.o \ i915_gem_gtt.o \ - i915_gem_internal.o \ i915_gem.o \ - i915_gem_pm.o \ i915_gem_render_state.o \ - i915_gem_shrinker.o \ - i915_gem_stolen.o \ - i915_gem_tiling.o \ - i915_gem_userptr.o \ - i915_gemfs.o \ i915_globals.o \ i915_query.o \ i915_request.o \ @@ -199,10 +199,10 @@ i915-y += dvo_ch7017.o \ # Post-mortem debug and GPU hang state capture i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += i915_gpu_error.o i915-$(CONFIG_DRM_I915_SELFTEST) += \ + gem/selftests/igt_gem_utils.o \ selftests/i915_random.o \ selftests/i915_selftest.o \ selftests/igt_flush_test.o \ - selftests/igt_gem_utils.o \ selftests/igt_live_test.o \ selftests/igt_reset.o \ selftests/igt_spinner.o diff --git a/drivers/gpu/drm/i915/Makefile.header-test b/drivers/gpu/drm/i915/Makefile.header-test index 3a9663002d4a..e01cd91dc1c8 100644 --- a/drivers/gpu/drm/i915/Makefile.header-test +++ b/drivers/gpu/drm/i915/Makefile.header-test @@ -6,8 +6,6 @@ header_test := \ i915_active_types.h \ i915_debugfs.h \ i915_drv.h \ - i915_gem_context_types.h \ - i915_gem_pm.h \ i915_irq.h \ i915_params.h \ i915_priolist_types.h \ diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c new file mode 100644 index 000000000000..45d238d784fc --- /dev/null +++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c @@ -0,0 +1,160 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2016 Intel Corporation + */ + +#include "i915_drv.h" +#include "i915_gem_clflush.h" +#include "intel_frontbuffer.h" + +static DEFINE_SPINLOCK(clflush_lock); + +struct clflush { + struct dma_fence dma; /* Must be first for dma_fence_free() */ + struct i915_sw_fence wait; + struct work_struct work; + struct drm_i915_gem_object *obj; +}; + +static const char *i915_clflush_get_driver_name(struct dma_fence *fence) +{ + return DRIVER_NAME; +} + +static const char *i915_clflush_get_timeline_name(struct dma_fence *fence) +{ + return "clflush"; +} + +static void i915_clflush_release(struct dma_fence *fence) +{ + struct clflush *clflush = container_of(fence, typeof(*clflush), dma); + + i915_sw_fence_fini(&clflush->wait); + + BUILD_BUG_ON(offsetof(typeof(*clflush), dma)); + dma_fence_free(&clflush->dma); +} + +static const struct dma_fence_ops i915_clflush_ops = { + .get_driver_name = i915_clflush_get_driver_name, + .get_timeline_name = i915_clflush_get_timeline_name, + .release = i915_clflush_release, +}; + +static void __i915_do_clflush(struct drm_i915_gem_object *obj) +{ + GEM_BUG_ON(!i915_gem_object_has_pages(obj)); + 
drm_clflush_sg(obj->mm.pages); + intel_fb_obj_flush(obj, ORIGIN_CPU); +} + +static void i915_clflush_work(struct work_struct *work) +{ + struct clflush *clflush = container_of(work, typeof(*clflush), work); + struct drm_i915_gem_object *obj = clflush->obj; + + if (i915_gem_object_pin_pages(obj)) { + DRM_ERROR("Failed to acquire obj->pages for clflushing\n"); + goto out; + } + + __i915_do_clflush(obj); + + i915_gem_object_unpin_pages(obj); + +out: + i915_gem_object_put(obj); + + dma_fence_signal(&clflush->dma); + dma_fence_put(&clflush->dma); +} + +static int __i915_sw_fence_call +i915_clflush_notify(struct i915_sw_fence *fence, + enum i915_sw_fence_notify state) +{ + struct clflush *clflush = container_of(fence, typeof(*clflush), wait); + + switch (state) { + case FENCE_COMPLETE: + schedule_work(&clflush->work); + break; + + case FENCE_FREE: + dma_fence_put(&clflush->dma); + break; + } + + return NOTIFY_DONE; +} + +bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, + unsigned int flags) +{ + struct clflush *clflush; + + /* + * Stolen memory is always coherent with the GPU as it is explicitly + * marked as wc by the system, or the system is cache-coherent. + * Similarly, we only access struct pages through the CPU cache, so + * anything not backed by physical memory we consider to be always + * coherent and not need clflushing. + */ + if (!i915_gem_object_has_struct_page(obj)) { + obj->cache_dirty = false; + return false; + } + + /* If the GPU is snooping the contents of the CPU cache, + * we do not need to manually clear the CPU cache lines. However, + * the caches are only snooped when the render cache is + * flushed/invalidated. As we always have to emit invalidations + * and flushes when moving into and out of the RENDER domain, correct + * snooping behaviour occurs naturally as the result of our domain + * tracking. 
+ */ + if (!(flags & I915_CLFLUSH_FORCE) && + obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ) + return false; + + trace_i915_gem_object_clflush(obj); + + clflush = NULL; + if (!(flags & I915_CLFLUSH_SYNC)) + clflush = kmalloc(sizeof(*clflush), GFP_KERNEL); + if (clflush) { + GEM_BUG_ON(!obj->cache_dirty); + + dma_fence_init(&clflush->dma, + &i915_clflush_ops, + &clflush_lock, + to_i915(obj->base.dev)->mm.unordered_timeline, + 0); + i915_sw_fence_init(&clflush->wait, i915_clflush_notify); + + clflush->obj = i915_gem_object_get(obj); + INIT_WORK(&clflush->work, i915_clflush_work); + + dma_fence_get(&clflush->dma); + + i915_sw_fence_await_reservation(&clflush->wait, + obj->resv, NULL, + true, I915_FENCE_TIMEOUT, + I915_FENCE_GFP); + + reservation_object_lock(obj->resv, NULL); + reservation_object_add_excl_fence(obj->resv, &clflush->dma); + reservation_object_unlock(obj->resv); + + i915_sw_fence_commit(&clflush->wait); + } else if (obj->mm.pages) { + __i915_do_clflush(obj); + } else { + GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU); + } + + obj->cache_dirty = false; + return true; +} diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.h b/drivers/gpu/drm/i915/gem/i915_gem_clflush.h new file mode 100644 index 000000000000..e6c382973129 --- /dev/null +++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.h @@ -0,0 +1,20 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2016 Intel Corporation + */ + +#ifndef __I915_GEM_CLFLUSH_H__ +#define __I915_GEM_CLFLUSH_H__ + +#include + +struct drm_i915_private; +struct drm_i915_gem_object; + +bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, + unsigned int flags); +#define I915_CLFLUSH_FORCE BIT(0) +#define I915_CLFLUSH_SYNC BIT(1) + +#endif /* __I915_GEM_CLFLUSH_H__ */ diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c new file mode 100644 index 000000000000..5dcdf6540f43 --- /dev/null +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -0,0 +1,2453 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2011-2012 Intel Corporation + */ + +/* + * This file implements HW context support. On gen5+ a HW context consists of an + * opaque GPU object which is referenced at times of context saves and restores. + * With RC6 enabled, the context is also referenced as the GPU enters and exists + * from RC6 (GPU has it's own internal power context, except on gen5). Though + * something like a context does exist for the media ring, the code only + * supports contexts for the render ring. + * + * In software, there is a distinction between contexts created by the user, + * and the default HW context. The default HW context is used by GPU clients + * that do not request setup of their own hardware context. The default + * context's state is never restored to help prevent programming errors. This + * would happen if a client ran and piggy-backed off another clients GPU state. + * The default context only exists to give the GPU some offset to load as the + * current to invoke a save of the context we actually care about. In fact, the + * code could likely be constructed, albeit in a more complicated fashion, to + * never use the default context, though that limits the driver's ability to + * swap out, and/or destroy other contexts. + * + * All other contexts are created as a request by the GPU client. These contexts + * store GPU state, and thus allow GPU clients to not re-emit state (and + * potentially query certain state) at any time. 
The kernel driver makes + * certain that the appropriate commands are inserted. + * + * The context life cycle is semi-complicated in that context BOs may live + * longer than the context itself because of the way the hardware, and object + * tracking works. Below is a very crude representation of the state machine + * describing the context life. + * refcount pincount active + * S0: initial state 0 0 0 + * S1: context created 1 0 0 + * S2: context is currently running 2 1 X + * S3: GPU referenced, but not current 2 0 1 + * S4: context is current, but destroyed 1 1 0 + * S5: like S3, but destroyed 1 0 1 + * + * The most common (but not all) transitions: + * S0->S1: client creates a context + * S1->S2: client submits execbuf with context + * S2->S3: other clients submits execbuf with context + * S3->S1: context object was retired + * S3->S2: clients submits another execbuf + * S2->S4: context destroy called with current context + * S3->S5->S0: destroy path + * S4->S5->S0: destroy path on current context + * + * There are two confusing terms used above: + * The "current context" means the context which is currently running on the + * GPU. The GPU has loaded its state already and has stored away the gtt + * offset of the BO. The GPU is not actively referencing the data at this + * offset, but it will on the next context switch. The only way to avoid this + * is to do a GPU reset. + * + * An "active context' is one which was previously the "current context" and is + * on the active list waiting for the next context switch to occur. Until this + * happens, the object must remain at the same gtt offset. It is therefore + * possible to destroy a context, but it is still active. + * + */ + +#include +#include + +#include + +#include "gt/intel_lrc_reg.h" + +#include "i915_gem_context.h" +#include "i915_globals.h" +#include "i915_trace.h" +#include "i915_user_extensions.h" + +#define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1 + +static struct i915_global_gem_context { + struct i915_global base; + struct kmem_cache *slab_luts; +} global; + +struct i915_lut_handle *i915_lut_handle_alloc(void) +{ + return kmem_cache_alloc(global.slab_luts, GFP_KERNEL); +} + +void i915_lut_handle_free(struct i915_lut_handle *lut) +{ + return kmem_cache_free(global.slab_luts, lut); +} + +static void lut_close(struct i915_gem_context *ctx) +{ + struct i915_lut_handle *lut, *ln; + struct radix_tree_iter iter; + void __rcu **slot; + + list_for_each_entry_safe(lut, ln, &ctx->handles_list, ctx_link) { + list_del(&lut->obj_link); + i915_lut_handle_free(lut); + } + INIT_LIST_HEAD(&ctx->handles_list); + + rcu_read_lock(); + radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) { + struct i915_vma *vma = rcu_dereference_raw(*slot); + + radix_tree_iter_delete(&ctx->handles_vma, &iter, slot); + + vma->open_count--; + __i915_gem_object_release_unless_active(vma->obj); + } + rcu_read_unlock(); +} + +static struct intel_context * +lookup_user_engine(struct i915_gem_context *ctx, + unsigned long flags, + const struct i915_engine_class_instance *ci) +#define LOOKUP_USER_INDEX BIT(0) +{ + int idx; + + if (!!(flags & LOOKUP_USER_INDEX) != i915_gem_context_user_engines(ctx)) + return ERR_PTR(-EINVAL); + + if (!i915_gem_context_user_engines(ctx)) { + struct intel_engine_cs *engine; + + engine = intel_engine_lookup_user(ctx->i915, + ci->engine_class, + ci->engine_instance); + if (!engine) + return ERR_PTR(-EINVAL); + + idx = engine->id; + } else { + idx = ci->engine_instance; + } + + return i915_gem_context_get_engine(ctx, idx); 
+} + +static inline int new_hw_id(struct drm_i915_private *i915, gfp_t gfp) +{ + unsigned int max; + + lockdep_assert_held(&i915->contexts.mutex); + + if (INTEL_GEN(i915) >= 11) + max = GEN11_MAX_CONTEXT_HW_ID; + else if (USES_GUC_SUBMISSION(i915)) + /* + * When using GuC in proxy submission, GuC consumes the + * highest bit in the context id to indicate proxy submission. + */ + max = MAX_GUC_CONTEXT_HW_ID; + else + max = MAX_CONTEXT_HW_ID; + + return ida_simple_get(&i915->contexts.hw_ida, 0, max, gfp); +} + +static int steal_hw_id(struct drm_i915_private *i915) +{ + struct i915_gem_context *ctx, *cn; + LIST_HEAD(pinned); + int id = -ENOSPC; + + lockdep_assert_held(&i915->contexts.mutex); + + list_for_each_entry_safe(ctx, cn, + &i915->contexts.hw_id_list, hw_id_link) { + if (atomic_read(&ctx->hw_id_pin_count)) { + list_move_tail(&ctx->hw_id_link, &pinned); + continue; + } + + GEM_BUG_ON(!ctx->hw_id); /* perma-pinned kernel context */ + list_del_init(&ctx->hw_id_link); + id = ctx->hw_id; + break; + } + + /* + * Remember how far we got up on the last repossesion scan, so the + * list is kept in a "least recently scanned" order. + */ + list_splice_tail(&pinned, &i915->contexts.hw_id_list); + return id; +} + +static int assign_hw_id(struct drm_i915_private *i915, unsigned int *out) +{ + int ret; + + lockdep_assert_held(&i915->contexts.mutex); + + /* + * We prefer to steal/stall ourselves and our users over that of the + * entire system. That may be a little unfair to our users, and + * even hurt high priority clients. The choice is whether to oomkill + * something else, or steal a context id. + */ + ret = new_hw_id(i915, GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN); + if (unlikely(ret < 0)) { + ret = steal_hw_id(i915); + if (ret < 0) /* once again for the correct errno code */ + ret = new_hw_id(i915, GFP_KERNEL); + if (ret < 0) + return ret; + } + + *out = ret; + return 0; +} + +static void release_hw_id(struct i915_gem_context *ctx) +{ + struct drm_i915_private *i915 = ctx->i915; + + if (list_empty(&ctx->hw_id_link)) + return; + + mutex_lock(&i915->contexts.mutex); + if (!list_empty(&ctx->hw_id_link)) { + ida_simple_remove(&i915->contexts.hw_ida, ctx->hw_id); + list_del_init(&ctx->hw_id_link); + } + mutex_unlock(&i915->contexts.mutex); +} + +static void __free_engines(struct i915_gem_engines *e, unsigned int count) +{ + while (count--) { + if (!e->engines[count]) + continue; + + intel_context_put(e->engines[count]); + } + kfree(e); +} + +static void free_engines(struct i915_gem_engines *e) +{ + __free_engines(e, e->num_engines); +} + +static void free_engines_rcu(struct work_struct *wrk) +{ + struct i915_gem_engines *e = + container_of(wrk, struct i915_gem_engines, rcu.work); + struct drm_i915_private *i915 = e->i915; + + mutex_lock(&i915->drm.struct_mutex); + free_engines(e); + mutex_unlock(&i915->drm.struct_mutex); +} + +static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx) +{ + struct intel_engine_cs *engine; + struct i915_gem_engines *e; + enum intel_engine_id id; + + e = kzalloc(struct_size(e, engines, I915_NUM_ENGINES), GFP_KERNEL); + if (!e) + return ERR_PTR(-ENOMEM); + + e->i915 = ctx->i915; + for_each_engine(engine, ctx->i915, id) { + struct intel_context *ce; + + ce = intel_context_create(ctx, engine); + if (IS_ERR(ce)) { + __free_engines(e, id); + return ERR_CAST(ce); + } + + e->engines[id] = ce; + } + e->num_engines = id; + + return e; +} + +static void i915_gem_context_free(struct i915_gem_context *ctx) +{ + 
lockdep_assert_held(&ctx->i915->drm.struct_mutex); + GEM_BUG_ON(!i915_gem_context_is_closed(ctx)); + + release_hw_id(ctx); + i915_ppgtt_put(ctx->ppgtt); + + free_engines(rcu_access_pointer(ctx->engines)); + mutex_destroy(&ctx->engines_mutex); + + if (ctx->timeline) + i915_timeline_put(ctx->timeline); + + kfree(ctx->name); + put_pid(ctx->pid); + + list_del(&ctx->link); + mutex_destroy(&ctx->mutex); + + kfree_rcu(ctx, rcu); +} + +static void contexts_free(struct drm_i915_private *i915) +{ + struct llist_node *freed = llist_del_all(&i915->contexts.free_list); + struct i915_gem_context *ctx, *cn; + + lockdep_assert_held(&i915->drm.struct_mutex); + + llist_for_each_entry_safe(ctx, cn, freed, free_link) + i915_gem_context_free(ctx); +} + +static void contexts_free_first(struct drm_i915_private *i915) +{ + struct i915_gem_context *ctx; + struct llist_node *freed; + + lockdep_assert_held(&i915->drm.struct_mutex); + + freed = llist_del_first(&i915->contexts.free_list); + if (!freed) + return; + + ctx = container_of(freed, typeof(*ctx), free_link); + i915_gem_context_free(ctx); +} + +static void contexts_free_worker(struct work_struct *work) +{ + struct drm_i915_private *i915 = + container_of(work, typeof(*i915), contexts.free_work); + + mutex_lock(&i915->drm.struct_mutex); + contexts_free(i915); + mutex_unlock(&i915->drm.struct_mutex); +} + +void i915_gem_context_release(struct kref *ref) +{ + struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref); + struct drm_i915_private *i915 = ctx->i915; + + trace_i915_context_free(ctx); + if (llist_add(&ctx->free_link, &i915->contexts.free_list)) + queue_work(i915->wq, &i915->contexts.free_work); +} + +static void context_close(struct i915_gem_context *ctx) +{ + i915_gem_context_set_closed(ctx); + + /* + * This context will never again be assinged to HW, so we can + * reuse its ID for the next context. + */ + release_hw_id(ctx); + + /* + * The LUT uses the VMA as a backpointer to unref the object, + * so we need to clear the LUT before we close all the VMA (inside + * the ppgtt). 
+ */ + lut_close(ctx); + + ctx->file_priv = ERR_PTR(-EBADF); + i915_gem_context_put(ctx); +} + +static u32 default_desc_template(const struct drm_i915_private *i915, + const struct i915_hw_ppgtt *ppgtt) +{ + u32 address_mode; + u32 desc; + + desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE; + + address_mode = INTEL_LEGACY_32B_CONTEXT; + if (ppgtt && i915_vm_is_4lvl(&ppgtt->vm)) + address_mode = INTEL_LEGACY_64B_CONTEXT; + desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT; + + if (IS_GEN(i915, 8)) + desc |= GEN8_CTX_L3LLC_COHERENT; + + /* TODO: WaDisableLiteRestore when we start using semaphore + * signalling between Command Streamers + * ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE; + */ + + return desc; +} + +static struct i915_gem_context * +__create_context(struct drm_i915_private *dev_priv) +{ + struct i915_gem_context *ctx; + struct i915_gem_engines *e; + int err; + int i; + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return ERR_PTR(-ENOMEM); + + kref_init(&ctx->ref); + list_add_tail(&ctx->link, &dev_priv->contexts.list); + ctx->i915 = dev_priv; + ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL); + mutex_init(&ctx->mutex); + + mutex_init(&ctx->engines_mutex); + e = default_engines(ctx); + if (IS_ERR(e)) { + err = PTR_ERR(e); + goto err_free; + } + RCU_INIT_POINTER(ctx->engines, e); + + INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL); + INIT_LIST_HEAD(&ctx->handles_list); + INIT_LIST_HEAD(&ctx->hw_id_link); + + /* NB: Mark all slices as needing a remap so that when the context first + * loads it will restore whatever remap state already exists. If there + * is no remap info, it will be a NOP. */ + ctx->remap_slice = ALL_L3_SLICES(dev_priv); + + i915_gem_context_set_bannable(ctx); + i915_gem_context_set_recoverable(ctx); + + ctx->ring_size = 4 * PAGE_SIZE; + ctx->desc_template = + default_desc_template(dev_priv, dev_priv->mm.aliasing_ppgtt); + + for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++) + ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES; + + return ctx; + +err_free: + kfree(ctx); + return ERR_PTR(err); +} + +static struct i915_hw_ppgtt * +__set_ppgtt(struct i915_gem_context *ctx, struct i915_hw_ppgtt *ppgtt) +{ + struct i915_hw_ppgtt *old = ctx->ppgtt; + + ctx->ppgtt = i915_ppgtt_get(ppgtt); + ctx->desc_template = default_desc_template(ctx->i915, ppgtt); + + return old; +} + +static void __assign_ppgtt(struct i915_gem_context *ctx, + struct i915_hw_ppgtt *ppgtt) +{ + if (ppgtt == ctx->ppgtt) + return; + + ppgtt = __set_ppgtt(ctx, ppgtt); + if (ppgtt) + i915_ppgtt_put(ppgtt); +} + +static struct i915_gem_context * +i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags) +{ + struct i915_gem_context *ctx; + + lockdep_assert_held(&dev_priv->drm.struct_mutex); + + if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE && + !HAS_EXECLISTS(dev_priv)) + return ERR_PTR(-EINVAL); + + /* Reap the most stale context */ + contexts_free_first(dev_priv); + + ctx = __create_context(dev_priv); + if (IS_ERR(ctx)) + return ctx; + + if (HAS_FULL_PPGTT(dev_priv)) { + struct i915_hw_ppgtt *ppgtt; + + ppgtt = i915_ppgtt_create(dev_priv); + if (IS_ERR(ppgtt)) { + DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n", + PTR_ERR(ppgtt)); + context_close(ctx); + return ERR_CAST(ppgtt); + } + + __assign_ppgtt(ctx, ppgtt); + i915_ppgtt_put(ppgtt); + } + + if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) { + struct i915_timeline *timeline; + + timeline = i915_timeline_create(dev_priv, NULL); + if (IS_ERR(timeline)) { + context_close(ctx); + 
return ERR_CAST(timeline); + } + + ctx->timeline = timeline; + } + + trace_i915_context_create(ctx); + + return ctx; +} + +/** + * i915_gem_context_create_gvt - create a GVT GEM context + * @dev: drm device * + * + * This function is used to create a GVT specific GEM context. + * + * Returns: + * pointer to i915_gem_context on success, error pointer if failed + * + */ +struct i915_gem_context * +i915_gem_context_create_gvt(struct drm_device *dev) +{ + struct i915_gem_context *ctx; + int ret; + + if (!IS_ENABLED(CONFIG_DRM_I915_GVT)) + return ERR_PTR(-ENODEV); + + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ERR_PTR(ret); + + ctx = i915_gem_create_context(to_i915(dev), 0); + if (IS_ERR(ctx)) + goto out; + + ret = i915_gem_context_pin_hw_id(ctx); + if (ret) { + context_close(ctx); + ctx = ERR_PTR(ret); + goto out; + } + + ctx->file_priv = ERR_PTR(-EBADF); + i915_gem_context_set_closed(ctx); /* not user accessible */ + i915_gem_context_clear_bannable(ctx); + i915_gem_context_set_force_single_submission(ctx); + if (!USES_GUC_SUBMISSION(to_i915(dev))) + ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */ + + GEM_BUG_ON(i915_gem_context_is_kernel(ctx)); +out: + mutex_unlock(&dev->struct_mutex); + return ctx; +} + +static void +destroy_kernel_context(struct i915_gem_context **ctxp) +{ + struct i915_gem_context *ctx; + + /* Keep the context ref so that we can free it immediately ourselves */ + ctx = i915_gem_context_get(fetch_and_zero(ctxp)); + GEM_BUG_ON(!i915_gem_context_is_kernel(ctx)); + + context_close(ctx); + i915_gem_context_free(ctx); +} + +struct i915_gem_context * +i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio) +{ + struct i915_gem_context *ctx; + int err; + + ctx = i915_gem_create_context(i915, 0); + if (IS_ERR(ctx)) + return ctx; + + err = i915_gem_context_pin_hw_id(ctx); + if (err) { + destroy_kernel_context(&ctx); + return ERR_PTR(err); + } + + i915_gem_context_clear_bannable(ctx); + ctx->sched.priority = I915_USER_PRIORITY(prio); + ctx->ring_size = PAGE_SIZE; + + GEM_BUG_ON(!i915_gem_context_is_kernel(ctx)); + + return ctx; +} + +static void init_contexts(struct drm_i915_private *i915) +{ + mutex_init(&i915->contexts.mutex); + INIT_LIST_HEAD(&i915->contexts.list); + + /* Using the simple ida interface, the max is limited by sizeof(int) */ + BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX); + BUILD_BUG_ON(GEN11_MAX_CONTEXT_HW_ID > INT_MAX); + ida_init(&i915->contexts.hw_ida); + INIT_LIST_HEAD(&i915->contexts.hw_id_list); + + INIT_WORK(&i915->contexts.free_work, contexts_free_worker); + init_llist_head(&i915->contexts.free_list); +} + +static bool needs_preempt_context(struct drm_i915_private *i915) +{ + return HAS_EXECLISTS(i915); +} + +int i915_gem_contexts_init(struct drm_i915_private *dev_priv) +{ + struct i915_gem_context *ctx; + + /* Reassure ourselves we are only called once */ + GEM_BUG_ON(dev_priv->kernel_context); + GEM_BUG_ON(dev_priv->preempt_context); + + intel_engine_init_ctx_wa(dev_priv->engine[RCS0]); + init_contexts(dev_priv); + + /* lowest priority; idle task */ + ctx = i915_gem_context_create_kernel(dev_priv, I915_PRIORITY_MIN); + if (IS_ERR(ctx)) { + DRM_ERROR("Failed to create default global context\n"); + return PTR_ERR(ctx); + } + /* + * For easy recognisablity, we want the kernel context to be 0 and then + * all user contexts will have non-zero hw_id. Kernel contexts are + * permanently pinned, so that we never suffer a stall and can + * use them from any allocation context (e.g. 
for evicting other + * contexts and from inside the shrinker). + */ + GEM_BUG_ON(ctx->hw_id); + GEM_BUG_ON(!atomic_read(&ctx->hw_id_pin_count)); + dev_priv->kernel_context = ctx; + + /* highest priority; preempting task */ + if (needs_preempt_context(dev_priv)) { + ctx = i915_gem_context_create_kernel(dev_priv, INT_MAX); + if (!IS_ERR(ctx)) + dev_priv->preempt_context = ctx; + else + DRM_ERROR("Failed to create preempt context; disabling preemption\n"); + } + + DRM_DEBUG_DRIVER("%s context support initialized\n", + DRIVER_CAPS(dev_priv)->has_logical_contexts ? + "logical" : "fake"); + return 0; +} + +void i915_gem_contexts_lost(struct drm_i915_private *dev_priv) +{ + struct intel_engine_cs *engine; + enum intel_engine_id id; + + lockdep_assert_held(&dev_priv->drm.struct_mutex); + + for_each_engine(engine, dev_priv, id) + intel_engine_lost_context(engine); +} + +void i915_gem_contexts_fini(struct drm_i915_private *i915) +{ + lockdep_assert_held(&i915->drm.struct_mutex); + + if (i915->preempt_context) + destroy_kernel_context(&i915->preempt_context); + destroy_kernel_context(&i915->kernel_context); + + /* Must free all deferred contexts (via flush_workqueue) first */ + GEM_BUG_ON(!list_empty(&i915->contexts.hw_id_list)); + ida_destroy(&i915->contexts.hw_ida); +} + +static int context_idr_cleanup(int id, void *p, void *data) +{ + context_close(p); + return 0; +} + +static int vm_idr_cleanup(int id, void *p, void *data) +{ + i915_ppgtt_put(p); + return 0; +} + +static int gem_context_register(struct i915_gem_context *ctx, + struct drm_i915_file_private *fpriv) +{ + int ret; + + ctx->file_priv = fpriv; + if (ctx->ppgtt) + ctx->ppgtt->vm.file = fpriv; + + ctx->pid = get_task_pid(current, PIDTYPE_PID); + ctx->name = kasprintf(GFP_KERNEL, "%s[%d]", + current->comm, pid_nr(ctx->pid)); + if (!ctx->name) { + ret = -ENOMEM; + goto err_pid; + } + + /* And finally expose ourselves to userspace via the idr */ + mutex_lock(&fpriv->context_idr_lock); + ret = idr_alloc(&fpriv->context_idr, ctx, 0, 0, GFP_KERNEL); + mutex_unlock(&fpriv->context_idr_lock); + if (ret >= 0) + goto out; + + kfree(fetch_and_zero(&ctx->name)); +err_pid: + put_pid(fetch_and_zero(&ctx->pid)); +out: + return ret; +} + +int i915_gem_context_open(struct drm_i915_private *i915, + struct drm_file *file) +{ + struct drm_i915_file_private *file_priv = file->driver_priv; + struct i915_gem_context *ctx; + int err; + + mutex_init(&file_priv->context_idr_lock); + mutex_init(&file_priv->vm_idr_lock); + + idr_init(&file_priv->context_idr); + idr_init_base(&file_priv->vm_idr, 1); + + mutex_lock(&i915->drm.struct_mutex); + ctx = i915_gem_create_context(i915, 0); + mutex_unlock(&i915->drm.struct_mutex); + if (IS_ERR(ctx)) { + err = PTR_ERR(ctx); + goto err; + } + + err = gem_context_register(ctx, file_priv); + if (err < 0) + goto err_ctx; + + GEM_BUG_ON(i915_gem_context_is_kernel(ctx)); + GEM_BUG_ON(err > 0); + + return 0; + +err_ctx: + mutex_lock(&i915->drm.struct_mutex); + context_close(ctx); + mutex_unlock(&i915->drm.struct_mutex); +err: + idr_destroy(&file_priv->vm_idr); + idr_destroy(&file_priv->context_idr); + mutex_destroy(&file_priv->vm_idr_lock); + mutex_destroy(&file_priv->context_idr_lock); + return err; +} + +void i915_gem_context_close(struct drm_file *file) +{ + struct drm_i915_file_private *file_priv = file->driver_priv; + + lockdep_assert_held(&file_priv->dev_priv->drm.struct_mutex); + + idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL); + idr_destroy(&file_priv->context_idr); + 
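/*
 * Illustrative sketch only (not part of this patch): the deferred-free idiom
 * used by i915_gem_context_release() and contexts_free_worker() above, and
 * set up by init_contexts().  The final kref_put() may run where the mutex
 * cannot be taken, so the object is pushed onto a lock-free llist and a
 * worker reaps the whole batch under the lock.  llist_add() returns true
 * only for the first node, so the work is queued once per batch.  The names
 * "foo", "foo_free_list" etc. are invented for the example.
 */
#include <linux/llist.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct foo {
	struct llist_node free_link;
	/* ... payload ... */
};

static LLIST_HEAD(foo_free_list);
static DEFINE_MUTEX(foo_lock);

static void foo_free_worker(struct work_struct *work)
{
	struct llist_node *freed = llist_del_all(&foo_free_list);
	struct foo *f, *fn;

	mutex_lock(&foo_lock);	/* heavyweight teardown happens here */
	llist_for_each_entry_safe(f, fn, freed, free_link)
		kfree(f);
	mutex_unlock(&foo_lock);
}

static DECLARE_WORK(foo_free_work, foo_free_worker);

static void foo_release(struct foo *f)
{
	/* whoever adds the first node kicks the worker */
	if (llist_add(&f->free_link, &foo_free_list))
		schedule_work(&foo_free_work);
}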
mutex_destroy(&file_priv->context_idr_lock); + + idr_for_each(&file_priv->vm_idr, vm_idr_cleanup, NULL); + idr_destroy(&file_priv->vm_idr); + mutex_destroy(&file_priv->vm_idr_lock); +} + +int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_i915_private *i915 = to_i915(dev); + struct drm_i915_gem_vm_control *args = data; + struct drm_i915_file_private *file_priv = file->driver_priv; + struct i915_hw_ppgtt *ppgtt; + int err; + + if (!HAS_FULL_PPGTT(i915)) + return -ENODEV; + + if (args->flags) + return -EINVAL; + + ppgtt = i915_ppgtt_create(i915); + if (IS_ERR(ppgtt)) + return PTR_ERR(ppgtt); + + ppgtt->vm.file = file_priv; + + if (args->extensions) { + err = i915_user_extensions(u64_to_user_ptr(args->extensions), + NULL, 0, + ppgtt); + if (err) + goto err_put; + } + + err = mutex_lock_interruptible(&file_priv->vm_idr_lock); + if (err) + goto err_put; + + err = idr_alloc(&file_priv->vm_idr, ppgtt, 0, 0, GFP_KERNEL); + if (err < 0) + goto err_unlock; + + GEM_BUG_ON(err == 0); /* reserved for invalid/unassigned ppgtt */ + + mutex_unlock(&file_priv->vm_idr_lock); + + args->vm_id = err; + return 0; + +err_unlock: + mutex_unlock(&file_priv->vm_idr_lock); +err_put: + i915_ppgtt_put(ppgtt); + return err; +} + +int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_i915_file_private *file_priv = file->driver_priv; + struct drm_i915_gem_vm_control *args = data; + struct i915_hw_ppgtt *ppgtt; + int err; + u32 id; + + if (args->flags) + return -EINVAL; + + if (args->extensions) + return -EINVAL; + + id = args->vm_id; + if (!id) + return -ENOENT; + + err = mutex_lock_interruptible(&file_priv->vm_idr_lock); + if (err) + return err; + + ppgtt = idr_remove(&file_priv->vm_idr, id); + + mutex_unlock(&file_priv->vm_idr_lock); + if (!ppgtt) + return -ENOENT; + + i915_ppgtt_put(ppgtt); + return 0; +} + +struct context_barrier_task { + struct i915_active base; + void (*task)(void *data); + void *data; +}; + +static void cb_retire(struct i915_active *base) +{ + struct context_barrier_task *cb = container_of(base, typeof(*cb), base); + + if (cb->task) + cb->task(cb->data); + + i915_active_fini(&cb->base); + kfree(cb); +} + +I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault); +static int context_barrier_task(struct i915_gem_context *ctx, + intel_engine_mask_t engines, + int (*emit)(struct i915_request *rq, void *data), + void (*task)(void *data), + void *data) +{ + struct drm_i915_private *i915 = ctx->i915; + struct context_barrier_task *cb; + struct i915_gem_engines_iter it; + struct intel_context *ce; + int err = 0; + + lockdep_assert_held(&i915->drm.struct_mutex); + GEM_BUG_ON(!task); + + cb = kmalloc(sizeof(*cb), GFP_KERNEL); + if (!cb) + return -ENOMEM; + + i915_active_init(i915, &cb->base, cb_retire); + i915_active_acquire(&cb->base); + + for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { + struct i915_request *rq; + + if (I915_SELFTEST_ONLY(context_barrier_inject_fault & + ce->engine->mask)) { + err = -ENXIO; + break; + } + + if (!(ce->engine->mask & engines) || !ce->state) + continue; + + rq = intel_context_create_request(ce); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + break; + } + + err = 0; + if (emit) + err = emit(rq, data); + if (err == 0) + err = i915_active_ref(&cb->base, rq->fence.context, rq); + + i915_request_add(rq); + if (err) + break; + } + i915_gem_context_unlock_engines(ctx); + + cb->task = err ? 
NULL : task; /* caller needs to unwind instead */ + cb->data = data; + + i915_active_release(&cb->base); + + return err; +} + +static int get_ppgtt(struct drm_i915_file_private *file_priv, + struct i915_gem_context *ctx, + struct drm_i915_gem_context_param *args) +{ + struct i915_hw_ppgtt *ppgtt; + int ret; + + if (!ctx->ppgtt) + return -ENODEV; + + /* XXX rcu acquire? */ + ret = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex); + if (ret) + return ret; + + ppgtt = i915_ppgtt_get(ctx->ppgtt); + mutex_unlock(&ctx->i915->drm.struct_mutex); + + ret = mutex_lock_interruptible(&file_priv->vm_idr_lock); + if (ret) + goto err_put; + + ret = idr_alloc(&file_priv->vm_idr, ppgtt, 0, 0, GFP_KERNEL); + GEM_BUG_ON(!ret); + if (ret < 0) + goto err_unlock; + + i915_ppgtt_get(ppgtt); + + args->size = 0; + args->value = ret; + + ret = 0; +err_unlock: + mutex_unlock(&file_priv->vm_idr_lock); +err_put: + i915_ppgtt_put(ppgtt); + return ret; +} + +static void set_ppgtt_barrier(void *data) +{ + struct i915_hw_ppgtt *old = data; + + if (INTEL_GEN(old->vm.i915) < 8) + gen6_ppgtt_unpin_all(old); + + i915_ppgtt_put(old); +} + +static int emit_ppgtt_update(struct i915_request *rq, void *data) +{ + struct i915_hw_ppgtt *ppgtt = rq->gem_context->ppgtt; + struct intel_engine_cs *engine = rq->engine; + u32 base = engine->mmio_base; + u32 *cs; + int i; + + if (i915_vm_is_4lvl(&ppgtt->vm)) { + const dma_addr_t pd_daddr = px_dma(&ppgtt->pml4); + + cs = intel_ring_begin(rq, 6); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + *cs++ = MI_LOAD_REGISTER_IMM(2); + + *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 0)); + *cs++ = upper_32_bits(pd_daddr); + *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 0)); + *cs++ = lower_32_bits(pd_daddr); + + *cs++ = MI_NOOP; + intel_ring_advance(rq, cs); + } else if (HAS_LOGICAL_RING_CONTEXTS(engine->i915)) { + cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES); + for (i = GEN8_3LVL_PDPES; i--; ) { + const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i); + + *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i)); + *cs++ = upper_32_bits(pd_daddr); + *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i)); + *cs++ = lower_32_bits(pd_daddr); + } + *cs++ = MI_NOOP; + intel_ring_advance(rq, cs); + } else { + /* ppGTT is not part of the legacy context image */ + gen6_ppgtt_pin(ppgtt); + } + + return 0; +} + +static int set_ppgtt(struct drm_i915_file_private *file_priv, + struct i915_gem_context *ctx, + struct drm_i915_gem_context_param *args) +{ + struct i915_hw_ppgtt *ppgtt, *old; + int err; + + if (args->size) + return -EINVAL; + + if (!ctx->ppgtt) + return -ENODEV; + + if (upper_32_bits(args->value)) + return -ENOENT; + + err = mutex_lock_interruptible(&file_priv->vm_idr_lock); + if (err) + return err; + + ppgtt = idr_find(&file_priv->vm_idr, args->value); + if (ppgtt) + i915_ppgtt_get(ppgtt); + mutex_unlock(&file_priv->vm_idr_lock); + if (!ppgtt) + return -ENOENT; + + err = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex); + if (err) + goto out; + + if (ppgtt == ctx->ppgtt) + goto unlock; + + /* Teardown the existing obj:vma cache, it will have to be rebuilt. */ + lut_close(ctx); + + old = __set_ppgtt(ctx, ppgtt); + + /* + * We need to flush any requests using the current ppgtt before + * we release it as the requests do not hold a reference themselves, + * only indirectly through the context. 
+ */ + err = context_barrier_task(ctx, ALL_ENGINES, + emit_ppgtt_update, + set_ppgtt_barrier, + old); + if (err) { + ctx->ppgtt = old; + ctx->desc_template = default_desc_template(ctx->i915, old); + i915_ppgtt_put(ppgtt); + } + +unlock: + mutex_unlock(&ctx->i915->drm.struct_mutex); + +out: + i915_ppgtt_put(ppgtt); + return err; +} + +static int gen8_emit_rpcs_config(struct i915_request *rq, + struct intel_context *ce, + struct intel_sseu sseu) +{ + u64 offset; + u32 *cs; + + cs = intel_ring_begin(rq, 4); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + offset = i915_ggtt_offset(ce->state) + + LRC_STATE_PN * PAGE_SIZE + + (CTX_R_PWR_CLK_STATE + 1) * 4; + + *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; + *cs++ = lower_32_bits(offset); + *cs++ = upper_32_bits(offset); + *cs++ = intel_sseu_make_rpcs(rq->i915, &sseu); + + intel_ring_advance(rq, cs); + + return 0; +} + +static int +gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu) +{ + struct i915_request *rq; + int ret; + + lockdep_assert_held(&ce->pin_mutex); + + /* + * If the context is not idle, we have to submit an ordered request to + * modify its context image via the kernel context (writing to our own + * image, or into the registers directory, does not stick). Pristine + * and idle contexts will be configured on pinning. + */ + if (!intel_context_is_pinned(ce)) + return 0; + + rq = i915_request_create(ce->engine->kernel_context); + if (IS_ERR(rq)) + return PTR_ERR(rq); + + /* Queue this switch after all other activity by this context. */ + ret = i915_active_request_set(&ce->ring->timeline->last_request, rq); + if (ret) + goto out_add; + + ret = gen8_emit_rpcs_config(rq, ce, sseu); + if (ret) + goto out_add; + + /* + * Guarantee context image and the timeline remains pinned until the + * modifying request is retired by setting the ce activity tracker. + * + * But we only need to take one pin on the account of it. Or in other + * words transfer the pinned ce object to tracked active request. + */ + if (!i915_active_request_isset(&ce->active_tracker)) + __intel_context_pin(ce); + __i915_active_request_set(&ce->active_tracker, rq); + +out_add: + i915_request_add(rq); + return ret; +} + +static int +__intel_context_reconfigure_sseu(struct intel_context *ce, + struct intel_sseu sseu) +{ + int ret; + + GEM_BUG_ON(INTEL_GEN(ce->gem_context->i915) < 8); + + ret = intel_context_lock_pinned(ce); + if (ret) + return ret; + + /* Nothing to do if unmodified. */ + if (!memcmp(&ce->sseu, &sseu, sizeof(sseu))) + goto unlock; + + ret = gen8_modify_rpcs(ce, sseu); + if (!ret) + ce->sseu = sseu; + +unlock: + intel_context_unlock_pinned(ce); + return ret; +} + +static int +intel_context_reconfigure_sseu(struct intel_context *ce, struct intel_sseu sseu) +{ + struct drm_i915_private *i915 = ce->gem_context->i915; + int ret; + + ret = mutex_lock_interruptible(&i915->drm.struct_mutex); + if (ret) + return ret; + + ret = __intel_context_reconfigure_sseu(ce, sseu); + + mutex_unlock(&i915->drm.struct_mutex); + + return ret; +} + +static int +user_to_context_sseu(struct drm_i915_private *i915, + const struct drm_i915_gem_context_param_sseu *user, + struct intel_sseu *context) +{ + const struct sseu_dev_info *device = &RUNTIME_INFO(i915)->sseu; + + /* No zeros in any field. */ + if (!user->slice_mask || !user->subslice_mask || + !user->min_eus_per_subslice || !user->max_eus_per_subslice) + return -EINVAL; + + /* Max > min. 
*/ + if (user->max_eus_per_subslice < user->min_eus_per_subslice) + return -EINVAL; + + /* + * Some future proofing on the types since the uAPI is wider than the + * current internal implementation. + */ + if (overflows_type(user->slice_mask, context->slice_mask) || + overflows_type(user->subslice_mask, context->subslice_mask) || + overflows_type(user->min_eus_per_subslice, + context->min_eus_per_subslice) || + overflows_type(user->max_eus_per_subslice, + context->max_eus_per_subslice)) + return -EINVAL; + + /* Check validity against hardware. */ + if (user->slice_mask & ~device->slice_mask) + return -EINVAL; + + if (user->subslice_mask & ~device->subslice_mask[0]) + return -EINVAL; + + if (user->max_eus_per_subslice > device->max_eus_per_subslice) + return -EINVAL; + + context->slice_mask = user->slice_mask; + context->subslice_mask = user->subslice_mask; + context->min_eus_per_subslice = user->min_eus_per_subslice; + context->max_eus_per_subslice = user->max_eus_per_subslice; + + /* Part specific restrictions. */ + if (IS_GEN(i915, 11)) { + unsigned int hw_s = hweight8(device->slice_mask); + unsigned int hw_ss_per_s = hweight8(device->subslice_mask[0]); + unsigned int req_s = hweight8(context->slice_mask); + unsigned int req_ss = hweight8(context->subslice_mask); + + /* + * Only full subslice enablement is possible if more than one + * slice is turned on. + */ + if (req_s > 1 && req_ss != hw_ss_per_s) + return -EINVAL; + + /* + * If more than four (SScount bitfield limit) subslices are + * requested then the number has to be even. + */ + if (req_ss > 4 && (req_ss & 1)) + return -EINVAL; + + /* + * If only one slice is enabled and subslice count is below the + * device full enablement, it must be at most half of the all + * available subslices. + */ + if (req_s == 1 && req_ss < hw_ss_per_s && + req_ss > (hw_ss_per_s / 2)) + return -EINVAL; + + /* ABI restriction - VME use case only. */ + + /* All slices or one slice only. */ + if (req_s != 1 && req_s != hw_s) + return -EINVAL; + + /* + * Half subslices or full enablement only when one slice is + * enabled. + */ + if (req_s == 1 && + (req_ss != hw_ss_per_s && req_ss != (hw_ss_per_s / 2))) + return -EINVAL; + + /* No EU configuration changes. */ + if ((user->min_eus_per_subslice != + device->max_eus_per_subslice) || + (user->max_eus_per_subslice != + device->max_eus_per_subslice)) + return -EINVAL; + } + + return 0; +} + +static int set_sseu(struct i915_gem_context *ctx, + struct drm_i915_gem_context_param *args) +{ + struct drm_i915_private *i915 = ctx->i915; + struct drm_i915_gem_context_param_sseu user_sseu; + struct intel_context *ce; + struct intel_sseu sseu; + unsigned long lookup; + int ret; + + if (args->size < sizeof(user_sseu)) + return -EINVAL; + + if (!IS_GEN(i915, 11)) + return -ENODEV; + + if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value), + sizeof(user_sseu))) + return -EFAULT; + + if (user_sseu.rsvd) + return -EINVAL; + + if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)) + return -EINVAL; + + lookup = 0; + if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX) + lookup |= LOOKUP_USER_INDEX; + + ce = lookup_user_engine(ctx, lookup, &user_sseu.engine); + if (IS_ERR(ce)) + return PTR_ERR(ce); + + /* Only render engine supports RPCS configuration. 
*/ + if (ce->engine->class != RENDER_CLASS) { + ret = -ENODEV; + goto out_ce; + } + + ret = user_to_context_sseu(i915, &user_sseu, &sseu); + if (ret) + goto out_ce; + + ret = intel_context_reconfigure_sseu(ce, sseu); + if (ret) + goto out_ce; + + args->size = sizeof(user_sseu); + +out_ce: + intel_context_put(ce); + return ret; +} + +struct set_engines { + struct i915_gem_context *ctx; + struct i915_gem_engines *engines; +}; + +static int +set_engines__load_balance(struct i915_user_extension __user *base, void *data) +{ + struct i915_context_engines_load_balance __user *ext = + container_of_user(base, typeof(*ext), base); + const struct set_engines *set = data; + struct intel_engine_cs *stack[16]; + struct intel_engine_cs **siblings; + struct intel_context *ce; + u16 num_siblings, idx; + unsigned int n; + int err; + + if (!HAS_EXECLISTS(set->ctx->i915)) + return -ENODEV; + + if (USES_GUC_SUBMISSION(set->ctx->i915)) + return -ENODEV; /* not implement yet */ + + if (get_user(idx, &ext->engine_index)) + return -EFAULT; + + if (idx >= set->engines->num_engines) { + DRM_DEBUG("Invalid placement value, %d >= %d\n", + idx, set->engines->num_engines); + return -EINVAL; + } + + idx = array_index_nospec(idx, set->engines->num_engines); + if (set->engines->engines[idx]) { + DRM_DEBUG("Invalid placement[%d], already occupied\n", idx); + return -EEXIST; + } + + if (get_user(num_siblings, &ext->num_siblings)) + return -EFAULT; + + err = check_user_mbz(&ext->flags); + if (err) + return err; + + err = check_user_mbz(&ext->mbz64); + if (err) + return err; + + siblings = stack; + if (num_siblings > ARRAY_SIZE(stack)) { + siblings = kmalloc_array(num_siblings, + sizeof(*siblings), + GFP_KERNEL); + if (!siblings) + return -ENOMEM; + } + + for (n = 0; n < num_siblings; n++) { + struct i915_engine_class_instance ci; + + if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) { + err = -EFAULT; + goto out_siblings; + } + + siblings[n] = intel_engine_lookup_user(set->ctx->i915, + ci.engine_class, + ci.engine_instance); + if (!siblings[n]) { + DRM_DEBUG("Invalid sibling[%d]: { class:%d, inst:%d }\n", + n, ci.engine_class, ci.engine_instance); + err = -EINVAL; + goto out_siblings; + } + } + + ce = intel_execlists_create_virtual(set->ctx, siblings, n); + if (IS_ERR(ce)) { + err = PTR_ERR(ce); + goto out_siblings; + } + + if (cmpxchg(&set->engines->engines[idx], NULL, ce)) { + intel_context_put(ce); + err = -EEXIST; + goto out_siblings; + } + +out_siblings: + if (siblings != stack) + kfree(siblings); + + return err; +} + +static int +set_engines__bond(struct i915_user_extension __user *base, void *data) +{ + struct i915_context_engines_bond __user *ext = + container_of_user(base, typeof(*ext), base); + const struct set_engines *set = data; + struct i915_engine_class_instance ci; + struct intel_engine_cs *virtual; + struct intel_engine_cs *master; + u16 idx, num_bonds; + int err, n; + + if (get_user(idx, &ext->virtual_index)) + return -EFAULT; + + if (idx >= set->engines->num_engines) { + DRM_DEBUG("Invalid index for virtual engine: %d >= %d\n", + idx, set->engines->num_engines); + return -EINVAL; + } + + idx = array_index_nospec(idx, set->engines->num_engines); + if (!set->engines->engines[idx]) { + DRM_DEBUG("Invalid engine at %d\n", idx); + return -EINVAL; + } + virtual = set->engines->engines[idx]->engine; + + err = check_user_mbz(&ext->flags); + if (err) + return err; + + for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) { + err = check_user_mbz(&ext->mbz64[n]); + if (err) + return err; + } + + if (copy_from_user(&ci, 
&ext->master, sizeof(ci))) + return -EFAULT; + + master = intel_engine_lookup_user(set->ctx->i915, + ci.engine_class, ci.engine_instance); + if (!master) { + DRM_DEBUG("Unrecognised master engine: { class:%u, instance:%u }\n", + ci.engine_class, ci.engine_instance); + return -EINVAL; + } + + if (get_user(num_bonds, &ext->num_bonds)) + return -EFAULT; + + for (n = 0; n < num_bonds; n++) { + struct intel_engine_cs *bond; + + if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) + return -EFAULT; + + bond = intel_engine_lookup_user(set->ctx->i915, + ci.engine_class, + ci.engine_instance); + if (!bond) { + DRM_DEBUG("Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n", + n, ci.engine_class, ci.engine_instance); + return -EINVAL; + } + + /* + * A non-virtual engine has no siblings to choose between; and + * a submit fence will always be directed to the one engine. + */ + if (intel_engine_is_virtual(virtual)) { + err = intel_virtual_engine_attach_bond(virtual, + master, + bond); + if (err) + return err; + } + } + + return 0; +} + +static const i915_user_extension_fn set_engines__extensions[] = { + [I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_engines__load_balance, + [I915_CONTEXT_ENGINES_EXT_BOND] = set_engines__bond, +}; + +static int +set_engines(struct i915_gem_context *ctx, + const struct drm_i915_gem_context_param *args) +{ + struct i915_context_param_engines __user *user = + u64_to_user_ptr(args->value); + struct set_engines set = { .ctx = ctx }; + unsigned int num_engines, n; + u64 extensions; + int err; + + if (!args->size) { /* switch back to legacy user_ring_map */ + if (!i915_gem_context_user_engines(ctx)) + return 0; + + set.engines = default_engines(ctx); + if (IS_ERR(set.engines)) + return PTR_ERR(set.engines); + + goto replace; + } + + BUILD_BUG_ON(!IS_ALIGNED(sizeof(*user), sizeof(*user->engines))); + if (args->size < sizeof(*user) || + !IS_ALIGNED(args->size, sizeof(*user->engines))) { + DRM_DEBUG("Invalid size for engine array: %d\n", + args->size); + return -EINVAL; + } + + /* + * Note that I915_EXEC_RING_MASK limits execbuf to only using the + * first 64 engines defined here. 
+ */ + num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines); + + set.engines = kmalloc(struct_size(set.engines, engines, num_engines), + GFP_KERNEL); + if (!set.engines) + return -ENOMEM; + + set.engines->i915 = ctx->i915; + for (n = 0; n < num_engines; n++) { + struct i915_engine_class_instance ci; + struct intel_engine_cs *engine; + + if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) { + __free_engines(set.engines, n); + return -EFAULT; + } + + if (ci.engine_class == (u16)I915_ENGINE_CLASS_INVALID && + ci.engine_instance == (u16)I915_ENGINE_CLASS_INVALID_NONE) { + set.engines->engines[n] = NULL; + continue; + } + + engine = intel_engine_lookup_user(ctx->i915, + ci.engine_class, + ci.engine_instance); + if (!engine) { + DRM_DEBUG("Invalid engine[%d]: { class:%d, instance:%d }\n", + n, ci.engine_class, ci.engine_instance); + __free_engines(set.engines, n); + return -ENOENT; + } + + set.engines->engines[n] = intel_context_create(ctx, engine); + if (!set.engines->engines[n]) { + __free_engines(set.engines, n); + return -ENOMEM; + } + } + set.engines->num_engines = num_engines; + + err = -EFAULT; + if (!get_user(extensions, &user->extensions)) + err = i915_user_extensions(u64_to_user_ptr(extensions), + set_engines__extensions, + ARRAY_SIZE(set_engines__extensions), + &set); + if (err) { + free_engines(set.engines); + return err; + } + +replace: + mutex_lock(&ctx->engines_mutex); + if (args->size) + i915_gem_context_set_user_engines(ctx); + else + i915_gem_context_clear_user_engines(ctx); + rcu_swap_protected(ctx->engines, set.engines, 1); + mutex_unlock(&ctx->engines_mutex); + + INIT_RCU_WORK(&set.engines->rcu, free_engines_rcu); + queue_rcu_work(system_wq, &set.engines->rcu); + + return 0; +} + +static struct i915_gem_engines * +__copy_engines(struct i915_gem_engines *e) +{ + struct i915_gem_engines *copy; + unsigned int n; + + copy = kmalloc(struct_size(e, engines, e->num_engines), GFP_KERNEL); + if (!copy) + return ERR_PTR(-ENOMEM); + + copy->i915 = e->i915; + for (n = 0; n < e->num_engines; n++) { + if (e->engines[n]) + copy->engines[n] = intel_context_get(e->engines[n]); + else + copy->engines[n] = NULL; + } + copy->num_engines = n; + + return copy; +} + +static int +get_engines(struct i915_gem_context *ctx, + struct drm_i915_gem_context_param *args) +{ + struct i915_context_param_engines __user *user; + struct i915_gem_engines *e; + size_t n, count, size; + int err = 0; + + err = mutex_lock_interruptible(&ctx->engines_mutex); + if (err) + return err; + + e = NULL; + if (i915_gem_context_user_engines(ctx)) + e = __copy_engines(i915_gem_context_engines(ctx)); + mutex_unlock(&ctx->engines_mutex); + if (IS_ERR_OR_NULL(e)) { + args->size = 0; + return PTR_ERR_OR_ZERO(e); + } + + count = e->num_engines; + + /* Be paranoid in case we have an impedance mismatch */ + if (!check_struct_size(user, engines, count, &size)) { + err = -EINVAL; + goto err_free; + } + if (overflows_type(size, args->size)) { + err = -EINVAL; + goto err_free; + } + + if (!args->size) { + args->size = size; + goto err_free; + } + + if (args->size < size) { + err = -EINVAL; + goto err_free; + } + + user = u64_to_user_ptr(args->value); + if (!access_ok(user, size)) { + err = -EFAULT; + goto err_free; + } + + if (put_user(0, &user->extensions)) { + err = -EFAULT; + goto err_free; + } + + for (n = 0; n < count; n++) { + struct i915_engine_class_instance ci = { + .engine_class = I915_ENGINE_CLASS_INVALID, + .engine_instance = I915_ENGINE_CLASS_INVALID_NONE, + }; + + if (e->engines[n]) { + ci.engine_class = 
e->engines[n]->engine->uabi_class; + ci.engine_instance = e->engines[n]->engine->instance; + } + + if (copy_to_user(&user->engines[n], &ci, sizeof(ci))) { + err = -EFAULT; + goto err_free; + } + } + + args->size = size; + +err_free: + INIT_RCU_WORK(&e->rcu, free_engines_rcu); + queue_rcu_work(system_wq, &e->rcu); + return err; +} + +static int ctx_setparam(struct drm_i915_file_private *fpriv, + struct i915_gem_context *ctx, + struct drm_i915_gem_context_param *args) +{ + int ret = 0; + + switch (args->param) { + case I915_CONTEXT_PARAM_NO_ZEROMAP: + if (args->size) + ret = -EINVAL; + else if (args->value) + set_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags); + else + clear_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags); + break; + + case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE: + if (args->size) + ret = -EINVAL; + else if (args->value) + i915_gem_context_set_no_error_capture(ctx); + else + i915_gem_context_clear_no_error_capture(ctx); + break; + + case I915_CONTEXT_PARAM_BANNABLE: + if (args->size) + ret = -EINVAL; + else if (!capable(CAP_SYS_ADMIN) && !args->value) + ret = -EPERM; + else if (args->value) + i915_gem_context_set_bannable(ctx); + else + i915_gem_context_clear_bannable(ctx); + break; + + case I915_CONTEXT_PARAM_RECOVERABLE: + if (args->size) + ret = -EINVAL; + else if (args->value) + i915_gem_context_set_recoverable(ctx); + else + i915_gem_context_clear_recoverable(ctx); + break; + + case I915_CONTEXT_PARAM_PRIORITY: + { + s64 priority = args->value; + + if (args->size) + ret = -EINVAL; + else if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY)) + ret = -ENODEV; + else if (priority > I915_CONTEXT_MAX_USER_PRIORITY || + priority < I915_CONTEXT_MIN_USER_PRIORITY) + ret = -EINVAL; + else if (priority > I915_CONTEXT_DEFAULT_PRIORITY && + !capable(CAP_SYS_NICE)) + ret = -EPERM; + else + ctx->sched.priority = + I915_USER_PRIORITY(priority); + } + break; + + case I915_CONTEXT_PARAM_SSEU: + ret = set_sseu(ctx, args); + break; + + case I915_CONTEXT_PARAM_VM: + ret = set_ppgtt(fpriv, ctx, args); + break; + + case I915_CONTEXT_PARAM_ENGINES: + ret = set_engines(ctx, args); + break; + + case I915_CONTEXT_PARAM_BAN_PERIOD: + default: + ret = -EINVAL; + break; + } + + return ret; +} + +struct create_ext { + struct i915_gem_context *ctx; + struct drm_i915_file_private *fpriv; +}; + +static int create_setparam(struct i915_user_extension __user *ext, void *data) +{ + struct drm_i915_gem_context_create_ext_setparam local; + const struct create_ext *arg = data; + + if (copy_from_user(&local, ext, sizeof(local))) + return -EFAULT; + + if (local.param.ctx_id) + return -EINVAL; + + return ctx_setparam(arg->fpriv, arg->ctx, &local.param); +} + +static int clone_engines(struct i915_gem_context *dst, + struct i915_gem_context *src) +{ + struct i915_gem_engines *e = i915_gem_context_lock_engines(src); + struct i915_gem_engines *clone; + bool user_engines; + unsigned long n; + + clone = kmalloc(struct_size(e, engines, e->num_engines), GFP_KERNEL); + if (!clone) + goto err_unlock; + + clone->i915 = dst->i915; + for (n = 0; n < e->num_engines; n++) { + struct intel_engine_cs *engine; + + if (!e->engines[n]) { + clone->engines[n] = NULL; + continue; + } + engine = e->engines[n]->engine; + + /* + * Virtual engines are singletons; they can only exist + * inside a single context, because they embed their + * HW context... As each virtual context implies a single + * timeline (each engine can only dequeue a single request + * at any time), it would be surprising for two contexts + * to use the same engine. 
So let's create a copy of + * the virtual engine instead. + */ + if (intel_engine_is_virtual(engine)) + clone->engines[n] = + intel_execlists_clone_virtual(dst, engine); + else + clone->engines[n] = intel_context_create(dst, engine); + if (IS_ERR_OR_NULL(clone->engines[n])) { + __free_engines(clone, n); + goto err_unlock; + } + } + clone->num_engines = n; + + user_engines = i915_gem_context_user_engines(src); + i915_gem_context_unlock_engines(src); + + free_engines(dst->engines); + RCU_INIT_POINTER(dst->engines, clone); + if (user_engines) + i915_gem_context_set_user_engines(dst); + else + i915_gem_context_clear_user_engines(dst); + return 0; + +err_unlock: + i915_gem_context_unlock_engines(src); + return -ENOMEM; +} + +static int clone_flags(struct i915_gem_context *dst, + struct i915_gem_context *src) +{ + dst->user_flags = src->user_flags; + return 0; +} + +static int clone_schedattr(struct i915_gem_context *dst, + struct i915_gem_context *src) +{ + dst->sched = src->sched; + return 0; +} + +static int clone_sseu(struct i915_gem_context *dst, + struct i915_gem_context *src) +{ + struct i915_gem_engines *e = i915_gem_context_lock_engines(src); + struct i915_gem_engines *clone; + unsigned long n; + int err; + + clone = dst->engines; /* no locking required; sole access */ + if (e->num_engines != clone->num_engines) { + err = -EINVAL; + goto unlock; + } + + for (n = 0; n < e->num_engines; n++) { + struct intel_context *ce = e->engines[n]; + + if (clone->engines[n]->engine->class != ce->engine->class) { + /* Must have compatible engine maps! */ + err = -EINVAL; + goto unlock; + } + + /* serialises with set_sseu */ + err = intel_context_lock_pinned(ce); + if (err) + goto unlock; + + clone->engines[n]->sseu = ce->sseu; + intel_context_unlock_pinned(ce); + } + + err = 0; +unlock: + i915_gem_context_unlock_engines(src); + return err; +} + +static int clone_timeline(struct i915_gem_context *dst, + struct i915_gem_context *src) +{ + if (src->timeline) { + GEM_BUG_ON(src->timeline == dst->timeline); + + if (dst->timeline) + i915_timeline_put(dst->timeline); + dst->timeline = i915_timeline_get(src->timeline); + } + + return 0; +} + +static int clone_vm(struct i915_gem_context *dst, + struct i915_gem_context *src) +{ + struct i915_hw_ppgtt *ppgtt; + + rcu_read_lock(); + do { + ppgtt = READ_ONCE(src->ppgtt); + if (!ppgtt) + break; + + if (!kref_get_unless_zero(&ppgtt->ref)) + continue; + + /* + * This ppgtt may have be reallocated between + * the read and the kref, and reassigned to a third + * context. In order to avoid inadvertent sharing + * of this ppgtt with that third context (and not + * src), we have to confirm that we have the same + * ppgtt after passing through the strong memory + * barrier implied by a successful + * kref_get_unless_zero(). + * + * Once we have acquired the current ppgtt of src, + * we no longer care if it is released from src, as + * it cannot be reallocated elsewhere. 
+ */ + + if (ppgtt == READ_ONCE(src->ppgtt)) + break; + + i915_ppgtt_put(ppgtt); + } while (1); + rcu_read_unlock(); + + if (ppgtt) { + __assign_ppgtt(dst, ppgtt); + i915_ppgtt_put(ppgtt); + } + + return 0; +} + +static int create_clone(struct i915_user_extension __user *ext, void *data) +{ + static int (* const fn[])(struct i915_gem_context *dst, + struct i915_gem_context *src) = { +#define MAP(x, y) [ilog2(I915_CONTEXT_CLONE_##x)] = y + MAP(ENGINES, clone_engines), + MAP(FLAGS, clone_flags), + MAP(SCHEDATTR, clone_schedattr), + MAP(SSEU, clone_sseu), + MAP(TIMELINE, clone_timeline), + MAP(VM, clone_vm), +#undef MAP + }; + struct drm_i915_gem_context_create_ext_clone local; + const struct create_ext *arg = data; + struct i915_gem_context *dst = arg->ctx; + struct i915_gem_context *src; + int err, bit; + + if (copy_from_user(&local, ext, sizeof(local))) + return -EFAULT; + + BUILD_BUG_ON(GENMASK(BITS_PER_TYPE(local.flags) - 1, ARRAY_SIZE(fn)) != + I915_CONTEXT_CLONE_UNKNOWN); + + if (local.flags & I915_CONTEXT_CLONE_UNKNOWN) + return -EINVAL; + + if (local.rsvd) + return -EINVAL; + + rcu_read_lock(); + src = __i915_gem_context_lookup_rcu(arg->fpriv, local.clone_id); + rcu_read_unlock(); + if (!src) + return -ENOENT; + + GEM_BUG_ON(src == dst); + + for (bit = 0; bit < ARRAY_SIZE(fn); bit++) { + if (!(local.flags & BIT(bit))) + continue; + + err = fn[bit](dst, src); + if (err) + return err; + } + + return 0; +} + +static const i915_user_extension_fn create_extensions[] = { + [I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam, + [I915_CONTEXT_CREATE_EXT_CLONE] = create_clone, +}; + +static bool client_is_banned(struct drm_i915_file_private *file_priv) +{ + return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED; +} + +int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_i915_private *i915 = to_i915(dev); + struct drm_i915_gem_context_create_ext *args = data; + struct create_ext ext_data; + int ret; + + if (!DRIVER_CAPS(i915)->has_logical_contexts) + return -ENODEV; + + if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN) + return -EINVAL; + + ret = i915_terminally_wedged(i915); + if (ret) + return ret; + + ext_data.fpriv = file->driver_priv; + if (client_is_banned(ext_data.fpriv)) { + DRM_DEBUG("client %s[%d] banned from creating ctx\n", + current->comm, + pid_nr(get_task_pid(current, PIDTYPE_PID))); + return -EIO; + } + + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ret; + + ext_data.ctx = i915_gem_create_context(i915, args->flags); + mutex_unlock(&dev->struct_mutex); + if (IS_ERR(ext_data.ctx)) + return PTR_ERR(ext_data.ctx); + + if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS) { + ret = i915_user_extensions(u64_to_user_ptr(args->extensions), + create_extensions, + ARRAY_SIZE(create_extensions), + &ext_data); + if (ret) + goto err_ctx; + } + + ret = gem_context_register(ext_data.ctx, ext_data.fpriv); + if (ret < 0) + goto err_ctx; + + args->ctx_id = ret; + DRM_DEBUG("HW context %d created\n", args->ctx_id); + + return 0; + +err_ctx: + mutex_lock(&dev->struct_mutex); + context_close(ext_data.ctx); + mutex_unlock(&dev->struct_mutex); + return ret; +} + +int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_i915_gem_context_destroy *args = data; + struct drm_i915_file_private *file_priv = file->driver_priv; + struct i915_gem_context *ctx; + + if (args->pad != 0) + return -EINVAL; + + if (!args->ctx_id) + return -ENOENT; + + if 
(mutex_lock_interruptible(&file_priv->context_idr_lock)) + return -EINTR; + + ctx = idr_remove(&file_priv->context_idr, args->ctx_id); + mutex_unlock(&file_priv->context_idr_lock); + if (!ctx) + return -ENOENT; + + mutex_lock(&dev->struct_mutex); + context_close(ctx); + mutex_unlock(&dev->struct_mutex); + + return 0; +} + +static int get_sseu(struct i915_gem_context *ctx, + struct drm_i915_gem_context_param *args) +{ + struct drm_i915_gem_context_param_sseu user_sseu; + struct intel_context *ce; + unsigned long lookup; + int err; + + if (args->size == 0) + goto out; + else if (args->size < sizeof(user_sseu)) + return -EINVAL; + + if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value), + sizeof(user_sseu))) + return -EFAULT; + + if (user_sseu.rsvd) + return -EINVAL; + + if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)) + return -EINVAL; + + lookup = 0; + if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX) + lookup |= LOOKUP_USER_INDEX; + + ce = lookup_user_engine(ctx, lookup, &user_sseu.engine); + if (IS_ERR(ce)) + return PTR_ERR(ce); + + err = intel_context_lock_pinned(ce); /* serialises with set_sseu */ + if (err) { + intel_context_put(ce); + return err; + } + + user_sseu.slice_mask = ce->sseu.slice_mask; + user_sseu.subslice_mask = ce->sseu.subslice_mask; + user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice; + user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice; + + intel_context_unlock_pinned(ce); + intel_context_put(ce); + + if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu, + sizeof(user_sseu))) + return -EFAULT; + +out: + args->size = sizeof(user_sseu); + + return 0; +} + +int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_i915_file_private *file_priv = file->driver_priv; + struct drm_i915_gem_context_param *args = data; + struct i915_gem_context *ctx; + int ret = 0; + + ctx = i915_gem_context_lookup(file_priv, args->ctx_id); + if (!ctx) + return -ENOENT; + + switch (args->param) { + case I915_CONTEXT_PARAM_NO_ZEROMAP: + args->size = 0; + args->value = test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags); + break; + + case I915_CONTEXT_PARAM_GTT_SIZE: + args->size = 0; + if (ctx->ppgtt) + args->value = ctx->ppgtt->vm.total; + else if (to_i915(dev)->mm.aliasing_ppgtt) + args->value = to_i915(dev)->mm.aliasing_ppgtt->vm.total; + else + args->value = to_i915(dev)->ggtt.vm.total; + break; + + case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE: + args->size = 0; + args->value = i915_gem_context_no_error_capture(ctx); + break; + + case I915_CONTEXT_PARAM_BANNABLE: + args->size = 0; + args->value = i915_gem_context_is_bannable(ctx); + break; + + case I915_CONTEXT_PARAM_RECOVERABLE: + args->size = 0; + args->value = i915_gem_context_is_recoverable(ctx); + break; + + case I915_CONTEXT_PARAM_PRIORITY: + args->size = 0; + args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT; + break; + + case I915_CONTEXT_PARAM_SSEU: + ret = get_sseu(ctx, args); + break; + + case I915_CONTEXT_PARAM_VM: + ret = get_ppgtt(file_priv, ctx, args); + break; + + case I915_CONTEXT_PARAM_ENGINES: + ret = get_engines(ctx, args); + break; + + case I915_CONTEXT_PARAM_BAN_PERIOD: + default: + ret = -EINVAL; + break; + } + + i915_gem_context_put(ctx); + return ret; +} + +int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_i915_file_private *file_priv = file->driver_priv; + struct drm_i915_gem_context_param *args = data; + struct i915_gem_context *ctx; + 
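/*
 * Minimal userspace sketch (hypothetical helper, assuming libdrm's drmIoctl()
 * and the uAPI from include/uapi/drm/i915_drm.h) of exercising the getparam
 * path handled by i915_gem_context_getparam_ioctl() above.  ctx_id 0 refers
 * to the file descriptor's default context.
 */
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <drm/i915_drm.h>

static int query_gtt_size(int fd, uint32_t ctx_id, uint64_t *size)
{
	struct drm_i915_gem_context_param p;

	memset(&p, 0, sizeof(p));
	p.ctx_id = ctx_id;
	p.param = I915_CONTEXT_PARAM_GTT_SIZE;

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p))
		return -errno;

	*size = p.value; /* total size of the context's GTT in bytes */
	return 0;
}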
int ret; + + ctx = i915_gem_context_lookup(file_priv, args->ctx_id); + if (!ctx) + return -ENOENT; + + ret = ctx_setparam(file_priv, ctx, args); + + i915_gem_context_put(ctx); + return ret; +} + +int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, + void *data, struct drm_file *file) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_reset_stats *args = data; + struct i915_gem_context *ctx; + int ret; + + if (args->flags || args->pad) + return -EINVAL; + + ret = -ENOENT; + rcu_read_lock(); + ctx = __i915_gem_context_lookup_rcu(file->driver_priv, args->ctx_id); + if (!ctx) + goto out; + + /* + * We opt for unserialised reads here. This may result in tearing + * in the extremely unlikely event of a GPU hang on this context + * as we are querying them. If we need that extra layer of protection, + * we should wrap the hangstats with a seqlock. + */ + + if (capable(CAP_SYS_ADMIN)) + args->reset_count = i915_reset_count(&dev_priv->gpu_error); + else + args->reset_count = 0; + + args->batch_active = atomic_read(&ctx->guilty_count); + args->batch_pending = atomic_read(&ctx->active_count); + + ret = 0; +out: + rcu_read_unlock(); + return ret; +} + +int __i915_gem_context_pin_hw_id(struct i915_gem_context *ctx) +{ + struct drm_i915_private *i915 = ctx->i915; + int err = 0; + + mutex_lock(&i915->contexts.mutex); + + GEM_BUG_ON(i915_gem_context_is_closed(ctx)); + + if (list_empty(&ctx->hw_id_link)) { + GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count)); + + err = assign_hw_id(i915, &ctx->hw_id); + if (err) + goto out_unlock; + + list_add_tail(&ctx->hw_id_link, &i915->contexts.hw_id_list); + } + + GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count) == ~0u); + atomic_inc(&ctx->hw_id_pin_count); + +out_unlock: + mutex_unlock(&i915->contexts.mutex); + return err; +} + +/* GEM context-engines iterator: for_each_gem_engine() */ +struct intel_context * +i915_gem_engines_iter_next(struct i915_gem_engines_iter *it) +{ + const struct i915_gem_engines *e = it->engines; + struct intel_context *ctx; + + do { + if (it->idx >= e->num_engines) + return NULL; + + ctx = e->engines[it->idx++]; + } while (!ctx); + + return ctx; +} + +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) +#include "selftests/mock_context.c" +#include "selftests/i915_gem_context.c" +#endif + +static void i915_global_gem_context_shrink(void) +{ + kmem_cache_shrink(global.slab_luts); +} + +static void i915_global_gem_context_exit(void) +{ + kmem_cache_destroy(global.slab_luts); +} + +static struct i915_global_gem_context global = { { + .shrink = i915_global_gem_context_shrink, + .exit = i915_global_gem_context_exit, +} }; + +int __init i915_global_gem_context_init(void) +{ + global.slab_luts = KMEM_CACHE(i915_lut_handle, 0); + if (!global.slab_luts) + return -ENOMEM; + + i915_global_register(&global.base); + return 0; +} diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h new file mode 100644 index 000000000000..630392c77e48 --- /dev/null +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h @@ -0,0 +1,240 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2016 Intel Corporation + */ + +#ifndef __I915_GEM_CONTEXT_H__ +#define __I915_GEM_CONTEXT_H__ + +#include "i915_gem_context_types.h" + +#include "gt/intel_context.h" + +#include "i915_gem.h" +#include "i915_scheduler.h" +#include "intel_device_info.h" + +struct drm_device; +struct drm_file; + +static inline bool i915_gem_context_is_closed(const struct i915_gem_context *ctx) +{ + return test_bit(CONTEXT_CLOSED, &ctx->flags); 
+} + +static inline void i915_gem_context_set_closed(struct i915_gem_context *ctx) +{ + GEM_BUG_ON(i915_gem_context_is_closed(ctx)); + set_bit(CONTEXT_CLOSED, &ctx->flags); +} + +static inline bool i915_gem_context_no_error_capture(const struct i915_gem_context *ctx) +{ + return test_bit(UCONTEXT_NO_ERROR_CAPTURE, &ctx->user_flags); +} + +static inline void i915_gem_context_set_no_error_capture(struct i915_gem_context *ctx) +{ + set_bit(UCONTEXT_NO_ERROR_CAPTURE, &ctx->user_flags); +} + +static inline void i915_gem_context_clear_no_error_capture(struct i915_gem_context *ctx) +{ + clear_bit(UCONTEXT_NO_ERROR_CAPTURE, &ctx->user_flags); +} + +static inline bool i915_gem_context_is_bannable(const struct i915_gem_context *ctx) +{ + return test_bit(UCONTEXT_BANNABLE, &ctx->user_flags); +} + +static inline void i915_gem_context_set_bannable(struct i915_gem_context *ctx) +{ + set_bit(UCONTEXT_BANNABLE, &ctx->user_flags); +} + +static inline void i915_gem_context_clear_bannable(struct i915_gem_context *ctx) +{ + clear_bit(UCONTEXT_BANNABLE, &ctx->user_flags); +} + +static inline bool i915_gem_context_is_recoverable(const struct i915_gem_context *ctx) +{ + return test_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags); +} + +static inline void i915_gem_context_set_recoverable(struct i915_gem_context *ctx) +{ + set_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags); +} + +static inline void i915_gem_context_clear_recoverable(struct i915_gem_context *ctx) +{ + clear_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags); +} + +static inline bool i915_gem_context_is_banned(const struct i915_gem_context *ctx) +{ + return test_bit(CONTEXT_BANNED, &ctx->flags); +} + +static inline void i915_gem_context_set_banned(struct i915_gem_context *ctx) +{ + set_bit(CONTEXT_BANNED, &ctx->flags); +} + +static inline bool i915_gem_context_force_single_submission(const struct i915_gem_context *ctx) +{ + return test_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ctx->flags); +} + +static inline void i915_gem_context_set_force_single_submission(struct i915_gem_context *ctx) +{ + __set_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ctx->flags); +} + +static inline bool +i915_gem_context_user_engines(const struct i915_gem_context *ctx) +{ + return test_bit(CONTEXT_USER_ENGINES, &ctx->flags); +} + +static inline void +i915_gem_context_set_user_engines(struct i915_gem_context *ctx) +{ + set_bit(CONTEXT_USER_ENGINES, &ctx->flags); +} + +static inline void +i915_gem_context_clear_user_engines(struct i915_gem_context *ctx) +{ + clear_bit(CONTEXT_USER_ENGINES, &ctx->flags); +} + +int __i915_gem_context_pin_hw_id(struct i915_gem_context *ctx); +static inline int i915_gem_context_pin_hw_id(struct i915_gem_context *ctx) +{ + if (atomic_inc_not_zero(&ctx->hw_id_pin_count)) + return 0; + + return __i915_gem_context_pin_hw_id(ctx); +} + +static inline void i915_gem_context_unpin_hw_id(struct i915_gem_context *ctx) +{ + GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count) == 0u); + atomic_dec(&ctx->hw_id_pin_count); +} + +static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx) +{ + return !ctx->file_priv; +} + +/* i915_gem_context.c */ +int __must_check i915_gem_contexts_init(struct drm_i915_private *dev_priv); +void i915_gem_contexts_lost(struct drm_i915_private *dev_priv); +void i915_gem_contexts_fini(struct drm_i915_private *dev_priv); + +int i915_gem_context_open(struct drm_i915_private *i915, + struct drm_file *file); +void i915_gem_context_close(struct drm_file *file); + +void i915_gem_context_release(struct kref *ctx_ref); +struct i915_gem_context * 
+i915_gem_context_create_gvt(struct drm_device *dev); + +int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data, + struct drm_file *file); +int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data, + struct drm_file *file); + +int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, + struct drm_file *file); +int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, + struct drm_file *file); +int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data, + struct drm_file *file); + +struct i915_gem_context * +i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio); + +static inline struct i915_gem_context * +i915_gem_context_get(struct i915_gem_context *ctx) +{ + kref_get(&ctx->ref); + return ctx; +} + +static inline void i915_gem_context_put(struct i915_gem_context *ctx) +{ + kref_put(&ctx->ref, i915_gem_context_release); +} + +static inline struct i915_gem_engines * +i915_gem_context_engines(struct i915_gem_context *ctx) +{ + return rcu_dereference_protected(ctx->engines, + lockdep_is_held(&ctx->engines_mutex)); +} + +static inline struct i915_gem_engines * +i915_gem_context_lock_engines(struct i915_gem_context *ctx) + __acquires(&ctx->engines_mutex) +{ + mutex_lock(&ctx->engines_mutex); + return i915_gem_context_engines(ctx); +} + +static inline void +i915_gem_context_unlock_engines(struct i915_gem_context *ctx) + __releases(&ctx->engines_mutex) +{ + mutex_unlock(&ctx->engines_mutex); +} + +static inline struct intel_context * +i915_gem_context_lookup_engine(struct i915_gem_context *ctx, unsigned int idx) +{ + return i915_gem_context_engines(ctx)->engines[idx]; +} + +static inline struct intel_context * +i915_gem_context_get_engine(struct i915_gem_context *ctx, unsigned int idx) +{ + struct intel_context *ce = ERR_PTR(-EINVAL); + + rcu_read_lock(); { + struct i915_gem_engines *e = rcu_dereference(ctx->engines); + if (likely(idx < e->num_engines && e->engines[idx])) + ce = intel_context_get(e->engines[idx]); + } rcu_read_unlock(); + + return ce; +} + +static inline void +i915_gem_engines_iter_init(struct i915_gem_engines_iter *it, + struct i915_gem_engines *engines) +{ + GEM_BUG_ON(!engines); + it->engines = engines; + it->idx = 0; +} + +struct intel_context * +i915_gem_engines_iter_next(struct i915_gem_engines_iter *it); + +#define for_each_gem_engine(ce, engines, it) \ + for (i915_gem_engines_iter_init(&(it), (engines)); \ + ((ce) = i915_gem_engines_iter_next(&(it)));) + +struct i915_lut_handle *i915_lut_handle_alloc(void); +void i915_lut_handle_free(struct i915_lut_handle *lut); + +#endif /* !__I915_GEM_CONTEXT_H__ */ diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h new file mode 100644 index 000000000000..fb965ded2508 --- /dev/null +++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h @@ -0,0 +1,208 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#ifndef __I915_GEM_CONTEXT_TYPES_H__ +#define __I915_GEM_CONTEXT_TYPES_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "gt/intel_context_types.h" + +#include "i915_scheduler.h" + +struct pid; + +struct drm_i915_private; +struct drm_i915_file_private; +struct i915_hw_ppgtt; +struct i915_timeline; 
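/*
 * Usage sketch (hypothetical caller, not part of this patch): the engines
 * array defined below is published under ctx->engines_mutex and is normally
 * walked with the for_each_gem_engine() iterator declared in
 * i915_gem_context.h above, bracketed by i915_gem_context_lock_engines() and
 * i915_gem_context_unlock_engines(), exactly as context_barrier_task() does
 * earlier in this patch.  NULL slots are skipped by the iterator.
 */
#include "i915_gem_context.h"

static int count_pinned_engines(struct i915_gem_context *ctx)
{
	struct i915_gem_engines_iter it;
	struct intel_context *ce;
	int count = 0;

	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		if (intel_context_is_pinned(ce))
			count++;
	}
	i915_gem_context_unlock_engines(ctx);

	return count;
}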
+struct intel_ring; + +struct i915_gem_engines { + struct rcu_work rcu; + struct drm_i915_private *i915; + unsigned int num_engines; + struct intel_context *engines[]; +}; + +struct i915_gem_engines_iter { + unsigned int idx; + const struct i915_gem_engines *engines; +}; + +/** + * struct i915_gem_context - client state + * + * The struct i915_gem_context represents the combined view of the driver and + * logical hardware state for a particular client. + */ +struct i915_gem_context { + /** i915: i915 device backpointer */ + struct drm_i915_private *i915; + + /** file_priv: owning file descriptor */ + struct drm_i915_file_private *file_priv; + + /** + * @engines: User defined engines for this context + * + * Various uAPI offer the ability to lookup up an + * index from this array to select an engine operate on. + * + * Multiple logically distinct instances of the same engine + * may be defined in the array, as well as composite virtual + * engines. + * + * Execbuf uses the I915_EXEC_RING_MASK as an index into this + * array to select which HW context + engine to execute on. For + * the default array, the user_ring_map[] is used to translate + * the legacy uABI onto the approprate index (e.g. both + * I915_EXEC_DEFAULT and I915_EXEC_RENDER select the same + * context, and I915_EXEC_BSD is weird). For a use defined + * array, execbuf uses I915_EXEC_RING_MASK as a plain index. + * + * User defined by I915_CONTEXT_PARAM_ENGINE (when the + * CONTEXT_USER_ENGINES flag is set). + */ + struct i915_gem_engines __rcu *engines; + struct mutex engines_mutex; /* guards writes to engines */ + + struct i915_timeline *timeline; + + /** + * @ppgtt: unique address space (GTT) + * + * In full-ppgtt mode, each context has its own address space ensuring + * complete seperation of one client from all others. + * + * In other modes, this is a NULL pointer with the expectation that + * the caller uses the shared global GTT. + */ + struct i915_hw_ppgtt *ppgtt; + + /** + * @pid: process id of creator + * + * Note that who created the context may not be the principle user, + * as the context may be shared across a local socket. However, + * that should only affect the default context, all contexts created + * explicitly by the client are expected to be isolated. + */ + struct pid *pid; + + /** + * @name: arbitrary name + * + * A name is constructed for the context from the creator's process + * name, pid and user handle in order to uniquely identify the + * context in messages. + */ + const char *name; + + /** link: place with &drm_i915_private.context_list */ + struct list_head link; + struct llist_node free_link; + + /** + * @ref: reference count + * + * A reference to a context is held by both the client who created it + * and on each request submitted to the hardware using the request + * (to ensure the hardware has access to the state until it has + * finished all pending writes). See i915_gem_context_get() and + * i915_gem_context_put() for access. + */ + struct kref ref; + + /** + * @rcu: rcu_head for deferred freeing. 
+ */ + struct rcu_head rcu; + + /** + * @user_flags: small set of booleans controlled by the user + */ + unsigned long user_flags; +#define UCONTEXT_NO_ZEROMAP 0 +#define UCONTEXT_NO_ERROR_CAPTURE 1 +#define UCONTEXT_BANNABLE 2 +#define UCONTEXT_RECOVERABLE 3 + + /** + * @flags: small set of booleans + */ + unsigned long flags; +#define CONTEXT_BANNED 0 +#define CONTEXT_CLOSED 1 +#define CONTEXT_FORCE_SINGLE_SUBMISSION 2 +#define CONTEXT_USER_ENGINES 3 + + /** + * @hw_id: - unique identifier for the context + * + * The hardware needs to uniquely identify the context for a few + * functions like fault reporting, PASID, scheduling. The + * &drm_i915_private.context_hw_ida is used to assign a unqiue + * id for the lifetime of the context. + * + * @hw_id_pin_count: - number of times this context had been pinned + * for use (should be, at most, once per engine). + * + * @hw_id_link: - all contexts with an assigned id are tracked + * for possible repossession. + */ + unsigned int hw_id; + atomic_t hw_id_pin_count; + struct list_head hw_id_link; + + struct mutex mutex; + + struct i915_sched_attr sched; + + /** ring_size: size for allocating the per-engine ring buffer */ + u32 ring_size; + /** desc_template: invariant fields for the HW context descriptor */ + u32 desc_template; + + /** guilty_count: How many times this context has caused a GPU hang. */ + atomic_t guilty_count; + /** + * @active_count: How many times this context was active during a GPU + * hang, but did not cause it. + */ + atomic_t active_count; + + /** + * @hang_timestamp: The last time(s) this context caused a GPU hang + */ + unsigned long hang_timestamp[2]; +#define CONTEXT_FAST_HANG_JIFFIES (120 * HZ) /* 3 hangs within 120s? Banned! */ + + /** remap_slice: Bitmask of cache lines that need remapping */ + u8 remap_slice; + + /** handles_vma: rbtree to look up our context specific obj/vma for + * the user handle. (user handles are per fd, but the binding is + * per vm, which may be one per context or shared with the global GTT) + */ + struct radix_tree_root handles_vma; + + /** handles_list: reverse list of all the rbtree entries in use for + * this context, which allows us to free all the allocations on + * context close. 
+ */ + struct list_head handles_list; +}; + +#endif /* __I915_GEM_CONTEXT_TYPES_H__ */ diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c new file mode 100644 index 000000000000..600fc926f81e --- /dev/null +++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c @@ -0,0 +1,318 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright 2012 Red Hat Inc + */ + +#include +#include +#include + +#include "i915_drv.h" +#include "i915_gem_object.h" + +static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf) +{ + return to_intel_bo(buf->priv); +} + +static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment, + enum dma_data_direction dir) +{ + struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf); + struct sg_table *st; + struct scatterlist *src, *dst; + int ret, i; + + ret = i915_gem_object_pin_pages(obj); + if (ret) + goto err; + + /* Copy sg so that we make an independent mapping */ + st = kmalloc(sizeof(struct sg_table), GFP_KERNEL); + if (st == NULL) { + ret = -ENOMEM; + goto err_unpin_pages; + } + + ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL); + if (ret) + goto err_free; + + src = obj->mm.pages->sgl; + dst = st->sgl; + for (i = 0; i < obj->mm.pages->nents; i++) { + sg_set_page(dst, sg_page(src), src->length, 0); + dst = sg_next(dst); + src = sg_next(src); + } + + if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) { + ret = -ENOMEM; + goto err_free_sg; + } + + return st; + +err_free_sg: + sg_free_table(st); +err_free: + kfree(st); +err_unpin_pages: + i915_gem_object_unpin_pages(obj); +err: + return ERR_PTR(ret); +} + +static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment, + struct sg_table *sg, + enum dma_data_direction dir) +{ + struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf); + + dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir); + sg_free_table(sg); + kfree(sg); + + i915_gem_object_unpin_pages(obj); +} + +static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf) +{ + struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf); + + return i915_gem_object_pin_map(obj, I915_MAP_WB); +} + +static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr) +{ + struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf); + + i915_gem_object_flush_map(obj); + i915_gem_object_unpin_map(obj); +} + +static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num) +{ + struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf); + struct page *page; + + if (page_num >= obj->base.size >> PAGE_SHIFT) + return NULL; + + if (!i915_gem_object_has_struct_page(obj)) + return NULL; + + if (i915_gem_object_pin_pages(obj)) + return NULL; + + /* Synchronisation is left to the caller (via .begin_cpu_access()) */ + page = i915_gem_object_get_page(obj, page_num); + if (IS_ERR(page)) + goto err_unpin; + + return kmap(page); + +err_unpin: + i915_gem_object_unpin_pages(obj); + return NULL; +} + +static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr) +{ + struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf); + + kunmap(virt_to_page(addr)); + i915_gem_object_unpin_pages(obj); +} + +static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma) +{ + struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf); + int ret; + + if (obj->base.size < vma->vm_end - vma->vm_start) + return -EINVAL; + + if (!obj->base.filp) + return -ENODEV; + + ret = call_mmap(obj->base.filp, vma); + 
if (ret) + return ret; + + fput(vma->vm_file); + vma->vm_file = get_file(obj->base.filp); + + return 0; +} + +static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction) +{ + struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf); + struct drm_device *dev = obj->base.dev; + bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE); + int err; + + err = i915_gem_object_pin_pages(obj); + if (err) + return err; + + err = i915_mutex_lock_interruptible(dev); + if (err) + goto out; + + err = i915_gem_object_set_to_cpu_domain(obj, write); + mutex_unlock(&dev->struct_mutex); + +out: + i915_gem_object_unpin_pages(obj); + return err; +} + +static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction) +{ + struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf); + struct drm_device *dev = obj->base.dev; + int err; + + err = i915_gem_object_pin_pages(obj); + if (err) + return err; + + err = i915_mutex_lock_interruptible(dev); + if (err) + goto out; + + err = i915_gem_object_set_to_gtt_domain(obj, false); + mutex_unlock(&dev->struct_mutex); + +out: + i915_gem_object_unpin_pages(obj); + return err; +} + +static const struct dma_buf_ops i915_dmabuf_ops = { + .map_dma_buf = i915_gem_map_dma_buf, + .unmap_dma_buf = i915_gem_unmap_dma_buf, + .release = drm_gem_dmabuf_release, + .map = i915_gem_dmabuf_kmap, + .unmap = i915_gem_dmabuf_kunmap, + .mmap = i915_gem_dmabuf_mmap, + .vmap = i915_gem_dmabuf_vmap, + .vunmap = i915_gem_dmabuf_vunmap, + .begin_cpu_access = i915_gem_begin_cpu_access, + .end_cpu_access = i915_gem_end_cpu_access, +}; + +struct dma_buf *i915_gem_prime_export(struct drm_device *dev, + struct drm_gem_object *gem_obj, int flags) +{ + struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); + DEFINE_DMA_BUF_EXPORT_INFO(exp_info); + + exp_info.ops = &i915_dmabuf_ops; + exp_info.size = gem_obj->size; + exp_info.flags = flags; + exp_info.priv = gem_obj; + exp_info.resv = obj->resv; + + if (obj->ops->dmabuf_export) { + int ret = obj->ops->dmabuf_export(obj); + if (ret) + return ERR_PTR(ret); + } + + return drm_gem_dmabuf_export(dev, &exp_info); +} + +static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj) +{ + struct sg_table *pages; + unsigned int sg_page_sizes; + + pages = dma_buf_map_attachment(obj->base.import_attach, + DMA_BIDIRECTIONAL); + if (IS_ERR(pages)) + return PTR_ERR(pages); + + sg_page_sizes = i915_sg_page_sizes(pages->sgl); + + __i915_gem_object_set_pages(obj, pages, sg_page_sizes); + + return 0; +} + +static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj, + struct sg_table *pages) +{ + dma_buf_unmap_attachment(obj->base.import_attach, pages, + DMA_BIDIRECTIONAL); +} + +static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = { + .get_pages = i915_gem_object_get_pages_dmabuf, + .put_pages = i915_gem_object_put_pages_dmabuf, +}; + +struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev, + struct dma_buf *dma_buf) +{ + struct dma_buf_attachment *attach; + struct drm_i915_gem_object *obj; + int ret; + + /* is this one of own objects? */ + if (dma_buf->ops == &i915_dmabuf_ops) { + obj = dma_buf_to_obj(dma_buf); + /* is it from our device? */ + if (obj->base.dev == dev) { + /* + * Importing dmabuf exported from out own gem increases + * refcount on gem itself instead of f_count of dmabuf. 
+ */ + return &i915_gem_object_get(obj)->base; + } + } + + /* need to attach */ + attach = dma_buf_attach(dma_buf, dev->dev); + if (IS_ERR(attach)) + return ERR_CAST(attach); + + get_dma_buf(dma_buf); + + obj = i915_gem_object_alloc(); + if (obj == NULL) { + ret = -ENOMEM; + goto fail_detach; + } + + drm_gem_private_object_init(dev, &obj->base, dma_buf->size); + i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops); + obj->base.import_attach = attach; + obj->resv = dma_buf->resv; + + /* We use GTT as shorthand for a coherent domain, one that is + * neither in the GPU cache nor in the CPU cache, where all + * writes are immediately visible in memory. (That's not strictly + * true, but it's close! There are internal buffers such as the + * write-combined buffer or a delay through the chipset for GTT + * writes that do require us to treat GTT as a separate cache domain.) + */ + obj->read_domains = I915_GEM_DOMAIN_GTT; + obj->write_domain = 0; + + return &obj->base; + +fail_detach: + dma_buf_detach(dma_buf, attach); + dma_buf_put(dma_buf); + + return ERR_PTR(ret); +} + +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) +#include "selftests/mock_dmabuf.c" +#include "selftests/i915_gem_dmabuf.c" +#endif diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c new file mode 100644 index 000000000000..09e64bf33842 --- /dev/null +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -0,0 +1,2768 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2008,2010 Intel Corporation + */ + +#include +#include +#include +#include + +#include +#include + +#include "gem/i915_gem_ioctls.h" +#include "gt/intel_context.h" +#include "gt/intel_gt_pm.h" + +#include "i915_gem_ioctls.h" +#include "i915_gem_clflush.h" +#include "i915_gem_context.h" +#include "i915_trace.h" +#include "intel_drv.h" +#include "intel_frontbuffer.h" + +enum { + FORCE_CPU_RELOC = 1, + FORCE_GTT_RELOC, + FORCE_GPU_RELOC, +#define DBG_FORCE_RELOC 0 /* choose one of the above! */ +}; + +#define __EXEC_OBJECT_HAS_REF BIT(31) +#define __EXEC_OBJECT_HAS_PIN BIT(30) +#define __EXEC_OBJECT_HAS_FENCE BIT(29) +#define __EXEC_OBJECT_NEEDS_MAP BIT(28) +#define __EXEC_OBJECT_NEEDS_BIAS BIT(27) +#define __EXEC_OBJECT_INTERNAL_FLAGS (~0u << 27) /* all of the above */ +#define __EXEC_OBJECT_RESERVED (__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_FENCE) + +#define __EXEC_HAS_RELOC BIT(31) +#define __EXEC_VALIDATED BIT(30) +#define __EXEC_INTERNAL_FLAGS (~0u << 30) +#define UPDATE PIN_OFFSET_FIXED + +#define BATCH_OFFSET_BIAS (256*1024) + +#define __I915_EXEC_ILLEGAL_FLAGS \ + (__I915_EXEC_UNKNOWN_FLAGS | \ + I915_EXEC_CONSTANTS_MASK | \ + I915_EXEC_RESOURCE_STREAMER) + +/* Catch emission of unexpected errors for CI! */ +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) +#undef EINVAL +#define EINVAL ({ \ + DRM_DEBUG_DRIVER("EINVAL at %s:%d\n", __func__, __LINE__); \ + 22; \ +}) +#endif + +/** + * DOC: User command execution + * + * Userspace submits commands to be executed on the GPU as an instruction + * stream within a GEM object we call a batchbuffer. This instructions may + * refer to other GEM objects containing auxiliary state such as kernels, + * samplers, render targets and even secondary batchbuffers. Userspace does + * not know where in the GPU memory these objects reside and so before the + * batchbuffer is passed to the GPU for execution, those addresses in the + * batchbuffer and auxiliary objects are updated. This is known as relocation, + * or patching. 
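To make the patch points concrete, here is what a single relocation looks like from the userspace side (an illustrative sketch, not taken from this patch: fill_reloc() and its handle/offset arguments are placeholders, while the struct and the domain flag come from the existing uAPI in <drm/i915_drm.h>):

    #include <stdint.h>
    #include <string.h>
    #include <drm/i915_drm.h>

    /*
     * Describe one patch point: the batch reads target_handle's GPU address
     * at byte batch_offset within the batch object.
     */
    static void fill_reloc(struct drm_i915_gem_relocation_entry *reloc,
                           uint32_t target_handle, uint64_t batch_offset)
    {
            memset(reloc, 0, sizeof(*reloc));
            reloc->target_handle = target_handle;   /* GEM handle of the referenced object */
            reloc->offset = batch_offset;           /* where in the batch the address lives */
            reloc->delta = 0;                       /* addend applied to the target's address */
            reloc->presumed_offset = -1ull;         /* unknown: force the kernel to rewrite it */
            reloc->read_domains = I915_GEM_DOMAIN_RENDER;
            reloc->write_domain = 0;                /* the batch only reads this object */
    }

The entry is then referenced from the matching drm_i915_gem_exec_object2 through relocs_ptr and relocation_count.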
To try and avoid having to relocate each object on the next + * execution, userspace is told the location of those objects in this pass, + * but this remains just a hint as the kernel may choose a new location for + * any object in the future. + * + * At the level of talking to the hardware, submitting a batchbuffer for the + * GPU to execute is to add content to a buffer from which the HW + * command streamer is reading. + * + * 1. Add a command to load the HW context. For Logical Ring Contexts, i.e. + * Execlists, this command is not placed on the same buffer as the + * remaining items. + * + * 2. Add a command to invalidate caches to the buffer. + * + * 3. Add a batchbuffer start command to the buffer; the start command is + * essentially a token together with the GPU address of the batchbuffer + * to be executed. + * + * 4. Add a pipeline flush to the buffer. + * + * 5. Add a memory write command to the buffer to record when the GPU + * is done executing the batchbuffer. The memory write writes the + * global sequence number of the request, ``i915_request::global_seqno``; + * the i915 driver uses the current value in the register to determine + * if the GPU has completed the batchbuffer. + * + * 6. Add a user interrupt command to the buffer. This command instructs + * the GPU to issue an interrupt when the command, pipeline flush and + * memory write are completed. + * + * 7. Inform the hardware of the additional commands added to the buffer + * (by updating the tail pointer). + * + * Processing an execbuf ioctl is conceptually split up into a few phases. + * + * 1. Validation - Ensure all the pointers, handles and flags are valid. + * 2. Reservation - Assign GPU address space for every object + * 3. Relocation - Update any addresses to point to the final locations + * 4. Serialisation - Order the request with respect to its dependencies + * 5. Construction - Construct a request to execute the batchbuffer + * 6. Submission (at some point in the future execution) + * + * Reserving resources for the execbuf is the most complicated phase. We + * neither want to have to migrate the object in the address space, nor do + * we want to have to update any relocations pointing to this object. Ideally, + * we want to leave the object where it is and for all the existing relocations + * to match. If the object is given a new address, or if userspace thinks the + * object is elsewhere, we have to parse all the relocation entries and update + * the addresses. Userspace can set the I915_EXEC_NORELOC flag to hint that + * all the target addresses in all of its objects match the value in the + * relocation entries and that they all match the presumed offsets given by the + * list of execbuffer objects. Using this knowledge, we know that if we haven't + * moved any buffers, all the relocation entries are valid and we can skip + * the update. (If userspace is wrong, the likely outcome is an impromptu GPU + * hang.) The requirement for using I915_EXEC_NO_RELOC are: + * + * The addresses written in the objects must match the corresponding + * reloc.presumed_offset which in turn must match the corresponding + * execobject.offset. + * + * Any render targets written to in the batch must be flagged with + * EXEC_OBJECT_WRITE. + * + * To avoid stalling, execobject.offset should match the current + * address of that object within the active context. + * + * The reservation is done is multiple phases. 
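A minimal submission that opts in to those hints might look like the sketch below (illustrative only, not taken from this patch: submit_batch(), the handles, the context id and the 4096-byte batch length are placeholders, and in real use the execobject offsets would carry the addresses reported by a previous execbuf, as the requirements above demand; the structs, flags and ioctl are the existing uAPI):

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <drm/i915_drm.h>

    /*
     * Submit a two-object execbuf: objs[0] is a referenced buffer and the
     * last entry is the batch (the default when I915_EXEC_BATCH_FIRST is
     * not set). Error handling is omitted.
     */
    static int submit_batch(int fd, uint32_t ctx_id,
                            uint32_t target_handle, uint32_t batch_handle)
    {
            struct drm_i915_gem_exec_object2 objs[2];
            struct drm_i915_gem_execbuffer2 execbuf;

            memset(objs, 0, sizeof(objs));
            objs[0].handle = target_handle;
            objs[1].handle = batch_handle;

            memset(&execbuf, 0, sizeof(execbuf));
            execbuf.buffers_ptr = (uintptr_t)objs;
            execbuf.buffer_count = 2;
            execbuf.batch_len = 4096;       /* illustrative batch size */
            execbuf.flags = I915_EXEC_RENDER | I915_EXEC_NO_RELOC |
                            I915_EXEC_HANDLE_LUT;
            execbuf.rsvd1 = ctx_id;         /* context the request runs in */

            return ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
    }

Returning to how address space is actually reserved for these objects: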
First we try and keep any + * object already bound in its current location - so as long as meets the + * constraints imposed by the new execbuffer. Any object left unbound after the + * first pass is then fitted into any available idle space. If an object does + * not fit, all objects are removed from the reservation and the process rerun + * after sorting the objects into a priority order (more difficult to fit + * objects are tried first). Failing that, the entire VM is cleared and we try + * to fit the execbuf once last time before concluding that it simply will not + * fit. + * + * A small complication to all of this is that we allow userspace not only to + * specify an alignment and a size for the object in the address space, but + * we also allow userspace to specify the exact offset. This objects are + * simpler to place (the location is known a priori) all we have to do is make + * sure the space is available. + * + * Once all the objects are in place, patching up the buried pointers to point + * to the final locations is a fairly simple job of walking over the relocation + * entry arrays, looking up the right address and rewriting the value into + * the object. Simple! ... The relocation entries are stored in user memory + * and so to access them we have to copy them into a local buffer. That copy + * has to avoid taking any pagefaults as they may lead back to a GEM object + * requiring the struct_mutex (i.e. recursive deadlock). So once again we split + * the relocation into multiple passes. First we try to do everything within an + * atomic context (avoid the pagefaults) which requires that we never wait. If + * we detect that we may wait, or if we need to fault, then we have to fallback + * to a slower path. The slowpath has to drop the mutex. (Can you hear alarm + * bells yet?) Dropping the mutex means that we lose all the state we have + * built up so far for the execbuf and we must reset any global data. However, + * we do leave the objects pinned in their final locations - which is a + * potential issue for concurrent execbufs. Once we have left the mutex, we can + * allocate and copy all the relocation entries into a large array at our + * leisure, reacquire the mutex, reclaim all the objects and other state and + * then proceed to update any incorrect addresses with the objects. + * + * As we process the relocation entries, we maintain a record of whether the + * object is being written to. Using NORELOC, we expect userspace to provide + * this information instead. We also check whether we can skip the relocation + * by comparing the expected value inside the relocation entry with the target's + * final address. If they differ, we have to map the current object and rewrite + * the 4 or 8 byte pointer within. + * + * Serialising an execbuf is quite simple according to the rules of the GEM + * ABI. Execution within each context is ordered by the order of submission. + * Writes to any GEM object are in order of submission and are exclusive. Reads + * from a GEM object are unordered with respect to other reads, but ordered by + * writes. A write submitted after a read cannot occur before the read, and + * similarly any read submitted after a write cannot occur before the write. + * Writes are ordered between engines such that only one write occurs at any + * time (completing any reads beforehand) - using semaphores where available + * and CPU serialisation otherwise. 
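In uAPI terms, these ordering rules are driven by per-object flags on the execbuffer objects; a small sketch (illustrative only, not taken from this patch: mark_render_target() is a made-up helper, the two flags are the existing ones):

    #include <stdbool.h>
    #include <drm/i915_drm.h>

    static void mark_render_target(struct drm_i915_gem_exec_object2 *obj,
                                   bool skip_implicit_sync)
    {
            /* declare that the batch writes this BO: later readers wait on us */
            obj->flags |= EXEC_OBJECT_WRITE;

            /* optionally opt out of implicit ordering and fence it in userspace */
            if (skip_implicit_sync)
                    obj->flags |= EXEC_OBJECT_ASYNC;
    }

Relying on EXEC_OBJECT_ASYNC shifts the responsibility for ordering entirely onto userspace.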
Other GEM access obey the same rules, any + * write (either via mmaps using set-domain, or via pwrite) must flush all GPU + * reads before starting, and any read (either using set-domain or pread) must + * flush all GPU writes before starting. (Note we only employ a barrier before, + * we currently rely on userspace not concurrently starting a new execution + * whilst reading or writing to an object. This may be an advantage or not + * depending on how much you trust userspace not to shoot themselves in the + * foot.) Serialisation may just result in the request being inserted into + * a DAG awaiting its turn, but most simple is to wait on the CPU until + * all dependencies are resolved. + * + * After all of that, is just a matter of closing the request and handing it to + * the hardware (well, leaving it in a queue to be executed). However, we also + * offer the ability for batchbuffers to be run with elevated privileges so + * that they access otherwise hidden registers. (Used to adjust L3 cache etc.) + * Before any batch is given extra privileges we first must check that it + * contains no nefarious instructions, we check that each instruction is from + * our whitelist and all registers are also from an allowed list. We first + * copy the user's batchbuffer to a shadow (so that the user doesn't have + * access to it, either by the CPU or GPU as we scan it) and then parse each + * instruction. If everything is ok, we set a flag telling the hardware to run + * the batchbuffer in trusted mode, otherwise the ioctl is rejected. + */ + +struct i915_execbuffer { + struct drm_i915_private *i915; /** i915 backpointer */ + struct drm_file *file; /** per-file lookup tables and limits */ + struct drm_i915_gem_execbuffer2 *args; /** ioctl parameters */ + struct drm_i915_gem_exec_object2 *exec; /** ioctl execobj[] */ + struct i915_vma **vma; + unsigned int *flags; + + struct intel_engine_cs *engine; /** engine to queue the request to */ + struct intel_context *context; /* logical state for the request */ + struct i915_gem_context *gem_context; /** caller's context */ + struct i915_address_space *vm; /** GTT and vma for the request */ + + struct i915_request *request; /** our request to build */ + struct i915_vma *batch; /** identity of the batch obj/vma */ + + /** actual size of execobj[] as we may extend it for the cmdparser */ + unsigned int buffer_count; + + /** list of vma not yet bound during reservation phase */ + struct list_head unbound; + + /** list of vma that have execobj.relocation_count */ + struct list_head relocs; + + /** + * Track the most recently used object for relocations, as we + * frequently have to perform multiple relocations within the same + * obj/page + */ + struct reloc_cache { + struct drm_mm_node node; /** temporary GTT binding */ + unsigned long vaddr; /** Current kmap address */ + unsigned long page; /** Currently mapped page index */ + unsigned int gen; /** Cached value of INTEL_GEN */ + bool use_64bit_reloc : 1; + bool has_llc : 1; + bool has_fence : 1; + bool needs_unfenced : 1; + + struct i915_request *rq; + u32 *rq_cmd; + unsigned int rq_size; + } reloc_cache; + + u64 invalid_flags; /** Set of execobj.flags that are invalid */ + u32 context_flags; /** Set of execobj.flags to insert from the ctx */ + + u32 batch_start_offset; /** Location within object of batch */ + u32 batch_len; /** Length of batch within object */ + u32 batch_flags; /** Flags composed for emit_bb_start() */ + + /** + * Indicate either the size of the hastable used to resolve + * relocation 
handles, or if negative that we are using a direct + * index into the execobj[]. + */ + int lut_size; + struct hlist_head *buckets; /** ht for relocation handles */ +}; + +#define exec_entry(EB, VMA) (&(EB)->exec[(VMA)->exec_flags - (EB)->flags]) + +/* + * Used to convert any address to canonical form. + * Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS, + * MI_LOAD_REGISTER_MEM and others, see Broadwell PRM Vol2a) require the + * addresses to be in a canonical form: + * "GraphicsAddress[63:48] are ignored by the HW and assumed to be in correct + * canonical form [63:48] == [47]." + */ +#define GEN8_HIGH_ADDRESS_BIT 47 +static inline u64 gen8_canonical_addr(u64 address) +{ + return sign_extend64(address, GEN8_HIGH_ADDRESS_BIT); +} + +static inline u64 gen8_noncanonical_addr(u64 address) +{ + return address & GENMASK_ULL(GEN8_HIGH_ADDRESS_BIT, 0); +} + +static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb) +{ + return intel_engine_needs_cmd_parser(eb->engine) && eb->batch_len; +} + +static int eb_create(struct i915_execbuffer *eb) +{ + if (!(eb->args->flags & I915_EXEC_HANDLE_LUT)) { + unsigned int size = 1 + ilog2(eb->buffer_count); + + /* + * Without a 1:1 association between relocation handles and + * the execobject[] index, we instead create a hashtable. + * We size it dynamically based on available memory, starting + * first with 1:1 assocative hash and scaling back until + * the allocation succeeds. + * + * Later on we use a positive lut_size to indicate we are + * using this hashtable, and a negative value to indicate a + * direct lookup. + */ + do { + gfp_t flags; + + /* While we can still reduce the allocation size, don't + * raise a warning and allow the allocation to fail. + * On the last pass though, we want to try as hard + * as possible to perform the allocation and warn + * if it fails. 
+ */ + flags = GFP_KERNEL; + if (size > 1) + flags |= __GFP_NORETRY | __GFP_NOWARN; + + eb->buckets = kzalloc(sizeof(struct hlist_head) << size, + flags); + if (eb->buckets) + break; + } while (--size); + + if (unlikely(!size)) + return -ENOMEM; + + eb->lut_size = size; + } else { + eb->lut_size = -eb->buffer_count; + } + + return 0; +} + +static bool +eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry, + const struct i915_vma *vma, + unsigned int flags) +{ + if (vma->node.size < entry->pad_to_size) + return true; + + if (entry->alignment && !IS_ALIGNED(vma->node.start, entry->alignment)) + return true; + + if (flags & EXEC_OBJECT_PINNED && + vma->node.start != entry->offset) + return true; + + if (flags & __EXEC_OBJECT_NEEDS_BIAS && + vma->node.start < BATCH_OFFSET_BIAS) + return true; + + if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) && + (vma->node.start + vma->node.size - 1) >> 32) + return true; + + if (flags & __EXEC_OBJECT_NEEDS_MAP && + !i915_vma_is_map_and_fenceable(vma)) + return true; + + return false; +} + +static inline bool +eb_pin_vma(struct i915_execbuffer *eb, + const struct drm_i915_gem_exec_object2 *entry, + struct i915_vma *vma) +{ + unsigned int exec_flags = *vma->exec_flags; + u64 pin_flags; + + if (vma->node.size) + pin_flags = vma->node.start; + else + pin_flags = entry->offset & PIN_OFFSET_MASK; + + pin_flags |= PIN_USER | PIN_NOEVICT | PIN_OFFSET_FIXED; + if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_GTT)) + pin_flags |= PIN_GLOBAL; + + if (unlikely(i915_vma_pin(vma, 0, 0, pin_flags))) + return false; + + if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) { + if (unlikely(i915_vma_pin_fence(vma))) { + i915_vma_unpin(vma); + return false; + } + + if (vma->fence) + exec_flags |= __EXEC_OBJECT_HAS_FENCE; + } + + *vma->exec_flags = exec_flags | __EXEC_OBJECT_HAS_PIN; + return !eb_vma_misplaced(entry, vma, exec_flags); +} + +static inline void __eb_unreserve_vma(struct i915_vma *vma, unsigned int flags) +{ + GEM_BUG_ON(!(flags & __EXEC_OBJECT_HAS_PIN)); + + if (unlikely(flags & __EXEC_OBJECT_HAS_FENCE)) + __i915_vma_unpin_fence(vma); + + __i915_vma_unpin(vma); +} + +static inline void +eb_unreserve_vma(struct i915_vma *vma, unsigned int *flags) +{ + if (!(*flags & __EXEC_OBJECT_HAS_PIN)) + return; + + __eb_unreserve_vma(vma, *flags); + *flags &= ~__EXEC_OBJECT_RESERVED; +} + +static int +eb_validate_vma(struct i915_execbuffer *eb, + struct drm_i915_gem_exec_object2 *entry, + struct i915_vma *vma) +{ + if (unlikely(entry->flags & eb->invalid_flags)) + return -EINVAL; + + if (unlikely(entry->alignment && !is_power_of_2(entry->alignment))) + return -EINVAL; + + /* + * Offset can be used as input (EXEC_OBJECT_PINNED), reject + * any non-page-aligned or non-canonical addresses. + */ + if (unlikely(entry->flags & EXEC_OBJECT_PINNED && + entry->offset != gen8_canonical_addr(entry->offset & I915_GTT_PAGE_MASK))) + return -EINVAL; + + /* pad_to_size was once a reserved field, so sanitize it */ + if (entry->flags & EXEC_OBJECT_PAD_TO_SIZE) { + if (unlikely(offset_in_page(entry->pad_to_size))) + return -EINVAL; + } else { + entry->pad_to_size = 0; + } + + if (unlikely(vma->exec_flags)) { + DRM_DEBUG("Object [handle %d, index %d] appears more than once in object list\n", + entry->handle, (int)(entry - eb->exec)); + return -EINVAL; + } + + /* + * From drm_mm perspective address space is continuous, + * so from this point we're always using non-canonical + * form internally. 
+ */ + entry->offset = gen8_noncanonical_addr(entry->offset); + + if (!eb->reloc_cache.has_fence) { + entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE; + } else { + if ((entry->flags & EXEC_OBJECT_NEEDS_FENCE || + eb->reloc_cache.needs_unfenced) && + i915_gem_object_is_tiled(vma->obj)) + entry->flags |= EXEC_OBJECT_NEEDS_GTT | __EXEC_OBJECT_NEEDS_MAP; + } + + if (!(entry->flags & EXEC_OBJECT_PINNED)) + entry->flags |= eb->context_flags; + + return 0; +} + +static int +eb_add_vma(struct i915_execbuffer *eb, + unsigned int i, unsigned batch_idx, + struct i915_vma *vma) +{ + struct drm_i915_gem_exec_object2 *entry = &eb->exec[i]; + int err; + + GEM_BUG_ON(i915_vma_is_closed(vma)); + + if (!(eb->args->flags & __EXEC_VALIDATED)) { + err = eb_validate_vma(eb, entry, vma); + if (unlikely(err)) + return err; + } + + if (eb->lut_size > 0) { + vma->exec_handle = entry->handle; + hlist_add_head(&vma->exec_node, + &eb->buckets[hash_32(entry->handle, + eb->lut_size)]); + } + + if (entry->relocation_count) + list_add_tail(&vma->reloc_link, &eb->relocs); + + /* + * Stash a pointer from the vma to execobj, so we can query its flags, + * size, alignment etc as provided by the user. Also we stash a pointer + * to the vma inside the execobj so that we can use a direct lookup + * to find the right target VMA when doing relocations. + */ + eb->vma[i] = vma; + eb->flags[i] = entry->flags; + vma->exec_flags = &eb->flags[i]; + + /* + * SNA is doing fancy tricks with compressing batch buffers, which leads + * to negative relocation deltas. Usually that works out ok since the + * relocate address is still positive, except when the batch is placed + * very low in the GTT. Ensure this doesn't happen. + * + * Note that actual hangs have only been observed on gen7, but for + * paranoia do it everywhere. + */ + if (i == batch_idx) { + if (entry->relocation_count && + !(eb->flags[i] & EXEC_OBJECT_PINNED)) + eb->flags[i] |= __EXEC_OBJECT_NEEDS_BIAS; + if (eb->reloc_cache.has_fence) + eb->flags[i] |= EXEC_OBJECT_NEEDS_FENCE; + + eb->batch = vma; + } + + err = 0; + if (eb_pin_vma(eb, entry, vma)) { + if (entry->offset != vma->node.start) { + entry->offset = vma->node.start | UPDATE; + eb->args->flags |= __EXEC_HAS_RELOC; + } + } else { + eb_unreserve_vma(vma, vma->exec_flags); + + list_add_tail(&vma->exec_link, &eb->unbound); + if (drm_mm_node_allocated(&vma->node)) + err = i915_vma_unbind(vma); + if (unlikely(err)) + vma->exec_flags = NULL; + } + return err; +} + +static inline int use_cpu_reloc(const struct reloc_cache *cache, + const struct drm_i915_gem_object *obj) +{ + if (!i915_gem_object_has_struct_page(obj)) + return false; + + if (DBG_FORCE_RELOC == FORCE_CPU_RELOC) + return true; + + if (DBG_FORCE_RELOC == FORCE_GTT_RELOC) + return false; + + return (cache->has_llc || + obj->cache_dirty || + obj->cache_level != I915_CACHE_NONE); +} + +static int eb_reserve_vma(const struct i915_execbuffer *eb, + struct i915_vma *vma) +{ + struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma); + unsigned int exec_flags = *vma->exec_flags; + u64 pin_flags; + int err; + + pin_flags = PIN_USER | PIN_NONBLOCK; + if (exec_flags & EXEC_OBJECT_NEEDS_GTT) + pin_flags |= PIN_GLOBAL; + + /* + * Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset, + * limit address to the first 4GBs for unflagged objects. 
+ */ + if (!(exec_flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS)) + pin_flags |= PIN_ZONE_4G; + + if (exec_flags & __EXEC_OBJECT_NEEDS_MAP) + pin_flags |= PIN_MAPPABLE; + + if (exec_flags & EXEC_OBJECT_PINNED) { + pin_flags |= entry->offset | PIN_OFFSET_FIXED; + pin_flags &= ~PIN_NONBLOCK; /* force overlapping checks */ + } else if (exec_flags & __EXEC_OBJECT_NEEDS_BIAS) { + pin_flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS; + } + + err = i915_vma_pin(vma, + entry->pad_to_size, entry->alignment, + pin_flags); + if (err) + return err; + + if (entry->offset != vma->node.start) { + entry->offset = vma->node.start | UPDATE; + eb->args->flags |= __EXEC_HAS_RELOC; + } + + if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) { + err = i915_vma_pin_fence(vma); + if (unlikely(err)) { + i915_vma_unpin(vma); + return err; + } + + if (vma->fence) + exec_flags |= __EXEC_OBJECT_HAS_FENCE; + } + + *vma->exec_flags = exec_flags | __EXEC_OBJECT_HAS_PIN; + GEM_BUG_ON(eb_vma_misplaced(entry, vma, exec_flags)); + + return 0; +} + +static int eb_reserve(struct i915_execbuffer *eb) +{ + const unsigned int count = eb->buffer_count; + struct list_head last; + struct i915_vma *vma; + unsigned int i, pass; + int err; + + /* + * Attempt to pin all of the buffers into the GTT. + * This is done in 3 phases: + * + * 1a. Unbind all objects that do not match the GTT constraints for + * the execbuffer (fenceable, mappable, alignment etc). + * 1b. Increment pin count for already bound objects. + * 2. Bind new objects. + * 3. Decrement pin count. + * + * This avoid unnecessary unbinding of later objects in order to make + * room for the earlier objects *unless* we need to defragment. + */ + + pass = 0; + err = 0; + do { + list_for_each_entry(vma, &eb->unbound, exec_link) { + err = eb_reserve_vma(eb, vma); + if (err) + break; + } + if (err != -ENOSPC) + return err; + + /* Resort *all* the objects into priority order */ + INIT_LIST_HEAD(&eb->unbound); + INIT_LIST_HEAD(&last); + for (i = 0; i < count; i++) { + unsigned int flags = eb->flags[i]; + struct i915_vma *vma = eb->vma[i]; + + if (flags & EXEC_OBJECT_PINNED && + flags & __EXEC_OBJECT_HAS_PIN) + continue; + + eb_unreserve_vma(vma, &eb->flags[i]); + + if (flags & EXEC_OBJECT_PINNED) + /* Pinned must have their slot */ + list_add(&vma->exec_link, &eb->unbound); + else if (flags & __EXEC_OBJECT_NEEDS_MAP) + /* Map require the lowest 256MiB (aperture) */ + list_add_tail(&vma->exec_link, &eb->unbound); + else if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS)) + /* Prioritise 4GiB region for restricted bo */ + list_add(&vma->exec_link, &last); + else + list_add_tail(&vma->exec_link, &last); + } + list_splice_tail(&last, &eb->unbound); + + switch (pass++) { + case 0: + break; + + case 1: + /* Too fragmented, unbind everything and retry */ + err = i915_gem_evict_vm(eb->vm); + if (err) + return err; + break; + + default: + return -ENOSPC; + } + } while (1); +} + +static unsigned int eb_batch_index(const struct i915_execbuffer *eb) +{ + if (eb->args->flags & I915_EXEC_BATCH_FIRST) + return 0; + else + return eb->buffer_count - 1; +} + +static int eb_select_context(struct i915_execbuffer *eb) +{ + struct i915_gem_context *ctx; + + ctx = i915_gem_context_lookup(eb->file->driver_priv, eb->args->rsvd1); + if (unlikely(!ctx)) + return -ENOENT; + + eb->gem_context = ctx; + if (ctx->ppgtt) { + eb->vm = &ctx->ppgtt->vm; + eb->invalid_flags |= EXEC_OBJECT_NEEDS_GTT; + } else { + eb->vm = &eb->i915->ggtt.vm; + } + + eb->context_flags = 0; + if (test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags)) + 
eb->context_flags |= __EXEC_OBJECT_NEEDS_BIAS; + + return 0; +} + +static struct i915_request *__eb_wait_for_ring(struct intel_ring *ring) +{ + struct i915_request *rq; + + /* + * Completely unscientific finger-in-the-air estimates for suitable + * maximum user request size (to avoid blocking) and then backoff. + */ + if (intel_ring_update_space(ring) >= PAGE_SIZE) + return NULL; + + /* + * Find a request that after waiting upon, there will be at least half + * the ring available. The hysteresis allows us to compete for the + * shared ring and should mean that we sleep less often prior to + * claiming our resources, but not so long that the ring completely + * drains before we can submit our next request. + */ + list_for_each_entry(rq, &ring->request_list, ring_link) { + if (__intel_ring_space(rq->postfix, + ring->emit, ring->size) > ring->size / 2) + break; + } + if (&rq->ring_link == &ring->request_list) + return NULL; /* weird, we will check again later for real */ + + return i915_request_get(rq); +} + +static int eb_wait_for_ring(const struct i915_execbuffer *eb) +{ + struct i915_request *rq; + int ret = 0; + + /* + * Apply a light amount of backpressure to prevent excessive hogs + * from blocking waiting for space whilst holding struct_mutex and + * keeping all of their resources pinned. + */ + + rq = __eb_wait_for_ring(eb->context->ring); + if (rq) { + mutex_unlock(&eb->i915->drm.struct_mutex); + + if (i915_request_wait(rq, + I915_WAIT_INTERRUPTIBLE, + MAX_SCHEDULE_TIMEOUT) < 0) + ret = -EINTR; + + i915_request_put(rq); + + mutex_lock(&eb->i915->drm.struct_mutex); + } + + return ret; +} + +static int eb_lookup_vmas(struct i915_execbuffer *eb) +{ + struct radix_tree_root *handles_vma = &eb->gem_context->handles_vma; + struct drm_i915_gem_object *obj; + unsigned int i, batch; + int err; + + if (unlikely(i915_gem_context_is_closed(eb->gem_context))) + return -ENOENT; + + if (unlikely(i915_gem_context_is_banned(eb->gem_context))) + return -EIO; + + INIT_LIST_HEAD(&eb->relocs); + INIT_LIST_HEAD(&eb->unbound); + + batch = eb_batch_index(eb); + + for (i = 0; i < eb->buffer_count; i++) { + u32 handle = eb->exec[i].handle; + struct i915_lut_handle *lut; + struct i915_vma *vma; + + vma = radix_tree_lookup(handles_vma, handle); + if (likely(vma)) + goto add_vma; + + obj = i915_gem_object_lookup(eb->file, handle); + if (unlikely(!obj)) { + err = -ENOENT; + goto err_vma; + } + + vma = i915_vma_instance(obj, eb->vm, NULL); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto err_obj; + } + + lut = i915_lut_handle_alloc(); + if (unlikely(!lut)) { + err = -ENOMEM; + goto err_obj; + } + + err = radix_tree_insert(handles_vma, handle, vma); + if (unlikely(err)) { + i915_lut_handle_free(lut); + goto err_obj; + } + + /* transfer ref to ctx */ + if (!vma->open_count++) + i915_vma_reopen(vma); + list_add(&lut->obj_link, &obj->lut_list); + list_add(&lut->ctx_link, &eb->gem_context->handles_list); + lut->ctx = eb->gem_context; + lut->handle = handle; + +add_vma: + err = eb_add_vma(eb, i, batch, vma); + if (unlikely(err)) + goto err_vma; + + GEM_BUG_ON(vma != eb->vma[i]); + GEM_BUG_ON(vma->exec_flags != &eb->flags[i]); + GEM_BUG_ON(drm_mm_node_allocated(&vma->node) && + eb_vma_misplaced(&eb->exec[i], vma, eb->flags[i])); + } + + eb->args->flags |= __EXEC_VALIDATED; + return eb_reserve(eb); + +err_obj: + i915_gem_object_put(obj); +err_vma: + eb->vma[i] = NULL; + return err; +} + +static struct i915_vma * +eb_get_vma(const struct i915_execbuffer *eb, unsigned long handle) +{ + if (eb->lut_size < 0) { + if (handle 
>= -eb->lut_size) + return NULL; + return eb->vma[handle]; + } else { + struct hlist_head *head; + struct i915_vma *vma; + + head = &eb->buckets[hash_32(handle, eb->lut_size)]; + hlist_for_each_entry(vma, head, exec_node) { + if (vma->exec_handle == handle) + return vma; + } + return NULL; + } +} + +static void eb_release_vmas(const struct i915_execbuffer *eb) +{ + const unsigned int count = eb->buffer_count; + unsigned int i; + + for (i = 0; i < count; i++) { + struct i915_vma *vma = eb->vma[i]; + unsigned int flags = eb->flags[i]; + + if (!vma) + break; + + GEM_BUG_ON(vma->exec_flags != &eb->flags[i]); + vma->exec_flags = NULL; + eb->vma[i] = NULL; + + if (flags & __EXEC_OBJECT_HAS_PIN) + __eb_unreserve_vma(vma, flags); + + if (flags & __EXEC_OBJECT_HAS_REF) + i915_vma_put(vma); + } +} + +static void eb_reset_vmas(const struct i915_execbuffer *eb) +{ + eb_release_vmas(eb); + if (eb->lut_size > 0) + memset(eb->buckets, 0, + sizeof(struct hlist_head) << eb->lut_size); +} + +static void eb_destroy(const struct i915_execbuffer *eb) +{ + GEM_BUG_ON(eb->reloc_cache.rq); + + if (eb->lut_size > 0) + kfree(eb->buckets); +} + +static inline u64 +relocation_target(const struct drm_i915_gem_relocation_entry *reloc, + const struct i915_vma *target) +{ + return gen8_canonical_addr((int)reloc->delta + target->node.start); +} + +static void reloc_cache_init(struct reloc_cache *cache, + struct drm_i915_private *i915) +{ + cache->page = -1; + cache->vaddr = 0; + /* Must be a variable in the struct to allow GCC to unroll. */ + cache->gen = INTEL_GEN(i915); + cache->has_llc = HAS_LLC(i915); + cache->use_64bit_reloc = HAS_64BIT_RELOC(i915); + cache->has_fence = cache->gen < 4; + cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment; + cache->node.allocated = false; + cache->rq = NULL; + cache->rq_size = 0; +} + +static inline void *unmask_page(unsigned long p) +{ + return (void *)(uintptr_t)(p & PAGE_MASK); +} + +static inline unsigned int unmask_flags(unsigned long p) +{ + return p & ~PAGE_MASK; +} + +#define KMAP 0x4 /* after CLFLUSH_FLAGS */ + +static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache) +{ + struct drm_i915_private *i915 = + container_of(cache, struct i915_execbuffer, reloc_cache)->i915; + return &i915->ggtt; +} + +static void reloc_gpu_flush(struct reloc_cache *cache) +{ + GEM_BUG_ON(cache->rq_size >= cache->rq->batch->obj->base.size / sizeof(u32)); + cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END; + + __i915_gem_object_flush_map(cache->rq->batch->obj, 0, cache->rq_size); + i915_gem_object_unpin_map(cache->rq->batch->obj); + + i915_gem_chipset_flush(cache->rq->i915); + + i915_request_add(cache->rq); + cache->rq = NULL; +} + +static void reloc_cache_reset(struct reloc_cache *cache) +{ + void *vaddr; + + if (cache->rq) + reloc_gpu_flush(cache); + + if (!cache->vaddr) + return; + + vaddr = unmask_page(cache->vaddr); + if (cache->vaddr & KMAP) { + if (cache->vaddr & CLFLUSH_AFTER) + mb(); + + kunmap_atomic(vaddr); + i915_gem_object_finish_access((struct drm_i915_gem_object *)cache->node.mm); + } else { + wmb(); + io_mapping_unmap_atomic((void __iomem *)vaddr); + if (cache->node.allocated) { + struct i915_ggtt *ggtt = cache_to_ggtt(cache); + + ggtt->vm.clear_range(&ggtt->vm, + cache->node.start, + cache->node.size); + drm_mm_remove_node(&cache->node); + } else { + i915_vma_unpin((struct i915_vma *)cache->node.mm); + } + } + + cache->vaddr = 0; + cache->page = -1; +} + +static void *reloc_kmap(struct drm_i915_gem_object *obj, + struct reloc_cache *cache, + 
unsigned long page) +{ + void *vaddr; + + if (cache->vaddr) { + kunmap_atomic(unmask_page(cache->vaddr)); + } else { + unsigned int flushes; + int err; + + err = i915_gem_object_prepare_write(obj, &flushes); + if (err) + return ERR_PTR(err); + + BUILD_BUG_ON(KMAP & CLFLUSH_FLAGS); + BUILD_BUG_ON((KMAP | CLFLUSH_FLAGS) & PAGE_MASK); + + cache->vaddr = flushes | KMAP; + cache->node.mm = (void *)obj; + if (flushes) + mb(); + } + + vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, page)); + cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr; + cache->page = page; + + return vaddr; +} + +static void *reloc_iomap(struct drm_i915_gem_object *obj, + struct reloc_cache *cache, + unsigned long page) +{ + struct i915_ggtt *ggtt = cache_to_ggtt(cache); + unsigned long offset; + void *vaddr; + + if (cache->vaddr) { + io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr)); + } else { + struct i915_vma *vma; + int err; + + if (use_cpu_reloc(cache, obj)) + return NULL; + + err = i915_gem_object_set_to_gtt_domain(obj, true); + if (err) + return ERR_PTR(err); + + vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, + PIN_MAPPABLE | + PIN_NONBLOCK | + PIN_NONFAULT); + if (IS_ERR(vma)) { + memset(&cache->node, 0, sizeof(cache->node)); + err = drm_mm_insert_node_in_range + (&ggtt->vm.mm, &cache->node, + PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE, + 0, ggtt->mappable_end, + DRM_MM_INSERT_LOW); + if (err) /* no inactive aperture space, use cpu reloc */ + return NULL; + } else { + err = i915_vma_put_fence(vma); + if (err) { + i915_vma_unpin(vma); + return ERR_PTR(err); + } + + cache->node.start = vma->node.start; + cache->node.mm = (void *)vma; + } + } + + offset = cache->node.start; + if (cache->node.allocated) { + wmb(); + ggtt->vm.insert_page(&ggtt->vm, + i915_gem_object_get_dma_address(obj, page), + offset, I915_CACHE_NONE, 0); + } else { + offset += page << PAGE_SHIFT; + } + + vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->iomap, + offset); + cache->page = page; + cache->vaddr = (unsigned long)vaddr; + + return vaddr; +} + +static void *reloc_vaddr(struct drm_i915_gem_object *obj, + struct reloc_cache *cache, + unsigned long page) +{ + void *vaddr; + + if (cache->page == page) { + vaddr = unmask_page(cache->vaddr); + } else { + vaddr = NULL; + if ((cache->vaddr & KMAP) == 0) + vaddr = reloc_iomap(obj, cache, page); + if (!vaddr) + vaddr = reloc_kmap(obj, cache, page); + } + + return vaddr; +} + +static void clflush_write32(u32 *addr, u32 value, unsigned int flushes) +{ + if (unlikely(flushes & (CLFLUSH_BEFORE | CLFLUSH_AFTER))) { + if (flushes & CLFLUSH_BEFORE) { + clflushopt(addr); + mb(); + } + + *addr = value; + + /* + * Writes to the same cacheline are serialised by the CPU + * (including clflush). On the write path, we only require + * that it hits memory in an orderly fashion and place + * mb barriers at the start and end of the relocation phase + * to ensure ordering of clflush wrt to the system. 
+ */ + if (flushes & CLFLUSH_AFTER) + clflushopt(addr); + } else + *addr = value; +} + +static int __reloc_gpu_alloc(struct i915_execbuffer *eb, + struct i915_vma *vma, + unsigned int len) +{ + struct reloc_cache *cache = &eb->reloc_cache; + struct drm_i915_gem_object *obj; + struct i915_request *rq; + struct i915_vma *batch; + u32 *cmd; + int err; + + if (DBG_FORCE_RELOC == FORCE_GPU_RELOC) { + obj = vma->obj; + if (obj->cache_dirty & ~obj->cache_coherent) + i915_gem_clflush_object(obj, 0); + obj->write_domain = 0; + } + + GEM_BUG_ON(vma->obj->write_domain & I915_GEM_DOMAIN_CPU); + + obj = i915_gem_batch_pool_get(&eb->engine->batch_pool, PAGE_SIZE); + if (IS_ERR(obj)) + return PTR_ERR(obj); + + cmd = i915_gem_object_pin_map(obj, + cache->has_llc ? + I915_MAP_FORCE_WB : + I915_MAP_FORCE_WC); + i915_gem_object_unpin_pages(obj); + if (IS_ERR(cmd)) + return PTR_ERR(cmd); + + batch = i915_vma_instance(obj, vma->vm, NULL); + if (IS_ERR(batch)) { + err = PTR_ERR(batch); + goto err_unmap; + } + + err = i915_vma_pin(batch, 0, 0, PIN_USER | PIN_NONBLOCK); + if (err) + goto err_unmap; + + rq = i915_request_create(eb->context); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err_unpin; + } + + err = i915_request_await_object(rq, vma->obj, true); + if (err) + goto err_request; + + err = eb->engine->emit_bb_start(rq, + batch->node.start, PAGE_SIZE, + cache->gen > 5 ? 0 : I915_DISPATCH_SECURE); + if (err) + goto err_request; + + GEM_BUG_ON(!reservation_object_test_signaled_rcu(batch->resv, true)); + err = i915_vma_move_to_active(batch, rq, 0); + if (err) + goto skip_request; + + err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); + if (err) + goto skip_request; + + rq->batch = batch; + i915_vma_unpin(batch); + + cache->rq = rq; + cache->rq_cmd = cmd; + cache->rq_size = 0; + + /* Return with batch mapping (cmd) still pinned */ + return 0; + +skip_request: + i915_request_skip(rq, err); +err_request: + i915_request_add(rq); +err_unpin: + i915_vma_unpin(batch); +err_unmap: + i915_gem_object_unpin_map(obj); + return err; +} + +static u32 *reloc_gpu(struct i915_execbuffer *eb, + struct i915_vma *vma, + unsigned int len) +{ + struct reloc_cache *cache = &eb->reloc_cache; + u32 *cmd; + + if (cache->rq_size > PAGE_SIZE/sizeof(u32) - (len + 1)) + reloc_gpu_flush(cache); + + if (unlikely(!cache->rq)) { + int err; + + /* If we need to copy for the cmdparser, we will stall anyway */ + if (eb_use_cmdparser(eb)) + return ERR_PTR(-EWOULDBLOCK); + + if (!intel_engine_can_store_dword(eb->engine)) + return ERR_PTR(-ENODEV); + + err = __reloc_gpu_alloc(eb, vma, len); + if (unlikely(err)) + return ERR_PTR(err); + } + + cmd = cache->rq_cmd + cache->rq_size; + cache->rq_size += len; + + return cmd; +} + +static u64 +relocate_entry(struct i915_vma *vma, + const struct drm_i915_gem_relocation_entry *reloc, + struct i915_execbuffer *eb, + const struct i915_vma *target) +{ + u64 offset = reloc->offset; + u64 target_offset = relocation_target(reloc, target); + bool wide = eb->reloc_cache.use_64bit_reloc; + void *vaddr; + + if (!eb->reloc_cache.vaddr && + (DBG_FORCE_RELOC == FORCE_GPU_RELOC || + !reservation_object_test_signaled_rcu(vma->resv, true))) { + const unsigned int gen = eb->reloc_cache.gen; + unsigned int len; + u32 *batch; + u64 addr; + + if (wide) + len = offset & 7 ? 
8 : 5; + else if (gen >= 4) + len = 4; + else + len = 3; + + batch = reloc_gpu(eb, vma, len); + if (IS_ERR(batch)) + goto repeat; + + addr = gen8_canonical_addr(vma->node.start + offset); + if (wide) { + if (offset & 7) { + *batch++ = MI_STORE_DWORD_IMM_GEN4; + *batch++ = lower_32_bits(addr); + *batch++ = upper_32_bits(addr); + *batch++ = lower_32_bits(target_offset); + + addr = gen8_canonical_addr(addr + 4); + + *batch++ = MI_STORE_DWORD_IMM_GEN4; + *batch++ = lower_32_bits(addr); + *batch++ = upper_32_bits(addr); + *batch++ = upper_32_bits(target_offset); + } else { + *batch++ = (MI_STORE_DWORD_IMM_GEN4 | (1 << 21)) + 1; + *batch++ = lower_32_bits(addr); + *batch++ = upper_32_bits(addr); + *batch++ = lower_32_bits(target_offset); + *batch++ = upper_32_bits(target_offset); + } + } else if (gen >= 6) { + *batch++ = MI_STORE_DWORD_IMM_GEN4; + *batch++ = 0; + *batch++ = addr; + *batch++ = target_offset; + } else if (gen >= 4) { + *batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; + *batch++ = 0; + *batch++ = addr; + *batch++ = target_offset; + } else { + *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL; + *batch++ = addr; + *batch++ = target_offset; + } + + goto out; + } + +repeat: + vaddr = reloc_vaddr(vma->obj, &eb->reloc_cache, offset >> PAGE_SHIFT); + if (IS_ERR(vaddr)) + return PTR_ERR(vaddr); + + clflush_write32(vaddr + offset_in_page(offset), + lower_32_bits(target_offset), + eb->reloc_cache.vaddr); + + if (wide) { + offset += sizeof(u32); + target_offset >>= 32; + wide = false; + goto repeat; + } + +out: + return target->node.start | UPDATE; +} + +static u64 +eb_relocate_entry(struct i915_execbuffer *eb, + struct i915_vma *vma, + const struct drm_i915_gem_relocation_entry *reloc) +{ + struct i915_vma *target; + int err; + + /* we've already hold a reference to all valid objects */ + target = eb_get_vma(eb, reloc->target_handle); + if (unlikely(!target)) + return -ENOENT; + + /* Validate that the target is in a valid r/w GPU domain */ + if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) { + DRM_DEBUG("reloc with multiple write domains: " + "target %d offset %d " + "read %08x write %08x", + reloc->target_handle, + (int) reloc->offset, + reloc->read_domains, + reloc->write_domain); + return -EINVAL; + } + if (unlikely((reloc->write_domain | reloc->read_domains) + & ~I915_GEM_GPU_DOMAINS)) { + DRM_DEBUG("reloc with read/write non-GPU domains: " + "target %d offset %d " + "read %08x write %08x", + reloc->target_handle, + (int) reloc->offset, + reloc->read_domains, + reloc->write_domain); + return -EINVAL; + } + + if (reloc->write_domain) { + *target->exec_flags |= EXEC_OBJECT_WRITE; + + /* + * Sandybridge PPGTT errata: We need a global gtt mapping + * for MI and pipe_control writes because the gpu doesn't + * properly redirect them through the ppgtt for non_secure + * batchbuffers. + */ + if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION && + IS_GEN(eb->i915, 6)) { + err = i915_vma_bind(target, target->obj->cache_level, + PIN_GLOBAL); + if (WARN_ONCE(err, + "Unexpected failure to bind target VMA!")) + return err; + } + } + + /* + * If the relocation already has the right value in it, no + * more work needs to be done. + */ + if (!DBG_FORCE_RELOC && + gen8_canonical_addr(target->node.start) == reloc->presumed_offset) + return 0; + + /* Check that the relocation address is valid... */ + if (unlikely(reloc->offset > + vma->size - (eb->reloc_cache.use_64bit_reloc ? 
8 : 4))) { + DRM_DEBUG("Relocation beyond object bounds: " + "target %d offset %d size %d.\n", + reloc->target_handle, + (int)reloc->offset, + (int)vma->size); + return -EINVAL; + } + if (unlikely(reloc->offset & 3)) { + DRM_DEBUG("Relocation not 4-byte aligned: " + "target %d offset %d.\n", + reloc->target_handle, + (int)reloc->offset); + return -EINVAL; + } + + /* + * If we write into the object, we need to force the synchronisation + * barrier, either with an asynchronous clflush or if we executed the + * patching using the GPU (though that should be serialised by the + * timeline). To be completely sure, and since we are required to + * do relocations we are already stalling, disable the user's opt + * out of our synchronisation. + */ + *vma->exec_flags &= ~EXEC_OBJECT_ASYNC; + + /* and update the user's relocation entry */ + return relocate_entry(vma, reloc, eb, target); +} + +static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma) +{ +#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry)) + struct drm_i915_gem_relocation_entry stack[N_RELOC(512)]; + struct drm_i915_gem_relocation_entry __user *urelocs; + const struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma); + unsigned int remain; + + urelocs = u64_to_user_ptr(entry->relocs_ptr); + remain = entry->relocation_count; + if (unlikely(remain > N_RELOC(ULONG_MAX))) + return -EINVAL; + + /* + * We must check that the entire relocation array is safe + * to read. However, if the array is not writable the user loses + * the updated relocation values. + */ + if (unlikely(!access_ok(urelocs, remain*sizeof(*urelocs)))) + return -EFAULT; + + do { + struct drm_i915_gem_relocation_entry *r = stack; + unsigned int count = + min_t(unsigned int, remain, ARRAY_SIZE(stack)); + unsigned int copied; + + /* + * This is the fast path and we cannot handle a pagefault + * whilst holding the struct mutex lest the user pass in the + * relocations contained within a mmaped bo. For in such a case + * we, the page fault handler would call i915_gem_fault() and + * we would try to acquire the struct mutex again. Obviously + * this is bad and so lockdep complains vehemently. + */ + pagefault_disable(); + copied = __copy_from_user_inatomic(r, urelocs, count * sizeof(r[0])); + pagefault_enable(); + if (unlikely(copied)) { + remain = -EFAULT; + goto out; + } + + remain -= count; + do { + u64 offset = eb_relocate_entry(eb, vma, r); + + if (likely(offset == 0)) { + } else if ((s64)offset < 0) { + remain = (int)offset; + goto out; + } else { + /* + * Note that reporting an error now + * leaves everything in an inconsistent + * state as we have *already* changed + * the relocation value inside the + * object. As we have not changed the + * reloc.presumed_offset or will not + * change the execobject.offset, on the + * call we may not rewrite the value + * inside the object, leaving it + * dangling and causing a GPU hang. Unless + * userspace dynamically rebuilds the + * relocations on each execbuf rather than + * presume a static tree. + * + * We did previously check if the relocations + * were writable (access_ok), an error now + * would be a strange race with mprotect, + * having already demonstrated that we + * can read from this userspace address. 
+ */ + offset = gen8_canonical_addr(offset & ~UPDATE); + if (unlikely(__put_user(offset, &urelocs[r-stack].presumed_offset))) { + remain = -EFAULT; + goto out; + } + } + } while (r++, --count); + urelocs += ARRAY_SIZE(stack); + } while (remain); +out: + reloc_cache_reset(&eb->reloc_cache); + return remain; +} + +static int +eb_relocate_vma_slow(struct i915_execbuffer *eb, struct i915_vma *vma) +{ + const struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma); + struct drm_i915_gem_relocation_entry *relocs = + u64_to_ptr(typeof(*relocs), entry->relocs_ptr); + unsigned int i; + int err; + + for (i = 0; i < entry->relocation_count; i++) { + u64 offset = eb_relocate_entry(eb, vma, &relocs[i]); + + if ((s64)offset < 0) { + err = (int)offset; + goto err; + } + } + err = 0; +err: + reloc_cache_reset(&eb->reloc_cache); + return err; +} + +static int check_relocations(const struct drm_i915_gem_exec_object2 *entry) +{ + const char __user *addr, *end; + unsigned long size; + char __maybe_unused c; + + size = entry->relocation_count; + if (size == 0) + return 0; + + if (size > N_RELOC(ULONG_MAX)) + return -EINVAL; + + addr = u64_to_user_ptr(entry->relocs_ptr); + size *= sizeof(struct drm_i915_gem_relocation_entry); + if (!access_ok(addr, size)) + return -EFAULT; + + end = addr + size; + for (; addr < end; addr += PAGE_SIZE) { + int err = __get_user(c, addr); + if (err) + return err; + } + return __get_user(c, end - 1); +} + +static int eb_copy_relocations(const struct i915_execbuffer *eb) +{ + const unsigned int count = eb->buffer_count; + unsigned int i; + int err; + + for (i = 0; i < count; i++) { + const unsigned int nreloc = eb->exec[i].relocation_count; + struct drm_i915_gem_relocation_entry __user *urelocs; + struct drm_i915_gem_relocation_entry *relocs; + unsigned long size; + unsigned long copied; + + if (nreloc == 0) + continue; + + err = check_relocations(&eb->exec[i]); + if (err) + goto err; + + urelocs = u64_to_user_ptr(eb->exec[i].relocs_ptr); + size = nreloc * sizeof(*relocs); + + relocs = kvmalloc_array(size, 1, GFP_KERNEL); + if (!relocs) { + err = -ENOMEM; + goto err; + } + + /* copy_from_user is limited to < 4GiB */ + copied = 0; + do { + unsigned int len = + min_t(u64, BIT_ULL(31), size - copied); + + if (__copy_from_user((char *)relocs + copied, + (char __user *)urelocs + copied, + len)) { +end_user: + user_access_end(); +end: + kvfree(relocs); + err = -EFAULT; + goto err; + } + + copied += len; + } while (copied < size); + + /* + * As we do not update the known relocation offsets after + * relocating (due to the complexities in lock handling), + * we need to mark them as invalid now so that we force the + * relocation processing next time. Just in case the target + * object is evicted and then rebound into its old + * presumed_offset before the next execbuffer - if that + * happened we would make the mistake of assuming that the + * relocations were valid. 
+ */ + if (!user_access_begin(urelocs, size)) + goto end; + + for (copied = 0; copied < nreloc; copied++) + unsafe_put_user(-1, + &urelocs[copied].presumed_offset, + end_user); + user_access_end(); + + eb->exec[i].relocs_ptr = (uintptr_t)relocs; + } + + return 0; + +err: + while (i--) { + struct drm_i915_gem_relocation_entry *relocs = + u64_to_ptr(typeof(*relocs), eb->exec[i].relocs_ptr); + if (eb->exec[i].relocation_count) + kvfree(relocs); + } + return err; +} + +static int eb_prefault_relocations(const struct i915_execbuffer *eb) +{ + const unsigned int count = eb->buffer_count; + unsigned int i; + + if (unlikely(i915_modparams.prefault_disable)) + return 0; + + for (i = 0; i < count; i++) { + int err; + + err = check_relocations(&eb->exec[i]); + if (err) + return err; + } + + return 0; +} + +static noinline int eb_relocate_slow(struct i915_execbuffer *eb) +{ + struct drm_device *dev = &eb->i915->drm; + bool have_copy = false; + struct i915_vma *vma; + int err = 0; + +repeat: + if (signal_pending(current)) { + err = -ERESTARTSYS; + goto out; + } + + /* We may process another execbuffer during the unlock... */ + eb_reset_vmas(eb); + mutex_unlock(&dev->struct_mutex); + + /* + * We take 3 passes through the slowpatch. + * + * 1 - we try to just prefault all the user relocation entries and + * then attempt to reuse the atomic pagefault disabled fast path again. + * + * 2 - we copy the user entries to a local buffer here outside of the + * local and allow ourselves to wait upon any rendering before + * relocations + * + * 3 - we already have a local copy of the relocation entries, but + * were interrupted (EAGAIN) whilst waiting for the objects, try again. + */ + if (!err) { + err = eb_prefault_relocations(eb); + } else if (!have_copy) { + err = eb_copy_relocations(eb); + have_copy = err == 0; + } else { + cond_resched(); + err = 0; + } + if (err) { + mutex_lock(&dev->struct_mutex); + goto out; + } + + /* A frequent cause for EAGAIN are currently unavailable client pages */ + flush_workqueue(eb->i915->mm.userptr_wq); + + err = i915_mutex_lock_interruptible(dev); + if (err) { + mutex_lock(&dev->struct_mutex); + goto out; + } + + /* reacquire the objects */ + err = eb_lookup_vmas(eb); + if (err) + goto err; + + GEM_BUG_ON(!eb->batch); + + list_for_each_entry(vma, &eb->relocs, reloc_link) { + if (!have_copy) { + pagefault_disable(); + err = eb_relocate_vma(eb, vma); + pagefault_enable(); + if (err) + goto repeat; + } else { + err = eb_relocate_vma_slow(eb, vma); + if (err) + goto err; + } + } + + /* + * Leave the user relocations as are, this is the painfully slow path, + * and we want to avoid the complication of dropping the lock whilst + * having buffers reserved in the aperture and so causing spurious + * ENOSPC for random operations. + */ + +err: + if (err == -EAGAIN) + goto repeat; + +out: + if (have_copy) { + const unsigned int count = eb->buffer_count; + unsigned int i; + + for (i = 0; i < count; i++) { + const struct drm_i915_gem_exec_object2 *entry = + &eb->exec[i]; + struct drm_i915_gem_relocation_entry *relocs; + + if (!entry->relocation_count) + continue; + + relocs = u64_to_ptr(typeof(*relocs), entry->relocs_ptr); + kvfree(relocs); + } + } + + return err; +} + +static int eb_relocate(struct i915_execbuffer *eb) +{ + if (eb_lookup_vmas(eb)) + goto slow; + + /* The objects are in their final locations, apply the relocations. 
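+	 * (When userspace used NO_RELOC and no object had to be moved,
+	 * __EXEC_HAS_RELOC remains clear and this walk is skipped
+	 * entirely.)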
*/ + if (eb->args->flags & __EXEC_HAS_RELOC) { + struct i915_vma *vma; + + list_for_each_entry(vma, &eb->relocs, reloc_link) { + if (eb_relocate_vma(eb, vma)) + goto slow; + } + } + + return 0; + +slow: + return eb_relocate_slow(eb); +} + +static int eb_move_to_gpu(struct i915_execbuffer *eb) +{ + const unsigned int count = eb->buffer_count; + unsigned int i; + int err; + + for (i = 0; i < count; i++) { + unsigned int flags = eb->flags[i]; + struct i915_vma *vma = eb->vma[i]; + struct drm_i915_gem_object *obj = vma->obj; + + if (flags & EXEC_OBJECT_CAPTURE) { + struct i915_capture_list *capture; + + capture = kmalloc(sizeof(*capture), GFP_KERNEL); + if (unlikely(!capture)) + return -ENOMEM; + + capture->next = eb->request->capture_list; + capture->vma = eb->vma[i]; + eb->request->capture_list = capture; + } + + /* + * If the GPU is not _reading_ through the CPU cache, we need + * to make sure that any writes (both previous GPU writes from + * before a change in snooping levels and normal CPU writes) + * caught in that cache are flushed to main memory. + * + * We want to say + * obj->cache_dirty && + * !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ) + * but gcc's optimiser doesn't handle that as well and emits + * two jumps instead of one. Maybe one day... + */ + if (unlikely(obj->cache_dirty & ~obj->cache_coherent)) { + if (i915_gem_clflush_object(obj, 0)) + flags &= ~EXEC_OBJECT_ASYNC; + } + + if (flags & EXEC_OBJECT_ASYNC) + continue; + + err = i915_request_await_object + (eb->request, obj, flags & EXEC_OBJECT_WRITE); + if (err) + return err; + } + + for (i = 0; i < count; i++) { + unsigned int flags = eb->flags[i]; + struct i915_vma *vma = eb->vma[i]; + + err = i915_vma_move_to_active(vma, eb->request, flags); + if (unlikely(err)) { + i915_request_skip(eb->request, err); + return err; + } + + __eb_unreserve_vma(vma, flags); + vma->exec_flags = NULL; + + if (unlikely(flags & __EXEC_OBJECT_HAS_REF)) + i915_vma_put(vma); + } + eb->exec = NULL; + + /* Unconditionally flush any chipset caches (for streaming writes). 
*/ + i915_gem_chipset_flush(eb->i915); + + return 0; +} + +static bool i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec) +{ + if (exec->flags & __I915_EXEC_ILLEGAL_FLAGS) + return false; + + /* Kernel clipping was a DRI1 misfeature */ + if (!(exec->flags & I915_EXEC_FENCE_ARRAY)) { + if (exec->num_cliprects || exec->cliprects_ptr) + return false; + } + + if (exec->DR4 == 0xffffffff) { + DRM_DEBUG("UXA submitting garbage DR4, fixing up\n"); + exec->DR4 = 0; + } + if (exec->DR1 || exec->DR4) + return false; + + if ((exec->batch_start_offset | exec->batch_len) & 0x7) + return false; + + return true; +} + +static int i915_reset_gen7_sol_offsets(struct i915_request *rq) +{ + u32 *cs; + int i; + + if (!IS_GEN(rq->i915, 7) || rq->engine->id != RCS0) { + DRM_DEBUG("sol reset is gen7/rcs only\n"); + return -EINVAL; + } + + cs = intel_ring_begin(rq, 4 * 2 + 2); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + *cs++ = MI_LOAD_REGISTER_IMM(4); + for (i = 0; i < 4; i++) { + *cs++ = i915_mmio_reg_offset(GEN7_SO_WRITE_OFFSET(i)); + *cs++ = 0; + } + *cs++ = MI_NOOP; + intel_ring_advance(rq, cs); + + return 0; +} + +static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master) +{ + struct drm_i915_gem_object *shadow_batch_obj; + struct i915_vma *vma; + int err; + + shadow_batch_obj = i915_gem_batch_pool_get(&eb->engine->batch_pool, + PAGE_ALIGN(eb->batch_len)); + if (IS_ERR(shadow_batch_obj)) + return ERR_CAST(shadow_batch_obj); + + err = intel_engine_cmd_parser(eb->engine, + eb->batch->obj, + shadow_batch_obj, + eb->batch_start_offset, + eb->batch_len, + is_master); + if (err) { + if (err == -EACCES) /* unhandled chained batch */ + vma = NULL; + else + vma = ERR_PTR(err); + goto out; + } + + vma = i915_gem_object_ggtt_pin(shadow_batch_obj, NULL, 0, 0, 0); + if (IS_ERR(vma)) + goto out; + + eb->vma[eb->buffer_count] = i915_vma_get(vma); + eb->flags[eb->buffer_count] = + __EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_REF; + vma->exec_flags = &eb->flags[eb->buffer_count]; + eb->buffer_count++; + +out: + i915_gem_object_unpin_pages(shadow_batch_obj); + return vma; +} + +static void +add_to_client(struct i915_request *rq, struct drm_file *file) +{ + rq->file_priv = file->driver_priv; + list_add_tail(&rq->client_link, &rq->file_priv->mm.request_list); +} + +static int eb_submit(struct i915_execbuffer *eb) +{ + int err; + + err = eb_move_to_gpu(eb); + if (err) + return err; + + if (eb->args->flags & I915_EXEC_GEN7_SOL_RESET) { + err = i915_reset_gen7_sol_offsets(eb->request); + if (err) + return err; + } + + /* + * After we completed waiting for other engines (using HW semaphores) + * then we can signal that this request/batch is ready to run. This + * allows us to determine if the batch is still waiting on the GPU + * or actually running by checking the breadcrumb. + */ + if (eb->engine->emit_init_breadcrumb) { + err = eb->engine->emit_init_breadcrumb(eb->request); + if (err) + return err; + } + + err = eb->engine->emit_bb_start(eb->request, + eb->batch->node.start + + eb->batch_start_offset, + eb->batch_len, + eb->batch_flags); + if (err) + return err; + + return 0; +} + +/* + * Find one BSD ring to dispatch the corresponding BSD command. + * The engine index is returned. + */ +static unsigned int +gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv, + struct drm_file *file) +{ + struct drm_i915_file_private *file_priv = file->driver_priv; + + /* Check whether the file_priv has already selected one ring. 
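+	 * The first execbuf on this file picks an engine by xor-ing the
+	 * global dispatch index, so successive clients ping-pong between
+	 * VCS0 and VCS1.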
*/ + if ((int)file_priv->bsd_engine < 0) + file_priv->bsd_engine = atomic_fetch_xor(1, + &dev_priv->mm.bsd_engine_dispatch_index); + + return file_priv->bsd_engine; +} + +static const enum intel_engine_id user_ring_map[] = { + [I915_EXEC_DEFAULT] = RCS0, + [I915_EXEC_RENDER] = RCS0, + [I915_EXEC_BLT] = BCS0, + [I915_EXEC_BSD] = VCS0, + [I915_EXEC_VEBOX] = VECS0 +}; + +static int eb_pin_context(struct i915_execbuffer *eb, struct intel_context *ce) +{ + int err; + + /* + * ABI: Before userspace accesses the GPU (e.g. execbuffer), report + * EIO if the GPU is already wedged. + */ + err = i915_terminally_wedged(eb->i915); + if (err) + return err; + + /* + * Pinning the contexts may generate requests in order to acquire + * GGTT space, so do this first before we reserve a seqno for + * ourselves. + */ + err = intel_context_pin(ce); + if (err) + return err; + + eb->engine = ce->engine; + eb->context = ce; + return 0; +} + +static void eb_unpin_context(struct i915_execbuffer *eb) +{ + intel_context_unpin(eb->context); +} + +static unsigned int +eb_select_legacy_ring(struct i915_execbuffer *eb, + struct drm_file *file, + struct drm_i915_gem_execbuffer2 *args) +{ + struct drm_i915_private *i915 = eb->i915; + unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK; + + if (user_ring_id != I915_EXEC_BSD && + (args->flags & I915_EXEC_BSD_MASK)) { + DRM_DEBUG("execbuf with non bsd ring but with invalid " + "bsd dispatch flags: %d\n", (int)(args->flags)); + return -1; + } + + if (user_ring_id == I915_EXEC_BSD && HAS_ENGINE(i915, VCS1)) { + unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK; + + if (bsd_idx == I915_EXEC_BSD_DEFAULT) { + bsd_idx = gen8_dispatch_bsd_engine(i915, file); + } else if (bsd_idx >= I915_EXEC_BSD_RING1 && + bsd_idx <= I915_EXEC_BSD_RING2) { + bsd_idx >>= I915_EXEC_BSD_SHIFT; + bsd_idx--; + } else { + DRM_DEBUG("execbuf with unknown bsd ring: %u\n", + bsd_idx); + return -1; + } + + return _VCS(bsd_idx); + } + + if (user_ring_id >= ARRAY_SIZE(user_ring_map)) { + DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id); + return -1; + } + + return user_ring_map[user_ring_id]; +} + +static int +eb_select_engine(struct i915_execbuffer *eb, + struct drm_file *file, + struct drm_i915_gem_execbuffer2 *args) +{ + struct intel_context *ce; + unsigned int idx; + int err; + + if (i915_gem_context_user_engines(eb->gem_context)) + idx = args->flags & I915_EXEC_RING_MASK; + else + idx = eb_select_legacy_ring(eb, file, args); + + ce = i915_gem_context_get_engine(eb->gem_context, idx); + if (IS_ERR(ce)) + return PTR_ERR(ce); + + err = eb_pin_context(eb, ce); + intel_context_put(ce); + + return err; +} + +static void +__free_fence_array(struct drm_syncobj **fences, unsigned int n) +{ + while (n--) + drm_syncobj_put(ptr_mask_bits(fences[n], 2)); + kvfree(fences); +} + +static struct drm_syncobj ** +get_fence_array(struct drm_i915_gem_execbuffer2 *args, + struct drm_file *file) +{ + const unsigned long nfences = args->num_cliprects; + struct drm_i915_gem_exec_fence __user *user; + struct drm_syncobj **fences; + unsigned long n; + int err; + + if (!(args->flags & I915_EXEC_FENCE_ARRAY)) + return NULL; + + /* Check multiplication overflow for access_ok() and kvmalloc_array() */ + BUILD_BUG_ON(sizeof(size_t) > sizeof(unsigned long)); + if (nfences > min_t(unsigned long, + ULONG_MAX / sizeof(*user), + SIZE_MAX / sizeof(*fences))) + return ERR_PTR(-EINVAL); + + user = u64_to_user_ptr(args->cliprects_ptr); + if (!access_ok(user, nfences * sizeof(*user))) + return ERR_PTR(-EFAULT); + + fences 
= kvmalloc_array(nfences, sizeof(*fences), + __GFP_NOWARN | GFP_KERNEL); + if (!fences) + return ERR_PTR(-ENOMEM); + + for (n = 0; n < nfences; n++) { + struct drm_i915_gem_exec_fence fence; + struct drm_syncobj *syncobj; + + if (__copy_from_user(&fence, user++, sizeof(fence))) { + err = -EFAULT; + goto err; + } + + if (fence.flags & __I915_EXEC_FENCE_UNKNOWN_FLAGS) { + err = -EINVAL; + goto err; + } + + syncobj = drm_syncobj_find(file, fence.handle); + if (!syncobj) { + DRM_DEBUG("Invalid syncobj handle provided\n"); + err = -ENOENT; + goto err; + } + + BUILD_BUG_ON(~(ARCH_KMALLOC_MINALIGN - 1) & + ~__I915_EXEC_FENCE_UNKNOWN_FLAGS); + + fences[n] = ptr_pack_bits(syncobj, fence.flags, 2); + } + + return fences; + +err: + __free_fence_array(fences, n); + return ERR_PTR(err); +} + +static void +put_fence_array(struct drm_i915_gem_execbuffer2 *args, + struct drm_syncobj **fences) +{ + if (fences) + __free_fence_array(fences, args->num_cliprects); +} + +static int +await_fence_array(struct i915_execbuffer *eb, + struct drm_syncobj **fences) +{ + const unsigned int nfences = eb->args->num_cliprects; + unsigned int n; + int err; + + for (n = 0; n < nfences; n++) { + struct drm_syncobj *syncobj; + struct dma_fence *fence; + unsigned int flags; + + syncobj = ptr_unpack_bits(fences[n], &flags, 2); + if (!(flags & I915_EXEC_FENCE_WAIT)) + continue; + + fence = drm_syncobj_fence_get(syncobj); + if (!fence) + return -EINVAL; + + err = i915_request_await_dma_fence(eb->request, fence); + dma_fence_put(fence); + if (err < 0) + return err; + } + + return 0; +} + +static void +signal_fence_array(struct i915_execbuffer *eb, + struct drm_syncobj **fences) +{ + const unsigned int nfences = eb->args->num_cliprects; + struct dma_fence * const fence = &eb->request->fence; + unsigned int n; + + for (n = 0; n < nfences; n++) { + struct drm_syncobj *syncobj; + unsigned int flags; + + syncobj = ptr_unpack_bits(fences[n], &flags, 2); + if (!(flags & I915_EXEC_FENCE_SIGNAL)) + continue; + + drm_syncobj_replace_fence(syncobj, fence); + } +} + +static int +i915_gem_do_execbuffer(struct drm_device *dev, + struct drm_file *file, + struct drm_i915_gem_execbuffer2 *args, + struct drm_i915_gem_exec_object2 *exec, + struct drm_syncobj **fences) +{ + struct i915_execbuffer eb; + struct dma_fence *in_fence = NULL; + struct dma_fence *exec_fence = NULL; + struct sync_file *out_fence = NULL; + int out_fence_fd = -1; + int err; + + BUILD_BUG_ON(__EXEC_INTERNAL_FLAGS & ~__I915_EXEC_ILLEGAL_FLAGS); + BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS & + ~__EXEC_OBJECT_UNKNOWN_FLAGS); + + eb.i915 = to_i915(dev); + eb.file = file; + eb.args = args; + if (DBG_FORCE_RELOC || !(args->flags & I915_EXEC_NO_RELOC)) + args->flags |= __EXEC_HAS_RELOC; + + eb.exec = exec; + eb.vma = (struct i915_vma **)(exec + args->buffer_count + 1); + eb.vma[0] = NULL; + eb.flags = (unsigned int *)(eb.vma + args->buffer_count + 1); + + eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS; + reloc_cache_init(&eb.reloc_cache, eb.i915); + + eb.buffer_count = args->buffer_count; + eb.batch_start_offset = args->batch_start_offset; + eb.batch_len = args->batch_len; + + eb.batch_flags = 0; + if (args->flags & I915_EXEC_SECURE) { + if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN)) + return -EPERM; + + eb.batch_flags |= I915_DISPATCH_SECURE; + } + if (args->flags & I915_EXEC_IS_PINNED) + eb.batch_flags |= I915_DISPATCH_PINNED; + + if (args->flags & I915_EXEC_FENCE_IN) { + in_fence = sync_file_get_fence(lower_32_bits(args->rsvd2)); + if (!in_fence) + return -EINVAL; + } 
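+	/*
+	 * Note that FENCE_SUBMIT below reuses the same fd in the lower
+	 * half of rsvd2, which is why combining it with FENCE_IN is
+	 * rejected with -EINVAL.
+	 */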
+ + if (args->flags & I915_EXEC_FENCE_SUBMIT) { + if (in_fence) { + err = -EINVAL; + goto err_in_fence; + } + + exec_fence = sync_file_get_fence(lower_32_bits(args->rsvd2)); + if (!exec_fence) { + err = -EINVAL; + goto err_in_fence; + } + } + + if (args->flags & I915_EXEC_FENCE_OUT) { + out_fence_fd = get_unused_fd_flags(O_CLOEXEC); + if (out_fence_fd < 0) { + err = out_fence_fd; + goto err_exec_fence; + } + } + + err = eb_create(&eb); + if (err) + goto err_out_fence; + + GEM_BUG_ON(!eb.lut_size); + + err = eb_select_context(&eb); + if (unlikely(err)) + goto err_destroy; + + /* + * Take a local wakeref for preparing to dispatch the execbuf as + * we expect to access the hardware fairly frequently in the + * process. Upon first dispatch, we acquire another prolonged + * wakeref that we hold until the GPU has been idle for at least + * 100ms. + */ + intel_gt_pm_get(eb.i915); + + err = i915_mutex_lock_interruptible(dev); + if (err) + goto err_rpm; + + err = eb_select_engine(&eb, file, args); + if (unlikely(err)) + goto err_unlock; + + err = eb_wait_for_ring(&eb); /* may temporarily drop struct_mutex */ + if (unlikely(err)) + goto err_engine; + + err = eb_relocate(&eb); + if (err) { + /* + * If the user expects the execobject.offset and + * reloc.presumed_offset to be an exact match, + * as for using NO_RELOC, then we cannot update + * the execobject.offset until we have completed + * relocation. + */ + args->flags &= ~__EXEC_HAS_RELOC; + goto err_vma; + } + + if (unlikely(*eb.batch->exec_flags & EXEC_OBJECT_WRITE)) { + DRM_DEBUG("Attempting to use self-modifying batch buffer\n"); + err = -EINVAL; + goto err_vma; + } + if (eb.batch_start_offset > eb.batch->size || + eb.batch_len > eb.batch->size - eb.batch_start_offset) { + DRM_DEBUG("Attempting to use out-of-bounds batch\n"); + err = -EINVAL; + goto err_vma; + } + + if (eb_use_cmdparser(&eb)) { + struct i915_vma *vma; + + vma = eb_parse(&eb, drm_is_current_master(file)); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto err_vma; + } + + if (vma) { + /* + * Batch parsed and accepted: + * + * Set the DISPATCH_SECURE bit to remove the NON_SECURE + * bit from MI_BATCH_BUFFER_START commands issued in + * the dispatch_execbuffer implementations. We + * specifically don't want that set on batches the + * command parser has accepted. + */ + eb.batch_flags |= I915_DISPATCH_SECURE; + eb.batch_start_offset = 0; + eb.batch = vma; + } + } + + if (eb.batch_len == 0) + eb.batch_len = eb.batch->size - eb.batch_start_offset; + + /* + * snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure + * batch" bit. Hence we need to pin secure batches into the global gtt. + * hsw should have this fixed, but bdw mucks it up again. */ + if (eb.batch_flags & I915_DISPATCH_SECURE) { + struct i915_vma *vma; + + /* + * So on first glance it looks freaky that we pin the batch here + * outside of the reservation loop. But: + * - The batch is already pinned into the relevant ppgtt, so we + * already have the backing storage fully allocated. + * - No other BO uses the global gtt (well contexts, but meh), + * so we don't really have issues with multiple objects not + * fitting due to fragmentation. + * So this is actually safe. + */ + vma = i915_gem_object_ggtt_pin(eb.batch->obj, NULL, 0, 0, 0); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto err_vma; + } + + eb.batch = vma; + } + + /* All GPU relocation batches must be submitted prior to the user rq */ + GEM_BUG_ON(eb.reloc_cache.rq); + + /* Allocate a request for this batch buffer nice and early. 
*/ + eb.request = i915_request_create(eb.context); + if (IS_ERR(eb.request)) { + err = PTR_ERR(eb.request); + goto err_batch_unpin; + } + + if (in_fence) { + err = i915_request_await_dma_fence(eb.request, in_fence); + if (err < 0) + goto err_request; + } + + if (exec_fence) { + err = i915_request_await_execution(eb.request, exec_fence, + eb.engine->bond_execute); + if (err < 0) + goto err_request; + } + + if (fences) { + err = await_fence_array(&eb, fences); + if (err) + goto err_request; + } + + if (out_fence_fd != -1) { + out_fence = sync_file_create(&eb.request->fence); + if (!out_fence) { + err = -ENOMEM; + goto err_request; + } + } + + /* + * Whilst this request exists, batch_obj will be on the + * active_list, and so will hold the active reference. Only when this + * request is retired will the the batch_obj be moved onto the + * inactive_list and lose its active reference. Hence we do not need + * to explicitly hold another reference here. + */ + eb.request->batch = eb.batch; + + trace_i915_request_queue(eb.request, eb.batch_flags); + err = eb_submit(&eb); +err_request: + add_to_client(eb.request, file); + i915_request_add(eb.request); + + if (fences) + signal_fence_array(&eb, fences); + + if (out_fence) { + if (err == 0) { + fd_install(out_fence_fd, out_fence->file); + args->rsvd2 &= GENMASK_ULL(31, 0); /* keep in-fence */ + args->rsvd2 |= (u64)out_fence_fd << 32; + out_fence_fd = -1; + } else { + fput(out_fence->file); + } + } + +err_batch_unpin: + if (eb.batch_flags & I915_DISPATCH_SECURE) + i915_vma_unpin(eb.batch); +err_vma: + if (eb.exec) + eb_release_vmas(&eb); +err_engine: + eb_unpin_context(&eb); +err_unlock: + mutex_unlock(&dev->struct_mutex); +err_rpm: + intel_gt_pm_put(eb.i915); + i915_gem_context_put(eb.gem_context); +err_destroy: + eb_destroy(&eb); +err_out_fence: + if (out_fence_fd != -1) + put_unused_fd(out_fence_fd); +err_exec_fence: + dma_fence_put(exec_fence); +err_in_fence: + dma_fence_put(in_fence); + return err; +} + +static size_t eb_element_size(void) +{ + return (sizeof(struct drm_i915_gem_exec_object2) + + sizeof(struct i915_vma *) + + sizeof(unsigned int)); +} + +static bool check_buffer_count(size_t count) +{ + const size_t sz = eb_element_size(); + + /* + * When using LUT_HANDLE, we impose a limit of INT_MAX for the lookup + * array size (see eb_create()). Otherwise, we can accept an array as + * large as can be addressed (though use large arrays at your peril)! + */ + + return !(count < 1 || count > INT_MAX || count > SIZE_MAX / sz - 1); +} + +/* + * Legacy execbuffer just creates an exec2 list from the original exec object + * list array and passes it to the real function. 
+ */ +int +i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_i915_gem_execbuffer *args = data; + struct drm_i915_gem_execbuffer2 exec2; + struct drm_i915_gem_exec_object *exec_list = NULL; + struct drm_i915_gem_exec_object2 *exec2_list = NULL; + const size_t count = args->buffer_count; + unsigned int i; + int err; + + if (!check_buffer_count(count)) { + DRM_DEBUG("execbuf2 with %zd buffers\n", count); + return -EINVAL; + } + + exec2.buffers_ptr = args->buffers_ptr; + exec2.buffer_count = args->buffer_count; + exec2.batch_start_offset = args->batch_start_offset; + exec2.batch_len = args->batch_len; + exec2.DR1 = args->DR1; + exec2.DR4 = args->DR4; + exec2.num_cliprects = args->num_cliprects; + exec2.cliprects_ptr = args->cliprects_ptr; + exec2.flags = I915_EXEC_RENDER; + i915_execbuffer2_set_context_id(exec2, 0); + + if (!i915_gem_check_execbuffer(&exec2)) + return -EINVAL; + + /* Copy in the exec list from userland */ + exec_list = kvmalloc_array(count, sizeof(*exec_list), + __GFP_NOWARN | GFP_KERNEL); + exec2_list = kvmalloc_array(count + 1, eb_element_size(), + __GFP_NOWARN | GFP_KERNEL); + if (exec_list == NULL || exec2_list == NULL) { + DRM_DEBUG("Failed to allocate exec list for %d buffers\n", + args->buffer_count); + kvfree(exec_list); + kvfree(exec2_list); + return -ENOMEM; + } + err = copy_from_user(exec_list, + u64_to_user_ptr(args->buffers_ptr), + sizeof(*exec_list) * count); + if (err) { + DRM_DEBUG("copy %d exec entries failed %d\n", + args->buffer_count, err); + kvfree(exec_list); + kvfree(exec2_list); + return -EFAULT; + } + + for (i = 0; i < args->buffer_count; i++) { + exec2_list[i].handle = exec_list[i].handle; + exec2_list[i].relocation_count = exec_list[i].relocation_count; + exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr; + exec2_list[i].alignment = exec_list[i].alignment; + exec2_list[i].offset = exec_list[i].offset; + if (INTEL_GEN(to_i915(dev)) < 4) + exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE; + else + exec2_list[i].flags = 0; + } + + err = i915_gem_do_execbuffer(dev, file, &exec2, exec2_list, NULL); + if (exec2.flags & __EXEC_HAS_RELOC) { + struct drm_i915_gem_exec_object __user *user_exec_list = + u64_to_user_ptr(args->buffers_ptr); + + /* Copy the new buffer offsets back to the user's exec list. 
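+	 * Only entries whose offset carries the UPDATE flag are copied
+	 * back; unchanged buffers are skipped.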
*/ + for (i = 0; i < args->buffer_count; i++) { + if (!(exec2_list[i].offset & UPDATE)) + continue; + + exec2_list[i].offset = + gen8_canonical_addr(exec2_list[i].offset & PIN_OFFSET_MASK); + exec2_list[i].offset &= PIN_OFFSET_MASK; + if (__copy_to_user(&user_exec_list[i].offset, + &exec2_list[i].offset, + sizeof(user_exec_list[i].offset))) + break; + } + } + + kvfree(exec_list); + kvfree(exec2_list); + return err; +} + +int +i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_i915_gem_execbuffer2 *args = data; + struct drm_i915_gem_exec_object2 *exec2_list; + struct drm_syncobj **fences = NULL; + const size_t count = args->buffer_count; + int err; + + if (!check_buffer_count(count)) { + DRM_DEBUG("execbuf2 with %zd buffers\n", count); + return -EINVAL; + } + + if (!i915_gem_check_execbuffer(args)) + return -EINVAL; + + /* Allocate an extra slot for use by the command parser */ + exec2_list = kvmalloc_array(count + 1, eb_element_size(), + __GFP_NOWARN | GFP_KERNEL); + if (exec2_list == NULL) { + DRM_DEBUG("Failed to allocate exec list for %zd buffers\n", + count); + return -ENOMEM; + } + if (copy_from_user(exec2_list, + u64_to_user_ptr(args->buffers_ptr), + sizeof(*exec2_list) * count)) { + DRM_DEBUG("copy %zd exec entries failed\n", count); + kvfree(exec2_list); + return -EFAULT; + } + + if (args->flags & I915_EXEC_FENCE_ARRAY) { + fences = get_fence_array(args, file); + if (IS_ERR(fences)) { + kvfree(exec2_list); + return PTR_ERR(fences); + } + } + + err = i915_gem_do_execbuffer(dev, file, args, exec2_list, fences); + + /* + * Now that we have begun execution of the batchbuffer, we ignore + * any new error after this point. Also given that we have already + * updated the associated relocations, we try to write out the current + * object locations irrespective of any error. + */ + if (args->flags & __EXEC_HAS_RELOC) { + struct drm_i915_gem_exec_object2 __user *user_exec_list = + u64_to_user_ptr(args->buffers_ptr); + unsigned int i; + + /* Copy the new buffer offsets back to the user's exec list. */ + /* + * Note: count * sizeof(*user_exec_list) does not overflow, + * because we checked 'count' in check_buffer_count(). + * + * And this range already got effectively checked earlier + * when we did the "copy_from_user()" above. 
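+	 * (check_buffer_count() caps count at INT_MAX and at
+	 * SIZE_MAX / eb_element_size() - 1, which is what rules the
+	 * overflow out.)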
+ */ + if (!user_access_begin(user_exec_list, count * sizeof(*user_exec_list))) + goto end; + + for (i = 0; i < args->buffer_count; i++) { + if (!(exec2_list[i].offset & UPDATE)) + continue; + + exec2_list[i].offset = + gen8_canonical_addr(exec2_list[i].offset & PIN_OFFSET_MASK); + unsafe_put_user(exec2_list[i].offset, + &user_exec_list[i].offset, + end_user); + } +end_user: + user_access_end(); +end:; + } + + args->flags &= ~__I915_EXEC_UNKNOWN_FLAGS; + put_fence_array(args, fences); + kvfree(exec2_list); + return err; +} diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c new file mode 100644 index 000000000000..85a05a2435e9 --- /dev/null +++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c @@ -0,0 +1,197 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2014-2016 Intel Corporation + */ + +#include +#include +#include + +#include + +#include "i915_drv.h" +#include "i915_gem.h" +#include "i915_gem_object.h" +#include "i915_utils.h" + +#define QUIET (__GFP_NORETRY | __GFP_NOWARN) +#define MAYFAIL (__GFP_RETRY_MAYFAIL | __GFP_NOWARN) + +static void internal_free_pages(struct sg_table *st) +{ + struct scatterlist *sg; + + for (sg = st->sgl; sg; sg = __sg_next(sg)) { + if (sg_page(sg)) + __free_pages(sg_page(sg), get_order(sg->length)); + } + + sg_free_table(st); + kfree(st); +} + +static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj) +{ + struct drm_i915_private *i915 = to_i915(obj->base.dev); + struct sg_table *st; + struct scatterlist *sg; + unsigned int sg_page_sizes; + unsigned int npages; + int max_order; + gfp_t gfp; + + max_order = MAX_ORDER; +#ifdef CONFIG_SWIOTLB + if (swiotlb_nr_tbl()) { + unsigned int max_segment; + + max_segment = swiotlb_max_segment(); + if (max_segment) { + max_segment = max_t(unsigned int, max_segment, + PAGE_SIZE) >> PAGE_SHIFT; + max_order = min(max_order, ilog2(max_segment)); + } + } +#endif + + gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_RECLAIMABLE; + if (IS_I965GM(i915) || IS_I965G(i915)) { + /* 965gm cannot relocate objects above 4GiB. */ + gfp &= ~__GFP_HIGHMEM; + gfp |= __GFP_DMA32; + } + +create_st: + st = kmalloc(sizeof(*st), GFP_KERNEL); + if (!st) + return -ENOMEM; + + npages = obj->base.size / PAGE_SIZE; + if (sg_alloc_table(st, npages, GFP_KERNEL)) { + kfree(st); + return -ENOMEM; + } + + sg = st->sgl; + st->nents = 0; + sg_page_sizes = 0; + + do { + int order = min(fls(npages) - 1, max_order); + struct page *page; + + do { + page = alloc_pages(gfp | (order ? QUIET : MAYFAIL), + order); + if (page) + break; + if (!order--) + goto err; + + /* Limit subsequent allocations as well */ + max_order = order; + } while (1); + + sg_set_page(sg, page, PAGE_SIZE << order, 0); + sg_page_sizes |= PAGE_SIZE << order; + st->nents++; + + npages -= 1 << order; + if (!npages) { + sg_mark_end(sg); + break; + } + + sg = __sg_next(sg); + } while (1); + + if (i915_gem_gtt_prepare_pages(obj, st)) { + /* Failed to dma-map try again with single page sg segments */ + if (get_order(st->sgl->length)) { + internal_free_pages(st); + max_order = 0; + goto create_st; + } + goto err; + } + + /* Mark the pages as dontneed whilst they are still pinned. As soon + * as they are unpinned they are allowed to be reaped by the shrinker, + * and the caller is expected to repopulate - the contents of this + * object are only valid whilst active and pinned. 
+ */ + obj->mm.madv = I915_MADV_DONTNEED; + + __i915_gem_object_set_pages(obj, st, sg_page_sizes); + + return 0; + +err: + sg_set_page(sg, NULL, 0, 0); + sg_mark_end(sg); + internal_free_pages(st); + + return -ENOMEM; +} + +static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj, + struct sg_table *pages) +{ + i915_gem_gtt_finish_pages(obj, pages); + internal_free_pages(pages); + + obj->mm.dirty = false; + obj->mm.madv = I915_MADV_WILLNEED; +} + +static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = { + .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | + I915_GEM_OBJECT_IS_SHRINKABLE, + .get_pages = i915_gem_object_get_pages_internal, + .put_pages = i915_gem_object_put_pages_internal, +}; + +/** + * i915_gem_object_create_internal: create an object with volatile pages + * @i915: the i915 device + * @size: the size in bytes of backing storage to allocate for the object + * + * Creates a new object that wraps some internal memory for private use. + * This object is not backed by swappable storage, and as such its contents + * are volatile and only valid whilst pinned. If the object is reaped by the + * shrinker, its pages and data will be discarded. Equally, it is not a full + * GEM object and so not valid for access from userspace. This makes it useful + * for hardware interfaces like ringbuffers (which are pinned from the time + * the request is written to the time the hardware stops accessing it), but + * not for contexts (which need to be preserved when not active for later + * reuse). Note that it is not cleared upon allocation. + */ +struct drm_i915_gem_object * +i915_gem_object_create_internal(struct drm_i915_private *i915, + phys_addr_t size) +{ + struct drm_i915_gem_object *obj; + unsigned int cache_level; + + GEM_BUG_ON(!size); + GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE)); + + if (overflows_type(size, obj->base.size)) + return ERR_PTR(-E2BIG); + + obj = i915_gem_object_alloc(); + if (!obj) + return ERR_PTR(-ENOMEM); + + drm_gem_private_object_init(&i915->drm, &obj->base, size); + i915_gem_object_init(obj, &i915_gem_object_internal_ops); + + obj->read_domains = I915_GEM_DOMAIN_CPU; + obj->write_domain = I915_GEM_DOMAIN_CPU; + + cache_level = HAS_LLC(i915) ? 
I915_CACHE_LLC : I915_CACHE_NONE; + i915_gem_object_set_cache_coherency(obj, cache_level); + + return obj; +} diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c index 4ed28ac9ab3a..457e694a5c3f 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c @@ -23,8 +23,9 @@ */ #include "i915_drv.h" -#include "i915_gem_object.h" #include "i915_gem_clflush.h" +#include "i915_gem_context.h" +#include "i915_gem_object.h" #include "i915_globals.h" #include "intel_frontbuffer.h" @@ -442,3 +443,10 @@ int __init i915_global_objects_init(void) i915_global_register(&global.base); return 0; } + +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) +#include "selftests/huge_gem_object.c" +#include "selftests/huge_pages.c" +#include "selftests/i915_gem_object.c" +#include "selftests/i915_gem_coherency.c" +#endif diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c new file mode 100644 index 000000000000..ad662e558dfb --- /dev/null +++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c @@ -0,0 +1,251 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#include "gem/i915_gem_pm.h" +#include "gt/intel_gt_pm.h" + +#include "i915_drv.h" +#include "i915_globals.h" + +static void i915_gem_park(struct drm_i915_private *i915) +{ + struct intel_engine_cs *engine; + enum intel_engine_id id; + + lockdep_assert_held(&i915->drm.struct_mutex); + + for_each_engine(engine, i915, id) + i915_gem_batch_pool_fini(&engine->batch_pool); + + i915_timelines_park(i915); + i915_vma_parked(i915); + + i915_globals_park(); +} + +static void idle_work_handler(struct work_struct *work) +{ + struct drm_i915_private *i915 = + container_of(work, typeof(*i915), gem.idle_work); + bool restart = true; + + cancel_delayed_work(&i915->gem.retire_work); + mutex_lock(&i915->drm.struct_mutex); + + intel_wakeref_lock(&i915->gt.wakeref); + if (!intel_wakeref_active(&i915->gt.wakeref) && !work_pending(work)) { + i915_gem_park(i915); + restart = false; + } + intel_wakeref_unlock(&i915->gt.wakeref); + + mutex_unlock(&i915->drm.struct_mutex); + if (restart) + queue_delayed_work(i915->wq, + &i915->gem.retire_work, + round_jiffies_up_relative(HZ)); +} + +static void retire_work_handler(struct work_struct *work) +{ + struct drm_i915_private *i915 = + container_of(work, typeof(*i915), gem.retire_work.work); + + /* Come back later if the device is busy... 
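+	 * The trylock keeps this roughly once-a-second retire tick from
+	 * stalling behind struct_mutex; the work requeues itself below
+	 * either way.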
*/ + if (mutex_trylock(&i915->drm.struct_mutex)) { + i915_retire_requests(i915); + mutex_unlock(&i915->drm.struct_mutex); + } + + queue_delayed_work(i915->wq, + &i915->gem.retire_work, + round_jiffies_up_relative(HZ)); +} + +static int pm_notifier(struct notifier_block *nb, + unsigned long action, + void *data) +{ + struct drm_i915_private *i915 = + container_of(nb, typeof(*i915), gem.pm_notifier); + + switch (action) { + case INTEL_GT_UNPARK: + i915_globals_unpark(); + queue_delayed_work(i915->wq, + &i915->gem.retire_work, + round_jiffies_up_relative(HZ)); + break; + + case INTEL_GT_PARK: + queue_work(i915->wq, &i915->gem.idle_work); + break; + } + + return NOTIFY_OK; +} + +static bool switch_to_kernel_context_sync(struct drm_i915_private *i915) +{ + bool result = true; + + do { + if (i915_gem_wait_for_idle(i915, + I915_WAIT_LOCKED | + I915_WAIT_FOR_IDLE_BOOST, + I915_GEM_IDLE_TIMEOUT) == -ETIME) { + /* XXX hide warning from gem_eio */ + if (i915_modparams.reset) { + dev_err(i915->drm.dev, + "Failed to idle engines, declaring wedged!\n"); + GEM_TRACE_DUMP(); + } + + /* + * Forcibly cancel outstanding work and leave + * the gpu quiet. + */ + i915_gem_set_wedged(i915); + result = false; + } + } while (i915_retire_requests(i915) && result); + + GEM_BUG_ON(i915->gt.awake); + return result; +} + +bool i915_gem_load_power_context(struct drm_i915_private *i915) +{ + return switch_to_kernel_context_sync(i915); +} + +void i915_gem_suspend(struct drm_i915_private *i915) +{ + GEM_TRACE("\n"); + + intel_wakeref_auto(&i915->mm.userfault_wakeref, 0); + flush_workqueue(i915->wq); + + mutex_lock(&i915->drm.struct_mutex); + + /* + * We have to flush all the executing contexts to main memory so + * that they can saved in the hibernation image. To ensure the last + * context image is coherent, we have to switch away from it. That + * leaves the i915->kernel_context still active when + * we actually suspend, and its image in memory may not match the GPU + * state. Fortunately, the kernel_context is disposable and we do + * not rely on its state. + */ + switch_to_kernel_context_sync(i915); + + mutex_unlock(&i915->drm.struct_mutex); + + /* + * Assert that we successfully flushed all the work and + * reset the GPU back to its idle, low power state. + */ + GEM_BUG_ON(i915->gt.awake); + flush_work(&i915->gem.idle_work); + + cancel_delayed_work_sync(&i915->gpu_error.hangcheck_work); + + i915_gem_drain_freed_objects(i915); + + intel_uc_suspend(i915); +} + +void i915_gem_suspend_late(struct drm_i915_private *i915) +{ + struct drm_i915_gem_object *obj; + struct list_head *phases[] = { + &i915->mm.unbound_list, + &i915->mm.bound_list, + NULL + }, **phase; + + /* + * Neither the BIOS, ourselves or any other kernel + * expects the system to be in execlists mode on startup, + * so we need to reset the GPU back to legacy mode. And the only + * known way to disable logical contexts is through a GPU reset. + * + * So in order to leave the system in a known default configuration, + * always reset the GPU upon unload and suspend. Afterwards we then + * clean up the GEM state tracking, flushing off the requests and + * leaving the system in a known idle state. + * + * Note that is of the upmost importance that the GPU is idle and + * all stray writes are flushed *before* we dismantle the backing + * storage for the pinned objects. + * + * However, since we are uncertain that resetting the GPU on older + * machines is a good idea, we don't - just in case it leaves the + * machine in an unusable condition. 
+ */ + + mutex_lock(&i915->drm.struct_mutex); + for (phase = phases; *phase; phase++) { + list_for_each_entry(obj, *phase, mm.link) + WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false)); + } + mutex_unlock(&i915->drm.struct_mutex); + + intel_uc_sanitize(i915); + i915_gem_sanitize(i915); +} + +void i915_gem_resume(struct drm_i915_private *i915) +{ + GEM_TRACE("\n"); + + WARN_ON(i915->gt.awake); + + mutex_lock(&i915->drm.struct_mutex); + intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL); + + i915_gem_restore_gtt_mappings(i915); + i915_gem_restore_fences(i915); + + /* + * As we didn't flush the kernel context before suspend, we cannot + * guarantee that the context image is complete. So let's just reset + * it and start again. + */ + intel_gt_resume(i915); + + if (i915_gem_init_hw(i915)) + goto err_wedged; + + intel_uc_resume(i915); + + /* Always reload a context for powersaving. */ + if (!i915_gem_load_power_context(i915)) + goto err_wedged; + +out_unlock: + intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL); + mutex_unlock(&i915->drm.struct_mutex); + return; + +err_wedged: + if (!i915_reset_failed(i915)) { + dev_err(i915->drm.dev, + "Failed to re-initialize GPU, declaring it wedged!\n"); + i915_gem_set_wedged(i915); + } + goto out_unlock; +} + +void i915_gem_init__pm(struct drm_i915_private *i915) +{ + INIT_WORK(&i915->gem.idle_work, idle_work_handler); + INIT_DELAYED_WORK(&i915->gem.retire_work, retire_work_handler); + + i915->gem.pm_notifier.notifier_call = pm_notifier; + blocking_notifier_chain_register(&i915->gt.pm_notifications, + &i915->gem.pm_notifier); +} diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.h b/drivers/gpu/drm/i915/gem/i915_gem_pm.h new file mode 100644 index 000000000000..6f7d5d11ac3b --- /dev/null +++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.h @@ -0,0 +1,25 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#ifndef __I915_GEM_PM_H__ +#define __I915_GEM_PM_H__ + +#include + +struct drm_i915_private; +struct work_struct; + +void i915_gem_init__pm(struct drm_i915_private *i915); + +bool i915_gem_load_power_context(struct drm_i915_private *i915); +void i915_gem_resume(struct drm_i915_private *i915); + +void i915_gem_idle_work_handler(struct work_struct *work); + +void i915_gem_suspend(struct drm_i915_private *i915); +void i915_gem_suspend_late(struct drm_i915_private *i915); + +#endif /* __I915_GEM_PM_H__ */ diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c new file mode 100644 index 000000000000..cd42299f019a --- /dev/null +++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c @@ -0,0 +1,555 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2008-2015 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "i915_trace.h" + +static bool shrinker_lock(struct drm_i915_private *i915, + unsigned int flags, + bool *unlock) +{ + struct mutex *m = &i915->drm.struct_mutex; + + switch (mutex_trylock_recursive(m)) { + case MUTEX_TRYLOCK_RECURSIVE: + *unlock = false; + return true; + + case MUTEX_TRYLOCK_FAILED: + *unlock = false; + if (flags & I915_SHRINK_ACTIVE && + mutex_lock_killable_nested(m, I915_MM_SHRINKER) == 0) + *unlock = true; + return *unlock; + + case MUTEX_TRYLOCK_SUCCESS: + *unlock = true; + return true; + } + + BUG(); +} + +static void shrinker_unlock(struct drm_i915_private *i915, bool unlock) +{ + if (!unlock) + return; + + mutex_unlock(&i915->drm.struct_mutex); +} + +static bool 
swap_available(void) +{ + return get_nr_swap_pages() > 0; +} + +static bool can_release_pages(struct drm_i915_gem_object *obj) +{ + /* Consider only shrinkable ojects. */ + if (!i915_gem_object_is_shrinkable(obj)) + return false; + + /* Only report true if by unbinding the object and putting its pages + * we can actually make forward progress towards freeing physical + * pages. + * + * If the pages are pinned for any other reason than being bound + * to the GPU, simply unbinding from the GPU is not going to succeed + * in releasing our pin count on the pages themselves. + */ + if (atomic_read(&obj->mm.pages_pin_count) > obj->bind_count) + return false; + + /* If any vma are "permanently" pinned, it will prevent us from + * reclaiming the obj->mm.pages. We only allow scanout objects to claim + * a permanent pin, along with a few others like the context objects. + * To simplify the scan, and to avoid walking the list of vma under the + * object, we just check the count of its permanently pinned. + */ + if (READ_ONCE(obj->pin_global)) + return false; + + /* We can only return physical pages to the system if we can either + * discard the contents (because the user has marked them as being + * purgeable) or if we can move their contents out to swap. + */ + return swap_available() || obj->mm.madv == I915_MADV_DONTNEED; +} + +static bool unsafe_drop_pages(struct drm_i915_gem_object *obj) +{ + if (i915_gem_object_unbind(obj) == 0) + __i915_gem_object_put_pages(obj, I915_MM_SHRINKER); + return !i915_gem_object_has_pages(obj); +} + +static void try_to_writeback(struct drm_i915_gem_object *obj, + unsigned int flags) +{ + switch (obj->mm.madv) { + case I915_MADV_DONTNEED: + i915_gem_object_truncate(obj); + case __I915_MADV_PURGED: + return; + } + + if (flags & I915_SHRINK_WRITEBACK) + i915_gem_object_writeback(obj); +} + +/** + * i915_gem_shrink - Shrink buffer object caches + * @i915: i915 device + * @target: amount of memory to make available, in pages + * @nr_scanned: optional output for number of pages scanned (incremental) + * @flags: control flags for selecting cache types + * + * This function is the main interface to the shrinker. It will try to release + * up to @target pages of main memory backing storage from buffer objects. + * Selection of the specific caches can be done with @flags. This is e.g. useful + * when purgeable objects should be removed from caches preferentially. + * + * Note that it's not guaranteed that released amount is actually available as + * free system memory - the pages might still be in-used to due to other reasons + * (like cpu mmaps) or the mm core has reused them before we could grab them. + * Therefore code that needs to explicitly shrink buffer objects caches (e.g. to + * avoid deadlocks in memory reclaim) must fall back to i915_gem_shrink_all(). + * + * Also note that any kind of pinning (both per-vma address space pins and + * backing storage pins at the buffer object level) result in the shrinker code + * having to skip the object. + * + * Returns: + * The number of pages of backing storage actually released. 
+ */ +unsigned long +i915_gem_shrink(struct drm_i915_private *i915, + unsigned long target, + unsigned long *nr_scanned, + unsigned flags) +{ + const struct { + struct list_head *list; + unsigned int bit; + } phases[] = { + { &i915->mm.unbound_list, I915_SHRINK_UNBOUND }, + { &i915->mm.bound_list, I915_SHRINK_BOUND }, + { NULL, 0 }, + }, *phase; + intel_wakeref_t wakeref = 0; + unsigned long count = 0; + unsigned long scanned = 0; + bool unlock; + + if (!shrinker_lock(i915, flags, &unlock)) + return 0; + + /* + * When shrinking the active list, also consider active contexts. + * Active contexts are pinned until they are retired, and so can + * not be simply unbound to retire and unpin their pages. To shrink + * the contexts, we must wait until the gpu is idle. + * + * We don't care about errors here; if we cannot wait upon the GPU, + * we will free as much as we can and hope to get a second chance. + */ + if (flags & I915_SHRINK_ACTIVE) + i915_gem_wait_for_idle(i915, + I915_WAIT_LOCKED, + MAX_SCHEDULE_TIMEOUT); + + trace_i915_gem_shrink(i915, target, flags); + i915_retire_requests(i915); + + /* + * Unbinding of objects will require HW access; Let us not wake the + * device just to recover a little memory. If absolutely necessary, + * we will force the wake during oom-notifier. + */ + if (flags & I915_SHRINK_BOUND) { + wakeref = intel_runtime_pm_get_if_in_use(i915); + if (!wakeref) + flags &= ~I915_SHRINK_BOUND; + } + + /* + * As we may completely rewrite the (un)bound list whilst unbinding + * (due to retiring requests) we have to strictly process only + * one element of the list at the time, and recheck the list + * on every iteration. + * + * In particular, we must hold a reference whilst removing the + * object as we may end up waiting for and/or retiring the objects. + * This might release the final reference (held by the active list) + * and result in the object being freed from under us. This is + * similar to the precautions the eviction code must take whilst + * removing objects. + * + * Also note that although these lists do not hold a reference to + * the object we can safely grab one here: The final object + * unreferencing and the bound_list are both protected by the + * dev->struct_mutex and so we won't ever be able to observe an + * object on the bound_list with a reference count equals 0. + */ + for (phase = phases; phase->list; phase++) { + struct list_head still_in_list; + struct drm_i915_gem_object *obj; + + if ((flags & phase->bit) == 0) + continue; + + INIT_LIST_HEAD(&still_in_list); + + /* + * We serialize our access to unreferenced objects through + * the use of the struct_mutex. While the objects are not + * yet freed (due to RCU then a workqueue) we still want + * to be able to shrink their pages, so they remain on + * the unbound/bound list until actually freed. 
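+	 * Each object examined is first moved onto a local still_in_list,
+	 * so obj_lock can be dropped around unsafe_drop_pages() and the
+	 * scan restarted from the list head without revisiting it.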
+ */ + spin_lock(&i915->mm.obj_lock); + while (count < target && + (obj = list_first_entry_or_null(phase->list, + typeof(*obj), + mm.link))) { + list_move_tail(&obj->mm.link, &still_in_list); + + if (flags & I915_SHRINK_PURGEABLE && + obj->mm.madv != I915_MADV_DONTNEED) + continue; + + if (flags & I915_SHRINK_VMAPS && + !is_vmalloc_addr(obj->mm.mapping)) + continue; + + if (!(flags & I915_SHRINK_ACTIVE) && + (i915_gem_object_is_active(obj) || + i915_gem_object_is_framebuffer(obj))) + continue; + + if (!can_release_pages(obj)) + continue; + + spin_unlock(&i915->mm.obj_lock); + + if (unsafe_drop_pages(obj)) { + /* May arrive from get_pages on another bo */ + mutex_lock_nested(&obj->mm.lock, + I915_MM_SHRINKER); + if (!i915_gem_object_has_pages(obj)) { + try_to_writeback(obj, flags); + count += obj->base.size >> PAGE_SHIFT; + } + mutex_unlock(&obj->mm.lock); + } + scanned += obj->base.size >> PAGE_SHIFT; + + spin_lock(&i915->mm.obj_lock); + } + list_splice_tail(&still_in_list, phase->list); + spin_unlock(&i915->mm.obj_lock); + } + + if (flags & I915_SHRINK_BOUND) + intel_runtime_pm_put(i915, wakeref); + + i915_retire_requests(i915); + + shrinker_unlock(i915, unlock); + + if (nr_scanned) + *nr_scanned += scanned; + return count; +} + +/** + * i915_gem_shrink_all - Shrink buffer object caches completely + * @i915: i915 device + * + * This is a simple wraper around i915_gem_shrink() to aggressively shrink all + * caches completely. It also first waits for and retires all outstanding + * requests to also be able to release backing storage for active objects. + * + * This should only be used in code to intentionally quiescent the gpu or as a + * last-ditch effort when memory seems to have run out. + * + * Returns: + * The number of pages of backing storage actually released. + */ +unsigned long i915_gem_shrink_all(struct drm_i915_private *i915) +{ + intel_wakeref_t wakeref; + unsigned long freed = 0; + + with_intel_runtime_pm(i915, wakeref) { + freed = i915_gem_shrink(i915, -1UL, NULL, + I915_SHRINK_BOUND | + I915_SHRINK_UNBOUND | + I915_SHRINK_ACTIVE); + } + + return freed; +} + +static unsigned long +i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc) +{ + struct drm_i915_private *i915 = + container_of(shrinker, struct drm_i915_private, mm.shrinker); + struct drm_i915_gem_object *obj; + unsigned long num_objects = 0; + unsigned long count = 0; + + spin_lock(&i915->mm.obj_lock); + list_for_each_entry(obj, &i915->mm.unbound_list, mm.link) + if (can_release_pages(obj)) { + count += obj->base.size >> PAGE_SHIFT; + num_objects++; + } + + list_for_each_entry(obj, &i915->mm.bound_list, mm.link) + if (!i915_gem_object_is_active(obj) && can_release_pages(obj)) { + count += obj->base.size >> PAGE_SHIFT; + num_objects++; + } + spin_unlock(&i915->mm.obj_lock); + + /* Update our preferred vmscan batch size for the next pass. + * Our rough guess for an effective batch size is roughly 2 + * available GEM objects worth of pages. That is we don't want + * the shrinker to fire, until it is worth the cost of freeing an + * entire GEM object. 
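+	 * For example, with 1000 shrinkable objects covering 512k pages,
+	 * avg below works out to 1024 and the running average settles on
+	 * a batch of roughly 1024 pages, never dropping below the 128
+	 * page SHRINK_BATCH default.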
+ */ + if (num_objects) { + unsigned long avg = 2 * count / num_objects; + + i915->mm.shrinker.batch = + max((i915->mm.shrinker.batch + avg) >> 1, + 128ul /* default SHRINK_BATCH */); + } + + return count; +} + +static unsigned long +i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc) +{ + struct drm_i915_private *i915 = + container_of(shrinker, struct drm_i915_private, mm.shrinker); + unsigned long freed; + bool unlock; + + sc->nr_scanned = 0; + + if (!shrinker_lock(i915, 0, &unlock)) + return SHRINK_STOP; + + freed = i915_gem_shrink(i915, + sc->nr_to_scan, + &sc->nr_scanned, + I915_SHRINK_BOUND | + I915_SHRINK_UNBOUND | + I915_SHRINK_PURGEABLE | + I915_SHRINK_WRITEBACK); + if (sc->nr_scanned < sc->nr_to_scan) + freed += i915_gem_shrink(i915, + sc->nr_to_scan - sc->nr_scanned, + &sc->nr_scanned, + I915_SHRINK_BOUND | + I915_SHRINK_UNBOUND | + I915_SHRINK_WRITEBACK); + if (sc->nr_scanned < sc->nr_to_scan && current_is_kswapd()) { + intel_wakeref_t wakeref; + + with_intel_runtime_pm(i915, wakeref) { + freed += i915_gem_shrink(i915, + sc->nr_to_scan - sc->nr_scanned, + &sc->nr_scanned, + I915_SHRINK_ACTIVE | + I915_SHRINK_BOUND | + I915_SHRINK_UNBOUND | + I915_SHRINK_WRITEBACK); + } + } + + shrinker_unlock(i915, unlock); + + return sc->nr_scanned ? freed : SHRINK_STOP; +} + +static int +i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr) +{ + struct drm_i915_private *i915 = + container_of(nb, struct drm_i915_private, mm.oom_notifier); + struct drm_i915_gem_object *obj; + unsigned long unevictable, bound, unbound, freed_pages; + intel_wakeref_t wakeref; + + freed_pages = 0; + with_intel_runtime_pm(i915, wakeref) + freed_pages += i915_gem_shrink(i915, -1UL, NULL, + I915_SHRINK_BOUND | + I915_SHRINK_UNBOUND | + I915_SHRINK_WRITEBACK); + + /* Because we may be allocating inside our own driver, we cannot + * assert that there are no objects with pinned pages that are not + * being pointed to by hardware. 
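+	 * Instead such pages are tallied as unevictable below and merely
+	 * reported via pr_info() rather than triggering a warning.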
+ */ + unbound = bound = unevictable = 0; + spin_lock(&i915->mm.obj_lock); + list_for_each_entry(obj, &i915->mm.unbound_list, mm.link) { + if (!can_release_pages(obj)) + unevictable += obj->base.size >> PAGE_SHIFT; + else + unbound += obj->base.size >> PAGE_SHIFT; + } + list_for_each_entry(obj, &i915->mm.bound_list, mm.link) { + if (!can_release_pages(obj)) + unevictable += obj->base.size >> PAGE_SHIFT; + else + bound += obj->base.size >> PAGE_SHIFT; + } + spin_unlock(&i915->mm.obj_lock); + + if (freed_pages || unbound || bound) + pr_info("Purging GPU memory, %lu pages freed, " + "%lu pages still pinned.\n", + freed_pages, unevictable); + + *(unsigned long *)ptr += freed_pages; + return NOTIFY_DONE; +} + +static int +i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr) +{ + struct drm_i915_private *i915 = + container_of(nb, struct drm_i915_private, mm.vmap_notifier); + struct i915_vma *vma, *next; + unsigned long freed_pages = 0; + intel_wakeref_t wakeref; + bool unlock; + + if (!shrinker_lock(i915, 0, &unlock)) + return NOTIFY_DONE; + + /* Force everything onto the inactive lists */ + if (i915_gem_wait_for_idle(i915, + I915_WAIT_LOCKED, + MAX_SCHEDULE_TIMEOUT)) + goto out; + + with_intel_runtime_pm(i915, wakeref) + freed_pages += i915_gem_shrink(i915, -1UL, NULL, + I915_SHRINK_BOUND | + I915_SHRINK_UNBOUND | + I915_SHRINK_VMAPS); + + /* We also want to clear any cached iomaps as they wrap vmap */ + mutex_lock(&i915->ggtt.vm.mutex); + list_for_each_entry_safe(vma, next, + &i915->ggtt.vm.bound_list, vm_link) { + unsigned long count = vma->node.size >> PAGE_SHIFT; + + if (!vma->iomap || i915_vma_is_active(vma)) + continue; + + mutex_unlock(&i915->ggtt.vm.mutex); + if (i915_vma_unbind(vma) == 0) + freed_pages += count; + mutex_lock(&i915->ggtt.vm.mutex); + } + mutex_unlock(&i915->ggtt.vm.mutex); + +out: + shrinker_unlock(i915, unlock); + + *(unsigned long *)ptr += freed_pages; + return NOTIFY_DONE; +} + +/** + * i915_gem_shrinker_register - Register the i915 shrinker + * @i915: i915 device + * + * This function registers and sets up the i915 shrinker and OOM handler. + */ +void i915_gem_shrinker_register(struct drm_i915_private *i915) +{ + i915->mm.shrinker.scan_objects = i915_gem_shrinker_scan; + i915->mm.shrinker.count_objects = i915_gem_shrinker_count; + i915->mm.shrinker.seeks = DEFAULT_SEEKS; + i915->mm.shrinker.batch = 4096; + WARN_ON(register_shrinker(&i915->mm.shrinker)); + + i915->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom; + WARN_ON(register_oom_notifier(&i915->mm.oom_notifier)); + + i915->mm.vmap_notifier.notifier_call = i915_gem_shrinker_vmap; + WARN_ON(register_vmap_purge_notifier(&i915->mm.vmap_notifier)); +} + +/** + * i915_gem_shrinker_unregister - Unregisters the i915 shrinker + * @i915: i915 device + * + * This function unregisters the i915 shrinker and OOM handler. 
+ */ +void i915_gem_shrinker_unregister(struct drm_i915_private *i915) +{ + WARN_ON(unregister_vmap_purge_notifier(&i915->mm.vmap_notifier)); + WARN_ON(unregister_oom_notifier(&i915->mm.oom_notifier)); + unregister_shrinker(&i915->mm.shrinker); +} + +void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915, + struct mutex *mutex) +{ + bool unlock = false; + + if (!IS_ENABLED(CONFIG_LOCKDEP)) + return; + + if (!lockdep_is_held_type(&i915->drm.struct_mutex, -1)) { + mutex_acquire(&i915->drm.struct_mutex.dep_map, + I915_MM_NORMAL, 0, _RET_IP_); + unlock = true; + } + + fs_reclaim_acquire(GFP_KERNEL); + + /* + * As we invariably rely on the struct_mutex within the shrinker, + * but have a complicated recursion dance, taint all the mutexes used + * within the shrinker with the struct_mutex. For completeness, we + * taint with all subclass of struct_mutex, even though we should + * only need tainting by I915_MM_NORMAL to catch possible ABBA + * deadlocks from using struct_mutex inside @mutex. + */ + mutex_acquire(&i915->drm.struct_mutex.dep_map, + I915_MM_SHRINKER, 0, _RET_IP_); + + mutex_acquire(&mutex->dep_map, 0, 0, _RET_IP_); + mutex_release(&mutex->dep_map, 0, _RET_IP_); + + mutex_release(&i915->drm.struct_mutex.dep_map, 0, _RET_IP_); + + fs_reclaim_release(GFP_KERNEL); + + if (unlock) + mutex_release(&i915->drm.struct_mutex.dep_map, 0, _RET_IP_); +} diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c new file mode 100644 index 000000000000..9080a736663a --- /dev/null +++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c @@ -0,0 +1,704 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2008-2012 Intel Corporation + */ + +#include +#include + +#include +#include + +#include "i915_drv.h" + +/* + * The BIOS typically reserves some of the system's memory for the exclusive + * use of the integrated graphics. This memory is no longer available for + * use by the OS and so the user finds that his system has less memory + * available than he put in. We refer to this memory as stolen. + * + * The BIOS will allocate its framebuffer from the stolen memory. Our + * goal is try to reuse that object for our own fbcon which must always + * be available for panics. Anything else we can reuse the stolen memory + * for is a boon. 
+ */ + +int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv, + struct drm_mm_node *node, u64 size, + unsigned alignment, u64 start, u64 end) +{ + int ret; + + if (!drm_mm_initialized(&dev_priv->mm.stolen)) + return -ENODEV; + + /* WaSkipStolenMemoryFirstPage:bdw+ */ + if (INTEL_GEN(dev_priv) >= 8 && start < 4096) + start = 4096; + + mutex_lock(&dev_priv->mm.stolen_lock); + ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node, + size, alignment, 0, + start, end, DRM_MM_INSERT_BEST); + mutex_unlock(&dev_priv->mm.stolen_lock); + + return ret; +} + +int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv, + struct drm_mm_node *node, u64 size, + unsigned alignment) +{ + return i915_gem_stolen_insert_node_in_range(dev_priv, node, size, + alignment, 0, U64_MAX); +} + +void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv, + struct drm_mm_node *node) +{ + mutex_lock(&dev_priv->mm.stolen_lock); + drm_mm_remove_node(node); + mutex_unlock(&dev_priv->mm.stolen_lock); +} + +static int i915_adjust_stolen(struct drm_i915_private *dev_priv, + struct resource *dsm) +{ + struct i915_ggtt *ggtt = &dev_priv->ggtt; + struct resource *r; + + if (dsm->start == 0 || dsm->end <= dsm->start) + return -EINVAL; + + /* + * TODO: We have yet too encounter the case where the GTT wasn't at the + * end of stolen. With that assumption we could simplify this. + */ + + /* Make sure we don't clobber the GTT if it's within stolen memory */ + if (INTEL_GEN(dev_priv) <= 4 && + !IS_G33(dev_priv) && !IS_PINEVIEW(dev_priv) && !IS_G4X(dev_priv)) { + struct resource stolen[2] = {*dsm, *dsm}; + struct resource ggtt_res; + resource_size_t ggtt_start; + + ggtt_start = I915_READ(PGTBL_CTL); + if (IS_GEN(dev_priv, 4)) + ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) | + (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28; + else + ggtt_start &= PGTBL_ADDRESS_LO_MASK; + + ggtt_res = + (struct resource) DEFINE_RES_MEM(ggtt_start, + ggtt_total_entries(ggtt) * 4); + + if (ggtt_res.start >= stolen[0].start && ggtt_res.start < stolen[0].end) + stolen[0].end = ggtt_res.start; + if (ggtt_res.end > stolen[1].start && ggtt_res.end <= stolen[1].end) + stolen[1].start = ggtt_res.end; + + /* Pick the larger of the two chunks */ + if (resource_size(&stolen[0]) > resource_size(&stolen[1])) + *dsm = stolen[0]; + else + *dsm = stolen[1]; + + if (stolen[0].start != stolen[1].start || + stolen[0].end != stolen[1].end) { + DRM_DEBUG_DRIVER("GTT within stolen memory at %pR\n", &ggtt_res); + DRM_DEBUG_DRIVER("Stolen memory adjusted to %pR\n", dsm); + } + } + + /* + * Verify that nothing else uses this physical address. Stolen + * memory should be reserved by the BIOS and hidden from the + * kernel. So if the region is already marked as busy, something + * is seriously wrong. + */ + r = devm_request_mem_region(dev_priv->drm.dev, dsm->start, + resource_size(dsm), + "Graphics Stolen Memory"); + if (r == NULL) { + /* + * One more attempt but this time requesting region from + * start + 1, as we have seen that this resolves the region + * conflict with the PCI Bus. + * This is a BIOS w/a: Some BIOS wrap stolen in the root + * PCI bus, but have an off-by-one error. Hence retry the + * reservation starting from 1 instead of 0. + * There's also BIOS with off-by-one on the other end. + */ + r = devm_request_mem_region(dev_priv->drm.dev, dsm->start + 1, + resource_size(dsm) - 2, + "Graphics Stolen Memory"); + /* + * GEN3 firmware likes to smash pci bridges into the stolen + * range. Apparently this works. 
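+		 * On gen3 we therefore tolerate the conflict and continue
+		 * without the reservation rather than failing with -EBUSY.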
+ */ + if (r == NULL && !IS_GEN(dev_priv, 3)) { + DRM_ERROR("conflict detected with stolen region: %pR\n", + dsm); + + return -EBUSY; + } + } + + return 0; +} + +void i915_gem_cleanup_stolen(struct drm_i915_private *dev_priv) +{ + if (!drm_mm_initialized(&dev_priv->mm.stolen)) + return; + + drm_mm_takedown(&dev_priv->mm.stolen); +} + +static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv, + resource_size_t *base, + resource_size_t *size) +{ + u32 reg_val = I915_READ(IS_GM45(dev_priv) ? + CTG_STOLEN_RESERVED : + ELK_STOLEN_RESERVED); + resource_size_t stolen_top = dev_priv->dsm.end + 1; + + DRM_DEBUG_DRIVER("%s_STOLEN_RESERVED = %08x\n", + IS_GM45(dev_priv) ? "CTG" : "ELK", reg_val); + + if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0) + return; + + /* + * Whether ILK really reuses the ELK register for this is unclear. + * Let's see if we catch anyone with this supposedly enabled on ILK. + */ + WARN(IS_GEN(dev_priv, 5), "ILK stolen reserved found? 0x%08x\n", + reg_val); + + if (!(reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK)) + return; + + *base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16; + WARN_ON((reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base); + + *size = stolen_top - *base; +} + +static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv, + resource_size_t *base, + resource_size_t *size) +{ + u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED); + + DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val); + + if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE)) + return; + + *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK; + + switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) { + case GEN6_STOLEN_RESERVED_1M: + *size = 1024 * 1024; + break; + case GEN6_STOLEN_RESERVED_512K: + *size = 512 * 1024; + break; + case GEN6_STOLEN_RESERVED_256K: + *size = 256 * 1024; + break; + case GEN6_STOLEN_RESERVED_128K: + *size = 128 * 1024; + break; + default: + *size = 1024 * 1024; + MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK); + } +} + +static void vlv_get_stolen_reserved(struct drm_i915_private *dev_priv, + resource_size_t *base, + resource_size_t *size) +{ + u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED); + resource_size_t stolen_top = dev_priv->dsm.end + 1; + + DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val); + + if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE)) + return; + + switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) { + default: + MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK); + /* fall through */ + case GEN7_STOLEN_RESERVED_1M: + *size = 1024 * 1024; + break; + } + + /* + * On vlv, the ADDR_MASK portion is left as 0 and HW deduces the + * reserved location as (top - size). 
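+	 * For example, a 1MiB reserved size yields *base = stolen_top - 1MiB below.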
+ */ + *base = stolen_top - *size; +} + +static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv, + resource_size_t *base, + resource_size_t *size) +{ + u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED); + + DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val); + + if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE)) + return; + + *base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK; + + switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) { + case GEN7_STOLEN_RESERVED_1M: + *size = 1024 * 1024; + break; + case GEN7_STOLEN_RESERVED_256K: + *size = 256 * 1024; + break; + default: + *size = 1024 * 1024; + MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK); + } +} + +static void chv_get_stolen_reserved(struct drm_i915_private *dev_priv, + resource_size_t *base, + resource_size_t *size) +{ + u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED); + + DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val); + + if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE)) + return; + + *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK; + + switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) { + case GEN8_STOLEN_RESERVED_1M: + *size = 1024 * 1024; + break; + case GEN8_STOLEN_RESERVED_2M: + *size = 2 * 1024 * 1024; + break; + case GEN8_STOLEN_RESERVED_4M: + *size = 4 * 1024 * 1024; + break; + case GEN8_STOLEN_RESERVED_8M: + *size = 8 * 1024 * 1024; + break; + default: + *size = 8 * 1024 * 1024; + MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK); + } +} + +static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv, + resource_size_t *base, + resource_size_t *size) +{ + u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED); + resource_size_t stolen_top = dev_priv->dsm.end + 1; + + DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val); + + if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE)) + return; + + if (!(reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK)) + return; + + *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK; + *size = stolen_top - *base; +} + +static void icl_get_stolen_reserved(struct drm_i915_private *dev_priv, + resource_size_t *base, + resource_size_t *size) +{ + u64 reg_val = I915_READ64(GEN6_STOLEN_RESERVED); + + DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val); + + *base = reg_val & GEN11_STOLEN_RESERVED_ADDR_MASK; + + switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) { + case GEN8_STOLEN_RESERVED_1M: + *size = 1024 * 1024; + break; + case GEN8_STOLEN_RESERVED_2M: + *size = 2 * 1024 * 1024; + break; + case GEN8_STOLEN_RESERVED_4M: + *size = 4 * 1024 * 1024; + break; + case GEN8_STOLEN_RESERVED_8M: + *size = 8 * 1024 * 1024; + break; + default: + *size = 8 * 1024 * 1024; + MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK); + } +} + +int i915_gem_init_stolen(struct drm_i915_private *dev_priv) +{ + resource_size_t reserved_base, stolen_top; + resource_size_t reserved_total, reserved_size; + + mutex_init(&dev_priv->mm.stolen_lock); + + if (intel_vgpu_active(dev_priv)) { + DRM_INFO("iGVT-g active, disabling use of stolen memory\n"); + return 0; + } + + if (intel_vtd_active() && INTEL_GEN(dev_priv) < 8) { + DRM_INFO("DMAR active, disabling use of stolen memory\n"); + return 0; + } + + if (resource_size(&intel_graphics_stolen_res) == 0) + return 0; + + dev_priv->dsm = intel_graphics_stolen_res; + + if (i915_adjust_stolen(dev_priv, &dev_priv->dsm)) + return 0; + + GEM_BUG_ON(dev_priv->dsm.start == 0); + GEM_BUG_ON(dev_priv->dsm.end <= dev_priv->dsm.start); + + stolen_top = dev_priv->dsm.end + 1; + reserved_base = stolen_top; + reserved_size = 0; + + switch (INTEL_GEN(dev_priv)) { + 
case 2: + case 3: + break; + case 4: + if (!IS_G4X(dev_priv)) + break; + /* fall through */ + case 5: + g4x_get_stolen_reserved(dev_priv, + &reserved_base, &reserved_size); + break; + case 6: + gen6_get_stolen_reserved(dev_priv, + &reserved_base, &reserved_size); + break; + case 7: + if (IS_VALLEYVIEW(dev_priv)) + vlv_get_stolen_reserved(dev_priv, + &reserved_base, &reserved_size); + else + gen7_get_stolen_reserved(dev_priv, + &reserved_base, &reserved_size); + break; + case 8: + case 9: + case 10: + if (IS_LP(dev_priv)) + chv_get_stolen_reserved(dev_priv, + &reserved_base, &reserved_size); + else + bdw_get_stolen_reserved(dev_priv, + &reserved_base, &reserved_size); + break; + case 11: + default: + icl_get_stolen_reserved(dev_priv, &reserved_base, + &reserved_size); + break; + } + + /* + * Our expectation is that the reserved space is at the top of the + * stolen region and *never* at the bottom. If we see !reserved_base, + * it likely means we failed to read the registers correctly. + */ + if (!reserved_base) { + DRM_ERROR("inconsistent reservation %pa + %pa; ignoring\n", + &reserved_base, &reserved_size); + reserved_base = stolen_top; + reserved_size = 0; + } + + dev_priv->dsm_reserved = + (struct resource) DEFINE_RES_MEM(reserved_base, reserved_size); + + if (!resource_contains(&dev_priv->dsm, &dev_priv->dsm_reserved)) { + DRM_ERROR("Stolen reserved area %pR outside stolen memory %pR\n", + &dev_priv->dsm_reserved, &dev_priv->dsm); + return 0; + } + + /* It is possible for the reserved area to end before the end of stolen + * memory, so just consider the start. */ + reserved_total = stolen_top - reserved_base; + + DRM_DEBUG_DRIVER("Memory reserved for graphics device: %lluK, usable: %lluK\n", + (u64)resource_size(&dev_priv->dsm) >> 10, + ((u64)resource_size(&dev_priv->dsm) - reserved_total) >> 10); + + dev_priv->stolen_usable_size = + resource_size(&dev_priv->dsm) - reserved_total; + + /* Basic memrange allocator for stolen space. */ + drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->stolen_usable_size); + + return 0; +} + +static struct sg_table * +i915_pages_create_for_stolen(struct drm_device *dev, + resource_size_t offset, resource_size_t size) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + struct sg_table *st; + struct scatterlist *sg; + + GEM_BUG_ON(range_overflows(offset, size, resource_size(&dev_priv->dsm))); + + /* We hide that we have no struct page backing our stolen object + * by wrapping the contiguous physical allocation with a fake + * dma mapping in a single scatterlist. 
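+	 * The single sg entry below ends up with sg_dma_address() = dsm.start + offset
+	 * and sg_dma_len() = size; sg_page() is never valid for stolen objects.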
+ */ + + st = kmalloc(sizeof(*st), GFP_KERNEL); + if (st == NULL) + return ERR_PTR(-ENOMEM); + + if (sg_alloc_table(st, 1, GFP_KERNEL)) { + kfree(st); + return ERR_PTR(-ENOMEM); + } + + sg = st->sgl; + sg->offset = 0; + sg->length = size; + + sg_dma_address(sg) = (dma_addr_t)dev_priv->dsm.start + offset; + sg_dma_len(sg) = size; + + return st; +} + +static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj) +{ + struct sg_table *pages = + i915_pages_create_for_stolen(obj->base.dev, + obj->stolen->start, + obj->stolen->size); + if (IS_ERR(pages)) + return PTR_ERR(pages); + + __i915_gem_object_set_pages(obj, pages, obj->stolen->size); + + return 0; +} + +static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj, + struct sg_table *pages) +{ + /* Should only be called from i915_gem_object_release_stolen() */ + sg_free_table(pages); + kfree(pages); +} + +static void +i915_gem_object_release_stolen(struct drm_i915_gem_object *obj) +{ + struct drm_i915_private *dev_priv = to_i915(obj->base.dev); + struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen); + + GEM_BUG_ON(!stolen); + + __i915_gem_object_unpin_pages(obj); + + i915_gem_stolen_remove_node(dev_priv, stolen); + kfree(stolen); +} + +static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = { + .get_pages = i915_gem_object_get_pages_stolen, + .put_pages = i915_gem_object_put_pages_stolen, + .release = i915_gem_object_release_stolen, +}; + +static struct drm_i915_gem_object * +_i915_gem_object_create_stolen(struct drm_i915_private *dev_priv, + struct drm_mm_node *stolen) +{ + struct drm_i915_gem_object *obj; + unsigned int cache_level; + + obj = i915_gem_object_alloc(); + if (obj == NULL) + return NULL; + + drm_gem_private_object_init(&dev_priv->drm, &obj->base, stolen->size); + i915_gem_object_init(obj, &i915_gem_object_stolen_ops); + + obj->stolen = stolen; + obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT; + cache_level = HAS_LLC(dev_priv) ? 
I915_CACHE_LLC : I915_CACHE_NONE; + i915_gem_object_set_cache_coherency(obj, cache_level); + + if (i915_gem_object_pin_pages(obj)) + goto cleanup; + + return obj; + +cleanup: + i915_gem_object_free(obj); + return NULL; +} + +struct drm_i915_gem_object * +i915_gem_object_create_stolen(struct drm_i915_private *dev_priv, + resource_size_t size) +{ + struct drm_i915_gem_object *obj; + struct drm_mm_node *stolen; + int ret; + + if (!drm_mm_initialized(&dev_priv->mm.stolen)) + return NULL; + + if (size == 0) + return NULL; + + stolen = kzalloc(sizeof(*stolen), GFP_KERNEL); + if (!stolen) + return NULL; + + ret = i915_gem_stolen_insert_node(dev_priv, stolen, size, 4096); + if (ret) { + kfree(stolen); + return NULL; + } + + obj = _i915_gem_object_create_stolen(dev_priv, stolen); + if (obj) + return obj; + + i915_gem_stolen_remove_node(dev_priv, stolen); + kfree(stolen); + return NULL; +} + +struct drm_i915_gem_object * +i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv, + resource_size_t stolen_offset, + resource_size_t gtt_offset, + resource_size_t size) +{ + struct i915_ggtt *ggtt = &dev_priv->ggtt; + struct drm_i915_gem_object *obj; + struct drm_mm_node *stolen; + struct i915_vma *vma; + int ret; + + if (!drm_mm_initialized(&dev_priv->mm.stolen)) + return NULL; + + lockdep_assert_held(&dev_priv->drm.struct_mutex); + + DRM_DEBUG_DRIVER("creating preallocated stolen object: stolen_offset=%pa, gtt_offset=%pa, size=%pa\n", + &stolen_offset, >t_offset, &size); + + /* KISS and expect everything to be page-aligned */ + if (WARN_ON(size == 0) || + WARN_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)) || + WARN_ON(!IS_ALIGNED(stolen_offset, I915_GTT_MIN_ALIGNMENT))) + return NULL; + + stolen = kzalloc(sizeof(*stolen), GFP_KERNEL); + if (!stolen) + return NULL; + + stolen->start = stolen_offset; + stolen->size = size; + mutex_lock(&dev_priv->mm.stolen_lock); + ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen); + mutex_unlock(&dev_priv->mm.stolen_lock); + if (ret) { + DRM_DEBUG_DRIVER("failed to allocate stolen space\n"); + kfree(stolen); + return NULL; + } + + obj = _i915_gem_object_create_stolen(dev_priv, stolen); + if (obj == NULL) { + DRM_DEBUG_DRIVER("failed to allocate stolen object\n"); + i915_gem_stolen_remove_node(dev_priv, stolen); + kfree(stolen); + return NULL; + } + + /* Some objects just need physical mem from stolen space */ + if (gtt_offset == I915_GTT_OFFSET_NONE) + return obj; + + ret = i915_gem_object_pin_pages(obj); + if (ret) + goto err; + + vma = i915_vma_instance(obj, &ggtt->vm, NULL); + if (IS_ERR(vma)) { + ret = PTR_ERR(vma); + goto err_pages; + } + + /* To simplify the initialisation sequence between KMS and GTT, + * we allow construction of the stolen object prior to + * setting up the GTT space. The actual reservation will occur + * later. 
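+	 * (i915_gem_gtt_reserve() below places the vma node at exactly the
+	 * caller-supplied gtt_offset.)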
+ */ + ret = i915_gem_gtt_reserve(&ggtt->vm, &vma->node, + size, gtt_offset, obj->cache_level, + 0); + if (ret) { + DRM_DEBUG_DRIVER("failed to allocate stolen GTT space\n"); + goto err_pages; + } + + GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); + + vma->pages = obj->mm.pages; + vma->flags |= I915_VMA_GLOBAL_BIND; + __i915_vma_set_map_and_fenceable(vma); + + mutex_lock(&ggtt->vm.mutex); + list_move_tail(&vma->vm_link, &ggtt->vm.bound_list); + mutex_unlock(&ggtt->vm.mutex); + + spin_lock(&dev_priv->mm.obj_lock); + list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list); + obj->bind_count++; + spin_unlock(&dev_priv->mm.obj_lock); + + return obj; + +err_pages: + i915_gem_object_unpin_pages(obj); +err: + i915_gem_object_put(obj); + return NULL; +} diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c new file mode 100644 index 000000000000..ca0c2f451742 --- /dev/null +++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c @@ -0,0 +1,440 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2008 Intel Corporation + */ + +#include +#include +#include + +#include "i915_drv.h" +#include "i915_gem.h" +#include "i915_gem_ioctls.h" +#include "i915_gem_object.h" + +/** + * DOC: buffer object tiling + * + * i915_gem_set_tiling_ioctl() and i915_gem_get_tiling_ioctl() is the userspace + * interface to declare fence register requirements. + * + * In principle GEM doesn't care at all about the internal data layout of an + * object, and hence it also doesn't care about tiling or swizzling. There's two + * exceptions: + * + * - For X and Y tiling the hardware provides detilers for CPU access, so called + * fences. Since there's only a limited amount of them the kernel must manage + * these, and therefore userspace must tell the kernel the object tiling if it + * wants to use fences for detiling. + * - On gen3 and gen4 platforms have a swizzling pattern for tiled objects which + * depends upon the physical page frame number. When swapping such objects the + * page frame number might change and the kernel must be able to fix this up + * and hence now the tiling. Note that on a subset of platforms with + * asymmetric memory channel population the swizzling pattern changes in an + * unknown way, and for those the kernel simply forbids swapping completely. + * + * Since neither of this applies for new tiling layouts on modern platforms like + * W, Ys and Yf tiling GEM only allows object tiling to be set to X or Y tiled. + * Anything else can be handled in userspace entirely without the kernel's + * invovlement. + */ + +/** + * i915_gem_fence_size - required global GTT size for a fence + * @i915: i915 device + * @size: object size + * @tiling: tiling mode + * @stride: tiling stride + * + * Return the required global GTT size for a fence (view of a tiled object), + * taking into account potential fence register mapping. 
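+ *
+ * For example, on gen3 a 3MiB tiled object needs a 4MiB fence region, since
+ * pre-gen4 fence regions must be power-of-two sized (1MiB minimum on gen3).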
+ */ +u32 i915_gem_fence_size(struct drm_i915_private *i915, + u32 size, unsigned int tiling, unsigned int stride) +{ + u32 ggtt_size; + + GEM_BUG_ON(!size); + + if (tiling == I915_TILING_NONE) + return size; + + GEM_BUG_ON(!stride); + + if (INTEL_GEN(i915) >= 4) { + stride *= i915_gem_tile_height(tiling); + GEM_BUG_ON(!IS_ALIGNED(stride, I965_FENCE_PAGE)); + return roundup(size, stride); + } + + /* Previous chips need a power-of-two fence region when tiling */ + if (IS_GEN(i915, 3)) + ggtt_size = 1024*1024; + else + ggtt_size = 512*1024; + + while (ggtt_size < size) + ggtt_size <<= 1; + + return ggtt_size; +} + +/** + * i915_gem_fence_alignment - required global GTT alignment for a fence + * @i915: i915 device + * @size: object size + * @tiling: tiling mode + * @stride: tiling stride + * + * Return the required global GTT alignment for a fence (a view of a tiled + * object), taking into account potential fence register mapping. + */ +u32 i915_gem_fence_alignment(struct drm_i915_private *i915, u32 size, + unsigned int tiling, unsigned int stride) +{ + GEM_BUG_ON(!size); + + /* + * Minimum alignment is 4k (GTT page size), but might be greater + * if a fence register is needed for the object. + */ + if (tiling == I915_TILING_NONE) + return I915_GTT_MIN_ALIGNMENT; + + if (INTEL_GEN(i915) >= 4) + return I965_FENCE_PAGE; + + /* + * Previous chips need to be aligned to the size of the smallest + * fence register that can contain the object. + */ + return i915_gem_fence_size(i915, size, tiling, stride); +} + +/* Check pitch constriants for all chips & tiling formats */ +static bool +i915_tiling_ok(struct drm_i915_gem_object *obj, + unsigned int tiling, unsigned int stride) +{ + struct drm_i915_private *i915 = to_i915(obj->base.dev); + unsigned int tile_width; + + /* Linear is always fine */ + if (tiling == I915_TILING_NONE) + return true; + + if (tiling > I915_TILING_LAST) + return false; + + /* check maximum stride & object size */ + /* i965+ stores the end address of the gtt mapping in the fence + * reg, so dont bother to check the size */ + if (INTEL_GEN(i915) >= 7) { + if (stride / 128 > GEN7_FENCE_MAX_PITCH_VAL) + return false; + } else if (INTEL_GEN(i915) >= 4) { + if (stride / 128 > I965_FENCE_MAX_PITCH_VAL) + return false; + } else { + if (stride > 8192) + return false; + + if (!is_power_of_2(stride)) + return false; + } + + if (IS_GEN(i915, 2) || + (tiling == I915_TILING_Y && HAS_128_BYTE_Y_TILING(i915))) + tile_width = 128; + else + tile_width = 512; + + if (!stride || !IS_ALIGNED(stride, tile_width)) + return false; + + return true; +} + +static bool i915_vma_fence_prepare(struct i915_vma *vma, + int tiling_mode, unsigned int stride) +{ + struct drm_i915_private *i915 = vma->vm->i915; + u32 size, alignment; + + if (!i915_vma_is_map_and_fenceable(vma)) + return true; + + size = i915_gem_fence_size(i915, vma->size, tiling_mode, stride); + if (vma->node.size < size) + return false; + + alignment = i915_gem_fence_alignment(i915, vma->size, tiling_mode, stride); + if (!IS_ALIGNED(vma->node.start, alignment)) + return false; + + return true; +} + +/* Make the current GTT allocation valid for the change in tiling. 
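+ * Any fenceable GGTT vma whose current node no longer satisfies the fence
+ * size/alignment required by the new tiling is unbound so it can be re-placed
+ * on next use.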
*/ +static int +i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj, + int tiling_mode, unsigned int stride) +{ + struct i915_vma *vma; + int ret; + + if (tiling_mode == I915_TILING_NONE) + return 0; + + for_each_ggtt_vma(vma, obj) { + if (i915_vma_fence_prepare(vma, tiling_mode, stride)) + continue; + + ret = i915_vma_unbind(vma); + if (ret) + return ret; + } + + return 0; +} + +int +i915_gem_object_set_tiling(struct drm_i915_gem_object *obj, + unsigned int tiling, unsigned int stride) +{ + struct drm_i915_private *i915 = to_i915(obj->base.dev); + struct i915_vma *vma; + int err; + + /* Make sure we don't cross-contaminate obj->tiling_and_stride */ + BUILD_BUG_ON(I915_TILING_LAST & STRIDE_MASK); + + GEM_BUG_ON(!i915_tiling_ok(obj, tiling, stride)); + GEM_BUG_ON(!stride ^ (tiling == I915_TILING_NONE)); + lockdep_assert_held(&i915->drm.struct_mutex); + + if ((tiling | stride) == obj->tiling_and_stride) + return 0; + + if (i915_gem_object_is_framebuffer(obj)) + return -EBUSY; + + /* We need to rebind the object if its current allocation + * no longer meets the alignment restrictions for its new + * tiling mode. Otherwise we can just leave it alone, but + * need to ensure that any fence register is updated before + * the next fenced (either through the GTT or by the BLT unit + * on older GPUs) access. + * + * After updating the tiling parameters, we then flag whether + * we need to update an associated fence register. Note this + * has to also include the unfenced register the GPU uses + * whilst executing a fenced command for an untiled object. + */ + + err = i915_gem_object_fence_prepare(obj, tiling, stride); + if (err) + return err; + + i915_gem_object_lock(obj); + if (i915_gem_object_is_framebuffer(obj)) { + i915_gem_object_unlock(obj); + return -EBUSY; + } + + /* If the memory has unknown (i.e. varying) swizzling, we pin the + * pages to prevent them being swapped out and causing corruption + * due to the change in swizzling. + */ + mutex_lock(&obj->mm.lock); + if (i915_gem_object_has_pages(obj) && + obj->mm.madv == I915_MADV_WILLNEED && + i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) { + if (tiling == I915_TILING_NONE) { + GEM_BUG_ON(!obj->mm.quirked); + __i915_gem_object_unpin_pages(obj); + obj->mm.quirked = false; + } + if (!i915_gem_object_is_tiled(obj)) { + GEM_BUG_ON(obj->mm.quirked); + __i915_gem_object_pin_pages(obj); + obj->mm.quirked = true; + } + } + mutex_unlock(&obj->mm.lock); + + for_each_ggtt_vma(vma, obj) { + vma->fence_size = + i915_gem_fence_size(i915, vma->size, tiling, stride); + vma->fence_alignment = + i915_gem_fence_alignment(i915, + vma->size, tiling, stride); + + if (vma->fence) + vma->fence->dirty = true; + } + + obj->tiling_and_stride = tiling | stride; + i915_gem_object_unlock(obj); + + /* Force the fence to be reacquired for GTT access */ + i915_gem_object_release_mmap(obj); + + /* Try to preallocate memory required to save swizzling on put-pages */ + if (i915_gem_object_needs_bit17_swizzle(obj)) { + if (!obj->bit_17) { + obj->bit_17 = bitmap_zalloc(obj->base.size >> PAGE_SHIFT, + GFP_KERNEL); + } + } else { + bitmap_free(obj->bit_17); + obj->bit_17 = NULL; + } + + return 0; +} + +/** + * i915_gem_set_tiling_ioctl - IOCTL handler to set tiling mode + * @dev: DRM device + * @data: data pointer for the ioctl + * @file: DRM file for the ioctl call + * + * Sets the tiling mode of an object, returning the required swizzling of + * bit 6 of addresses in the object. + * + * Called by the user via ioctl. 
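+ *
+ * For tiled modes the supplied stride must be non-zero and a multiple of the
+ * tile width (128 bytes on gen2 and for 128-byte Y tiling, 512 bytes otherwise).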
+ * + * Returns: + * Zero on success, negative errno on failure. + */ +int +i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_i915_gem_set_tiling *args = data; + struct drm_i915_gem_object *obj; + int err; + + obj = i915_gem_object_lookup(file, args->handle); + if (!obj) + return -ENOENT; + + /* + * The tiling mode of proxy objects is handled by its generator, and + * not allowed to be changed by userspace. + */ + if (i915_gem_object_is_proxy(obj)) { + err = -ENXIO; + goto err; + } + + if (!i915_tiling_ok(obj, args->tiling_mode, args->stride)) { + err = -EINVAL; + goto err; + } + + if (args->tiling_mode == I915_TILING_NONE) { + args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; + args->stride = 0; + } else { + if (args->tiling_mode == I915_TILING_X) + args->swizzle_mode = to_i915(dev)->mm.bit_6_swizzle_x; + else + args->swizzle_mode = to_i915(dev)->mm.bit_6_swizzle_y; + + /* Hide bit 17 swizzling from the user. This prevents old Mesa + * from aborting the application on sw fallbacks to bit 17, + * and we use the pread/pwrite bit17 paths to swizzle for it. + * If there was a user that was relying on the swizzle + * information for drm_intel_bo_map()ed reads/writes this would + * break it, but we don't have any of those. + */ + if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17) + args->swizzle_mode = I915_BIT_6_SWIZZLE_9; + if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17) + args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10; + + /* If we can't handle the swizzling, make it untiled. */ + if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) { + args->tiling_mode = I915_TILING_NONE; + args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; + args->stride = 0; + } + } + + err = mutex_lock_interruptible(&dev->struct_mutex); + if (err) + goto err; + + err = i915_gem_object_set_tiling(obj, args->tiling_mode, args->stride); + mutex_unlock(&dev->struct_mutex); + + /* We have to maintain this existing ABI... */ + args->stride = i915_gem_object_get_stride(obj); + args->tiling_mode = i915_gem_object_get_tiling(obj); + +err: + i915_gem_object_put(obj); + return err; +} + +/** + * i915_gem_get_tiling_ioctl - IOCTL handler to get tiling mode + * @dev: DRM device + * @data: data pointer for the ioctl + * @file: DRM file for the ioctl call + * + * Returns the current tiling mode and required bit 6 swizzling for the object. + * + * Called by the user via ioctl. + * + * Returns: + * Zero on success, negative errno on failure. 
+ */ +int +i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_i915_gem_get_tiling *args = data; + struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_gem_object *obj; + int err = -ENOENT; + + rcu_read_lock(); + obj = i915_gem_object_lookup_rcu(file, args->handle); + if (obj) { + args->tiling_mode = + READ_ONCE(obj->tiling_and_stride) & TILING_MASK; + err = 0; + } + rcu_read_unlock(); + if (unlikely(err)) + return err; + + switch (args->tiling_mode) { + case I915_TILING_X: + args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x; + break; + case I915_TILING_Y: + args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y; + break; + default: + case I915_TILING_NONE: + args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; + break; + } + + /* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */ + if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) + args->phys_swizzle_mode = I915_BIT_6_SWIZZLE_UNKNOWN; + else + args->phys_swizzle_mode = args->swizzle_mode; + if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17) + args->swizzle_mode = I915_BIT_6_SWIZZLE_9; + if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17) + args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10; + + return 0; +} diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c new file mode 100644 index 000000000000..ccac73b72597 --- /dev/null +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c @@ -0,0 +1,832 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2012-2014 Intel Corporation + */ + +#include +#include +#include +#include +#include + +#include + +#include "i915_gem_ioctls.h" +#include "i915_gem_object.h" +#include "i915_trace.h" +#include "intel_drv.h" + +struct i915_mm_struct { + struct mm_struct *mm; + struct drm_i915_private *i915; + struct i915_mmu_notifier *mn; + struct hlist_node node; + struct kref kref; + struct work_struct work; +}; + +#if defined(CONFIG_MMU_NOTIFIER) +#include + +struct i915_mmu_notifier { + spinlock_t lock; + struct hlist_node node; + struct mmu_notifier mn; + struct rb_root_cached objects; + struct i915_mm_struct *mm; +}; + +struct i915_mmu_object { + struct i915_mmu_notifier *mn; + struct drm_i915_gem_object *obj; + struct interval_tree_node it; +}; + +static void add_object(struct i915_mmu_object *mo) +{ + GEM_BUG_ON(!RB_EMPTY_NODE(&mo->it.rb)); + interval_tree_insert(&mo->it, &mo->mn->objects); +} + +static void del_object(struct i915_mmu_object *mo) +{ + if (RB_EMPTY_NODE(&mo->it.rb)) + return; + + interval_tree_remove(&mo->it, &mo->mn->objects); + RB_CLEAR_NODE(&mo->it.rb); +} + +static void +__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj, bool value) +{ + struct i915_mmu_object *mo = obj->userptr.mmu_object; + + /* + * During mm_invalidate_range we need to cancel any userptr that + * overlaps the range being invalidated. Doing so requires the + * struct_mutex, and that risks recursion. In order to cause + * recursion, the user must alias the userptr address space with + * a GTT mmapping (possible with a MAP_FIXED) - then when we have + * to invalidate that mmaping, mm_invalidate_range is called with + * the userptr address *and* the struct_mutex held. To prevent that + * we set a flag under the i915_mmu_notifier spinlock to indicate + * whether this object is valid. 
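+	 * In practice that "flag" is membership of mn->objects: add_object() and
+	 * del_object() update the interval tree under mo->mn->lock below.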
+ */ + if (!mo) + return; + + spin_lock(&mo->mn->lock); + if (value) + add_object(mo); + else + del_object(mo); + spin_unlock(&mo->mn->lock); +} + +static int +userptr_mn_invalidate_range_start(struct mmu_notifier *_mn, + const struct mmu_notifier_range *range) +{ + struct i915_mmu_notifier *mn = + container_of(_mn, struct i915_mmu_notifier, mn); + struct interval_tree_node *it; + struct mutex *unlock = NULL; + unsigned long end; + int ret = 0; + + if (RB_EMPTY_ROOT(&mn->objects.rb_root)) + return 0; + + /* interval ranges are inclusive, but invalidate range is exclusive */ + end = range->end - 1; + + spin_lock(&mn->lock); + it = interval_tree_iter_first(&mn->objects, range->start, end); + while (it) { + struct drm_i915_gem_object *obj; + + if (!mmu_notifier_range_blockable(range)) { + ret = -EAGAIN; + break; + } + + /* + * The mmu_object is released late when destroying the + * GEM object so it is entirely possible to gain a + * reference on an object in the process of being freed + * since our serialisation is via the spinlock and not + * the struct_mutex - and consequently use it after it + * is freed and then double free it. To prevent that + * use-after-free we only acquire a reference on the + * object if it is not in the process of being destroyed. + */ + obj = container_of(it, struct i915_mmu_object, it)->obj; + if (!kref_get_unless_zero(&obj->base.refcount)) { + it = interval_tree_iter_next(it, range->start, end); + continue; + } + spin_unlock(&mn->lock); + + if (!unlock) { + unlock = &mn->mm->i915->drm.struct_mutex; + + switch (mutex_trylock_recursive(unlock)) { + default: + case MUTEX_TRYLOCK_FAILED: + if (mutex_lock_killable_nested(unlock, I915_MM_SHRINKER)) { + i915_gem_object_put(obj); + return -EINTR; + } + /* fall through */ + case MUTEX_TRYLOCK_SUCCESS: + break; + + case MUTEX_TRYLOCK_RECURSIVE: + unlock = ERR_PTR(-EEXIST); + break; + } + } + + ret = i915_gem_object_unbind(obj); + if (ret == 0) + ret = __i915_gem_object_put_pages(obj, I915_MM_SHRINKER); + i915_gem_object_put(obj); + if (ret) + goto unlock; + + spin_lock(&mn->lock); + + /* + * As we do not (yet) protect the mmu from concurrent insertion + * over this range, there is no guarantee that this search will + * terminate given a pathologic workload. 
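+		 * Each pass therefore restarts the lookup from the beginning of
+		 * the invalidated range once mn->lock has been re-taken.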
+ */ + it = interval_tree_iter_first(&mn->objects, range->start, end); + } + spin_unlock(&mn->lock); + +unlock: + if (!IS_ERR_OR_NULL(unlock)) + mutex_unlock(unlock); + + return ret; + +} + +static const struct mmu_notifier_ops i915_gem_userptr_notifier = { + .invalidate_range_start = userptr_mn_invalidate_range_start, +}; + +static struct i915_mmu_notifier * +i915_mmu_notifier_create(struct i915_mm_struct *mm) +{ + struct i915_mmu_notifier *mn; + + mn = kmalloc(sizeof(*mn), GFP_KERNEL); + if (mn == NULL) + return ERR_PTR(-ENOMEM); + + spin_lock_init(&mn->lock); + mn->mn.ops = &i915_gem_userptr_notifier; + mn->objects = RB_ROOT_CACHED; + mn->mm = mm; + + return mn; +} + +static void +i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj) +{ + struct i915_mmu_object *mo; + + mo = fetch_and_zero(&obj->userptr.mmu_object); + if (!mo) + return; + + spin_lock(&mo->mn->lock); + del_object(mo); + spin_unlock(&mo->mn->lock); + kfree(mo); +} + +static struct i915_mmu_notifier * +i915_mmu_notifier_find(struct i915_mm_struct *mm) +{ + struct i915_mmu_notifier *mn; + int err = 0; + + mn = mm->mn; + if (mn) + return mn; + + mn = i915_mmu_notifier_create(mm); + if (IS_ERR(mn)) + err = PTR_ERR(mn); + + down_write(&mm->mm->mmap_sem); + mutex_lock(&mm->i915->mm_lock); + if (mm->mn == NULL && !err) { + /* Protected by mmap_sem (write-lock) */ + err = __mmu_notifier_register(&mn->mn, mm->mm); + if (!err) { + /* Protected by mm_lock */ + mm->mn = fetch_and_zero(&mn); + } + } else if (mm->mn) { + /* + * Someone else raced and successfully installed the mmu + * notifier, we can cancel our own errors. + */ + err = 0; + } + mutex_unlock(&mm->i915->mm_lock); + up_write(&mm->mm->mmap_sem); + + if (mn && !IS_ERR(mn)) + kfree(mn); + + return err ? ERR_PTR(err) : mm->mn; +} + +static int +i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj, + unsigned flags) +{ + struct i915_mmu_notifier *mn; + struct i915_mmu_object *mo; + + if (flags & I915_USERPTR_UNSYNCHRONIZED) + return capable(CAP_SYS_ADMIN) ? 
0 : -EPERM; + + if (WARN_ON(obj->userptr.mm == NULL)) + return -EINVAL; + + mn = i915_mmu_notifier_find(obj->userptr.mm); + if (IS_ERR(mn)) + return PTR_ERR(mn); + + mo = kzalloc(sizeof(*mo), GFP_KERNEL); + if (!mo) + return -ENOMEM; + + mo->mn = mn; + mo->obj = obj; + mo->it.start = obj->userptr.ptr; + mo->it.last = obj->userptr.ptr + obj->base.size - 1; + RB_CLEAR_NODE(&mo->it.rb); + + obj->userptr.mmu_object = mo; + return 0; +} + +static void +i915_mmu_notifier_free(struct i915_mmu_notifier *mn, + struct mm_struct *mm) +{ + if (mn == NULL) + return; + + mmu_notifier_unregister(&mn->mn, mm); + kfree(mn); +} + +#else + +static void +__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj, bool value) +{ +} + +static void +i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj) +{ +} + +static int +i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj, + unsigned flags) +{ + if ((flags & I915_USERPTR_UNSYNCHRONIZED) == 0) + return -ENODEV; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + return 0; +} + +static void +i915_mmu_notifier_free(struct i915_mmu_notifier *mn, + struct mm_struct *mm) +{ +} + +#endif + +static struct i915_mm_struct * +__i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real) +{ + struct i915_mm_struct *mm; + + /* Protected by dev_priv->mm_lock */ + hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real) + if (mm->mm == real) + return mm; + + return NULL; +} + +static int +i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj) +{ + struct drm_i915_private *dev_priv = to_i915(obj->base.dev); + struct i915_mm_struct *mm; + int ret = 0; + + /* During release of the GEM object we hold the struct_mutex. This + * precludes us from calling mmput() at that time as that may be + * the last reference and so call exit_mmap(). exit_mmap() will + * attempt to reap the vma, and if we were holding a GTT mmap + * would then call drm_gem_vm_close() and attempt to reacquire + * the struct mutex. So in order to avoid that recursion, we have + * to defer releasing the mm reference until after we drop the + * struct_mutex, i.e. we need to schedule a worker to do the clean + * up. 
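+	 * (__i915_mm_struct_free() queues __i915_mm_struct_free__worker(), which
+	 * performs the final mmdrop() from the userptr workqueue.)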
+ */ + mutex_lock(&dev_priv->mm_lock); + mm = __i915_mm_struct_find(dev_priv, current->mm); + if (mm == NULL) { + mm = kmalloc(sizeof(*mm), GFP_KERNEL); + if (mm == NULL) { + ret = -ENOMEM; + goto out; + } + + kref_init(&mm->kref); + mm->i915 = to_i915(obj->base.dev); + + mm->mm = current->mm; + mmgrab(current->mm); + + mm->mn = NULL; + + /* Protected by dev_priv->mm_lock */ + hash_add(dev_priv->mm_structs, + &mm->node, (unsigned long)mm->mm); + } else + kref_get(&mm->kref); + + obj->userptr.mm = mm; +out: + mutex_unlock(&dev_priv->mm_lock); + return ret; +} + +static void +__i915_mm_struct_free__worker(struct work_struct *work) +{ + struct i915_mm_struct *mm = container_of(work, typeof(*mm), work); + i915_mmu_notifier_free(mm->mn, mm->mm); + mmdrop(mm->mm); + kfree(mm); +} + +static void +__i915_mm_struct_free(struct kref *kref) +{ + struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref); + + /* Protected by dev_priv->mm_lock */ + hash_del(&mm->node); + mutex_unlock(&mm->i915->mm_lock); + + INIT_WORK(&mm->work, __i915_mm_struct_free__worker); + queue_work(mm->i915->mm.userptr_wq, &mm->work); +} + +static void +i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj) +{ + if (obj->userptr.mm == NULL) + return; + + kref_put_mutex(&obj->userptr.mm->kref, + __i915_mm_struct_free, + &to_i915(obj->base.dev)->mm_lock); + obj->userptr.mm = NULL; +} + +struct get_pages_work { + struct work_struct work; + struct drm_i915_gem_object *obj; + struct task_struct *task; +}; + +static struct sg_table * +__i915_gem_userptr_alloc_pages(struct drm_i915_gem_object *obj, + struct page **pvec, int num_pages) +{ + unsigned int max_segment = i915_sg_segment_size(); + struct sg_table *st; + unsigned int sg_page_sizes; + int ret; + + st = kmalloc(sizeof(*st), GFP_KERNEL); + if (!st) + return ERR_PTR(-ENOMEM); + +alloc_table: + ret = __sg_alloc_table_from_pages(st, pvec, num_pages, + 0, num_pages << PAGE_SHIFT, + max_segment, + GFP_KERNEL); + if (ret) { + kfree(st); + return ERR_PTR(ret); + } + + ret = i915_gem_gtt_prepare_pages(obj, st); + if (ret) { + sg_free_table(st); + + if (max_segment > PAGE_SIZE) { + max_segment = PAGE_SIZE; + goto alloc_table; + } + + kfree(st); + return ERR_PTR(ret); + } + + sg_page_sizes = i915_sg_page_sizes(st->sgl); + + __i915_gem_object_set_pages(obj, st, sg_page_sizes); + + return st; +} + +static void +__i915_gem_userptr_get_pages_worker(struct work_struct *_work) +{ + struct get_pages_work *work = container_of(_work, typeof(*work), work); + struct drm_i915_gem_object *obj = work->obj; + const int npages = obj->base.size >> PAGE_SHIFT; + struct page **pvec; + int pinned, ret; + + ret = -ENOMEM; + pinned = 0; + + pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL); + if (pvec != NULL) { + struct mm_struct *mm = obj->userptr.mm->mm; + unsigned int flags = 0; + + if (!i915_gem_object_is_readonly(obj)) + flags |= FOLL_WRITE; + + ret = -EFAULT; + if (mmget_not_zero(mm)) { + down_read(&mm->mmap_sem); + while (pinned < npages) { + ret = get_user_pages_remote + (work->task, mm, + obj->userptr.ptr + pinned * PAGE_SIZE, + npages - pinned, + flags, + pvec + pinned, NULL, NULL); + if (ret < 0) + break; + + pinned += ret; + } + up_read(&mm->mmap_sem); + mmput(mm); + } + } + + mutex_lock(&obj->mm.lock); + if (obj->userptr.work == &work->work) { + struct sg_table *pages = ERR_PTR(ret); + + if (pinned == npages) { + pages = __i915_gem_userptr_alloc_pages(obj, pvec, + npages); + if (!IS_ERR(pages)) { + pinned = 0; + pages = NULL; + } + } + + obj->userptr.work = 
ERR_CAST(pages); + if (IS_ERR(pages)) + __i915_gem_userptr_set_active(obj, false); + } + mutex_unlock(&obj->mm.lock); + + release_pages(pvec, pinned); + kvfree(pvec); + + i915_gem_object_put(obj); + put_task_struct(work->task); + kfree(work); +} + +static struct sg_table * +__i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj) +{ + struct get_pages_work *work; + + /* Spawn a worker so that we can acquire the + * user pages without holding our mutex. Access + * to the user pages requires mmap_sem, and we have + * a strict lock ordering of mmap_sem, struct_mutex - + * we already hold struct_mutex here and so cannot + * call gup without encountering a lock inversion. + * + * Userspace will keep on repeating the operation + * (thanks to EAGAIN) until either we hit the fast + * path or the worker completes. If the worker is + * cancelled or superseded, the task is still run + * but the results ignored. (This leads to + * complications that we may have a stray object + * refcount that we need to be wary of when + * checking for existing objects during creation.) + * If the worker encounters an error, it reports + * that error back to this function through + * obj->userptr.work = ERR_PTR. + */ + work = kmalloc(sizeof(*work), GFP_KERNEL); + if (work == NULL) + return ERR_PTR(-ENOMEM); + + obj->userptr.work = &work->work; + + work->obj = i915_gem_object_get(obj); + + work->task = current; + get_task_struct(work->task); + + INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker); + queue_work(to_i915(obj->base.dev)->mm.userptr_wq, &work->work); + + return ERR_PTR(-EAGAIN); +} + +static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj) +{ + const int num_pages = obj->base.size >> PAGE_SHIFT; + struct mm_struct *mm = obj->userptr.mm->mm; + struct page **pvec; + struct sg_table *pages; + bool active; + int pinned; + + /* If userspace should engineer that these pages are replaced in + * the vma between us binding this page into the GTT and completion + * of rendering... Their loss. If they change the mapping of their + * pages they need to create a new bo to point to the new vma. + * + * However, that still leaves open the possibility of the vma + * being copied upon fork. Which falls under the same userspace + * synchronisation issue as a regular bo, except that this time + * the process may not be expecting that a particular piece of + * memory is tied to the GPU. + * + * Fortunately, we can hook into the mmu_notifier in order to + * discard the page references prior to anything nasty happening + * to the vma (discard or cloning) which should prevent the more + * egregious cases from causing harm. 
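+	 * (That hook is userptr_mn_invalidate_range_start() above, which unbinds
+	 * the object and releases its pages when the range is invalidated.)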
+ */ + + if (obj->userptr.work) { + /* active flag should still be held for the pending work */ + if (IS_ERR(obj->userptr.work)) + return PTR_ERR(obj->userptr.work); + else + return -EAGAIN; + } + + pvec = NULL; + pinned = 0; + + if (mm == current->mm) { + pvec = kvmalloc_array(num_pages, sizeof(struct page *), + GFP_KERNEL | + __GFP_NORETRY | + __GFP_NOWARN); + if (pvec) /* defer to worker if malloc fails */ + pinned = __get_user_pages_fast(obj->userptr.ptr, + num_pages, + !i915_gem_object_is_readonly(obj), + pvec); + } + + active = false; + if (pinned < 0) { + pages = ERR_PTR(pinned); + pinned = 0; + } else if (pinned < num_pages) { + pages = __i915_gem_userptr_get_pages_schedule(obj); + active = pages == ERR_PTR(-EAGAIN); + } else { + pages = __i915_gem_userptr_alloc_pages(obj, pvec, num_pages); + active = !IS_ERR(pages); + } + if (active) + __i915_gem_userptr_set_active(obj, true); + + if (IS_ERR(pages)) + release_pages(pvec, pinned); + kvfree(pvec); + + return PTR_ERR_OR_ZERO(pages); +} + +static void +i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj, + struct sg_table *pages) +{ + struct sgt_iter sgt_iter; + struct page *page; + + /* Cancel any inflight work and force them to restart their gup */ + obj->userptr.work = NULL; + __i915_gem_userptr_set_active(obj, false); + if (!pages) + return; + + __i915_gem_object_release_shmem(obj, pages, true); + i915_gem_gtt_finish_pages(obj, pages); + + for_each_sgt_page(page, sgt_iter, pages) { + if (obj->mm.dirty) + set_page_dirty(page); + + mark_page_accessed(page); + put_page(page); + } + obj->mm.dirty = false; + + sg_free_table(pages); + kfree(pages); +} + +static void +i915_gem_userptr_release(struct drm_i915_gem_object *obj) +{ + i915_gem_userptr_release__mmu_notifier(obj); + i915_gem_userptr_release__mm_struct(obj); +} + +static int +i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj) +{ + if (obj->userptr.mmu_object) + return 0; + + return i915_gem_userptr_init__mmu_notifier(obj, 0); +} + +static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = { + .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | + I915_GEM_OBJECT_IS_SHRINKABLE | + I915_GEM_OBJECT_ASYNC_CANCEL, + .get_pages = i915_gem_userptr_get_pages, + .put_pages = i915_gem_userptr_put_pages, + .dmabuf_export = i915_gem_userptr_dmabuf_export, + .release = i915_gem_userptr_release, +}; + +/* + * Creates a new mm object that wraps some normal memory from the process + * context - user memory. + * + * We impose several restrictions upon the memory being mapped + * into the GPU. + * 1. It must be page aligned (both start/end addresses, i.e ptr and size). + * 2. It must be normal system memory, not a pointer into another map of IO + * space (e.g. it must not be a GTT mmapping of another object). + * 3. We only allow a bo as large as we could in theory map into the GTT, + * that is we limit the size to the total size of the GTT. + * 4. The bo is marked as being snoopable. The backing pages are left + * accessible directly by the CPU, but reads and writes by the GPU may + * incur the cost of a snoop (unless you have an LLC architecture). + * + * Synchronisation between multiple users and the GPU is left to userspace + * through the normal set-domain-ioctl. The kernel will enforce that the + * GPU relinquishes the VMA before it is returned back to the system + * i.e. upon free(), munmap() or process termination. 
However, the userspace + * malloc() library may not immediately relinquish the VMA after free() and + * instead reuse it whilst the GPU is still reading and writing to the VMA. + * Caveat emptor. + * + * Also note, that the object created here is not currently a "first class" + * object, in that several ioctls are banned. These are the CPU access + * ioctls: mmap(), pwrite and pread. In practice, you are expected to use + * direct access via your pointer rather than use those ioctls. Another + * restriction is that we do not allow userptr surfaces to be pinned to the + * hardware and so we reject any attempt to create a framebuffer out of a + * userptr. + * + * If you think this is a good interface to use to pass GPU memory between + * drivers, please use dma-buf instead. In fact, wherever possible use + * dma-buf instead. + */ +int +i915_gem_userptr_ioctl(struct drm_device *dev, + void *data, + struct drm_file *file) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_gem_userptr *args = data; + struct drm_i915_gem_object *obj; + int ret; + u32 handle; + + if (!HAS_LLC(dev_priv) && !HAS_SNOOP(dev_priv)) { + /* We cannot support coherent userptr objects on hw without + * LLC and broken snooping. + */ + return -ENODEV; + } + + if (args->flags & ~(I915_USERPTR_READ_ONLY | + I915_USERPTR_UNSYNCHRONIZED)) + return -EINVAL; + + if (!args->user_size) + return -EINVAL; + + if (offset_in_page(args->user_ptr | args->user_size)) + return -EINVAL; + + if (!access_ok((char __user *)(unsigned long)args->user_ptr, args->user_size)) + return -EFAULT; + + if (args->flags & I915_USERPTR_READ_ONLY) { + struct i915_hw_ppgtt *ppgtt; + + /* + * On almost all of the older hw, we cannot tell the GPU that + * a page is readonly. + */ + ppgtt = dev_priv->kernel_context->ppgtt; + if (!ppgtt || !ppgtt->vm.has_read_only) + return -ENODEV; + } + + obj = i915_gem_object_alloc(); + if (obj == NULL) + return -ENOMEM; + + drm_gem_private_object_init(dev, &obj->base, args->user_size); + i915_gem_object_init(obj, &i915_gem_userptr_ops); + obj->read_domains = I915_GEM_DOMAIN_CPU; + obj->write_domain = I915_GEM_DOMAIN_CPU; + i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC); + + obj->userptr.ptr = args->user_ptr; + if (args->flags & I915_USERPTR_READ_ONLY) + i915_gem_object_set_readonly(obj); + + /* And keep a pointer to the current->mm for resolving the user pages + * at binding. This means that we need to hook into the mmu_notifier + * in order to detect if the mmu is destroyed. 
+ */ + ret = i915_gem_userptr_init__mm_struct(obj); + if (ret == 0) + ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags); + if (ret == 0) + ret = drm_gem_handle_create(file, &obj->base, &handle); + + /* drop reference from allocate - handle holds it now */ + i915_gem_object_put(obj); + if (ret) + return ret; + + args->handle = handle; + return 0; +} + +int i915_gem_init_userptr(struct drm_i915_private *dev_priv) +{ + mutex_init(&dev_priv->mm_lock); + hash_init(dev_priv->mm_structs); + + dev_priv->mm.userptr_wq = + alloc_workqueue("i915-userptr-acquire", + WQ_HIGHPRI | WQ_UNBOUND, + 0); + if (!dev_priv->mm.userptr_wq) + return -ENOMEM; + + return 0; +} + +void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv) +{ + destroy_workqueue(dev_priv->mm.userptr_wq); +} diff --git a/drivers/gpu/drm/i915/gem/i915_gemfs.c b/drivers/gpu/drm/i915/gem/i915_gemfs.c new file mode 100644 index 000000000000..099f3397aada --- /dev/null +++ b/drivers/gpu/drm/i915/gem/i915_gemfs.c @@ -0,0 +1,57 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2017 Intel Corporation + */ + +#include +#include +#include + +#include "i915_drv.h" +#include "i915_gemfs.h" + +int i915_gemfs_init(struct drm_i915_private *i915) +{ + struct file_system_type *type; + struct vfsmount *gemfs; + + type = get_fs_type("tmpfs"); + if (!type) + return -ENODEV; + + gemfs = kern_mount(type); + if (IS_ERR(gemfs)) + return PTR_ERR(gemfs); + + /* + * Enable huge-pages for objects that are at least HPAGE_PMD_SIZE, most + * likely 2M. Note that within_size may overallocate huge-pages, if say + * we allocate an object of size 2M + 4K, we may get 2M + 2M, but under + * memory pressure shmem should split any huge-pages which can be + * shrunk. + */ + + if (has_transparent_hugepage()) { + struct super_block *sb = gemfs->mnt_sb; + /* FIXME: Disabled until we get W/A for read BW issue. 
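+		 * The description above suggests the intended mount option is
+		 * "huge=within_size"; until that workaround lands we remount
+		 * with "huge=never" below.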
*/ + char options[] = "huge=never"; + int flags = 0; + int err; + + err = sb->s_op->remount_fs(sb, &flags, options); + if (err) { + kern_unmount(gemfs); + return err; + } + } + + i915->mm.gemfs = gemfs; + + return 0; +} + +void i915_gemfs_fini(struct drm_i915_private *i915) +{ + kern_unmount(i915->mm.gemfs); +} diff --git a/drivers/gpu/drm/i915/gem/i915_gemfs.h b/drivers/gpu/drm/i915/gem/i915_gemfs.h new file mode 100644 index 000000000000..2a1e59af3e4a --- /dev/null +++ b/drivers/gpu/drm/i915/gem/i915_gemfs.h @@ -0,0 +1,16 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2017 Intel Corporation + */ + +#ifndef __I915_GEMFS_H__ +#define __I915_GEMFS_H__ + +struct drm_i915_private; + +int i915_gemfs_init(struct drm_i915_private *i915); + +void i915_gemfs_fini(struct drm_i915_private *i915); + +#endif diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c new file mode 100644 index 000000000000..824f3761314c --- /dev/null +++ b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c @@ -0,0 +1,121 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2016 Intel Corporation + */ + +#include "huge_gem_object.h" + +static void huge_free_pages(struct drm_i915_gem_object *obj, + struct sg_table *pages) +{ + unsigned long nreal = obj->scratch / PAGE_SIZE; + struct scatterlist *sg; + + for (sg = pages->sgl; sg && nreal--; sg = __sg_next(sg)) + __free_page(sg_page(sg)); + + sg_free_table(pages); + kfree(pages); +} + +static int huge_get_pages(struct drm_i915_gem_object *obj) +{ +#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY) + const unsigned long nreal = obj->scratch / PAGE_SIZE; + const unsigned long npages = obj->base.size / PAGE_SIZE; + struct scatterlist *sg, *src, *end; + struct sg_table *pages; + unsigned long n; + + pages = kmalloc(sizeof(*pages), GFP); + if (!pages) + return -ENOMEM; + + if (sg_alloc_table(pages, npages, GFP)) { + kfree(pages); + return -ENOMEM; + } + + sg = pages->sgl; + for (n = 0; n < nreal; n++) { + struct page *page; + + page = alloc_page(GFP | __GFP_HIGHMEM); + if (!page) { + sg_mark_end(sg); + goto err; + } + + sg_set_page(sg, page, PAGE_SIZE, 0); + sg = __sg_next(sg); + } + if (nreal < npages) { + for (end = sg, src = pages->sgl; sg; sg = __sg_next(sg)) { + sg_set_page(sg, sg_page(src), PAGE_SIZE, 0); + src = __sg_next(src); + if (src == end) + src = pages->sgl; + } + } + + if (i915_gem_gtt_prepare_pages(obj, pages)) + goto err; + + __i915_gem_object_set_pages(obj, pages, PAGE_SIZE); + + return 0; + +err: + huge_free_pages(obj, pages); + + return -ENOMEM; +#undef GFP +} + +static void huge_put_pages(struct drm_i915_gem_object *obj, + struct sg_table *pages) +{ + i915_gem_gtt_finish_pages(obj, pages); + huge_free_pages(obj, pages); + + obj->mm.dirty = false; +} + +static const struct drm_i915_gem_object_ops huge_ops = { + .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | + I915_GEM_OBJECT_IS_SHRINKABLE, + .get_pages = huge_get_pages, + .put_pages = huge_put_pages, +}; + +struct drm_i915_gem_object * +huge_gem_object(struct drm_i915_private *i915, + phys_addr_t phys_size, + dma_addr_t dma_size) +{ + struct drm_i915_gem_object *obj; + unsigned int cache_level; + + GEM_BUG_ON(!phys_size || phys_size > dma_size); + GEM_BUG_ON(!IS_ALIGNED(phys_size, PAGE_SIZE)); + GEM_BUG_ON(!IS_ALIGNED(dma_size, I915_GTT_PAGE_SIZE)); + + if (overflows_type(dma_size, obj->base.size)) + return ERR_PTR(-E2BIG); + + obj = i915_gem_object_alloc(); + if (!obj) + return ERR_PTR(-ENOMEM); + + 
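+	/* Report dma_size as the GEM object size; only phys_size bytes of real
+	 * pages are ever allocated (see huge_get_pages()), the remaining sg
+	 * entries alias those pages.
+	 */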
drm_gem_private_object_init(&i915->drm, &obj->base, dma_size); + i915_gem_object_init(obj, &huge_ops); + + obj->read_domains = I915_GEM_DOMAIN_CPU; + obj->write_domain = I915_GEM_DOMAIN_CPU; + cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE; + i915_gem_object_set_cache_coherency(obj, cache_level); + obj->scratch = phys_size; + + return obj; +} diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.h b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.h new file mode 100644 index 000000000000..549c1394bcdc --- /dev/null +++ b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.h @@ -0,0 +1,27 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2016 Intel Corporation + */ + +#ifndef __HUGE_GEM_OBJECT_H +#define __HUGE_GEM_OBJECT_H + +struct drm_i915_gem_object * +huge_gem_object(struct drm_i915_private *i915, + phys_addr_t phys_size, + dma_addr_t dma_size); + +static inline phys_addr_t +huge_gem_object_phys_size(struct drm_i915_gem_object *obj) +{ + return obj->scratch; +} + +static inline dma_addr_t +huge_gem_object_dma_size(struct drm_i915_gem_object *obj) +{ + return obj->base.size; +} + +#endif /* !__HUGE_GEM_OBJECT_H */ diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c new file mode 100644 index 000000000000..7b437f06a9be --- /dev/null +++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c @@ -0,0 +1,1780 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2017 Intel Corporation + */ + +#include + +#include "i915_selftest.h" + +#include "gem/i915_gem_pm.h" + +#include "igt_gem_utils.h" +#include "mock_context.h" + +#include "selftests/mock_drm.h" +#include "selftests/mock_gem_device.h" +#include "selftests/i915_random.h" + +static const unsigned int page_sizes[] = { + I915_GTT_PAGE_SIZE_2M, + I915_GTT_PAGE_SIZE_64K, + I915_GTT_PAGE_SIZE_4K, +}; + +static unsigned int get_largest_page_size(struct drm_i915_private *i915, + u64 rem) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(page_sizes); ++i) { + unsigned int page_size = page_sizes[i]; + + if (HAS_PAGE_SIZES(i915, page_size) && rem >= page_size) + return page_size; + } + + return 0; +} + +static void huge_pages_free_pages(struct sg_table *st) +{ + struct scatterlist *sg; + + for (sg = st->sgl; sg; sg = __sg_next(sg)) { + if (sg_page(sg)) + __free_pages(sg_page(sg), get_order(sg->length)); + } + + sg_free_table(st); + kfree(st); +} + +static int get_huge_pages(struct drm_i915_gem_object *obj) +{ +#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY) + unsigned int page_mask = obj->mm.page_mask; + struct sg_table *st; + struct scatterlist *sg; + unsigned int sg_page_sizes; + u64 rem; + + st = kmalloc(sizeof(*st), GFP); + if (!st) + return -ENOMEM; + + if (sg_alloc_table(st, obj->base.size >> PAGE_SHIFT, GFP)) { + kfree(st); + return -ENOMEM; + } + + rem = obj->base.size; + sg = st->sgl; + st->nents = 0; + sg_page_sizes = 0; + + /* + * Our goal here is simple, we want to greedily fill the object from + * largest to smallest page-size, while ensuring that we use *every* + * page-size as per the given page-mask. 
+ */ + do { + unsigned int bit = ilog2(page_mask); + unsigned int page_size = BIT(bit); + int order = get_order(page_size); + + do { + struct page *page; + + GEM_BUG_ON(order >= MAX_ORDER); + page = alloc_pages(GFP | __GFP_ZERO, order); + if (!page) + goto err; + + sg_set_page(sg, page, page_size, 0); + sg_page_sizes |= page_size; + st->nents++; + + rem -= page_size; + if (!rem) { + sg_mark_end(sg); + break; + } + + sg = __sg_next(sg); + } while ((rem - ((page_size-1) & page_mask)) >= page_size); + + page_mask &= (page_size-1); + } while (page_mask); + + if (i915_gem_gtt_prepare_pages(obj, st)) + goto err; + + obj->mm.madv = I915_MADV_DONTNEED; + + GEM_BUG_ON(sg_page_sizes != obj->mm.page_mask); + __i915_gem_object_set_pages(obj, st, sg_page_sizes); + + return 0; + +err: + sg_set_page(sg, NULL, 0, 0); + sg_mark_end(sg); + huge_pages_free_pages(st); + + return -ENOMEM; +} + +static void put_huge_pages(struct drm_i915_gem_object *obj, + struct sg_table *pages) +{ + i915_gem_gtt_finish_pages(obj, pages); + huge_pages_free_pages(pages); + + obj->mm.dirty = false; + obj->mm.madv = I915_MADV_WILLNEED; +} + +static const struct drm_i915_gem_object_ops huge_page_ops = { + .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | + I915_GEM_OBJECT_IS_SHRINKABLE, + .get_pages = get_huge_pages, + .put_pages = put_huge_pages, +}; + +static struct drm_i915_gem_object * +huge_pages_object(struct drm_i915_private *i915, + u64 size, + unsigned int page_mask) +{ + struct drm_i915_gem_object *obj; + + GEM_BUG_ON(!size); + GEM_BUG_ON(!IS_ALIGNED(size, BIT(__ffs(page_mask)))); + + if (size >> PAGE_SHIFT > INT_MAX) + return ERR_PTR(-E2BIG); + + if (overflows_type(size, obj->base.size)) + return ERR_PTR(-E2BIG); + + obj = i915_gem_object_alloc(); + if (!obj) + return ERR_PTR(-ENOMEM); + + drm_gem_private_object_init(&i915->drm, &obj->base, size); + i915_gem_object_init(obj, &huge_page_ops); + + obj->write_domain = I915_GEM_DOMAIN_CPU; + obj->read_domains = I915_GEM_DOMAIN_CPU; + obj->cache_level = I915_CACHE_NONE; + + obj->mm.page_mask = page_mask; + + return obj; +} + +static int fake_get_huge_pages(struct drm_i915_gem_object *obj) +{ + struct drm_i915_private *i915 = to_i915(obj->base.dev); + const u64 max_len = rounddown_pow_of_two(UINT_MAX); + struct sg_table *st; + struct scatterlist *sg; + unsigned int sg_page_sizes; + u64 rem; + + st = kmalloc(sizeof(*st), GFP); + if (!st) + return -ENOMEM; + + if (sg_alloc_table(st, obj->base.size >> PAGE_SHIFT, GFP)) { + kfree(st); + return -ENOMEM; + } + + /* Use optimal page sized chunks to fill in the sg table */ + rem = obj->base.size; + sg = st->sgl; + st->nents = 0; + sg_page_sizes = 0; + do { + unsigned int page_size = get_largest_page_size(i915, rem); + unsigned int len = min(page_size * div_u64(rem, page_size), + max_len); + + GEM_BUG_ON(!page_size); + + sg->offset = 0; + sg->length = len; + sg_dma_len(sg) = len; + sg_dma_address(sg) = page_size; + + sg_page_sizes |= len; + + st->nents++; + + rem -= len; + if (!rem) { + sg_mark_end(sg); + break; + } + + sg = sg_next(sg); + } while (1); + + i915_sg_trim(st); + + obj->mm.madv = I915_MADV_DONTNEED; + + __i915_gem_object_set_pages(obj, st, sg_page_sizes); + + return 0; +} + +static int fake_get_huge_pages_single(struct drm_i915_gem_object *obj) +{ + struct drm_i915_private *i915 = to_i915(obj->base.dev); + struct sg_table *st; + struct scatterlist *sg; + unsigned int page_size; + + st = kmalloc(sizeof(*st), GFP); + if (!st) + return -ENOMEM; + + if (sg_alloc_table(st, 1, GFP)) { + kfree(st); + return -ENOMEM; + } + + sg = 
st->sgl; + st->nents = 1; + + page_size = get_largest_page_size(i915, obj->base.size); + GEM_BUG_ON(!page_size); + + sg->offset = 0; + sg->length = obj->base.size; + sg_dma_len(sg) = obj->base.size; + sg_dma_address(sg) = page_size; + + obj->mm.madv = I915_MADV_DONTNEED; + + __i915_gem_object_set_pages(obj, st, sg->length); + + return 0; +#undef GFP +} + +static void fake_free_huge_pages(struct drm_i915_gem_object *obj, + struct sg_table *pages) +{ + sg_free_table(pages); + kfree(pages); +} + +static void fake_put_huge_pages(struct drm_i915_gem_object *obj, + struct sg_table *pages) +{ + fake_free_huge_pages(obj, pages); + obj->mm.dirty = false; + obj->mm.madv = I915_MADV_WILLNEED; +} + +static const struct drm_i915_gem_object_ops fake_ops = { + .flags = I915_GEM_OBJECT_IS_SHRINKABLE, + .get_pages = fake_get_huge_pages, + .put_pages = fake_put_huge_pages, +}; + +static const struct drm_i915_gem_object_ops fake_ops_single = { + .flags = I915_GEM_OBJECT_IS_SHRINKABLE, + .get_pages = fake_get_huge_pages_single, + .put_pages = fake_put_huge_pages, +}; + +static struct drm_i915_gem_object * +fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single) +{ + struct drm_i915_gem_object *obj; + + GEM_BUG_ON(!size); + GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)); + + if (size >> PAGE_SHIFT > UINT_MAX) + return ERR_PTR(-E2BIG); + + if (overflows_type(size, obj->base.size)) + return ERR_PTR(-E2BIG); + + obj = i915_gem_object_alloc(); + if (!obj) + return ERR_PTR(-ENOMEM); + + drm_gem_private_object_init(&i915->drm, &obj->base, size); + + if (single) + i915_gem_object_init(obj, &fake_ops_single); + else + i915_gem_object_init(obj, &fake_ops); + + obj->write_domain = I915_GEM_DOMAIN_CPU; + obj->read_domains = I915_GEM_DOMAIN_CPU; + obj->cache_level = I915_CACHE_NONE; + + return obj; +} + +static int igt_check_page_sizes(struct i915_vma *vma) +{ + struct drm_i915_private *i915 = vma->vm->i915; + unsigned int supported = INTEL_INFO(i915)->page_sizes; + struct drm_i915_gem_object *obj = vma->obj; + int err = 0; + + if (!HAS_PAGE_SIZES(i915, vma->page_sizes.sg)) { + pr_err("unsupported page_sizes.sg=%u, supported=%u\n", + vma->page_sizes.sg & ~supported, supported); + err = -EINVAL; + } + + if (!HAS_PAGE_SIZES(i915, vma->page_sizes.gtt)) { + pr_err("unsupported page_sizes.gtt=%u, supported=%u\n", + vma->page_sizes.gtt & ~supported, supported); + err = -EINVAL; + } + + if (vma->page_sizes.phys != obj->mm.page_sizes.phys) { + pr_err("vma->page_sizes.phys(%u) != obj->mm.page_sizes.phys(%u)\n", + vma->page_sizes.phys, obj->mm.page_sizes.phys); + err = -EINVAL; + } + + if (vma->page_sizes.sg != obj->mm.page_sizes.sg) { + pr_err("vma->page_sizes.sg(%u) != obj->mm.page_sizes.sg(%u)\n", + vma->page_sizes.sg, obj->mm.page_sizes.sg); + err = -EINVAL; + } + + if (obj->mm.page_sizes.gtt) { + pr_err("obj->page_sizes.gtt(%u) should never be set\n", + obj->mm.page_sizes.gtt); + err = -EINVAL; + } + + return err; +} + +static int igt_mock_exhaust_device_supported_pages(void *arg) +{ + struct i915_hw_ppgtt *ppgtt = arg; + struct drm_i915_private *i915 = ppgtt->vm.i915; + unsigned int saved_mask = INTEL_INFO(i915)->page_sizes; + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + int i, j, single; + int err; + + /* + * Sanity check creating objects with every valid page support + * combination for our mock device. 
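+	 * With page_sizes[] = {2M, 64K, 4K} this means walking all seven
+	 * non-empty combinations, rewriting the mock device's supported mask
+	 * for each one.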
+ */ + + for (i = 1; i < BIT(ARRAY_SIZE(page_sizes)); i++) { + unsigned int combination = 0; + + for (j = 0; j < ARRAY_SIZE(page_sizes); j++) { + if (i & BIT(j)) + combination |= page_sizes[j]; + } + + mkwrite_device_info(i915)->page_sizes = combination; + + for (single = 0; single <= 1; ++single) { + obj = fake_huge_pages_object(i915, combination, !!single); + if (IS_ERR(obj)) { + err = PTR_ERR(obj); + goto out_device; + } + + if (obj->base.size != combination) { + pr_err("obj->base.size=%zu, expected=%u\n", + obj->base.size, combination); + err = -EINVAL; + goto out_put; + } + + vma = i915_vma_instance(obj, &ppgtt->vm, NULL); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto out_put; + } + + err = i915_vma_pin(vma, 0, 0, PIN_USER); + if (err) + goto out_close; + + err = igt_check_page_sizes(vma); + + if (vma->page_sizes.sg != combination) { + pr_err("page_sizes.sg=%u, expected=%u\n", + vma->page_sizes.sg, combination); + err = -EINVAL; + } + + i915_vma_unpin(vma); + i915_vma_close(vma); + + i915_gem_object_put(obj); + + if (err) + goto out_device; + } + } + + goto out_device; + +out_close: + i915_vma_close(vma); +out_put: + i915_gem_object_put(obj); +out_device: + mkwrite_device_info(i915)->page_sizes = saved_mask; + + return err; +} + +static int igt_mock_ppgtt_misaligned_dma(void *arg) +{ + struct i915_hw_ppgtt *ppgtt = arg; + struct drm_i915_private *i915 = ppgtt->vm.i915; + unsigned long supported = INTEL_INFO(i915)->page_sizes; + struct drm_i915_gem_object *obj; + int bit; + int err; + + /* + * Sanity check dma misalignment for huge pages -- the dma addresses we + * insert into the paging structures need to always respect the page + * size alignment. + */ + + bit = ilog2(I915_GTT_PAGE_SIZE_64K); + + for_each_set_bit_from(bit, &supported, + ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) { + IGT_TIMEOUT(end_time); + unsigned int page_size = BIT(bit); + unsigned int flags = PIN_USER | PIN_OFFSET_FIXED; + unsigned int offset; + unsigned int size = + round_up(page_size, I915_GTT_PAGE_SIZE_2M) << 1; + struct i915_vma *vma; + + obj = fake_huge_pages_object(i915, size, true); + if (IS_ERR(obj)) + return PTR_ERR(obj); + + if (obj->base.size != size) { + pr_err("obj->base.size=%zu, expected=%u\n", + obj->base.size, size); + err = -EINVAL; + goto out_put; + } + + err = i915_gem_object_pin_pages(obj); + if (err) + goto out_put; + + /* Force the page size for this object */ + obj->mm.page_sizes.sg = page_size; + + vma = i915_vma_instance(obj, &ppgtt->vm, NULL); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto out_unpin; + } + + err = i915_vma_pin(vma, 0, 0, flags); + if (err) { + i915_vma_close(vma); + goto out_unpin; + } + + + err = igt_check_page_sizes(vma); + + if (vma->page_sizes.gtt != page_size) { + pr_err("page_sizes.gtt=%u, expected %u\n", + vma->page_sizes.gtt, page_size); + err = -EINVAL; + } + + i915_vma_unpin(vma); + + if (err) { + i915_vma_close(vma); + goto out_unpin; + } + + /* + * Try all the other valid offsets until the next + * boundary -- should always fall back to using 4K + * pages. 
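+			 * e.g. for a 64K page we re-pin at offsets 4K, 8K, ..., 60K;
+			 * none of these are 64K aligned, so the GTT insertion has to
+			 * use 4K entries.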
+ */ + for (offset = 4096; offset < page_size; offset += 4096) { + err = i915_vma_unbind(vma); + if (err) { + i915_vma_close(vma); + goto out_unpin; + } + + err = i915_vma_pin(vma, 0, 0, flags | offset); + if (err) { + i915_vma_close(vma); + goto out_unpin; + } + + err = igt_check_page_sizes(vma); + + if (vma->page_sizes.gtt != I915_GTT_PAGE_SIZE_4K) { + pr_err("page_sizes.gtt=%u, expected %llu\n", + vma->page_sizes.gtt, I915_GTT_PAGE_SIZE_4K); + err = -EINVAL; + } + + i915_vma_unpin(vma); + + if (err) { + i915_vma_close(vma); + goto out_unpin; + } + + if (igt_timeout(end_time, + "%s timed out at offset %x with page-size %x\n", + __func__, offset, page_size)) + break; + } + + i915_vma_close(vma); + + i915_gem_object_unpin_pages(obj); + __i915_gem_object_put_pages(obj, I915_MM_NORMAL); + i915_gem_object_put(obj); + } + + return 0; + +out_unpin: + i915_gem_object_unpin_pages(obj); +out_put: + i915_gem_object_put(obj); + + return err; +} + +static void close_object_list(struct list_head *objects, + struct i915_hw_ppgtt *ppgtt) +{ + struct drm_i915_gem_object *obj, *on; + + list_for_each_entry_safe(obj, on, objects, st_link) { + struct i915_vma *vma; + + vma = i915_vma_instance(obj, &ppgtt->vm, NULL); + if (!IS_ERR(vma)) + i915_vma_close(vma); + + list_del(&obj->st_link); + i915_gem_object_unpin_pages(obj); + __i915_gem_object_put_pages(obj, I915_MM_NORMAL); + i915_gem_object_put(obj); + } +} + +static int igt_mock_ppgtt_huge_fill(void *arg) +{ + struct i915_hw_ppgtt *ppgtt = arg; + struct drm_i915_private *i915 = ppgtt->vm.i915; + unsigned long max_pages = ppgtt->vm.total >> PAGE_SHIFT; + unsigned long page_num; + bool single = false; + LIST_HEAD(objects); + IGT_TIMEOUT(end_time); + int err = -ENODEV; + + for_each_prime_number_from(page_num, 1, max_pages) { + struct drm_i915_gem_object *obj; + u64 size = page_num << PAGE_SHIFT; + struct i915_vma *vma; + unsigned int expected_gtt = 0; + int i; + + obj = fake_huge_pages_object(i915, size, single); + if (IS_ERR(obj)) { + err = PTR_ERR(obj); + break; + } + + if (obj->base.size != size) { + pr_err("obj->base.size=%zd, expected=%llu\n", + obj->base.size, size); + i915_gem_object_put(obj); + err = -EINVAL; + break; + } + + err = i915_gem_object_pin_pages(obj); + if (err) { + i915_gem_object_put(obj); + break; + } + + list_add(&obj->st_link, &objects); + + vma = i915_vma_instance(obj, &ppgtt->vm, NULL); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + break; + } + + err = i915_vma_pin(vma, 0, 0, PIN_USER); + if (err) + break; + + err = igt_check_page_sizes(vma); + if (err) { + i915_vma_unpin(vma); + break; + } + + /* + * Figure out the expected gtt page size knowing that we go from + * largest to smallest page size sg chunks, and that we align to + * the largest page size. 
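+		 * e.g. with 2M/64K/4K all supported, a 2M + 64K object should end
+		 * up with 2M and 64K GTT pages, while a 2M + 4K object must fall
+		 * back to 4K pages for the tail.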
+ */ + for (i = 0; i < ARRAY_SIZE(page_sizes); ++i) { + unsigned int page_size = page_sizes[i]; + + if (HAS_PAGE_SIZES(i915, page_size) && + size >= page_size) { + expected_gtt |= page_size; + size &= page_size-1; + } + } + + GEM_BUG_ON(!expected_gtt); + GEM_BUG_ON(size); + + if (expected_gtt & I915_GTT_PAGE_SIZE_4K) + expected_gtt &= ~I915_GTT_PAGE_SIZE_64K; + + i915_vma_unpin(vma); + + if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) { + if (!IS_ALIGNED(vma->node.start, + I915_GTT_PAGE_SIZE_2M)) { + pr_err("node.start(%llx) not aligned to 2M\n", + vma->node.start); + err = -EINVAL; + break; + } + + if (!IS_ALIGNED(vma->node.size, + I915_GTT_PAGE_SIZE_2M)) { + pr_err("node.size(%llx) not aligned to 2M\n", + vma->node.size); + err = -EINVAL; + break; + } + } + + if (vma->page_sizes.gtt != expected_gtt) { + pr_err("gtt=%u, expected=%u, size=%zd, single=%s\n", + vma->page_sizes.gtt, expected_gtt, + obj->base.size, yesno(!!single)); + err = -EINVAL; + break; + } + + if (igt_timeout(end_time, + "%s timed out at size %zd\n", + __func__, obj->base.size)) + break; + + single = !single; + } + + close_object_list(&objects, ppgtt); + + if (err == -ENOMEM || err == -ENOSPC) + err = 0; + + return err; +} + +static int igt_mock_ppgtt_64K(void *arg) +{ + struct i915_hw_ppgtt *ppgtt = arg; + struct drm_i915_private *i915 = ppgtt->vm.i915; + struct drm_i915_gem_object *obj; + const struct object_info { + unsigned int size; + unsigned int gtt; + unsigned int offset; + } objects[] = { + /* Cases with forced padding/alignment */ + { + .size = SZ_64K, + .gtt = I915_GTT_PAGE_SIZE_64K, + .offset = 0, + }, + { + .size = SZ_64K + SZ_4K, + .gtt = I915_GTT_PAGE_SIZE_4K, + .offset = 0, + }, + { + .size = SZ_64K - SZ_4K, + .gtt = I915_GTT_PAGE_SIZE_4K, + .offset = 0, + }, + { + .size = SZ_2M, + .gtt = I915_GTT_PAGE_SIZE_64K, + .offset = 0, + }, + { + .size = SZ_2M - SZ_4K, + .gtt = I915_GTT_PAGE_SIZE_4K, + .offset = 0, + }, + { + .size = SZ_2M + SZ_4K, + .gtt = I915_GTT_PAGE_SIZE_64K | I915_GTT_PAGE_SIZE_4K, + .offset = 0, + }, + { + .size = SZ_2M + SZ_64K, + .gtt = I915_GTT_PAGE_SIZE_64K, + .offset = 0, + }, + { + .size = SZ_2M - SZ_64K, + .gtt = I915_GTT_PAGE_SIZE_64K, + .offset = 0, + }, + /* Try without any forced padding/alignment */ + { + .size = SZ_64K, + .offset = SZ_2M, + .gtt = I915_GTT_PAGE_SIZE_4K, + }, + { + .size = SZ_128K, + .offset = SZ_2M - SZ_64K, + .gtt = I915_GTT_PAGE_SIZE_4K, + }, + }; + struct i915_vma *vma; + int i, single; + int err; + + /* + * Sanity check some of the trickiness with 64K pages -- either we can + * safely mark the whole page-table(2M block) as 64K, or we have to + * always fallback to 4K. + */ + + if (!HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K)) + return 0; + + for (i = 0; i < ARRAY_SIZE(objects); ++i) { + unsigned int size = objects[i].size; + unsigned int expected_gtt = objects[i].gtt; + unsigned int offset = objects[i].offset; + unsigned int flags = PIN_USER; + + for (single = 0; single <= 1; single++) { + obj = fake_huge_pages_object(i915, size, !!single); + if (IS_ERR(obj)) + return PTR_ERR(obj); + + err = i915_gem_object_pin_pages(obj); + if (err) + goto out_object_put; + + /* + * Disable 2M pages -- We only want to use 64K/4K pages + * for this test. 
+ */ + obj->mm.page_sizes.sg &= ~I915_GTT_PAGE_SIZE_2M; + + vma = i915_vma_instance(obj, &ppgtt->vm, NULL); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto out_object_unpin; + } + + if (offset) + flags |= PIN_OFFSET_FIXED | offset; + + err = i915_vma_pin(vma, 0, 0, flags); + if (err) + goto out_vma_close; + + err = igt_check_page_sizes(vma); + if (err) + goto out_vma_unpin; + + if (!offset && vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) { + if (!IS_ALIGNED(vma->node.start, + I915_GTT_PAGE_SIZE_2M)) { + pr_err("node.start(%llx) not aligned to 2M\n", + vma->node.start); + err = -EINVAL; + goto out_vma_unpin; + } + + if (!IS_ALIGNED(vma->node.size, + I915_GTT_PAGE_SIZE_2M)) { + pr_err("node.size(%llx) not aligned to 2M\n", + vma->node.size); + err = -EINVAL; + goto out_vma_unpin; + } + } + + if (vma->page_sizes.gtt != expected_gtt) { + pr_err("gtt=%u, expected=%u, i=%d, single=%s\n", + vma->page_sizes.gtt, expected_gtt, i, + yesno(!!single)); + err = -EINVAL; + goto out_vma_unpin; + } + + i915_vma_unpin(vma); + i915_vma_close(vma); + + i915_gem_object_unpin_pages(obj); + __i915_gem_object_put_pages(obj, I915_MM_NORMAL); + i915_gem_object_put(obj); + } + } + + return 0; + +out_vma_unpin: + i915_vma_unpin(vma); +out_vma_close: + i915_vma_close(vma); +out_object_unpin: + i915_gem_object_unpin_pages(obj); +out_object_put: + i915_gem_object_put(obj); + + return err; +} + +static struct i915_vma * +gpu_write_dw(struct i915_vma *vma, u64 offset, u32 val) +{ + struct drm_i915_private *i915 = vma->vm->i915; + const int gen = INTEL_GEN(i915); + unsigned int count = vma->size >> PAGE_SHIFT; + struct drm_i915_gem_object *obj; + struct i915_vma *batch; + unsigned int size; + u32 *cmd; + int n; + int err; + + size = (1 + 4 * count) * sizeof(u32); + size = round_up(size, PAGE_SIZE); + obj = i915_gem_object_create_internal(i915, size); + if (IS_ERR(obj)) + return ERR_CAST(obj); + + cmd = i915_gem_object_pin_map(obj, I915_MAP_WC); + if (IS_ERR(cmd)) { + err = PTR_ERR(cmd); + goto err; + } + + offset += vma->node.start; + + for (n = 0; n < count; n++) { + if (gen >= 8) { + *cmd++ = MI_STORE_DWORD_IMM_GEN4; + *cmd++ = lower_32_bits(offset); + *cmd++ = upper_32_bits(offset); + *cmd++ = val; + } else if (gen >= 4) { + *cmd++ = MI_STORE_DWORD_IMM_GEN4 | + (gen < 6 ? 
MI_USE_GGTT : 0); + *cmd++ = 0; + *cmd++ = offset; + *cmd++ = val; + } else { + *cmd++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL; + *cmd++ = offset; + *cmd++ = val; + } + + offset += PAGE_SIZE; + } + + *cmd = MI_BATCH_BUFFER_END; + i915_gem_chipset_flush(i915); + + i915_gem_object_unpin_map(obj); + + batch = i915_vma_instance(obj, vma->vm, NULL); + if (IS_ERR(batch)) { + err = PTR_ERR(batch); + goto err; + } + + err = i915_vma_pin(batch, 0, 0, PIN_USER); + if (err) + goto err; + + return batch; + +err: + i915_gem_object_put(obj); + + return ERR_PTR(err); +} + +static int gpu_write(struct i915_vma *vma, + struct i915_gem_context *ctx, + struct intel_engine_cs *engine, + u32 dword, + u32 value) +{ + struct i915_request *rq; + struct i915_vma *batch; + int err; + + GEM_BUG_ON(!intel_engine_can_store_dword(engine)); + + err = i915_gem_object_set_to_gtt_domain(vma->obj, true); + if (err) + return err; + + batch = gpu_write_dw(vma, dword * sizeof(u32), value); + if (IS_ERR(batch)) + return PTR_ERR(batch); + + rq = igt_request_alloc(ctx, engine); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err_batch; + } + + err = i915_vma_move_to_active(batch, rq, 0); + if (err) + goto err_request; + + i915_gem_object_set_active_reference(batch->obj); + + err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); + if (err) + goto err_request; + + err = engine->emit_bb_start(rq, + batch->node.start, batch->node.size, + 0); +err_request: + if (err) + i915_request_skip(rq, err); + i915_request_add(rq); +err_batch: + i915_vma_unpin(batch); + i915_vma_close(batch); + + return err; +} + +static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val) +{ + unsigned int needs_flush; + unsigned long n; + int err; + + err = i915_gem_object_prepare_read(obj, &needs_flush); + if (err) + return err; + + for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) { + u32 *ptr = kmap_atomic(i915_gem_object_get_page(obj, n)); + + if (needs_flush & CLFLUSH_BEFORE) + drm_clflush_virt_range(ptr, PAGE_SIZE); + + if (ptr[dword] != val) { + pr_err("n=%lu ptr[%u]=%u, val=%u\n", + n, dword, ptr[dword], val); + kunmap_atomic(ptr); + err = -EINVAL; + break; + } + + kunmap_atomic(ptr); + } + + i915_gem_object_finish_access(obj); + + return err; +} + +static int __igt_write_huge(struct i915_gem_context *ctx, + struct intel_engine_cs *engine, + struct drm_i915_gem_object *obj, + u64 size, u64 offset, + u32 dword, u32 val) +{ + struct drm_i915_private *i915 = to_i915(obj->base.dev); + struct i915_address_space *vm = + ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm; + unsigned int flags = PIN_USER | PIN_OFFSET_FIXED; + struct i915_vma *vma; + int err; + + vma = i915_vma_instance(obj, vm, NULL); + if (IS_ERR(vma)) + return PTR_ERR(vma); + + err = i915_vma_unbind(vma); + if (err) + goto out_vma_close; + + err = i915_vma_pin(vma, size, 0, flags | offset); + if (err) { + /* + * The ggtt may have some pages reserved so + * refrain from erroring out. 
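+		 * (an -ENOSPC while pinning at a fixed ggtt offset is therefore
+		 * treated as a benign skip rather than a test failure)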
+ */ + if (err == -ENOSPC && i915_is_ggtt(vm)) + err = 0; + + goto out_vma_close; + } + + err = igt_check_page_sizes(vma); + if (err) + goto out_vma_unpin; + + err = gpu_write(vma, ctx, engine, dword, val); + if (err) { + pr_err("gpu-write failed at offset=%llx\n", offset); + goto out_vma_unpin; + } + + err = cpu_check(obj, dword, val); + if (err) { + pr_err("cpu-check failed at offset=%llx\n", offset); + goto out_vma_unpin; + } + +out_vma_unpin: + i915_vma_unpin(vma); +out_vma_close: + i915_vma_destroy(vma); + + return err; +} + +static int igt_write_huge(struct i915_gem_context *ctx, + struct drm_i915_gem_object *obj) +{ + struct drm_i915_private *i915 = to_i915(obj->base.dev); + struct i915_address_space *vm = + ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm; + static struct intel_engine_cs *engines[I915_NUM_ENGINES]; + struct intel_engine_cs *engine; + I915_RND_STATE(prng); + IGT_TIMEOUT(end_time); + unsigned int max_page_size; + unsigned int id; + u64 max; + u64 num; + u64 size; + int *order; + int i, n; + int err = 0; + + GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); + + size = obj->base.size; + if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K) + size = round_up(size, I915_GTT_PAGE_SIZE_2M); + + max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg); + max = div_u64((vm->total - size), max_page_size); + + n = 0; + for_each_engine(engine, i915, id) { + if (!intel_engine_can_store_dword(engine)) { + pr_info("store-dword-imm not supported on engine=%u\n", + id); + continue; + } + engines[n++] = engine; + } + + if (!n) + return 0; + + /* + * To keep things interesting when alternating between engines in our + * randomized order, lets also make feeding to the same engine a few + * times in succession a possibility by enlarging the permutation array. + */ + order = i915_random_order(n * I915_NUM_ENGINES, &prng); + if (!order) + return -ENOMEM; + + /* + * Try various offsets in an ascending/descending fashion until we + * timeout -- we want to avoid issues hidden by effectively always using + * offset = 0. + */ + i = 0; + for_each_prime_number_from(num, 0, max) { + u64 offset_low = num * max_page_size; + u64 offset_high = (max - num) * max_page_size; + u32 dword = offset_in_page(num) / 4; + + engine = engines[order[i] % n]; + i = (i + 1) % (n * I915_NUM_ENGINES); + + /* + * In order to utilize 64K pages we need to both pad the vma + * size and ensure the vma offset is at the start of the pt + * boundary, however to improve coverage we opt for testing both + * aligned and unaligned offsets. 
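+		 * Since a 64K page-table covers a 2M range, offset_low is rounded
+		 * down to a 2M boundary below, while offset_high is left unaligned.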
+ */ + if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K) + offset_low = round_down(offset_low, + I915_GTT_PAGE_SIZE_2M); + + err = __igt_write_huge(ctx, engine, obj, size, offset_low, + dword, num + 1); + if (err) + break; + + err = __igt_write_huge(ctx, engine, obj, size, offset_high, + dword, num + 1); + if (err) + break; + + if (igt_timeout(end_time, + "%s timed out on engine=%u, offset_low=%llx offset_high=%llx, max_page_size=%x\n", + __func__, engine->id, offset_low, offset_high, + max_page_size)) + break; + } + + kfree(order); + + return err; +} + +static int igt_ppgtt_exhaust_huge(void *arg) +{ + struct i915_gem_context *ctx = arg; + struct drm_i915_private *i915 = ctx->i915; + unsigned long supported = INTEL_INFO(i915)->page_sizes; + static unsigned int pages[ARRAY_SIZE(page_sizes)]; + struct drm_i915_gem_object *obj; + unsigned int size_mask; + unsigned int page_mask; + int n, i; + int err = -ENODEV; + + if (supported == I915_GTT_PAGE_SIZE_4K) + return 0; + + /* + * Sanity check creating objects with a varying mix of page sizes -- + * ensuring that our writes lands in the right place. + */ + + n = 0; + for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) + pages[n++] = BIT(i); + + for (size_mask = 2; size_mask < BIT(n); size_mask++) { + unsigned int size = 0; + + for (i = 0; i < n; i++) { + if (size_mask & BIT(i)) + size |= pages[i]; + } + + /* + * For our page mask we want to enumerate all the page-size + * combinations which will fit into our chosen object size. + */ + for (page_mask = 2; page_mask <= size_mask; page_mask++) { + unsigned int page_sizes = 0; + + for (i = 0; i < n; i++) { + if (page_mask & BIT(i)) + page_sizes |= pages[i]; + } + + /* + * Ensure that we can actually fill the given object + * with our chosen page mask. + */ + if (!IS_ALIGNED(size, BIT(__ffs(page_sizes)))) + continue; + + obj = huge_pages_object(i915, size, page_sizes); + if (IS_ERR(obj)) { + err = PTR_ERR(obj); + goto out_device; + } + + err = i915_gem_object_pin_pages(obj); + if (err) { + i915_gem_object_put(obj); + + if (err == -ENOMEM) { + pr_info("unable to get pages, size=%u, pages=%u\n", + size, page_sizes); + err = 0; + break; + } + + pr_err("pin_pages failed, size=%u, pages=%u\n", + size_mask, page_mask); + + goto out_device; + } + + /* Force the page-size for the gtt insertion */ + obj->mm.page_sizes.sg = page_sizes; + + err = igt_write_huge(ctx, obj); + if (err) { + pr_err("exhaust write-huge failed with size=%u\n", + size); + goto out_unpin; + } + + i915_gem_object_unpin_pages(obj); + __i915_gem_object_put_pages(obj, I915_MM_NORMAL); + i915_gem_object_put(obj); + } + } + + goto out_device; + +out_unpin: + i915_gem_object_unpin_pages(obj); + i915_gem_object_put(obj); +out_device: + mkwrite_device_info(i915)->page_sizes = supported; + + return err; +} + +static int igt_ppgtt_internal_huge(void *arg) +{ + struct i915_gem_context *ctx = arg; + struct drm_i915_private *i915 = ctx->i915; + struct drm_i915_gem_object *obj; + static const unsigned int sizes[] = { + SZ_64K, + SZ_128K, + SZ_256K, + SZ_512K, + SZ_1M, + SZ_2M, + }; + int i; + int err; + + /* + * Sanity check that the HW uses huge pages correctly through internal + * -- ensure that our writes land in the right place. 
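+	 * If the allocator can't hand back huge chunks for a given size, the
+	 * test below simply finishes early (with a pr_info) instead of failing.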
+ */ + + for (i = 0; i < ARRAY_SIZE(sizes); ++i) { + unsigned int size = sizes[i]; + + obj = i915_gem_object_create_internal(i915, size); + if (IS_ERR(obj)) + return PTR_ERR(obj); + + err = i915_gem_object_pin_pages(obj); + if (err) + goto out_put; + + if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_64K) { + pr_info("internal unable to allocate huge-page(s) with size=%u\n", + size); + goto out_unpin; + } + + err = igt_write_huge(ctx, obj); + if (err) { + pr_err("internal write-huge failed with size=%u\n", + size); + goto out_unpin; + } + + i915_gem_object_unpin_pages(obj); + __i915_gem_object_put_pages(obj, I915_MM_NORMAL); + i915_gem_object_put(obj); + } + + return 0; + +out_unpin: + i915_gem_object_unpin_pages(obj); +out_put: + i915_gem_object_put(obj); + + return err; +} + +static inline bool igt_can_allocate_thp(struct drm_i915_private *i915) +{ + return i915->mm.gemfs && has_transparent_hugepage(); +} + +static int igt_ppgtt_gemfs_huge(void *arg) +{ + struct i915_gem_context *ctx = arg; + struct drm_i915_private *i915 = ctx->i915; + struct drm_i915_gem_object *obj; + static const unsigned int sizes[] = { + SZ_2M, + SZ_4M, + SZ_8M, + SZ_16M, + SZ_32M, + }; + int i; + int err; + + /* + * Sanity check that the HW uses huge pages correctly through gemfs -- + * ensure that our writes land in the right place. + */ + + if (!igt_can_allocate_thp(i915)) { + pr_info("missing THP support, skipping\n"); + return 0; + } + + for (i = 0; i < ARRAY_SIZE(sizes); ++i) { + unsigned int size = sizes[i]; + + obj = i915_gem_object_create_shmem(i915, size); + if (IS_ERR(obj)) + return PTR_ERR(obj); + + err = i915_gem_object_pin_pages(obj); + if (err) + goto out_put; + + if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_2M) { + pr_info("finishing test early, gemfs unable to allocate huge-page(s) with size=%u\n", + size); + goto out_unpin; + } + + err = igt_write_huge(ctx, obj); + if (err) { + pr_err("gemfs write-huge failed with size=%u\n", + size); + goto out_unpin; + } + + i915_gem_object_unpin_pages(obj); + __i915_gem_object_put_pages(obj, I915_MM_NORMAL); + i915_gem_object_put(obj); + } + + return 0; + +out_unpin: + i915_gem_object_unpin_pages(obj); +out_put: + i915_gem_object_put(obj); + + return err; +} + +static int igt_ppgtt_pin_update(void *arg) +{ + struct i915_gem_context *ctx = arg; + struct drm_i915_private *dev_priv = ctx->i915; + unsigned long supported = INTEL_INFO(dev_priv)->page_sizes; + struct i915_hw_ppgtt *ppgtt = ctx->ppgtt; + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + unsigned int flags = PIN_USER | PIN_OFFSET_FIXED; + int first, last; + int err; + + /* + * Make sure there's no funny business when doing a PIN_UPDATE -- in the + * past we had a subtle issue with being able to incorrectly do multiple + * alloc va ranges on the same object when doing a PIN_UPDATE, which + * resulted in some pretty nasty bugs, though only when using + * huge-gtt-pages. 
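+	 * Roughly: bind each supported huge page size, rebind it with
+	 * PIN_UPDATE, then allocate a fresh 4K object and check that a GPU
+	 * write to it does not land in a stale huge-page mapping.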
+ */ + + if (!ppgtt || !i915_vm_is_4lvl(&ppgtt->vm)) { + pr_info("48b PPGTT not supported, skipping\n"); + return 0; + } + + first = ilog2(I915_GTT_PAGE_SIZE_64K); + last = ilog2(I915_GTT_PAGE_SIZE_2M); + + for_each_set_bit_from(first, &supported, last + 1) { + unsigned int page_size = BIT(first); + + obj = i915_gem_object_create_internal(dev_priv, page_size); + if (IS_ERR(obj)) + return PTR_ERR(obj); + + vma = i915_vma_instance(obj, &ppgtt->vm, NULL); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto out_put; + } + + err = i915_vma_pin(vma, SZ_2M, 0, flags); + if (err) + goto out_close; + + if (vma->page_sizes.sg < page_size) { + pr_info("Unable to allocate page-size %x, finishing test early\n", + page_size); + goto out_unpin; + } + + err = igt_check_page_sizes(vma); + if (err) + goto out_unpin; + + if (vma->page_sizes.gtt != page_size) { + dma_addr_t addr = i915_gem_object_get_dma_address(obj, 0); + + /* + * The only valid reason for this to ever fail would be + * if the dma-mapper screwed us over when we did the + * dma_map_sg(), since it has the final say over the dma + * address. + */ + if (IS_ALIGNED(addr, page_size)) { + pr_err("page_sizes.gtt=%u, expected=%u\n", + vma->page_sizes.gtt, page_size); + err = -EINVAL; + } else { + pr_info("dma address misaligned, finishing test early\n"); + } + + goto out_unpin; + } + + err = i915_vma_bind(vma, I915_CACHE_NONE, PIN_UPDATE); + if (err) + goto out_unpin; + + i915_vma_unpin(vma); + i915_vma_close(vma); + + i915_gem_object_put(obj); + } + + obj = i915_gem_object_create_internal(dev_priv, PAGE_SIZE); + if (IS_ERR(obj)) + return PTR_ERR(obj); + + vma = i915_vma_instance(obj, &ppgtt->vm, NULL); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto out_put; + } + + err = i915_vma_pin(vma, 0, 0, flags); + if (err) + goto out_close; + + /* + * Make sure we don't end up with something like where the pde is still + * pointing to the 2M page, and the pt we just filled-in is dangling -- + * we can check this by writing to the first page where it would then + * land in the now stale 2M page. + */ + + err = gpu_write(vma, ctx, dev_priv->engine[RCS0], 0, 0xdeadbeaf); + if (err) + goto out_unpin; + + err = cpu_check(obj, 0, 0xdeadbeaf); + +out_unpin: + i915_vma_unpin(vma); +out_close: + i915_vma_close(vma); +out_put: + i915_gem_object_put(obj); + + return err; +} + +static int igt_tmpfs_fallback(void *arg) +{ + struct i915_gem_context *ctx = arg; + struct drm_i915_private *i915 = ctx->i915; + struct vfsmount *gemfs = i915->mm.gemfs; + struct i915_address_space *vm = + ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm; + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + u32 *vaddr; + int err = 0; + + /* + * Make sure that we don't burst into a ball of flames upon falling back + * to tmpfs, which we rely on if on the off-chance we encouter a failure + * when setting up gemfs. 
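+	 * Clearing i915->mm.gemfs below forces the shmem object to be backed
+	 * by the default tmpfs mount, emulating a gemfs setup failure.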
+ */ + + i915->mm.gemfs = NULL; + + obj = i915_gem_object_create_shmem(i915, PAGE_SIZE); + if (IS_ERR(obj)) { + err = PTR_ERR(obj); + goto out_restore; + } + + vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB); + if (IS_ERR(vaddr)) { + err = PTR_ERR(vaddr); + goto out_put; + } + *vaddr = 0xdeadbeaf; + + __i915_gem_object_flush_map(obj, 0, 64); + i915_gem_object_unpin_map(obj); + + vma = i915_vma_instance(obj, vm, NULL); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto out_put; + } + + err = i915_vma_pin(vma, 0, 0, PIN_USER); + if (err) + goto out_close; + + err = igt_check_page_sizes(vma); + + i915_vma_unpin(vma); +out_close: + i915_vma_close(vma); +out_put: + i915_gem_object_put(obj); +out_restore: + i915->mm.gemfs = gemfs; + + return err; +} + +static int igt_shrink_thp(void *arg) +{ + struct i915_gem_context *ctx = arg; + struct drm_i915_private *i915 = ctx->i915; + struct i915_address_space *vm = + ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm; + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + unsigned int flags = PIN_USER; + int err; + + /* + * Sanity check shrinking huge-paged object -- make sure nothing blows + * up. + */ + + if (!igt_can_allocate_thp(i915)) { + pr_info("missing THP support, skipping\n"); + return 0; + } + + obj = i915_gem_object_create_shmem(i915, SZ_2M); + if (IS_ERR(obj)) + return PTR_ERR(obj); + + vma = i915_vma_instance(obj, vm, NULL); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto out_put; + } + + err = i915_vma_pin(vma, 0, 0, flags); + if (err) + goto out_close; + + if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_2M) { + pr_info("failed to allocate THP, finishing test early\n"); + goto out_unpin; + } + + err = igt_check_page_sizes(vma); + if (err) + goto out_unpin; + + err = gpu_write(vma, ctx, i915->engine[RCS0], 0, 0xdeadbeaf); + if (err) + goto out_unpin; + + i915_vma_unpin(vma); + + /* + * Now that the pages are *unpinned* shrink-all should invoke + * shmem to truncate our pages. 
+ */ + i915_gem_shrink_all(i915); + if (i915_gem_object_has_pages(obj)) { + pr_err("shrink-all didn't truncate the pages\n"); + err = -EINVAL; + goto out_close; + } + + if (obj->mm.page_sizes.sg || obj->mm.page_sizes.phys) { + pr_err("residual page-size bits left\n"); + err = -EINVAL; + goto out_close; + } + + err = i915_vma_pin(vma, 0, 0, flags); + if (err) + goto out_close; + + err = cpu_check(obj, 0, 0xdeadbeaf); + +out_unpin: + i915_vma_unpin(vma); +out_close: + i915_vma_close(vma); +out_put: + i915_gem_object_put(obj); + + return err; +} + +int i915_gem_huge_page_mock_selftests(void) +{ + static const struct i915_subtest tests[] = { + SUBTEST(igt_mock_exhaust_device_supported_pages), + SUBTEST(igt_mock_ppgtt_misaligned_dma), + SUBTEST(igt_mock_ppgtt_huge_fill), + SUBTEST(igt_mock_ppgtt_64K), + }; + struct drm_i915_private *dev_priv; + struct i915_hw_ppgtt *ppgtt; + int err; + + dev_priv = mock_gem_device(); + if (!dev_priv) + return -ENOMEM; + + /* Pretend to be a device which supports the 48b PPGTT */ + mkwrite_device_info(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL; + mkwrite_device_info(dev_priv)->ppgtt_size = 48; + + mutex_lock(&dev_priv->drm.struct_mutex); + ppgtt = i915_ppgtt_create(dev_priv); + if (IS_ERR(ppgtt)) { + err = PTR_ERR(ppgtt); + goto out_unlock; + } + + if (!i915_vm_is_4lvl(&ppgtt->vm)) { + pr_err("failed to create 48b PPGTT\n"); + err = -EINVAL; + goto out_close; + } + + /* If we were ever hit this then it's time to mock the 64K scratch */ + if (!i915_vm_has_scratch_64K(&ppgtt->vm)) { + pr_err("PPGTT missing 64K scratch page\n"); + err = -EINVAL; + goto out_close; + } + + err = i915_subtests(tests, ppgtt); + +out_close: + i915_ppgtt_put(ppgtt); + +out_unlock: + mutex_unlock(&dev_priv->drm.struct_mutex); + drm_dev_put(&dev_priv->drm); + + return err; +} + +int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv) +{ + static const struct i915_subtest tests[] = { + SUBTEST(igt_shrink_thp), + SUBTEST(igt_ppgtt_pin_update), + SUBTEST(igt_tmpfs_fallback), + SUBTEST(igt_ppgtt_exhaust_huge), + SUBTEST(igt_ppgtt_gemfs_huge), + SUBTEST(igt_ppgtt_internal_huge), + }; + struct drm_file *file; + struct i915_gem_context *ctx; + intel_wakeref_t wakeref; + int err; + + if (!HAS_PPGTT(dev_priv)) { + pr_info("PPGTT not supported, skipping live-selftests\n"); + return 0; + } + + if (i915_terminally_wedged(dev_priv)) + return 0; + + file = mock_file(dev_priv); + if (IS_ERR(file)) + return PTR_ERR(file); + + mutex_lock(&dev_priv->drm.struct_mutex); + wakeref = intel_runtime_pm_get(dev_priv); + + ctx = live_context(dev_priv, file); + if (IS_ERR(ctx)) { + err = PTR_ERR(ctx); + goto out_unlock; + } + + if (ctx->ppgtt) + ctx->ppgtt->vm.scrub_64K = true; + + err = i915_subtests(tests, ctx); + +out_unlock: + intel_runtime_pm_put(dev_priv, wakeref); + mutex_unlock(&dev_priv->drm.struct_mutex); + + mock_file_free(dev_priv, file); + + return err; +} diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c new file mode 100644 index 000000000000..5495875b48b3 --- /dev/null +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c @@ -0,0 +1,379 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2017 Intel Corporation + */ + +#include + +#include "i915_selftest.h" +#include "selftests/i915_random.h" + +static int cpu_set(struct drm_i915_gem_object *obj, + unsigned long offset, + u32 v) +{ + unsigned int needs_clflush; + struct page *page; + void *map; + u32 *cpu; + int err; + + err = 
i915_gem_object_prepare_write(obj, &needs_clflush); + if (err) + return err; + + page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT); + map = kmap_atomic(page); + cpu = map + offset_in_page(offset); + + if (needs_clflush & CLFLUSH_BEFORE) + drm_clflush_virt_range(cpu, sizeof(*cpu)); + + *cpu = v; + + if (needs_clflush & CLFLUSH_AFTER) + drm_clflush_virt_range(cpu, sizeof(*cpu)); + + kunmap_atomic(map); + i915_gem_object_finish_access(obj); + + return 0; +} + +static int cpu_get(struct drm_i915_gem_object *obj, + unsigned long offset, + u32 *v) +{ + unsigned int needs_clflush; + struct page *page; + void *map; + u32 *cpu; + int err; + + err = i915_gem_object_prepare_read(obj, &needs_clflush); + if (err) + return err; + + page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT); + map = kmap_atomic(page); + cpu = map + offset_in_page(offset); + + if (needs_clflush & CLFLUSH_BEFORE) + drm_clflush_virt_range(cpu, sizeof(*cpu)); + + *v = *cpu; + + kunmap_atomic(map); + i915_gem_object_finish_access(obj); + + return 0; +} + +static int gtt_set(struct drm_i915_gem_object *obj, + unsigned long offset, + u32 v) +{ + struct i915_vma *vma; + u32 __iomem *map; + int err; + + err = i915_gem_object_set_to_gtt_domain(obj, true); + if (err) + return err; + + vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE); + if (IS_ERR(vma)) + return PTR_ERR(vma); + + map = i915_vma_pin_iomap(vma); + i915_vma_unpin(vma); + if (IS_ERR(map)) + return PTR_ERR(map); + + iowrite32(v, &map[offset / sizeof(*map)]); + i915_vma_unpin_iomap(vma); + + return 0; +} + +static int gtt_get(struct drm_i915_gem_object *obj, + unsigned long offset, + u32 *v) +{ + struct i915_vma *vma; + u32 __iomem *map; + int err; + + err = i915_gem_object_set_to_gtt_domain(obj, false); + if (err) + return err; + + vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE); + if (IS_ERR(vma)) + return PTR_ERR(vma); + + map = i915_vma_pin_iomap(vma); + i915_vma_unpin(vma); + if (IS_ERR(map)) + return PTR_ERR(map); + + *v = ioread32(&map[offset / sizeof(*map)]); + i915_vma_unpin_iomap(vma); + + return 0; +} + +static int wc_set(struct drm_i915_gem_object *obj, + unsigned long offset, + u32 v) +{ + u32 *map; + int err; + + err = i915_gem_object_set_to_wc_domain(obj, true); + if (err) + return err; + + map = i915_gem_object_pin_map(obj, I915_MAP_WC); + if (IS_ERR(map)) + return PTR_ERR(map); + + map[offset / sizeof(*map)] = v; + i915_gem_object_unpin_map(obj); + + return 0; +} + +static int wc_get(struct drm_i915_gem_object *obj, + unsigned long offset, + u32 *v) +{ + u32 *map; + int err; + + err = i915_gem_object_set_to_wc_domain(obj, false); + if (err) + return err; + + map = i915_gem_object_pin_map(obj, I915_MAP_WC); + if (IS_ERR(map)) + return PTR_ERR(map); + + *v = map[offset / sizeof(*map)]; + i915_gem_object_unpin_map(obj); + + return 0; +} + +static int gpu_set(struct drm_i915_gem_object *obj, + unsigned long offset, + u32 v) +{ + struct drm_i915_private *i915 = to_i915(obj->base.dev); + struct i915_request *rq; + struct i915_vma *vma; + u32 *cs; + int err; + + err = i915_gem_object_set_to_gtt_domain(obj, true); + if (err) + return err; + + vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0); + if (IS_ERR(vma)) + return PTR_ERR(vma); + + rq = i915_request_create(i915->engine[RCS0]->kernel_context); + if (IS_ERR(rq)) { + i915_vma_unpin(vma); + return PTR_ERR(rq); + } + + cs = intel_ring_begin(rq, 4); + if (IS_ERR(cs)) { + i915_request_add(rq); + i915_vma_unpin(vma); + return PTR_ERR(cs); + } + + if (INTEL_GEN(i915) >= 8) { + 
*cs++ = MI_STORE_DWORD_IMM_GEN4 | 1 << 22; + *cs++ = lower_32_bits(i915_ggtt_offset(vma) + offset); + *cs++ = upper_32_bits(i915_ggtt_offset(vma) + offset); + *cs++ = v; + } else if (INTEL_GEN(i915) >= 4) { + *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; + *cs++ = 0; + *cs++ = i915_ggtt_offset(vma) + offset; + *cs++ = v; + } else { + *cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL; + *cs++ = i915_ggtt_offset(vma) + offset; + *cs++ = v; + *cs++ = MI_NOOP; + } + intel_ring_advance(rq, cs); + + err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); + i915_vma_unpin(vma); + + i915_request_add(rq); + + return err; +} + +static bool always_valid(struct drm_i915_private *i915) +{ + return true; +} + +static bool needs_fence_registers(struct drm_i915_private *i915) +{ + return !i915_terminally_wedged(i915); +} + +static bool needs_mi_store_dword(struct drm_i915_private *i915) +{ + if (i915_terminally_wedged(i915)) + return false; + + return intel_engine_can_store_dword(i915->engine[RCS0]); +} + +static const struct igt_coherency_mode { + const char *name; + int (*set)(struct drm_i915_gem_object *, unsigned long offset, u32 v); + int (*get)(struct drm_i915_gem_object *, unsigned long offset, u32 *v); + bool (*valid)(struct drm_i915_private *i915); +} igt_coherency_mode[] = { + { "cpu", cpu_set, cpu_get, always_valid }, + { "gtt", gtt_set, gtt_get, needs_fence_registers }, + { "wc", wc_set, wc_get, always_valid }, + { "gpu", gpu_set, NULL, needs_mi_store_dword }, + { }, +}; + +static int igt_gem_coherency(void *arg) +{ + const unsigned int ncachelines = PAGE_SIZE/64; + I915_RND_STATE(prng); + struct drm_i915_private *i915 = arg; + const struct igt_coherency_mode *read, *write, *over; + struct drm_i915_gem_object *obj; + intel_wakeref_t wakeref; + unsigned long count, n; + u32 *offsets, *values; + int err = 0; + + /* We repeatedly write, overwrite and read from a sequence of + * cachelines in order to try and detect incoherency (unflushed writes + * from either the CPU or GPU). Each setter/getter uses our cache + * domain API which should prevent incoherency. 
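+	 * Concretely: stale values (~v) are planted via the "over" method,
+	 * the expected values (v) are then written via "write", and finally
+	 * read back and checked via "read".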
+ */ + + offsets = kmalloc_array(ncachelines, 2*sizeof(u32), GFP_KERNEL); + if (!offsets) + return -ENOMEM; + for (count = 0; count < ncachelines; count++) + offsets[count] = count * 64 + 4 * (count % 16); + + values = offsets + ncachelines; + + mutex_lock(&i915->drm.struct_mutex); + wakeref = intel_runtime_pm_get(i915); + for (over = igt_coherency_mode; over->name; over++) { + if (!over->set) + continue; + + if (!over->valid(i915)) + continue; + + for (write = igt_coherency_mode; write->name; write++) { + if (!write->set) + continue; + + if (!write->valid(i915)) + continue; + + for (read = igt_coherency_mode; read->name; read++) { + if (!read->get) + continue; + + if (!read->valid(i915)) + continue; + + for_each_prime_number_from(count, 1, ncachelines) { + obj = i915_gem_object_create_internal(i915, PAGE_SIZE); + if (IS_ERR(obj)) { + err = PTR_ERR(obj); + goto unlock; + } + + i915_random_reorder(offsets, ncachelines, &prng); + for (n = 0; n < count; n++) + values[n] = prandom_u32_state(&prng); + + for (n = 0; n < count; n++) { + err = over->set(obj, offsets[n], ~values[n]); + if (err) { + pr_err("Failed to set stale value[%ld/%ld] in object using %s, err=%d\n", + n, count, over->name, err); + goto put_object; + } + } + + for (n = 0; n < count; n++) { + err = write->set(obj, offsets[n], values[n]); + if (err) { + pr_err("Failed to set value[%ld/%ld] in object using %s, err=%d\n", + n, count, write->name, err); + goto put_object; + } + } + + for (n = 0; n < count; n++) { + u32 found; + + err = read->get(obj, offsets[n], &found); + if (err) { + pr_err("Failed to get value[%ld/%ld] in object using %s, err=%d\n", + n, count, read->name, err); + goto put_object; + } + + if (found != values[n]) { + pr_err("Value[%ld/%ld] mismatch, (overwrite with %s) wrote [%s] %x read [%s] %x (inverse %x), at offset %x\n", + n, count, over->name, + write->name, values[n], + read->name, found, + ~values[n], offsets[n]); + err = -EINVAL; + goto put_object; + } + } + + __i915_gem_object_release_unless_active(obj); + } + } + } + } +unlock: + intel_runtime_pm_put(i915, wakeref); + mutex_unlock(&i915->drm.struct_mutex); + kfree(offsets); + return err; + +put_object: + __i915_gem_object_release_unless_active(obj); + goto unlock; +} + +int i915_gem_coherency_live_selftests(struct drm_i915_private *i915) +{ + static const struct i915_subtest tests[] = { + SUBTEST(igt_gem_coherency), + }; + + return i915_subtests(tests, i915); +} diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c new file mode 100644 index 000000000000..653ae08a277f --- /dev/null +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c @@ -0,0 +1,1736 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2017 Intel Corporation + */ + +#include + +#include "gem/i915_gem_pm.h" +#include "gt/intel_reset.h" +#include "i915_selftest.h" + +#include "gem/selftests/igt_gem_utils.h" +#include "selftests/i915_random.h" +#include "selftests/igt_flush_test.h" +#include "selftests/igt_live_test.h" +#include "selftests/igt_reset.h" +#include "selftests/igt_spinner.h" +#include "selftests/mock_drm.h" +#include "selftests/mock_gem_device.h" + +#include "huge_gem_object.h" +#include "igt_gem_utils.h" + +#define DW_PER_PAGE (PAGE_SIZE / sizeof(u32)) + +static int live_nop_switch(void *arg) +{ + const unsigned int nctx = 1024; + struct drm_i915_private *i915 = arg; + struct intel_engine_cs *engine; + struct i915_gem_context **ctx; + enum intel_engine_id id; + intel_wakeref_t wakeref; + struct 
igt_live_test t; + struct drm_file *file; + unsigned long n; + int err = -ENODEV; + + /* + * Create as many contexts as we can feasibly get away with + * and check we can switch between them rapidly. + * + * Serves as very simple stress test for submission and HW switching + * between contexts. + */ + + if (!DRIVER_CAPS(i915)->has_logical_contexts) + return 0; + + file = mock_file(i915); + if (IS_ERR(file)) + return PTR_ERR(file); + + mutex_lock(&i915->drm.struct_mutex); + wakeref = intel_runtime_pm_get(i915); + + ctx = kcalloc(nctx, sizeof(*ctx), GFP_KERNEL); + if (!ctx) { + err = -ENOMEM; + goto out_unlock; + } + + for (n = 0; n < nctx; n++) { + ctx[n] = live_context(i915, file); + if (IS_ERR(ctx[n])) { + err = PTR_ERR(ctx[n]); + goto out_unlock; + } + } + + for_each_engine(engine, i915, id) { + struct i915_request *rq; + unsigned long end_time, prime; + ktime_t times[2] = {}; + + times[0] = ktime_get_raw(); + for (n = 0; n < nctx; n++) { + rq = igt_request_alloc(ctx[n], engine); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto out_unlock; + } + i915_request_add(rq); + } + if (i915_request_wait(rq, + I915_WAIT_LOCKED, + HZ / 5) < 0) { + pr_err("Failed to populated %d contexts\n", nctx); + i915_gem_set_wedged(i915); + err = -EIO; + goto out_unlock; + } + + times[1] = ktime_get_raw(); + + pr_info("Populated %d contexts on %s in %lluns\n", + nctx, engine->name, ktime_to_ns(times[1] - times[0])); + + err = igt_live_test_begin(&t, i915, __func__, engine->name); + if (err) + goto out_unlock; + + end_time = jiffies + i915_selftest.timeout_jiffies; + for_each_prime_number_from(prime, 2, 8192) { + times[1] = ktime_get_raw(); + + for (n = 0; n < prime; n++) { + rq = igt_request_alloc(ctx[n % nctx], engine); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto out_unlock; + } + + /* + * This space is left intentionally blank. + * + * We do not actually want to perform any + * action with this request, we just want + * to measure the latency in allocation + * and submission of our breadcrumbs - + * ensuring that the bare request is sufficient + * for the system to work (i.e. proper HEAD + * tracking of the rings, interrupt handling, + * etc). It also gives us the lowest bounds + * for latency. 
+ */ + + i915_request_add(rq); + } + if (i915_request_wait(rq, + I915_WAIT_LOCKED, + HZ / 5) < 0) { + pr_err("Switching between %ld contexts timed out\n", + prime); + i915_gem_set_wedged(i915); + break; + } + + times[1] = ktime_sub(ktime_get_raw(), times[1]); + if (prime == 2) + times[0] = times[1]; + + if (__igt_timeout(end_time, NULL)) + break; + } + + err = igt_live_test_end(&t); + if (err) + goto out_unlock; + + pr_info("Switch latencies on %s: 1 = %lluns, %lu = %lluns\n", + engine->name, + ktime_to_ns(times[0]), + prime - 1, div64_u64(ktime_to_ns(times[1]), prime - 1)); + } + +out_unlock: + intel_runtime_pm_put(i915, wakeref); + mutex_unlock(&i915->drm.struct_mutex); + mock_file_free(i915, file); + return err; +} + +static struct i915_vma * +gpu_fill_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 value) +{ + struct drm_i915_gem_object *obj; + const int gen = INTEL_GEN(vma->vm->i915); + unsigned long n, size; + u32 *cmd; + int err; + + size = (4 * count + 1) * sizeof(u32); + size = round_up(size, PAGE_SIZE); + obj = i915_gem_object_create_internal(vma->vm->i915, size); + if (IS_ERR(obj)) + return ERR_CAST(obj); + + cmd = i915_gem_object_pin_map(obj, I915_MAP_WB); + if (IS_ERR(cmd)) { + err = PTR_ERR(cmd); + goto err; + } + + GEM_BUG_ON(offset + (count - 1) * PAGE_SIZE > vma->node.size); + offset += vma->node.start; + + for (n = 0; n < count; n++) { + if (gen >= 8) { + *cmd++ = MI_STORE_DWORD_IMM_GEN4; + *cmd++ = lower_32_bits(offset); + *cmd++ = upper_32_bits(offset); + *cmd++ = value; + } else if (gen >= 4) { + *cmd++ = MI_STORE_DWORD_IMM_GEN4 | + (gen < 6 ? MI_USE_GGTT : 0); + *cmd++ = 0; + *cmd++ = offset; + *cmd++ = value; + } else { + *cmd++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL; + *cmd++ = offset; + *cmd++ = value; + } + offset += PAGE_SIZE; + } + *cmd = MI_BATCH_BUFFER_END; + i915_gem_object_flush_map(obj); + i915_gem_object_unpin_map(obj); + + err = i915_gem_object_set_to_gtt_domain(obj, false); + if (err) + goto err; + + vma = i915_vma_instance(obj, vma->vm, NULL); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto err; + } + + err = i915_vma_pin(vma, 0, 0, PIN_USER); + if (err) + goto err; + + return vma; + +err: + i915_gem_object_put(obj); + return ERR_PTR(err); +} + +static unsigned long real_page_count(struct drm_i915_gem_object *obj) +{ + return huge_gem_object_phys_size(obj) >> PAGE_SHIFT; +} + +static unsigned long fake_page_count(struct drm_i915_gem_object *obj) +{ + return huge_gem_object_dma_size(obj) >> PAGE_SHIFT; +} + +static int gpu_fill(struct drm_i915_gem_object *obj, + struct i915_gem_context *ctx, + struct intel_engine_cs *engine, + unsigned int dw) +{ + struct drm_i915_private *i915 = to_i915(obj->base.dev); + struct i915_address_space *vm = + ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm; + struct i915_request *rq; + struct i915_vma *vma; + struct i915_vma *batch; + unsigned int flags; + int err; + + GEM_BUG_ON(obj->base.size > vm->total); + GEM_BUG_ON(!intel_engine_can_store_dword(engine)); + + vma = i915_vma_instance(obj, vm, NULL); + if (IS_ERR(vma)) + return PTR_ERR(vma); + + err = i915_gem_object_set_to_gtt_domain(obj, false); + if (err) + return err; + + err = i915_vma_pin(vma, 0, 0, PIN_HIGH | PIN_USER); + if (err) + return err; + + /* Within the GTT the huge objects maps every page onto + * its 1024 real pages (using phys_pfn = dma_pfn % 1024). 
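+	 * (i.e. GTT pages 0, 1024, 2048, ... all alias physical page 0, and
+	 * so on for the other pages)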
+ * We set the nth dword within the page using the nth + * mapping via the GTT - this should exercise the GTT mapping + * whilst checking that each context provides a unique view + * into the object. + */ + batch = gpu_fill_dw(vma, + (dw * real_page_count(obj)) << PAGE_SHIFT | + (dw * sizeof(u32)), + real_page_count(obj), + dw); + if (IS_ERR(batch)) { + err = PTR_ERR(batch); + goto err_vma; + } + + rq = igt_request_alloc(ctx, engine); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err_batch; + } + + flags = 0; + if (INTEL_GEN(vm->i915) <= 5) + flags |= I915_DISPATCH_SECURE; + + err = engine->emit_bb_start(rq, + batch->node.start, batch->node.size, + flags); + if (err) + goto err_request; + + err = i915_vma_move_to_active(batch, rq, 0); + if (err) + goto skip_request; + + err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); + if (err) + goto skip_request; + + i915_gem_object_set_active_reference(batch->obj); + i915_vma_unpin(batch); + i915_vma_close(batch); + + i915_vma_unpin(vma); + + i915_request_add(rq); + + return 0; + +skip_request: + i915_request_skip(rq, err); +err_request: + i915_request_add(rq); +err_batch: + i915_vma_unpin(batch); + i915_vma_put(batch); +err_vma: + i915_vma_unpin(vma); + return err; +} + +static int cpu_fill(struct drm_i915_gem_object *obj, u32 value) +{ + const bool has_llc = HAS_LLC(to_i915(obj->base.dev)); + unsigned int n, m, need_flush; + int err; + + err = i915_gem_object_prepare_write(obj, &need_flush); + if (err) + return err; + + for (n = 0; n < real_page_count(obj); n++) { + u32 *map; + + map = kmap_atomic(i915_gem_object_get_page(obj, n)); + for (m = 0; m < DW_PER_PAGE; m++) + map[m] = value; + if (!has_llc) + drm_clflush_virt_range(map, PAGE_SIZE); + kunmap_atomic(map); + } + + i915_gem_object_finish_access(obj); + obj->read_domains = I915_GEM_DOMAIN_GTT | I915_GEM_DOMAIN_CPU; + obj->write_domain = 0; + return 0; +} + +static noinline int cpu_check(struct drm_i915_gem_object *obj, + unsigned int idx, unsigned int max) +{ + unsigned int n, m, needs_flush; + int err; + + err = i915_gem_object_prepare_read(obj, &needs_flush); + if (err) + return err; + + for (n = 0; n < real_page_count(obj); n++) { + u32 *map; + + map = kmap_atomic(i915_gem_object_get_page(obj, n)); + if (needs_flush & CLFLUSH_BEFORE) + drm_clflush_virt_range(map, PAGE_SIZE); + + for (m = 0; m < max; m++) { + if (map[m] != m) { + pr_err("%pS: Invalid value at object %d page %d/%ld, offset %d/%d: found %x expected %x\n", + __builtin_return_address(0), idx, + n, real_page_count(obj), m, max, + map[m], m); + err = -EINVAL; + goto out_unmap; + } + } + + for (; m < DW_PER_PAGE; m++) { + if (map[m] != STACK_MAGIC) { + pr_err("%pS: Invalid value at object %d page %d, offset %d: found %x expected %x (uninitialised)\n", + __builtin_return_address(0), idx, n, m, + map[m], STACK_MAGIC); + err = -EINVAL; + goto out_unmap; + } + } + +out_unmap: + kunmap_atomic(map); + if (err) + break; + } + + i915_gem_object_finish_access(obj); + return err; +} + +static int file_add_object(struct drm_file *file, + struct drm_i915_gem_object *obj) +{ + int err; + + GEM_BUG_ON(obj->base.handle_count); + + /* tie the object to the drm_file for easy reaping */ + err = idr_alloc(&file->object_idr, &obj->base, 1, 0, GFP_KERNEL); + if (err < 0) + return err; + + i915_gem_object_get(obj); + obj->base.handle_count++; + return 0; +} + +static struct drm_i915_gem_object * +create_test_object(struct i915_gem_context *ctx, + struct drm_file *file, + struct list_head *objects) +{ + struct drm_i915_gem_object *obj; + struct 
i915_address_space *vm = + ctx->ppgtt ? &ctx->ppgtt->vm : &ctx->i915->ggtt.vm; + u64 size; + int err; + + size = min(vm->total / 2, 1024ull * DW_PER_PAGE * PAGE_SIZE); + size = round_down(size, DW_PER_PAGE * PAGE_SIZE); + + obj = huge_gem_object(ctx->i915, DW_PER_PAGE * PAGE_SIZE, size); + if (IS_ERR(obj)) + return obj; + + err = file_add_object(file, obj); + i915_gem_object_put(obj); + if (err) + return ERR_PTR(err); + + err = cpu_fill(obj, STACK_MAGIC); + if (err) { + pr_err("Failed to fill object with cpu, err=%d\n", + err); + return ERR_PTR(err); + } + + list_add_tail(&obj->st_link, objects); + return obj; +} + +static unsigned long max_dwords(struct drm_i915_gem_object *obj) +{ + unsigned long npages = fake_page_count(obj); + + GEM_BUG_ON(!IS_ALIGNED(npages, DW_PER_PAGE)); + return npages / DW_PER_PAGE; +} + +static int igt_ctx_exec(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct intel_engine_cs *engine; + enum intel_engine_id id; + int err = -ENODEV; + + /* + * Create a few different contexts (with different mm) and write + * through each ctx/mm using the GPU making sure those writes end + * up in the expected pages of our obj. + */ + + if (!DRIVER_CAPS(i915)->has_logical_contexts) + return 0; + + for_each_engine(engine, i915, id) { + struct drm_i915_gem_object *obj = NULL; + unsigned long ncontexts, ndwords, dw; + struct igt_live_test t; + struct drm_file *file; + IGT_TIMEOUT(end_time); + LIST_HEAD(objects); + + if (!intel_engine_can_store_dword(engine)) + continue; + + if (!engine->context_size) + continue; /* No logical context support in HW */ + + file = mock_file(i915); + if (IS_ERR(file)) + return PTR_ERR(file); + + mutex_lock(&i915->drm.struct_mutex); + + err = igt_live_test_begin(&t, i915, __func__, engine->name); + if (err) + goto out_unlock; + + ncontexts = 0; + ndwords = 0; + dw = 0; + while (!time_after(jiffies, end_time)) { + struct i915_gem_context *ctx; + intel_wakeref_t wakeref; + + ctx = live_context(i915, file); + if (IS_ERR(ctx)) { + err = PTR_ERR(ctx); + goto out_unlock; + } + + if (!obj) { + obj = create_test_object(ctx, file, &objects); + if (IS_ERR(obj)) { + err = PTR_ERR(obj); + goto out_unlock; + } + } + + with_intel_runtime_pm(i915, wakeref) + err = gpu_fill(obj, ctx, engine, dw); + if (err) { + pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n", + ndwords, dw, max_dwords(obj), + engine->name, ctx->hw_id, + yesno(!!ctx->ppgtt), err); + goto out_unlock; + } + + if (++dw == max_dwords(obj)) { + obj = NULL; + dw = 0; + } + + ndwords++; + ncontexts++; + } + + pr_info("Submitted %lu contexts to %s, filling %lu dwords\n", + ncontexts, engine->name, ndwords); + + ncontexts = dw = 0; + list_for_each_entry(obj, &objects, st_link) { + unsigned int rem = + min_t(unsigned int, ndwords - dw, max_dwords(obj)); + + err = cpu_check(obj, ncontexts++, rem); + if (err) + break; + + dw += rem; + } + +out_unlock: + if (igt_live_test_end(&t)) + err = -EIO; + mutex_unlock(&i915->drm.struct_mutex); + + mock_file_free(i915, file); + if (err) + return err; + } + + return 0; +} + +static int igt_shared_ctx_exec(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct i915_gem_context *parent; + struct intel_engine_cs *engine; + enum intel_engine_id id; + struct igt_live_test t; + struct drm_file *file; + int err = 0; + + /* + * Create a few different contexts with the same mm and write + * through each ctx using the GPU making sure those writes end + * up in the expected pages of our obj. 
+ */ + if (!DRIVER_CAPS(i915)->has_logical_contexts) + return 0; + + file = mock_file(i915); + if (IS_ERR(file)) + return PTR_ERR(file); + + mutex_lock(&i915->drm.struct_mutex); + + parent = live_context(i915, file); + if (IS_ERR(parent)) { + err = PTR_ERR(parent); + goto out_unlock; + } + + if (!parent->ppgtt) { /* not full-ppgtt; nothing to share */ + err = 0; + goto out_unlock; + } + + err = igt_live_test_begin(&t, i915, __func__, ""); + if (err) + goto out_unlock; + + for_each_engine(engine, i915, id) { + unsigned long ncontexts, ndwords, dw; + struct drm_i915_gem_object *obj = NULL; + IGT_TIMEOUT(end_time); + LIST_HEAD(objects); + + if (!intel_engine_can_store_dword(engine)) + continue; + + dw = 0; + ndwords = 0; + ncontexts = 0; + while (!time_after(jiffies, end_time)) { + struct i915_gem_context *ctx; + intel_wakeref_t wakeref; + + ctx = kernel_context(i915); + if (IS_ERR(ctx)) { + err = PTR_ERR(ctx); + goto out_test; + } + + __assign_ppgtt(ctx, parent->ppgtt); + + if (!obj) { + obj = create_test_object(parent, file, &objects); + if (IS_ERR(obj)) { + err = PTR_ERR(obj); + kernel_context_close(ctx); + goto out_test; + } + } + + err = 0; + with_intel_runtime_pm(i915, wakeref) + err = gpu_fill(obj, ctx, engine, dw); + if (err) { + pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n", + ndwords, dw, max_dwords(obj), + engine->name, ctx->hw_id, + yesno(!!ctx->ppgtt), err); + kernel_context_close(ctx); + goto out_test; + } + + if (++dw == max_dwords(obj)) { + obj = NULL; + dw = 0; + } + + ndwords++; + ncontexts++; + + kernel_context_close(ctx); + } + pr_info("Submitted %lu contexts to %s, filling %lu dwords\n", + ncontexts, engine->name, ndwords); + + ncontexts = dw = 0; + list_for_each_entry(obj, &objects, st_link) { + unsigned int rem = + min_t(unsigned int, ndwords - dw, max_dwords(obj)); + + err = cpu_check(obj, ncontexts++, rem); + if (err) + goto out_test; + + dw += rem; + } + } +out_test: + if (igt_live_test_end(&t)) + err = -EIO; +out_unlock: + mutex_unlock(&i915->drm.struct_mutex); + + mock_file_free(i915, file); + return err; +} + +static struct i915_vma *rpcs_query_batch(struct i915_vma *vma) +{ + struct drm_i915_gem_object *obj; + u32 *cmd; + int err; + + if (INTEL_GEN(vma->vm->i915) < 8) + return ERR_PTR(-EINVAL); + + obj = i915_gem_object_create_internal(vma->vm->i915, PAGE_SIZE); + if (IS_ERR(obj)) + return ERR_CAST(obj); + + cmd = i915_gem_object_pin_map(obj, I915_MAP_WB); + if (IS_ERR(cmd)) { + err = PTR_ERR(cmd); + goto err; + } + + *cmd++ = MI_STORE_REGISTER_MEM_GEN8; + *cmd++ = i915_mmio_reg_offset(GEN8_R_PWR_CLK_STATE); + *cmd++ = lower_32_bits(vma->node.start); + *cmd++ = upper_32_bits(vma->node.start); + *cmd = MI_BATCH_BUFFER_END; + + __i915_gem_object_flush_map(obj, 0, 64); + i915_gem_object_unpin_map(obj); + + vma = i915_vma_instance(obj, vma->vm, NULL); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto err; + } + + err = i915_vma_pin(vma, 0, 0, PIN_USER); + if (err) + goto err; + + return vma; + +err: + i915_gem_object_put(obj); + return ERR_PTR(err); +} + +static int +emit_rpcs_query(struct drm_i915_gem_object *obj, + struct intel_context *ce, + struct i915_request **rq_out) +{ + struct i915_request *rq; + struct i915_vma *batch; + struct i915_vma *vma; + int err; + + GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine)); + + vma = i915_vma_instance(obj, &ce->gem_context->ppgtt->vm, NULL); + if (IS_ERR(vma)) + return PTR_ERR(vma); + + err = i915_gem_object_set_to_gtt_domain(obj, false); + if (err) + return err; + + err 
= i915_vma_pin(vma, 0, 0, PIN_USER); + if (err) + return err; + + batch = rpcs_query_batch(vma); + if (IS_ERR(batch)) { + err = PTR_ERR(batch); + goto err_vma; + } + + rq = i915_request_create(ce); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err_batch; + } + + err = rq->engine->emit_bb_start(rq, + batch->node.start, batch->node.size, + 0); + if (err) + goto err_request; + + err = i915_vma_move_to_active(batch, rq, 0); + if (err) + goto skip_request; + + err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); + if (err) + goto skip_request; + + i915_gem_object_set_active_reference(batch->obj); + i915_vma_unpin(batch); + i915_vma_close(batch); + + i915_vma_unpin(vma); + + *rq_out = i915_request_get(rq); + + i915_request_add(rq); + + return 0; + +skip_request: + i915_request_skip(rq, err); +err_request: + i915_request_add(rq); +err_batch: + i915_vma_unpin(batch); +err_vma: + i915_vma_unpin(vma); + + return err; +} + +#define TEST_IDLE BIT(0) +#define TEST_BUSY BIT(1) +#define TEST_RESET BIT(2) + +static int +__sseu_prepare(struct drm_i915_private *i915, + const char *name, + unsigned int flags, + struct intel_context *ce, + struct igt_spinner **spin) +{ + struct i915_request *rq; + int ret; + + *spin = NULL; + if (!(flags & (TEST_BUSY | TEST_RESET))) + return 0; + + *spin = kzalloc(sizeof(**spin), GFP_KERNEL); + if (!*spin) + return -ENOMEM; + + ret = igt_spinner_init(*spin, i915); + if (ret) + goto err_free; + + rq = igt_spinner_create_request(*spin, + ce->gem_context, + ce->engine, + MI_NOOP); + if (IS_ERR(rq)) { + ret = PTR_ERR(rq); + goto err_fini; + } + + i915_request_add(rq); + + if (!igt_wait_for_spinner(*spin, rq)) { + pr_err("%s: Spinner failed to start!\n", name); + ret = -ETIMEDOUT; + goto err_end; + } + + return 0; + +err_end: + igt_spinner_end(*spin); +err_fini: + igt_spinner_fini(*spin); +err_free: + kfree(fetch_and_zero(spin)); + return ret; +} + +static int +__read_slice_count(struct drm_i915_private *i915, + struct intel_context *ce, + struct drm_i915_gem_object *obj, + struct igt_spinner *spin, + u32 *rpcs) +{ + struct i915_request *rq = NULL; + u32 s_mask, s_shift; + unsigned int cnt; + u32 *buf, val; + long ret; + + ret = emit_rpcs_query(obj, ce, &rq); + if (ret) + return ret; + + if (spin) + igt_spinner_end(spin); + + ret = i915_request_wait(rq, I915_WAIT_LOCKED, MAX_SCHEDULE_TIMEOUT); + i915_request_put(rq); + if (ret < 0) + return ret; + + buf = i915_gem_object_pin_map(obj, I915_MAP_WB); + if (IS_ERR(buf)) { + ret = PTR_ERR(buf); + return ret; + } + + if (INTEL_GEN(i915) >= 11) { + s_mask = GEN11_RPCS_S_CNT_MASK; + s_shift = GEN11_RPCS_S_CNT_SHIFT; + } else { + s_mask = GEN8_RPCS_S_CNT_MASK; + s_shift = GEN8_RPCS_S_CNT_SHIFT; + } + + val = *buf; + cnt = (val & s_mask) >> s_shift; + *rpcs = val; + + i915_gem_object_unpin_map(obj); + + return cnt; +} + +static int +__check_rpcs(const char *name, u32 rpcs, int slices, unsigned int expected, + const char *prefix, const char *suffix) +{ + if (slices == expected) + return 0; + + if (slices < 0) { + pr_err("%s: %s read slice count failed with %d%s\n", + name, prefix, slices, suffix); + return slices; + } + + pr_err("%s: %s slice count %d is not %u%s\n", + name, prefix, slices, expected, suffix); + + pr_info("RPCS=0x%x; %u%sx%u%s\n", + rpcs, slices, + (rpcs & GEN8_RPCS_S_CNT_ENABLE) ? "*" : "", + (rpcs & GEN8_RPCS_SS_CNT_MASK) >> GEN8_RPCS_SS_CNT_SHIFT, + (rpcs & GEN8_RPCS_SS_CNT_ENABLE) ? 
"*" : ""); + + return -EINVAL; +} + +static int +__sseu_finish(struct drm_i915_private *i915, + const char *name, + unsigned int flags, + struct intel_context *ce, + struct drm_i915_gem_object *obj, + unsigned int expected, + struct igt_spinner *spin) +{ + unsigned int slices = hweight32(ce->engine->sseu.slice_mask); + u32 rpcs = 0; + int ret = 0; + + if (flags & TEST_RESET) { + ret = i915_reset_engine(ce->engine, "sseu"); + if (ret) + goto out; + } + + ret = __read_slice_count(i915, ce, obj, + flags & TEST_RESET ? NULL : spin, &rpcs); + ret = __check_rpcs(name, rpcs, ret, expected, "Context", "!"); + if (ret) + goto out; + + ret = __read_slice_count(i915, ce->engine->kernel_context, obj, + NULL, &rpcs); + ret = __check_rpcs(name, rpcs, ret, slices, "Kernel context", "!"); + +out: + if (spin) + igt_spinner_end(spin); + + if ((flags & TEST_IDLE) && ret == 0) { + ret = i915_gem_wait_for_idle(i915, + I915_WAIT_LOCKED, + MAX_SCHEDULE_TIMEOUT); + if (ret) + return ret; + + ret = __read_slice_count(i915, ce, obj, NULL, &rpcs); + ret = __check_rpcs(name, rpcs, ret, expected, + "Context", " after idle!"); + } + + return ret; +} + +static int +__sseu_test(struct drm_i915_private *i915, + const char *name, + unsigned int flags, + struct intel_context *ce, + struct drm_i915_gem_object *obj, + struct intel_sseu sseu) +{ + struct igt_spinner *spin = NULL; + int ret; + + ret = __sseu_prepare(i915, name, flags, ce, &spin); + if (ret) + return ret; + + ret = __intel_context_reconfigure_sseu(ce, sseu); + if (ret) + goto out_spin; + + ret = __sseu_finish(i915, name, flags, ce, obj, + hweight32(sseu.slice_mask), spin); + +out_spin: + if (spin) { + igt_spinner_end(spin); + igt_spinner_fini(spin); + kfree(spin); + } + return ret; +} + +static int +__igt_ctx_sseu(struct drm_i915_private *i915, + const char *name, + unsigned int flags) +{ + struct intel_engine_cs *engine = i915->engine[RCS0]; + struct intel_sseu default_sseu = engine->sseu; + struct drm_i915_gem_object *obj; + struct i915_gem_context *ctx; + struct intel_context *ce; + struct intel_sseu pg_sseu; + intel_wakeref_t wakeref; + struct drm_file *file; + int ret; + + if (INTEL_GEN(i915) < 9) + return 0; + + if (!RUNTIME_INFO(i915)->sseu.has_slice_pg) + return 0; + + if (hweight32(default_sseu.slice_mask) < 2) + return 0; + + /* + * Gen11 VME friendly power-gated configuration with half enabled + * sub-slices. + */ + pg_sseu = default_sseu; + pg_sseu.slice_mask = 1; + pg_sseu.subslice_mask = + ~(~0 << (hweight32(default_sseu.subslice_mask) / 2)); + + pr_info("SSEU subtest '%s', flags=%x, def_slices=%u, pg_slices=%u\n", + name, flags, hweight32(default_sseu.slice_mask), + hweight32(pg_sseu.slice_mask)); + + file = mock_file(i915); + if (IS_ERR(file)) + return PTR_ERR(file); + + if (flags & TEST_RESET) + igt_global_reset_lock(i915); + + mutex_lock(&i915->drm.struct_mutex); + + ctx = live_context(i915, file); + if (IS_ERR(ctx)) { + ret = PTR_ERR(ctx); + goto out_unlock; + } + i915_gem_context_clear_bannable(ctx); /* to reset and beyond! */ + + obj = i915_gem_object_create_internal(i915, PAGE_SIZE); + if (IS_ERR(obj)) { + ret = PTR_ERR(obj); + goto out_unlock; + } + + wakeref = intel_runtime_pm_get(i915); + + ce = i915_gem_context_get_engine(ctx, RCS0); + if (IS_ERR(ce)) { + ret = PTR_ERR(ce); + goto out_rpm; + } + + ret = intel_context_pin(ce); + if (ret) + goto out_context; + + /* First set the default mask. */ + ret = __sseu_test(i915, name, flags, ce, obj, default_sseu); + if (ret) + goto out_fail; + + /* Then set a power-gated configuration. 
*/ + ret = __sseu_test(i915, name, flags, ce, obj, pg_sseu); + if (ret) + goto out_fail; + + /* Back to defaults. */ + ret = __sseu_test(i915, name, flags, ce, obj, default_sseu); + if (ret) + goto out_fail; + + /* One last power-gated configuration for the road. */ + ret = __sseu_test(i915, name, flags, ce, obj, pg_sseu); + if (ret) + goto out_fail; + +out_fail: + if (igt_flush_test(i915, I915_WAIT_LOCKED)) + ret = -EIO; + + intel_context_unpin(ce); +out_context: + intel_context_put(ce); +out_rpm: + intel_runtime_pm_put(i915, wakeref); + i915_gem_object_put(obj); + +out_unlock: + mutex_unlock(&i915->drm.struct_mutex); + + if (flags & TEST_RESET) + igt_global_reset_unlock(i915); + + mock_file_free(i915, file); + + if (ret) + pr_err("%s: Failed with %d!\n", name, ret); + + return ret; +} + +static int igt_ctx_sseu(void *arg) +{ + struct { + const char *name; + unsigned int flags; + } *phase, phases[] = { + { .name = "basic", .flags = 0 }, + { .name = "idle", .flags = TEST_IDLE }, + { .name = "busy", .flags = TEST_BUSY }, + { .name = "busy-reset", .flags = TEST_BUSY | TEST_RESET }, + { .name = "busy-idle", .flags = TEST_BUSY | TEST_IDLE }, + { .name = "reset-idle", .flags = TEST_RESET | TEST_IDLE }, + }; + unsigned int i; + int ret = 0; + + for (i = 0, phase = phases; ret == 0 && i < ARRAY_SIZE(phases); + i++, phase++) + ret = __igt_ctx_sseu(arg, phase->name, phase->flags); + + return ret; +} + +static int igt_ctx_readonly(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct drm_i915_gem_object *obj = NULL; + struct i915_gem_context *ctx; + struct i915_hw_ppgtt *ppgtt; + unsigned long idx, ndwords, dw; + struct igt_live_test t; + struct drm_file *file; + I915_RND_STATE(prng); + IGT_TIMEOUT(end_time); + LIST_HEAD(objects); + int err = -ENODEV; + + /* + * Create a few read-only objects (with the occasional writable object) + * and try to write into these object checking that the GPU discards + * any write to a read-only object. + */ + + file = mock_file(i915); + if (IS_ERR(file)) + return PTR_ERR(file); + + mutex_lock(&i915->drm.struct_mutex); + + err = igt_live_test_begin(&t, i915, __func__, ""); + if (err) + goto out_unlock; + + ctx = live_context(i915, file); + if (IS_ERR(ctx)) { + err = PTR_ERR(ctx); + goto out_unlock; + } + + ppgtt = ctx->ppgtt ?: i915->mm.aliasing_ppgtt; + if (!ppgtt || !ppgtt->vm.has_read_only) { + err = 0; + goto out_unlock; + } + + ndwords = 0; + dw = 0; + while (!time_after(jiffies, end_time)) { + struct intel_engine_cs *engine; + unsigned int id; + + for_each_engine(engine, i915, id) { + intel_wakeref_t wakeref; + + if (!intel_engine_can_store_dword(engine)) + continue; + + if (!obj) { + obj = create_test_object(ctx, file, &objects); + if (IS_ERR(obj)) { + err = PTR_ERR(obj); + goto out_unlock; + } + + if (prandom_u32_state(&prng) & 1) + i915_gem_object_set_readonly(obj); + } + + err = 0; + with_intel_runtime_pm(i915, wakeref) + err = gpu_fill(obj, ctx, engine, dw); + if (err) { + pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? 
%s], err=%d\n", + ndwords, dw, max_dwords(obj), + engine->name, ctx->hw_id, + yesno(!!ctx->ppgtt), err); + goto out_unlock; + } + + if (++dw == max_dwords(obj)) { + obj = NULL; + dw = 0; + } + ndwords++; + } + } + pr_info("Submitted %lu dwords (across %u engines)\n", + ndwords, RUNTIME_INFO(i915)->num_engines); + + dw = 0; + idx = 0; + list_for_each_entry(obj, &objects, st_link) { + unsigned int rem = + min_t(unsigned int, ndwords - dw, max_dwords(obj)); + unsigned int num_writes; + + num_writes = rem; + if (i915_gem_object_is_readonly(obj)) + num_writes = 0; + + err = cpu_check(obj, idx++, num_writes); + if (err) + break; + + dw += rem; + } + +out_unlock: + if (igt_live_test_end(&t)) + err = -EIO; + mutex_unlock(&i915->drm.struct_mutex); + + mock_file_free(i915, file); + return err; +} + +static int check_scratch(struct i915_gem_context *ctx, u64 offset) +{ + struct drm_mm_node *node = + __drm_mm_interval_first(&ctx->ppgtt->vm.mm, + offset, offset + sizeof(u32) - 1); + if (!node || node->start > offset) + return 0; + + GEM_BUG_ON(offset >= node->start + node->size); + + pr_err("Target offset 0x%08x_%08x overlaps with a node in the mm!\n", + upper_32_bits(offset), lower_32_bits(offset)); + return -EINVAL; +} + +static int write_to_scratch(struct i915_gem_context *ctx, + struct intel_engine_cs *engine, + u64 offset, u32 value) +{ + struct drm_i915_private *i915 = ctx->i915; + struct drm_i915_gem_object *obj; + struct i915_request *rq; + struct i915_vma *vma; + u32 *cmd; + int err; + + GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE); + + obj = i915_gem_object_create_internal(i915, PAGE_SIZE); + if (IS_ERR(obj)) + return PTR_ERR(obj); + + cmd = i915_gem_object_pin_map(obj, I915_MAP_WB); + if (IS_ERR(cmd)) { + err = PTR_ERR(cmd); + goto err; + } + + *cmd++ = MI_STORE_DWORD_IMM_GEN4; + if (INTEL_GEN(i915) >= 8) { + *cmd++ = lower_32_bits(offset); + *cmd++ = upper_32_bits(offset); + } else { + *cmd++ = 0; + *cmd++ = offset; + } + *cmd++ = value; + *cmd = MI_BATCH_BUFFER_END; + __i915_gem_object_flush_map(obj, 0, 64); + i915_gem_object_unpin_map(obj); + + vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto err; + } + + err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED); + if (err) + goto err; + + err = check_scratch(ctx, offset); + if (err) + goto err_unpin; + + rq = igt_request_alloc(ctx, engine); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err_unpin; + } + + err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0); + if (err) + goto err_request; + + err = i915_vma_move_to_active(vma, rq, 0); + if (err) + goto skip_request; + + i915_gem_object_set_active_reference(obj); + i915_vma_unpin(vma); + i915_vma_close(vma); + + i915_request_add(rq); + + return 0; + +skip_request: + i915_request_skip(rq, err); +err_request: + i915_request_add(rq); +err_unpin: + i915_vma_unpin(vma); +err: + i915_gem_object_put(obj); + return err; +} + +static int read_from_scratch(struct i915_gem_context *ctx, + struct intel_engine_cs *engine, + u64 offset, u32 *value) +{ + struct drm_i915_private *i915 = ctx->i915; + struct drm_i915_gem_object *obj; + const u32 RCS_GPR0 = 0x2600; /* not all engines have their own GPR! 
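+ * (Descriptive note: the batch below stages the read through the render
+ * engine's GPR0 - it loads the scratch dword into GPR0 and then stores
+ * GPR0 back into this batch object at offset 'result' for the CPU to
+ * read out.)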
*/ + const u32 result = 0x100; + struct i915_request *rq; + struct i915_vma *vma; + u32 *cmd; + int err; + + GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE); + + obj = i915_gem_object_create_internal(i915, PAGE_SIZE); + if (IS_ERR(obj)) + return PTR_ERR(obj); + + cmd = i915_gem_object_pin_map(obj, I915_MAP_WB); + if (IS_ERR(cmd)) { + err = PTR_ERR(cmd); + goto err; + } + + memset(cmd, POISON_INUSE, PAGE_SIZE); + if (INTEL_GEN(i915) >= 8) { + *cmd++ = MI_LOAD_REGISTER_MEM_GEN8; + *cmd++ = RCS_GPR0; + *cmd++ = lower_32_bits(offset); + *cmd++ = upper_32_bits(offset); + *cmd++ = MI_STORE_REGISTER_MEM_GEN8; + *cmd++ = RCS_GPR0; + *cmd++ = result; + *cmd++ = 0; + } else { + *cmd++ = MI_LOAD_REGISTER_MEM; + *cmd++ = RCS_GPR0; + *cmd++ = offset; + *cmd++ = MI_STORE_REGISTER_MEM; + *cmd++ = RCS_GPR0; + *cmd++ = result; + } + *cmd = MI_BATCH_BUFFER_END; + + i915_gem_object_flush_map(obj); + i915_gem_object_unpin_map(obj); + + vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto err; + } + + err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED); + if (err) + goto err; + + err = check_scratch(ctx, offset); + if (err) + goto err_unpin; + + rq = igt_request_alloc(ctx, engine); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err_unpin; + } + + err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0); + if (err) + goto err_request; + + err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); + if (err) + goto skip_request; + + i915_vma_unpin(vma); + i915_vma_close(vma); + + i915_request_add(rq); + + err = i915_gem_object_set_to_cpu_domain(obj, false); + if (err) + goto err; + + cmd = i915_gem_object_pin_map(obj, I915_MAP_WB); + if (IS_ERR(cmd)) { + err = PTR_ERR(cmd); + goto err; + } + + *value = cmd[result / sizeof(*cmd)]; + i915_gem_object_unpin_map(obj); + i915_gem_object_put(obj); + + return 0; + +skip_request: + i915_request_skip(rq, err); +err_request: + i915_request_add(rq); +err_unpin: + i915_vma_unpin(vma); +err: + i915_gem_object_put(obj); + return err; +} + +static int igt_vm_isolation(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct i915_gem_context *ctx_a, *ctx_b; + struct intel_engine_cs *engine; + intel_wakeref_t wakeref; + struct igt_live_test t; + struct drm_file *file; + I915_RND_STATE(prng); + unsigned long count; + unsigned int id; + u64 vm_total; + int err; + + if (INTEL_GEN(i915) < 7) + return 0; + + /* + * The simple goal here is that a write into one context is not + * observed in a second (separate page tables and scratch). 
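+ *
+ * (Descriptive note: for each engine we pick random offsets, write
+ * 0xdeadbeef through ctx_a and read the same offset back through ctx_b;
+ * any non-zero value read back means the two ppgtts are not isolated.)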
+ */ + + file = mock_file(i915); + if (IS_ERR(file)) + return PTR_ERR(file); + + mutex_lock(&i915->drm.struct_mutex); + + err = igt_live_test_begin(&t, i915, __func__, ""); + if (err) + goto out_unlock; + + ctx_a = live_context(i915, file); + if (IS_ERR(ctx_a)) { + err = PTR_ERR(ctx_a); + goto out_unlock; + } + + ctx_b = live_context(i915, file); + if (IS_ERR(ctx_b)) { + err = PTR_ERR(ctx_b); + goto out_unlock; + } + + /* We can only test vm isolation, if the vm are distinct */ + if (ctx_a->ppgtt == ctx_b->ppgtt) + goto out_unlock; + + vm_total = ctx_a->ppgtt->vm.total; + GEM_BUG_ON(ctx_b->ppgtt->vm.total != vm_total); + vm_total -= I915_GTT_PAGE_SIZE; + + wakeref = intel_runtime_pm_get(i915); + + count = 0; + for_each_engine(engine, i915, id) { + IGT_TIMEOUT(end_time); + unsigned long this = 0; + + if (!intel_engine_can_store_dword(engine)) + continue; + + while (!__igt_timeout(end_time, NULL)) { + u32 value = 0xc5c5c5c5; + u64 offset; + + div64_u64_rem(i915_prandom_u64_state(&prng), + vm_total, &offset); + offset &= -sizeof(u32); + offset += I915_GTT_PAGE_SIZE; + + err = write_to_scratch(ctx_a, engine, + offset, 0xdeadbeef); + if (err == 0) + err = read_from_scratch(ctx_b, engine, + offset, &value); + if (err) + goto out_rpm; + + if (value) { + pr_err("%s: Read %08x from scratch (offset 0x%08x_%08x), after %lu reads!\n", + engine->name, value, + upper_32_bits(offset), + lower_32_bits(offset), + this); + err = -EINVAL; + goto out_rpm; + } + + this++; + } + count += this; + } + pr_info("Checked %lu scratch offsets across %d engines\n", + count, RUNTIME_INFO(i915)->num_engines); + +out_rpm: + intel_runtime_pm_put(i915, wakeref); +out_unlock: + if (igt_live_test_end(&t)) + err = -EIO; + mutex_unlock(&i915->drm.struct_mutex); + + mock_file_free(i915, file); + return err; +} + +static __maybe_unused const char * +__engine_name(struct drm_i915_private *i915, intel_engine_mask_t engines) +{ + struct intel_engine_cs *engine; + intel_engine_mask_t tmp; + + if (engines == ALL_ENGINES) + return "all"; + + for_each_engine_masked(engine, i915, engines, tmp) + return engine->name; + + return "none"; +} + +static void mock_barrier_task(void *data) +{ + unsigned int *counter = data; + + ++*counter; +} + +static int mock_context_barrier(void *arg) +{ +#undef pr_fmt +#define pr_fmt(x) "context_barrier_task():" # x + struct drm_i915_private *i915 = arg; + struct i915_gem_context *ctx; + struct i915_request *rq; + unsigned int counter; + int err; + + /* + * The context barrier provides us with a callback after it emits + * a request; useful for retiring old state after loading new. 
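+ *
+ * (Descriptive note: the checks below verify that the callback runs
+ * immediately when no engines are selected or all engines are idle,
+ * that it is not invoked when a fault is injected on RCS0, and that it
+ * still runs once a real request on RCS0 has been flushed.)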
+ */ + + mutex_lock(&i915->drm.struct_mutex); + + ctx = mock_context(i915, "mock"); + if (!ctx) { + err = -ENOMEM; + goto unlock; + } + + counter = 0; + err = context_barrier_task(ctx, 0, + NULL, mock_barrier_task, &counter); + if (err) { + pr_err("Failed at line %d, err=%d\n", __LINE__, err); + goto out; + } + if (counter == 0) { + pr_err("Did not retire immediately with 0 engines\n"); + err = -EINVAL; + goto out; + } + + counter = 0; + err = context_barrier_task(ctx, ALL_ENGINES, + NULL, mock_barrier_task, &counter); + if (err) { + pr_err("Failed at line %d, err=%d\n", __LINE__, err); + goto out; + } + if (counter == 0) { + pr_err("Did not retire immediately for all unused engines\n"); + err = -EINVAL; + goto out; + } + + rq = igt_request_alloc(ctx, i915->engine[RCS0]); + if (IS_ERR(rq)) { + pr_err("Request allocation failed!\n"); + goto out; + } + i915_request_add(rq); + + counter = 0; + context_barrier_inject_fault = BIT(RCS0); + err = context_barrier_task(ctx, ALL_ENGINES, + NULL, mock_barrier_task, &counter); + context_barrier_inject_fault = 0; + if (err == -ENXIO) + err = 0; + else + pr_err("Did not hit fault injection!\n"); + if (counter != 0) { + pr_err("Invoked callback on error!\n"); + err = -EIO; + } + if (err) + goto out; + + counter = 0; + err = context_barrier_task(ctx, ALL_ENGINES, + NULL, mock_barrier_task, &counter); + if (err) { + pr_err("Failed at line %d, err=%d\n", __LINE__, err); + goto out; + } + mock_device_flush(i915); + if (counter == 0) { + pr_err("Did not retire on each active engines\n"); + err = -EINVAL; + goto out; + } + +out: + mock_context_close(ctx); +unlock: + mutex_unlock(&i915->drm.struct_mutex); + return err; +#undef pr_fmt +#define pr_fmt(x) x +} + +int i915_gem_context_mock_selftests(void) +{ + static const struct i915_subtest tests[] = { + SUBTEST(mock_context_barrier), + }; + struct drm_i915_private *i915; + int err; + + i915 = mock_gem_device(); + if (!i915) + return -ENOMEM; + + err = i915_subtests(tests, i915); + + drm_dev_put(&i915->drm); + return err; +} + +int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv) +{ + static const struct i915_subtest tests[] = { + SUBTEST(live_nop_switch), + SUBTEST(igt_ctx_exec), + SUBTEST(igt_ctx_readonly), + SUBTEST(igt_ctx_sseu), + SUBTEST(igt_shared_ctx_exec), + SUBTEST(igt_vm_isolation), + }; + + if (i915_terminally_wedged(dev_priv)) + return 0; + + return i915_subtests(tests, dev_priv); +} diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c new file mode 100644 index 000000000000..b7431712de66 --- /dev/null +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c @@ -0,0 +1,386 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2016 Intel Corporation + */ + +#include "i915_selftest.h" + +#include "mock_dmabuf.h" +#include "selftests/mock_gem_device.h" + +static int igt_dmabuf_export(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct drm_i915_gem_object *obj; + struct dma_buf *dmabuf; + + obj = i915_gem_object_create_shmem(i915, PAGE_SIZE); + if (IS_ERR(obj)) + return PTR_ERR(obj); + + dmabuf = i915_gem_prime_export(&i915->drm, &obj->base, 0); + i915_gem_object_put(obj); + if (IS_ERR(dmabuf)) { + pr_err("i915_gem_prime_export failed with err=%d\n", + (int)PTR_ERR(dmabuf)); + return PTR_ERR(dmabuf); + } + + dma_buf_put(dmabuf); + return 0; +} + +static int igt_dmabuf_import_self(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct drm_i915_gem_object *obj; + struct drm_gem_object *import; + 
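+ /*
+ * Descriptive note (not part of the original patch): re-importing the
+ * dma-buf exported from obj below should resolve back to the original
+ * GEM object (import == &obj->base) rather than wrapping it in a new
+ * object.
+ */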
struct dma_buf *dmabuf; + int err; + + obj = i915_gem_object_create_shmem(i915, PAGE_SIZE); + if (IS_ERR(obj)) + return PTR_ERR(obj); + + dmabuf = i915_gem_prime_export(&i915->drm, &obj->base, 0); + if (IS_ERR(dmabuf)) { + pr_err("i915_gem_prime_export failed with err=%d\n", + (int)PTR_ERR(dmabuf)); + err = PTR_ERR(dmabuf); + goto out; + } + + import = i915_gem_prime_import(&i915->drm, dmabuf); + if (IS_ERR(import)) { + pr_err("i915_gem_prime_import failed with err=%d\n", + (int)PTR_ERR(import)); + err = PTR_ERR(import); + goto out_dmabuf; + } + + if (import != &obj->base) { + pr_err("i915_gem_prime_import created a new object!\n"); + err = -EINVAL; + goto out_import; + } + + err = 0; +out_import: + i915_gem_object_put(to_intel_bo(import)); +out_dmabuf: + dma_buf_put(dmabuf); +out: + i915_gem_object_put(obj); + return err; +} + +static int igt_dmabuf_import(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct drm_i915_gem_object *obj; + struct dma_buf *dmabuf; + void *obj_map, *dma_map; + u32 pattern[] = { 0, 0xaa, 0xcc, 0x55, 0xff }; + int err, i; + + dmabuf = mock_dmabuf(1); + if (IS_ERR(dmabuf)) + return PTR_ERR(dmabuf); + + obj = to_intel_bo(i915_gem_prime_import(&i915->drm, dmabuf)); + if (IS_ERR(obj)) { + pr_err("i915_gem_prime_import failed with err=%d\n", + (int)PTR_ERR(obj)); + err = PTR_ERR(obj); + goto out_dmabuf; + } + + if (obj->base.dev != &i915->drm) { + pr_err("i915_gem_prime_import created a non-i915 object!\n"); + err = -EINVAL; + goto out_obj; + } + + if (obj->base.size != PAGE_SIZE) { + pr_err("i915_gem_prime_import is wrong size found %lld, expected %ld\n", + (long long)obj->base.size, PAGE_SIZE); + err = -EINVAL; + goto out_obj; + } + + dma_map = dma_buf_vmap(dmabuf); + if (!dma_map) { + pr_err("dma_buf_vmap failed\n"); + err = -ENOMEM; + goto out_obj; + } + + if (0) { /* Can not yet map dmabuf */ + obj_map = i915_gem_object_pin_map(obj, I915_MAP_WB); + if (IS_ERR(obj_map)) { + err = PTR_ERR(obj_map); + pr_err("i915_gem_object_pin_map failed with err=%d\n", err); + goto out_dma_map; + } + + for (i = 0; i < ARRAY_SIZE(pattern); i++) { + memset(dma_map, pattern[i], PAGE_SIZE); + if (memchr_inv(obj_map, pattern[i], PAGE_SIZE)) { + err = -EINVAL; + pr_err("imported vmap not all set to %x!\n", pattern[i]); + i915_gem_object_unpin_map(obj); + goto out_dma_map; + } + } + + for (i = 0; i < ARRAY_SIZE(pattern); i++) { + memset(obj_map, pattern[i], PAGE_SIZE); + if (memchr_inv(dma_map, pattern[i], PAGE_SIZE)) { + err = -EINVAL; + pr_err("exported vmap not all set to %x!\n", pattern[i]); + i915_gem_object_unpin_map(obj); + goto out_dma_map; + } + } + + i915_gem_object_unpin_map(obj); + } + + err = 0; +out_dma_map: + dma_buf_vunmap(dmabuf, dma_map); +out_obj: + i915_gem_object_put(obj); +out_dmabuf: + dma_buf_put(dmabuf); + return err; +} + +static int igt_dmabuf_import_ownership(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct drm_i915_gem_object *obj; + struct dma_buf *dmabuf; + void *ptr; + int err; + + dmabuf = mock_dmabuf(1); + if (IS_ERR(dmabuf)) + return PTR_ERR(dmabuf); + + ptr = dma_buf_vmap(dmabuf); + if (!ptr) { + pr_err("dma_buf_vmap failed\n"); + err = -ENOMEM; + goto err_dmabuf; + } + + memset(ptr, 0xc5, PAGE_SIZE); + dma_buf_vunmap(dmabuf, ptr); + + obj = to_intel_bo(i915_gem_prime_import(&i915->drm, dmabuf)); + if (IS_ERR(obj)) { + pr_err("i915_gem_prime_import failed with err=%d\n", + (int)PTR_ERR(obj)); + err = PTR_ERR(obj); + goto err_dmabuf; + } + + dma_buf_put(dmabuf); + + err = i915_gem_object_pin_pages(obj); + if (err) { + 
pr_err("i915_gem_object_pin_pages failed with err=%d\n", err); + goto out_obj; + } + + err = 0; + i915_gem_object_unpin_pages(obj); +out_obj: + i915_gem_object_put(obj); + return err; + +err_dmabuf: + dma_buf_put(dmabuf); + return err; +} + +static int igt_dmabuf_export_vmap(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct drm_i915_gem_object *obj; + struct dma_buf *dmabuf; + void *ptr; + int err; + + obj = i915_gem_object_create_shmem(i915, PAGE_SIZE); + if (IS_ERR(obj)) + return PTR_ERR(obj); + + dmabuf = i915_gem_prime_export(&i915->drm, &obj->base, 0); + if (IS_ERR(dmabuf)) { + pr_err("i915_gem_prime_export failed with err=%d\n", + (int)PTR_ERR(dmabuf)); + err = PTR_ERR(dmabuf); + goto err_obj; + } + i915_gem_object_put(obj); + + ptr = dma_buf_vmap(dmabuf); + if (!ptr) { + pr_err("dma_buf_vmap failed\n"); + err = -ENOMEM; + goto out; + } + + if (memchr_inv(ptr, 0, dmabuf->size)) { + pr_err("Exported object not initialiased to zero!\n"); + err = -EINVAL; + goto out; + } + + memset(ptr, 0xc5, dmabuf->size); + + err = 0; + dma_buf_vunmap(dmabuf, ptr); +out: + dma_buf_put(dmabuf); + return err; + +err_obj: + i915_gem_object_put(obj); + return err; +} + +static int igt_dmabuf_export_kmap(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct drm_i915_gem_object *obj; + struct dma_buf *dmabuf; + void *ptr; + int err; + + obj = i915_gem_object_create_shmem(i915, 2 * PAGE_SIZE); + if (IS_ERR(obj)) + return PTR_ERR(obj); + + dmabuf = i915_gem_prime_export(&i915->drm, &obj->base, 0); + i915_gem_object_put(obj); + if (IS_ERR(dmabuf)) { + err = PTR_ERR(dmabuf); + pr_err("i915_gem_prime_export failed with err=%d\n", err); + return err; + } + + ptr = dma_buf_kmap(dmabuf, 0); + if (!ptr) { + pr_err("dma_buf_kmap failed\n"); + err = -ENOMEM; + goto err; + } + + if (memchr_inv(ptr, 0, PAGE_SIZE)) { + dma_buf_kunmap(dmabuf, 0, ptr); + pr_err("Exported page[0] not initialiased to zero!\n"); + err = -EINVAL; + goto err; + } + + memset(ptr, 0xc5, PAGE_SIZE); + dma_buf_kunmap(dmabuf, 0, ptr); + + ptr = i915_gem_object_pin_map(obj, I915_MAP_WB); + if (IS_ERR(ptr)) { + err = PTR_ERR(ptr); + pr_err("i915_gem_object_pin_map failed with err=%d\n", err); + goto err; + } + memset(ptr + PAGE_SIZE, 0xaa, PAGE_SIZE); + i915_gem_object_flush_map(obj); + i915_gem_object_unpin_map(obj); + + ptr = dma_buf_kmap(dmabuf, 1); + if (!ptr) { + pr_err("dma_buf_kmap failed\n"); + err = -ENOMEM; + goto err; + } + + if (memchr_inv(ptr, 0xaa, PAGE_SIZE)) { + dma_buf_kunmap(dmabuf, 1, ptr); + pr_err("Exported page[1] not set to 0xaa!\n"); + err = -EINVAL; + goto err; + } + + memset(ptr, 0xc5, PAGE_SIZE); + dma_buf_kunmap(dmabuf, 1, ptr); + + ptr = dma_buf_kmap(dmabuf, 0); + if (!ptr) { + pr_err("dma_buf_kmap failed\n"); + err = -ENOMEM; + goto err; + } + if (memchr_inv(ptr, 0xc5, PAGE_SIZE)) { + dma_buf_kunmap(dmabuf, 0, ptr); + pr_err("Exported page[0] did not retain 0xc5!\n"); + err = -EINVAL; + goto err; + } + dma_buf_kunmap(dmabuf, 0, ptr); + + ptr = dma_buf_kmap(dmabuf, 2); + if (ptr) { + pr_err("Erroneously kmapped beyond the end of the object!\n"); + dma_buf_kunmap(dmabuf, 2, ptr); + err = -EINVAL; + goto err; + } + + ptr = dma_buf_kmap(dmabuf, -1); + if (ptr) { + pr_err("Erroneously kmapped before the start of the object!\n"); + dma_buf_kunmap(dmabuf, -1, ptr); + err = -EINVAL; + goto err; + } + + err = 0; +err: + dma_buf_put(dmabuf); + return err; +} + +int i915_gem_dmabuf_mock_selftests(void) +{ + static const struct i915_subtest tests[] = { + SUBTEST(igt_dmabuf_export), + 
SUBTEST(igt_dmabuf_import_self), + SUBTEST(igt_dmabuf_import), + SUBTEST(igt_dmabuf_import_ownership), + SUBTEST(igt_dmabuf_export_vmap), + SUBTEST(igt_dmabuf_export_kmap), + }; + struct drm_i915_private *i915; + int err; + + i915 = mock_gem_device(); + if (!i915) + return -ENOMEM; + + err = i915_subtests(tests, i915); + + drm_dev_put(&i915->drm); + return err; +} + +int i915_gem_dmabuf_live_selftests(struct drm_i915_private *i915) +{ + static const struct i915_subtest tests[] = { + SUBTEST(igt_dmabuf_export), + }; + + return i915_subtests(tests, i915); +} diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c index 87da01230179..12c90d8fe0fb 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c @@ -7,8 +7,8 @@ #include #include "gt/intel_gt_pm.h" +#include "huge_gem_object.h" #include "i915_selftest.h" -#include "selftests/huge_gem_object.h" #include "selftests/igt_flush_test.h" struct tile { diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c new file mode 100644 index 000000000000..2b6db6f799de --- /dev/null +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c @@ -0,0 +1,99 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2016 Intel Corporation + */ + +#include "i915_selftest.h" + +#include "huge_gem_object.h" +#include "selftests/igt_flush_test.h" +#include "selftests/mock_gem_device.h" + +static int igt_gem_object(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct drm_i915_gem_object *obj; + int err = -ENOMEM; + + /* Basic test to ensure we can create an object */ + + obj = i915_gem_object_create_shmem(i915, PAGE_SIZE); + if (IS_ERR(obj)) { + err = PTR_ERR(obj); + pr_err("i915_gem_object_create failed, err=%d\n", err); + goto out; + } + + err = 0; + i915_gem_object_put(obj); +out: + return err; +} + +static int igt_gem_huge(void *arg) +{ + const unsigned int nreal = 509; /* just to be awkward */ + struct drm_i915_private *i915 = arg; + struct drm_i915_gem_object *obj; + unsigned int n; + int err; + + /* Basic sanitycheck of our huge fake object allocation */ + + obj = huge_gem_object(i915, + nreal * PAGE_SIZE, + i915->ggtt.vm.total + PAGE_SIZE); + if (IS_ERR(obj)) + return PTR_ERR(obj); + + err = i915_gem_object_pin_pages(obj); + if (err) { + pr_err("Failed to allocate %u pages (%lu total), err=%d\n", + nreal, obj->base.size / PAGE_SIZE, err); + goto out; + } + + for (n = 0; n < obj->base.size / PAGE_SIZE; n++) { + if (i915_gem_object_get_page(obj, n) != + i915_gem_object_get_page(obj, n % nreal)) { + pr_err("Page lookup mismatch at index %u [%u]\n", + n, n % nreal); + err = -EINVAL; + goto out_unpin; + } + } + +out_unpin: + i915_gem_object_unpin_pages(obj); +out: + i915_gem_object_put(obj); + return err; +} + +int i915_gem_object_mock_selftests(void) +{ + static const struct i915_subtest tests[] = { + SUBTEST(igt_gem_object), + }; + struct drm_i915_private *i915; + int err; + + i915 = mock_gem_device(); + if (!i915) + return -ENOMEM; + + err = i915_subtests(tests, i915); + + drm_dev_put(&i915->drm); + return err; +} + +int i915_gem_object_live_selftests(struct drm_i915_private *i915) +{ + static const struct i915_subtest tests[] = { + SUBTEST(igt_gem_huge), + }; + + return i915_subtests(tests, i915); +} diff --git a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c new file mode 100644 index 
000000000000..b232e6d2cd92 --- /dev/null +++ b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c @@ -0,0 +1,34 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2018 Intel Corporation + */ + +#include "igt_gem_utils.h" + +#include "gem/i915_gem_context.h" +#include "gem/i915_gem_pm.h" +#include "gt/intel_context.h" + +#include "i915_request.h" + +struct i915_request * +igt_request_alloc(struct i915_gem_context *ctx, struct intel_engine_cs *engine) +{ + struct intel_context *ce; + struct i915_request *rq; + + /* + * Pinning the contexts may generate requests in order to acquire + * GGTT space, so do this first before we reserve a seqno for + * ourselves. + */ + ce = i915_gem_context_get_engine(ctx, engine->id); + if (IS_ERR(ce)) + return ERR_CAST(ce); + + rq = intel_context_create_request(ce); + intel_context_put(ce); + + return rq; +} diff --git a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h new file mode 100644 index 000000000000..0f17251cf75d --- /dev/null +++ b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h @@ -0,0 +1,17 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2018 Intel Corporation + */ + +#ifndef __IGT_GEM_UTILS_H__ +#define __IGT_GEM_UTILS_H__ + +struct i915_request; +struct i915_gem_context; +struct intel_engine_cs; + +struct i915_request * +igt_request_alloc(struct i915_gem_context *ctx, struct intel_engine_cs *engine); + +#endif /* __IGT_GEM_UTILS_H__ */ diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.c b/drivers/gpu/drm/i915/gem/selftests/mock_context.c new file mode 100644 index 000000000000..68d50da035e6 --- /dev/null +++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.c @@ -0,0 +1,111 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2016 Intel Corporation + */ + +#include "mock_context.h" +#include "selftests/mock_gtt.h" + +struct i915_gem_context * +mock_context(struct drm_i915_private *i915, + const char *name) +{ + struct i915_gem_context *ctx; + struct i915_gem_engines *e; + int ret; + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return NULL; + + kref_init(&ctx->ref); + INIT_LIST_HEAD(&ctx->link); + ctx->i915 = i915; + + mutex_init(&ctx->engines_mutex); + e = default_engines(ctx); + if (IS_ERR(e)) + goto err_free; + RCU_INIT_POINTER(ctx->engines, e); + + INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL); + INIT_LIST_HEAD(&ctx->handles_list); + INIT_LIST_HEAD(&ctx->hw_id_link); + mutex_init(&ctx->mutex); + + ret = i915_gem_context_pin_hw_id(ctx); + if (ret < 0) + goto err_engines; + + if (name) { + struct i915_hw_ppgtt *ppgtt; + + ctx->name = kstrdup(name, GFP_KERNEL); + if (!ctx->name) + goto err_put; + + ppgtt = mock_ppgtt(i915, name); + if (!ppgtt) + goto err_put; + + __set_ppgtt(ctx, ppgtt); + } + + return ctx; + +err_engines: + free_engines(rcu_access_pointer(ctx->engines)); +err_free: + kfree(ctx); + return NULL; + +err_put: + i915_gem_context_set_closed(ctx); + i915_gem_context_put(ctx); + return NULL; +} + +void mock_context_close(struct i915_gem_context *ctx) +{ + context_close(ctx); +} + +void mock_init_contexts(struct drm_i915_private *i915) +{ + init_contexts(i915); +} + +struct i915_gem_context * +live_context(struct drm_i915_private *i915, struct drm_file *file) +{ + struct i915_gem_context *ctx; + int err; + + lockdep_assert_held(&i915->drm.struct_mutex); + + ctx = i915_gem_create_context(i915, 0); + if (IS_ERR(ctx)) + return ctx; + + err = gem_context_register(ctx, file->driver_priv); + if (err < 0) + goto err_ctx; + + return ctx; 
+ +err_ctx: + context_close(ctx); + return ERR_PTR(err); +} + +struct i915_gem_context * +kernel_context(struct drm_i915_private *i915) +{ + return i915_gem_context_create_kernel(i915, I915_PRIORITY_NORMAL); +} + +void kernel_context_close(struct i915_gem_context *ctx) +{ + context_close(ctx); +} diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.h b/drivers/gpu/drm/i915/gem/selftests/mock_context.h new file mode 100644 index 000000000000..0b926653914f --- /dev/null +++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.h @@ -0,0 +1,24 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2016 Intel Corporation + */ + +#ifndef __MOCK_CONTEXT_H +#define __MOCK_CONTEXT_H + +void mock_init_contexts(struct drm_i915_private *i915); + +struct i915_gem_context * +mock_context(struct drm_i915_private *i915, + const char *name); + +void mock_context_close(struct i915_gem_context *ctx); + +struct i915_gem_context * +live_context(struct drm_i915_private *i915, struct drm_file *file); + +struct i915_gem_context *kernel_context(struct drm_i915_private *i915); +void kernel_context_close(struct i915_gem_context *ctx); + +#endif /* !__MOCK_CONTEXT_H */ diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c new file mode 100644 index 000000000000..b9e059d4328a --- /dev/null +++ b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c @@ -0,0 +1,144 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2016 Intel Corporation + */ + +#include "mock_dmabuf.h" + +static struct sg_table *mock_map_dma_buf(struct dma_buf_attachment *attachment, + enum dma_data_direction dir) +{ + struct mock_dmabuf *mock = to_mock(attachment->dmabuf); + struct sg_table *st; + struct scatterlist *sg; + int i, err; + + st = kmalloc(sizeof(*st), GFP_KERNEL); + if (!st) + return ERR_PTR(-ENOMEM); + + err = sg_alloc_table(st, mock->npages, GFP_KERNEL); + if (err) + goto err_free; + + sg = st->sgl; + for (i = 0; i < mock->npages; i++) { + sg_set_page(sg, mock->pages[i], PAGE_SIZE, 0); + sg = sg_next(sg); + } + + if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) { + err = -ENOMEM; + goto err_st; + } + + return st; + +err_st: + sg_free_table(st); +err_free: + kfree(st); + return ERR_PTR(err); +} + +static void mock_unmap_dma_buf(struct dma_buf_attachment *attachment, + struct sg_table *st, + enum dma_data_direction dir) +{ + dma_unmap_sg(attachment->dev, st->sgl, st->nents, dir); + sg_free_table(st); + kfree(st); +} + +static void mock_dmabuf_release(struct dma_buf *dma_buf) +{ + struct mock_dmabuf *mock = to_mock(dma_buf); + int i; + + for (i = 0; i < mock->npages; i++) + put_page(mock->pages[i]); + + kfree(mock); +} + +static void *mock_dmabuf_vmap(struct dma_buf *dma_buf) +{ + struct mock_dmabuf *mock = to_mock(dma_buf); + + return vm_map_ram(mock->pages, mock->npages, 0, PAGE_KERNEL); +} + +static void mock_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr) +{ + struct mock_dmabuf *mock = to_mock(dma_buf); + + vm_unmap_ram(vaddr, mock->npages); +} + +static void *mock_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num) +{ + struct mock_dmabuf *mock = to_mock(dma_buf); + + return kmap(mock->pages[page_num]); +} + +static void mock_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr) +{ + struct mock_dmabuf *mock = to_mock(dma_buf); + + return kunmap(mock->pages[page_num]); +} + +static int mock_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma) +{ + return -ENODEV; +} + +static const struct dma_buf_ops 
mock_dmabuf_ops = { + .map_dma_buf = mock_map_dma_buf, + .unmap_dma_buf = mock_unmap_dma_buf, + .release = mock_dmabuf_release, + .map = mock_dmabuf_kmap, + .unmap = mock_dmabuf_kunmap, + .mmap = mock_dmabuf_mmap, + .vmap = mock_dmabuf_vmap, + .vunmap = mock_dmabuf_vunmap, +}; + +static struct dma_buf *mock_dmabuf(int npages) +{ + struct mock_dmabuf *mock; + DEFINE_DMA_BUF_EXPORT_INFO(exp_info); + struct dma_buf *dmabuf; + int i; + + mock = kmalloc(sizeof(*mock) + npages * sizeof(struct page *), + GFP_KERNEL); + if (!mock) + return ERR_PTR(-ENOMEM); + + mock->npages = npages; + for (i = 0; i < npages; i++) { + mock->pages[i] = alloc_page(GFP_KERNEL); + if (!mock->pages[i]) + goto err; + } + + exp_info.ops = &mock_dmabuf_ops; + exp_info.size = npages * PAGE_SIZE; + exp_info.flags = O_CLOEXEC; + exp_info.priv = mock; + + dmabuf = dma_buf_export(&exp_info); + if (IS_ERR(dmabuf)) + goto err; + + return dmabuf; + +err: + while (i--) + put_page(mock->pages[i]); + kfree(mock); + return ERR_PTR(-ENOMEM); +} diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.h b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.h new file mode 100644 index 000000000000..f0f8bbd82dfc --- /dev/null +++ b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.h @@ -0,0 +1,22 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2016 Intel Corporation + */ + +#ifndef __MOCK_DMABUF_H__ +#define __MOCK_DMABUF_H__ + +#include + +struct mock_dmabuf { + int npages; + struct page *pages[]; +}; + +static struct mock_dmabuf *to_mock(struct dma_buf *buf) +{ + return buf->priv; +} + +#endif /* !__MOCK_DMABUF_H__ */ diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_gem_object.h b/drivers/gpu/drm/i915/gem/selftests/mock_gem_object.h new file mode 100644 index 000000000000..370360b4a148 --- /dev/null +++ b/drivers/gpu/drm/i915/gem/selftests/mock_gem_object.h @@ -0,0 +1,14 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2016 Intel Corporation + */ + +#ifndef __MOCK_GEM_OBJECT_H__ +#define __MOCK_GEM_OBJECT_H__ + +struct mock_object { + struct drm_i915_gem_object base; +}; + +#endif /* !__MOCK_GEM_OBJECT_H__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index 5b31e1e05ddd..c78ec0b58e77 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -4,8 +4,10 @@ * Copyright © 2019 Intel Corporation */ +#include "gem/i915_gem_context.h" +#include "gem/i915_gem_pm.h" + #include "i915_drv.h" -#include "i915_gem_context.h" #include "i915_globals.h" #include "intel_context.h" diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index 1c83ea9adac0..672dde71a46c 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -24,10 +24,13 @@ #include +#include "gem/i915_gem_context.h" + #include "i915_drv.h" #include "intel_engine.h" #include "intel_engine_pm.h" +#include "intel_context.h" #include "intel_lrc.h" #include "intel_reset.h" diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 38a8e55a7c85..448f3c0d8704 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -133,6 +133,8 @@ */ #include +#include "gem/i915_gem_context.h" + #include "i915_drv.h" #include "i915_gem_render_state.h" #include "i915_vgpu.h" diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.h b/drivers/gpu/drm/i915/gt/intel_lrc.h index e029aee87adf..c2bba82bcc16 100644 --- 
a/drivers/gpu/drm/i915/gt/intel_lrc.h +++ b/drivers/gpu/drm/i915/gt/intel_lrc.h @@ -24,7 +24,15 @@ #ifndef _INTEL_LRC_H_ #define _INTEL_LRC_H_ -#include "intel_engine.h" +#include + +struct drm_printer; + +struct drm_i915_private; +struct i915_gem_context; +struct i915_request; +struct intel_context; +struct intel_engine_cs; /* Execlists regs */ #define RING_ELSP(base) _MMIO((base) + 0x230) @@ -96,10 +104,6 @@ int intel_execlists_submission_init(struct intel_engine_cs *engine); */ #define LRC_HEADER_PAGES LRC_PPHWSP_PN -struct drm_printer; - -struct drm_i915_private; - void intel_execlists_set_default_submission(struct intel_engine_cs *engine); void intel_lr_context_reset(struct intel_engine_cs *engine, diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index 8c60f7550f9c..377bc546a68f 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -7,6 +7,8 @@ #include #include +#include "gem/i915_gem_context.h" + #include "i915_drv.h" #include "i915_gpu_error.h" #include "i915_irq.h" diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c index ac93080bd863..66d5a52d505c 100644 --- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c @@ -31,9 +31,12 @@ #include +#include "gem/i915_gem_context.h" + #include "i915_drv.h" #include "i915_gem_render_state.h" #include "i915_trace.h" +#include "intel_context.h" #include "intel_reset.h" #include "intel_workarounds.h" diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index ce4bcca3f83c..133d069244f4 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -5,6 +5,7 @@ */ #include "i915_drv.h" +#include "intel_context.h" #include "intel_workarounds.h" /** diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c index 2941916b37bf..6d7562769eb2 100644 --- a/drivers/gpu/drm/i915/gt/mock_engine.c +++ b/drivers/gpu/drm/i915/gt/mock_engine.c @@ -22,8 +22,9 @@ * */ +#include "gem/i915_gem_context.h" + #include "i915_drv.h" -#include "i915_gem_context.h" #include "intel_context.h" #include "intel_engine_pm.h" diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c index 48a51739b926..690d77f5ecf6 100644 --- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c +++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c @@ -24,19 +24,21 @@ #include +#include "gem/i915_gem_context.h" #include "intel_engine_pm.h" #include "i915_selftest.h" #include "selftests/i915_random.h" #include "selftests/igt_flush_test.h" -#include "selftests/igt_gem_utils.h" #include "selftests/igt_reset.h" #include "selftests/igt_wedge_me.h" #include "selftests/igt_atomic.h" -#include "selftests/mock_context.h" #include "selftests/mock_drm.h" +#include "gem/selftests/mock_context.h" +#include "gem/selftests/igt_gem_utils.h" + #define IGT_IDLE_TIMEOUT 50 /* ms; time to wait after flushing between tests */ struct hang { diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c index a8c50900e2d4..dfacc46ae7d3 100644 --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c @@ -6,15 +6,18 @@ #include +#include "gem/i915_gem_pm.h" #include "gt/intel_reset.h" + #include "i915_selftest.h" #include "selftests/i915_random.h" #include "selftests/igt_flush_test.h" -#include 
"selftests/igt_gem_utils.h" #include "selftests/igt_live_test.h" #include "selftests/igt_spinner.h" #include "selftests/lib_sw_fence.h" -#include "selftests/mock_context.h" + +#include "gem/selftests/igt_gem_utils.h" +#include "gem/selftests/mock_context.h" static int live_sanitycheck(void *arg) { diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c index f9c9e7291187..9040cae38fc5 100644 --- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c +++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c @@ -4,17 +4,19 @@ * Copyright © 2018 Intel Corporation */ +#include "gem/i915_gem_pm.h" #include "i915_selftest.h" #include "intel_reset.h" #include "selftests/igt_flush_test.h" -#include "selftests/igt_gem_utils.h" #include "selftests/igt_reset.h" #include "selftests/igt_spinner.h" #include "selftests/igt_wedge_me.h" -#include "selftests/mock_context.h" #include "selftests/mock_drm.h" +#include "gem/selftests/igt_gem_utils.h" +#include "gem/selftests/mock_context.h" + static const struct wo_register { enum intel_platform platform; u32 reg; diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c index 96e1edf21b3f..2998999e8568 100644 --- a/drivers/gpu/drm/i915/gvt/mmio_context.c +++ b/drivers/gpu/drm/i915/gvt/mmio_context.c @@ -34,6 +34,7 @@ */ #include "i915_drv.h" +#include "gt/intel_context.h" #include "gvt.h" #include "trace.h" diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 3a691447f76c..d66bf77f55fd 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -35,8 +35,11 @@ #include +#include "gem/i915_gem_context.h" +#include "gem/i915_gem_pm.h" +#include "gt/intel_context.h" + #include "i915_drv.h" -#include "i915_gem_pm.h" #include "gvt.h" #define RING_CTX_OFF(x) \ diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 344beab229a0..7ab8340af991 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -32,10 +32,10 @@ #include #include +#include "gem/i915_gem_context.h" #include "gt/intel_reset.h" #include "i915_debugfs.h" -#include "i915_gem_context.h" #include "i915_irq.h" #include "intel_csr.h" #include "intel_dp.h" diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index a1f43dc5a8b5..5ca1594f3075 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -47,6 +47,7 @@ #include #include +#include "gem/i915_gem_context.h" #include "gem/i915_gem_ioctls.h" #include "gt/intel_gt_pm.h" #include "gt/intel_reset.h" diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 596af542afea..38da46e773a3 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -80,7 +80,7 @@ #include "intel_wopcm.h" #include "i915_gem.h" -#include "i915_gem_context.h" +#include "gem/i915_gem_context_types.h" #include "i915_gem_fence_reg.h" #include "i915_gem_gtt.h" #include "i915_gpu_error.h" diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 0570907cc9d2..096e31e3df92 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -38,7 +38,11 @@ #include #include +#include "gem/i915_gem_clflush.h" +#include "gem/i915_gem_context.h" #include "gem/i915_gem_ioctls.h" +#include "gem/i915_gem_pm.h" +#include "gem/i915_gemfs.h" #include "gt/intel_engine_pm.h" #include "gt/intel_gt_pm.h" #include 
"gt/intel_mocs.h" @@ -46,9 +50,6 @@ #include "gt/intel_workarounds.h" #include "i915_drv.h" -#include "i915_gem_clflush.h" -#include "i915_gemfs.h" -#include "i915_gem_pm.h" #include "i915_trace.h" #include "i915_vgpu.h" @@ -2371,9 +2372,5 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old, #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) #include "selftests/scatterlist.c" #include "selftests/mock_gem_device.c" -#include "selftests/huge_gem_object.c" -#include "selftests/huge_pages.c" -#include "selftests/i915_gem_object.c" -#include "selftests/i915_gem_coherency.c" #include "selftests/i915_gem.c" #endif diff --git a/drivers/gpu/drm/i915/i915_gem_clflush.c b/drivers/gpu/drm/i915/i915_gem_clflush.c deleted file mode 100644 index 8e74c23cbd91..000000000000 --- a/drivers/gpu/drm/i915/i915_gem_clflush.c +++ /dev/null @@ -1,178 +0,0 @@ -/* - * Copyright © 2016 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. 
- * - */ - -#include "i915_drv.h" -#include "intel_frontbuffer.h" -#include "i915_gem_clflush.h" - -static DEFINE_SPINLOCK(clflush_lock); - -struct clflush { - struct dma_fence dma; /* Must be first for dma_fence_free() */ - struct i915_sw_fence wait; - struct work_struct work; - struct drm_i915_gem_object *obj; -}; - -static const char *i915_clflush_get_driver_name(struct dma_fence *fence) -{ - return DRIVER_NAME; -} - -static const char *i915_clflush_get_timeline_name(struct dma_fence *fence) -{ - return "clflush"; -} - -static void i915_clflush_release(struct dma_fence *fence) -{ - struct clflush *clflush = container_of(fence, typeof(*clflush), dma); - - i915_sw_fence_fini(&clflush->wait); - - BUILD_BUG_ON(offsetof(typeof(*clflush), dma)); - dma_fence_free(&clflush->dma); -} - -static const struct dma_fence_ops i915_clflush_ops = { - .get_driver_name = i915_clflush_get_driver_name, - .get_timeline_name = i915_clflush_get_timeline_name, - .release = i915_clflush_release, -}; - -static void __i915_do_clflush(struct drm_i915_gem_object *obj) -{ - GEM_BUG_ON(!i915_gem_object_has_pages(obj)); - drm_clflush_sg(obj->mm.pages); - intel_fb_obj_flush(obj, ORIGIN_CPU); -} - -static void i915_clflush_work(struct work_struct *work) -{ - struct clflush *clflush = container_of(work, typeof(*clflush), work); - struct drm_i915_gem_object *obj = clflush->obj; - - if (i915_gem_object_pin_pages(obj)) { - DRM_ERROR("Failed to acquire obj->pages for clflushing\n"); - goto out; - } - - __i915_do_clflush(obj); - - i915_gem_object_unpin_pages(obj); - -out: - i915_gem_object_put(obj); - - dma_fence_signal(&clflush->dma); - dma_fence_put(&clflush->dma); -} - -static int __i915_sw_fence_call -i915_clflush_notify(struct i915_sw_fence *fence, - enum i915_sw_fence_notify state) -{ - struct clflush *clflush = container_of(fence, typeof(*clflush), wait); - - switch (state) { - case FENCE_COMPLETE: - schedule_work(&clflush->work); - break; - - case FENCE_FREE: - dma_fence_put(&clflush->dma); - break; - } - - return NOTIFY_DONE; -} - -bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, - unsigned int flags) -{ - struct clflush *clflush; - - /* - * Stolen memory is always coherent with the GPU as it is explicitly - * marked as wc by the system, or the system is cache-coherent. - * Similarly, we only access struct pages through the CPU cache, so - * anything not backed by physical memory we consider to be always - * coherent and not need clflushing. - */ - if (!i915_gem_object_has_struct_page(obj)) { - obj->cache_dirty = false; - return false; - } - - /* If the GPU is snooping the contents of the CPU cache, - * we do not need to manually clear the CPU cache lines. However, - * the caches are only snooped when the render cache is - * flushed/invalidated. As we always have to emit invalidations - * and flushes when moving into and out of the RENDER domain, correct - * snooping behaviour occurs naturally as the result of our domain - * tracking. 
- */ - if (!(flags & I915_CLFLUSH_FORCE) && - obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ) - return false; - - trace_i915_gem_object_clflush(obj); - - clflush = NULL; - if (!(flags & I915_CLFLUSH_SYNC)) - clflush = kmalloc(sizeof(*clflush), GFP_KERNEL); - if (clflush) { - GEM_BUG_ON(!obj->cache_dirty); - - dma_fence_init(&clflush->dma, - &i915_clflush_ops, - &clflush_lock, - to_i915(obj->base.dev)->mm.unordered_timeline, - 0); - i915_sw_fence_init(&clflush->wait, i915_clflush_notify); - - clflush->obj = i915_gem_object_get(obj); - INIT_WORK(&clflush->work, i915_clflush_work); - - dma_fence_get(&clflush->dma); - - i915_sw_fence_await_reservation(&clflush->wait, - obj->resv, NULL, - true, I915_FENCE_TIMEOUT, - I915_FENCE_GFP); - - reservation_object_lock(obj->resv, NULL); - reservation_object_add_excl_fence(obj->resv, &clflush->dma); - reservation_object_unlock(obj->resv); - - i915_sw_fence_commit(&clflush->wait); - } else if (obj->mm.pages) { - __i915_do_clflush(obj); - } else { - GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU); - } - - obj->cache_dirty = false; - return true; -} diff --git a/drivers/gpu/drm/i915/i915_gem_clflush.h b/drivers/gpu/drm/i915/i915_gem_clflush.h deleted file mode 100644 index f390247561b3..000000000000 --- a/drivers/gpu/drm/i915/i915_gem_clflush.h +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright © 2016 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. 
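For illustration only, a minimal hedged sketch (hypothetical caller name, not part of the removed file) of how the flush helper above might be used before handing a CPU-dirtied object to the GPU; I915_CLFLUSH_SYNC keeps the flush synchronous instead of deferring it behind a fence:

/* Hypothetical caller: flush pending CPU writes before GPU access. */
static void example_prepare_for_gpu(struct drm_i915_gem_object *obj)
{
	if (obj->cache_dirty)
		i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
}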
- * - */ - -#ifndef __I915_GEM_CLFLUSH_H__ -#define __I915_GEM_CLFLUSH_H__ - -struct drm_i915_private; -struct drm_i915_gem_object; - -bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, - unsigned int flags); -#define I915_CLFLUSH_FORCE BIT(0) -#define I915_CLFLUSH_SYNC BIT(1) - -#endif /* __I915_GEM_CLFLUSH_H__ */ diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c deleted file mode 100644 index 5d2f8ba92b59..000000000000 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ /dev/null @@ -1,2474 +0,0 @@ -/* - * Copyright © 2011-2012 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - * - * Authors: - * Ben Widawsky - * - */ - -/* - * This file implements HW context support. On gen5+ a HW context consists of an - * opaque GPU object which is referenced at times of context saves and restores. - * With RC6 enabled, the context is also referenced as the GPU enters and exists - * from RC6 (GPU has it's own internal power context, except on gen5). Though - * something like a context does exist for the media ring, the code only - * supports contexts for the render ring. - * - * In software, there is a distinction between contexts created by the user, - * and the default HW context. The default HW context is used by GPU clients - * that do not request setup of their own hardware context. The default - * context's state is never restored to help prevent programming errors. This - * would happen if a client ran and piggy-backed off another clients GPU state. - * The default context only exists to give the GPU some offset to load as the - * current to invoke a save of the context we actually care about. In fact, the - * code could likely be constructed, albeit in a more complicated fashion, to - * never use the default context, though that limits the driver's ability to - * swap out, and/or destroy other contexts. - * - * All other contexts are created as a request by the GPU client. These contexts - * store GPU state, and thus allow GPU clients to not re-emit state (and - * potentially query certain state) at any time. The kernel driver makes - * certain that the appropriate commands are inserted. - * - * The context life cycle is semi-complicated in that context BOs may live - * longer than the context itself because of the way the hardware, and object - * tracking works. Below is a very crude representation of the state machine - * describing the context life. 
- * refcount pincount active - * S0: initial state 0 0 0 - * S1: context created 1 0 0 - * S2: context is currently running 2 1 X - * S3: GPU referenced, but not current 2 0 1 - * S4: context is current, but destroyed 1 1 0 - * S5: like S3, but destroyed 1 0 1 - * - * The most common (but not all) transitions: - * S0->S1: client creates a context - * S1->S2: client submits execbuf with context - * S2->S3: other clients submits execbuf with context - * S3->S1: context object was retired - * S3->S2: clients submits another execbuf - * S2->S4: context destroy called with current context - * S3->S5->S0: destroy path - * S4->S5->S0: destroy path on current context - * - * There are two confusing terms used above: - * The "current context" means the context which is currently running on the - * GPU. The GPU has loaded its state already and has stored away the gtt - * offset of the BO. The GPU is not actively referencing the data at this - * offset, but it will on the next context switch. The only way to avoid this - * is to do a GPU reset. - * - * An "active context' is one which was previously the "current context" and is - * on the active list waiting for the next context switch to occur. Until this - * happens, the object must remain at the same gtt offset. It is therefore - * possible to destroy a context, but it is still active. - * - */ - -#include -#include - -#include - -#include "gt/intel_lrc_reg.h" - -#include "i915_drv.h" -#include "i915_globals.h" -#include "i915_trace.h" -#include "i915_user_extensions.h" - -#define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1 - -static struct i915_global_gem_context { - struct i915_global base; - struct kmem_cache *slab_luts; -} global; - -struct i915_lut_handle *i915_lut_handle_alloc(void) -{ - return kmem_cache_alloc(global.slab_luts, GFP_KERNEL); -} - -void i915_lut_handle_free(struct i915_lut_handle *lut) -{ - return kmem_cache_free(global.slab_luts, lut); -} - -static void lut_close(struct i915_gem_context *ctx) -{ - struct i915_lut_handle *lut, *ln; - struct radix_tree_iter iter; - void __rcu **slot; - - list_for_each_entry_safe(lut, ln, &ctx->handles_list, ctx_link) { - list_del(&lut->obj_link); - i915_lut_handle_free(lut); - } - INIT_LIST_HEAD(&ctx->handles_list); - - rcu_read_lock(); - radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) { - struct i915_vma *vma = rcu_dereference_raw(*slot); - - radix_tree_iter_delete(&ctx->handles_vma, &iter, slot); - - vma->open_count--; - __i915_gem_object_release_unless_active(vma->obj); - } - rcu_read_unlock(); -} - -static struct intel_context * -lookup_user_engine(struct i915_gem_context *ctx, - unsigned long flags, - const struct i915_engine_class_instance *ci) -#define LOOKUP_USER_INDEX BIT(0) -{ - int idx; - - if (!!(flags & LOOKUP_USER_INDEX) != i915_gem_context_user_engines(ctx)) - return ERR_PTR(-EINVAL); - - if (!i915_gem_context_user_engines(ctx)) { - struct intel_engine_cs *engine; - - engine = intel_engine_lookup_user(ctx->i915, - ci->engine_class, - ci->engine_instance); - if (!engine) - return ERR_PTR(-EINVAL); - - idx = engine->id; - } else { - idx = ci->engine_instance; - } - - return i915_gem_context_get_engine(ctx, idx); -} - -static inline int new_hw_id(struct drm_i915_private *i915, gfp_t gfp) -{ - unsigned int max; - - lockdep_assert_held(&i915->contexts.mutex); - - if (INTEL_GEN(i915) >= 11) - max = GEN11_MAX_CONTEXT_HW_ID; - else if (USES_GUC_SUBMISSION(i915)) - /* - * When using GuC in proxy submission, GuC consumes the - * highest bit in the context id to indicate 
proxy submission. - */ - max = MAX_GUC_CONTEXT_HW_ID; - else - max = MAX_CONTEXT_HW_ID; - - return ida_simple_get(&i915->contexts.hw_ida, 0, max, gfp); -} - -static int steal_hw_id(struct drm_i915_private *i915) -{ - struct i915_gem_context *ctx, *cn; - LIST_HEAD(pinned); - int id = -ENOSPC; - - lockdep_assert_held(&i915->contexts.mutex); - - list_for_each_entry_safe(ctx, cn, - &i915->contexts.hw_id_list, hw_id_link) { - if (atomic_read(&ctx->hw_id_pin_count)) { - list_move_tail(&ctx->hw_id_link, &pinned); - continue; - } - - GEM_BUG_ON(!ctx->hw_id); /* perma-pinned kernel context */ - list_del_init(&ctx->hw_id_link); - id = ctx->hw_id; - break; - } - - /* - * Remember how far we got up on the last repossesion scan, so the - * list is kept in a "least recently scanned" order. - */ - list_splice_tail(&pinned, &i915->contexts.hw_id_list); - return id; -} - -static int assign_hw_id(struct drm_i915_private *i915, unsigned int *out) -{ - int ret; - - lockdep_assert_held(&i915->contexts.mutex); - - /* - * We prefer to steal/stall ourselves and our users over that of the - * entire system. That may be a little unfair to our users, and - * even hurt high priority clients. The choice is whether to oomkill - * something else, or steal a context id. - */ - ret = new_hw_id(i915, GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN); - if (unlikely(ret < 0)) { - ret = steal_hw_id(i915); - if (ret < 0) /* once again for the correct errno code */ - ret = new_hw_id(i915, GFP_KERNEL); - if (ret < 0) - return ret; - } - - *out = ret; - return 0; -} - -static void release_hw_id(struct i915_gem_context *ctx) -{ - struct drm_i915_private *i915 = ctx->i915; - - if (list_empty(&ctx->hw_id_link)) - return; - - mutex_lock(&i915->contexts.mutex); - if (!list_empty(&ctx->hw_id_link)) { - ida_simple_remove(&i915->contexts.hw_ida, ctx->hw_id); - list_del_init(&ctx->hw_id_link); - } - mutex_unlock(&i915->contexts.mutex); -} - -static void __free_engines(struct i915_gem_engines *e, unsigned int count) -{ - while (count--) { - if (!e->engines[count]) - continue; - - intel_context_put(e->engines[count]); - } - kfree(e); -} - -static void free_engines(struct i915_gem_engines *e) -{ - __free_engines(e, e->num_engines); -} - -static void free_engines_rcu(struct work_struct *wrk) -{ - struct i915_gem_engines *e = - container_of(wrk, struct i915_gem_engines, rcu.work); - struct drm_i915_private *i915 = e->i915; - - mutex_lock(&i915->drm.struct_mutex); - free_engines(e); - mutex_unlock(&i915->drm.struct_mutex); -} - -static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx) -{ - struct intel_engine_cs *engine; - struct i915_gem_engines *e; - enum intel_engine_id id; - - e = kzalloc(struct_size(e, engines, I915_NUM_ENGINES), GFP_KERNEL); - if (!e) - return ERR_PTR(-ENOMEM); - - e->i915 = ctx->i915; - for_each_engine(engine, ctx->i915, id) { - struct intel_context *ce; - - ce = intel_context_create(ctx, engine); - if (IS_ERR(ce)) { - __free_engines(e, id); - return ERR_CAST(ce); - } - - e->engines[id] = ce; - } - e->num_engines = id; - - return e; -} - -static void i915_gem_context_free(struct i915_gem_context *ctx) -{ - lockdep_assert_held(&ctx->i915->drm.struct_mutex); - GEM_BUG_ON(!i915_gem_context_is_closed(ctx)); - - release_hw_id(ctx); - i915_ppgtt_put(ctx->ppgtt); - - free_engines(rcu_access_pointer(ctx->engines)); - mutex_destroy(&ctx->engines_mutex); - - if (ctx->timeline) - i915_timeline_put(ctx->timeline); - - kfree(ctx->name); - put_pid(ctx->pid); - - list_del(&ctx->link); - mutex_destroy(&ctx->mutex); 
- - kfree_rcu(ctx, rcu); -} - -static void contexts_free(struct drm_i915_private *i915) -{ - struct llist_node *freed = llist_del_all(&i915->contexts.free_list); - struct i915_gem_context *ctx, *cn; - - lockdep_assert_held(&i915->drm.struct_mutex); - - llist_for_each_entry_safe(ctx, cn, freed, free_link) - i915_gem_context_free(ctx); -} - -static void contexts_free_first(struct drm_i915_private *i915) -{ - struct i915_gem_context *ctx; - struct llist_node *freed; - - lockdep_assert_held(&i915->drm.struct_mutex); - - freed = llist_del_first(&i915->contexts.free_list); - if (!freed) - return; - - ctx = container_of(freed, typeof(*ctx), free_link); - i915_gem_context_free(ctx); -} - -static void contexts_free_worker(struct work_struct *work) -{ - struct drm_i915_private *i915 = - container_of(work, typeof(*i915), contexts.free_work); - - mutex_lock(&i915->drm.struct_mutex); - contexts_free(i915); - mutex_unlock(&i915->drm.struct_mutex); -} - -void i915_gem_context_release(struct kref *ref) -{ - struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref); - struct drm_i915_private *i915 = ctx->i915; - - trace_i915_context_free(ctx); - if (llist_add(&ctx->free_link, &i915->contexts.free_list)) - queue_work(i915->wq, &i915->contexts.free_work); -} - -static void context_close(struct i915_gem_context *ctx) -{ - i915_gem_context_set_closed(ctx); - - /* - * This context will never again be assinged to HW, so we can - * reuse its ID for the next context. - */ - release_hw_id(ctx); - - /* - * The LUT uses the VMA as a backpointer to unref the object, - * so we need to clear the LUT before we close all the VMA (inside - * the ppgtt). - */ - lut_close(ctx); - - ctx->file_priv = ERR_PTR(-EBADF); - i915_gem_context_put(ctx); -} - -static u32 default_desc_template(const struct drm_i915_private *i915, - const struct i915_hw_ppgtt *ppgtt) -{ - u32 address_mode; - u32 desc; - - desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE; - - address_mode = INTEL_LEGACY_32B_CONTEXT; - if (ppgtt && i915_vm_is_4lvl(&ppgtt->vm)) - address_mode = INTEL_LEGACY_64B_CONTEXT; - desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT; - - if (IS_GEN(i915, 8)) - desc |= GEN8_CTX_L3LLC_COHERENT; - - /* TODO: WaDisableLiteRestore when we start using semaphore - * signalling between Command Streamers - * ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE; - */ - - return desc; -} - -static struct i915_gem_context * -__create_context(struct drm_i915_private *dev_priv) -{ - struct i915_gem_context *ctx; - struct i915_gem_engines *e; - int err; - int i; - - ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); - if (!ctx) - return ERR_PTR(-ENOMEM); - - kref_init(&ctx->ref); - list_add_tail(&ctx->link, &dev_priv->contexts.list); - ctx->i915 = dev_priv; - ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL); - mutex_init(&ctx->mutex); - - mutex_init(&ctx->engines_mutex); - e = default_engines(ctx); - if (IS_ERR(e)) { - err = PTR_ERR(e); - goto err_free; - } - RCU_INIT_POINTER(ctx->engines, e); - - INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL); - INIT_LIST_HEAD(&ctx->handles_list); - INIT_LIST_HEAD(&ctx->hw_id_link); - - /* NB: Mark all slices as needing a remap so that when the context first - * loads it will restore whatever remap state already exists. If there - * is no remap info, it will be a NOP. 
*/ - ctx->remap_slice = ALL_L3_SLICES(dev_priv); - - i915_gem_context_set_bannable(ctx); - i915_gem_context_set_recoverable(ctx); - - ctx->ring_size = 4 * PAGE_SIZE; - ctx->desc_template = - default_desc_template(dev_priv, dev_priv->mm.aliasing_ppgtt); - - for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++) - ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES; - - return ctx; - -err_free: - kfree(ctx); - return ERR_PTR(err); -} - -static struct i915_hw_ppgtt * -__set_ppgtt(struct i915_gem_context *ctx, struct i915_hw_ppgtt *ppgtt) -{ - struct i915_hw_ppgtt *old = ctx->ppgtt; - - ctx->ppgtt = i915_ppgtt_get(ppgtt); - ctx->desc_template = default_desc_template(ctx->i915, ppgtt); - - return old; -} - -static void __assign_ppgtt(struct i915_gem_context *ctx, - struct i915_hw_ppgtt *ppgtt) -{ - if (ppgtt == ctx->ppgtt) - return; - - ppgtt = __set_ppgtt(ctx, ppgtt); - if (ppgtt) - i915_ppgtt_put(ppgtt); -} - -static struct i915_gem_context * -i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags) -{ - struct i915_gem_context *ctx; - - lockdep_assert_held(&dev_priv->drm.struct_mutex); - - if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE && - !HAS_EXECLISTS(dev_priv)) - return ERR_PTR(-EINVAL); - - /* Reap the most stale context */ - contexts_free_first(dev_priv); - - ctx = __create_context(dev_priv); - if (IS_ERR(ctx)) - return ctx; - - if (HAS_FULL_PPGTT(dev_priv)) { - struct i915_hw_ppgtt *ppgtt; - - ppgtt = i915_ppgtt_create(dev_priv); - if (IS_ERR(ppgtt)) { - DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n", - PTR_ERR(ppgtt)); - context_close(ctx); - return ERR_CAST(ppgtt); - } - - __assign_ppgtt(ctx, ppgtt); - i915_ppgtt_put(ppgtt); - } - - if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) { - struct i915_timeline *timeline; - - timeline = i915_timeline_create(dev_priv, NULL); - if (IS_ERR(timeline)) { - context_close(ctx); - return ERR_CAST(timeline); - } - - ctx->timeline = timeline; - } - - trace_i915_context_create(ctx); - - return ctx; -} - -/** - * i915_gem_context_create_gvt - create a GVT GEM context - * @dev: drm device * - * - * This function is used to create a GVT specific GEM context. 
- * - * Returns: - * pointer to i915_gem_context on success, error pointer if failed - * - */ -struct i915_gem_context * -i915_gem_context_create_gvt(struct drm_device *dev) -{ - struct i915_gem_context *ctx; - int ret; - - if (!IS_ENABLED(CONFIG_DRM_I915_GVT)) - return ERR_PTR(-ENODEV); - - ret = i915_mutex_lock_interruptible(dev); - if (ret) - return ERR_PTR(ret); - - ctx = i915_gem_create_context(to_i915(dev), 0); - if (IS_ERR(ctx)) - goto out; - - ret = i915_gem_context_pin_hw_id(ctx); - if (ret) { - context_close(ctx); - ctx = ERR_PTR(ret); - goto out; - } - - ctx->file_priv = ERR_PTR(-EBADF); - i915_gem_context_set_closed(ctx); /* not user accessible */ - i915_gem_context_clear_bannable(ctx); - i915_gem_context_set_force_single_submission(ctx); - if (!USES_GUC_SUBMISSION(to_i915(dev))) - ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */ - - GEM_BUG_ON(i915_gem_context_is_kernel(ctx)); -out: - mutex_unlock(&dev->struct_mutex); - return ctx; -} - -static void -destroy_kernel_context(struct i915_gem_context **ctxp) -{ - struct i915_gem_context *ctx; - - /* Keep the context ref so that we can free it immediately ourselves */ - ctx = i915_gem_context_get(fetch_and_zero(ctxp)); - GEM_BUG_ON(!i915_gem_context_is_kernel(ctx)); - - context_close(ctx); - i915_gem_context_free(ctx); -} - -struct i915_gem_context * -i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio) -{ - struct i915_gem_context *ctx; - int err; - - ctx = i915_gem_create_context(i915, 0); - if (IS_ERR(ctx)) - return ctx; - - err = i915_gem_context_pin_hw_id(ctx); - if (err) { - destroy_kernel_context(&ctx); - return ERR_PTR(err); - } - - i915_gem_context_clear_bannable(ctx); - ctx->sched.priority = I915_USER_PRIORITY(prio); - ctx->ring_size = PAGE_SIZE; - - GEM_BUG_ON(!i915_gem_context_is_kernel(ctx)); - - return ctx; -} - -static void init_contexts(struct drm_i915_private *i915) -{ - mutex_init(&i915->contexts.mutex); - INIT_LIST_HEAD(&i915->contexts.list); - - /* Using the simple ida interface, the max is limited by sizeof(int) */ - BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX); - BUILD_BUG_ON(GEN11_MAX_CONTEXT_HW_ID > INT_MAX); - ida_init(&i915->contexts.hw_ida); - INIT_LIST_HEAD(&i915->contexts.hw_id_list); - - INIT_WORK(&i915->contexts.free_work, contexts_free_worker); - init_llist_head(&i915->contexts.free_list); -} - -static bool needs_preempt_context(struct drm_i915_private *i915) -{ - return HAS_EXECLISTS(i915); -} - -int i915_gem_contexts_init(struct drm_i915_private *dev_priv) -{ - struct i915_gem_context *ctx; - - /* Reassure ourselves we are only called once */ - GEM_BUG_ON(dev_priv->kernel_context); - GEM_BUG_ON(dev_priv->preempt_context); - - intel_engine_init_ctx_wa(dev_priv->engine[RCS0]); - init_contexts(dev_priv); - - /* lowest priority; idle task */ - ctx = i915_gem_context_create_kernel(dev_priv, I915_PRIORITY_MIN); - if (IS_ERR(ctx)) { - DRM_ERROR("Failed to create default global context\n"); - return PTR_ERR(ctx); - } - /* - * For easy recognisablity, we want the kernel context to be 0 and then - * all user contexts will have non-zero hw_id. Kernel contexts are - * permanently pinned, so that we never suffer a stall and can - * use them from any allocation context (e.g. for evicting other - * contexts and from inside the shrinker). 
- */ - GEM_BUG_ON(ctx->hw_id); - GEM_BUG_ON(!atomic_read(&ctx->hw_id_pin_count)); - dev_priv->kernel_context = ctx; - - /* highest priority; preempting task */ - if (needs_preempt_context(dev_priv)) { - ctx = i915_gem_context_create_kernel(dev_priv, INT_MAX); - if (!IS_ERR(ctx)) - dev_priv->preempt_context = ctx; - else - DRM_ERROR("Failed to create preempt context; disabling preemption\n"); - } - - DRM_DEBUG_DRIVER("%s context support initialized\n", - DRIVER_CAPS(dev_priv)->has_logical_contexts ? - "logical" : "fake"); - return 0; -} - -void i915_gem_contexts_lost(struct drm_i915_private *dev_priv) -{ - struct intel_engine_cs *engine; - enum intel_engine_id id; - - lockdep_assert_held(&dev_priv->drm.struct_mutex); - - for_each_engine(engine, dev_priv, id) - intel_engine_lost_context(engine); -} - -void i915_gem_contexts_fini(struct drm_i915_private *i915) -{ - lockdep_assert_held(&i915->drm.struct_mutex); - - if (i915->preempt_context) - destroy_kernel_context(&i915->preempt_context); - destroy_kernel_context(&i915->kernel_context); - - /* Must free all deferred contexts (via flush_workqueue) first */ - GEM_BUG_ON(!list_empty(&i915->contexts.hw_id_list)); - ida_destroy(&i915->contexts.hw_ida); -} - -static int context_idr_cleanup(int id, void *p, void *data) -{ - context_close(p); - return 0; -} - -static int vm_idr_cleanup(int id, void *p, void *data) -{ - i915_ppgtt_put(p); - return 0; -} - -static int gem_context_register(struct i915_gem_context *ctx, - struct drm_i915_file_private *fpriv) -{ - int ret; - - ctx->file_priv = fpriv; - if (ctx->ppgtt) - ctx->ppgtt->vm.file = fpriv; - - ctx->pid = get_task_pid(current, PIDTYPE_PID); - ctx->name = kasprintf(GFP_KERNEL, "%s[%d]", - current->comm, pid_nr(ctx->pid)); - if (!ctx->name) { - ret = -ENOMEM; - goto err_pid; - } - - /* And finally expose ourselves to userspace via the idr */ - mutex_lock(&fpriv->context_idr_lock); - ret = idr_alloc(&fpriv->context_idr, ctx, 0, 0, GFP_KERNEL); - mutex_unlock(&fpriv->context_idr_lock); - if (ret >= 0) - goto out; - - kfree(fetch_and_zero(&ctx->name)); -err_pid: - put_pid(fetch_and_zero(&ctx->pid)); -out: - return ret; -} - -int i915_gem_context_open(struct drm_i915_private *i915, - struct drm_file *file) -{ - struct drm_i915_file_private *file_priv = file->driver_priv; - struct i915_gem_context *ctx; - int err; - - mutex_init(&file_priv->context_idr_lock); - mutex_init(&file_priv->vm_idr_lock); - - idr_init(&file_priv->context_idr); - idr_init_base(&file_priv->vm_idr, 1); - - mutex_lock(&i915->drm.struct_mutex); - ctx = i915_gem_create_context(i915, 0); - mutex_unlock(&i915->drm.struct_mutex); - if (IS_ERR(ctx)) { - err = PTR_ERR(ctx); - goto err; - } - - err = gem_context_register(ctx, file_priv); - if (err < 0) - goto err_ctx; - - GEM_BUG_ON(i915_gem_context_is_kernel(ctx)); - GEM_BUG_ON(err > 0); - - return 0; - -err_ctx: - mutex_lock(&i915->drm.struct_mutex); - context_close(ctx); - mutex_unlock(&i915->drm.struct_mutex); -err: - idr_destroy(&file_priv->vm_idr); - idr_destroy(&file_priv->context_idr); - mutex_destroy(&file_priv->vm_idr_lock); - mutex_destroy(&file_priv->context_idr_lock); - return err; -} - -void i915_gem_context_close(struct drm_file *file) -{ - struct drm_i915_file_private *file_priv = file->driver_priv; - - lockdep_assert_held(&file_priv->dev_priv->drm.struct_mutex); - - idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL); - idr_destroy(&file_priv->context_idr); - mutex_destroy(&file_priv->context_idr_lock); - - idr_for_each(&file_priv->vm_idr, vm_idr_cleanup, 
NULL); - idr_destroy(&file_priv->vm_idr); - mutex_destroy(&file_priv->vm_idr_lock); -} - -int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data, - struct drm_file *file) -{ - struct drm_i915_private *i915 = to_i915(dev); - struct drm_i915_gem_vm_control *args = data; - struct drm_i915_file_private *file_priv = file->driver_priv; - struct i915_hw_ppgtt *ppgtt; - int err; - - if (!HAS_FULL_PPGTT(i915)) - return -ENODEV; - - if (args->flags) - return -EINVAL; - - ppgtt = i915_ppgtt_create(i915); - if (IS_ERR(ppgtt)) - return PTR_ERR(ppgtt); - - ppgtt->vm.file = file_priv; - - if (args->extensions) { - err = i915_user_extensions(u64_to_user_ptr(args->extensions), - NULL, 0, - ppgtt); - if (err) - goto err_put; - } - - err = mutex_lock_interruptible(&file_priv->vm_idr_lock); - if (err) - goto err_put; - - err = idr_alloc(&file_priv->vm_idr, ppgtt, 0, 0, GFP_KERNEL); - if (err < 0) - goto err_unlock; - - GEM_BUG_ON(err == 0); /* reserved for invalid/unassigned ppgtt */ - - mutex_unlock(&file_priv->vm_idr_lock); - - args->vm_id = err; - return 0; - -err_unlock: - mutex_unlock(&file_priv->vm_idr_lock); -err_put: - i915_ppgtt_put(ppgtt); - return err; -} - -int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data, - struct drm_file *file) -{ - struct drm_i915_file_private *file_priv = file->driver_priv; - struct drm_i915_gem_vm_control *args = data; - struct i915_hw_ppgtt *ppgtt; - int err; - u32 id; - - if (args->flags) - return -EINVAL; - - if (args->extensions) - return -EINVAL; - - id = args->vm_id; - if (!id) - return -ENOENT; - - err = mutex_lock_interruptible(&file_priv->vm_idr_lock); - if (err) - return err; - - ppgtt = idr_remove(&file_priv->vm_idr, id); - - mutex_unlock(&file_priv->vm_idr_lock); - if (!ppgtt) - return -ENOENT; - - i915_ppgtt_put(ppgtt); - return 0; -} - -struct context_barrier_task { - struct i915_active base; - void (*task)(void *data); - void *data; -}; - -static void cb_retire(struct i915_active *base) -{ - struct context_barrier_task *cb = container_of(base, typeof(*cb), base); - - if (cb->task) - cb->task(cb->data); - - i915_active_fini(&cb->base); - kfree(cb); -} - -I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault); -static int context_barrier_task(struct i915_gem_context *ctx, - intel_engine_mask_t engines, - int (*emit)(struct i915_request *rq, void *data), - void (*task)(void *data), - void *data) -{ - struct drm_i915_private *i915 = ctx->i915; - struct context_barrier_task *cb; - struct i915_gem_engines_iter it; - struct intel_context *ce; - int err = 0; - - lockdep_assert_held(&i915->drm.struct_mutex); - GEM_BUG_ON(!task); - - cb = kmalloc(sizeof(*cb), GFP_KERNEL); - if (!cb) - return -ENOMEM; - - i915_active_init(i915, &cb->base, cb_retire); - i915_active_acquire(&cb->base); - - for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { - struct i915_request *rq; - - if (I915_SELFTEST_ONLY(context_barrier_inject_fault & - ce->engine->mask)) { - err = -ENXIO; - break; - } - - if (!(ce->engine->mask & engines) || !ce->state) - continue; - - rq = intel_context_create_request(ce); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - break; - } - - err = 0; - if (emit) - err = emit(rq, data); - if (err == 0) - err = i915_active_ref(&cb->base, rq->fence.context, rq); - - i915_request_add(rq); - if (err) - break; - } - i915_gem_context_unlock_engines(ctx); - - cb->task = err ? 
NULL : task; /* caller needs to unwind instead */ - cb->data = data; - - i915_active_release(&cb->base); - - return err; -} - -static int get_ppgtt(struct drm_i915_file_private *file_priv, - struct i915_gem_context *ctx, - struct drm_i915_gem_context_param *args) -{ - struct i915_hw_ppgtt *ppgtt; - int ret; - - if (!ctx->ppgtt) - return -ENODEV; - - /* XXX rcu acquire? */ - ret = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex); - if (ret) - return ret; - - ppgtt = i915_ppgtt_get(ctx->ppgtt); - mutex_unlock(&ctx->i915->drm.struct_mutex); - - ret = mutex_lock_interruptible(&file_priv->vm_idr_lock); - if (ret) - goto err_put; - - ret = idr_alloc(&file_priv->vm_idr, ppgtt, 0, 0, GFP_KERNEL); - GEM_BUG_ON(!ret); - if (ret < 0) - goto err_unlock; - - i915_ppgtt_get(ppgtt); - - args->size = 0; - args->value = ret; - - ret = 0; -err_unlock: - mutex_unlock(&file_priv->vm_idr_lock); -err_put: - i915_ppgtt_put(ppgtt); - return ret; -} - -static void set_ppgtt_barrier(void *data) -{ - struct i915_hw_ppgtt *old = data; - - if (INTEL_GEN(old->vm.i915) < 8) - gen6_ppgtt_unpin_all(old); - - i915_ppgtt_put(old); -} - -static int emit_ppgtt_update(struct i915_request *rq, void *data) -{ - struct i915_hw_ppgtt *ppgtt = rq->gem_context->ppgtt; - struct intel_engine_cs *engine = rq->engine; - u32 base = engine->mmio_base; - u32 *cs; - int i; - - if (i915_vm_is_4lvl(&ppgtt->vm)) { - const dma_addr_t pd_daddr = px_dma(&ppgtt->pml4); - - cs = intel_ring_begin(rq, 6); - if (IS_ERR(cs)) - return PTR_ERR(cs); - - *cs++ = MI_LOAD_REGISTER_IMM(2); - - *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 0)); - *cs++ = upper_32_bits(pd_daddr); - *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 0)); - *cs++ = lower_32_bits(pd_daddr); - - *cs++ = MI_NOOP; - intel_ring_advance(rq, cs); - } else if (HAS_LOGICAL_RING_CONTEXTS(engine->i915)) { - cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2); - if (IS_ERR(cs)) - return PTR_ERR(cs); - - *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES); - for (i = GEN8_3LVL_PDPES; i--; ) { - const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i); - - *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i)); - *cs++ = upper_32_bits(pd_daddr); - *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i)); - *cs++ = lower_32_bits(pd_daddr); - } - *cs++ = MI_NOOP; - intel_ring_advance(rq, cs); - } else { - /* ppGTT is not part of the legacy context image */ - gen6_ppgtt_pin(ppgtt); - } - - return 0; -} - -static int set_ppgtt(struct drm_i915_file_private *file_priv, - struct i915_gem_context *ctx, - struct drm_i915_gem_context_param *args) -{ - struct i915_hw_ppgtt *ppgtt, *old; - int err; - - if (args->size) - return -EINVAL; - - if (!ctx->ppgtt) - return -ENODEV; - - if (upper_32_bits(args->value)) - return -ENOENT; - - err = mutex_lock_interruptible(&file_priv->vm_idr_lock); - if (err) - return err; - - ppgtt = idr_find(&file_priv->vm_idr, args->value); - if (ppgtt) - i915_ppgtt_get(ppgtt); - mutex_unlock(&file_priv->vm_idr_lock); - if (!ppgtt) - return -ENOENT; - - err = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex); - if (err) - goto out; - - if (ppgtt == ctx->ppgtt) - goto unlock; - - /* Teardown the existing obj:vma cache, it will have to be rebuilt. */ - lut_close(ctx); - - old = __set_ppgtt(ctx, ppgtt); - - /* - * We need to flush any requests using the current ppgtt before - * we release it as the requests do not hold a reference themselves, - * only indirectly through the context. 
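The get_ppgtt()/set_ppgtt() paths in this file back the I915_CONTEXT_PARAM_VM uAPI; as a usage illustration (hypothetical userspace helper, error handling trimmed, assuming the standard i915 uAPI ioctls), one VM can be created once and then attached to a second context so both contexts share an address space:

#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Hypothetical: create a VM and attach it to the context @ctx2_id. */
static int example_share_vm(int fd, __u32 ctx2_id)
{
	struct drm_i915_gem_vm_control vm = {};
	struct drm_i915_gem_context_param p = {};

	if (ioctl(fd, DRM_IOCTL_I915_GEM_VM_CREATE, &vm))
		return -1;

	p.ctx_id = ctx2_id;
	p.param = I915_CONTEXT_PARAM_VM;
	p.value = vm.vm_id;
	return ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
}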
- */ - err = context_barrier_task(ctx, ALL_ENGINES, - emit_ppgtt_update, - set_ppgtt_barrier, - old); - if (err) { - ctx->ppgtt = old; - ctx->desc_template = default_desc_template(ctx->i915, old); - i915_ppgtt_put(ppgtt); - } - -unlock: - mutex_unlock(&ctx->i915->drm.struct_mutex); - -out: - i915_ppgtt_put(ppgtt); - return err; -} - -static int gen8_emit_rpcs_config(struct i915_request *rq, - struct intel_context *ce, - struct intel_sseu sseu) -{ - u64 offset; - u32 *cs; - - cs = intel_ring_begin(rq, 4); - if (IS_ERR(cs)) - return PTR_ERR(cs); - - offset = i915_ggtt_offset(ce->state) + - LRC_STATE_PN * PAGE_SIZE + - (CTX_R_PWR_CLK_STATE + 1) * 4; - - *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; - *cs++ = lower_32_bits(offset); - *cs++ = upper_32_bits(offset); - *cs++ = intel_sseu_make_rpcs(rq->i915, &sseu); - - intel_ring_advance(rq, cs); - - return 0; -} - -static int -gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu) -{ - struct i915_request *rq; - int ret; - - lockdep_assert_held(&ce->pin_mutex); - - /* - * If the context is not idle, we have to submit an ordered request to - * modify its context image via the kernel context (writing to our own - * image, or into the registers directory, does not stick). Pristine - * and idle contexts will be configured on pinning. - */ - if (!intel_context_is_pinned(ce)) - return 0; - - rq = i915_request_create(ce->engine->kernel_context); - if (IS_ERR(rq)) - return PTR_ERR(rq); - - /* Queue this switch after all other activity by this context. */ - ret = i915_active_request_set(&ce->ring->timeline->last_request, rq); - if (ret) - goto out_add; - - ret = gen8_emit_rpcs_config(rq, ce, sseu); - if (ret) - goto out_add; - - /* - * Guarantee context image and the timeline remains pinned until the - * modifying request is retired by setting the ce activity tracker. - * - * But we only need to take one pin on the account of it. Or in other - * words transfer the pinned ce object to tracked active request. - */ - if (!i915_active_request_isset(&ce->active_tracker)) - __intel_context_pin(ce); - __i915_active_request_set(&ce->active_tracker, rq); - -out_add: - i915_request_add(rq); - return ret; -} - -static int -__intel_context_reconfigure_sseu(struct intel_context *ce, - struct intel_sseu sseu) -{ - int ret; - - GEM_BUG_ON(INTEL_GEN(ce->gem_context->i915) < 8); - - ret = intel_context_lock_pinned(ce); - if (ret) - return ret; - - /* Nothing to do if unmodified. */ - if (!memcmp(&ce->sseu, &sseu, sizeof(sseu))) - goto unlock; - - ret = gen8_modify_rpcs(ce, sseu); - if (!ret) - ce->sseu = sseu; - -unlock: - intel_context_unlock_pinned(ce); - return ret; -} - -static int -intel_context_reconfigure_sseu(struct intel_context *ce, struct intel_sseu sseu) -{ - struct drm_i915_private *i915 = ce->gem_context->i915; - int ret; - - ret = mutex_lock_interruptible(&i915->drm.struct_mutex); - if (ret) - return ret; - - ret = __intel_context_reconfigure_sseu(ce, sseu); - - mutex_unlock(&i915->drm.struct_mutex); - - return ret; -} - -static int -user_to_context_sseu(struct drm_i915_private *i915, - const struct drm_i915_gem_context_param_sseu *user, - struct intel_sseu *context) -{ - const struct sseu_dev_info *device = &RUNTIME_INFO(i915)->sseu; - - /* No zeros in any field. */ - if (!user->slice_mask || !user->subslice_mask || - !user->min_eus_per_subslice || !user->max_eus_per_subslice) - return -EINVAL; - - /* Max > min. 
*/ - if (user->max_eus_per_subslice < user->min_eus_per_subslice) - return -EINVAL; - - /* - * Some future proofing on the types since the uAPI is wider than the - * current internal implementation. - */ - if (overflows_type(user->slice_mask, context->slice_mask) || - overflows_type(user->subslice_mask, context->subslice_mask) || - overflows_type(user->min_eus_per_subslice, - context->min_eus_per_subslice) || - overflows_type(user->max_eus_per_subslice, - context->max_eus_per_subslice)) - return -EINVAL; - - /* Check validity against hardware. */ - if (user->slice_mask & ~device->slice_mask) - return -EINVAL; - - if (user->subslice_mask & ~device->subslice_mask[0]) - return -EINVAL; - - if (user->max_eus_per_subslice > device->max_eus_per_subslice) - return -EINVAL; - - context->slice_mask = user->slice_mask; - context->subslice_mask = user->subslice_mask; - context->min_eus_per_subslice = user->min_eus_per_subslice; - context->max_eus_per_subslice = user->max_eus_per_subslice; - - /* Part specific restrictions. */ - if (IS_GEN(i915, 11)) { - unsigned int hw_s = hweight8(device->slice_mask); - unsigned int hw_ss_per_s = hweight8(device->subslice_mask[0]); - unsigned int req_s = hweight8(context->slice_mask); - unsigned int req_ss = hweight8(context->subslice_mask); - - /* - * Only full subslice enablement is possible if more than one - * slice is turned on. - */ - if (req_s > 1 && req_ss != hw_ss_per_s) - return -EINVAL; - - /* - * If more than four (SScount bitfield limit) subslices are - * requested then the number has to be even. - */ - if (req_ss > 4 && (req_ss & 1)) - return -EINVAL; - - /* - * If only one slice is enabled and subslice count is below the - * device full enablement, it must be at most half of the all - * available subslices. - */ - if (req_s == 1 && req_ss < hw_ss_per_s && - req_ss > (hw_ss_per_s / 2)) - return -EINVAL; - - /* ABI restriction - VME use case only. */ - - /* All slices or one slice only. */ - if (req_s != 1 && req_s != hw_s) - return -EINVAL; - - /* - * Half subslices or full enablement only when one slice is - * enabled. - */ - if (req_s == 1 && - (req_ss != hw_ss_per_s && req_ss != (hw_ss_per_s / 2))) - return -EINVAL; - - /* No EU configuration changes. */ - if ((user->min_eus_per_subslice != - device->max_eus_per_subslice) || - (user->max_eus_per_subslice != - device->max_eus_per_subslice)) - return -EINVAL; - } - - return 0; -} - -static int set_sseu(struct i915_gem_context *ctx, - struct drm_i915_gem_context_param *args) -{ - struct drm_i915_private *i915 = ctx->i915; - struct drm_i915_gem_context_param_sseu user_sseu; - struct intel_context *ce; - struct intel_sseu sseu; - unsigned long lookup; - int ret; - - if (args->size < sizeof(user_sseu)) - return -EINVAL; - - if (!IS_GEN(i915, 11)) - return -ENODEV; - - if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value), - sizeof(user_sseu))) - return -EFAULT; - - if (user_sseu.rsvd) - return -EINVAL; - - if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)) - return -EINVAL; - - lookup = 0; - if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX) - lookup |= LOOKUP_USER_INDEX; - - ce = lookup_user_engine(ctx, lookup, &user_sseu.engine); - if (IS_ERR(ce)) - return PTR_ERR(ce); - - /* Only render engine supports RPCS configuration. 
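To make the Gen11 restrictions above concrete, consider a hypothetical single-slice part with 8 subslices per slice (hw_s = 1, hw_ss_per_s = 8): a request for one slice with all 8 subslices (full) or exactly 4 (half) is accepted, while 6 subslices fails the "at most half when below full enablement" check and 3 subslices fails the "half or full only" check. This is a worked illustration of the checks above, not an additional rule.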
*/ - if (ce->engine->class != RENDER_CLASS) { - ret = -ENODEV; - goto out_ce; - } - - ret = user_to_context_sseu(i915, &user_sseu, &sseu); - if (ret) - goto out_ce; - - ret = intel_context_reconfigure_sseu(ce, sseu); - if (ret) - goto out_ce; - - args->size = sizeof(user_sseu); - -out_ce: - intel_context_put(ce); - return ret; -} - -struct set_engines { - struct i915_gem_context *ctx; - struct i915_gem_engines *engines; -}; - -static int -set_engines__load_balance(struct i915_user_extension __user *base, void *data) -{ - struct i915_context_engines_load_balance __user *ext = - container_of_user(base, typeof(*ext), base); - const struct set_engines *set = data; - struct intel_engine_cs *stack[16]; - struct intel_engine_cs **siblings; - struct intel_context *ce; - u16 num_siblings, idx; - unsigned int n; - int err; - - if (!HAS_EXECLISTS(set->ctx->i915)) - return -ENODEV; - - if (USES_GUC_SUBMISSION(set->ctx->i915)) - return -ENODEV; /* not implement yet */ - - if (get_user(idx, &ext->engine_index)) - return -EFAULT; - - if (idx >= set->engines->num_engines) { - DRM_DEBUG("Invalid placement value, %d >= %d\n", - idx, set->engines->num_engines); - return -EINVAL; - } - - idx = array_index_nospec(idx, set->engines->num_engines); - if (set->engines->engines[idx]) { - DRM_DEBUG("Invalid placement[%d], already occupied\n", idx); - return -EEXIST; - } - - if (get_user(num_siblings, &ext->num_siblings)) - return -EFAULT; - - err = check_user_mbz(&ext->flags); - if (err) - return err; - - err = check_user_mbz(&ext->mbz64); - if (err) - return err; - - siblings = stack; - if (num_siblings > ARRAY_SIZE(stack)) { - siblings = kmalloc_array(num_siblings, - sizeof(*siblings), - GFP_KERNEL); - if (!siblings) - return -ENOMEM; - } - - for (n = 0; n < num_siblings; n++) { - struct i915_engine_class_instance ci; - - if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) { - err = -EFAULT; - goto out_siblings; - } - - siblings[n] = intel_engine_lookup_user(set->ctx->i915, - ci.engine_class, - ci.engine_instance); - if (!siblings[n]) { - DRM_DEBUG("Invalid sibling[%d]: { class:%d, inst:%d }\n", - n, ci.engine_class, ci.engine_instance); - err = -EINVAL; - goto out_siblings; - } - } - - ce = intel_execlists_create_virtual(set->ctx, siblings, n); - if (IS_ERR(ce)) { - err = PTR_ERR(ce); - goto out_siblings; - } - - if (cmpxchg(&set->engines->engines[idx], NULL, ce)) { - intel_context_put(ce); - err = -EEXIST; - goto out_siblings; - } - -out_siblings: - if (siblings != stack) - kfree(siblings); - - return err; -} - -static int -set_engines__bond(struct i915_user_extension __user *base, void *data) -{ - struct i915_context_engines_bond __user *ext = - container_of_user(base, typeof(*ext), base); - const struct set_engines *set = data; - struct i915_engine_class_instance ci; - struct intel_engine_cs *virtual; - struct intel_engine_cs *master; - u16 idx, num_bonds; - int err, n; - - if (get_user(idx, &ext->virtual_index)) - return -EFAULT; - - if (idx >= set->engines->num_engines) { - DRM_DEBUG("Invalid index for virtual engine: %d >= %d\n", - idx, set->engines->num_engines); - return -EINVAL; - } - - idx = array_index_nospec(idx, set->engines->num_engines); - if (!set->engines->engines[idx]) { - DRM_DEBUG("Invalid engine at %d\n", idx); - return -EINVAL; - } - virtual = set->engines->engines[idx]->engine; - - err = check_user_mbz(&ext->flags); - if (err) - return err; - - for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) { - err = check_user_mbz(&ext->mbz64[n]); - if (err) - return err; - } - - if (copy_from_user(&ci, 
&ext->master, sizeof(ci))) - return -EFAULT; - - master = intel_engine_lookup_user(set->ctx->i915, - ci.engine_class, ci.engine_instance); - if (!master) { - DRM_DEBUG("Unrecognised master engine: { class:%u, instance:%u }\n", - ci.engine_class, ci.engine_instance); - return -EINVAL; - } - - if (get_user(num_bonds, &ext->num_bonds)) - return -EFAULT; - - for (n = 0; n < num_bonds; n++) { - struct intel_engine_cs *bond; - - if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) - return -EFAULT; - - bond = intel_engine_lookup_user(set->ctx->i915, - ci.engine_class, - ci.engine_instance); - if (!bond) { - DRM_DEBUG("Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n", - n, ci.engine_class, ci.engine_instance); - return -EINVAL; - } - - /* - * A non-virtual engine has no siblings to choose between; and - * a submit fence will always be directed to the one engine. - */ - if (intel_engine_is_virtual(virtual)) { - err = intel_virtual_engine_attach_bond(virtual, - master, - bond); - if (err) - return err; - } - } - - return 0; -} - -static const i915_user_extension_fn set_engines__extensions[] = { - [I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_engines__load_balance, - [I915_CONTEXT_ENGINES_EXT_BOND] = set_engines__bond, -}; - -static int -set_engines(struct i915_gem_context *ctx, - const struct drm_i915_gem_context_param *args) -{ - struct i915_context_param_engines __user *user = - u64_to_user_ptr(args->value); - struct set_engines set = { .ctx = ctx }; - unsigned int num_engines, n; - u64 extensions; - int err; - - if (!args->size) { /* switch back to legacy user_ring_map */ - if (!i915_gem_context_user_engines(ctx)) - return 0; - - set.engines = default_engines(ctx); - if (IS_ERR(set.engines)) - return PTR_ERR(set.engines); - - goto replace; - } - - BUILD_BUG_ON(!IS_ALIGNED(sizeof(*user), sizeof(*user->engines))); - if (args->size < sizeof(*user) || - !IS_ALIGNED(args->size, sizeof(*user->engines))) { - DRM_DEBUG("Invalid size for engine array: %d\n", - args->size); - return -EINVAL; - } - - /* - * Note that I915_EXEC_RING_MASK limits execbuf to only using the - * first 64 engines defined here. 
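For illustration (hypothetical userspace helper, assuming the uAPI structs referenced above): an engine map of N entries is passed as the fixed header followed by N packed class:instance pairs, so args->size encodes N implicitly and the kernel recomputes it just below.

#include <drm/i915_drm.h>

/* Hypothetical: size an engine map of @n entries for I915_CONTEXT_PARAM_ENGINES. */
static size_t example_engine_map_size(unsigned int n)
{
	return sizeof(struct i915_context_param_engines) +
	       n * sizeof(struct i915_engine_class_instance);
}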
- */ - num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines); - - set.engines = kmalloc(struct_size(set.engines, engines, num_engines), - GFP_KERNEL); - if (!set.engines) - return -ENOMEM; - - set.engines->i915 = ctx->i915; - for (n = 0; n < num_engines; n++) { - struct i915_engine_class_instance ci; - struct intel_engine_cs *engine; - - if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) { - __free_engines(set.engines, n); - return -EFAULT; - } - - if (ci.engine_class == (u16)I915_ENGINE_CLASS_INVALID && - ci.engine_instance == (u16)I915_ENGINE_CLASS_INVALID_NONE) { - set.engines->engines[n] = NULL; - continue; - } - - engine = intel_engine_lookup_user(ctx->i915, - ci.engine_class, - ci.engine_instance); - if (!engine) { - DRM_DEBUG("Invalid engine[%d]: { class:%d, instance:%d }\n", - n, ci.engine_class, ci.engine_instance); - __free_engines(set.engines, n); - return -ENOENT; - } - - set.engines->engines[n] = intel_context_create(ctx, engine); - if (!set.engines->engines[n]) { - __free_engines(set.engines, n); - return -ENOMEM; - } - } - set.engines->num_engines = num_engines; - - err = -EFAULT; - if (!get_user(extensions, &user->extensions)) - err = i915_user_extensions(u64_to_user_ptr(extensions), - set_engines__extensions, - ARRAY_SIZE(set_engines__extensions), - &set); - if (err) { - free_engines(set.engines); - return err; - } - -replace: - mutex_lock(&ctx->engines_mutex); - if (args->size) - i915_gem_context_set_user_engines(ctx); - else - i915_gem_context_clear_user_engines(ctx); - rcu_swap_protected(ctx->engines, set.engines, 1); - mutex_unlock(&ctx->engines_mutex); - - INIT_RCU_WORK(&set.engines->rcu, free_engines_rcu); - queue_rcu_work(system_wq, &set.engines->rcu); - - return 0; -} - -static struct i915_gem_engines * -__copy_engines(struct i915_gem_engines *e) -{ - struct i915_gem_engines *copy; - unsigned int n; - - copy = kmalloc(struct_size(e, engines, e->num_engines), GFP_KERNEL); - if (!copy) - return ERR_PTR(-ENOMEM); - - copy->i915 = e->i915; - for (n = 0; n < e->num_engines; n++) { - if (e->engines[n]) - copy->engines[n] = intel_context_get(e->engines[n]); - else - copy->engines[n] = NULL; - } - copy->num_engines = n; - - return copy; -} - -static int -get_engines(struct i915_gem_context *ctx, - struct drm_i915_gem_context_param *args) -{ - struct i915_context_param_engines __user *user; - struct i915_gem_engines *e; - size_t n, count, size; - int err = 0; - - err = mutex_lock_interruptible(&ctx->engines_mutex); - if (err) - return err; - - e = NULL; - if (i915_gem_context_user_engines(ctx)) - e = __copy_engines(i915_gem_context_engines(ctx)); - mutex_unlock(&ctx->engines_mutex); - if (IS_ERR_OR_NULL(e)) { - args->size = 0; - return PTR_ERR_OR_ZERO(e); - } - - count = e->num_engines; - - /* Be paranoid in case we have an impedance mismatch */ - if (!check_struct_size(user, engines, count, &size)) { - err = -EINVAL; - goto err_free; - } - if (overflows_type(size, args->size)) { - err = -EINVAL; - goto err_free; - } - - if (!args->size) { - args->size = size; - goto err_free; - } - - if (args->size < size) { - err = -EINVAL; - goto err_free; - } - - user = u64_to_user_ptr(args->value); - if (!access_ok(user, size)) { - err = -EFAULT; - goto err_free; - } - - if (put_user(0, &user->extensions)) { - err = -EFAULT; - goto err_free; - } - - for (n = 0; n < count; n++) { - struct i915_engine_class_instance ci = { - .engine_class = I915_ENGINE_CLASS_INVALID, - .engine_instance = I915_ENGINE_CLASS_INVALID_NONE, - }; - - if (e->engines[n]) { - ci.engine_class = 
e->engines[n]->engine->uabi_class; - ci.engine_instance = e->engines[n]->engine->instance; - } - - if (copy_to_user(&user->engines[n], &ci, sizeof(ci))) { - err = -EFAULT; - goto err_free; - } - } - - args->size = size; - -err_free: - INIT_RCU_WORK(&e->rcu, free_engines_rcu); - queue_rcu_work(system_wq, &e->rcu); - return err; -} - -static int ctx_setparam(struct drm_i915_file_private *fpriv, - struct i915_gem_context *ctx, - struct drm_i915_gem_context_param *args) -{ - int ret = 0; - - switch (args->param) { - case I915_CONTEXT_PARAM_NO_ZEROMAP: - if (args->size) - ret = -EINVAL; - else if (args->value) - set_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags); - else - clear_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags); - break; - - case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE: - if (args->size) - ret = -EINVAL; - else if (args->value) - i915_gem_context_set_no_error_capture(ctx); - else - i915_gem_context_clear_no_error_capture(ctx); - break; - - case I915_CONTEXT_PARAM_BANNABLE: - if (args->size) - ret = -EINVAL; - else if (!capable(CAP_SYS_ADMIN) && !args->value) - ret = -EPERM; - else if (args->value) - i915_gem_context_set_bannable(ctx); - else - i915_gem_context_clear_bannable(ctx); - break; - - case I915_CONTEXT_PARAM_RECOVERABLE: - if (args->size) - ret = -EINVAL; - else if (args->value) - i915_gem_context_set_recoverable(ctx); - else - i915_gem_context_clear_recoverable(ctx); - break; - - case I915_CONTEXT_PARAM_PRIORITY: - { - s64 priority = args->value; - - if (args->size) - ret = -EINVAL; - else if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY)) - ret = -ENODEV; - else if (priority > I915_CONTEXT_MAX_USER_PRIORITY || - priority < I915_CONTEXT_MIN_USER_PRIORITY) - ret = -EINVAL; - else if (priority > I915_CONTEXT_DEFAULT_PRIORITY && - !capable(CAP_SYS_NICE)) - ret = -EPERM; - else - ctx->sched.priority = - I915_USER_PRIORITY(priority); - } - break; - - case I915_CONTEXT_PARAM_SSEU: - ret = set_sseu(ctx, args); - break; - - case I915_CONTEXT_PARAM_VM: - ret = set_ppgtt(fpriv, ctx, args); - break; - - case I915_CONTEXT_PARAM_ENGINES: - ret = set_engines(ctx, args); - break; - - case I915_CONTEXT_PARAM_BAN_PERIOD: - default: - ret = -EINVAL; - break; - } - - return ret; -} - -struct create_ext { - struct i915_gem_context *ctx; - struct drm_i915_file_private *fpriv; -}; - -static int create_setparam(struct i915_user_extension __user *ext, void *data) -{ - struct drm_i915_gem_context_create_ext_setparam local; - const struct create_ext *arg = data; - - if (copy_from_user(&local, ext, sizeof(local))) - return -EFAULT; - - if (local.param.ctx_id) - return -EINVAL; - - return ctx_setparam(arg->fpriv, arg->ctx, &local.param); -} - -static int clone_engines(struct i915_gem_context *dst, - struct i915_gem_context *src) -{ - struct i915_gem_engines *e = i915_gem_context_lock_engines(src); - struct i915_gem_engines *clone; - bool user_engines; - unsigned long n; - - clone = kmalloc(struct_size(e, engines, e->num_engines), GFP_KERNEL); - if (!clone) - goto err_unlock; - - clone->i915 = dst->i915; - for (n = 0; n < e->num_engines; n++) { - struct intel_engine_cs *engine; - - if (!e->engines[n]) { - clone->engines[n] = NULL; - continue; - } - engine = e->engines[n]->engine; - - /* - * Virtual engines are singletons; they can only exist - * inside a single context, because they embed their - * HW context... As each virtual context implies a single - * timeline (each engine can only dequeue a single request - * at any time), it would be surprising for two contexts - * to use the same engine. 
So let's create a copy of - * the virtual engine instead. - */ - if (intel_engine_is_virtual(engine)) - clone->engines[n] = - intel_execlists_clone_virtual(dst, engine); - else - clone->engines[n] = intel_context_create(dst, engine); - if (IS_ERR_OR_NULL(clone->engines[n])) { - __free_engines(clone, n); - goto err_unlock; - } - } - clone->num_engines = n; - - user_engines = i915_gem_context_user_engines(src); - i915_gem_context_unlock_engines(src); - - free_engines(dst->engines); - RCU_INIT_POINTER(dst->engines, clone); - if (user_engines) - i915_gem_context_set_user_engines(dst); - else - i915_gem_context_clear_user_engines(dst); - return 0; - -err_unlock: - i915_gem_context_unlock_engines(src); - return -ENOMEM; -} - -static int clone_flags(struct i915_gem_context *dst, - struct i915_gem_context *src) -{ - dst->user_flags = src->user_flags; - return 0; -} - -static int clone_schedattr(struct i915_gem_context *dst, - struct i915_gem_context *src) -{ - dst->sched = src->sched; - return 0; -} - -static int clone_sseu(struct i915_gem_context *dst, - struct i915_gem_context *src) -{ - struct i915_gem_engines *e = i915_gem_context_lock_engines(src); - struct i915_gem_engines *clone; - unsigned long n; - int err; - - clone = dst->engines; /* no locking required; sole access */ - if (e->num_engines != clone->num_engines) { - err = -EINVAL; - goto unlock; - } - - for (n = 0; n < e->num_engines; n++) { - struct intel_context *ce = e->engines[n]; - - if (clone->engines[n]->engine->class != ce->engine->class) { - /* Must have compatible engine maps! */ - err = -EINVAL; - goto unlock; - } - - /* serialises with set_sseu */ - err = intel_context_lock_pinned(ce); - if (err) - goto unlock; - - clone->engines[n]->sseu = ce->sseu; - intel_context_unlock_pinned(ce); - } - - err = 0; -unlock: - i915_gem_context_unlock_engines(src); - return err; -} - -static int clone_timeline(struct i915_gem_context *dst, - struct i915_gem_context *src) -{ - if (src->timeline) { - GEM_BUG_ON(src->timeline == dst->timeline); - - if (dst->timeline) - i915_timeline_put(dst->timeline); - dst->timeline = i915_timeline_get(src->timeline); - } - - return 0; -} - -static int clone_vm(struct i915_gem_context *dst, - struct i915_gem_context *src) -{ - struct i915_hw_ppgtt *ppgtt; - - rcu_read_lock(); - do { - ppgtt = READ_ONCE(src->ppgtt); - if (!ppgtt) - break; - - if (!kref_get_unless_zero(&ppgtt->ref)) - continue; - - /* - * This ppgtt may have be reallocated between - * the read and the kref, and reassigned to a third - * context. In order to avoid inadvertent sharing - * of this ppgtt with that third context (and not - * src), we have to confirm that we have the same - * ppgtt after passing through the strong memory - * barrier implied by a successful - * kref_get_unless_zero(). - * - * Once we have acquired the current ppgtt of src, - * we no longer care if it is released from src, as - * it cannot be reallocated elsewhere. 
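The retry loop in clone_vm() above follows a general lookup-under-RCU pattern; a minimal generic sketch (hypothetical struct foo and foo_release(), illustrative only and not the driver code) of the same idea:

#include <linux/kref.h>
#include <linux/rcupdate.h>

struct foo {
	struct kref ref;
};

static void foo_release(struct kref *ref) { /* free the containing foo */ }

/*
 * Take a reference only if *@slot is still live, then re-check that the
 * pointer was not freed, recycled and reassigned elsewhere before the
 * kref was acquired.
 */
static struct foo *example_get_live(struct foo __rcu **slot)
{
	struct foo *f;

	rcu_read_lock();
	do {
		f = rcu_dereference(*slot);
		if (!f)
			break;

		if (!kref_get_unless_zero(&f->ref))
			continue;	/* already dying; look again */

		if (f == rcu_access_pointer(*slot))
			break;		/* still the same object: success */

		kref_put(&f->ref, foo_release);	/* recycled; retry */
	} while (1);
	rcu_read_unlock();

	return f;
}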
- */ - - if (ppgtt == READ_ONCE(src->ppgtt)) - break; - - i915_ppgtt_put(ppgtt); - } while (1); - rcu_read_unlock(); - - if (ppgtt) { - __assign_ppgtt(dst, ppgtt); - i915_ppgtt_put(ppgtt); - } - - return 0; -} - -static int create_clone(struct i915_user_extension __user *ext, void *data) -{ - static int (* const fn[])(struct i915_gem_context *dst, - struct i915_gem_context *src) = { -#define MAP(x, y) [ilog2(I915_CONTEXT_CLONE_##x)] = y - MAP(ENGINES, clone_engines), - MAP(FLAGS, clone_flags), - MAP(SCHEDATTR, clone_schedattr), - MAP(SSEU, clone_sseu), - MAP(TIMELINE, clone_timeline), - MAP(VM, clone_vm), -#undef MAP - }; - struct drm_i915_gem_context_create_ext_clone local; - const struct create_ext *arg = data; - struct i915_gem_context *dst = arg->ctx; - struct i915_gem_context *src; - int err, bit; - - if (copy_from_user(&local, ext, sizeof(local))) - return -EFAULT; - - BUILD_BUG_ON(GENMASK(BITS_PER_TYPE(local.flags) - 1, ARRAY_SIZE(fn)) != - I915_CONTEXT_CLONE_UNKNOWN); - - if (local.flags & I915_CONTEXT_CLONE_UNKNOWN) - return -EINVAL; - - if (local.rsvd) - return -EINVAL; - - rcu_read_lock(); - src = __i915_gem_context_lookup_rcu(arg->fpriv, local.clone_id); - rcu_read_unlock(); - if (!src) - return -ENOENT; - - GEM_BUG_ON(src == dst); - - for (bit = 0; bit < ARRAY_SIZE(fn); bit++) { - if (!(local.flags & BIT(bit))) - continue; - - err = fn[bit](dst, src); - if (err) - return err; - } - - return 0; -} - -static const i915_user_extension_fn create_extensions[] = { - [I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam, - [I915_CONTEXT_CREATE_EXT_CLONE] = create_clone, -}; - -static bool client_is_banned(struct drm_i915_file_private *file_priv) -{ - return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED; -} - -int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, - struct drm_file *file) -{ - struct drm_i915_private *i915 = to_i915(dev); - struct drm_i915_gem_context_create_ext *args = data; - struct create_ext ext_data; - int ret; - - if (!DRIVER_CAPS(i915)->has_logical_contexts) - return -ENODEV; - - if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN) - return -EINVAL; - - ret = i915_terminally_wedged(i915); - if (ret) - return ret; - - ext_data.fpriv = file->driver_priv; - if (client_is_banned(ext_data.fpriv)) { - DRM_DEBUG("client %s[%d] banned from creating ctx\n", - current->comm, - pid_nr(get_task_pid(current, PIDTYPE_PID))); - return -EIO; - } - - ret = i915_mutex_lock_interruptible(dev); - if (ret) - return ret; - - ext_data.ctx = i915_gem_create_context(i915, args->flags); - mutex_unlock(&dev->struct_mutex); - if (IS_ERR(ext_data.ctx)) - return PTR_ERR(ext_data.ctx); - - if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS) { - ret = i915_user_extensions(u64_to_user_ptr(args->extensions), - create_extensions, - ARRAY_SIZE(create_extensions), - &ext_data); - if (ret) - goto err_ctx; - } - - ret = gem_context_register(ext_data.ctx, ext_data.fpriv); - if (ret < 0) - goto err_ctx; - - args->ctx_id = ret; - DRM_DEBUG("HW context %d created\n", args->ctx_id); - - return 0; - -err_ctx: - mutex_lock(&dev->struct_mutex); - context_close(ext_data.ctx); - mutex_unlock(&dev->struct_mutex); - return ret; -} - -int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, - struct drm_file *file) -{ - struct drm_i915_gem_context_destroy *args = data; - struct drm_i915_file_private *file_priv = file->driver_priv; - struct i915_gem_context *ctx; - - if (args->pad != 0) - return -EINVAL; - - if (!args->ctx_id) - return -ENOENT; - - if 
(mutex_lock_interruptible(&file_priv->context_idr_lock)) - return -EINTR; - - ctx = idr_remove(&file_priv->context_idr, args->ctx_id); - mutex_unlock(&file_priv->context_idr_lock); - if (!ctx) - return -ENOENT; - - mutex_lock(&dev->struct_mutex); - context_close(ctx); - mutex_unlock(&dev->struct_mutex); - - return 0; -} - -static int get_sseu(struct i915_gem_context *ctx, - struct drm_i915_gem_context_param *args) -{ - struct drm_i915_gem_context_param_sseu user_sseu; - struct intel_context *ce; - unsigned long lookup; - int err; - - if (args->size == 0) - goto out; - else if (args->size < sizeof(user_sseu)) - return -EINVAL; - - if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value), - sizeof(user_sseu))) - return -EFAULT; - - if (user_sseu.rsvd) - return -EINVAL; - - if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)) - return -EINVAL; - - lookup = 0; - if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX) - lookup |= LOOKUP_USER_INDEX; - - ce = lookup_user_engine(ctx, lookup, &user_sseu.engine); - if (IS_ERR(ce)) - return PTR_ERR(ce); - - err = intel_context_lock_pinned(ce); /* serialises with set_sseu */ - if (err) { - intel_context_put(ce); - return err; - } - - user_sseu.slice_mask = ce->sseu.slice_mask; - user_sseu.subslice_mask = ce->sseu.subslice_mask; - user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice; - user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice; - - intel_context_unlock_pinned(ce); - intel_context_put(ce); - - if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu, - sizeof(user_sseu))) - return -EFAULT; - -out: - args->size = sizeof(user_sseu); - - return 0; -} - -int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, - struct drm_file *file) -{ - struct drm_i915_file_private *file_priv = file->driver_priv; - struct drm_i915_gem_context_param *args = data; - struct i915_gem_context *ctx; - int ret = 0; - - ctx = i915_gem_context_lookup(file_priv, args->ctx_id); - if (!ctx) - return -ENOENT; - - switch (args->param) { - case I915_CONTEXT_PARAM_NO_ZEROMAP: - args->size = 0; - args->value = test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags); - break; - - case I915_CONTEXT_PARAM_GTT_SIZE: - args->size = 0; - if (ctx->ppgtt) - args->value = ctx->ppgtt->vm.total; - else if (to_i915(dev)->mm.aliasing_ppgtt) - args->value = to_i915(dev)->mm.aliasing_ppgtt->vm.total; - else - args->value = to_i915(dev)->ggtt.vm.total; - break; - - case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE: - args->size = 0; - args->value = i915_gem_context_no_error_capture(ctx); - break; - - case I915_CONTEXT_PARAM_BANNABLE: - args->size = 0; - args->value = i915_gem_context_is_bannable(ctx); - break; - - case I915_CONTEXT_PARAM_RECOVERABLE: - args->size = 0; - args->value = i915_gem_context_is_recoverable(ctx); - break; - - case I915_CONTEXT_PARAM_PRIORITY: - args->size = 0; - args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT; - break; - - case I915_CONTEXT_PARAM_SSEU: - ret = get_sseu(ctx, args); - break; - - case I915_CONTEXT_PARAM_VM: - ret = get_ppgtt(file_priv, ctx, args); - break; - - case I915_CONTEXT_PARAM_ENGINES: - ret = get_engines(ctx, args); - break; - - case I915_CONTEXT_PARAM_BAN_PERIOD: - default: - ret = -EINVAL; - break; - } - - i915_gem_context_put(ctx); - return ret; -} - -int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, - struct drm_file *file) -{ - struct drm_i915_file_private *file_priv = file->driver_priv; - struct drm_i915_gem_context_param *args = data; - struct i915_gem_context *ctx; - 
int ret; - - ctx = i915_gem_context_lookup(file_priv, args->ctx_id); - if (!ctx) - return -ENOENT; - - ret = ctx_setparam(file_priv, ctx, args); - - i915_gem_context_put(ctx); - return ret; -} - -int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, - void *data, struct drm_file *file) -{ - struct drm_i915_private *dev_priv = to_i915(dev); - struct drm_i915_reset_stats *args = data; - struct i915_gem_context *ctx; - int ret; - - if (args->flags || args->pad) - return -EINVAL; - - ret = -ENOENT; - rcu_read_lock(); - ctx = __i915_gem_context_lookup_rcu(file->driver_priv, args->ctx_id); - if (!ctx) - goto out; - - /* - * We opt for unserialised reads here. This may result in tearing - * in the extremely unlikely event of a GPU hang on this context - * as we are querying them. If we need that extra layer of protection, - * we should wrap the hangstats with a seqlock. - */ - - if (capable(CAP_SYS_ADMIN)) - args->reset_count = i915_reset_count(&dev_priv->gpu_error); - else - args->reset_count = 0; - - args->batch_active = atomic_read(&ctx->guilty_count); - args->batch_pending = atomic_read(&ctx->active_count); - - ret = 0; -out: - rcu_read_unlock(); - return ret; -} - -int __i915_gem_context_pin_hw_id(struct i915_gem_context *ctx) -{ - struct drm_i915_private *i915 = ctx->i915; - int err = 0; - - mutex_lock(&i915->contexts.mutex); - - GEM_BUG_ON(i915_gem_context_is_closed(ctx)); - - if (list_empty(&ctx->hw_id_link)) { - GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count)); - - err = assign_hw_id(i915, &ctx->hw_id); - if (err) - goto out_unlock; - - list_add_tail(&ctx->hw_id_link, &i915->contexts.hw_id_list); - } - - GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count) == ~0u); - atomic_inc(&ctx->hw_id_pin_count); - -out_unlock: - mutex_unlock(&i915->contexts.mutex); - return err; -} - -/* GEM context-engines iterator: for_each_gem_engine() */ -struct intel_context * -i915_gem_engines_iter_next(struct i915_gem_engines_iter *it) -{ - const struct i915_gem_engines *e = it->engines; - struct intel_context *ctx; - - do { - if (it->idx >= e->num_engines) - return NULL; - - ctx = e->engines[it->idx++]; - } while (!ctx); - - return ctx; -} - -#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) -#include "selftests/mock_context.c" -#include "selftests/i915_gem_context.c" -#endif - -static void i915_global_gem_context_shrink(void) -{ - kmem_cache_shrink(global.slab_luts); -} - -static void i915_global_gem_context_exit(void) -{ - kmem_cache_destroy(global.slab_luts); -} - -static struct i915_global_gem_context global = { { - .shrink = i915_global_gem_context_shrink, - .exit = i915_global_gem_context_exit, -} }; - -int __init i915_global_gem_context_init(void) -{ - global.slab_luts = KMEM_CACHE(i915_lut_handle, 0); - if (!global.slab_luts) - return -ENOMEM; - - i915_global_register(&global.base); - return 0; -} diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h deleted file mode 100644 index 9ad4a6362438..000000000000 --- a/drivers/gpu/drm/i915/i915_gem_context.h +++ /dev/null @@ -1,258 +0,0 @@ -/* - * Copyright © 2016 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following 
conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - * - */ - -#ifndef __I915_GEM_CONTEXT_H__ -#define __I915_GEM_CONTEXT_H__ - -#include "i915_gem_context_types.h" - -#include "gt/intel_context.h" - -#include "i915_gem.h" -#include "i915_scheduler.h" -#include "intel_device_info.h" - -struct drm_device; -struct drm_file; - -static inline bool i915_gem_context_is_closed(const struct i915_gem_context *ctx) -{ - return test_bit(CONTEXT_CLOSED, &ctx->flags); -} - -static inline void i915_gem_context_set_closed(struct i915_gem_context *ctx) -{ - GEM_BUG_ON(i915_gem_context_is_closed(ctx)); - set_bit(CONTEXT_CLOSED, &ctx->flags); -} - -static inline bool i915_gem_context_no_error_capture(const struct i915_gem_context *ctx) -{ - return test_bit(UCONTEXT_NO_ERROR_CAPTURE, &ctx->user_flags); -} - -static inline void i915_gem_context_set_no_error_capture(struct i915_gem_context *ctx) -{ - set_bit(UCONTEXT_NO_ERROR_CAPTURE, &ctx->user_flags); -} - -static inline void i915_gem_context_clear_no_error_capture(struct i915_gem_context *ctx) -{ - clear_bit(UCONTEXT_NO_ERROR_CAPTURE, &ctx->user_flags); -} - -static inline bool i915_gem_context_is_bannable(const struct i915_gem_context *ctx) -{ - return test_bit(UCONTEXT_BANNABLE, &ctx->user_flags); -} - -static inline void i915_gem_context_set_bannable(struct i915_gem_context *ctx) -{ - set_bit(UCONTEXT_BANNABLE, &ctx->user_flags); -} - -static inline void i915_gem_context_clear_bannable(struct i915_gem_context *ctx) -{ - clear_bit(UCONTEXT_BANNABLE, &ctx->user_flags); -} - -static inline bool i915_gem_context_is_recoverable(const struct i915_gem_context *ctx) -{ - return test_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags); -} - -static inline void i915_gem_context_set_recoverable(struct i915_gem_context *ctx) -{ - set_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags); -} - -static inline void i915_gem_context_clear_recoverable(struct i915_gem_context *ctx) -{ - clear_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags); -} - -static inline bool i915_gem_context_is_banned(const struct i915_gem_context *ctx) -{ - return test_bit(CONTEXT_BANNED, &ctx->flags); -} - -static inline void i915_gem_context_set_banned(struct i915_gem_context *ctx) -{ - set_bit(CONTEXT_BANNED, &ctx->flags); -} - -static inline bool i915_gem_context_force_single_submission(const struct i915_gem_context *ctx) -{ - return test_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ctx->flags); -} - -static inline void i915_gem_context_set_force_single_submission(struct i915_gem_context *ctx) -{ - __set_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ctx->flags); -} - -static inline bool -i915_gem_context_user_engines(const struct i915_gem_context *ctx) -{ - return test_bit(CONTEXT_USER_ENGINES, &ctx->flags); -} - -static inline void -i915_gem_context_set_user_engines(struct i915_gem_context *ctx) -{ - set_bit(CONTEXT_USER_ENGINES, &ctx->flags); -} - -static inline void -i915_gem_context_clear_user_engines(struct 
i915_gem_context *ctx) -{ - clear_bit(CONTEXT_USER_ENGINES, &ctx->flags); -} - -int __i915_gem_context_pin_hw_id(struct i915_gem_context *ctx); -static inline int i915_gem_context_pin_hw_id(struct i915_gem_context *ctx) -{ - if (atomic_inc_not_zero(&ctx->hw_id_pin_count)) - return 0; - - return __i915_gem_context_pin_hw_id(ctx); -} - -static inline void i915_gem_context_unpin_hw_id(struct i915_gem_context *ctx) -{ - GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count) == 0u); - atomic_dec(&ctx->hw_id_pin_count); -} - -static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx) -{ - return !ctx->file_priv; -} - -/* i915_gem_context.c */ -int __must_check i915_gem_contexts_init(struct drm_i915_private *dev_priv); -void i915_gem_contexts_lost(struct drm_i915_private *dev_priv); -void i915_gem_contexts_fini(struct drm_i915_private *dev_priv); - -int i915_gem_context_open(struct drm_i915_private *i915, - struct drm_file *file); -void i915_gem_context_close(struct drm_file *file); - -void i915_gem_context_release(struct kref *ctx_ref); -struct i915_gem_context * -i915_gem_context_create_gvt(struct drm_device *dev); - -int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data, - struct drm_file *file); -int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data, - struct drm_file *file); - -int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, - struct drm_file *file); -int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, - struct drm_file *file); -int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data, - struct drm_file *file); - -struct i915_gem_context * -i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio); - -static inline struct i915_gem_context * -i915_gem_context_get(struct i915_gem_context *ctx) -{ - kref_get(&ctx->ref); - return ctx; -} - -static inline void i915_gem_context_put(struct i915_gem_context *ctx) -{ - kref_put(&ctx->ref, i915_gem_context_release); -} - -static inline struct i915_gem_engines * -i915_gem_context_engines(struct i915_gem_context *ctx) -{ - return rcu_dereference_protected(ctx->engines, - lockdep_is_held(&ctx->engines_mutex)); -} - -static inline struct i915_gem_engines * -i915_gem_context_lock_engines(struct i915_gem_context *ctx) - __acquires(&ctx->engines_mutex) -{ - mutex_lock(&ctx->engines_mutex); - return i915_gem_context_engines(ctx); -} - -static inline void -i915_gem_context_unlock_engines(struct i915_gem_context *ctx) - __releases(&ctx->engines_mutex) -{ - mutex_unlock(&ctx->engines_mutex); -} - -static inline struct intel_context * -i915_gem_context_lookup_engine(struct i915_gem_context *ctx, unsigned int idx) -{ - return i915_gem_context_engines(ctx)->engines[idx]; -} - -static inline struct intel_context * -i915_gem_context_get_engine(struct i915_gem_context *ctx, unsigned int idx) -{ - struct intel_context *ce = ERR_PTR(-EINVAL); - - rcu_read_lock(); { - struct i915_gem_engines *e = rcu_dereference(ctx->engines); - if (likely(idx < e->num_engines && e->engines[idx])) - ce = intel_context_get(e->engines[idx]); - } rcu_read_unlock(); - - return ce; -} - -static inline void -i915_gem_engines_iter_init(struct i915_gem_engines_iter *it, - struct i915_gem_engines *engines) -{ - GEM_BUG_ON(!engines); - it->engines = engines; - it->idx = 0; -} - -struct 
intel_context * -i915_gem_engines_iter_next(struct i915_gem_engines_iter *it); - -#define for_each_gem_engine(ce, engines, it) \ - for (i915_gem_engines_iter_init(&(it), (engines)); \ - ((ce) = i915_gem_engines_iter_next(&(it)));) - -struct i915_lut_handle *i915_lut_handle_alloc(void); -void i915_lut_handle_free(struct i915_lut_handle *lut); - -#endif /* !__I915_GEM_CONTEXT_H__ */ diff --git a/drivers/gpu/drm/i915/i915_gem_context_types.h b/drivers/gpu/drm/i915/i915_gem_context_types.h deleted file mode 100644 index fb965ded2508..000000000000 --- a/drivers/gpu/drm/i915/i915_gem_context_types.h +++ /dev/null @@ -1,208 +0,0 @@ -/* - * SPDX-License-Identifier: MIT - * - * Copyright © 2019 Intel Corporation - */ - -#ifndef __I915_GEM_CONTEXT_TYPES_H__ -#define __I915_GEM_CONTEXT_TYPES_H__ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "gt/intel_context_types.h" - -#include "i915_scheduler.h" - -struct pid; - -struct drm_i915_private; -struct drm_i915_file_private; -struct i915_hw_ppgtt; -struct i915_timeline; -struct intel_ring; - -struct i915_gem_engines { - struct rcu_work rcu; - struct drm_i915_private *i915; - unsigned int num_engines; - struct intel_context *engines[]; -}; - -struct i915_gem_engines_iter { - unsigned int idx; - const struct i915_gem_engines *engines; -}; - -/** - * struct i915_gem_context - client state - * - * The struct i915_gem_context represents the combined view of the driver and - * logical hardware state for a particular client. - */ -struct i915_gem_context { - /** i915: i915 device backpointer */ - struct drm_i915_private *i915; - - /** file_priv: owning file descriptor */ - struct drm_i915_file_private *file_priv; - - /** - * @engines: User defined engines for this context - * - * Various uAPI offer the ability to lookup up an - * index from this array to select an engine operate on. - * - * Multiple logically distinct instances of the same engine - * may be defined in the array, as well as composite virtual - * engines. - * - * Execbuf uses the I915_EXEC_RING_MASK as an index into this - * array to select which HW context + engine to execute on. For - * the default array, the user_ring_map[] is used to translate - * the legacy uABI onto the approprate index (e.g. both - * I915_EXEC_DEFAULT and I915_EXEC_RENDER select the same - * context, and I915_EXEC_BSD is weird). For a use defined - * array, execbuf uses I915_EXEC_RING_MASK as a plain index. - * - * User defined by I915_CONTEXT_PARAM_ENGINE (when the - * CONTEXT_USER_ENGINES flag is set). - */ - struct i915_gem_engines __rcu *engines; - struct mutex engines_mutex; /* guards writes to engines */ - - struct i915_timeline *timeline; - - /** - * @ppgtt: unique address space (GTT) - * - * In full-ppgtt mode, each context has its own address space ensuring - * complete seperation of one client from all others. - * - * In other modes, this is a NULL pointer with the expectation that - * the caller uses the shared global GTT. - */ - struct i915_hw_ppgtt *ppgtt; - - /** - * @pid: process id of creator - * - * Note that who created the context may not be the principle user, - * as the context may be shared across a local socket. However, - * that should only affect the default context, all contexts created - * explicitly by the client are expected to be isolated. 
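As a usage note for the iterator and locking helpers declared earlier (for_each_gem_engine(), i915_gem_context_lock_engines() and i915_gem_context_unlock_engines()): a caller takes engines_mutex, walks the engines array, and unlocks afterwards. A minimal sketch, assuming ctx is a valid, referenced struct i915_gem_context (the function name here is hypothetical):

static void walk_context_engines(struct i915_gem_context *ctx)
{
	struct i915_gem_engines_iter it;
	struct intel_context *ce;

	/* Hold ctx->engines_mutex for the duration of the walk. */
	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		/* Operate on each engine context, e.g. inspect ce->engine->class. */
	}
	i915_gem_context_unlock_engines(ctx);
}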
- */ - struct pid *pid; - - /** - * @name: arbitrary name - * - * A name is constructed for the context from the creator's process - * name, pid and user handle in order to uniquely identify the - * context in messages. - */ - const char *name; - - /** link: place with &drm_i915_private.context_list */ - struct list_head link; - struct llist_node free_link; - - /** - * @ref: reference count - * - * A reference to a context is held by both the client who created it - * and on each request submitted to the hardware using the request - * (to ensure the hardware has access to the state until it has - * finished all pending writes). See i915_gem_context_get() and - * i915_gem_context_put() for access. - */ - struct kref ref; - - /** - * @rcu: rcu_head for deferred freeing. - */ - struct rcu_head rcu; - - /** - * @user_flags: small set of booleans controlled by the user - */ - unsigned long user_flags; -#define UCONTEXT_NO_ZEROMAP 0 -#define UCONTEXT_NO_ERROR_CAPTURE 1 -#define UCONTEXT_BANNABLE 2 -#define UCONTEXT_RECOVERABLE 3 - - /** - * @flags: small set of booleans - */ - unsigned long flags; -#define CONTEXT_BANNED 0 -#define CONTEXT_CLOSED 1 -#define CONTEXT_FORCE_SINGLE_SUBMISSION 2 -#define CONTEXT_USER_ENGINES 3 - - /** - * @hw_id: - unique identifier for the context - * - * The hardware needs to uniquely identify the context for a few - * functions like fault reporting, PASID, scheduling. The - * &drm_i915_private.context_hw_ida is used to assign a unqiue - * id for the lifetime of the context. - * - * @hw_id_pin_count: - number of times this context had been pinned - * for use (should be, at most, once per engine). - * - * @hw_id_link: - all contexts with an assigned id are tracked - * for possible repossession. - */ - unsigned int hw_id; - atomic_t hw_id_pin_count; - struct list_head hw_id_link; - - struct mutex mutex; - - struct i915_sched_attr sched; - - /** ring_size: size for allocating the per-engine ring buffer */ - u32 ring_size; - /** desc_template: invariant fields for the HW context descriptor */ - u32 desc_template; - - /** guilty_count: How many times this context has caused a GPU hang. */ - atomic_t guilty_count; - /** - * @active_count: How many times this context was active during a GPU - * hang, but did not cause it. - */ - atomic_t active_count; - - /** - * @hang_timestamp: The last time(s) this context caused a GPU hang - */ - unsigned long hang_timestamp[2]; -#define CONTEXT_FAST_HANG_JIFFIES (120 * HZ) /* 3 hangs within 120s? Banned! */ - - /** remap_slice: Bitmask of cache lines that need remapping */ - u8 remap_slice; - - /** handles_vma: rbtree to look up our context specific obj/vma for - * the user handle. (user handles are per fd, but the binding is - * per vm, which may be one per context or shared with the global GTT) - */ - struct radix_tree_root handles_vma; - - /** handles_list: reverse list of all the rbtree entries in use for - * this context, which allows us to free all the allocations on - * context close. 
- */ - struct list_head handles_list; -}; - -#endif /* __I915_GEM_CONTEXT_TYPES_H__ */ diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c deleted file mode 100644 index 5a101a9462d8..000000000000 --- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c +++ /dev/null @@ -1,337 +0,0 @@ -/* - * Copyright 2012 Red Hat Inc - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - * - * Authors: - * Dave Airlie - */ - -#include -#include - - -#include "i915_drv.h" - -static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf) -{ - return to_intel_bo(buf->priv); -} - -static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment, - enum dma_data_direction dir) -{ - struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf); - struct sg_table *st; - struct scatterlist *src, *dst; - int ret, i; - - ret = i915_gem_object_pin_pages(obj); - if (ret) - goto err; - - /* Copy sg so that we make an independent mapping */ - st = kmalloc(sizeof(struct sg_table), GFP_KERNEL); - if (st == NULL) { - ret = -ENOMEM; - goto err_unpin_pages; - } - - ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL); - if (ret) - goto err_free; - - src = obj->mm.pages->sgl; - dst = st->sgl; - for (i = 0; i < obj->mm.pages->nents; i++) { - sg_set_page(dst, sg_page(src), src->length, 0); - dst = sg_next(dst); - src = sg_next(src); - } - - if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) { - ret = -ENOMEM; - goto err_free_sg; - } - - return st; - -err_free_sg: - sg_free_table(st); -err_free: - kfree(st); -err_unpin_pages: - i915_gem_object_unpin_pages(obj); -err: - return ERR_PTR(ret); -} - -static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment, - struct sg_table *sg, - enum dma_data_direction dir) -{ - struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf); - - dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir); - sg_free_table(sg); - kfree(sg); - - i915_gem_object_unpin_pages(obj); -} - -static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf) -{ - struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf); - - return i915_gem_object_pin_map(obj, I915_MAP_WB); -} - -static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr) -{ - struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf); - - i915_gem_object_flush_map(obj); - i915_gem_object_unpin_map(obj); -} - -static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long 
page_num) -{ - struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf); - struct page *page; - - if (page_num >= obj->base.size >> PAGE_SHIFT) - return NULL; - - if (!i915_gem_object_has_struct_page(obj)) - return NULL; - - if (i915_gem_object_pin_pages(obj)) - return NULL; - - /* Synchronisation is left to the caller (via .begin_cpu_access()) */ - page = i915_gem_object_get_page(obj, page_num); - if (IS_ERR(page)) - goto err_unpin; - - return kmap(page); - -err_unpin: - i915_gem_object_unpin_pages(obj); - return NULL; -} - -static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr) -{ - struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf); - - kunmap(virt_to_page(addr)); - i915_gem_object_unpin_pages(obj); -} - -static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma) -{ - struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf); - int ret; - - if (obj->base.size < vma->vm_end - vma->vm_start) - return -EINVAL; - - if (!obj->base.filp) - return -ENODEV; - - ret = call_mmap(obj->base.filp, vma); - if (ret) - return ret; - - fput(vma->vm_file); - vma->vm_file = get_file(obj->base.filp); - - return 0; -} - -static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction) -{ - struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf); - struct drm_device *dev = obj->base.dev; - bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE); - int err; - - err = i915_gem_object_pin_pages(obj); - if (err) - return err; - - err = i915_mutex_lock_interruptible(dev); - if (err) - goto out; - - err = i915_gem_object_set_to_cpu_domain(obj, write); - mutex_unlock(&dev->struct_mutex); - -out: - i915_gem_object_unpin_pages(obj); - return err; -} - -static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction) -{ - struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf); - struct drm_device *dev = obj->base.dev; - int err; - - err = i915_gem_object_pin_pages(obj); - if (err) - return err; - - err = i915_mutex_lock_interruptible(dev); - if (err) - goto out; - - err = i915_gem_object_set_to_gtt_domain(obj, false); - mutex_unlock(&dev->struct_mutex); - -out: - i915_gem_object_unpin_pages(obj); - return err; -} - -static const struct dma_buf_ops i915_dmabuf_ops = { - .map_dma_buf = i915_gem_map_dma_buf, - .unmap_dma_buf = i915_gem_unmap_dma_buf, - .release = drm_gem_dmabuf_release, - .map = i915_gem_dmabuf_kmap, - .unmap = i915_gem_dmabuf_kunmap, - .mmap = i915_gem_dmabuf_mmap, - .vmap = i915_gem_dmabuf_vmap, - .vunmap = i915_gem_dmabuf_vunmap, - .begin_cpu_access = i915_gem_begin_cpu_access, - .end_cpu_access = i915_gem_end_cpu_access, -}; - -struct dma_buf *i915_gem_prime_export(struct drm_device *dev, - struct drm_gem_object *gem_obj, int flags) -{ - struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); - DEFINE_DMA_BUF_EXPORT_INFO(exp_info); - - exp_info.ops = &i915_dmabuf_ops; - exp_info.size = gem_obj->size; - exp_info.flags = flags; - exp_info.priv = gem_obj; - exp_info.resv = obj->resv; - - if (obj->ops->dmabuf_export) { - int ret = obj->ops->dmabuf_export(obj); - if (ret) - return ERR_PTR(ret); - } - - return drm_gem_dmabuf_export(dev, &exp_info); -} - -static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj) -{ - struct sg_table *pages; - unsigned int sg_page_sizes; - - pages = dma_buf_map_attachment(obj->base.import_attach, - DMA_BIDIRECTIONAL); - if (IS_ERR(pages)) - return PTR_ERR(pages); - - sg_page_sizes = 
i915_sg_page_sizes(pages->sgl); - - __i915_gem_object_set_pages(obj, pages, sg_page_sizes); - - return 0; -} - -static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj, - struct sg_table *pages) -{ - dma_buf_unmap_attachment(obj->base.import_attach, pages, - DMA_BIDIRECTIONAL); -} - -static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = { - .get_pages = i915_gem_object_get_pages_dmabuf, - .put_pages = i915_gem_object_put_pages_dmabuf, -}; - -struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev, - struct dma_buf *dma_buf) -{ - struct dma_buf_attachment *attach; - struct drm_i915_gem_object *obj; - int ret; - - /* is this one of own objects? */ - if (dma_buf->ops == &i915_dmabuf_ops) { - obj = dma_buf_to_obj(dma_buf); - /* is it from our device? */ - if (obj->base.dev == dev) { - /* - * Importing dmabuf exported from out own gem increases - * refcount on gem itself instead of f_count of dmabuf. - */ - return &i915_gem_object_get(obj)->base; - } - } - - /* need to attach */ - attach = dma_buf_attach(dma_buf, dev->dev); - if (IS_ERR(attach)) - return ERR_CAST(attach); - - get_dma_buf(dma_buf); - - obj = i915_gem_object_alloc(); - if (obj == NULL) { - ret = -ENOMEM; - goto fail_detach; - } - - drm_gem_private_object_init(dev, &obj->base, dma_buf->size); - i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops); - obj->base.import_attach = attach; - obj->resv = dma_buf->resv; - - /* We use GTT as shorthand for a coherent domain, one that is - * neither in the GPU cache nor in the CPU cache, where all - * writes are immediately visible in memory. (That's not strictly - * true, but it's close! There are internal buffers such as the - * write-combined buffer or a delay through the chipset for GTT - * writes that do require us to treat GTT as a separate cache domain.) - */ - obj->read_domains = I915_GEM_DOMAIN_GTT; - obj->write_domain = 0; - - return &obj->base; - -fail_detach: - dma_buf_detach(dma_buf, attach); - dma_buf_put(dma_buf); - - return ERR_PTR(ret); -} - -#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) -#include "selftests/mock_dmabuf.c" -#include "selftests/i915_gem_dmabuf.c" -#endif diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index 0bdb3e072ba5..a5783c4cb98b 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -28,6 +28,8 @@ #include +#include "gem/i915_gem_context.h" + #include "i915_drv.h" #include "intel_drv.h" #include "i915_trace.h" diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c deleted file mode 100644 index 699f3f180d8a..000000000000 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ /dev/null @@ -1,2788 +0,0 @@ -/* - * Copyright © 2008,2010 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - * - * Authors: - * Eric Anholt - * Chris Wilson - * - */ - -#include -#include -#include -#include - -#include -#include - -#include "gem/i915_gem_ioctls.h" -#include "gt/intel_gt_pm.h" - -#include "i915_drv.h" -#include "i915_gem_clflush.h" -#include "i915_trace.h" -#include "intel_drv.h" -#include "intel_frontbuffer.h" - -enum { - FORCE_CPU_RELOC = 1, - FORCE_GTT_RELOC, - FORCE_GPU_RELOC, -#define DBG_FORCE_RELOC 0 /* choose one of the above! */ -}; - -#define __EXEC_OBJECT_HAS_REF BIT(31) -#define __EXEC_OBJECT_HAS_PIN BIT(30) -#define __EXEC_OBJECT_HAS_FENCE BIT(29) -#define __EXEC_OBJECT_NEEDS_MAP BIT(28) -#define __EXEC_OBJECT_NEEDS_BIAS BIT(27) -#define __EXEC_OBJECT_INTERNAL_FLAGS (~0u << 27) /* all of the above */ -#define __EXEC_OBJECT_RESERVED (__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_FENCE) - -#define __EXEC_HAS_RELOC BIT(31) -#define __EXEC_VALIDATED BIT(30) -#define __EXEC_INTERNAL_FLAGS (~0u << 30) -#define UPDATE PIN_OFFSET_FIXED - -#define BATCH_OFFSET_BIAS (256*1024) - -#define __I915_EXEC_ILLEGAL_FLAGS \ - (__I915_EXEC_UNKNOWN_FLAGS | \ - I915_EXEC_CONSTANTS_MASK | \ - I915_EXEC_RESOURCE_STREAMER) - -/* Catch emission of unexpected errors for CI! */ -#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) -#undef EINVAL -#define EINVAL ({ \ - DRM_DEBUG_DRIVER("EINVAL at %s:%d\n", __func__, __LINE__); \ - 22; \ -}) -#endif - -/** - * DOC: User command execution - * - * Userspace submits commands to be executed on the GPU as an instruction - * stream within a GEM object we call a batchbuffer. This instructions may - * refer to other GEM objects containing auxiliary state such as kernels, - * samplers, render targets and even secondary batchbuffers. Userspace does - * not know where in the GPU memory these objects reside and so before the - * batchbuffer is passed to the GPU for execution, those addresses in the - * batchbuffer and auxiliary objects are updated. This is known as relocation, - * or patching. To try and avoid having to relocate each object on the next - * execution, userspace is told the location of those objects in this pass, - * but this remains just a hint as the kernel may choose a new location for - * any object in the future. - * - * At the level of talking to the hardware, submitting a batchbuffer for the - * GPU to execute is to add content to a buffer from which the HW - * command streamer is reading. - * - * 1. Add a command to load the HW context. For Logical Ring Contexts, i.e. - * Execlists, this command is not placed on the same buffer as the - * remaining items. - * - * 2. Add a command to invalidate caches to the buffer. - * - * 3. Add a batchbuffer start command to the buffer; the start command is - * essentially a token together with the GPU address of the batchbuffer - * to be executed. - * - * 4. Add a pipeline flush to the buffer. - * - * 5. Add a memory write command to the buffer to record when the GPU - * is done executing the batchbuffer. 
The memory write writes the - * global sequence number of the request, ``i915_request::global_seqno``; - * the i915 driver uses the current value in the register to determine - * if the GPU has completed the batchbuffer. - * - * 6. Add a user interrupt command to the buffer. This command instructs - * the GPU to issue an interrupt when the command, pipeline flush and - * memory write are completed. - * - * 7. Inform the hardware of the additional commands added to the buffer - * (by updating the tail pointer). - * - * Processing an execbuf ioctl is conceptually split up into a few phases. - * - * 1. Validation - Ensure all the pointers, handles and flags are valid. - * 2. Reservation - Assign GPU address space for every object - * 3. Relocation - Update any addresses to point to the final locations - * 4. Serialisation - Order the request with respect to its dependencies - * 5. Construction - Construct a request to execute the batchbuffer - * 6. Submission (at some point in the future execution) - * - * Reserving resources for the execbuf is the most complicated phase. We - * neither want to have to migrate the object in the address space, nor do - * we want to have to update any relocations pointing to this object. Ideally, - * we want to leave the object where it is and for all the existing relocations - * to match. If the object is given a new address, or if userspace thinks the - * object is elsewhere, we have to parse all the relocation entries and update - * the addresses. Userspace can set the I915_EXEC_NORELOC flag to hint that - * all the target addresses in all of its objects match the value in the - * relocation entries and that they all match the presumed offsets given by the - * list of execbuffer objects. Using this knowledge, we know that if we haven't - * moved any buffers, all the relocation entries are valid and we can skip - * the update. (If userspace is wrong, the likely outcome is an impromptu GPU - * hang.) The requirement for using I915_EXEC_NO_RELOC are: - * - * The addresses written in the objects must match the corresponding - * reloc.presumed_offset which in turn must match the corresponding - * execobject.offset. - * - * Any render targets written to in the batch must be flagged with - * EXEC_OBJECT_WRITE. - * - * To avoid stalling, execobject.offset should match the current - * address of that object within the active context. - * - * The reservation is done is multiple phases. First we try and keep any - * object already bound in its current location - so as long as meets the - * constraints imposed by the new execbuffer. Any object left unbound after the - * first pass is then fitted into any available idle space. If an object does - * not fit, all objects are removed from the reservation and the process rerun - * after sorting the objects into a priority order (more difficult to fit - * objects are tried first). Failing that, the entire VM is cleared and we try - * to fit the execbuf once last time before concluding that it simply will not - * fit. - * - * A small complication to all of this is that we allow userspace not only to - * specify an alignment and a size for the object in the address space, but - * we also allow userspace to specify the exact offset. This objects are - * simpler to place (the location is known a priori) all we have to do is make - * sure the space is available. 
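As a rough userspace-side illustration of the NO_RELOC contract described above (a sketch, not part of this patch): if every execobject.offset already matches the addresses baked into the batch, the kernel can skip relocation processing entirely. The snippet below assumes the DRM fd, the batch handle, its presumed GTT offset and its length were obtained elsewhere; error handling is omitted.

/* Sketch only: submit one batch with I915_EXEC_NO_RELOC, promising the
 * kernel that the presumed offset baked into the batch is still valid. */
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <drm/i915_drm.h>

static int submit_batch_noreloc(int fd, uint32_t batch_handle,
				uint64_t presumed_offset, uint32_t batch_len)
{
	struct drm_i915_gem_exec_object2 obj;
	struct drm_i915_gem_execbuffer2 execbuf;

	memset(&obj, 0, sizeof(obj));
	obj.handle = batch_handle;
	obj.offset = presumed_offset;	/* must match what the batch assumes */

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = (uintptr_t)&obj;
	execbuf.buffer_count = 1;
	execbuf.batch_len = batch_len;
	execbuf.flags = I915_EXEC_RENDER | I915_EXEC_NO_RELOC;

	return drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
}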
- * - * Once all the objects are in place, patching up the buried pointers to point - * to the final locations is a fairly simple job of walking over the relocation - * entry arrays, looking up the right address and rewriting the value into - * the object. Simple! ... The relocation entries are stored in user memory - * and so to access them we have to copy them into a local buffer. That copy - * has to avoid taking any pagefaults as they may lead back to a GEM object - * requiring the struct_mutex (i.e. recursive deadlock). So once again we split - * the relocation into multiple passes. First we try to do everything within an - * atomic context (avoid the pagefaults) which requires that we never wait. If - * we detect that we may wait, or if we need to fault, then we have to fallback - * to a slower path. The slowpath has to drop the mutex. (Can you hear alarm - * bells yet?) Dropping the mutex means that we lose all the state we have - * built up so far for the execbuf and we must reset any global data. However, - * we do leave the objects pinned in their final locations - which is a - * potential issue for concurrent execbufs. Once we have left the mutex, we can - * allocate and copy all the relocation entries into a large array at our - * leisure, reacquire the mutex, reclaim all the objects and other state and - * then proceed to update any incorrect addresses with the objects. - * - * As we process the relocation entries, we maintain a record of whether the - * object is being written to. Using NORELOC, we expect userspace to provide - * this information instead. We also check whether we can skip the relocation - * by comparing the expected value inside the relocation entry with the target's - * final address. If they differ, we have to map the current object and rewrite - * the 4 or 8 byte pointer within. - * - * Serialising an execbuf is quite simple according to the rules of the GEM - * ABI. Execution within each context is ordered by the order of submission. - * Writes to any GEM object are in order of submission and are exclusive. Reads - * from a GEM object are unordered with respect to other reads, but ordered by - * writes. A write submitted after a read cannot occur before the read, and - * similarly any read submitted after a write cannot occur before the write. - * Writes are ordered between engines such that only one write occurs at any - * time (completing any reads beforehand) - using semaphores where available - * and CPU serialisation otherwise. Other GEM access obey the same rules, any - * write (either via mmaps using set-domain, or via pwrite) must flush all GPU - * reads before starting, and any read (either using set-domain or pread) must - * flush all GPU writes before starting. (Note we only employ a barrier before, - * we currently rely on userspace not concurrently starting a new execution - * whilst reading or writing to an object. This may be an advantage or not - * depending on how much you trust userspace not to shoot themselves in the - * foot.) Serialisation may just result in the request being inserted into - * a DAG awaiting its turn, but most simple is to wait on the CPU until - * all dependencies are resolved. - * - * After all of that, is just a matter of closing the request and handing it to - * the hardware (well, leaving it in a queue to be executed). However, we also - * offer the ability for batchbuffers to be run with elevated privileges so - * that they access otherwise hidden registers. (Used to adjust L3 cache etc.) 
- * Before any batch is given extra privileges we first must check that it - * contains no nefarious instructions, we check that each instruction is from - * our whitelist and all registers are also from an allowed list. We first - * copy the user's batchbuffer to a shadow (so that the user doesn't have - * access to it, either by the CPU or GPU as we scan it) and then parse each - * instruction. If everything is ok, we set a flag telling the hardware to run - * the batchbuffer in trusted mode, otherwise the ioctl is rejected. - */ - -struct i915_execbuffer { - struct drm_i915_private *i915; /** i915 backpointer */ - struct drm_file *file; /** per-file lookup tables and limits */ - struct drm_i915_gem_execbuffer2 *args; /** ioctl parameters */ - struct drm_i915_gem_exec_object2 *exec; /** ioctl execobj[] */ - struct i915_vma **vma; - unsigned int *flags; - - struct intel_engine_cs *engine; /** engine to queue the request to */ - struct intel_context *context; /* logical state for the request */ - struct i915_gem_context *gem_context; /** caller's context */ - struct i915_address_space *vm; /** GTT and vma for the request */ - - struct i915_request *request; /** our request to build */ - struct i915_vma *batch; /** identity of the batch obj/vma */ - - /** actual size of execobj[] as we may extend it for the cmdparser */ - unsigned int buffer_count; - - /** list of vma not yet bound during reservation phase */ - struct list_head unbound; - - /** list of vma that have execobj.relocation_count */ - struct list_head relocs; - - /** - * Track the most recently used object for relocations, as we - * frequently have to perform multiple relocations within the same - * obj/page - */ - struct reloc_cache { - struct drm_mm_node node; /** temporary GTT binding */ - unsigned long vaddr; /** Current kmap address */ - unsigned long page; /** Currently mapped page index */ - unsigned int gen; /** Cached value of INTEL_GEN */ - bool use_64bit_reloc : 1; - bool has_llc : 1; - bool has_fence : 1; - bool needs_unfenced : 1; - - struct i915_request *rq; - u32 *rq_cmd; - unsigned int rq_size; - } reloc_cache; - - u64 invalid_flags; /** Set of execobj.flags that are invalid */ - u32 context_flags; /** Set of execobj.flags to insert from the ctx */ - - u32 batch_start_offset; /** Location within object of batch */ - u32 batch_len; /** Length of batch within object */ - u32 batch_flags; /** Flags composed for emit_bb_start() */ - - /** - * Indicate either the size of the hastable used to resolve - * relocation handles, or if negative that we are using a direct - * index into the execobj[]. - */ - int lut_size; - struct hlist_head *buckets; /** ht for relocation handles */ -}; - -#define exec_entry(EB, VMA) (&(EB)->exec[(VMA)->exec_flags - (EB)->flags]) - -/* - * Used to convert any address to canonical form. - * Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS, - * MI_LOAD_REGISTER_MEM and others, see Broadwell PRM Vol2a) require the - * addresses to be in a canonical form: - * "GraphicsAddress[63:48] are ignored by the HW and assumed to be in correct - * canonical form [63:48] == [47]." 
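For instance, under this rule a GPU address such as 0x0000800000000000 (bit 47 set) is only acceptable to these commands in its sign-extended form 0xffff800000000000; gen8_canonical_addr() below produces that form via sign_extend64(address, 47), and gen8_noncanonical_addr() masks it back down to bits [47:0].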
- */ -#define GEN8_HIGH_ADDRESS_BIT 47 -static inline u64 gen8_canonical_addr(u64 address) -{ - return sign_extend64(address, GEN8_HIGH_ADDRESS_BIT); -} - -static inline u64 gen8_noncanonical_addr(u64 address) -{ - return address & GENMASK_ULL(GEN8_HIGH_ADDRESS_BIT, 0); -} - -static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb) -{ - return intel_engine_needs_cmd_parser(eb->engine) && eb->batch_len; -} - -static int eb_create(struct i915_execbuffer *eb) -{ - if (!(eb->args->flags & I915_EXEC_HANDLE_LUT)) { - unsigned int size = 1 + ilog2(eb->buffer_count); - - /* - * Without a 1:1 association between relocation handles and - * the execobject[] index, we instead create a hashtable. - * We size it dynamically based on available memory, starting - * first with 1:1 assocative hash and scaling back until - * the allocation succeeds. - * - * Later on we use a positive lut_size to indicate we are - * using this hashtable, and a negative value to indicate a - * direct lookup. - */ - do { - gfp_t flags; - - /* While we can still reduce the allocation size, don't - * raise a warning and allow the allocation to fail. - * On the last pass though, we want to try as hard - * as possible to perform the allocation and warn - * if it fails. - */ - flags = GFP_KERNEL; - if (size > 1) - flags |= __GFP_NORETRY | __GFP_NOWARN; - - eb->buckets = kzalloc(sizeof(struct hlist_head) << size, - flags); - if (eb->buckets) - break; - } while (--size); - - if (unlikely(!size)) - return -ENOMEM; - - eb->lut_size = size; - } else { - eb->lut_size = -eb->buffer_count; - } - - return 0; -} - -static bool -eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry, - const struct i915_vma *vma, - unsigned int flags) -{ - if (vma->node.size < entry->pad_to_size) - return true; - - if (entry->alignment && !IS_ALIGNED(vma->node.start, entry->alignment)) - return true; - - if (flags & EXEC_OBJECT_PINNED && - vma->node.start != entry->offset) - return true; - - if (flags & __EXEC_OBJECT_NEEDS_BIAS && - vma->node.start < BATCH_OFFSET_BIAS) - return true; - - if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) && - (vma->node.start + vma->node.size - 1) >> 32) - return true; - - if (flags & __EXEC_OBJECT_NEEDS_MAP && - !i915_vma_is_map_and_fenceable(vma)) - return true; - - return false; -} - -static inline bool -eb_pin_vma(struct i915_execbuffer *eb, - const struct drm_i915_gem_exec_object2 *entry, - struct i915_vma *vma) -{ - unsigned int exec_flags = *vma->exec_flags; - u64 pin_flags; - - if (vma->node.size) - pin_flags = vma->node.start; - else - pin_flags = entry->offset & PIN_OFFSET_MASK; - - pin_flags |= PIN_USER | PIN_NOEVICT | PIN_OFFSET_FIXED; - if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_GTT)) - pin_flags |= PIN_GLOBAL; - - if (unlikely(i915_vma_pin(vma, 0, 0, pin_flags))) - return false; - - if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) { - if (unlikely(i915_vma_pin_fence(vma))) { - i915_vma_unpin(vma); - return false; - } - - if (vma->fence) - exec_flags |= __EXEC_OBJECT_HAS_FENCE; - } - - *vma->exec_flags = exec_flags | __EXEC_OBJECT_HAS_PIN; - return !eb_vma_misplaced(entry, vma, exec_flags); -} - -static inline void __eb_unreserve_vma(struct i915_vma *vma, unsigned int flags) -{ - GEM_BUG_ON(!(flags & __EXEC_OBJECT_HAS_PIN)); - - if (unlikely(flags & __EXEC_OBJECT_HAS_FENCE)) - __i915_vma_unpin_fence(vma); - - __i915_vma_unpin(vma); -} - -static inline void -eb_unreserve_vma(struct i915_vma *vma, unsigned int *flags) -{ - if (!(*flags & __EXEC_OBJECT_HAS_PIN)) - return; - - 
__eb_unreserve_vma(vma, *flags); - *flags &= ~__EXEC_OBJECT_RESERVED; -} - -static int -eb_validate_vma(struct i915_execbuffer *eb, - struct drm_i915_gem_exec_object2 *entry, - struct i915_vma *vma) -{ - if (unlikely(entry->flags & eb->invalid_flags)) - return -EINVAL; - - if (unlikely(entry->alignment && !is_power_of_2(entry->alignment))) - return -EINVAL; - - /* - * Offset can be used as input (EXEC_OBJECT_PINNED), reject - * any non-page-aligned or non-canonical addresses. - */ - if (unlikely(entry->flags & EXEC_OBJECT_PINNED && - entry->offset != gen8_canonical_addr(entry->offset & I915_GTT_PAGE_MASK))) - return -EINVAL; - - /* pad_to_size was once a reserved field, so sanitize it */ - if (entry->flags & EXEC_OBJECT_PAD_TO_SIZE) { - if (unlikely(offset_in_page(entry->pad_to_size))) - return -EINVAL; - } else { - entry->pad_to_size = 0; - } - - if (unlikely(vma->exec_flags)) { - DRM_DEBUG("Object [handle %d, index %d] appears more than once in object list\n", - entry->handle, (int)(entry - eb->exec)); - return -EINVAL; - } - - /* - * From drm_mm perspective address space is continuous, - * so from this point we're always using non-canonical - * form internally. - */ - entry->offset = gen8_noncanonical_addr(entry->offset); - - if (!eb->reloc_cache.has_fence) { - entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE; - } else { - if ((entry->flags & EXEC_OBJECT_NEEDS_FENCE || - eb->reloc_cache.needs_unfenced) && - i915_gem_object_is_tiled(vma->obj)) - entry->flags |= EXEC_OBJECT_NEEDS_GTT | __EXEC_OBJECT_NEEDS_MAP; - } - - if (!(entry->flags & EXEC_OBJECT_PINNED)) - entry->flags |= eb->context_flags; - - return 0; -} - -static int -eb_add_vma(struct i915_execbuffer *eb, - unsigned int i, unsigned batch_idx, - struct i915_vma *vma) -{ - struct drm_i915_gem_exec_object2 *entry = &eb->exec[i]; - int err; - - GEM_BUG_ON(i915_vma_is_closed(vma)); - - if (!(eb->args->flags & __EXEC_VALIDATED)) { - err = eb_validate_vma(eb, entry, vma); - if (unlikely(err)) - return err; - } - - if (eb->lut_size > 0) { - vma->exec_handle = entry->handle; - hlist_add_head(&vma->exec_node, - &eb->buckets[hash_32(entry->handle, - eb->lut_size)]); - } - - if (entry->relocation_count) - list_add_tail(&vma->reloc_link, &eb->relocs); - - /* - * Stash a pointer from the vma to execobj, so we can query its flags, - * size, alignment etc as provided by the user. Also we stash a pointer - * to the vma inside the execobj so that we can use a direct lookup - * to find the right target VMA when doing relocations. - */ - eb->vma[i] = vma; - eb->flags[i] = entry->flags; - vma->exec_flags = &eb->flags[i]; - - /* - * SNA is doing fancy tricks with compressing batch buffers, which leads - * to negative relocation deltas. Usually that works out ok since the - * relocate address is still positive, except when the batch is placed - * very low in the GTT. Ensure this doesn't happen. - * - * Note that actual hangs have only been observed on gen7, but for - * paranoia do it everywhere. 
- */ - if (i == batch_idx) { - if (entry->relocation_count && - !(eb->flags[i] & EXEC_OBJECT_PINNED)) - eb->flags[i] |= __EXEC_OBJECT_NEEDS_BIAS; - if (eb->reloc_cache.has_fence) - eb->flags[i] |= EXEC_OBJECT_NEEDS_FENCE; - - eb->batch = vma; - } - - err = 0; - if (eb_pin_vma(eb, entry, vma)) { - if (entry->offset != vma->node.start) { - entry->offset = vma->node.start | UPDATE; - eb->args->flags |= __EXEC_HAS_RELOC; - } - } else { - eb_unreserve_vma(vma, vma->exec_flags); - - list_add_tail(&vma->exec_link, &eb->unbound); - if (drm_mm_node_allocated(&vma->node)) - err = i915_vma_unbind(vma); - if (unlikely(err)) - vma->exec_flags = NULL; - } - return err; -} - -static inline int use_cpu_reloc(const struct reloc_cache *cache, - const struct drm_i915_gem_object *obj) -{ - if (!i915_gem_object_has_struct_page(obj)) - return false; - - if (DBG_FORCE_RELOC == FORCE_CPU_RELOC) - return true; - - if (DBG_FORCE_RELOC == FORCE_GTT_RELOC) - return false; - - return (cache->has_llc || - obj->cache_dirty || - obj->cache_level != I915_CACHE_NONE); -} - -static int eb_reserve_vma(const struct i915_execbuffer *eb, - struct i915_vma *vma) -{ - struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma); - unsigned int exec_flags = *vma->exec_flags; - u64 pin_flags; - int err; - - pin_flags = PIN_USER | PIN_NONBLOCK; - if (exec_flags & EXEC_OBJECT_NEEDS_GTT) - pin_flags |= PIN_GLOBAL; - - /* - * Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset, - * limit address to the first 4GBs for unflagged objects. - */ - if (!(exec_flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS)) - pin_flags |= PIN_ZONE_4G; - - if (exec_flags & __EXEC_OBJECT_NEEDS_MAP) - pin_flags |= PIN_MAPPABLE; - - if (exec_flags & EXEC_OBJECT_PINNED) { - pin_flags |= entry->offset | PIN_OFFSET_FIXED; - pin_flags &= ~PIN_NONBLOCK; /* force overlapping checks */ - } else if (exec_flags & __EXEC_OBJECT_NEEDS_BIAS) { - pin_flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS; - } - - err = i915_vma_pin(vma, - entry->pad_to_size, entry->alignment, - pin_flags); - if (err) - return err; - - if (entry->offset != vma->node.start) { - entry->offset = vma->node.start | UPDATE; - eb->args->flags |= __EXEC_HAS_RELOC; - } - - if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) { - err = i915_vma_pin_fence(vma); - if (unlikely(err)) { - i915_vma_unpin(vma); - return err; - } - - if (vma->fence) - exec_flags |= __EXEC_OBJECT_HAS_FENCE; - } - - *vma->exec_flags = exec_flags | __EXEC_OBJECT_HAS_PIN; - GEM_BUG_ON(eb_vma_misplaced(entry, vma, exec_flags)); - - return 0; -} - -static int eb_reserve(struct i915_execbuffer *eb) -{ - const unsigned int count = eb->buffer_count; - struct list_head last; - struct i915_vma *vma; - unsigned int i, pass; - int err; - - /* - * Attempt to pin all of the buffers into the GTT. - * This is done in 3 phases: - * - * 1a. Unbind all objects that do not match the GTT constraints for - * the execbuffer (fenceable, mappable, alignment etc). - * 1b. Increment pin count for already bound objects. - * 2. Bind new objects. - * 3. Decrement pin count. - * - * This avoid unnecessary unbinding of later objects in order to make - * room for the earlier objects *unless* we need to defragment. 
- */ - - pass = 0; - err = 0; - do { - list_for_each_entry(vma, &eb->unbound, exec_link) { - err = eb_reserve_vma(eb, vma); - if (err) - break; - } - if (err != -ENOSPC) - return err; - - /* Resort *all* the objects into priority order */ - INIT_LIST_HEAD(&eb->unbound); - INIT_LIST_HEAD(&last); - for (i = 0; i < count; i++) { - unsigned int flags = eb->flags[i]; - struct i915_vma *vma = eb->vma[i]; - - if (flags & EXEC_OBJECT_PINNED && - flags & __EXEC_OBJECT_HAS_PIN) - continue; - - eb_unreserve_vma(vma, &eb->flags[i]); - - if (flags & EXEC_OBJECT_PINNED) - /* Pinned must have their slot */ - list_add(&vma->exec_link, &eb->unbound); - else if (flags & __EXEC_OBJECT_NEEDS_MAP) - /* Map require the lowest 256MiB (aperture) */ - list_add_tail(&vma->exec_link, &eb->unbound); - else if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS)) - /* Prioritise 4GiB region for restricted bo */ - list_add(&vma->exec_link, &last); - else - list_add_tail(&vma->exec_link, &last); - } - list_splice_tail(&last, &eb->unbound); - - switch (pass++) { - case 0: - break; - - case 1: - /* Too fragmented, unbind everything and retry */ - err = i915_gem_evict_vm(eb->vm); - if (err) - return err; - break; - - default: - return -ENOSPC; - } - } while (1); -} - -static unsigned int eb_batch_index(const struct i915_execbuffer *eb) -{ - if (eb->args->flags & I915_EXEC_BATCH_FIRST) - return 0; - else - return eb->buffer_count - 1; -} - -static int eb_select_context(struct i915_execbuffer *eb) -{ - struct i915_gem_context *ctx; - - ctx = i915_gem_context_lookup(eb->file->driver_priv, eb->args->rsvd1); - if (unlikely(!ctx)) - return -ENOENT; - - eb->gem_context = ctx; - if (ctx->ppgtt) { - eb->vm = &ctx->ppgtt->vm; - eb->invalid_flags |= EXEC_OBJECT_NEEDS_GTT; - } else { - eb->vm = &eb->i915->ggtt.vm; - } - - eb->context_flags = 0; - if (test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags)) - eb->context_flags |= __EXEC_OBJECT_NEEDS_BIAS; - - return 0; -} - -static struct i915_request *__eb_wait_for_ring(struct intel_ring *ring) -{ - struct i915_request *rq; - - /* - * Completely unscientific finger-in-the-air estimates for suitable - * maximum user request size (to avoid blocking) and then backoff. - */ - if (intel_ring_update_space(ring) >= PAGE_SIZE) - return NULL; - - /* - * Find a request that after waiting upon, there will be at least half - * the ring available. The hysteresis allows us to compete for the - * shared ring and should mean that we sleep less often prior to - * claiming our resources, but not so long that the ring completely - * drains before we can submit our next request. - */ - list_for_each_entry(rq, &ring->request_list, ring_link) { - if (__intel_ring_space(rq->postfix, - ring->emit, ring->size) > ring->size / 2) - break; - } - if (&rq->ring_link == &ring->request_list) - return NULL; /* weird, we will check again later for real */ - - return i915_request_get(rq); -} - -static int eb_wait_for_ring(const struct i915_execbuffer *eb) -{ - struct i915_request *rq; - int ret = 0; - - /* - * Apply a light amount of backpressure to prevent excessive hogs - * from blocking waiting for space whilst holding struct_mutex and - * keeping all of their resources pinned. 
- */ - - rq = __eb_wait_for_ring(eb->context->ring); - if (rq) { - mutex_unlock(&eb->i915->drm.struct_mutex); - - if (i915_request_wait(rq, - I915_WAIT_INTERRUPTIBLE, - MAX_SCHEDULE_TIMEOUT) < 0) - ret = -EINTR; - - i915_request_put(rq); - - mutex_lock(&eb->i915->drm.struct_mutex); - } - - return ret; -} - -static int eb_lookup_vmas(struct i915_execbuffer *eb) -{ - struct radix_tree_root *handles_vma = &eb->gem_context->handles_vma; - struct drm_i915_gem_object *obj; - unsigned int i, batch; - int err; - - if (unlikely(i915_gem_context_is_closed(eb->gem_context))) - return -ENOENT; - - if (unlikely(i915_gem_context_is_banned(eb->gem_context))) - return -EIO; - - INIT_LIST_HEAD(&eb->relocs); - INIT_LIST_HEAD(&eb->unbound); - - batch = eb_batch_index(eb); - - for (i = 0; i < eb->buffer_count; i++) { - u32 handle = eb->exec[i].handle; - struct i915_lut_handle *lut; - struct i915_vma *vma; - - vma = radix_tree_lookup(handles_vma, handle); - if (likely(vma)) - goto add_vma; - - obj = i915_gem_object_lookup(eb->file, handle); - if (unlikely(!obj)) { - err = -ENOENT; - goto err_vma; - } - - vma = i915_vma_instance(obj, eb->vm, NULL); - if (IS_ERR(vma)) { - err = PTR_ERR(vma); - goto err_obj; - } - - lut = i915_lut_handle_alloc(); - if (unlikely(!lut)) { - err = -ENOMEM; - goto err_obj; - } - - err = radix_tree_insert(handles_vma, handle, vma); - if (unlikely(err)) { - i915_lut_handle_free(lut); - goto err_obj; - } - - /* transfer ref to ctx */ - if (!vma->open_count++) - i915_vma_reopen(vma); - list_add(&lut->obj_link, &obj->lut_list); - list_add(&lut->ctx_link, &eb->gem_context->handles_list); - lut->ctx = eb->gem_context; - lut->handle = handle; - -add_vma: - err = eb_add_vma(eb, i, batch, vma); - if (unlikely(err)) - goto err_vma; - - GEM_BUG_ON(vma != eb->vma[i]); - GEM_BUG_ON(vma->exec_flags != &eb->flags[i]); - GEM_BUG_ON(drm_mm_node_allocated(&vma->node) && - eb_vma_misplaced(&eb->exec[i], vma, eb->flags[i])); - } - - eb->args->flags |= __EXEC_VALIDATED; - return eb_reserve(eb); - -err_obj: - i915_gem_object_put(obj); -err_vma: - eb->vma[i] = NULL; - return err; -} - -static struct i915_vma * -eb_get_vma(const struct i915_execbuffer *eb, unsigned long handle) -{ - if (eb->lut_size < 0) { - if (handle >= -eb->lut_size) - return NULL; - return eb->vma[handle]; - } else { - struct hlist_head *head; - struct i915_vma *vma; - - head = &eb->buckets[hash_32(handle, eb->lut_size)]; - hlist_for_each_entry(vma, head, exec_node) { - if (vma->exec_handle == handle) - return vma; - } - return NULL; - } -} - -static void eb_release_vmas(const struct i915_execbuffer *eb) -{ - const unsigned int count = eb->buffer_count; - unsigned int i; - - for (i = 0; i < count; i++) { - struct i915_vma *vma = eb->vma[i]; - unsigned int flags = eb->flags[i]; - - if (!vma) - break; - - GEM_BUG_ON(vma->exec_flags != &eb->flags[i]); - vma->exec_flags = NULL; - eb->vma[i] = NULL; - - if (flags & __EXEC_OBJECT_HAS_PIN) - __eb_unreserve_vma(vma, flags); - - if (flags & __EXEC_OBJECT_HAS_REF) - i915_vma_put(vma); - } -} - -static void eb_reset_vmas(const struct i915_execbuffer *eb) -{ - eb_release_vmas(eb); - if (eb->lut_size > 0) - memset(eb->buckets, 0, - sizeof(struct hlist_head) << eb->lut_size); -} - -static void eb_destroy(const struct i915_execbuffer *eb) -{ - GEM_BUG_ON(eb->reloc_cache.rq); - - if (eb->lut_size > 0) - kfree(eb->buckets); -} - -static inline u64 -relocation_target(const struct drm_i915_gem_relocation_entry *reloc, - const struct i915_vma *target) -{ - return gen8_canonical_addr((int)reloc->delta + 
target->node.start); -} - -static void reloc_cache_init(struct reloc_cache *cache, - struct drm_i915_private *i915) -{ - cache->page = -1; - cache->vaddr = 0; - /* Must be a variable in the struct to allow GCC to unroll. */ - cache->gen = INTEL_GEN(i915); - cache->has_llc = HAS_LLC(i915); - cache->use_64bit_reloc = HAS_64BIT_RELOC(i915); - cache->has_fence = cache->gen < 4; - cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment; - cache->node.allocated = false; - cache->rq = NULL; - cache->rq_size = 0; -} - -static inline void *unmask_page(unsigned long p) -{ - return (void *)(uintptr_t)(p & PAGE_MASK); -} - -static inline unsigned int unmask_flags(unsigned long p) -{ - return p & ~PAGE_MASK; -} - -#define KMAP 0x4 /* after CLFLUSH_FLAGS */ - -static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache) -{ - struct drm_i915_private *i915 = - container_of(cache, struct i915_execbuffer, reloc_cache)->i915; - return &i915->ggtt; -} - -static void reloc_gpu_flush(struct reloc_cache *cache) -{ - GEM_BUG_ON(cache->rq_size >= cache->rq->batch->obj->base.size / sizeof(u32)); - cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END; - - __i915_gem_object_flush_map(cache->rq->batch->obj, 0, cache->rq_size); - i915_gem_object_unpin_map(cache->rq->batch->obj); - - i915_gem_chipset_flush(cache->rq->i915); - - i915_request_add(cache->rq); - cache->rq = NULL; -} - -static void reloc_cache_reset(struct reloc_cache *cache) -{ - void *vaddr; - - if (cache->rq) - reloc_gpu_flush(cache); - - if (!cache->vaddr) - return; - - vaddr = unmask_page(cache->vaddr); - if (cache->vaddr & KMAP) { - if (cache->vaddr & CLFLUSH_AFTER) - mb(); - - kunmap_atomic(vaddr); - i915_gem_object_finish_access((struct drm_i915_gem_object *)cache->node.mm); - } else { - wmb(); - io_mapping_unmap_atomic((void __iomem *)vaddr); - if (cache->node.allocated) { - struct i915_ggtt *ggtt = cache_to_ggtt(cache); - - ggtt->vm.clear_range(&ggtt->vm, - cache->node.start, - cache->node.size); - drm_mm_remove_node(&cache->node); - } else { - i915_vma_unpin((struct i915_vma *)cache->node.mm); - } - } - - cache->vaddr = 0; - cache->page = -1; -} - -static void *reloc_kmap(struct drm_i915_gem_object *obj, - struct reloc_cache *cache, - unsigned long page) -{ - void *vaddr; - - if (cache->vaddr) { - kunmap_atomic(unmask_page(cache->vaddr)); - } else { - unsigned int flushes; - int err; - - err = i915_gem_object_prepare_write(obj, &flushes); - if (err) - return ERR_PTR(err); - - BUILD_BUG_ON(KMAP & CLFLUSH_FLAGS); - BUILD_BUG_ON((KMAP | CLFLUSH_FLAGS) & PAGE_MASK); - - cache->vaddr = flushes | KMAP; - cache->node.mm = (void *)obj; - if (flushes) - mb(); - } - - vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, page)); - cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr; - cache->page = page; - - return vaddr; -} - -static void *reloc_iomap(struct drm_i915_gem_object *obj, - struct reloc_cache *cache, - unsigned long page) -{ - struct i915_ggtt *ggtt = cache_to_ggtt(cache); - unsigned long offset; - void *vaddr; - - if (cache->vaddr) { - io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr)); - } else { - struct i915_vma *vma; - int err; - - if (use_cpu_reloc(cache, obj)) - return NULL; - - err = i915_gem_object_set_to_gtt_domain(obj, true); - if (err) - return ERR_PTR(err); - - vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, - PIN_MAPPABLE | - PIN_NONBLOCK | - PIN_NONFAULT); - if (IS_ERR(vma)) { - memset(&cache->node, 0, sizeof(cache->node)); - err = drm_mm_insert_node_in_range - 
(&ggtt->vm.mm, &cache->node, - PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE, - 0, ggtt->mappable_end, - DRM_MM_INSERT_LOW); - if (err) /* no inactive aperture space, use cpu reloc */ - return NULL; - } else { - err = i915_vma_put_fence(vma); - if (err) { - i915_vma_unpin(vma); - return ERR_PTR(err); - } - - cache->node.start = vma->node.start; - cache->node.mm = (void *)vma; - } - } - - offset = cache->node.start; - if (cache->node.allocated) { - wmb(); - ggtt->vm.insert_page(&ggtt->vm, - i915_gem_object_get_dma_address(obj, page), - offset, I915_CACHE_NONE, 0); - } else { - offset += page << PAGE_SHIFT; - } - - vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->iomap, - offset); - cache->page = page; - cache->vaddr = (unsigned long)vaddr; - - return vaddr; -} - -static void *reloc_vaddr(struct drm_i915_gem_object *obj, - struct reloc_cache *cache, - unsigned long page) -{ - void *vaddr; - - if (cache->page == page) { - vaddr = unmask_page(cache->vaddr); - } else { - vaddr = NULL; - if ((cache->vaddr & KMAP) == 0) - vaddr = reloc_iomap(obj, cache, page); - if (!vaddr) - vaddr = reloc_kmap(obj, cache, page); - } - - return vaddr; -} - -static void clflush_write32(u32 *addr, u32 value, unsigned int flushes) -{ - if (unlikely(flushes & (CLFLUSH_BEFORE | CLFLUSH_AFTER))) { - if (flushes & CLFLUSH_BEFORE) { - clflushopt(addr); - mb(); - } - - *addr = value; - - /* - * Writes to the same cacheline are serialised by the CPU - * (including clflush). On the write path, we only require - * that it hits memory in an orderly fashion and place - * mb barriers at the start and end of the relocation phase - * to ensure ordering of clflush wrt to the system. - */ - if (flushes & CLFLUSH_AFTER) - clflushopt(addr); - } else - *addr = value; -} - -static int __reloc_gpu_alloc(struct i915_execbuffer *eb, - struct i915_vma *vma, - unsigned int len) -{ - struct reloc_cache *cache = &eb->reloc_cache; - struct drm_i915_gem_object *obj; - struct i915_request *rq; - struct i915_vma *batch; - u32 *cmd; - int err; - - if (DBG_FORCE_RELOC == FORCE_GPU_RELOC) { - obj = vma->obj; - if (obj->cache_dirty & ~obj->cache_coherent) - i915_gem_clflush_object(obj, 0); - obj->write_domain = 0; - } - - GEM_BUG_ON(vma->obj->write_domain & I915_GEM_DOMAIN_CPU); - - obj = i915_gem_batch_pool_get(&eb->engine->batch_pool, PAGE_SIZE); - if (IS_ERR(obj)) - return PTR_ERR(obj); - - cmd = i915_gem_object_pin_map(obj, - cache->has_llc ? - I915_MAP_FORCE_WB : - I915_MAP_FORCE_WC); - i915_gem_object_unpin_pages(obj); - if (IS_ERR(cmd)) - return PTR_ERR(cmd); - - batch = i915_vma_instance(obj, vma->vm, NULL); - if (IS_ERR(batch)) { - err = PTR_ERR(batch); - goto err_unmap; - } - - err = i915_vma_pin(batch, 0, 0, PIN_USER | PIN_NONBLOCK); - if (err) - goto err_unmap; - - rq = i915_request_create(eb->context); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto err_unpin; - } - - err = i915_request_await_object(rq, vma->obj, true); - if (err) - goto err_request; - - err = eb->engine->emit_bb_start(rq, - batch->node.start, PAGE_SIZE, - cache->gen > 5 ? 
0 : I915_DISPATCH_SECURE); - if (err) - goto err_request; - - GEM_BUG_ON(!reservation_object_test_signaled_rcu(batch->resv, true)); - err = i915_vma_move_to_active(batch, rq, 0); - if (err) - goto skip_request; - - err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); - if (err) - goto skip_request; - - rq->batch = batch; - i915_vma_unpin(batch); - - cache->rq = rq; - cache->rq_cmd = cmd; - cache->rq_size = 0; - - /* Return with batch mapping (cmd) still pinned */ - return 0; - -skip_request: - i915_request_skip(rq, err); -err_request: - i915_request_add(rq); -err_unpin: - i915_vma_unpin(batch); -err_unmap: - i915_gem_object_unpin_map(obj); - return err; -} - -static u32 *reloc_gpu(struct i915_execbuffer *eb, - struct i915_vma *vma, - unsigned int len) -{ - struct reloc_cache *cache = &eb->reloc_cache; - u32 *cmd; - - if (cache->rq_size > PAGE_SIZE/sizeof(u32) - (len + 1)) - reloc_gpu_flush(cache); - - if (unlikely(!cache->rq)) { - int err; - - /* If we need to copy for the cmdparser, we will stall anyway */ - if (eb_use_cmdparser(eb)) - return ERR_PTR(-EWOULDBLOCK); - - if (!intel_engine_can_store_dword(eb->engine)) - return ERR_PTR(-ENODEV); - - err = __reloc_gpu_alloc(eb, vma, len); - if (unlikely(err)) - return ERR_PTR(err); - } - - cmd = cache->rq_cmd + cache->rq_size; - cache->rq_size += len; - - return cmd; -} - -static u64 -relocate_entry(struct i915_vma *vma, - const struct drm_i915_gem_relocation_entry *reloc, - struct i915_execbuffer *eb, - const struct i915_vma *target) -{ - u64 offset = reloc->offset; - u64 target_offset = relocation_target(reloc, target); - bool wide = eb->reloc_cache.use_64bit_reloc; - void *vaddr; - - if (!eb->reloc_cache.vaddr && - (DBG_FORCE_RELOC == FORCE_GPU_RELOC || - !reservation_object_test_signaled_rcu(vma->resv, true))) { - const unsigned int gen = eb->reloc_cache.gen; - unsigned int len; - u32 *batch; - u64 addr; - - if (wide) - len = offset & 7 ? 
8 : 5; - else if (gen >= 4) - len = 4; - else - len = 3; - - batch = reloc_gpu(eb, vma, len); - if (IS_ERR(batch)) - goto repeat; - - addr = gen8_canonical_addr(vma->node.start + offset); - if (wide) { - if (offset & 7) { - *batch++ = MI_STORE_DWORD_IMM_GEN4; - *batch++ = lower_32_bits(addr); - *batch++ = upper_32_bits(addr); - *batch++ = lower_32_bits(target_offset); - - addr = gen8_canonical_addr(addr + 4); - - *batch++ = MI_STORE_DWORD_IMM_GEN4; - *batch++ = lower_32_bits(addr); - *batch++ = upper_32_bits(addr); - *batch++ = upper_32_bits(target_offset); - } else { - *batch++ = (MI_STORE_DWORD_IMM_GEN4 | (1 << 21)) + 1; - *batch++ = lower_32_bits(addr); - *batch++ = upper_32_bits(addr); - *batch++ = lower_32_bits(target_offset); - *batch++ = upper_32_bits(target_offset); - } - } else if (gen >= 6) { - *batch++ = MI_STORE_DWORD_IMM_GEN4; - *batch++ = 0; - *batch++ = addr; - *batch++ = target_offset; - } else if (gen >= 4) { - *batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; - *batch++ = 0; - *batch++ = addr; - *batch++ = target_offset; - } else { - *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL; - *batch++ = addr; - *batch++ = target_offset; - } - - goto out; - } - -repeat: - vaddr = reloc_vaddr(vma->obj, &eb->reloc_cache, offset >> PAGE_SHIFT); - if (IS_ERR(vaddr)) - return PTR_ERR(vaddr); - - clflush_write32(vaddr + offset_in_page(offset), - lower_32_bits(target_offset), - eb->reloc_cache.vaddr); - - if (wide) { - offset += sizeof(u32); - target_offset >>= 32; - wide = false; - goto repeat; - } - -out: - return target->node.start | UPDATE; -} - -static u64 -eb_relocate_entry(struct i915_execbuffer *eb, - struct i915_vma *vma, - const struct drm_i915_gem_relocation_entry *reloc) -{ - struct i915_vma *target; - int err; - - /* we've already hold a reference to all valid objects */ - target = eb_get_vma(eb, reloc->target_handle); - if (unlikely(!target)) - return -ENOENT; - - /* Validate that the target is in a valid r/w GPU domain */ - if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) { - DRM_DEBUG("reloc with multiple write domains: " - "target %d offset %d " - "read %08x write %08x", - reloc->target_handle, - (int) reloc->offset, - reloc->read_domains, - reloc->write_domain); - return -EINVAL; - } - if (unlikely((reloc->write_domain | reloc->read_domains) - & ~I915_GEM_GPU_DOMAINS)) { - DRM_DEBUG("reloc with read/write non-GPU domains: " - "target %d offset %d " - "read %08x write %08x", - reloc->target_handle, - (int) reloc->offset, - reloc->read_domains, - reloc->write_domain); - return -EINVAL; - } - - if (reloc->write_domain) { - *target->exec_flags |= EXEC_OBJECT_WRITE; - - /* - * Sandybridge PPGTT errata: We need a global gtt mapping - * for MI and pipe_control writes because the gpu doesn't - * properly redirect them through the ppgtt for non_secure - * batchbuffers. - */ - if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION && - IS_GEN(eb->i915, 6)) { - err = i915_vma_bind(target, target->obj->cache_level, - PIN_GLOBAL); - if (WARN_ONCE(err, - "Unexpected failure to bind target VMA!")) - return err; - } - } - - /* - * If the relocation already has the right value in it, no - * more work needs to be done. - */ - if (!DBG_FORCE_RELOC && - gen8_canonical_addr(target->node.start) == reloc->presumed_offset) - return 0; - - /* Check that the relocation address is valid... */ - if (unlikely(reloc->offset > - vma->size - (eb->reloc_cache.use_64bit_reloc ? 
8 : 4))) { - DRM_DEBUG("Relocation beyond object bounds: " - "target %d offset %d size %d.\n", - reloc->target_handle, - (int)reloc->offset, - (int)vma->size); - return -EINVAL; - } - if (unlikely(reloc->offset & 3)) { - DRM_DEBUG("Relocation not 4-byte aligned: " - "target %d offset %d.\n", - reloc->target_handle, - (int)reloc->offset); - return -EINVAL; - } - - /* - * If we write into the object, we need to force the synchronisation - * barrier, either with an asynchronous clflush or if we executed the - * patching using the GPU (though that should be serialised by the - * timeline). To be completely sure, and since we are required to - * do relocations we are already stalling, disable the user's opt - * out of our synchronisation. - */ - *vma->exec_flags &= ~EXEC_OBJECT_ASYNC; - - /* and update the user's relocation entry */ - return relocate_entry(vma, reloc, eb, target); -} - -static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma) -{ -#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry)) - struct drm_i915_gem_relocation_entry stack[N_RELOC(512)]; - struct drm_i915_gem_relocation_entry __user *urelocs; - const struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma); - unsigned int remain; - - urelocs = u64_to_user_ptr(entry->relocs_ptr); - remain = entry->relocation_count; - if (unlikely(remain > N_RELOC(ULONG_MAX))) - return -EINVAL; - - /* - * We must check that the entire relocation array is safe - * to read. However, if the array is not writable the user loses - * the updated relocation values. - */ - if (unlikely(!access_ok(urelocs, remain*sizeof(*urelocs)))) - return -EFAULT; - - do { - struct drm_i915_gem_relocation_entry *r = stack; - unsigned int count = - min_t(unsigned int, remain, ARRAY_SIZE(stack)); - unsigned int copied; - - /* - * This is the fast path and we cannot handle a pagefault - * whilst holding the struct mutex lest the user pass in the - * relocations contained within a mmaped bo. For in such a case - * we, the page fault handler would call i915_gem_fault() and - * we would try to acquire the struct mutex again. Obviously - * this is bad and so lockdep complains vehemently. - */ - pagefault_disable(); - copied = __copy_from_user_inatomic(r, urelocs, count * sizeof(r[0])); - pagefault_enable(); - if (unlikely(copied)) { - remain = -EFAULT; - goto out; - } - - remain -= count; - do { - u64 offset = eb_relocate_entry(eb, vma, r); - - if (likely(offset == 0)) { - } else if ((s64)offset < 0) { - remain = (int)offset; - goto out; - } else { - /* - * Note that reporting an error now - * leaves everything in an inconsistent - * state as we have *already* changed - * the relocation value inside the - * object. As we have not changed the - * reloc.presumed_offset or will not - * change the execobject.offset, on the - * call we may not rewrite the value - * inside the object, leaving it - * dangling and causing a GPU hang. Unless - * userspace dynamically rebuilds the - * relocations on each execbuf rather than - * presume a static tree. - * - * We did previously check if the relocations - * were writable (access_ok), an error now - * would be a strange race with mprotect, - * having already demonstrated that we - * can read from this userspace address. 
- */ - offset = gen8_canonical_addr(offset & ~UPDATE); - if (unlikely(__put_user(offset, &urelocs[r-stack].presumed_offset))) { - remain = -EFAULT; - goto out; - } - } - } while (r++, --count); - urelocs += ARRAY_SIZE(stack); - } while (remain); -out: - reloc_cache_reset(&eb->reloc_cache); - return remain; -} - -static int -eb_relocate_vma_slow(struct i915_execbuffer *eb, struct i915_vma *vma) -{ - const struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma); - struct drm_i915_gem_relocation_entry *relocs = - u64_to_ptr(typeof(*relocs), entry->relocs_ptr); - unsigned int i; - int err; - - for (i = 0; i < entry->relocation_count; i++) { - u64 offset = eb_relocate_entry(eb, vma, &relocs[i]); - - if ((s64)offset < 0) { - err = (int)offset; - goto err; - } - } - err = 0; -err: - reloc_cache_reset(&eb->reloc_cache); - return err; -} - -static int check_relocations(const struct drm_i915_gem_exec_object2 *entry) -{ - const char __user *addr, *end; - unsigned long size; - char __maybe_unused c; - - size = entry->relocation_count; - if (size == 0) - return 0; - - if (size > N_RELOC(ULONG_MAX)) - return -EINVAL; - - addr = u64_to_user_ptr(entry->relocs_ptr); - size *= sizeof(struct drm_i915_gem_relocation_entry); - if (!access_ok(addr, size)) - return -EFAULT; - - end = addr + size; - for (; addr < end; addr += PAGE_SIZE) { - int err = __get_user(c, addr); - if (err) - return err; - } - return __get_user(c, end - 1); -} - -static int eb_copy_relocations(const struct i915_execbuffer *eb) -{ - const unsigned int count = eb->buffer_count; - unsigned int i; - int err; - - for (i = 0; i < count; i++) { - const unsigned int nreloc = eb->exec[i].relocation_count; - struct drm_i915_gem_relocation_entry __user *urelocs; - struct drm_i915_gem_relocation_entry *relocs; - unsigned long size; - unsigned long copied; - - if (nreloc == 0) - continue; - - err = check_relocations(&eb->exec[i]); - if (err) - goto err; - - urelocs = u64_to_user_ptr(eb->exec[i].relocs_ptr); - size = nreloc * sizeof(*relocs); - - relocs = kvmalloc_array(size, 1, GFP_KERNEL); - if (!relocs) { - err = -ENOMEM; - goto err; - } - - /* copy_from_user is limited to < 4GiB */ - copied = 0; - do { - unsigned int len = - min_t(u64, BIT_ULL(31), size - copied); - - if (__copy_from_user((char *)relocs + copied, - (char __user *)urelocs + copied, - len)) { -end_user: - user_access_end(); -end: - kvfree(relocs); - err = -EFAULT; - goto err; - } - - copied += len; - } while (copied < size); - - /* - * As we do not update the known relocation offsets after - * relocating (due to the complexities in lock handling), - * we need to mark them as invalid now so that we force the - * relocation processing next time. Just in case the target - * object is evicted and then rebound into its old - * presumed_offset before the next execbuffer - if that - * happened we would make the mistake of assuming that the - * relocations were valid. 
- */ - if (!user_access_begin(urelocs, size)) - goto end; - - for (copied = 0; copied < nreloc; copied++) - unsafe_put_user(-1, - &urelocs[copied].presumed_offset, - end_user); - user_access_end(); - - eb->exec[i].relocs_ptr = (uintptr_t)relocs; - } - - return 0; - -err: - while (i--) { - struct drm_i915_gem_relocation_entry *relocs = - u64_to_ptr(typeof(*relocs), eb->exec[i].relocs_ptr); - if (eb->exec[i].relocation_count) - kvfree(relocs); - } - return err; -} - -static int eb_prefault_relocations(const struct i915_execbuffer *eb) -{ - const unsigned int count = eb->buffer_count; - unsigned int i; - - if (unlikely(i915_modparams.prefault_disable)) - return 0; - - for (i = 0; i < count; i++) { - int err; - - err = check_relocations(&eb->exec[i]); - if (err) - return err; - } - - return 0; -} - -static noinline int eb_relocate_slow(struct i915_execbuffer *eb) -{ - struct drm_device *dev = &eb->i915->drm; - bool have_copy = false; - struct i915_vma *vma; - int err = 0; - -repeat: - if (signal_pending(current)) { - err = -ERESTARTSYS; - goto out; - } - - /* We may process another execbuffer during the unlock... */ - eb_reset_vmas(eb); - mutex_unlock(&dev->struct_mutex); - - /* - * We take 3 passes through the slowpatch. - * - * 1 - we try to just prefault all the user relocation entries and - * then attempt to reuse the atomic pagefault disabled fast path again. - * - * 2 - we copy the user entries to a local buffer here outside of the - * local and allow ourselves to wait upon any rendering before - * relocations - * - * 3 - we already have a local copy of the relocation entries, but - * were interrupted (EAGAIN) whilst waiting for the objects, try again. - */ - if (!err) { - err = eb_prefault_relocations(eb); - } else if (!have_copy) { - err = eb_copy_relocations(eb); - have_copy = err == 0; - } else { - cond_resched(); - err = 0; - } - if (err) { - mutex_lock(&dev->struct_mutex); - goto out; - } - - /* A frequent cause for EAGAIN are currently unavailable client pages */ - flush_workqueue(eb->i915->mm.userptr_wq); - - err = i915_mutex_lock_interruptible(dev); - if (err) { - mutex_lock(&dev->struct_mutex); - goto out; - } - - /* reacquire the objects */ - err = eb_lookup_vmas(eb); - if (err) - goto err; - - GEM_BUG_ON(!eb->batch); - - list_for_each_entry(vma, &eb->relocs, reloc_link) { - if (!have_copy) { - pagefault_disable(); - err = eb_relocate_vma(eb, vma); - pagefault_enable(); - if (err) - goto repeat; - } else { - err = eb_relocate_vma_slow(eb, vma); - if (err) - goto err; - } - } - - /* - * Leave the user relocations as are, this is the painfully slow path, - * and we want to avoid the complication of dropping the lock whilst - * having buffers reserved in the aperture and so causing spurious - * ENOSPC for random operations. - */ - -err: - if (err == -EAGAIN) - goto repeat; - -out: - if (have_copy) { - const unsigned int count = eb->buffer_count; - unsigned int i; - - for (i = 0; i < count; i++) { - const struct drm_i915_gem_exec_object2 *entry = - &eb->exec[i]; - struct drm_i915_gem_relocation_entry *relocs; - - if (!entry->relocation_count) - continue; - - relocs = u64_to_ptr(typeof(*relocs), entry->relocs_ptr); - kvfree(relocs); - } - } - - return err; -} - -static int eb_relocate(struct i915_execbuffer *eb) -{ - if (eb_lookup_vmas(eb)) - goto slow; - - /* The objects are in their final locations, apply the relocations. 
*/ - if (eb->args->flags & __EXEC_HAS_RELOC) { - struct i915_vma *vma; - - list_for_each_entry(vma, &eb->relocs, reloc_link) { - if (eb_relocate_vma(eb, vma)) - goto slow; - } - } - - return 0; - -slow: - return eb_relocate_slow(eb); -} - -static int eb_move_to_gpu(struct i915_execbuffer *eb) -{ - const unsigned int count = eb->buffer_count; - unsigned int i; - int err; - - for (i = 0; i < count; i++) { - unsigned int flags = eb->flags[i]; - struct i915_vma *vma = eb->vma[i]; - struct drm_i915_gem_object *obj = vma->obj; - - if (flags & EXEC_OBJECT_CAPTURE) { - struct i915_capture_list *capture; - - capture = kmalloc(sizeof(*capture), GFP_KERNEL); - if (unlikely(!capture)) - return -ENOMEM; - - capture->next = eb->request->capture_list; - capture->vma = eb->vma[i]; - eb->request->capture_list = capture; - } - - /* - * If the GPU is not _reading_ through the CPU cache, we need - * to make sure that any writes (both previous GPU writes from - * before a change in snooping levels and normal CPU writes) - * caught in that cache are flushed to main memory. - * - * We want to say - * obj->cache_dirty && - * !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ) - * but gcc's optimiser doesn't handle that as well and emits - * two jumps instead of one. Maybe one day... - */ - if (unlikely(obj->cache_dirty & ~obj->cache_coherent)) { - if (i915_gem_clflush_object(obj, 0)) - flags &= ~EXEC_OBJECT_ASYNC; - } - - if (flags & EXEC_OBJECT_ASYNC) - continue; - - err = i915_request_await_object - (eb->request, obj, flags & EXEC_OBJECT_WRITE); - if (err) - return err; - } - - for (i = 0; i < count; i++) { - unsigned int flags = eb->flags[i]; - struct i915_vma *vma = eb->vma[i]; - - err = i915_vma_move_to_active(vma, eb->request, flags); - if (unlikely(err)) { - i915_request_skip(eb->request, err); - return err; - } - - __eb_unreserve_vma(vma, flags); - vma->exec_flags = NULL; - - if (unlikely(flags & __EXEC_OBJECT_HAS_REF)) - i915_vma_put(vma); - } - eb->exec = NULL; - - /* Unconditionally flush any chipset caches (for streaming writes). 
*/ - i915_gem_chipset_flush(eb->i915); - - return 0; -} - -static bool i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec) -{ - if (exec->flags & __I915_EXEC_ILLEGAL_FLAGS) - return false; - - /* Kernel clipping was a DRI1 misfeature */ - if (!(exec->flags & I915_EXEC_FENCE_ARRAY)) { - if (exec->num_cliprects || exec->cliprects_ptr) - return false; - } - - if (exec->DR4 == 0xffffffff) { - DRM_DEBUG("UXA submitting garbage DR4, fixing up\n"); - exec->DR4 = 0; - } - if (exec->DR1 || exec->DR4) - return false; - - if ((exec->batch_start_offset | exec->batch_len) & 0x7) - return false; - - return true; -} - -static int i915_reset_gen7_sol_offsets(struct i915_request *rq) -{ - u32 *cs; - int i; - - if (!IS_GEN(rq->i915, 7) || rq->engine->id != RCS0) { - DRM_DEBUG("sol reset is gen7/rcs only\n"); - return -EINVAL; - } - - cs = intel_ring_begin(rq, 4 * 2 + 2); - if (IS_ERR(cs)) - return PTR_ERR(cs); - - *cs++ = MI_LOAD_REGISTER_IMM(4); - for (i = 0; i < 4; i++) { - *cs++ = i915_mmio_reg_offset(GEN7_SO_WRITE_OFFSET(i)); - *cs++ = 0; - } - *cs++ = MI_NOOP; - intel_ring_advance(rq, cs); - - return 0; -} - -static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master) -{ - struct drm_i915_gem_object *shadow_batch_obj; - struct i915_vma *vma; - int err; - - shadow_batch_obj = i915_gem_batch_pool_get(&eb->engine->batch_pool, - PAGE_ALIGN(eb->batch_len)); - if (IS_ERR(shadow_batch_obj)) - return ERR_CAST(shadow_batch_obj); - - err = intel_engine_cmd_parser(eb->engine, - eb->batch->obj, - shadow_batch_obj, - eb->batch_start_offset, - eb->batch_len, - is_master); - if (err) { - if (err == -EACCES) /* unhandled chained batch */ - vma = NULL; - else - vma = ERR_PTR(err); - goto out; - } - - vma = i915_gem_object_ggtt_pin(shadow_batch_obj, NULL, 0, 0, 0); - if (IS_ERR(vma)) - goto out; - - eb->vma[eb->buffer_count] = i915_vma_get(vma); - eb->flags[eb->buffer_count] = - __EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_REF; - vma->exec_flags = &eb->flags[eb->buffer_count]; - eb->buffer_count++; - -out: - i915_gem_object_unpin_pages(shadow_batch_obj); - return vma; -} - -static void -add_to_client(struct i915_request *rq, struct drm_file *file) -{ - rq->file_priv = file->driver_priv; - list_add_tail(&rq->client_link, &rq->file_priv->mm.request_list); -} - -static int eb_submit(struct i915_execbuffer *eb) -{ - int err; - - err = eb_move_to_gpu(eb); - if (err) - return err; - - if (eb->args->flags & I915_EXEC_GEN7_SOL_RESET) { - err = i915_reset_gen7_sol_offsets(eb->request); - if (err) - return err; - } - - /* - * After we completed waiting for other engines (using HW semaphores) - * then we can signal that this request/batch is ready to run. This - * allows us to determine if the batch is still waiting on the GPU - * or actually running by checking the breadcrumb. - */ - if (eb->engine->emit_init_breadcrumb) { - err = eb->engine->emit_init_breadcrumb(eb->request); - if (err) - return err; - } - - err = eb->engine->emit_bb_start(eb->request, - eb->batch->node.start + - eb->batch_start_offset, - eb->batch_len, - eb->batch_flags); - if (err) - return err; - - return 0; -} - -/* - * Find one BSD ring to dispatch the corresponding BSD command. - * The engine index is returned. - */ -static unsigned int -gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv, - struct drm_file *file) -{ - struct drm_i915_file_private *file_priv = file->driver_priv; - - /* Check whether the file_priv has already selected one ring. 
*/ - if ((int)file_priv->bsd_engine < 0) - file_priv->bsd_engine = atomic_fetch_xor(1, - &dev_priv->mm.bsd_engine_dispatch_index); - - return file_priv->bsd_engine; -} - -static const enum intel_engine_id user_ring_map[] = { - [I915_EXEC_DEFAULT] = RCS0, - [I915_EXEC_RENDER] = RCS0, - [I915_EXEC_BLT] = BCS0, - [I915_EXEC_BSD] = VCS0, - [I915_EXEC_VEBOX] = VECS0 -}; - -static int eb_pin_context(struct i915_execbuffer *eb, struct intel_context *ce) -{ - int err; - - /* - * ABI: Before userspace accesses the GPU (e.g. execbuffer), report - * EIO if the GPU is already wedged. - */ - err = i915_terminally_wedged(eb->i915); - if (err) - return err; - - /* - * Pinning the contexts may generate requests in order to acquire - * GGTT space, so do this first before we reserve a seqno for - * ourselves. - */ - err = intel_context_pin(ce); - if (err) - return err; - - eb->engine = ce->engine; - eb->context = ce; - return 0; -} - -static void eb_unpin_context(struct i915_execbuffer *eb) -{ - intel_context_unpin(eb->context); -} - -static unsigned int -eb_select_legacy_ring(struct i915_execbuffer *eb, - struct drm_file *file, - struct drm_i915_gem_execbuffer2 *args) -{ - struct drm_i915_private *i915 = eb->i915; - unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK; - - if (user_ring_id != I915_EXEC_BSD && - (args->flags & I915_EXEC_BSD_MASK)) { - DRM_DEBUG("execbuf with non bsd ring but with invalid " - "bsd dispatch flags: %d\n", (int)(args->flags)); - return -1; - } - - if (user_ring_id == I915_EXEC_BSD && HAS_ENGINE(i915, VCS1)) { - unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK; - - if (bsd_idx == I915_EXEC_BSD_DEFAULT) { - bsd_idx = gen8_dispatch_bsd_engine(i915, file); - } else if (bsd_idx >= I915_EXEC_BSD_RING1 && - bsd_idx <= I915_EXEC_BSD_RING2) { - bsd_idx >>= I915_EXEC_BSD_SHIFT; - bsd_idx--; - } else { - DRM_DEBUG("execbuf with unknown bsd ring: %u\n", - bsd_idx); - return -1; - } - - return _VCS(bsd_idx); - } - - if (user_ring_id >= ARRAY_SIZE(user_ring_map)) { - DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id); - return -1; - } - - return user_ring_map[user_ring_id]; -} - -static int -eb_select_engine(struct i915_execbuffer *eb, - struct drm_file *file, - struct drm_i915_gem_execbuffer2 *args) -{ - struct intel_context *ce; - unsigned int idx; - int err; - - if (i915_gem_context_user_engines(eb->gem_context)) - idx = args->flags & I915_EXEC_RING_MASK; - else - idx = eb_select_legacy_ring(eb, file, args); - - ce = i915_gem_context_get_engine(eb->gem_context, idx); - if (IS_ERR(ce)) - return PTR_ERR(ce); - - err = eb_pin_context(eb, ce); - intel_context_put(ce); - - return err; -} - -static void -__free_fence_array(struct drm_syncobj **fences, unsigned int n) -{ - while (n--) - drm_syncobj_put(ptr_mask_bits(fences[n], 2)); - kvfree(fences); -} - -static struct drm_syncobj ** -get_fence_array(struct drm_i915_gem_execbuffer2 *args, - struct drm_file *file) -{ - const unsigned long nfences = args->num_cliprects; - struct drm_i915_gem_exec_fence __user *user; - struct drm_syncobj **fences; - unsigned long n; - int err; - - if (!(args->flags & I915_EXEC_FENCE_ARRAY)) - return NULL; - - /* Check multiplication overflow for access_ok() and kvmalloc_array() */ - BUILD_BUG_ON(sizeof(size_t) > sizeof(unsigned long)); - if (nfences > min_t(unsigned long, - ULONG_MAX / sizeof(*user), - SIZE_MAX / sizeof(*fences))) - return ERR_PTR(-EINVAL); - - user = u64_to_user_ptr(args->cliprects_ptr); - if (!access_ok(user, nfences * sizeof(*user))) - return ERR_PTR(-EFAULT); - - fences 
= kvmalloc_array(nfences, sizeof(*fences), - __GFP_NOWARN | GFP_KERNEL); - if (!fences) - return ERR_PTR(-ENOMEM); - - for (n = 0; n < nfences; n++) { - struct drm_i915_gem_exec_fence fence; - struct drm_syncobj *syncobj; - - if (__copy_from_user(&fence, user++, sizeof(fence))) { - err = -EFAULT; - goto err; - } - - if (fence.flags & __I915_EXEC_FENCE_UNKNOWN_FLAGS) { - err = -EINVAL; - goto err; - } - - syncobj = drm_syncobj_find(file, fence.handle); - if (!syncobj) { - DRM_DEBUG("Invalid syncobj handle provided\n"); - err = -ENOENT; - goto err; - } - - BUILD_BUG_ON(~(ARCH_KMALLOC_MINALIGN - 1) & - ~__I915_EXEC_FENCE_UNKNOWN_FLAGS); - - fences[n] = ptr_pack_bits(syncobj, fence.flags, 2); - } - - return fences; - -err: - __free_fence_array(fences, n); - return ERR_PTR(err); -} - -static void -put_fence_array(struct drm_i915_gem_execbuffer2 *args, - struct drm_syncobj **fences) -{ - if (fences) - __free_fence_array(fences, args->num_cliprects); -} - -static int -await_fence_array(struct i915_execbuffer *eb, - struct drm_syncobj **fences) -{ - const unsigned int nfences = eb->args->num_cliprects; - unsigned int n; - int err; - - for (n = 0; n < nfences; n++) { - struct drm_syncobj *syncobj; - struct dma_fence *fence; - unsigned int flags; - - syncobj = ptr_unpack_bits(fences[n], &flags, 2); - if (!(flags & I915_EXEC_FENCE_WAIT)) - continue; - - fence = drm_syncobj_fence_get(syncobj); - if (!fence) - return -EINVAL; - - err = i915_request_await_dma_fence(eb->request, fence); - dma_fence_put(fence); - if (err < 0) - return err; - } - - return 0; -} - -static void -signal_fence_array(struct i915_execbuffer *eb, - struct drm_syncobj **fences) -{ - const unsigned int nfences = eb->args->num_cliprects; - struct dma_fence * const fence = &eb->request->fence; - unsigned int n; - - for (n = 0; n < nfences; n++) { - struct drm_syncobj *syncobj; - unsigned int flags; - - syncobj = ptr_unpack_bits(fences[n], &flags, 2); - if (!(flags & I915_EXEC_FENCE_SIGNAL)) - continue; - - drm_syncobj_replace_fence(syncobj, fence); - } -} - -static int -i915_gem_do_execbuffer(struct drm_device *dev, - struct drm_file *file, - struct drm_i915_gem_execbuffer2 *args, - struct drm_i915_gem_exec_object2 *exec, - struct drm_syncobj **fences) -{ - struct i915_execbuffer eb; - struct dma_fence *in_fence = NULL; - struct dma_fence *exec_fence = NULL; - struct sync_file *out_fence = NULL; - int out_fence_fd = -1; - int err; - - BUILD_BUG_ON(__EXEC_INTERNAL_FLAGS & ~__I915_EXEC_ILLEGAL_FLAGS); - BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS & - ~__EXEC_OBJECT_UNKNOWN_FLAGS); - - eb.i915 = to_i915(dev); - eb.file = file; - eb.args = args; - if (DBG_FORCE_RELOC || !(args->flags & I915_EXEC_NO_RELOC)) - args->flags |= __EXEC_HAS_RELOC; - - eb.exec = exec; - eb.vma = (struct i915_vma **)(exec + args->buffer_count + 1); - eb.vma[0] = NULL; - eb.flags = (unsigned int *)(eb.vma + args->buffer_count + 1); - - eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS; - reloc_cache_init(&eb.reloc_cache, eb.i915); - - eb.buffer_count = args->buffer_count; - eb.batch_start_offset = args->batch_start_offset; - eb.batch_len = args->batch_len; - - eb.batch_flags = 0; - if (args->flags & I915_EXEC_SECURE) { - if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN)) - return -EPERM; - - eb.batch_flags |= I915_DISPATCH_SECURE; - } - if (args->flags & I915_EXEC_IS_PINNED) - eb.batch_flags |= I915_DISPATCH_PINNED; - - if (args->flags & I915_EXEC_FENCE_IN) { - in_fence = sync_file_get_fence(lower_32_bits(args->rsvd2)); - if (!in_fence) - return -EINVAL; - } 
- - if (args->flags & I915_EXEC_FENCE_SUBMIT) { - if (in_fence) { - err = -EINVAL; - goto err_in_fence; - } - - exec_fence = sync_file_get_fence(lower_32_bits(args->rsvd2)); - if (!exec_fence) { - err = -EINVAL; - goto err_in_fence; - } - } - - if (args->flags & I915_EXEC_FENCE_OUT) { - out_fence_fd = get_unused_fd_flags(O_CLOEXEC); - if (out_fence_fd < 0) { - err = out_fence_fd; - goto err_exec_fence; - } - } - - err = eb_create(&eb); - if (err) - goto err_out_fence; - - GEM_BUG_ON(!eb.lut_size); - - err = eb_select_context(&eb); - if (unlikely(err)) - goto err_destroy; - - /* - * Take a local wakeref for preparing to dispatch the execbuf as - * we expect to access the hardware fairly frequently in the - * process. Upon first dispatch, we acquire another prolonged - * wakeref that we hold until the GPU has been idle for at least - * 100ms. - */ - intel_gt_pm_get(eb.i915); - - err = i915_mutex_lock_interruptible(dev); - if (err) - goto err_rpm; - - err = eb_select_engine(&eb, file, args); - if (unlikely(err)) - goto err_unlock; - - err = eb_wait_for_ring(&eb); /* may temporarily drop struct_mutex */ - if (unlikely(err)) - goto err_engine; - - err = eb_relocate(&eb); - if (err) { - /* - * If the user expects the execobject.offset and - * reloc.presumed_offset to be an exact match, - * as for using NO_RELOC, then we cannot update - * the execobject.offset until we have completed - * relocation. - */ - args->flags &= ~__EXEC_HAS_RELOC; - goto err_vma; - } - - if (unlikely(*eb.batch->exec_flags & EXEC_OBJECT_WRITE)) { - DRM_DEBUG("Attempting to use self-modifying batch buffer\n"); - err = -EINVAL; - goto err_vma; - } - if (eb.batch_start_offset > eb.batch->size || - eb.batch_len > eb.batch->size - eb.batch_start_offset) { - DRM_DEBUG("Attempting to use out-of-bounds batch\n"); - err = -EINVAL; - goto err_vma; - } - - if (eb_use_cmdparser(&eb)) { - struct i915_vma *vma; - - vma = eb_parse(&eb, drm_is_current_master(file)); - if (IS_ERR(vma)) { - err = PTR_ERR(vma); - goto err_vma; - } - - if (vma) { - /* - * Batch parsed and accepted: - * - * Set the DISPATCH_SECURE bit to remove the NON_SECURE - * bit from MI_BATCH_BUFFER_START commands issued in - * the dispatch_execbuffer implementations. We - * specifically don't want that set on batches the - * command parser has accepted. - */ - eb.batch_flags |= I915_DISPATCH_SECURE; - eb.batch_start_offset = 0; - eb.batch = vma; - } - } - - if (eb.batch_len == 0) - eb.batch_len = eb.batch->size - eb.batch_start_offset; - - /* - * snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure - * batch" bit. Hence we need to pin secure batches into the global gtt. - * hsw should have this fixed, but bdw mucks it up again. */ - if (eb.batch_flags & I915_DISPATCH_SECURE) { - struct i915_vma *vma; - - /* - * So on first glance it looks freaky that we pin the batch here - * outside of the reservation loop. But: - * - The batch is already pinned into the relevant ppgtt, so we - * already have the backing storage fully allocated. - * - No other BO uses the global gtt (well contexts, but meh), - * so we don't really have issues with multiple objects not - * fitting due to fragmentation. - * So this is actually safe. - */ - vma = i915_gem_object_ggtt_pin(eb.batch->obj, NULL, 0, 0, 0); - if (IS_ERR(vma)) { - err = PTR_ERR(vma); - goto err_vma; - } - - eb.batch = vma; - } - - /* All GPU relocation batches must be submitted prior to the user rq */ - GEM_BUG_ON(eb.reloc_cache.rq); - - /* Allocate a request for this batch buffer nice and early. 
*/ - eb.request = i915_request_create(eb.context); - if (IS_ERR(eb.request)) { - err = PTR_ERR(eb.request); - goto err_batch_unpin; - } - - if (in_fence) { - err = i915_request_await_dma_fence(eb.request, in_fence); - if (err < 0) - goto err_request; - } - - if (exec_fence) { - err = i915_request_await_execution(eb.request, exec_fence, - eb.engine->bond_execute); - if (err < 0) - goto err_request; - } - - if (fences) { - err = await_fence_array(&eb, fences); - if (err) - goto err_request; - } - - if (out_fence_fd != -1) { - out_fence = sync_file_create(&eb.request->fence); - if (!out_fence) { - err = -ENOMEM; - goto err_request; - } - } - - /* - * Whilst this request exists, batch_obj will be on the - * active_list, and so will hold the active reference. Only when this - * request is retired will the the batch_obj be moved onto the - * inactive_list and lose its active reference. Hence we do not need - * to explicitly hold another reference here. - */ - eb.request->batch = eb.batch; - - trace_i915_request_queue(eb.request, eb.batch_flags); - err = eb_submit(&eb); -err_request: - add_to_client(eb.request, file); - i915_request_add(eb.request); - - if (fences) - signal_fence_array(&eb, fences); - - if (out_fence) { - if (err == 0) { - fd_install(out_fence_fd, out_fence->file); - args->rsvd2 &= GENMASK_ULL(31, 0); /* keep in-fence */ - args->rsvd2 |= (u64)out_fence_fd << 32; - out_fence_fd = -1; - } else { - fput(out_fence->file); - } - } - -err_batch_unpin: - if (eb.batch_flags & I915_DISPATCH_SECURE) - i915_vma_unpin(eb.batch); -err_vma: - if (eb.exec) - eb_release_vmas(&eb); -err_engine: - eb_unpin_context(&eb); -err_unlock: - mutex_unlock(&dev->struct_mutex); -err_rpm: - intel_gt_pm_put(eb.i915); - i915_gem_context_put(eb.gem_context); -err_destroy: - eb_destroy(&eb); -err_out_fence: - if (out_fence_fd != -1) - put_unused_fd(out_fence_fd); -err_exec_fence: - dma_fence_put(exec_fence); -err_in_fence: - dma_fence_put(in_fence); - return err; -} - -static size_t eb_element_size(void) -{ - return (sizeof(struct drm_i915_gem_exec_object2) + - sizeof(struct i915_vma *) + - sizeof(unsigned int)); -} - -static bool check_buffer_count(size_t count) -{ - const size_t sz = eb_element_size(); - - /* - * When using LUT_HANDLE, we impose a limit of INT_MAX for the lookup - * array size (see eb_create()). Otherwise, we can accept an array as - * large as can be addressed (though use large arrays at your peril)! - */ - - return !(count < 1 || count > INT_MAX || count > SIZE_MAX / sz - 1); -} - -/* - * Legacy execbuffer just creates an exec2 list from the original exec object - * list array and passes it to the real function. 
- */ -int -i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data, - struct drm_file *file) -{ - struct drm_i915_gem_execbuffer *args = data; - struct drm_i915_gem_execbuffer2 exec2; - struct drm_i915_gem_exec_object *exec_list = NULL; - struct drm_i915_gem_exec_object2 *exec2_list = NULL; - const size_t count = args->buffer_count; - unsigned int i; - int err; - - if (!check_buffer_count(count)) { - DRM_DEBUG("execbuf2 with %zd buffers\n", count); - return -EINVAL; - } - - exec2.buffers_ptr = args->buffers_ptr; - exec2.buffer_count = args->buffer_count; - exec2.batch_start_offset = args->batch_start_offset; - exec2.batch_len = args->batch_len; - exec2.DR1 = args->DR1; - exec2.DR4 = args->DR4; - exec2.num_cliprects = args->num_cliprects; - exec2.cliprects_ptr = args->cliprects_ptr; - exec2.flags = I915_EXEC_RENDER; - i915_execbuffer2_set_context_id(exec2, 0); - - if (!i915_gem_check_execbuffer(&exec2)) - return -EINVAL; - - /* Copy in the exec list from userland */ - exec_list = kvmalloc_array(count, sizeof(*exec_list), - __GFP_NOWARN | GFP_KERNEL); - exec2_list = kvmalloc_array(count + 1, eb_element_size(), - __GFP_NOWARN | GFP_KERNEL); - if (exec_list == NULL || exec2_list == NULL) { - DRM_DEBUG("Failed to allocate exec list for %d buffers\n", - args->buffer_count); - kvfree(exec_list); - kvfree(exec2_list); - return -ENOMEM; - } - err = copy_from_user(exec_list, - u64_to_user_ptr(args->buffers_ptr), - sizeof(*exec_list) * count); - if (err) { - DRM_DEBUG("copy %d exec entries failed %d\n", - args->buffer_count, err); - kvfree(exec_list); - kvfree(exec2_list); - return -EFAULT; - } - - for (i = 0; i < args->buffer_count; i++) { - exec2_list[i].handle = exec_list[i].handle; - exec2_list[i].relocation_count = exec_list[i].relocation_count; - exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr; - exec2_list[i].alignment = exec_list[i].alignment; - exec2_list[i].offset = exec_list[i].offset; - if (INTEL_GEN(to_i915(dev)) < 4) - exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE; - else - exec2_list[i].flags = 0; - } - - err = i915_gem_do_execbuffer(dev, file, &exec2, exec2_list, NULL); - if (exec2.flags & __EXEC_HAS_RELOC) { - struct drm_i915_gem_exec_object __user *user_exec_list = - u64_to_user_ptr(args->buffers_ptr); - - /* Copy the new buffer offsets back to the user's exec list. 
*/ - for (i = 0; i < args->buffer_count; i++) { - if (!(exec2_list[i].offset & UPDATE)) - continue; - - exec2_list[i].offset = - gen8_canonical_addr(exec2_list[i].offset & PIN_OFFSET_MASK); - exec2_list[i].offset &= PIN_OFFSET_MASK; - if (__copy_to_user(&user_exec_list[i].offset, - &exec2_list[i].offset, - sizeof(user_exec_list[i].offset))) - break; - } - } - - kvfree(exec_list); - kvfree(exec2_list); - return err; -} - -int -i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data, - struct drm_file *file) -{ - struct drm_i915_gem_execbuffer2 *args = data; - struct drm_i915_gem_exec_object2 *exec2_list; - struct drm_syncobj **fences = NULL; - const size_t count = args->buffer_count; - int err; - - if (!check_buffer_count(count)) { - DRM_DEBUG("execbuf2 with %zd buffers\n", count); - return -EINVAL; - } - - if (!i915_gem_check_execbuffer(args)) - return -EINVAL; - - /* Allocate an extra slot for use by the command parser */ - exec2_list = kvmalloc_array(count + 1, eb_element_size(), - __GFP_NOWARN | GFP_KERNEL); - if (exec2_list == NULL) { - DRM_DEBUG("Failed to allocate exec list for %zd buffers\n", - count); - return -ENOMEM; - } - if (copy_from_user(exec2_list, - u64_to_user_ptr(args->buffers_ptr), - sizeof(*exec2_list) * count)) { - DRM_DEBUG("copy %zd exec entries failed\n", count); - kvfree(exec2_list); - return -EFAULT; - } - - if (args->flags & I915_EXEC_FENCE_ARRAY) { - fences = get_fence_array(args, file); - if (IS_ERR(fences)) { - kvfree(exec2_list); - return PTR_ERR(fences); - } - } - - err = i915_gem_do_execbuffer(dev, file, args, exec2_list, fences); - - /* - * Now that we have begun execution of the batchbuffer, we ignore - * any new error after this point. Also given that we have already - * updated the associated relocations, we try to write out the current - * object locations irrespective of any error. - */ - if (args->flags & __EXEC_HAS_RELOC) { - struct drm_i915_gem_exec_object2 __user *user_exec_list = - u64_to_user_ptr(args->buffers_ptr); - unsigned int i; - - /* Copy the new buffer offsets back to the user's exec list. */ - /* - * Note: count * sizeof(*user_exec_list) does not overflow, - * because we checked 'count' in check_buffer_count(). - * - * And this range already got effectively checked earlier - * when we did the "copy_from_user()" above. 
- */ - if (!user_access_begin(user_exec_list, count * sizeof(*user_exec_list))) - goto end; - - for (i = 0; i < args->buffer_count; i++) { - if (!(exec2_list[i].offset & UPDATE)) - continue; - - exec2_list[i].offset = - gen8_canonical_addr(exec2_list[i].offset & PIN_OFFSET_MASK); - unsafe_put_user(exec2_list[i].offset, - &user_exec_list[i].offset, - end_user); - } -end_user: - user_access_end(); -end:; - } - - args->flags &= ~__I915_EXEC_UNKNOWN_FLAGS; - put_fence_array(args, fences); - kvfree(exec2_list); - return err; -} diff --git a/drivers/gpu/drm/i915/i915_gem_internal.c b/drivers/gpu/drm/i915/i915_gem_internal.c deleted file mode 100644 index 21662176819f..000000000000 --- a/drivers/gpu/drm/i915/i915_gem_internal.c +++ /dev/null @@ -1,207 +0,0 @@ -/* - * Copyright © 2014-2016 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - * - */ - -#include -#include "i915_drv.h" - -#define QUIET (__GFP_NORETRY | __GFP_NOWARN) -#define MAYFAIL (__GFP_RETRY_MAYFAIL | __GFP_NOWARN) - -static void internal_free_pages(struct sg_table *st) -{ - struct scatterlist *sg; - - for (sg = st->sgl; sg; sg = __sg_next(sg)) { - if (sg_page(sg)) - __free_pages(sg_page(sg), get_order(sg->length)); - } - - sg_free_table(st); - kfree(st); -} - -static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj) -{ - struct drm_i915_private *i915 = to_i915(obj->base.dev); - struct sg_table *st; - struct scatterlist *sg; - unsigned int sg_page_sizes; - unsigned int npages; - int max_order; - gfp_t gfp; - - max_order = MAX_ORDER; -#ifdef CONFIG_SWIOTLB - if (swiotlb_nr_tbl()) { - unsigned int max_segment; - - max_segment = swiotlb_max_segment(); - if (max_segment) { - max_segment = max_t(unsigned int, max_segment, - PAGE_SIZE) >> PAGE_SHIFT; - max_order = min(max_order, ilog2(max_segment)); - } - } -#endif - - gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_RECLAIMABLE; - if (IS_I965GM(i915) || IS_I965G(i915)) { - /* 965gm cannot relocate objects above 4GiB. */ - gfp &= ~__GFP_HIGHMEM; - gfp |= __GFP_DMA32; - } - -create_st: - st = kmalloc(sizeof(*st), GFP_KERNEL); - if (!st) - return -ENOMEM; - - npages = obj->base.size / PAGE_SIZE; - if (sg_alloc_table(st, npages, GFP_KERNEL)) { - kfree(st); - return -ENOMEM; - } - - sg = st->sgl; - st->nents = 0; - sg_page_sizes = 0; - - do { - int order = min(fls(npages) - 1, max_order); - struct page *page; - - do { - page = alloc_pages(gfp | (order ? 
QUIET : MAYFAIL), - order); - if (page) - break; - if (!order--) - goto err; - - /* Limit subsequent allocations as well */ - max_order = order; - } while (1); - - sg_set_page(sg, page, PAGE_SIZE << order, 0); - sg_page_sizes |= PAGE_SIZE << order; - st->nents++; - - npages -= 1 << order; - if (!npages) { - sg_mark_end(sg); - break; - } - - sg = __sg_next(sg); - } while (1); - - if (i915_gem_gtt_prepare_pages(obj, st)) { - /* Failed to dma-map try again with single page sg segments */ - if (get_order(st->sgl->length)) { - internal_free_pages(st); - max_order = 0; - goto create_st; - } - goto err; - } - - /* Mark the pages as dontneed whilst they are still pinned. As soon - * as they are unpinned they are allowed to be reaped by the shrinker, - * and the caller is expected to repopulate - the contents of this - * object are only valid whilst active and pinned. - */ - obj->mm.madv = I915_MADV_DONTNEED; - - __i915_gem_object_set_pages(obj, st, sg_page_sizes); - - return 0; - -err: - sg_set_page(sg, NULL, 0, 0); - sg_mark_end(sg); - internal_free_pages(st); - - return -ENOMEM; -} - -static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj, - struct sg_table *pages) -{ - i915_gem_gtt_finish_pages(obj, pages); - internal_free_pages(pages); - - obj->mm.dirty = false; - obj->mm.madv = I915_MADV_WILLNEED; -} - -static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = { - .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | - I915_GEM_OBJECT_IS_SHRINKABLE, - .get_pages = i915_gem_object_get_pages_internal, - .put_pages = i915_gem_object_put_pages_internal, -}; - -/** - * i915_gem_object_create_internal: create an object with volatile pages - * @i915: the i915 device - * @size: the size in bytes of backing storage to allocate for the object - * - * Creates a new object that wraps some internal memory for private use. - * This object is not backed by swappable storage, and as such its contents - * are volatile and only valid whilst pinned. If the object is reaped by the - * shrinker, its pages and data will be discarded. Equally, it is not a full - * GEM object and so not valid for access from userspace. This makes it useful - * for hardware interfaces like ringbuffers (which are pinned from the time - * the request is written to the time the hardware stops accessing it), but - * not for contexts (which need to be preserved when not active for later - * reuse). Note that it is not cleared upon allocation. - */ -struct drm_i915_gem_object * -i915_gem_object_create_internal(struct drm_i915_private *i915, - phys_addr_t size) -{ - struct drm_i915_gem_object *obj; - unsigned int cache_level; - - GEM_BUG_ON(!size); - GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE)); - - if (overflows_type(size, obj->base.size)) - return ERR_PTR(-E2BIG); - - obj = i915_gem_object_alloc(); - if (!obj) - return ERR_PTR(-ENOMEM); - - drm_gem_private_object_init(&i915->drm, &obj->base, size); - i915_gem_object_init(obj, &i915_gem_object_internal_ops); - - obj->read_domains = I915_GEM_DOMAIN_CPU; - obj->write_domain = I915_GEM_DOMAIN_CPU; - - cache_level = HAS_LLC(i915) ? 
I915_CACHE_LLC : I915_CACHE_NONE; - i915_gem_object_set_cache_coherency(obj, cache_level); - - return obj; -} diff --git a/drivers/gpu/drm/i915/i915_gem_pm.c b/drivers/gpu/drm/i915/i915_gem_pm.c deleted file mode 100644 index c0ad19605297..000000000000 --- a/drivers/gpu/drm/i915/i915_gem_pm.c +++ /dev/null @@ -1,251 +0,0 @@ -/* - * SPDX-License-Identifier: MIT - * - * Copyright © 2019 Intel Corporation - */ - -#include "gt/intel_gt_pm.h" - -#include "i915_drv.h" -#include "i915_gem_pm.h" -#include "i915_globals.h" - -static void i915_gem_park(struct drm_i915_private *i915) -{ - struct intel_engine_cs *engine; - enum intel_engine_id id; - - lockdep_assert_held(&i915->drm.struct_mutex); - - for_each_engine(engine, i915, id) - i915_gem_batch_pool_fini(&engine->batch_pool); - - i915_timelines_park(i915); - i915_vma_parked(i915); - - i915_globals_park(); -} - -static void idle_work_handler(struct work_struct *work) -{ - struct drm_i915_private *i915 = - container_of(work, typeof(*i915), gem.idle_work); - bool restart = true; - - cancel_delayed_work(&i915->gem.retire_work); - mutex_lock(&i915->drm.struct_mutex); - - intel_wakeref_lock(&i915->gt.wakeref); - if (!intel_wakeref_active(&i915->gt.wakeref) && !work_pending(work)) { - i915_gem_park(i915); - restart = false; - } - intel_wakeref_unlock(&i915->gt.wakeref); - - mutex_unlock(&i915->drm.struct_mutex); - if (restart) - queue_delayed_work(i915->wq, - &i915->gem.retire_work, - round_jiffies_up_relative(HZ)); -} - -static void retire_work_handler(struct work_struct *work) -{ - struct drm_i915_private *i915 = - container_of(work, typeof(*i915), gem.retire_work.work); - - /* Come back later if the device is busy... */ - if (mutex_trylock(&i915->drm.struct_mutex)) { - i915_retire_requests(i915); - mutex_unlock(&i915->drm.struct_mutex); - } - - queue_delayed_work(i915->wq, - &i915->gem.retire_work, - round_jiffies_up_relative(HZ)); -} - -static int pm_notifier(struct notifier_block *nb, - unsigned long action, - void *data) -{ - struct drm_i915_private *i915 = - container_of(nb, typeof(*i915), gem.pm_notifier); - - switch (action) { - case INTEL_GT_UNPARK: - i915_globals_unpark(); - queue_delayed_work(i915->wq, - &i915->gem.retire_work, - round_jiffies_up_relative(HZ)); - break; - - case INTEL_GT_PARK: - queue_work(i915->wq, &i915->gem.idle_work); - break; - } - - return NOTIFY_OK; -} - -static bool switch_to_kernel_context_sync(struct drm_i915_private *i915) -{ - bool result = true; - - do { - if (i915_gem_wait_for_idle(i915, - I915_WAIT_LOCKED | - I915_WAIT_FOR_IDLE_BOOST, - I915_GEM_IDLE_TIMEOUT) == -ETIME) { - /* XXX hide warning from gem_eio */ - if (i915_modparams.reset) { - dev_err(i915->drm.dev, - "Failed to idle engines, declaring wedged!\n"); - GEM_TRACE_DUMP(); - } - - /* - * Forcibly cancel outstanding work and leave - * the gpu quiet. - */ - i915_gem_set_wedged(i915); - result = false; - } - } while (i915_retire_requests(i915) && result); - - GEM_BUG_ON(i915->gt.awake); - return result; -} - -bool i915_gem_load_power_context(struct drm_i915_private *i915) -{ - return switch_to_kernel_context_sync(i915); -} - -void i915_gem_suspend(struct drm_i915_private *i915) -{ - GEM_TRACE("\n"); - - intel_wakeref_auto(&i915->mm.userfault_wakeref, 0); - flush_workqueue(i915->wq); - - mutex_lock(&i915->drm.struct_mutex); - - /* - * We have to flush all the executing contexts to main memory so - * that they can saved in the hibernation image. To ensure the last - * context image is coherent, we have to switch away from it. 
That - * leaves the i915->kernel_context still active when - * we actually suspend, and its image in memory may not match the GPU - * state. Fortunately, the kernel_context is disposable and we do - * not rely on its state. - */ - switch_to_kernel_context_sync(i915); - - mutex_unlock(&i915->drm.struct_mutex); - - /* - * Assert that we successfully flushed all the work and - * reset the GPU back to its idle, low power state. - */ - GEM_BUG_ON(i915->gt.awake); - flush_work(&i915->gem.idle_work); - - cancel_delayed_work_sync(&i915->gpu_error.hangcheck_work); - - i915_gem_drain_freed_objects(i915); - - intel_uc_suspend(i915); -} - -void i915_gem_suspend_late(struct drm_i915_private *i915) -{ - struct drm_i915_gem_object *obj; - struct list_head *phases[] = { - &i915->mm.unbound_list, - &i915->mm.bound_list, - NULL - }, **phase; - - /* - * Neither the BIOS, ourselves or any other kernel - * expects the system to be in execlists mode on startup, - * so we need to reset the GPU back to legacy mode. And the only - * known way to disable logical contexts is through a GPU reset. - * - * So in order to leave the system in a known default configuration, - * always reset the GPU upon unload and suspend. Afterwards we then - * clean up the GEM state tracking, flushing off the requests and - * leaving the system in a known idle state. - * - * Note that is of the upmost importance that the GPU is idle and - * all stray writes are flushed *before* we dismantle the backing - * storage for the pinned objects. - * - * However, since we are uncertain that resetting the GPU on older - * machines is a good idea, we don't - just in case it leaves the - * machine in an unusable condition. - */ - - mutex_lock(&i915->drm.struct_mutex); - for (phase = phases; *phase; phase++) { - list_for_each_entry(obj, *phase, mm.link) - WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false)); - } - mutex_unlock(&i915->drm.struct_mutex); - - intel_uc_sanitize(i915); - i915_gem_sanitize(i915); -} - -void i915_gem_resume(struct drm_i915_private *i915) -{ - GEM_TRACE("\n"); - - WARN_ON(i915->gt.awake); - - mutex_lock(&i915->drm.struct_mutex); - intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL); - - i915_gem_restore_gtt_mappings(i915); - i915_gem_restore_fences(i915); - - /* - * As we didn't flush the kernel context before suspend, we cannot - * guarantee that the context image is complete. So let's just reset - * it and start again. - */ - intel_gt_resume(i915); - - if (i915_gem_init_hw(i915)) - goto err_wedged; - - intel_uc_resume(i915); - - /* Always reload a context for powersaving. 
*/ - if (!i915_gem_load_power_context(i915)) - goto err_wedged; - -out_unlock: - intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL); - mutex_unlock(&i915->drm.struct_mutex); - return; - -err_wedged: - if (!i915_reset_failed(i915)) { - dev_err(i915->drm.dev, - "Failed to re-initialize GPU, declaring it wedged!\n"); - i915_gem_set_wedged(i915); - } - goto out_unlock; -} - -void i915_gem_init__pm(struct drm_i915_private *i915) -{ - INIT_WORK(&i915->gem.idle_work, idle_work_handler); - INIT_DELAYED_WORK(&i915->gem.retire_work, retire_work_handler); - - i915->gem.pm_notifier.notifier_call = pm_notifier; - blocking_notifier_chain_register(&i915->gt.pm_notifications, - &i915->gem.pm_notifier); -} diff --git a/drivers/gpu/drm/i915/i915_gem_pm.h b/drivers/gpu/drm/i915/i915_gem_pm.h deleted file mode 100644 index 6f7d5d11ac3b..000000000000 --- a/drivers/gpu/drm/i915/i915_gem_pm.h +++ /dev/null @@ -1,25 +0,0 @@ -/* - * SPDX-License-Identifier: MIT - * - * Copyright © 2019 Intel Corporation - */ - -#ifndef __I915_GEM_PM_H__ -#define __I915_GEM_PM_H__ - -#include - -struct drm_i915_private; -struct work_struct; - -void i915_gem_init__pm(struct drm_i915_private *i915); - -bool i915_gem_load_power_context(struct drm_i915_private *i915); -void i915_gem_resume(struct drm_i915_private *i915); - -void i915_gem_idle_work_handler(struct work_struct *work); - -void i915_gem_suspend(struct drm_i915_private *i915); -void i915_gem_suspend_late(struct drm_i915_private *i915); - -#endif /* __I915_GEM_PM_H__ */ diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c deleted file mode 100644 index 2c7aefb3e101..000000000000 --- a/drivers/gpu/drm/i915/i915_gem_shrinker.c +++ /dev/null @@ -1,574 +0,0 @@ -/* - * Copyright © 2008-2015 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. 
- * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "i915_drv.h" -#include "i915_trace.h" - -static bool shrinker_lock(struct drm_i915_private *i915, - unsigned int flags, - bool *unlock) -{ - struct mutex *m = &i915->drm.struct_mutex; - - switch (mutex_trylock_recursive(m)) { - case MUTEX_TRYLOCK_RECURSIVE: - *unlock = false; - return true; - - case MUTEX_TRYLOCK_FAILED: - *unlock = false; - if (flags & I915_SHRINK_ACTIVE && - mutex_lock_killable_nested(m, I915_MM_SHRINKER) == 0) - *unlock = true; - return *unlock; - - case MUTEX_TRYLOCK_SUCCESS: - *unlock = true; - return true; - } - - BUG(); -} - -static void shrinker_unlock(struct drm_i915_private *i915, bool unlock) -{ - if (!unlock) - return; - - mutex_unlock(&i915->drm.struct_mutex); -} - -static bool swap_available(void) -{ - return get_nr_swap_pages() > 0; -} - -static bool can_release_pages(struct drm_i915_gem_object *obj) -{ - /* Consider only shrinkable objects. */ - if (!i915_gem_object_is_shrinkable(obj)) - return false; - - /* Only report true if by unbinding the object and putting its pages - * we can actually make forward progress towards freeing physical - * pages. - * - * If the pages are pinned for any other reason than being bound - * to the GPU, simply unbinding from the GPU is not going to succeed - * in releasing our pin count on the pages themselves. - */ - if (atomic_read(&obj->mm.pages_pin_count) > obj->bind_count) - return false; - - /* If any vma are "permanently" pinned, it will prevent us from - * reclaiming the obj->mm.pages. We only allow scanout objects to claim - * a permanent pin, along with a few others like the context objects. - * To simplify the scan, and to avoid walking the list of vma under the - * object, we just check the count of its permanently pinned. - */ - if (READ_ONCE(obj->pin_global)) - return false; - - /* We can only return physical pages to the system if we can either - * discard the contents (because the user has marked them as being - * purgeable) or if we can move their contents out to swap. - */ - return swap_available() || obj->mm.madv == I915_MADV_DONTNEED; -} - -static bool unsafe_drop_pages(struct drm_i915_gem_object *obj) -{ - if (i915_gem_object_unbind(obj) == 0) - __i915_gem_object_put_pages(obj, I915_MM_SHRINKER); - return !i915_gem_object_has_pages(obj); -} - -static void try_to_writeback(struct drm_i915_gem_object *obj, - unsigned int flags) -{ - switch (obj->mm.madv) { - case I915_MADV_DONTNEED: - i915_gem_object_truncate(obj); - case __I915_MADV_PURGED: - return; - } - - if (flags & I915_SHRINK_WRITEBACK) - i915_gem_object_writeback(obj); -} - -/** - * i915_gem_shrink - Shrink buffer object caches - * @i915: i915 device - * @target: amount of memory to make available, in pages - * @nr_scanned: optional output for number of pages scanned (incremental) - * @flags: control flags for selecting cache types - * - * This function is the main interface to the shrinker. It will try to release - * up to @target pages of main memory backing storage from buffer objects. - * Selection of the specific caches can be done with @flags. This is e.g. useful - * when purgeable objects should be removed from caches preferentially. - * - * Note that it's not guaranteed that the released amount is actually available as - * free system memory - the pages might still be in use due to other reasons - * (like cpu mmaps) or the mm core has reused them before we could grab them.
- * Therefore code that needs to explicitly shrink buffer objects caches (e.g. to - * avoid deadlocks in memory reclaim) must fall back to i915_gem_shrink_all(). - * - * Also note that any kind of pinning (both per-vma address space pins and - * backing storage pins at the buffer object level) result in the shrinker code - * having to skip the object. - * - * Returns: - * The number of pages of backing storage actually released. - */ -unsigned long -i915_gem_shrink(struct drm_i915_private *i915, - unsigned long target, - unsigned long *nr_scanned, - unsigned flags) -{ - const struct { - struct list_head *list; - unsigned int bit; - } phases[] = { - { &i915->mm.unbound_list, I915_SHRINK_UNBOUND }, - { &i915->mm.bound_list, I915_SHRINK_BOUND }, - { NULL, 0 }, - }, *phase; - intel_wakeref_t wakeref = 0; - unsigned long count = 0; - unsigned long scanned = 0; - bool unlock; - - if (!shrinker_lock(i915, flags, &unlock)) - return 0; - - /* - * When shrinking the active list, also consider active contexts. - * Active contexts are pinned until they are retired, and so can - * not be simply unbound to retire and unpin their pages. To shrink - * the contexts, we must wait until the gpu is idle. - * - * We don't care about errors here; if we cannot wait upon the GPU, - * we will free as much as we can and hope to get a second chance. - */ - if (flags & I915_SHRINK_ACTIVE) - i915_gem_wait_for_idle(i915, - I915_WAIT_LOCKED, - MAX_SCHEDULE_TIMEOUT); - - trace_i915_gem_shrink(i915, target, flags); - i915_retire_requests(i915); - - /* - * Unbinding of objects will require HW access; Let us not wake the - * device just to recover a little memory. If absolutely necessary, - * we will force the wake during oom-notifier. - */ - if (flags & I915_SHRINK_BOUND) { - wakeref = intel_runtime_pm_get_if_in_use(i915); - if (!wakeref) - flags &= ~I915_SHRINK_BOUND; - } - - /* - * As we may completely rewrite the (un)bound list whilst unbinding - * (due to retiring requests) we have to strictly process only - * one element of the list at the time, and recheck the list - * on every iteration. - * - * In particular, we must hold a reference whilst removing the - * object as we may end up waiting for and/or retiring the objects. - * This might release the final reference (held by the active list) - * and result in the object being freed from under us. This is - * similar to the precautions the eviction code must take whilst - * removing objects. - * - * Also note that although these lists do not hold a reference to - * the object we can safely grab one here: The final object - * unreferencing and the bound_list are both protected by the - * dev->struct_mutex and so we won't ever be able to observe an - * object on the bound_list with a reference count equals 0. - */ - for (phase = phases; phase->list; phase++) { - struct list_head still_in_list; - struct drm_i915_gem_object *obj; - - if ((flags & phase->bit) == 0) - continue; - - INIT_LIST_HEAD(&still_in_list); - - /* - * We serialize our access to unreferenced objects through - * the use of the struct_mutex. While the objects are not - * yet freed (due to RCU then a workqueue) we still want - * to be able to shrink their pages, so they remain on - * the unbound/bound list until actually freed. 
- */ - spin_lock(&i915->mm.obj_lock); - while (count < target && - (obj = list_first_entry_or_null(phase->list, - typeof(*obj), - mm.link))) { - list_move_tail(&obj->mm.link, &still_in_list); - - if (flags & I915_SHRINK_PURGEABLE && - obj->mm.madv != I915_MADV_DONTNEED) - continue; - - if (flags & I915_SHRINK_VMAPS && - !is_vmalloc_addr(obj->mm.mapping)) - continue; - - if (!(flags & I915_SHRINK_ACTIVE) && - (i915_gem_object_is_active(obj) || - i915_gem_object_is_framebuffer(obj))) - continue; - - if (!can_release_pages(obj)) - continue; - - spin_unlock(&i915->mm.obj_lock); - - if (unsafe_drop_pages(obj)) { - /* May arrive from get_pages on another bo */ - mutex_lock_nested(&obj->mm.lock, - I915_MM_SHRINKER); - if (!i915_gem_object_has_pages(obj)) { - try_to_writeback(obj, flags); - count += obj->base.size >> PAGE_SHIFT; - } - mutex_unlock(&obj->mm.lock); - } - scanned += obj->base.size >> PAGE_SHIFT; - - spin_lock(&i915->mm.obj_lock); - } - list_splice_tail(&still_in_list, phase->list); - spin_unlock(&i915->mm.obj_lock); - } - - if (flags & I915_SHRINK_BOUND) - intel_runtime_pm_put(i915, wakeref); - - i915_retire_requests(i915); - - shrinker_unlock(i915, unlock); - - if (nr_scanned) - *nr_scanned += scanned; - return count; -} - -/** - * i915_gem_shrink_all - Shrink buffer object caches completely - * @i915: i915 device - * - * This is a simple wraper around i915_gem_shrink() to aggressively shrink all - * caches completely. It also first waits for and retires all outstanding - * requests to also be able to release backing storage for active objects. - * - * This should only be used in code to intentionally quiescent the gpu or as a - * last-ditch effort when memory seems to have run out. - * - * Returns: - * The number of pages of backing storage actually released. - */ -unsigned long i915_gem_shrink_all(struct drm_i915_private *i915) -{ - intel_wakeref_t wakeref; - unsigned long freed = 0; - - with_intel_runtime_pm(i915, wakeref) { - freed = i915_gem_shrink(i915, -1UL, NULL, - I915_SHRINK_BOUND | - I915_SHRINK_UNBOUND | - I915_SHRINK_ACTIVE); - } - - return freed; -} - -static unsigned long -i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc) -{ - struct drm_i915_private *i915 = - container_of(shrinker, struct drm_i915_private, mm.shrinker); - struct drm_i915_gem_object *obj; - unsigned long num_objects = 0; - unsigned long count = 0; - - spin_lock(&i915->mm.obj_lock); - list_for_each_entry(obj, &i915->mm.unbound_list, mm.link) - if (can_release_pages(obj)) { - count += obj->base.size >> PAGE_SHIFT; - num_objects++; - } - - list_for_each_entry(obj, &i915->mm.bound_list, mm.link) - if (!i915_gem_object_is_active(obj) && can_release_pages(obj)) { - count += obj->base.size >> PAGE_SHIFT; - num_objects++; - } - spin_unlock(&i915->mm.obj_lock); - - /* Update our preferred vmscan batch size for the next pass. - * Our rough guess for an effective batch size is roughly 2 - * available GEM objects worth of pages. That is we don't want - * the shrinker to fire, until it is worth the cost of freeing an - * entire GEM object. 
- */ - if (num_objects) { - unsigned long avg = 2 * count / num_objects; - - i915->mm.shrinker.batch = - max((i915->mm.shrinker.batch + avg) >> 1, - 128ul /* default SHRINK_BATCH */); - } - - return count; -} - -static unsigned long -i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc) -{ - struct drm_i915_private *i915 = - container_of(shrinker, struct drm_i915_private, mm.shrinker); - unsigned long freed; - bool unlock; - - sc->nr_scanned = 0; - - if (!shrinker_lock(i915, 0, &unlock)) - return SHRINK_STOP; - - freed = i915_gem_shrink(i915, - sc->nr_to_scan, - &sc->nr_scanned, - I915_SHRINK_BOUND | - I915_SHRINK_UNBOUND | - I915_SHRINK_PURGEABLE | - I915_SHRINK_WRITEBACK); - if (sc->nr_scanned < sc->nr_to_scan) - freed += i915_gem_shrink(i915, - sc->nr_to_scan - sc->nr_scanned, - &sc->nr_scanned, - I915_SHRINK_BOUND | - I915_SHRINK_UNBOUND | - I915_SHRINK_WRITEBACK); - if (sc->nr_scanned < sc->nr_to_scan && current_is_kswapd()) { - intel_wakeref_t wakeref; - - with_intel_runtime_pm(i915, wakeref) { - freed += i915_gem_shrink(i915, - sc->nr_to_scan - sc->nr_scanned, - &sc->nr_scanned, - I915_SHRINK_ACTIVE | - I915_SHRINK_BOUND | - I915_SHRINK_UNBOUND | - I915_SHRINK_WRITEBACK); - } - } - - shrinker_unlock(i915, unlock); - - return sc->nr_scanned ? freed : SHRINK_STOP; -} - -static int -i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr) -{ - struct drm_i915_private *i915 = - container_of(nb, struct drm_i915_private, mm.oom_notifier); - struct drm_i915_gem_object *obj; - unsigned long unevictable, bound, unbound, freed_pages; - intel_wakeref_t wakeref; - - freed_pages = 0; - with_intel_runtime_pm(i915, wakeref) - freed_pages += i915_gem_shrink(i915, -1UL, NULL, - I915_SHRINK_BOUND | - I915_SHRINK_UNBOUND | - I915_SHRINK_WRITEBACK); - - /* Because we may be allocating inside our own driver, we cannot - * assert that there are no objects with pinned pages that are not - * being pointed to by hardware. 
- */ - unbound = bound = unevictable = 0; - spin_lock(&i915->mm.obj_lock); - list_for_each_entry(obj, &i915->mm.unbound_list, mm.link) { - if (!can_release_pages(obj)) - unevictable += obj->base.size >> PAGE_SHIFT; - else - unbound += obj->base.size >> PAGE_SHIFT; - } - list_for_each_entry(obj, &i915->mm.bound_list, mm.link) { - if (!can_release_pages(obj)) - unevictable += obj->base.size >> PAGE_SHIFT; - else - bound += obj->base.size >> PAGE_SHIFT; - } - spin_unlock(&i915->mm.obj_lock); - - if (freed_pages || unbound || bound) - pr_info("Purging GPU memory, %lu pages freed, " - "%lu pages still pinned.\n", - freed_pages, unevictable); - - *(unsigned long *)ptr += freed_pages; - return NOTIFY_DONE; -} - -static int -i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr) -{ - struct drm_i915_private *i915 = - container_of(nb, struct drm_i915_private, mm.vmap_notifier); - struct i915_vma *vma, *next; - unsigned long freed_pages = 0; - intel_wakeref_t wakeref; - bool unlock; - - if (!shrinker_lock(i915, 0, &unlock)) - return NOTIFY_DONE; - - /* Force everything onto the inactive lists */ - if (i915_gem_wait_for_idle(i915, - I915_WAIT_LOCKED, - MAX_SCHEDULE_TIMEOUT)) - goto out; - - with_intel_runtime_pm(i915, wakeref) - freed_pages += i915_gem_shrink(i915, -1UL, NULL, - I915_SHRINK_BOUND | - I915_SHRINK_UNBOUND | - I915_SHRINK_VMAPS); - - /* We also want to clear any cached iomaps as they wrap vmap */ - mutex_lock(&i915->ggtt.vm.mutex); - list_for_each_entry_safe(vma, next, - &i915->ggtt.vm.bound_list, vm_link) { - unsigned long count = vma->node.size >> PAGE_SHIFT; - - if (!vma->iomap || i915_vma_is_active(vma)) - continue; - - mutex_unlock(&i915->ggtt.vm.mutex); - if (i915_vma_unbind(vma) == 0) - freed_pages += count; - mutex_lock(&i915->ggtt.vm.mutex); - } - mutex_unlock(&i915->ggtt.vm.mutex); - -out: - shrinker_unlock(i915, unlock); - - *(unsigned long *)ptr += freed_pages; - return NOTIFY_DONE; -} - -/** - * i915_gem_shrinker_register - Register the i915 shrinker - * @i915: i915 device - * - * This function registers and sets up the i915 shrinker and OOM handler. - */ -void i915_gem_shrinker_register(struct drm_i915_private *i915) -{ - i915->mm.shrinker.scan_objects = i915_gem_shrinker_scan; - i915->mm.shrinker.count_objects = i915_gem_shrinker_count; - i915->mm.shrinker.seeks = DEFAULT_SEEKS; - i915->mm.shrinker.batch = 4096; - WARN_ON(register_shrinker(&i915->mm.shrinker)); - - i915->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom; - WARN_ON(register_oom_notifier(&i915->mm.oom_notifier)); - - i915->mm.vmap_notifier.notifier_call = i915_gem_shrinker_vmap; - WARN_ON(register_vmap_purge_notifier(&i915->mm.vmap_notifier)); -} - -/** - * i915_gem_shrinker_unregister - Unregisters the i915 shrinker - * @i915: i915 device - * - * This function unregisters the i915 shrinker and OOM handler. 
- */ -void i915_gem_shrinker_unregister(struct drm_i915_private *i915) -{ - WARN_ON(unregister_vmap_purge_notifier(&i915->mm.vmap_notifier)); - WARN_ON(unregister_oom_notifier(&i915->mm.oom_notifier)); - unregister_shrinker(&i915->mm.shrinker); -} - -void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915, - struct mutex *mutex) -{ - bool unlock = false; - - if (!IS_ENABLED(CONFIG_LOCKDEP)) - return; - - if (!lockdep_is_held_type(&i915->drm.struct_mutex, -1)) { - mutex_acquire(&i915->drm.struct_mutex.dep_map, - I915_MM_NORMAL, 0, _RET_IP_); - unlock = true; - } - - fs_reclaim_acquire(GFP_KERNEL); - - /* - * As we invariably rely on the struct_mutex within the shrinker, - * but have a complicated recursion dance, taint all the mutexes used - * within the shrinker with the struct_mutex. For completeness, we - * taint with all subclass of struct_mutex, even though we should - * only need tainting by I915_MM_NORMAL to catch possible ABBA - * deadlocks from using struct_mutex inside @mutex. - */ - mutex_acquire(&i915->drm.struct_mutex.dep_map, - I915_MM_SHRINKER, 0, _RET_IP_); - - mutex_acquire(&mutex->dep_map, 0, 0, _RET_IP_); - mutex_release(&mutex->dep_map, 0, _RET_IP_); - - mutex_release(&i915->drm.struct_mutex.dep_map, 0, _RET_IP_); - - fs_reclaim_release(GFP_KERNEL); - - if (unlock) - mutex_release(&i915->drm.struct_mutex.dep_map, 0, _RET_IP_); -} diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c deleted file mode 100644 index 0a8082cfc761..000000000000 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ /dev/null @@ -1,721 +0,0 @@ -/* - * Copyright © 2008-2012 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - * - * Authors: - * Eric Anholt - * Chris Wilson - * - */ - -#include -#include "i915_drv.h" - -/* - * The BIOS typically reserves some of the system's memory for the exclusive - * use of the integrated graphics. This memory is no longer available for - * use by the OS and so the user finds that his system has less memory - * available than he put in. We refer to this memory as stolen. - * - * The BIOS will allocate its framebuffer from the stolen memory. Our - * goal is try to reuse that object for our own fbcon which must always - * be available for panics. Anything else we can reuse the stolen memory - * for is a boon. 
- */ - -int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv, - struct drm_mm_node *node, u64 size, - unsigned alignment, u64 start, u64 end) -{ - int ret; - - if (!drm_mm_initialized(&dev_priv->mm.stolen)) - return -ENODEV; - - /* WaSkipStolenMemoryFirstPage:bdw+ */ - if (INTEL_GEN(dev_priv) >= 8 && start < 4096) - start = 4096; - - mutex_lock(&dev_priv->mm.stolen_lock); - ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node, - size, alignment, 0, - start, end, DRM_MM_INSERT_BEST); - mutex_unlock(&dev_priv->mm.stolen_lock); - - return ret; -} - -int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv, - struct drm_mm_node *node, u64 size, - unsigned alignment) -{ - return i915_gem_stolen_insert_node_in_range(dev_priv, node, size, - alignment, 0, U64_MAX); -} - -void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv, - struct drm_mm_node *node) -{ - mutex_lock(&dev_priv->mm.stolen_lock); - drm_mm_remove_node(node); - mutex_unlock(&dev_priv->mm.stolen_lock); -} - -static int i915_adjust_stolen(struct drm_i915_private *dev_priv, - struct resource *dsm) -{ - struct i915_ggtt *ggtt = &dev_priv->ggtt; - struct resource *r; - - if (dsm->start == 0 || dsm->end <= dsm->start) - return -EINVAL; - - /* - * TODO: We have yet too encounter the case where the GTT wasn't at the - * end of stolen. With that assumption we could simplify this. - */ - - /* Make sure we don't clobber the GTT if it's within stolen memory */ - if (INTEL_GEN(dev_priv) <= 4 && - !IS_G33(dev_priv) && !IS_PINEVIEW(dev_priv) && !IS_G4X(dev_priv)) { - struct resource stolen[2] = {*dsm, *dsm}; - struct resource ggtt_res; - resource_size_t ggtt_start; - - ggtt_start = I915_READ(PGTBL_CTL); - if (IS_GEN(dev_priv, 4)) - ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) | - (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28; - else - ggtt_start &= PGTBL_ADDRESS_LO_MASK; - - ggtt_res = - (struct resource) DEFINE_RES_MEM(ggtt_start, - ggtt_total_entries(ggtt) * 4); - - if (ggtt_res.start >= stolen[0].start && ggtt_res.start < stolen[0].end) - stolen[0].end = ggtt_res.start; - if (ggtt_res.end > stolen[1].start && ggtt_res.end <= stolen[1].end) - stolen[1].start = ggtt_res.end; - - /* Pick the larger of the two chunks */ - if (resource_size(&stolen[0]) > resource_size(&stolen[1])) - *dsm = stolen[0]; - else - *dsm = stolen[1]; - - if (stolen[0].start != stolen[1].start || - stolen[0].end != stolen[1].end) { - DRM_DEBUG_DRIVER("GTT within stolen memory at %pR\n", &ggtt_res); - DRM_DEBUG_DRIVER("Stolen memory adjusted to %pR\n", dsm); - } - } - - /* - * Verify that nothing else uses this physical address. Stolen - * memory should be reserved by the BIOS and hidden from the - * kernel. So if the region is already marked as busy, something - * is seriously wrong. - */ - r = devm_request_mem_region(dev_priv->drm.dev, dsm->start, - resource_size(dsm), - "Graphics Stolen Memory"); - if (r == NULL) { - /* - * One more attempt but this time requesting region from - * start + 1, as we have seen that this resolves the region - * conflict with the PCI Bus. - * This is a BIOS w/a: Some BIOS wrap stolen in the root - * PCI bus, but have an off-by-one error. Hence retry the - * reservation starting from 1 instead of 0. - * There's also BIOS with off-by-one on the other end. - */ - r = devm_request_mem_region(dev_priv->drm.dev, dsm->start + 1, - resource_size(dsm) - 2, - "Graphics Stolen Memory"); - /* - * GEN3 firmware likes to smash pci bridges into the stolen - * range. Apparently this works. 
- */ - if (r == NULL && !IS_GEN(dev_priv, 3)) { - DRM_ERROR("conflict detected with stolen region: %pR\n", - dsm); - - return -EBUSY; - } - } - - return 0; -} - -void i915_gem_cleanup_stolen(struct drm_i915_private *dev_priv) -{ - if (!drm_mm_initialized(&dev_priv->mm.stolen)) - return; - - drm_mm_takedown(&dev_priv->mm.stolen); -} - -static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv, - resource_size_t *base, - resource_size_t *size) -{ - u32 reg_val = I915_READ(IS_GM45(dev_priv) ? - CTG_STOLEN_RESERVED : - ELK_STOLEN_RESERVED); - resource_size_t stolen_top = dev_priv->dsm.end + 1; - - DRM_DEBUG_DRIVER("%s_STOLEN_RESERVED = %08x\n", - IS_GM45(dev_priv) ? "CTG" : "ELK", reg_val); - - if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0) - return; - - /* - * Whether ILK really reuses the ELK register for this is unclear. - * Let's see if we catch anyone with this supposedly enabled on ILK. - */ - WARN(IS_GEN(dev_priv, 5), "ILK stolen reserved found? 0x%08x\n", - reg_val); - - if (!(reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK)) - return; - - *base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16; - WARN_ON((reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base); - - *size = stolen_top - *base; -} - -static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv, - resource_size_t *base, - resource_size_t *size) -{ - u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED); - - DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val); - - if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE)) - return; - - *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK; - - switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) { - case GEN6_STOLEN_RESERVED_1M: - *size = 1024 * 1024; - break; - case GEN6_STOLEN_RESERVED_512K: - *size = 512 * 1024; - break; - case GEN6_STOLEN_RESERVED_256K: - *size = 256 * 1024; - break; - case GEN6_STOLEN_RESERVED_128K: - *size = 128 * 1024; - break; - default: - *size = 1024 * 1024; - MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK); - } -} - -static void vlv_get_stolen_reserved(struct drm_i915_private *dev_priv, - resource_size_t *base, - resource_size_t *size) -{ - u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED); - resource_size_t stolen_top = dev_priv->dsm.end + 1; - - DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val); - - if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE)) - return; - - switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) { - default: - MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK); - /* fall through */ - case GEN7_STOLEN_RESERVED_1M: - *size = 1024 * 1024; - break; - } - - /* - * On vlv, the ADDR_MASK portion is left as 0 and HW deduces the - * reserved location as (top - size). 
- */ - *base = stolen_top - *size; -} - -static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv, - resource_size_t *base, - resource_size_t *size) -{ - u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED); - - DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val); - - if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE)) - return; - - *base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK; - - switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) { - case GEN7_STOLEN_RESERVED_1M: - *size = 1024 * 1024; - break; - case GEN7_STOLEN_RESERVED_256K: - *size = 256 * 1024; - break; - default: - *size = 1024 * 1024; - MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK); - } -} - -static void chv_get_stolen_reserved(struct drm_i915_private *dev_priv, - resource_size_t *base, - resource_size_t *size) -{ - u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED); - - DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val); - - if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE)) - return; - - *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK; - - switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) { - case GEN8_STOLEN_RESERVED_1M: - *size = 1024 * 1024; - break; - case GEN8_STOLEN_RESERVED_2M: - *size = 2 * 1024 * 1024; - break; - case GEN8_STOLEN_RESERVED_4M: - *size = 4 * 1024 * 1024; - break; - case GEN8_STOLEN_RESERVED_8M: - *size = 8 * 1024 * 1024; - break; - default: - *size = 8 * 1024 * 1024; - MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK); - } -} - -static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv, - resource_size_t *base, - resource_size_t *size) -{ - u32 reg_val = I915_READ(GEN6_STOLEN_RESERVED); - resource_size_t stolen_top = dev_priv->dsm.end + 1; - - DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val); - - if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE)) - return; - - if (!(reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK)) - return; - - *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK; - *size = stolen_top - *base; -} - -static void icl_get_stolen_reserved(struct drm_i915_private *dev_priv, - resource_size_t *base, - resource_size_t *size) -{ - u64 reg_val = I915_READ64(GEN6_STOLEN_RESERVED); - - DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val); - - *base = reg_val & GEN11_STOLEN_RESERVED_ADDR_MASK; - - switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) { - case GEN8_STOLEN_RESERVED_1M: - *size = 1024 * 1024; - break; - case GEN8_STOLEN_RESERVED_2M: - *size = 2 * 1024 * 1024; - break; - case GEN8_STOLEN_RESERVED_4M: - *size = 4 * 1024 * 1024; - break; - case GEN8_STOLEN_RESERVED_8M: - *size = 8 * 1024 * 1024; - break; - default: - *size = 8 * 1024 * 1024; - MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK); - } -} - -int i915_gem_init_stolen(struct drm_i915_private *dev_priv) -{ - resource_size_t reserved_base, stolen_top; - resource_size_t reserved_total, reserved_size; - - mutex_init(&dev_priv->mm.stolen_lock); - - if (intel_vgpu_active(dev_priv)) { - DRM_INFO("iGVT-g active, disabling use of stolen memory\n"); - return 0; - } - - if (intel_vtd_active() && INTEL_GEN(dev_priv) < 8) { - DRM_INFO("DMAR active, disabling use of stolen memory\n"); - return 0; - } - - if (resource_size(&intel_graphics_stolen_res) == 0) - return 0; - - dev_priv->dsm = intel_graphics_stolen_res; - - if (i915_adjust_stolen(dev_priv, &dev_priv->dsm)) - return 0; - - GEM_BUG_ON(dev_priv->dsm.start == 0); - GEM_BUG_ON(dev_priv->dsm.end <= dev_priv->dsm.start); - - stolen_top = dev_priv->dsm.end + 1; - reserved_base = stolen_top; - reserved_size = 0; - - switch (INTEL_GEN(dev_priv)) { - 
case 2: - case 3: - break; - case 4: - if (!IS_G4X(dev_priv)) - break; - /* fall through */ - case 5: - g4x_get_stolen_reserved(dev_priv, - &reserved_base, &reserved_size); - break; - case 6: - gen6_get_stolen_reserved(dev_priv, - &reserved_base, &reserved_size); - break; - case 7: - if (IS_VALLEYVIEW(dev_priv)) - vlv_get_stolen_reserved(dev_priv, - &reserved_base, &reserved_size); - else - gen7_get_stolen_reserved(dev_priv, - &reserved_base, &reserved_size); - break; - case 8: - case 9: - case 10: - if (IS_LP(dev_priv)) - chv_get_stolen_reserved(dev_priv, - &reserved_base, &reserved_size); - else - bdw_get_stolen_reserved(dev_priv, - &reserved_base, &reserved_size); - break; - case 11: - default: - icl_get_stolen_reserved(dev_priv, &reserved_base, - &reserved_size); - break; - } - - /* - * Our expectation is that the reserved space is at the top of the - * stolen region and *never* at the bottom. If we see !reserved_base, - * it likely means we failed to read the registers correctly. - */ - if (!reserved_base) { - DRM_ERROR("inconsistent reservation %pa + %pa; ignoring\n", - &reserved_base, &reserved_size); - reserved_base = stolen_top; - reserved_size = 0; - } - - dev_priv->dsm_reserved = - (struct resource) DEFINE_RES_MEM(reserved_base, reserved_size); - - if (!resource_contains(&dev_priv->dsm, &dev_priv->dsm_reserved)) { - DRM_ERROR("Stolen reserved area %pR outside stolen memory %pR\n", - &dev_priv->dsm_reserved, &dev_priv->dsm); - return 0; - } - - /* It is possible for the reserved area to end before the end of stolen - * memory, so just consider the start. */ - reserved_total = stolen_top - reserved_base; - - DRM_DEBUG_DRIVER("Memory reserved for graphics device: %lluK, usable: %lluK\n", - (u64)resource_size(&dev_priv->dsm) >> 10, - ((u64)resource_size(&dev_priv->dsm) - reserved_total) >> 10); - - dev_priv->stolen_usable_size = - resource_size(&dev_priv->dsm) - reserved_total; - - /* Basic memrange allocator for stolen space. */ - drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->stolen_usable_size); - - return 0; -} - -static struct sg_table * -i915_pages_create_for_stolen(struct drm_device *dev, - resource_size_t offset, resource_size_t size) -{ - struct drm_i915_private *dev_priv = to_i915(dev); - struct sg_table *st; - struct scatterlist *sg; - - GEM_BUG_ON(range_overflows(offset, size, resource_size(&dev_priv->dsm))); - - /* We hide that we have no struct page backing our stolen object - * by wrapping the contiguous physical allocation with a fake - * dma mapping in a single scatterlist. 
- */ - - st = kmalloc(sizeof(*st), GFP_KERNEL); - if (st == NULL) - return ERR_PTR(-ENOMEM); - - if (sg_alloc_table(st, 1, GFP_KERNEL)) { - kfree(st); - return ERR_PTR(-ENOMEM); - } - - sg = st->sgl; - sg->offset = 0; - sg->length = size; - - sg_dma_address(sg) = (dma_addr_t)dev_priv->dsm.start + offset; - sg_dma_len(sg) = size; - - return st; -} - -static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj) -{ - struct sg_table *pages = - i915_pages_create_for_stolen(obj->base.dev, - obj->stolen->start, - obj->stolen->size); - if (IS_ERR(pages)) - return PTR_ERR(pages); - - __i915_gem_object_set_pages(obj, pages, obj->stolen->size); - - return 0; -} - -static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj, - struct sg_table *pages) -{ - /* Should only be called from i915_gem_object_release_stolen() */ - sg_free_table(pages); - kfree(pages); -} - -static void -i915_gem_object_release_stolen(struct drm_i915_gem_object *obj) -{ - struct drm_i915_private *dev_priv = to_i915(obj->base.dev); - struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen); - - GEM_BUG_ON(!stolen); - - __i915_gem_object_unpin_pages(obj); - - i915_gem_stolen_remove_node(dev_priv, stolen); - kfree(stolen); -} - -static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = { - .get_pages = i915_gem_object_get_pages_stolen, - .put_pages = i915_gem_object_put_pages_stolen, - .release = i915_gem_object_release_stolen, -}; - -static struct drm_i915_gem_object * -_i915_gem_object_create_stolen(struct drm_i915_private *dev_priv, - struct drm_mm_node *stolen) -{ - struct drm_i915_gem_object *obj; - unsigned int cache_level; - - obj = i915_gem_object_alloc(); - if (obj == NULL) - return NULL; - - drm_gem_private_object_init(&dev_priv->drm, &obj->base, stolen->size); - i915_gem_object_init(obj, &i915_gem_object_stolen_ops); - - obj->stolen = stolen; - obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT; - cache_level = HAS_LLC(dev_priv) ? 
I915_CACHE_LLC : I915_CACHE_NONE; - i915_gem_object_set_cache_coherency(obj, cache_level); - - if (i915_gem_object_pin_pages(obj)) - goto cleanup; - - return obj; - -cleanup: - i915_gem_object_free(obj); - return NULL; -} - -struct drm_i915_gem_object * -i915_gem_object_create_stolen(struct drm_i915_private *dev_priv, - resource_size_t size) -{ - struct drm_i915_gem_object *obj; - struct drm_mm_node *stolen; - int ret; - - if (!drm_mm_initialized(&dev_priv->mm.stolen)) - return NULL; - - if (size == 0) - return NULL; - - stolen = kzalloc(sizeof(*stolen), GFP_KERNEL); - if (!stolen) - return NULL; - - ret = i915_gem_stolen_insert_node(dev_priv, stolen, size, 4096); - if (ret) { - kfree(stolen); - return NULL; - } - - obj = _i915_gem_object_create_stolen(dev_priv, stolen); - if (obj) - return obj; - - i915_gem_stolen_remove_node(dev_priv, stolen); - kfree(stolen); - return NULL; -} - -struct drm_i915_gem_object * -i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv, - resource_size_t stolen_offset, - resource_size_t gtt_offset, - resource_size_t size) -{ - struct i915_ggtt *ggtt = &dev_priv->ggtt; - struct drm_i915_gem_object *obj; - struct drm_mm_node *stolen; - struct i915_vma *vma; - int ret; - - if (!drm_mm_initialized(&dev_priv->mm.stolen)) - return NULL; - - lockdep_assert_held(&dev_priv->drm.struct_mutex); - - DRM_DEBUG_DRIVER("creating preallocated stolen object: stolen_offset=%pa, gtt_offset=%pa, size=%pa\n", - &stolen_offset, >t_offset, &size); - - /* KISS and expect everything to be page-aligned */ - if (WARN_ON(size == 0) || - WARN_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)) || - WARN_ON(!IS_ALIGNED(stolen_offset, I915_GTT_MIN_ALIGNMENT))) - return NULL; - - stolen = kzalloc(sizeof(*stolen), GFP_KERNEL); - if (!stolen) - return NULL; - - stolen->start = stolen_offset; - stolen->size = size; - mutex_lock(&dev_priv->mm.stolen_lock); - ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen); - mutex_unlock(&dev_priv->mm.stolen_lock); - if (ret) { - DRM_DEBUG_DRIVER("failed to allocate stolen space\n"); - kfree(stolen); - return NULL; - } - - obj = _i915_gem_object_create_stolen(dev_priv, stolen); - if (obj == NULL) { - DRM_DEBUG_DRIVER("failed to allocate stolen object\n"); - i915_gem_stolen_remove_node(dev_priv, stolen); - kfree(stolen); - return NULL; - } - - /* Some objects just need physical mem from stolen space */ - if (gtt_offset == I915_GTT_OFFSET_NONE) - return obj; - - ret = i915_gem_object_pin_pages(obj); - if (ret) - goto err; - - vma = i915_vma_instance(obj, &ggtt->vm, NULL); - if (IS_ERR(vma)) { - ret = PTR_ERR(vma); - goto err_pages; - } - - /* To simplify the initialisation sequence between KMS and GTT, - * we allow construction of the stolen object prior to - * setting up the GTT space. The actual reservation will occur - * later. 
- */ - ret = i915_gem_gtt_reserve(&ggtt->vm, &vma->node, - size, gtt_offset, obj->cache_level, - 0); - if (ret) { - DRM_DEBUG_DRIVER("failed to allocate stolen GTT space\n"); - goto err_pages; - } - - GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); - - vma->pages = obj->mm.pages; - vma->flags |= I915_VMA_GLOBAL_BIND; - __i915_vma_set_map_and_fenceable(vma); - - mutex_lock(&ggtt->vm.mutex); - list_move_tail(&vma->vm_link, &ggtt->vm.bound_list); - mutex_unlock(&ggtt->vm.mutex); - - spin_lock(&dev_priv->mm.obj_lock); - list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list); - obj->bind_count++; - spin_unlock(&dev_priv->mm.obj_lock); - - return obj; - -err_pages: - i915_gem_object_unpin_pages(obj); -err: - i915_gem_object_put(obj); - return NULL; -} diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c deleted file mode 100644 index 86d6d92ccbc9..000000000000 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ /dev/null @@ -1,460 +0,0 @@ -/* - * Copyright © 2008 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - * - * Authors: - * Eric Anholt - * - */ - -#include -#include -#include - -#include "gem/i915_gem_ioctls.h" - -#include "i915_drv.h" - -/** - * DOC: buffer object tiling - * - * i915_gem_set_tiling_ioctl() and i915_gem_get_tiling_ioctl() are the userspace - * interface to declare fence register requirements. - * - * In principle GEM doesn't care at all about the internal data layout of an - * object, and hence it also doesn't care about tiling or swizzling. There are two - * exceptions: - * - * - For X and Y tiling the hardware provides detilers for CPU access, so-called - * fences. Since there's only a limited number of them the kernel must manage - * these, and therefore userspace must tell the kernel the object tiling if it - * wants to use fences for detiling. - * - Gen3 and gen4 platforms have a swizzling pattern for tiled objects which - * depends upon the physical page frame number. When swapping such objects the - * page frame number might change and the kernel must be able to fix this up, - * and hence must know the tiling. Note that on a subset of platforms with - * asymmetric memory channel population the swizzling pattern changes in an - * unknown way, and for those the kernel simply forbids swapping completely.
- * - * Since neither of these applies to new tiling layouts on modern platforms like - * W, Ys and Yf tiling, GEM only allows object tiling to be set to X or Y tiled. - * Anything else can be handled in userspace entirely without the kernel's - * involvement. - */ - -/** - * i915_gem_fence_size - required global GTT size for a fence - * @i915: i915 device - * @size: object size - * @tiling: tiling mode - * @stride: tiling stride - * - * Return the required global GTT size for a fence (view of a tiled object), - * taking into account potential fence register mapping. - */ -u32 i915_gem_fence_size(struct drm_i915_private *i915, - u32 size, unsigned int tiling, unsigned int stride) -{ - u32 ggtt_size; - - GEM_BUG_ON(!size); - - if (tiling == I915_TILING_NONE) - return size; - - GEM_BUG_ON(!stride); - - if (INTEL_GEN(i915) >= 4) { - stride *= i915_gem_tile_height(tiling); - GEM_BUG_ON(!IS_ALIGNED(stride, I965_FENCE_PAGE)); - return roundup(size, stride); - } - - /* Previous chips need a power-of-two fence region when tiling */ - if (IS_GEN(i915, 3)) - ggtt_size = 1024*1024; - else - ggtt_size = 512*1024; - - while (ggtt_size < size) - ggtt_size <<= 1; - - return ggtt_size; -} - -/** - * i915_gem_fence_alignment - required global GTT alignment for a fence - * @i915: i915 device - * @size: object size - * @tiling: tiling mode - * @stride: tiling stride - * - * Return the required global GTT alignment for a fence (a view of a tiled - * object), taking into account potential fence register mapping. - */ -u32 i915_gem_fence_alignment(struct drm_i915_private *i915, u32 size, - unsigned int tiling, unsigned int stride) -{ - GEM_BUG_ON(!size); - - /* - * Minimum alignment is 4k (GTT page size), but might be greater - * if a fence register is needed for the object. - */ - if (tiling == I915_TILING_NONE) - return I915_GTT_MIN_ALIGNMENT; - - if (INTEL_GEN(i915) >= 4) - return I965_FENCE_PAGE; - - /* - * Previous chips need to be aligned to the size of the smallest - * fence register that can contain the object.
- */ - return i915_gem_fence_size(i915, size, tiling, stride); -} - -/* Check pitch constriants for all chips & tiling formats */ -static bool -i915_tiling_ok(struct drm_i915_gem_object *obj, - unsigned int tiling, unsigned int stride) -{ - struct drm_i915_private *i915 = to_i915(obj->base.dev); - unsigned int tile_width; - - /* Linear is always fine */ - if (tiling == I915_TILING_NONE) - return true; - - if (tiling > I915_TILING_LAST) - return false; - - /* check maximum stride & object size */ - /* i965+ stores the end address of the gtt mapping in the fence - * reg, so dont bother to check the size */ - if (INTEL_GEN(i915) >= 7) { - if (stride / 128 > GEN7_FENCE_MAX_PITCH_VAL) - return false; - } else if (INTEL_GEN(i915) >= 4) { - if (stride / 128 > I965_FENCE_MAX_PITCH_VAL) - return false; - } else { - if (stride > 8192) - return false; - - if (!is_power_of_2(stride)) - return false; - } - - if (IS_GEN(i915, 2) || - (tiling == I915_TILING_Y && HAS_128_BYTE_Y_TILING(i915))) - tile_width = 128; - else - tile_width = 512; - - if (!stride || !IS_ALIGNED(stride, tile_width)) - return false; - - return true; -} - -static bool i915_vma_fence_prepare(struct i915_vma *vma, - int tiling_mode, unsigned int stride) -{ - struct drm_i915_private *i915 = vma->vm->i915; - u32 size, alignment; - - if (!i915_vma_is_map_and_fenceable(vma)) - return true; - - size = i915_gem_fence_size(i915, vma->size, tiling_mode, stride); - if (vma->node.size < size) - return false; - - alignment = i915_gem_fence_alignment(i915, vma->size, tiling_mode, stride); - if (!IS_ALIGNED(vma->node.start, alignment)) - return false; - - return true; -} - -/* Make the current GTT allocation valid for the change in tiling. */ -static int -i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj, - int tiling_mode, unsigned int stride) -{ - struct i915_vma *vma; - int ret; - - if (tiling_mode == I915_TILING_NONE) - return 0; - - for_each_ggtt_vma(vma, obj) { - if (i915_vma_fence_prepare(vma, tiling_mode, stride)) - continue; - - ret = i915_vma_unbind(vma); - if (ret) - return ret; - } - - return 0; -} - -int -i915_gem_object_set_tiling(struct drm_i915_gem_object *obj, - unsigned int tiling, unsigned int stride) -{ - struct drm_i915_private *i915 = to_i915(obj->base.dev); - struct i915_vma *vma; - int err; - - /* Make sure we don't cross-contaminate obj->tiling_and_stride */ - BUILD_BUG_ON(I915_TILING_LAST & STRIDE_MASK); - - GEM_BUG_ON(!i915_tiling_ok(obj, tiling, stride)); - GEM_BUG_ON(!stride ^ (tiling == I915_TILING_NONE)); - lockdep_assert_held(&i915->drm.struct_mutex); - - if ((tiling | stride) == obj->tiling_and_stride) - return 0; - - if (i915_gem_object_is_framebuffer(obj)) - return -EBUSY; - - /* We need to rebind the object if its current allocation - * no longer meets the alignment restrictions for its new - * tiling mode. Otherwise we can just leave it alone, but - * need to ensure that any fence register is updated before - * the next fenced (either through the GTT or by the BLT unit - * on older GPUs) access. - * - * After updating the tiling parameters, we then flag whether - * we need to update an associated fence register. Note this - * has to also include the unfenced register the GPU uses - * whilst executing a fenced command for an untiled object. - */ - - err = i915_gem_object_fence_prepare(obj, tiling, stride); - if (err) - return err; - - i915_gem_object_lock(obj); - if (i915_gem_object_is_framebuffer(obj)) { - i915_gem_object_unlock(obj); - return -EBUSY; - } - - /* If the memory has unknown (i.e. 
varying) swizzling, we pin the - * pages to prevent them being swapped out and causing corruption - * due to the change in swizzling. - */ - mutex_lock(&obj->mm.lock); - if (i915_gem_object_has_pages(obj) && - obj->mm.madv == I915_MADV_WILLNEED && - i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) { - if (tiling == I915_TILING_NONE) { - GEM_BUG_ON(!obj->mm.quirked); - __i915_gem_object_unpin_pages(obj); - obj->mm.quirked = false; - } - if (!i915_gem_object_is_tiled(obj)) { - GEM_BUG_ON(obj->mm.quirked); - __i915_gem_object_pin_pages(obj); - obj->mm.quirked = true; - } - } - mutex_unlock(&obj->mm.lock); - - for_each_ggtt_vma(vma, obj) { - vma->fence_size = - i915_gem_fence_size(i915, vma->size, tiling, stride); - vma->fence_alignment = - i915_gem_fence_alignment(i915, - vma->size, tiling, stride); - - if (vma->fence) - vma->fence->dirty = true; - } - - obj->tiling_and_stride = tiling | stride; - i915_gem_object_unlock(obj); - - /* Force the fence to be reacquired for GTT access */ - i915_gem_object_release_mmap(obj); - - /* Try to preallocate memory required to save swizzling on put-pages */ - if (i915_gem_object_needs_bit17_swizzle(obj)) { - if (!obj->bit_17) { - obj->bit_17 = bitmap_zalloc(obj->base.size >> PAGE_SHIFT, - GFP_KERNEL); - } - } else { - bitmap_free(obj->bit_17); - obj->bit_17 = NULL; - } - - return 0; -} - -/** - * i915_gem_set_tiling_ioctl - IOCTL handler to set tiling mode - * @dev: DRM device - * @data: data pointer for the ioctl - * @file: DRM file for the ioctl call - * - * Sets the tiling mode of an object, returning the required swizzling of - * bit 6 of addresses in the object. - * - * Called by the user via ioctl. - * - * Returns: - * Zero on success, negative errno on failure. - */ -int -i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data, - struct drm_file *file) -{ - struct drm_i915_gem_set_tiling *args = data; - struct drm_i915_gem_object *obj; - int err; - - obj = i915_gem_object_lookup(file, args->handle); - if (!obj) - return -ENOENT; - - /* - * The tiling mode of proxy objects is handled by its generator, and - * not allowed to be changed by userspace. - */ - if (i915_gem_object_is_proxy(obj)) { - err = -ENXIO; - goto err; - } - - if (!i915_tiling_ok(obj, args->tiling_mode, args->stride)) { - err = -EINVAL; - goto err; - } - - if (args->tiling_mode == I915_TILING_NONE) { - args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; - args->stride = 0; - } else { - if (args->tiling_mode == I915_TILING_X) - args->swizzle_mode = to_i915(dev)->mm.bit_6_swizzle_x; - else - args->swizzle_mode = to_i915(dev)->mm.bit_6_swizzle_y; - - /* Hide bit 17 swizzling from the user. This prevents old Mesa - * from aborting the application on sw fallbacks to bit 17, - * and we use the pread/pwrite bit17 paths to swizzle for it. - * If there was a user that was relying on the swizzle - * information for drm_intel_bo_map()ed reads/writes this would - * break it, but we don't have any of those. - */ - if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17) - args->swizzle_mode = I915_BIT_6_SWIZZLE_9; - if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17) - args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10; - - /* If we can't handle the swizzling, make it untiled. 
*/ - if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) { - args->tiling_mode = I915_TILING_NONE; - args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; - args->stride = 0; - } - } - - err = mutex_lock_interruptible(&dev->struct_mutex); - if (err) - goto err; - - err = i915_gem_object_set_tiling(obj, args->tiling_mode, args->stride); - mutex_unlock(&dev->struct_mutex); - - /* We have to maintain this existing ABI... */ - args->stride = i915_gem_object_get_stride(obj); - args->tiling_mode = i915_gem_object_get_tiling(obj); - -err: - i915_gem_object_put(obj); - return err; -} - -/** - * i915_gem_get_tiling_ioctl - IOCTL handler to get tiling mode - * @dev: DRM device - * @data: data pointer for the ioctl - * @file: DRM file for the ioctl call - * - * Returns the current tiling mode and required bit 6 swizzling for the object. - * - * Called by the user via ioctl. - * - * Returns: - * Zero on success, negative errno on failure. - */ -int -i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data, - struct drm_file *file) -{ - struct drm_i915_gem_get_tiling *args = data; - struct drm_i915_private *dev_priv = to_i915(dev); - struct drm_i915_gem_object *obj; - int err = -ENOENT; - - rcu_read_lock(); - obj = i915_gem_object_lookup_rcu(file, args->handle); - if (obj) { - args->tiling_mode = - READ_ONCE(obj->tiling_and_stride) & TILING_MASK; - err = 0; - } - rcu_read_unlock(); - if (unlikely(err)) - return err; - - switch (args->tiling_mode) { - case I915_TILING_X: - args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x; - break; - case I915_TILING_Y: - args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y; - break; - default: - case I915_TILING_NONE: - args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; - break; - } - - /* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */ - if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) - args->phys_swizzle_mode = I915_BIT_6_SWIZZLE_UNKNOWN; - else - args->phys_swizzle_mode = args->swizzle_mode; - if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17) - args->swizzle_mode = I915_BIT_6_SWIZZLE_9; - if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17) - args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10; - - return 0; -} diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c deleted file mode 100644 index 2c1b6bb7a040..000000000000 --- a/drivers/gpu/drm/i915/i915_gem_userptr.c +++ /dev/null @@ -1,851 +0,0 @@ -/* - * Copyright © 2012-2014 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - * - */ - -#include -#include -#include -#include -#include - -#include - -#include "gem/i915_gem_ioctls.h" - -#include "i915_drv.h" -#include "i915_trace.h" -#include "intel_drv.h" - -struct i915_mm_struct { - struct mm_struct *mm; - struct drm_i915_private *i915; - struct i915_mmu_notifier *mn; - struct hlist_node node; - struct kref kref; - struct work_struct work; -}; - -#if defined(CONFIG_MMU_NOTIFIER) -#include - -struct i915_mmu_notifier { - spinlock_t lock; - struct hlist_node node; - struct mmu_notifier mn; - struct rb_root_cached objects; - struct i915_mm_struct *mm; -}; - -struct i915_mmu_object { - struct i915_mmu_notifier *mn; - struct drm_i915_gem_object *obj; - struct interval_tree_node it; -}; - -static void add_object(struct i915_mmu_object *mo) -{ - GEM_BUG_ON(!RB_EMPTY_NODE(&mo->it.rb)); - interval_tree_insert(&mo->it, &mo->mn->objects); -} - -static void del_object(struct i915_mmu_object *mo) -{ - if (RB_EMPTY_NODE(&mo->it.rb)) - return; - - interval_tree_remove(&mo->it, &mo->mn->objects); - RB_CLEAR_NODE(&mo->it.rb); -} - -static void -__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj, bool value) -{ - struct i915_mmu_object *mo = obj->userptr.mmu_object; - - /* - * During mm_invalidate_range we need to cancel any userptr that - * overlaps the range being invalidated. Doing so requires the - * struct_mutex, and that risks recursion. In order to cause - * recursion, the user must alias the userptr address space with - * a GTT mmapping (possible with a MAP_FIXED) - then when we have - * to invalidate that mmaping, mm_invalidate_range is called with - * the userptr address *and* the struct_mutex held. To prevent that - * we set a flag under the i915_mmu_notifier spinlock to indicate - * whether this object is valid. - */ - if (!mo) - return; - - spin_lock(&mo->mn->lock); - if (value) - add_object(mo); - else - del_object(mo); - spin_unlock(&mo->mn->lock); -} - -static int -userptr_mn_invalidate_range_start(struct mmu_notifier *_mn, - const struct mmu_notifier_range *range) -{ - struct i915_mmu_notifier *mn = - container_of(_mn, struct i915_mmu_notifier, mn); - struct interval_tree_node *it; - struct mutex *unlock = NULL; - unsigned long end; - int ret = 0; - - if (RB_EMPTY_ROOT(&mn->objects.rb_root)) - return 0; - - /* interval ranges are inclusive, but invalidate range is exclusive */ - end = range->end - 1; - - spin_lock(&mn->lock); - it = interval_tree_iter_first(&mn->objects, range->start, end); - while (it) { - struct drm_i915_gem_object *obj; - - if (!mmu_notifier_range_blockable(range)) { - ret = -EAGAIN; - break; - } - - /* - * The mmu_object is released late when destroying the - * GEM object so it is entirely possible to gain a - * reference on an object in the process of being freed - * since our serialisation is via the spinlock and not - * the struct_mutex - and consequently use it after it - * is freed and then double free it. To prevent that - * use-after-free we only acquire a reference on the - * object if it is not in the process of being destroyed. 
- */ - obj = container_of(it, struct i915_mmu_object, it)->obj; - if (!kref_get_unless_zero(&obj->base.refcount)) { - it = interval_tree_iter_next(it, range->start, end); - continue; - } - spin_unlock(&mn->lock); - - if (!unlock) { - unlock = &mn->mm->i915->drm.struct_mutex; - - switch (mutex_trylock_recursive(unlock)) { - default: - case MUTEX_TRYLOCK_FAILED: - if (mutex_lock_killable_nested(unlock, I915_MM_SHRINKER)) { - i915_gem_object_put(obj); - return -EINTR; - } - /* fall through */ - case MUTEX_TRYLOCK_SUCCESS: - break; - - case MUTEX_TRYLOCK_RECURSIVE: - unlock = ERR_PTR(-EEXIST); - break; - } - } - - ret = i915_gem_object_unbind(obj); - if (ret == 0) - ret = __i915_gem_object_put_pages(obj, I915_MM_SHRINKER); - i915_gem_object_put(obj); - if (ret) - goto unlock; - - spin_lock(&mn->lock); - - /* - * As we do not (yet) protect the mmu from concurrent insertion - * over this range, there is no guarantee that this search will - * terminate given a pathologic workload. - */ - it = interval_tree_iter_first(&mn->objects, range->start, end); - } - spin_unlock(&mn->lock); - -unlock: - if (!IS_ERR_OR_NULL(unlock)) - mutex_unlock(unlock); - - return ret; - -} - -static const struct mmu_notifier_ops i915_gem_userptr_notifier = { - .invalidate_range_start = userptr_mn_invalidate_range_start, -}; - -static struct i915_mmu_notifier * -i915_mmu_notifier_create(struct i915_mm_struct *mm) -{ - struct i915_mmu_notifier *mn; - - mn = kmalloc(sizeof(*mn), GFP_KERNEL); - if (mn == NULL) - return ERR_PTR(-ENOMEM); - - spin_lock_init(&mn->lock); - mn->mn.ops = &i915_gem_userptr_notifier; - mn->objects = RB_ROOT_CACHED; - mn->mm = mm; - - return mn; -} - -static void -i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj) -{ - struct i915_mmu_object *mo; - - mo = fetch_and_zero(&obj->userptr.mmu_object); - if (!mo) - return; - - spin_lock(&mo->mn->lock); - del_object(mo); - spin_unlock(&mo->mn->lock); - kfree(mo); -} - -static struct i915_mmu_notifier * -i915_mmu_notifier_find(struct i915_mm_struct *mm) -{ - struct i915_mmu_notifier *mn; - int err = 0; - - mn = mm->mn; - if (mn) - return mn; - - mn = i915_mmu_notifier_create(mm); - if (IS_ERR(mn)) - err = PTR_ERR(mn); - - down_write(&mm->mm->mmap_sem); - mutex_lock(&mm->i915->mm_lock); - if (mm->mn == NULL && !err) { - /* Protected by mmap_sem (write-lock) */ - err = __mmu_notifier_register(&mn->mn, mm->mm); - if (!err) { - /* Protected by mm_lock */ - mm->mn = fetch_and_zero(&mn); - } - } else if (mm->mn) { - /* - * Someone else raced and successfully installed the mmu - * notifier, we can cancel our own errors. - */ - err = 0; - } - mutex_unlock(&mm->i915->mm_lock); - up_write(&mm->mm->mmap_sem); - - if (mn && !IS_ERR(mn)) - kfree(mn); - - return err ? ERR_PTR(err) : mm->mn; -} - -static int -i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj, - unsigned flags) -{ - struct i915_mmu_notifier *mn; - struct i915_mmu_object *mo; - - if (flags & I915_USERPTR_UNSYNCHRONIZED) - return capable(CAP_SYS_ADMIN) ? 
0 : -EPERM; - - if (WARN_ON(obj->userptr.mm == NULL)) - return -EINVAL; - - mn = i915_mmu_notifier_find(obj->userptr.mm); - if (IS_ERR(mn)) - return PTR_ERR(mn); - - mo = kzalloc(sizeof(*mo), GFP_KERNEL); - if (!mo) - return -ENOMEM; - - mo->mn = mn; - mo->obj = obj; - mo->it.start = obj->userptr.ptr; - mo->it.last = obj->userptr.ptr + obj->base.size - 1; - RB_CLEAR_NODE(&mo->it.rb); - - obj->userptr.mmu_object = mo; - return 0; -} - -static void -i915_mmu_notifier_free(struct i915_mmu_notifier *mn, - struct mm_struct *mm) -{ - if (mn == NULL) - return; - - mmu_notifier_unregister(&mn->mn, mm); - kfree(mn); -} - -#else - -static void -__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj, bool value) -{ -} - -static void -i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj) -{ -} - -static int -i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj, - unsigned flags) -{ - if ((flags & I915_USERPTR_UNSYNCHRONIZED) == 0) - return -ENODEV; - - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - - return 0; -} - -static void -i915_mmu_notifier_free(struct i915_mmu_notifier *mn, - struct mm_struct *mm) -{ -} - -#endif - -static struct i915_mm_struct * -__i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real) -{ - struct i915_mm_struct *mm; - - /* Protected by dev_priv->mm_lock */ - hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real) - if (mm->mm == real) - return mm; - - return NULL; -} - -static int -i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj) -{ - struct drm_i915_private *dev_priv = to_i915(obj->base.dev); - struct i915_mm_struct *mm; - int ret = 0; - - /* During release of the GEM object we hold the struct_mutex. This - * precludes us from calling mmput() at that time as that may be - * the last reference and so call exit_mmap(). exit_mmap() will - * attempt to reap the vma, and if we were holding a GTT mmap - * would then call drm_gem_vm_close() and attempt to reacquire - * the struct mutex. So in order to avoid that recursion, we have - * to defer releasing the mm reference until after we drop the - * struct_mutex, i.e. we need to schedule a worker to do the clean - * up. 
- */ - mutex_lock(&dev_priv->mm_lock); - mm = __i915_mm_struct_find(dev_priv, current->mm); - if (mm == NULL) { - mm = kmalloc(sizeof(*mm), GFP_KERNEL); - if (mm == NULL) { - ret = -ENOMEM; - goto out; - } - - kref_init(&mm->kref); - mm->i915 = to_i915(obj->base.dev); - - mm->mm = current->mm; - mmgrab(current->mm); - - mm->mn = NULL; - - /* Protected by dev_priv->mm_lock */ - hash_add(dev_priv->mm_structs, - &mm->node, (unsigned long)mm->mm); - } else - kref_get(&mm->kref); - - obj->userptr.mm = mm; -out: - mutex_unlock(&dev_priv->mm_lock); - return ret; -} - -static void -__i915_mm_struct_free__worker(struct work_struct *work) -{ - struct i915_mm_struct *mm = container_of(work, typeof(*mm), work); - i915_mmu_notifier_free(mm->mn, mm->mm); - mmdrop(mm->mm); - kfree(mm); -} - -static void -__i915_mm_struct_free(struct kref *kref) -{ - struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref); - - /* Protected by dev_priv->mm_lock */ - hash_del(&mm->node); - mutex_unlock(&mm->i915->mm_lock); - - INIT_WORK(&mm->work, __i915_mm_struct_free__worker); - queue_work(mm->i915->mm.userptr_wq, &mm->work); -} - -static void -i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj) -{ - if (obj->userptr.mm == NULL) - return; - - kref_put_mutex(&obj->userptr.mm->kref, - __i915_mm_struct_free, - &to_i915(obj->base.dev)->mm_lock); - obj->userptr.mm = NULL; -} - -struct get_pages_work { - struct work_struct work; - struct drm_i915_gem_object *obj; - struct task_struct *task; -}; - -static struct sg_table * -__i915_gem_userptr_alloc_pages(struct drm_i915_gem_object *obj, - struct page **pvec, int num_pages) -{ - unsigned int max_segment = i915_sg_segment_size(); - struct sg_table *st; - unsigned int sg_page_sizes; - int ret; - - st = kmalloc(sizeof(*st), GFP_KERNEL); - if (!st) - return ERR_PTR(-ENOMEM); - -alloc_table: - ret = __sg_alloc_table_from_pages(st, pvec, num_pages, - 0, num_pages << PAGE_SHIFT, - max_segment, - GFP_KERNEL); - if (ret) { - kfree(st); - return ERR_PTR(ret); - } - - ret = i915_gem_gtt_prepare_pages(obj, st); - if (ret) { - sg_free_table(st); - - if (max_segment > PAGE_SIZE) { - max_segment = PAGE_SIZE; - goto alloc_table; - } - - kfree(st); - return ERR_PTR(ret); - } - - sg_page_sizes = i915_sg_page_sizes(st->sgl); - - __i915_gem_object_set_pages(obj, st, sg_page_sizes); - - return st; -} - -static void -__i915_gem_userptr_get_pages_worker(struct work_struct *_work) -{ - struct get_pages_work *work = container_of(_work, typeof(*work), work); - struct drm_i915_gem_object *obj = work->obj; - const int npages = obj->base.size >> PAGE_SHIFT; - struct page **pvec; - int pinned, ret; - - ret = -ENOMEM; - pinned = 0; - - pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL); - if (pvec != NULL) { - struct mm_struct *mm = obj->userptr.mm->mm; - unsigned int flags = 0; - - if (!i915_gem_object_is_readonly(obj)) - flags |= FOLL_WRITE; - - ret = -EFAULT; - if (mmget_not_zero(mm)) { - down_read(&mm->mmap_sem); - while (pinned < npages) { - ret = get_user_pages_remote - (work->task, mm, - obj->userptr.ptr + pinned * PAGE_SIZE, - npages - pinned, - flags, - pvec + pinned, NULL, NULL); - if (ret < 0) - break; - - pinned += ret; - } - up_read(&mm->mmap_sem); - mmput(mm); - } - } - - mutex_lock(&obj->mm.lock); - if (obj->userptr.work == &work->work) { - struct sg_table *pages = ERR_PTR(ret); - - if (pinned == npages) { - pages = __i915_gem_userptr_alloc_pages(obj, pvec, - npages); - if (!IS_ERR(pages)) { - pinned = 0; - pages = NULL; - } - } - - obj->userptr.work = 
ERR_CAST(pages); - if (IS_ERR(pages)) - __i915_gem_userptr_set_active(obj, false); - } - mutex_unlock(&obj->mm.lock); - - release_pages(pvec, pinned); - kvfree(pvec); - - i915_gem_object_put(obj); - put_task_struct(work->task); - kfree(work); -} - -static struct sg_table * -__i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj) -{ - struct get_pages_work *work; - - /* Spawn a worker so that we can acquire the - * user pages without holding our mutex. Access - * to the user pages requires mmap_sem, and we have - * a strict lock ordering of mmap_sem, struct_mutex - - * we already hold struct_mutex here and so cannot - * call gup without encountering a lock inversion. - * - * Userspace will keep on repeating the operation - * (thanks to EAGAIN) until either we hit the fast - * path or the worker completes. If the worker is - * cancelled or superseded, the task is still run - * but the results ignored. (This leads to - * complications that we may have a stray object - * refcount that we need to be wary of when - * checking for existing objects during creation.) - * If the worker encounters an error, it reports - * that error back to this function through - * obj->userptr.work = ERR_PTR. - */ - work = kmalloc(sizeof(*work), GFP_KERNEL); - if (work == NULL) - return ERR_PTR(-ENOMEM); - - obj->userptr.work = &work->work; - - work->obj = i915_gem_object_get(obj); - - work->task = current; - get_task_struct(work->task); - - INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker); - queue_work(to_i915(obj->base.dev)->mm.userptr_wq, &work->work); - - return ERR_PTR(-EAGAIN); -} - -static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj) -{ - const int num_pages = obj->base.size >> PAGE_SHIFT; - struct mm_struct *mm = obj->userptr.mm->mm; - struct page **pvec; - struct sg_table *pages; - bool active; - int pinned; - - /* If userspace should engineer that these pages are replaced in - * the vma between us binding this page into the GTT and completion - * of rendering... Their loss. If they change the mapping of their - * pages they need to create a new bo to point to the new vma. - * - * However, that still leaves open the possibility of the vma - * being copied upon fork. Which falls under the same userspace - * synchronisation issue as a regular bo, except that this time - * the process may not be expecting that a particular piece of - * memory is tied to the GPU. - * - * Fortunately, we can hook into the mmu_notifier in order to - * discard the page references prior to anything nasty happening - * to the vma (discard or cloning) which should prevent the more - * egregious cases from causing harm. 
- */ - - if (obj->userptr.work) { - /* active flag should still be held for the pending work */ - if (IS_ERR(obj->userptr.work)) - return PTR_ERR(obj->userptr.work); - else - return -EAGAIN; - } - - pvec = NULL; - pinned = 0; - - if (mm == current->mm) { - pvec = kvmalloc_array(num_pages, sizeof(struct page *), - GFP_KERNEL | - __GFP_NORETRY | - __GFP_NOWARN); - if (pvec) /* defer to worker if malloc fails */ - pinned = __get_user_pages_fast(obj->userptr.ptr, - num_pages, - !i915_gem_object_is_readonly(obj), - pvec); - } - - active = false; - if (pinned < 0) { - pages = ERR_PTR(pinned); - pinned = 0; - } else if (pinned < num_pages) { - pages = __i915_gem_userptr_get_pages_schedule(obj); - active = pages == ERR_PTR(-EAGAIN); - } else { - pages = __i915_gem_userptr_alloc_pages(obj, pvec, num_pages); - active = !IS_ERR(pages); - } - if (active) - __i915_gem_userptr_set_active(obj, true); - - if (IS_ERR(pages)) - release_pages(pvec, pinned); - kvfree(pvec); - - return PTR_ERR_OR_ZERO(pages); -} - -static void -i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj, - struct sg_table *pages) -{ - struct sgt_iter sgt_iter; - struct page *page; - - /* Cancel any inflight work and force them to restart their gup */ - obj->userptr.work = NULL; - __i915_gem_userptr_set_active(obj, false); - if (!pages) - return; - - __i915_gem_object_release_shmem(obj, pages, true); - i915_gem_gtt_finish_pages(obj, pages); - - for_each_sgt_page(page, sgt_iter, pages) { - if (obj->mm.dirty) - set_page_dirty(page); - - mark_page_accessed(page); - put_page(page); - } - obj->mm.dirty = false; - - sg_free_table(pages); - kfree(pages); -} - -static void -i915_gem_userptr_release(struct drm_i915_gem_object *obj) -{ - i915_gem_userptr_release__mmu_notifier(obj); - i915_gem_userptr_release__mm_struct(obj); -} - -static int -i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj) -{ - if (obj->userptr.mmu_object) - return 0; - - return i915_gem_userptr_init__mmu_notifier(obj, 0); -} - -static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = { - .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | - I915_GEM_OBJECT_IS_SHRINKABLE | - I915_GEM_OBJECT_ASYNC_CANCEL, - .get_pages = i915_gem_userptr_get_pages, - .put_pages = i915_gem_userptr_put_pages, - .dmabuf_export = i915_gem_userptr_dmabuf_export, - .release = i915_gem_userptr_release, -}; - -/* - * Creates a new mm object that wraps some normal memory from the process - * context - user memory. - * - * We impose several restrictions upon the memory being mapped - * into the GPU. - * 1. It must be page aligned (both start/end addresses, i.e ptr and size). - * 2. It must be normal system memory, not a pointer into another map of IO - * space (e.g. it must not be a GTT mmapping of another object). - * 3. We only allow a bo as large as we could in theory map into the GTT, - * that is we limit the size to the total size of the GTT. - * 4. The bo is marked as being snoopable. The backing pages are left - * accessible directly by the CPU, but reads and writes by the GPU may - * incur the cost of a snoop (unless you have an LLC architecture). - * - * Synchronisation between multiple users and the GPU is left to userspace - * through the normal set-domain-ioctl. The kernel will enforce that the - * GPU relinquishes the VMA before it is returned back to the system - * i.e. upon free(), munmap() or process termination. 
However, the userspace - * malloc() library may not immediately relinquish the VMA after free() and - * instead reuse it whilst the GPU is still reading and writing to the VMA. - * Caveat emptor. - * - * Also note, that the object created here is not currently a "first class" - * object, in that several ioctls are banned. These are the CPU access - * ioctls: mmap(), pwrite and pread. In practice, you are expected to use - * direct access via your pointer rather than use those ioctls. Another - * restriction is that we do not allow userptr surfaces to be pinned to the - * hardware and so we reject any attempt to create a framebuffer out of a - * userptr. - * - * If you think this is a good interface to use to pass GPU memory between - * drivers, please use dma-buf instead. In fact, wherever possible use - * dma-buf instead. - */ -int -i915_gem_userptr_ioctl(struct drm_device *dev, - void *data, - struct drm_file *file) -{ - struct drm_i915_private *dev_priv = to_i915(dev); - struct drm_i915_gem_userptr *args = data; - struct drm_i915_gem_object *obj; - int ret; - u32 handle; - - if (!HAS_LLC(dev_priv) && !HAS_SNOOP(dev_priv)) { - /* We cannot support coherent userptr objects on hw without - * LLC and broken snooping. - */ - return -ENODEV; - } - - if (args->flags & ~(I915_USERPTR_READ_ONLY | - I915_USERPTR_UNSYNCHRONIZED)) - return -EINVAL; - - if (!args->user_size) - return -EINVAL; - - if (offset_in_page(args->user_ptr | args->user_size)) - return -EINVAL; - - if (!access_ok((char __user *)(unsigned long)args->user_ptr, args->user_size)) - return -EFAULT; - - if (args->flags & I915_USERPTR_READ_ONLY) { - struct i915_hw_ppgtt *ppgtt; - - /* - * On almost all of the older hw, we cannot tell the GPU that - * a page is readonly. - */ - ppgtt = dev_priv->kernel_context->ppgtt; - if (!ppgtt || !ppgtt->vm.has_read_only) - return -ENODEV; - } - - obj = i915_gem_object_alloc(); - if (obj == NULL) - return -ENOMEM; - - drm_gem_private_object_init(dev, &obj->base, args->user_size); - i915_gem_object_init(obj, &i915_gem_userptr_ops); - obj->read_domains = I915_GEM_DOMAIN_CPU; - obj->write_domain = I915_GEM_DOMAIN_CPU; - i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC); - - obj->userptr.ptr = args->user_ptr; - if (args->flags & I915_USERPTR_READ_ONLY) - i915_gem_object_set_readonly(obj); - - /* And keep a pointer to the current->mm for resolving the user pages - * at binding. This means that we need to hook into the mmu_notifier - * in order to detect if the mmu is destroyed. 
- */ - ret = i915_gem_userptr_init__mm_struct(obj); - if (ret == 0) - ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags); - if (ret == 0) - ret = drm_gem_handle_create(file, &obj->base, &handle); - - /* drop reference from allocate - handle holds it now */ - i915_gem_object_put(obj); - if (ret) - return ret; - - args->handle = handle; - return 0; -} - -int i915_gem_init_userptr(struct drm_i915_private *dev_priv) -{ - mutex_init(&dev_priv->mm_lock); - hash_init(dev_priv->mm_structs); - - dev_priv->mm.userptr_wq = - alloc_workqueue("i915-userptr-acquire", - WQ_HIGHPRI | WQ_UNBOUND, - 0); - if (!dev_priv->mm.userptr_wq) - return -ENOMEM; - - return 0; -} - -void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv) -{ - destroy_workqueue(dev_priv->mm.userptr_wq); -} diff --git a/drivers/gpu/drm/i915/i915_gemfs.c b/drivers/gpu/drm/i915/i915_gemfs.c deleted file mode 100644 index 888b7d3f04c3..000000000000 --- a/drivers/gpu/drm/i915/i915_gemfs.c +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Copyright © 2017 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - * - */ - -#include -#include -#include - -#include "i915_drv.h" -#include "i915_gemfs.h" - -int i915_gemfs_init(struct drm_i915_private *i915) -{ - struct file_system_type *type; - struct vfsmount *gemfs; - - type = get_fs_type("tmpfs"); - if (!type) - return -ENODEV; - - gemfs = kern_mount(type); - if (IS_ERR(gemfs)) - return PTR_ERR(gemfs); - - /* - * Enable huge-pages for objects that are at least HPAGE_PMD_SIZE, most - * likely 2M. Note that within_size may overallocate huge-pages, if say - * we allocate an object of size 2M + 4K, we may get 2M + 2M, but under - * memory pressure shmem should split any huge-pages which can be - * shrunk. - */ - - if (has_transparent_hugepage()) { - struct super_block *sb = gemfs->mnt_sb; - /* FIXME: Disabled until we get W/A for read BW issue. 
*/ - char options[] = "huge=never"; - int flags = 0; - int err; - - err = sb->s_op->remount_fs(sb, &flags, options); - if (err) { - kern_unmount(gemfs); - return err; - } - } - - i915->mm.gemfs = gemfs; - - return 0; -} - -void i915_gemfs_fini(struct drm_i915_private *i915) -{ - kern_unmount(i915->mm.gemfs); -} diff --git a/drivers/gpu/drm/i915/i915_gemfs.h b/drivers/gpu/drm/i915/i915_gemfs.h deleted file mode 100644 index cca8bdc5b93e..000000000000 --- a/drivers/gpu/drm/i915/i915_gemfs.h +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Copyright © 2017 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - * - */ - -#ifndef __I915_GEMFS_H__ -#define __I915_GEMFS_H__ - -struct drm_i915_private; - -int i915_gemfs_init(struct drm_i915_private *i915); - -void i915_gemfs_fini(struct drm_i915_private *i915); - -#endif diff --git a/drivers/gpu/drm/i915/i915_globals.c b/drivers/gpu/drm/i915/i915_globals.c index db52a58eadcc..2d5fcba98841 100644 --- a/drivers/gpu/drm/i915/i915_globals.c +++ b/drivers/gpu/drm/i915/i915_globals.c @@ -8,7 +8,7 @@ #include #include "i915_active.h" -#include "i915_gem_context.h" +#include "gem/i915_gem_context.h" #include "gem/i915_gem_object.h" #include "i915_globals.h" #include "i915_request.h" diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 4f85cbdddb0d..c86865a34972 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -36,6 +36,8 @@ #include +#include "gem/i915_gem_context.h" + #include "i915_drv.h" #include "i915_gpu_error.h" #include "intel_atomic.h" diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 379fd89a180f..2e33a9b4eae7 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -195,6 +195,8 @@ #include #include +#include "gem/i915_gem_context.h" +#include "gem/i915_gem_pm.h" #include "gt/intel_lrc_reg.h" #include "i915_drv.h" diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 18b34b0bf872..da1e6984a8cc 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -29,6 +29,9 @@ #include #include +#include "gem/i915_gem_context.h" +#include "gt/intel_context.h" + #include "i915_active.h" #include "i915_drv.h" #include "i915_globals.h" diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 6a063d3fccee..f454cf2450b5 100644 --- 
a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -45,7 +45,6 @@ #include #include "i915_drv.h" -#include "i915_gem_clflush.h" #include "i915_trace.h" #include "intel_acpi.h" #include "intel_atomic.h" diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c index ffdab22db2b0..a4f98ccef0fe 100644 --- a/drivers/gpu/drm/i915/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/intel_guc_submission.c @@ -26,6 +26,8 @@ #include "gt/intel_engine_pm.h" #include "gt/intel_lrc_reg.h" +#include "gt/intel_context.h" +#include "gem/i915_gem_context.h" #include "intel_guc_submission.h" #include "i915_drv.h" diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index b64b45d9b538..80dcd879fc58 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -29,6 +29,8 @@ #include #include +#include "gem/i915_gem_pm.h" + #include "i915_drv.h" #include "i915_reg.h" #include "intel_drv.h" diff --git a/drivers/gpu/drm/i915/selftests/huge_gem_object.c b/drivers/gpu/drm/i915/selftests/huge_gem_object.c deleted file mode 100644 index 419fd4d6a8f0..000000000000 --- a/drivers/gpu/drm/i915/selftests/huge_gem_object.c +++ /dev/null @@ -1,139 +0,0 @@ -/* - * Copyright © 2016 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. 
- * - */ - -#include "huge_gem_object.h" - -static void huge_free_pages(struct drm_i915_gem_object *obj, - struct sg_table *pages) -{ - unsigned long nreal = obj->scratch / PAGE_SIZE; - struct scatterlist *sg; - - for (sg = pages->sgl; sg && nreal--; sg = __sg_next(sg)) - __free_page(sg_page(sg)); - - sg_free_table(pages); - kfree(pages); -} - -static int huge_get_pages(struct drm_i915_gem_object *obj) -{ -#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY) - const unsigned long nreal = obj->scratch / PAGE_SIZE; - const unsigned long npages = obj->base.size / PAGE_SIZE; - struct scatterlist *sg, *src, *end; - struct sg_table *pages; - unsigned long n; - - pages = kmalloc(sizeof(*pages), GFP); - if (!pages) - return -ENOMEM; - - if (sg_alloc_table(pages, npages, GFP)) { - kfree(pages); - return -ENOMEM; - } - - sg = pages->sgl; - for (n = 0; n < nreal; n++) { - struct page *page; - - page = alloc_page(GFP | __GFP_HIGHMEM); - if (!page) { - sg_mark_end(sg); - goto err; - } - - sg_set_page(sg, page, PAGE_SIZE, 0); - sg = __sg_next(sg); - } - if (nreal < npages) { - for (end = sg, src = pages->sgl; sg; sg = __sg_next(sg)) { - sg_set_page(sg, sg_page(src), PAGE_SIZE, 0); - src = __sg_next(src); - if (src == end) - src = pages->sgl; - } - } - - if (i915_gem_gtt_prepare_pages(obj, pages)) - goto err; - - __i915_gem_object_set_pages(obj, pages, PAGE_SIZE); - - return 0; - -err: - huge_free_pages(obj, pages); - - return -ENOMEM; -#undef GFP -} - -static void huge_put_pages(struct drm_i915_gem_object *obj, - struct sg_table *pages) -{ - i915_gem_gtt_finish_pages(obj, pages); - huge_free_pages(obj, pages); - - obj->mm.dirty = false; -} - -static const struct drm_i915_gem_object_ops huge_ops = { - .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | - I915_GEM_OBJECT_IS_SHRINKABLE, - .get_pages = huge_get_pages, - .put_pages = huge_put_pages, -}; - -struct drm_i915_gem_object * -huge_gem_object(struct drm_i915_private *i915, - phys_addr_t phys_size, - dma_addr_t dma_size) -{ - struct drm_i915_gem_object *obj; - unsigned int cache_level; - - GEM_BUG_ON(!phys_size || phys_size > dma_size); - GEM_BUG_ON(!IS_ALIGNED(phys_size, PAGE_SIZE)); - GEM_BUG_ON(!IS_ALIGNED(dma_size, I915_GTT_PAGE_SIZE)); - - if (overflows_type(dma_size, obj->base.size)) - return ERR_PTR(-E2BIG); - - obj = i915_gem_object_alloc(); - if (!obj) - return ERR_PTR(-ENOMEM); - - drm_gem_private_object_init(&i915->drm, &obj->base, dma_size); - i915_gem_object_init(obj, &huge_ops); - - obj->read_domains = I915_GEM_DOMAIN_CPU; - obj->write_domain = I915_GEM_DOMAIN_CPU; - cache_level = HAS_LLC(i915) ? 
I915_CACHE_LLC : I915_CACHE_NONE; - i915_gem_object_set_cache_coherency(obj, cache_level); - obj->scratch = phys_size; - - return obj; -} diff --git a/drivers/gpu/drm/i915/selftests/huge_gem_object.h b/drivers/gpu/drm/i915/selftests/huge_gem_object.h deleted file mode 100644 index a6133a9e8029..000000000000 --- a/drivers/gpu/drm/i915/selftests/huge_gem_object.h +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright © 2016 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - * - */ - -#ifndef __HUGE_GEM_OBJECT_H -#define __HUGE_GEM_OBJECT_H - -struct drm_i915_gem_object * -huge_gem_object(struct drm_i915_private *i915, - phys_addr_t phys_size, - dma_addr_t dma_size); - -static inline phys_addr_t -huge_gem_object_phys_size(struct drm_i915_gem_object *obj) -{ - return obj->scratch; -} - -static inline dma_addr_t -huge_gem_object_dma_size(struct drm_i915_gem_object *obj) -{ - return obj->base.size; -} - -#endif /* !__HUGE_GEM_OBJECT_H */ diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c deleted file mode 100644 index b22b8249dfbd..000000000000 --- a/drivers/gpu/drm/i915/selftests/huge_pages.c +++ /dev/null @@ -1,1793 +0,0 @@ -/* - * Copyright © 2017 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. 
- * - */ - -#include "../i915_selftest.h" - -#include - -#include "igt_gem_utils.h" -#include "mock_drm.h" -#include "i915_random.h" - -static const unsigned int page_sizes[] = { - I915_GTT_PAGE_SIZE_2M, - I915_GTT_PAGE_SIZE_64K, - I915_GTT_PAGE_SIZE_4K, -}; - -static unsigned int get_largest_page_size(struct drm_i915_private *i915, - u64 rem) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(page_sizes); ++i) { - unsigned int page_size = page_sizes[i]; - - if (HAS_PAGE_SIZES(i915, page_size) && rem >= page_size) - return page_size; - } - - return 0; -} - -static void huge_pages_free_pages(struct sg_table *st) -{ - struct scatterlist *sg; - - for (sg = st->sgl; sg; sg = __sg_next(sg)) { - if (sg_page(sg)) - __free_pages(sg_page(sg), get_order(sg->length)); - } - - sg_free_table(st); - kfree(st); -} - -static int get_huge_pages(struct drm_i915_gem_object *obj) -{ -#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY) - unsigned int page_mask = obj->mm.page_mask; - struct sg_table *st; - struct scatterlist *sg; - unsigned int sg_page_sizes; - u64 rem; - - st = kmalloc(sizeof(*st), GFP); - if (!st) - return -ENOMEM; - - if (sg_alloc_table(st, obj->base.size >> PAGE_SHIFT, GFP)) { - kfree(st); - return -ENOMEM; - } - - rem = obj->base.size; - sg = st->sgl; - st->nents = 0; - sg_page_sizes = 0; - - /* - * Our goal here is simple, we want to greedily fill the object from - * largest to smallest page-size, while ensuring that we use *every* - * page-size as per the given page-mask. - */ - do { - unsigned int bit = ilog2(page_mask); - unsigned int page_size = BIT(bit); - int order = get_order(page_size); - - do { - struct page *page; - - GEM_BUG_ON(order >= MAX_ORDER); - page = alloc_pages(GFP | __GFP_ZERO, order); - if (!page) - goto err; - - sg_set_page(sg, page, page_size, 0); - sg_page_sizes |= page_size; - st->nents++; - - rem -= page_size; - if (!rem) { - sg_mark_end(sg); - break; - } - - sg = __sg_next(sg); - } while ((rem - ((page_size-1) & page_mask)) >= page_size); - - page_mask &= (page_size-1); - } while (page_mask); - - if (i915_gem_gtt_prepare_pages(obj, st)) - goto err; - - obj->mm.madv = I915_MADV_DONTNEED; - - GEM_BUG_ON(sg_page_sizes != obj->mm.page_mask); - __i915_gem_object_set_pages(obj, st, sg_page_sizes); - - return 0; - -err: - sg_set_page(sg, NULL, 0, 0); - sg_mark_end(sg); - huge_pages_free_pages(st); - - return -ENOMEM; -} - -static void put_huge_pages(struct drm_i915_gem_object *obj, - struct sg_table *pages) -{ - i915_gem_gtt_finish_pages(obj, pages); - huge_pages_free_pages(pages); - - obj->mm.dirty = false; - obj->mm.madv = I915_MADV_WILLNEED; -} - -static const struct drm_i915_gem_object_ops huge_page_ops = { - .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | - I915_GEM_OBJECT_IS_SHRINKABLE, - .get_pages = get_huge_pages, - .put_pages = put_huge_pages, -}; - -static struct drm_i915_gem_object * -huge_pages_object(struct drm_i915_private *i915, - u64 size, - unsigned int page_mask) -{ - struct drm_i915_gem_object *obj; - - GEM_BUG_ON(!size); - GEM_BUG_ON(!IS_ALIGNED(size, BIT(__ffs(page_mask)))); - - if (size >> PAGE_SHIFT > INT_MAX) - return ERR_PTR(-E2BIG); - - if (overflows_type(size, obj->base.size)) - return ERR_PTR(-E2BIG); - - obj = i915_gem_object_alloc(); - if (!obj) - return ERR_PTR(-ENOMEM); - - drm_gem_private_object_init(&i915->drm, &obj->base, size); - i915_gem_object_init(obj, &huge_page_ops); - - obj->write_domain = I915_GEM_DOMAIN_CPU; - obj->read_domains = I915_GEM_DOMAIN_CPU; - obj->cache_level = I915_CACHE_NONE; - - obj->mm.page_mask = page_mask; - - return 
obj; -} - -static int fake_get_huge_pages(struct drm_i915_gem_object *obj) -{ - struct drm_i915_private *i915 = to_i915(obj->base.dev); - const u64 max_len = rounddown_pow_of_two(UINT_MAX); - struct sg_table *st; - struct scatterlist *sg; - unsigned int sg_page_sizes; - u64 rem; - - st = kmalloc(sizeof(*st), GFP); - if (!st) - return -ENOMEM; - - if (sg_alloc_table(st, obj->base.size >> PAGE_SHIFT, GFP)) { - kfree(st); - return -ENOMEM; - } - - /* Use optimal page sized chunks to fill in the sg table */ - rem = obj->base.size; - sg = st->sgl; - st->nents = 0; - sg_page_sizes = 0; - do { - unsigned int page_size = get_largest_page_size(i915, rem); - unsigned int len = min(page_size * div_u64(rem, page_size), - max_len); - - GEM_BUG_ON(!page_size); - - sg->offset = 0; - sg->length = len; - sg_dma_len(sg) = len; - sg_dma_address(sg) = page_size; - - sg_page_sizes |= len; - - st->nents++; - - rem -= len; - if (!rem) { - sg_mark_end(sg); - break; - } - - sg = sg_next(sg); - } while (1); - - i915_sg_trim(st); - - obj->mm.madv = I915_MADV_DONTNEED; - - __i915_gem_object_set_pages(obj, st, sg_page_sizes); - - return 0; -} - -static int fake_get_huge_pages_single(struct drm_i915_gem_object *obj) -{ - struct drm_i915_private *i915 = to_i915(obj->base.dev); - struct sg_table *st; - struct scatterlist *sg; - unsigned int page_size; - - st = kmalloc(sizeof(*st), GFP); - if (!st) - return -ENOMEM; - - if (sg_alloc_table(st, 1, GFP)) { - kfree(st); - return -ENOMEM; - } - - sg = st->sgl; - st->nents = 1; - - page_size = get_largest_page_size(i915, obj->base.size); - GEM_BUG_ON(!page_size); - - sg->offset = 0; - sg->length = obj->base.size; - sg_dma_len(sg) = obj->base.size; - sg_dma_address(sg) = page_size; - - obj->mm.madv = I915_MADV_DONTNEED; - - __i915_gem_object_set_pages(obj, st, sg->length); - - return 0; -#undef GFP -} - -static void fake_free_huge_pages(struct drm_i915_gem_object *obj, - struct sg_table *pages) -{ - sg_free_table(pages); - kfree(pages); -} - -static void fake_put_huge_pages(struct drm_i915_gem_object *obj, - struct sg_table *pages) -{ - fake_free_huge_pages(obj, pages); - obj->mm.dirty = false; - obj->mm.madv = I915_MADV_WILLNEED; -} - -static const struct drm_i915_gem_object_ops fake_ops = { - .flags = I915_GEM_OBJECT_IS_SHRINKABLE, - .get_pages = fake_get_huge_pages, - .put_pages = fake_put_huge_pages, -}; - -static const struct drm_i915_gem_object_ops fake_ops_single = { - .flags = I915_GEM_OBJECT_IS_SHRINKABLE, - .get_pages = fake_get_huge_pages_single, - .put_pages = fake_put_huge_pages, -}; - -static struct drm_i915_gem_object * -fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single) -{ - struct drm_i915_gem_object *obj; - - GEM_BUG_ON(!size); - GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)); - - if (size >> PAGE_SHIFT > UINT_MAX) - return ERR_PTR(-E2BIG); - - if (overflows_type(size, obj->base.size)) - return ERR_PTR(-E2BIG); - - obj = i915_gem_object_alloc(); - if (!obj) - return ERR_PTR(-ENOMEM); - - drm_gem_private_object_init(&i915->drm, &obj->base, size); - - if (single) - i915_gem_object_init(obj, &fake_ops_single); - else - i915_gem_object_init(obj, &fake_ops); - - obj->write_domain = I915_GEM_DOMAIN_CPU; - obj->read_domains = I915_GEM_DOMAIN_CPU; - obj->cache_level = I915_CACHE_NONE; - - return obj; -} - -static int igt_check_page_sizes(struct i915_vma *vma) -{ - struct drm_i915_private *i915 = vma->vm->i915; - unsigned int supported = INTEL_INFO(i915)->page_sizes; - struct drm_i915_gem_object *obj = vma->obj; - int err = 0; - - if 
(!HAS_PAGE_SIZES(i915, vma->page_sizes.sg)) { - pr_err("unsupported page_sizes.sg=%u, supported=%u\n", - vma->page_sizes.sg & ~supported, supported); - err = -EINVAL; - } - - if (!HAS_PAGE_SIZES(i915, vma->page_sizes.gtt)) { - pr_err("unsupported page_sizes.gtt=%u, supported=%u\n", - vma->page_sizes.gtt & ~supported, supported); - err = -EINVAL; - } - - if (vma->page_sizes.phys != obj->mm.page_sizes.phys) { - pr_err("vma->page_sizes.phys(%u) != obj->mm.page_sizes.phys(%u)\n", - vma->page_sizes.phys, obj->mm.page_sizes.phys); - err = -EINVAL; - } - - if (vma->page_sizes.sg != obj->mm.page_sizes.sg) { - pr_err("vma->page_sizes.sg(%u) != obj->mm.page_sizes.sg(%u)\n", - vma->page_sizes.sg, obj->mm.page_sizes.sg); - err = -EINVAL; - } - - if (obj->mm.page_sizes.gtt) { - pr_err("obj->page_sizes.gtt(%u) should never be set\n", - obj->mm.page_sizes.gtt); - err = -EINVAL; - } - - return err; -} - -static int igt_mock_exhaust_device_supported_pages(void *arg) -{ - struct i915_hw_ppgtt *ppgtt = arg; - struct drm_i915_private *i915 = ppgtt->vm.i915; - unsigned int saved_mask = INTEL_INFO(i915)->page_sizes; - struct drm_i915_gem_object *obj; - struct i915_vma *vma; - int i, j, single; - int err; - - /* - * Sanity check creating objects with every valid page support - * combination for our mock device. - */ - - for (i = 1; i < BIT(ARRAY_SIZE(page_sizes)); i++) { - unsigned int combination = 0; - - for (j = 0; j < ARRAY_SIZE(page_sizes); j++) { - if (i & BIT(j)) - combination |= page_sizes[j]; - } - - mkwrite_device_info(i915)->page_sizes = combination; - - for (single = 0; single <= 1; ++single) { - obj = fake_huge_pages_object(i915, combination, !!single); - if (IS_ERR(obj)) { - err = PTR_ERR(obj); - goto out_device; - } - - if (obj->base.size != combination) { - pr_err("obj->base.size=%zu, expected=%u\n", - obj->base.size, combination); - err = -EINVAL; - goto out_put; - } - - vma = i915_vma_instance(obj, &ppgtt->vm, NULL); - if (IS_ERR(vma)) { - err = PTR_ERR(vma); - goto out_put; - } - - err = i915_vma_pin(vma, 0, 0, PIN_USER); - if (err) - goto out_close; - - err = igt_check_page_sizes(vma); - - if (vma->page_sizes.sg != combination) { - pr_err("page_sizes.sg=%u, expected=%u\n", - vma->page_sizes.sg, combination); - err = -EINVAL; - } - - i915_vma_unpin(vma); - i915_vma_close(vma); - - i915_gem_object_put(obj); - - if (err) - goto out_device; - } - } - - goto out_device; - -out_close: - i915_vma_close(vma); -out_put: - i915_gem_object_put(obj); -out_device: - mkwrite_device_info(i915)->page_sizes = saved_mask; - - return err; -} - -static int igt_mock_ppgtt_misaligned_dma(void *arg) -{ - struct i915_hw_ppgtt *ppgtt = arg; - struct drm_i915_private *i915 = ppgtt->vm.i915; - unsigned long supported = INTEL_INFO(i915)->page_sizes; - struct drm_i915_gem_object *obj; - int bit; - int err; - - /* - * Sanity check dma misalignment for huge pages -- the dma addresses we - * insert into the paging structures need to always respect the page - * size alignment. 
- */ - - bit = ilog2(I915_GTT_PAGE_SIZE_64K); - - for_each_set_bit_from(bit, &supported, - ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) { - IGT_TIMEOUT(end_time); - unsigned int page_size = BIT(bit); - unsigned int flags = PIN_USER | PIN_OFFSET_FIXED; - unsigned int offset; - unsigned int size = - round_up(page_size, I915_GTT_PAGE_SIZE_2M) << 1; - struct i915_vma *vma; - - obj = fake_huge_pages_object(i915, size, true); - if (IS_ERR(obj)) - return PTR_ERR(obj); - - if (obj->base.size != size) { - pr_err("obj->base.size=%zu, expected=%u\n", - obj->base.size, size); - err = -EINVAL; - goto out_put; - } - - err = i915_gem_object_pin_pages(obj); - if (err) - goto out_put; - - /* Force the page size for this object */ - obj->mm.page_sizes.sg = page_size; - - vma = i915_vma_instance(obj, &ppgtt->vm, NULL); - if (IS_ERR(vma)) { - err = PTR_ERR(vma); - goto out_unpin; - } - - err = i915_vma_pin(vma, 0, 0, flags); - if (err) { - i915_vma_close(vma); - goto out_unpin; - } - - - err = igt_check_page_sizes(vma); - - if (vma->page_sizes.gtt != page_size) { - pr_err("page_sizes.gtt=%u, expected %u\n", - vma->page_sizes.gtt, page_size); - err = -EINVAL; - } - - i915_vma_unpin(vma); - - if (err) { - i915_vma_close(vma); - goto out_unpin; - } - - /* - * Try all the other valid offsets until the next - * boundary -- should always fall back to using 4K - * pages. - */ - for (offset = 4096; offset < page_size; offset += 4096) { - err = i915_vma_unbind(vma); - if (err) { - i915_vma_close(vma); - goto out_unpin; - } - - err = i915_vma_pin(vma, 0, 0, flags | offset); - if (err) { - i915_vma_close(vma); - goto out_unpin; - } - - err = igt_check_page_sizes(vma); - - if (vma->page_sizes.gtt != I915_GTT_PAGE_SIZE_4K) { - pr_err("page_sizes.gtt=%u, expected %llu\n", - vma->page_sizes.gtt, I915_GTT_PAGE_SIZE_4K); - err = -EINVAL; - } - - i915_vma_unpin(vma); - - if (err) { - i915_vma_close(vma); - goto out_unpin; - } - - if (igt_timeout(end_time, - "%s timed out at offset %x with page-size %x\n", - __func__, offset, page_size)) - break; - } - - i915_vma_close(vma); - - i915_gem_object_unpin_pages(obj); - __i915_gem_object_put_pages(obj, I915_MM_NORMAL); - i915_gem_object_put(obj); - } - - return 0; - -out_unpin: - i915_gem_object_unpin_pages(obj); -out_put: - i915_gem_object_put(obj); - - return err; -} - -static void close_object_list(struct list_head *objects, - struct i915_hw_ppgtt *ppgtt) -{ - struct drm_i915_gem_object *obj, *on; - - list_for_each_entry_safe(obj, on, objects, st_link) { - struct i915_vma *vma; - - vma = i915_vma_instance(obj, &ppgtt->vm, NULL); - if (!IS_ERR(vma)) - i915_vma_close(vma); - - list_del(&obj->st_link); - i915_gem_object_unpin_pages(obj); - __i915_gem_object_put_pages(obj, I915_MM_NORMAL); - i915_gem_object_put(obj); - } -} - -static int igt_mock_ppgtt_huge_fill(void *arg) -{ - struct i915_hw_ppgtt *ppgtt = arg; - struct drm_i915_private *i915 = ppgtt->vm.i915; - unsigned long max_pages = ppgtt->vm.total >> PAGE_SHIFT; - unsigned long page_num; - bool single = false; - LIST_HEAD(objects); - IGT_TIMEOUT(end_time); - int err = -ENODEV; - - for_each_prime_number_from(page_num, 1, max_pages) { - struct drm_i915_gem_object *obj; - u64 size = page_num << PAGE_SHIFT; - struct i915_vma *vma; - unsigned int expected_gtt = 0; - int i; - - obj = fake_huge_pages_object(i915, size, single); - if (IS_ERR(obj)) { - err = PTR_ERR(obj); - break; - } - - if (obj->base.size != size) { - pr_err("obj->base.size=%zd, expected=%llu\n", - obj->base.size, size); - i915_gem_object_put(obj); - err = -EINVAL; - break; - } 
- - err = i915_gem_object_pin_pages(obj); - if (err) { - i915_gem_object_put(obj); - break; - } - - list_add(&obj->st_link, &objects); - - vma = i915_vma_instance(obj, &ppgtt->vm, NULL); - if (IS_ERR(vma)) { - err = PTR_ERR(vma); - break; - } - - err = i915_vma_pin(vma, 0, 0, PIN_USER); - if (err) - break; - - err = igt_check_page_sizes(vma); - if (err) { - i915_vma_unpin(vma); - break; - } - - /* - * Figure out the expected gtt page size knowing that we go from - * largest to smallest page size sg chunks, and that we align to - * the largest page size. - */ - for (i = 0; i < ARRAY_SIZE(page_sizes); ++i) { - unsigned int page_size = page_sizes[i]; - - if (HAS_PAGE_SIZES(i915, page_size) && - size >= page_size) { - expected_gtt |= page_size; - size &= page_size-1; - } - } - - GEM_BUG_ON(!expected_gtt); - GEM_BUG_ON(size); - - if (expected_gtt & I915_GTT_PAGE_SIZE_4K) - expected_gtt &= ~I915_GTT_PAGE_SIZE_64K; - - i915_vma_unpin(vma); - - if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) { - if (!IS_ALIGNED(vma->node.start, - I915_GTT_PAGE_SIZE_2M)) { - pr_err("node.start(%llx) not aligned to 2M\n", - vma->node.start); - err = -EINVAL; - break; - } - - if (!IS_ALIGNED(vma->node.size, - I915_GTT_PAGE_SIZE_2M)) { - pr_err("node.size(%llx) not aligned to 2M\n", - vma->node.size); - err = -EINVAL; - break; - } - } - - if (vma->page_sizes.gtt != expected_gtt) { - pr_err("gtt=%u, expected=%u, size=%zd, single=%s\n", - vma->page_sizes.gtt, expected_gtt, - obj->base.size, yesno(!!single)); - err = -EINVAL; - break; - } - - if (igt_timeout(end_time, - "%s timed out at size %zd\n", - __func__, obj->base.size)) - break; - - single = !single; - } - - close_object_list(&objects, ppgtt); - - if (err == -ENOMEM || err == -ENOSPC) - err = 0; - - return err; -} - -static int igt_mock_ppgtt_64K(void *arg) -{ - struct i915_hw_ppgtt *ppgtt = arg; - struct drm_i915_private *i915 = ppgtt->vm.i915; - struct drm_i915_gem_object *obj; - const struct object_info { - unsigned int size; - unsigned int gtt; - unsigned int offset; - } objects[] = { - /* Cases with forced padding/alignment */ - { - .size = SZ_64K, - .gtt = I915_GTT_PAGE_SIZE_64K, - .offset = 0, - }, - { - .size = SZ_64K + SZ_4K, - .gtt = I915_GTT_PAGE_SIZE_4K, - .offset = 0, - }, - { - .size = SZ_64K - SZ_4K, - .gtt = I915_GTT_PAGE_SIZE_4K, - .offset = 0, - }, - { - .size = SZ_2M, - .gtt = I915_GTT_PAGE_SIZE_64K, - .offset = 0, - }, - { - .size = SZ_2M - SZ_4K, - .gtt = I915_GTT_PAGE_SIZE_4K, - .offset = 0, - }, - { - .size = SZ_2M + SZ_4K, - .gtt = I915_GTT_PAGE_SIZE_64K | I915_GTT_PAGE_SIZE_4K, - .offset = 0, - }, - { - .size = SZ_2M + SZ_64K, - .gtt = I915_GTT_PAGE_SIZE_64K, - .offset = 0, - }, - { - .size = SZ_2M - SZ_64K, - .gtt = I915_GTT_PAGE_SIZE_64K, - .offset = 0, - }, - /* Try without any forced padding/alignment */ - { - .size = SZ_64K, - .offset = SZ_2M, - .gtt = I915_GTT_PAGE_SIZE_4K, - }, - { - .size = SZ_128K, - .offset = SZ_2M - SZ_64K, - .gtt = I915_GTT_PAGE_SIZE_4K, - }, - }; - struct i915_vma *vma; - int i, single; - int err; - - /* - * Sanity check some of the trickiness with 64K pages -- either we can - * safely mark the whole page-table(2M block) as 64K, or we have to - * always fallback to 4K. 
- */ - - if (!HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K)) - return 0; - - for (i = 0; i < ARRAY_SIZE(objects); ++i) { - unsigned int size = objects[i].size; - unsigned int expected_gtt = objects[i].gtt; - unsigned int offset = objects[i].offset; - unsigned int flags = PIN_USER; - - for (single = 0; single <= 1; single++) { - obj = fake_huge_pages_object(i915, size, !!single); - if (IS_ERR(obj)) - return PTR_ERR(obj); - - err = i915_gem_object_pin_pages(obj); - if (err) - goto out_object_put; - - /* - * Disable 2M pages -- We only want to use 64K/4K pages - * for this test. - */ - obj->mm.page_sizes.sg &= ~I915_GTT_PAGE_SIZE_2M; - - vma = i915_vma_instance(obj, &ppgtt->vm, NULL); - if (IS_ERR(vma)) { - err = PTR_ERR(vma); - goto out_object_unpin; - } - - if (offset) - flags |= PIN_OFFSET_FIXED | offset; - - err = i915_vma_pin(vma, 0, 0, flags); - if (err) - goto out_vma_close; - - err = igt_check_page_sizes(vma); - if (err) - goto out_vma_unpin; - - if (!offset && vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) { - if (!IS_ALIGNED(vma->node.start, - I915_GTT_PAGE_SIZE_2M)) { - pr_err("node.start(%llx) not aligned to 2M\n", - vma->node.start); - err = -EINVAL; - goto out_vma_unpin; - } - - if (!IS_ALIGNED(vma->node.size, - I915_GTT_PAGE_SIZE_2M)) { - pr_err("node.size(%llx) not aligned to 2M\n", - vma->node.size); - err = -EINVAL; - goto out_vma_unpin; - } - } - - if (vma->page_sizes.gtt != expected_gtt) { - pr_err("gtt=%u, expected=%u, i=%d, single=%s\n", - vma->page_sizes.gtt, expected_gtt, i, - yesno(!!single)); - err = -EINVAL; - goto out_vma_unpin; - } - - i915_vma_unpin(vma); - i915_vma_close(vma); - - i915_gem_object_unpin_pages(obj); - __i915_gem_object_put_pages(obj, I915_MM_NORMAL); - i915_gem_object_put(obj); - } - } - - return 0; - -out_vma_unpin: - i915_vma_unpin(vma); -out_vma_close: - i915_vma_close(vma); -out_object_unpin: - i915_gem_object_unpin_pages(obj); -out_object_put: - i915_gem_object_put(obj); - - return err; -} - -static struct i915_vma * -gpu_write_dw(struct i915_vma *vma, u64 offset, u32 val) -{ - struct drm_i915_private *i915 = vma->vm->i915; - const int gen = INTEL_GEN(i915); - unsigned int count = vma->size >> PAGE_SHIFT; - struct drm_i915_gem_object *obj; - struct i915_vma *batch; - unsigned int size; - u32 *cmd; - int n; - int err; - - size = (1 + 4 * count) * sizeof(u32); - size = round_up(size, PAGE_SIZE); - obj = i915_gem_object_create_internal(i915, size); - if (IS_ERR(obj)) - return ERR_CAST(obj); - - cmd = i915_gem_object_pin_map(obj, I915_MAP_WC); - if (IS_ERR(cmd)) { - err = PTR_ERR(cmd); - goto err; - } - - offset += vma->node.start; - - for (n = 0; n < count; n++) { - if (gen >= 8) { - *cmd++ = MI_STORE_DWORD_IMM_GEN4; - *cmd++ = lower_32_bits(offset); - *cmd++ = upper_32_bits(offset); - *cmd++ = val; - } else if (gen >= 4) { - *cmd++ = MI_STORE_DWORD_IMM_GEN4 | - (gen < 6 ? 
MI_USE_GGTT : 0); - *cmd++ = 0; - *cmd++ = offset; - *cmd++ = val; - } else { - *cmd++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL; - *cmd++ = offset; - *cmd++ = val; - } - - offset += PAGE_SIZE; - } - - *cmd = MI_BATCH_BUFFER_END; - i915_gem_chipset_flush(i915); - - i915_gem_object_unpin_map(obj); - - batch = i915_vma_instance(obj, vma->vm, NULL); - if (IS_ERR(batch)) { - err = PTR_ERR(batch); - goto err; - } - - err = i915_vma_pin(batch, 0, 0, PIN_USER); - if (err) - goto err; - - return batch; - -err: - i915_gem_object_put(obj); - - return ERR_PTR(err); -} - -static int gpu_write(struct i915_vma *vma, - struct i915_gem_context *ctx, - struct intel_engine_cs *engine, - u32 dword, - u32 value) -{ - struct i915_request *rq; - struct i915_vma *batch; - int err; - - GEM_BUG_ON(!intel_engine_can_store_dword(engine)); - - err = i915_gem_object_set_to_gtt_domain(vma->obj, true); - if (err) - return err; - - batch = gpu_write_dw(vma, dword * sizeof(u32), value); - if (IS_ERR(batch)) - return PTR_ERR(batch); - - rq = igt_request_alloc(ctx, engine); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto err_batch; - } - - err = i915_vma_move_to_active(batch, rq, 0); - if (err) - goto err_request; - - i915_gem_object_set_active_reference(batch->obj); - - err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); - if (err) - goto err_request; - - err = engine->emit_bb_start(rq, - batch->node.start, batch->node.size, - 0); -err_request: - if (err) - i915_request_skip(rq, err); - i915_request_add(rq); -err_batch: - i915_vma_unpin(batch); - i915_vma_close(batch); - - return err; -} - -static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val) -{ - unsigned int needs_flush; - unsigned long n; - int err; - - err = i915_gem_object_prepare_read(obj, &needs_flush); - if (err) - return err; - - for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) { - u32 *ptr = kmap_atomic(i915_gem_object_get_page(obj, n)); - - if (needs_flush & CLFLUSH_BEFORE) - drm_clflush_virt_range(ptr, PAGE_SIZE); - - if (ptr[dword] != val) { - pr_err("n=%lu ptr[%u]=%u, val=%u\n", - n, dword, ptr[dword], val); - kunmap_atomic(ptr); - err = -EINVAL; - break; - } - - kunmap_atomic(ptr); - } - - i915_gem_object_finish_access(obj); - - return err; -} - -static int __igt_write_huge(struct i915_gem_context *ctx, - struct intel_engine_cs *engine, - struct drm_i915_gem_object *obj, - u64 size, u64 offset, - u32 dword, u32 val) -{ - struct drm_i915_private *i915 = to_i915(obj->base.dev); - struct i915_address_space *vm = - ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm; - unsigned int flags = PIN_USER | PIN_OFFSET_FIXED; - struct i915_vma *vma; - int err; - - vma = i915_vma_instance(obj, vm, NULL); - if (IS_ERR(vma)) - return PTR_ERR(vma); - - err = i915_vma_unbind(vma); - if (err) - goto out_vma_close; - - err = i915_vma_pin(vma, size, 0, flags | offset); - if (err) { - /* - * The ggtt may have some pages reserved so - * refrain from erroring out. 
- */ - if (err == -ENOSPC && i915_is_ggtt(vm)) - err = 0; - - goto out_vma_close; - } - - err = igt_check_page_sizes(vma); - if (err) - goto out_vma_unpin; - - err = gpu_write(vma, ctx, engine, dword, val); - if (err) { - pr_err("gpu-write failed at offset=%llx\n", offset); - goto out_vma_unpin; - } - - err = cpu_check(obj, dword, val); - if (err) { - pr_err("cpu-check failed at offset=%llx\n", offset); - goto out_vma_unpin; - } - -out_vma_unpin: - i915_vma_unpin(vma); -out_vma_close: - i915_vma_destroy(vma); - - return err; -} - -static int igt_write_huge(struct i915_gem_context *ctx, - struct drm_i915_gem_object *obj) -{ - struct drm_i915_private *i915 = to_i915(obj->base.dev); - struct i915_address_space *vm = - ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm; - static struct intel_engine_cs *engines[I915_NUM_ENGINES]; - struct intel_engine_cs *engine; - I915_RND_STATE(prng); - IGT_TIMEOUT(end_time); - unsigned int max_page_size; - unsigned int id; - u64 max; - u64 num; - u64 size; - int *order; - int i, n; - int err = 0; - - GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); - - size = obj->base.size; - if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K) - size = round_up(size, I915_GTT_PAGE_SIZE_2M); - - max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg); - max = div_u64((vm->total - size), max_page_size); - - n = 0; - for_each_engine(engine, i915, id) { - if (!intel_engine_can_store_dword(engine)) { - pr_info("store-dword-imm not supported on engine=%u\n", - id); - continue; - } - engines[n++] = engine; - } - - if (!n) - return 0; - - /* - * To keep things interesting when alternating between engines in our - * randomized order, lets also make feeding to the same engine a few - * times in succession a possibility by enlarging the permutation array. - */ - order = i915_random_order(n * I915_NUM_ENGINES, &prng); - if (!order) - return -ENOMEM; - - /* - * Try various offsets in an ascending/descending fashion until we - * timeout -- we want to avoid issues hidden by effectively always using - * offset = 0. - */ - i = 0; - for_each_prime_number_from(num, 0, max) { - u64 offset_low = num * max_page_size; - u64 offset_high = (max - num) * max_page_size; - u32 dword = offset_in_page(num) / 4; - - engine = engines[order[i] % n]; - i = (i + 1) % (n * I915_NUM_ENGINES); - - /* - * In order to utilize 64K pages we need to both pad the vma - * size and ensure the vma offset is at the start of the pt - * boundary, however to improve coverage we opt for testing both - * aligned and unaligned offsets. 
- */ - if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K) - offset_low = round_down(offset_low, - I915_GTT_PAGE_SIZE_2M); - - err = __igt_write_huge(ctx, engine, obj, size, offset_low, - dword, num + 1); - if (err) - break; - - err = __igt_write_huge(ctx, engine, obj, size, offset_high, - dword, num + 1); - if (err) - break; - - if (igt_timeout(end_time, - "%s timed out on engine=%u, offset_low=%llx offset_high=%llx, max_page_size=%x\n", - __func__, engine->id, offset_low, offset_high, - max_page_size)) - break; - } - - kfree(order); - - return err; -} - -static int igt_ppgtt_exhaust_huge(void *arg) -{ - struct i915_gem_context *ctx = arg; - struct drm_i915_private *i915 = ctx->i915; - unsigned long supported = INTEL_INFO(i915)->page_sizes; - static unsigned int pages[ARRAY_SIZE(page_sizes)]; - struct drm_i915_gem_object *obj; - unsigned int size_mask; - unsigned int page_mask; - int n, i; - int err = -ENODEV; - - if (supported == I915_GTT_PAGE_SIZE_4K) - return 0; - - /* - * Sanity check creating objects with a varying mix of page sizes -- - * ensuring that our writes lands in the right place. - */ - - n = 0; - for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) - pages[n++] = BIT(i); - - for (size_mask = 2; size_mask < BIT(n); size_mask++) { - unsigned int size = 0; - - for (i = 0; i < n; i++) { - if (size_mask & BIT(i)) - size |= pages[i]; - } - - /* - * For our page mask we want to enumerate all the page-size - * combinations which will fit into our chosen object size. - */ - for (page_mask = 2; page_mask <= size_mask; page_mask++) { - unsigned int page_sizes = 0; - - for (i = 0; i < n; i++) { - if (page_mask & BIT(i)) - page_sizes |= pages[i]; - } - - /* - * Ensure that we can actually fill the given object - * with our chosen page mask. - */ - if (!IS_ALIGNED(size, BIT(__ffs(page_sizes)))) - continue; - - obj = huge_pages_object(i915, size, page_sizes); - if (IS_ERR(obj)) { - err = PTR_ERR(obj); - goto out_device; - } - - err = i915_gem_object_pin_pages(obj); - if (err) { - i915_gem_object_put(obj); - - if (err == -ENOMEM) { - pr_info("unable to get pages, size=%u, pages=%u\n", - size, page_sizes); - err = 0; - break; - } - - pr_err("pin_pages failed, size=%u, pages=%u\n", - size_mask, page_mask); - - goto out_device; - } - - /* Force the page-size for the gtt insertion */ - obj->mm.page_sizes.sg = page_sizes; - - err = igt_write_huge(ctx, obj); - if (err) { - pr_err("exhaust write-huge failed with size=%u\n", - size); - goto out_unpin; - } - - i915_gem_object_unpin_pages(obj); - __i915_gem_object_put_pages(obj, I915_MM_NORMAL); - i915_gem_object_put(obj); - } - } - - goto out_device; - -out_unpin: - i915_gem_object_unpin_pages(obj); - i915_gem_object_put(obj); -out_device: - mkwrite_device_info(i915)->page_sizes = supported; - - return err; -} - -static int igt_ppgtt_internal_huge(void *arg) -{ - struct i915_gem_context *ctx = arg; - struct drm_i915_private *i915 = ctx->i915; - struct drm_i915_gem_object *obj; - static const unsigned int sizes[] = { - SZ_64K, - SZ_128K, - SZ_256K, - SZ_512K, - SZ_1M, - SZ_2M, - }; - int i; - int err; - - /* - * Sanity check that the HW uses huge pages correctly through internal - * -- ensure that our writes land in the right place. 
- */ - - for (i = 0; i < ARRAY_SIZE(sizes); ++i) { - unsigned int size = sizes[i]; - - obj = i915_gem_object_create_internal(i915, size); - if (IS_ERR(obj)) - return PTR_ERR(obj); - - err = i915_gem_object_pin_pages(obj); - if (err) - goto out_put; - - if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_64K) { - pr_info("internal unable to allocate huge-page(s) with size=%u\n", - size); - goto out_unpin; - } - - err = igt_write_huge(ctx, obj); - if (err) { - pr_err("internal write-huge failed with size=%u\n", - size); - goto out_unpin; - } - - i915_gem_object_unpin_pages(obj); - __i915_gem_object_put_pages(obj, I915_MM_NORMAL); - i915_gem_object_put(obj); - } - - return 0; - -out_unpin: - i915_gem_object_unpin_pages(obj); -out_put: - i915_gem_object_put(obj); - - return err; -} - -static inline bool igt_can_allocate_thp(struct drm_i915_private *i915) -{ - return i915->mm.gemfs && has_transparent_hugepage(); -} - -static int igt_ppgtt_gemfs_huge(void *arg) -{ - struct i915_gem_context *ctx = arg; - struct drm_i915_private *i915 = ctx->i915; - struct drm_i915_gem_object *obj; - static const unsigned int sizes[] = { - SZ_2M, - SZ_4M, - SZ_8M, - SZ_16M, - SZ_32M, - }; - int i; - int err; - - /* - * Sanity check that the HW uses huge pages correctly through gemfs -- - * ensure that our writes land in the right place. - */ - - if (!igt_can_allocate_thp(i915)) { - pr_info("missing THP support, skipping\n"); - return 0; - } - - for (i = 0; i < ARRAY_SIZE(sizes); ++i) { - unsigned int size = sizes[i]; - - obj = i915_gem_object_create_shmem(i915, size); - if (IS_ERR(obj)) - return PTR_ERR(obj); - - err = i915_gem_object_pin_pages(obj); - if (err) - goto out_put; - - if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_2M) { - pr_info("finishing test early, gemfs unable to allocate huge-page(s) with size=%u\n", - size); - goto out_unpin; - } - - err = igt_write_huge(ctx, obj); - if (err) { - pr_err("gemfs write-huge failed with size=%u\n", - size); - goto out_unpin; - } - - i915_gem_object_unpin_pages(obj); - __i915_gem_object_put_pages(obj, I915_MM_NORMAL); - i915_gem_object_put(obj); - } - - return 0; - -out_unpin: - i915_gem_object_unpin_pages(obj); -out_put: - i915_gem_object_put(obj); - - return err; -} - -static int igt_ppgtt_pin_update(void *arg) -{ - struct i915_gem_context *ctx = arg; - struct drm_i915_private *dev_priv = ctx->i915; - unsigned long supported = INTEL_INFO(dev_priv)->page_sizes; - struct i915_hw_ppgtt *ppgtt = ctx->ppgtt; - struct drm_i915_gem_object *obj; - struct i915_vma *vma; - unsigned int flags = PIN_USER | PIN_OFFSET_FIXED; - int first, last; - int err; - - /* - * Make sure there's no funny business when doing a PIN_UPDATE -- in the - * past we had a subtle issue with being able to incorrectly do multiple - * alloc va ranges on the same object when doing a PIN_UPDATE, which - * resulted in some pretty nasty bugs, though only when using - * huge-gtt-pages. 
- */ - - if (!ppgtt || !i915_vm_is_4lvl(&ppgtt->vm)) { - pr_info("48b PPGTT not supported, skipping\n"); - return 0; - } - - first = ilog2(I915_GTT_PAGE_SIZE_64K); - last = ilog2(I915_GTT_PAGE_SIZE_2M); - - for_each_set_bit_from(first, &supported, last + 1) { - unsigned int page_size = BIT(first); - - obj = i915_gem_object_create_internal(dev_priv, page_size); - if (IS_ERR(obj)) - return PTR_ERR(obj); - - vma = i915_vma_instance(obj, &ppgtt->vm, NULL); - if (IS_ERR(vma)) { - err = PTR_ERR(vma); - goto out_put; - } - - err = i915_vma_pin(vma, SZ_2M, 0, flags); - if (err) - goto out_close; - - if (vma->page_sizes.sg < page_size) { - pr_info("Unable to allocate page-size %x, finishing test early\n", - page_size); - goto out_unpin; - } - - err = igt_check_page_sizes(vma); - if (err) - goto out_unpin; - - if (vma->page_sizes.gtt != page_size) { - dma_addr_t addr = i915_gem_object_get_dma_address(obj, 0); - - /* - * The only valid reason for this to ever fail would be - * if the dma-mapper screwed us over when we did the - * dma_map_sg(), since it has the final say over the dma - * address. - */ - if (IS_ALIGNED(addr, page_size)) { - pr_err("page_sizes.gtt=%u, expected=%u\n", - vma->page_sizes.gtt, page_size); - err = -EINVAL; - } else { - pr_info("dma address misaligned, finishing test early\n"); - } - - goto out_unpin; - } - - err = i915_vma_bind(vma, I915_CACHE_NONE, PIN_UPDATE); - if (err) - goto out_unpin; - - i915_vma_unpin(vma); - i915_vma_close(vma); - - i915_gem_object_put(obj); - } - - obj = i915_gem_object_create_internal(dev_priv, PAGE_SIZE); - if (IS_ERR(obj)) - return PTR_ERR(obj); - - vma = i915_vma_instance(obj, &ppgtt->vm, NULL); - if (IS_ERR(vma)) { - err = PTR_ERR(vma); - goto out_put; - } - - err = i915_vma_pin(vma, 0, 0, flags); - if (err) - goto out_close; - - /* - * Make sure we don't end up with something like where the pde is still - * pointing to the 2M page, and the pt we just filled-in is dangling -- - * we can check this by writing to the first page where it would then - * land in the now stale 2M page. - */ - - err = gpu_write(vma, ctx, dev_priv->engine[RCS0], 0, 0xdeadbeaf); - if (err) - goto out_unpin; - - err = cpu_check(obj, 0, 0xdeadbeaf); - -out_unpin: - i915_vma_unpin(vma); -out_close: - i915_vma_close(vma); -out_put: - i915_gem_object_put(obj); - - return err; -} - -static int igt_tmpfs_fallback(void *arg) -{ - struct i915_gem_context *ctx = arg; - struct drm_i915_private *i915 = ctx->i915; - struct vfsmount *gemfs = i915->mm.gemfs; - struct i915_address_space *vm = - ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm; - struct drm_i915_gem_object *obj; - struct i915_vma *vma; - u32 *vaddr; - int err = 0; - - /* - * Make sure that we don't burst into a ball of flames upon falling back - * to tmpfs, which we rely on if on the off-chance we encouter a failure - * when setting up gemfs. 
- */ - - i915->mm.gemfs = NULL; - - obj = i915_gem_object_create_shmem(i915, PAGE_SIZE); - if (IS_ERR(obj)) { - err = PTR_ERR(obj); - goto out_restore; - } - - vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB); - if (IS_ERR(vaddr)) { - err = PTR_ERR(vaddr); - goto out_put; - } - *vaddr = 0xdeadbeaf; - - __i915_gem_object_flush_map(obj, 0, 64); - i915_gem_object_unpin_map(obj); - - vma = i915_vma_instance(obj, vm, NULL); - if (IS_ERR(vma)) { - err = PTR_ERR(vma); - goto out_put; - } - - err = i915_vma_pin(vma, 0, 0, PIN_USER); - if (err) - goto out_close; - - err = igt_check_page_sizes(vma); - - i915_vma_unpin(vma); -out_close: - i915_vma_close(vma); -out_put: - i915_gem_object_put(obj); -out_restore: - i915->mm.gemfs = gemfs; - - return err; -} - -static int igt_shrink_thp(void *arg) -{ - struct i915_gem_context *ctx = arg; - struct drm_i915_private *i915 = ctx->i915; - struct i915_address_space *vm = - ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm; - struct drm_i915_gem_object *obj; - struct i915_vma *vma; - unsigned int flags = PIN_USER; - int err; - - /* - * Sanity check shrinking huge-paged object -- make sure nothing blows - * up. - */ - - if (!igt_can_allocate_thp(i915)) { - pr_info("missing THP support, skipping\n"); - return 0; - } - - obj = i915_gem_object_create_shmem(i915, SZ_2M); - if (IS_ERR(obj)) - return PTR_ERR(obj); - - vma = i915_vma_instance(obj, vm, NULL); - if (IS_ERR(vma)) { - err = PTR_ERR(vma); - goto out_put; - } - - err = i915_vma_pin(vma, 0, 0, flags); - if (err) - goto out_close; - - if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_2M) { - pr_info("failed to allocate THP, finishing test early\n"); - goto out_unpin; - } - - err = igt_check_page_sizes(vma); - if (err) - goto out_unpin; - - err = gpu_write(vma, ctx, i915->engine[RCS0], 0, 0xdeadbeaf); - if (err) - goto out_unpin; - - i915_vma_unpin(vma); - - /* - * Now that the pages are *unpinned* shrink-all should invoke - * shmem to truncate our pages. 
- */ - i915_gem_shrink_all(i915); - if (i915_gem_object_has_pages(obj)) { - pr_err("shrink-all didn't truncate the pages\n"); - err = -EINVAL; - goto out_close; - } - - if (obj->mm.page_sizes.sg || obj->mm.page_sizes.phys) { - pr_err("residual page-size bits left\n"); - err = -EINVAL; - goto out_close; - } - - err = i915_vma_pin(vma, 0, 0, flags); - if (err) - goto out_close; - - err = cpu_check(obj, 0, 0xdeadbeaf); - -out_unpin: - i915_vma_unpin(vma); -out_close: - i915_vma_close(vma); -out_put: - i915_gem_object_put(obj); - - return err; -} - -int i915_gem_huge_page_mock_selftests(void) -{ - static const struct i915_subtest tests[] = { - SUBTEST(igt_mock_exhaust_device_supported_pages), - SUBTEST(igt_mock_ppgtt_misaligned_dma), - SUBTEST(igt_mock_ppgtt_huge_fill), - SUBTEST(igt_mock_ppgtt_64K), - }; - struct drm_i915_private *dev_priv; - struct i915_hw_ppgtt *ppgtt; - int err; - - dev_priv = mock_gem_device(); - if (!dev_priv) - return -ENOMEM; - - /* Pretend to be a device which supports the 48b PPGTT */ - mkwrite_device_info(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL; - mkwrite_device_info(dev_priv)->ppgtt_size = 48; - - mutex_lock(&dev_priv->drm.struct_mutex); - ppgtt = i915_ppgtt_create(dev_priv); - if (IS_ERR(ppgtt)) { - err = PTR_ERR(ppgtt); - goto out_unlock; - } - - if (!i915_vm_is_4lvl(&ppgtt->vm)) { - pr_err("failed to create 48b PPGTT\n"); - err = -EINVAL; - goto out_close; - } - - /* If we were ever hit this then it's time to mock the 64K scratch */ - if (!i915_vm_has_scratch_64K(&ppgtt->vm)) { - pr_err("PPGTT missing 64K scratch page\n"); - err = -EINVAL; - goto out_close; - } - - err = i915_subtests(tests, ppgtt); - -out_close: - i915_ppgtt_put(ppgtt); - -out_unlock: - mutex_unlock(&dev_priv->drm.struct_mutex); - drm_dev_put(&dev_priv->drm); - - return err; -} - -int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv) -{ - static const struct i915_subtest tests[] = { - SUBTEST(igt_shrink_thp), - SUBTEST(igt_ppgtt_pin_update), - SUBTEST(igt_tmpfs_fallback), - SUBTEST(igt_ppgtt_exhaust_huge), - SUBTEST(igt_ppgtt_gemfs_huge), - SUBTEST(igt_ppgtt_internal_huge), - }; - struct drm_file *file; - struct i915_gem_context *ctx; - intel_wakeref_t wakeref; - int err; - - if (!HAS_PPGTT(dev_priv)) { - pr_info("PPGTT not supported, skipping live-selftests\n"); - return 0; - } - - if (i915_terminally_wedged(dev_priv)) - return 0; - - file = mock_file(dev_priv); - if (IS_ERR(file)) - return PTR_ERR(file); - - mutex_lock(&dev_priv->drm.struct_mutex); - wakeref = intel_runtime_pm_get(dev_priv); - - ctx = live_context(dev_priv, file); - if (IS_ERR(ctx)) { - err = PTR_ERR(ctx); - goto out_unlock; - } - - if (ctx->ppgtt) - ctx->ppgtt->vm.scrub_64K = true; - - err = i915_subtests(tests, ctx); - -out_unlock: - intel_runtime_pm_put(dev_priv, wakeref); - mutex_unlock(&dev_priv->drm.struct_mutex); - - mock_file_free(dev_priv, file); - - return err; -} diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c index eee838dc0634..cc1ca4be1a00 100644 --- a/drivers/gpu/drm/i915/selftests/i915_active.c +++ b/drivers/gpu/drm/i915/selftests/i915_active.c @@ -4,7 +4,9 @@ * Copyright © 2018 Intel Corporation */ -#include "../i915_selftest.h" +#include "gem/i915_gem_pm.h" + +#include "i915_selftest.h" #include "igt_flush_test.h" #include "lib_sw_fence.h" diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c index c6a9bff85311..83643929416c 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem.c +++ 
b/drivers/gpu/drm/i915/selftests/i915_gem.c @@ -6,11 +6,13 @@ #include -#include "../i915_selftest.h" +#include "gem/selftests/igt_gem_utils.h" +#include "gem/selftests/mock_context.h" + +#include "i915_selftest.h" -#include "igt_gem_utils.h" #include "igt_flush_test.h" -#include "mock_context.h" +#include "mock_drm.h" static int switch_to_context(struct drm_i915_private *i915, struct i915_gem_context *ctx) diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c deleted file mode 100644 index cb25b5fc8027..000000000000 --- a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c +++ /dev/null @@ -1,397 +0,0 @@ -/* - * Copyright © 2017 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. 
- * - */ - -#include - -#include "../i915_selftest.h" -#include "i915_random.h" - -static int cpu_set(struct drm_i915_gem_object *obj, - unsigned long offset, - u32 v) -{ - unsigned int needs_clflush; - struct page *page; - void *map; - u32 *cpu; - int err; - - err = i915_gem_object_prepare_write(obj, &needs_clflush); - if (err) - return err; - - page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT); - map = kmap_atomic(page); - cpu = map + offset_in_page(offset); - - if (needs_clflush & CLFLUSH_BEFORE) - drm_clflush_virt_range(cpu, sizeof(*cpu)); - - *cpu = v; - - if (needs_clflush & CLFLUSH_AFTER) - drm_clflush_virt_range(cpu, sizeof(*cpu)); - - kunmap_atomic(map); - i915_gem_object_finish_access(obj); - - return 0; -} - -static int cpu_get(struct drm_i915_gem_object *obj, - unsigned long offset, - u32 *v) -{ - unsigned int needs_clflush; - struct page *page; - void *map; - u32 *cpu; - int err; - - err = i915_gem_object_prepare_read(obj, &needs_clflush); - if (err) - return err; - - page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT); - map = kmap_atomic(page); - cpu = map + offset_in_page(offset); - - if (needs_clflush & CLFLUSH_BEFORE) - drm_clflush_virt_range(cpu, sizeof(*cpu)); - - *v = *cpu; - - kunmap_atomic(map); - i915_gem_object_finish_access(obj); - - return 0; -} - -static int gtt_set(struct drm_i915_gem_object *obj, - unsigned long offset, - u32 v) -{ - struct i915_vma *vma; - u32 __iomem *map; - int err; - - err = i915_gem_object_set_to_gtt_domain(obj, true); - if (err) - return err; - - vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE); - if (IS_ERR(vma)) - return PTR_ERR(vma); - - map = i915_vma_pin_iomap(vma); - i915_vma_unpin(vma); - if (IS_ERR(map)) - return PTR_ERR(map); - - iowrite32(v, &map[offset / sizeof(*map)]); - i915_vma_unpin_iomap(vma); - - return 0; -} - -static int gtt_get(struct drm_i915_gem_object *obj, - unsigned long offset, - u32 *v) -{ - struct i915_vma *vma; - u32 __iomem *map; - int err; - - err = i915_gem_object_set_to_gtt_domain(obj, false); - if (err) - return err; - - vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE); - if (IS_ERR(vma)) - return PTR_ERR(vma); - - map = i915_vma_pin_iomap(vma); - i915_vma_unpin(vma); - if (IS_ERR(map)) - return PTR_ERR(map); - - *v = ioread32(&map[offset / sizeof(*map)]); - i915_vma_unpin_iomap(vma); - - return 0; -} - -static int wc_set(struct drm_i915_gem_object *obj, - unsigned long offset, - u32 v) -{ - u32 *map; - int err; - - err = i915_gem_object_set_to_wc_domain(obj, true); - if (err) - return err; - - map = i915_gem_object_pin_map(obj, I915_MAP_WC); - if (IS_ERR(map)) - return PTR_ERR(map); - - map[offset / sizeof(*map)] = v; - i915_gem_object_unpin_map(obj); - - return 0; -} - -static int wc_get(struct drm_i915_gem_object *obj, - unsigned long offset, - u32 *v) -{ - u32 *map; - int err; - - err = i915_gem_object_set_to_wc_domain(obj, false); - if (err) - return err; - - map = i915_gem_object_pin_map(obj, I915_MAP_WC); - if (IS_ERR(map)) - return PTR_ERR(map); - - *v = map[offset / sizeof(*map)]; - i915_gem_object_unpin_map(obj); - - return 0; -} - -static int gpu_set(struct drm_i915_gem_object *obj, - unsigned long offset, - u32 v) -{ - struct drm_i915_private *i915 = to_i915(obj->base.dev); - struct i915_request *rq; - struct i915_vma *vma; - u32 *cs; - int err; - - err = i915_gem_object_set_to_gtt_domain(obj, true); - if (err) - return err; - - vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0); - if (IS_ERR(vma)) - return PTR_ERR(vma); - - rq = 
i915_request_create(i915->engine[RCS0]->kernel_context); - if (IS_ERR(rq)) { - i915_vma_unpin(vma); - return PTR_ERR(rq); - } - - cs = intel_ring_begin(rq, 4); - if (IS_ERR(cs)) { - i915_request_add(rq); - i915_vma_unpin(vma); - return PTR_ERR(cs); - } - - if (INTEL_GEN(i915) >= 8) { - *cs++ = MI_STORE_DWORD_IMM_GEN4 | 1 << 22; - *cs++ = lower_32_bits(i915_ggtt_offset(vma) + offset); - *cs++ = upper_32_bits(i915_ggtt_offset(vma) + offset); - *cs++ = v; - } else if (INTEL_GEN(i915) >= 4) { - *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; - *cs++ = 0; - *cs++ = i915_ggtt_offset(vma) + offset; - *cs++ = v; - } else { - *cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL; - *cs++ = i915_ggtt_offset(vma) + offset; - *cs++ = v; - *cs++ = MI_NOOP; - } - intel_ring_advance(rq, cs); - - err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); - i915_vma_unpin(vma); - - i915_request_add(rq); - - return err; -} - -static bool always_valid(struct drm_i915_private *i915) -{ - return true; -} - -static bool needs_fence_registers(struct drm_i915_private *i915) -{ - return !i915_terminally_wedged(i915); -} - -static bool needs_mi_store_dword(struct drm_i915_private *i915) -{ - if (i915_terminally_wedged(i915)) - return false; - - return intel_engine_can_store_dword(i915->engine[RCS0]); -} - -static const struct igt_coherency_mode { - const char *name; - int (*set)(struct drm_i915_gem_object *, unsigned long offset, u32 v); - int (*get)(struct drm_i915_gem_object *, unsigned long offset, u32 *v); - bool (*valid)(struct drm_i915_private *i915); -} igt_coherency_mode[] = { - { "cpu", cpu_set, cpu_get, always_valid }, - { "gtt", gtt_set, gtt_get, needs_fence_registers }, - { "wc", wc_set, wc_get, always_valid }, - { "gpu", gpu_set, NULL, needs_mi_store_dword }, - { }, -}; - -static int igt_gem_coherency(void *arg) -{ - const unsigned int ncachelines = PAGE_SIZE/64; - I915_RND_STATE(prng); - struct drm_i915_private *i915 = arg; - const struct igt_coherency_mode *read, *write, *over; - struct drm_i915_gem_object *obj; - intel_wakeref_t wakeref; - unsigned long count, n; - u32 *offsets, *values; - int err = 0; - - /* We repeatedly write, overwrite and read from a sequence of - * cachelines in order to try and detect incoherency (unflushed writes - * from either the CPU or GPU). Each setter/getter uses our cache - * domain API which should prevent incoherency. 
- */ - - offsets = kmalloc_array(ncachelines, 2*sizeof(u32), GFP_KERNEL); - if (!offsets) - return -ENOMEM; - for (count = 0; count < ncachelines; count++) - offsets[count] = count * 64 + 4 * (count % 16); - - values = offsets + ncachelines; - - mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); - for (over = igt_coherency_mode; over->name; over++) { - if (!over->set) - continue; - - if (!over->valid(i915)) - continue; - - for (write = igt_coherency_mode; write->name; write++) { - if (!write->set) - continue; - - if (!write->valid(i915)) - continue; - - for (read = igt_coherency_mode; read->name; read++) { - if (!read->get) - continue; - - if (!read->valid(i915)) - continue; - - for_each_prime_number_from(count, 1, ncachelines) { - obj = i915_gem_object_create_internal(i915, PAGE_SIZE); - if (IS_ERR(obj)) { - err = PTR_ERR(obj); - goto unlock; - } - - i915_random_reorder(offsets, ncachelines, &prng); - for (n = 0; n < count; n++) - values[n] = prandom_u32_state(&prng); - - for (n = 0; n < count; n++) { - err = over->set(obj, offsets[n], ~values[n]); - if (err) { - pr_err("Failed to set stale value[%ld/%ld] in object using %s, err=%d\n", - n, count, over->name, err); - goto put_object; - } - } - - for (n = 0; n < count; n++) { - err = write->set(obj, offsets[n], values[n]); - if (err) { - pr_err("Failed to set value[%ld/%ld] in object using %s, err=%d\n", - n, count, write->name, err); - goto put_object; - } - } - - for (n = 0; n < count; n++) { - u32 found; - - err = read->get(obj, offsets[n], &found); - if (err) { - pr_err("Failed to get value[%ld/%ld] in object using %s, err=%d\n", - n, count, read->name, err); - goto put_object; - } - - if (found != values[n]) { - pr_err("Value[%ld/%ld] mismatch, (overwrite with %s) wrote [%s] %x read [%s] %x (inverse %x), at offset %x\n", - n, count, over->name, - write->name, values[n], - read->name, found, - ~values[n], offsets[n]); - err = -EINVAL; - goto put_object; - } - } - - __i915_gem_object_release_unless_active(obj); - } - } - } - } -unlock: - intel_runtime_pm_put(i915, wakeref); - mutex_unlock(&i915->drm.struct_mutex); - kfree(offsets); - return err; - -put_object: - __i915_gem_object_release_unless_active(obj); - goto unlock; -} - -int i915_gem_coherency_live_selftests(struct drm_i915_private *i915) -{ - static const struct i915_subtest tests[] = { - SUBTEST(igt_gem_coherency), - }; - - return i915_subtests(tests, i915); -} diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c deleted file mode 100644 index c69c6d9a998b..000000000000 --- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c +++ /dev/null @@ -1,1752 +0,0 @@ -/* - * Copyright © 2017 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - * - */ - -#include - -#include "gt/intel_reset.h" -#include "i915_selftest.h" - -#include "i915_random.h" -#include "igt_flush_test.h" -#include "igt_gem_utils.h" -#include "igt_live_test.h" -#include "igt_reset.h" -#include "igt_spinner.h" - -#include "mock_drm.h" -#include "mock_gem_device.h" -#include "huge_gem_object.h" - -#define DW_PER_PAGE (PAGE_SIZE / sizeof(u32)) - -static int live_nop_switch(void *arg) -{ - const unsigned int nctx = 1024; - struct drm_i915_private *i915 = arg; - struct intel_engine_cs *engine; - struct i915_gem_context **ctx; - enum intel_engine_id id; - intel_wakeref_t wakeref; - struct igt_live_test t; - struct drm_file *file; - unsigned long n; - int err = -ENODEV; - - /* - * Create as many contexts as we can feasibly get away with - * and check we can switch between them rapidly. - * - * Serves as very simple stress test for submission and HW switching - * between contexts. - */ - - if (!DRIVER_CAPS(i915)->has_logical_contexts) - return 0; - - file = mock_file(i915); - if (IS_ERR(file)) - return PTR_ERR(file); - - mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); - - ctx = kcalloc(nctx, sizeof(*ctx), GFP_KERNEL); - if (!ctx) { - err = -ENOMEM; - goto out_unlock; - } - - for (n = 0; n < nctx; n++) { - ctx[n] = live_context(i915, file); - if (IS_ERR(ctx[n])) { - err = PTR_ERR(ctx[n]); - goto out_unlock; - } - } - - for_each_engine(engine, i915, id) { - struct i915_request *rq; - unsigned long end_time, prime; - ktime_t times[2] = {}; - - times[0] = ktime_get_raw(); - for (n = 0; n < nctx; n++) { - rq = igt_request_alloc(ctx[n], engine); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto out_unlock; - } - i915_request_add(rq); - } - if (i915_request_wait(rq, - I915_WAIT_LOCKED, - HZ / 5) < 0) { - pr_err("Failed to populated %d contexts\n", nctx); - i915_gem_set_wedged(i915); - err = -EIO; - goto out_unlock; - } - - times[1] = ktime_get_raw(); - - pr_info("Populated %d contexts on %s in %lluns\n", - nctx, engine->name, ktime_to_ns(times[1] - times[0])); - - err = igt_live_test_begin(&t, i915, __func__, engine->name); - if (err) - goto out_unlock; - - end_time = jiffies + i915_selftest.timeout_jiffies; - for_each_prime_number_from(prime, 2, 8192) { - times[1] = ktime_get_raw(); - - for (n = 0; n < prime; n++) { - rq = igt_request_alloc(ctx[n % nctx], engine); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto out_unlock; - } - - /* - * This space is left intentionally blank. - * - * We do not actually want to perform any - * action with this request, we just want - * to measure the latency in allocation - * and submission of our breadcrumbs - - * ensuring that the bare request is sufficient - * for the system to work (i.e. proper HEAD - * tracking of the rings, interrupt handling, - * etc). It also gives us the lowest bounds - * for latency. 
- */ - - i915_request_add(rq); - } - if (i915_request_wait(rq, - I915_WAIT_LOCKED, - HZ / 5) < 0) { - pr_err("Switching between %ld contexts timed out\n", - prime); - i915_gem_set_wedged(i915); - break; - } - - times[1] = ktime_sub(ktime_get_raw(), times[1]); - if (prime == 2) - times[0] = times[1]; - - if (__igt_timeout(end_time, NULL)) - break; - } - - err = igt_live_test_end(&t); - if (err) - goto out_unlock; - - pr_info("Switch latencies on %s: 1 = %lluns, %lu = %lluns\n", - engine->name, - ktime_to_ns(times[0]), - prime - 1, div64_u64(ktime_to_ns(times[1]), prime - 1)); - } - -out_unlock: - intel_runtime_pm_put(i915, wakeref); - mutex_unlock(&i915->drm.struct_mutex); - mock_file_free(i915, file); - return err; -} - -static struct i915_vma * -gpu_fill_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 value) -{ - struct drm_i915_gem_object *obj; - const int gen = INTEL_GEN(vma->vm->i915); - unsigned long n, size; - u32 *cmd; - int err; - - size = (4 * count + 1) * sizeof(u32); - size = round_up(size, PAGE_SIZE); - obj = i915_gem_object_create_internal(vma->vm->i915, size); - if (IS_ERR(obj)) - return ERR_CAST(obj); - - cmd = i915_gem_object_pin_map(obj, I915_MAP_WB); - if (IS_ERR(cmd)) { - err = PTR_ERR(cmd); - goto err; - } - - GEM_BUG_ON(offset + (count - 1) * PAGE_SIZE > vma->node.size); - offset += vma->node.start; - - for (n = 0; n < count; n++) { - if (gen >= 8) { - *cmd++ = MI_STORE_DWORD_IMM_GEN4; - *cmd++ = lower_32_bits(offset); - *cmd++ = upper_32_bits(offset); - *cmd++ = value; - } else if (gen >= 4) { - *cmd++ = MI_STORE_DWORD_IMM_GEN4 | - (gen < 6 ? MI_USE_GGTT : 0); - *cmd++ = 0; - *cmd++ = offset; - *cmd++ = value; - } else { - *cmd++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL; - *cmd++ = offset; - *cmd++ = value; - } - offset += PAGE_SIZE; - } - *cmd = MI_BATCH_BUFFER_END; - i915_gem_object_flush_map(obj); - i915_gem_object_unpin_map(obj); - - err = i915_gem_object_set_to_gtt_domain(obj, false); - if (err) - goto err; - - vma = i915_vma_instance(obj, vma->vm, NULL); - if (IS_ERR(vma)) { - err = PTR_ERR(vma); - goto err; - } - - err = i915_vma_pin(vma, 0, 0, PIN_USER); - if (err) - goto err; - - return vma; - -err: - i915_gem_object_put(obj); - return ERR_PTR(err); -} - -static unsigned long real_page_count(struct drm_i915_gem_object *obj) -{ - return huge_gem_object_phys_size(obj) >> PAGE_SHIFT; -} - -static unsigned long fake_page_count(struct drm_i915_gem_object *obj) -{ - return huge_gem_object_dma_size(obj) >> PAGE_SHIFT; -} - -static int gpu_fill(struct drm_i915_gem_object *obj, - struct i915_gem_context *ctx, - struct intel_engine_cs *engine, - unsigned int dw) -{ - struct drm_i915_private *i915 = to_i915(obj->base.dev); - struct i915_address_space *vm = - ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm; - struct i915_request *rq; - struct i915_vma *vma; - struct i915_vma *batch; - unsigned int flags; - int err; - - GEM_BUG_ON(obj->base.size > vm->total); - GEM_BUG_ON(!intel_engine_can_store_dword(engine)); - - vma = i915_vma_instance(obj, vm, NULL); - if (IS_ERR(vma)) - return PTR_ERR(vma); - - err = i915_gem_object_set_to_gtt_domain(obj, false); - if (err) - return err; - - err = i915_vma_pin(vma, 0, 0, PIN_HIGH | PIN_USER); - if (err) - return err; - - /* Within the GTT the huge objects maps every page onto - * its 1024 real pages (using phys_pfn = dma_pfn % 1024). 
- * We set the nth dword within the page using the nth - * mapping via the GTT - this should exercise the GTT mapping - * whilst checking that each context provides a unique view - * into the object. - */ - batch = gpu_fill_dw(vma, - (dw * real_page_count(obj)) << PAGE_SHIFT | - (dw * sizeof(u32)), - real_page_count(obj), - dw); - if (IS_ERR(batch)) { - err = PTR_ERR(batch); - goto err_vma; - } - - rq = igt_request_alloc(ctx, engine); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto err_batch; - } - - flags = 0; - if (INTEL_GEN(vm->i915) <= 5) - flags |= I915_DISPATCH_SECURE; - - err = engine->emit_bb_start(rq, - batch->node.start, batch->node.size, - flags); - if (err) - goto err_request; - - err = i915_vma_move_to_active(batch, rq, 0); - if (err) - goto skip_request; - - err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); - if (err) - goto skip_request; - - i915_gem_object_set_active_reference(batch->obj); - i915_vma_unpin(batch); - i915_vma_close(batch); - - i915_vma_unpin(vma); - - i915_request_add(rq); - - return 0; - -skip_request: - i915_request_skip(rq, err); -err_request: - i915_request_add(rq); -err_batch: - i915_vma_unpin(batch); - i915_vma_put(batch); -err_vma: - i915_vma_unpin(vma); - return err; -} - -static int cpu_fill(struct drm_i915_gem_object *obj, u32 value) -{ - const bool has_llc = HAS_LLC(to_i915(obj->base.dev)); - unsigned int n, m, need_flush; - int err; - - err = i915_gem_object_prepare_write(obj, &need_flush); - if (err) - return err; - - for (n = 0; n < real_page_count(obj); n++) { - u32 *map; - - map = kmap_atomic(i915_gem_object_get_page(obj, n)); - for (m = 0; m < DW_PER_PAGE; m++) - map[m] = value; - if (!has_llc) - drm_clflush_virt_range(map, PAGE_SIZE); - kunmap_atomic(map); - } - - i915_gem_object_finish_access(obj); - obj->read_domains = I915_GEM_DOMAIN_GTT | I915_GEM_DOMAIN_CPU; - obj->write_domain = 0; - return 0; -} - -static noinline int cpu_check(struct drm_i915_gem_object *obj, - unsigned int idx, unsigned int max) -{ - unsigned int n, m, needs_flush; - int err; - - err = i915_gem_object_prepare_read(obj, &needs_flush); - if (err) - return err; - - for (n = 0; n < real_page_count(obj); n++) { - u32 *map; - - map = kmap_atomic(i915_gem_object_get_page(obj, n)); - if (needs_flush & CLFLUSH_BEFORE) - drm_clflush_virt_range(map, PAGE_SIZE); - - for (m = 0; m < max; m++) { - if (map[m] != m) { - pr_err("%pS: Invalid value at object %d page %d/%ld, offset %d/%d: found %x expected %x\n", - __builtin_return_address(0), idx, - n, real_page_count(obj), m, max, - map[m], m); - err = -EINVAL; - goto out_unmap; - } - } - - for (; m < DW_PER_PAGE; m++) { - if (map[m] != STACK_MAGIC) { - pr_err("%pS: Invalid value at object %d page %d, offset %d: found %x expected %x (uninitialised)\n", - __builtin_return_address(0), idx, n, m, - map[m], STACK_MAGIC); - err = -EINVAL; - goto out_unmap; - } - } - -out_unmap: - kunmap_atomic(map); - if (err) - break; - } - - i915_gem_object_finish_access(obj); - return err; -} - -static int file_add_object(struct drm_file *file, - struct drm_i915_gem_object *obj) -{ - int err; - - GEM_BUG_ON(obj->base.handle_count); - - /* tie the object to the drm_file for easy reaping */ - err = idr_alloc(&file->object_idr, &obj->base, 1, 0, GFP_KERNEL); - if (err < 0) - return err; - - i915_gem_object_get(obj); - obj->base.handle_count++; - return 0; -} - -static struct drm_i915_gem_object * -create_test_object(struct i915_gem_context *ctx, - struct drm_file *file, - struct list_head *objects) -{ - struct drm_i915_gem_object *obj; - struct 
i915_address_space *vm = - ctx->ppgtt ? &ctx->ppgtt->vm : &ctx->i915->ggtt.vm; - u64 size; - int err; - - size = min(vm->total / 2, 1024ull * DW_PER_PAGE * PAGE_SIZE); - size = round_down(size, DW_PER_PAGE * PAGE_SIZE); - - obj = huge_gem_object(ctx->i915, DW_PER_PAGE * PAGE_SIZE, size); - if (IS_ERR(obj)) - return obj; - - err = file_add_object(file, obj); - i915_gem_object_put(obj); - if (err) - return ERR_PTR(err); - - err = cpu_fill(obj, STACK_MAGIC); - if (err) { - pr_err("Failed to fill object with cpu, err=%d\n", - err); - return ERR_PTR(err); - } - - list_add_tail(&obj->st_link, objects); - return obj; -} - -static unsigned long max_dwords(struct drm_i915_gem_object *obj) -{ - unsigned long npages = fake_page_count(obj); - - GEM_BUG_ON(!IS_ALIGNED(npages, DW_PER_PAGE)); - return npages / DW_PER_PAGE; -} - -static int igt_ctx_exec(void *arg) -{ - struct drm_i915_private *i915 = arg; - struct intel_engine_cs *engine; - enum intel_engine_id id; - int err = -ENODEV; - - /* - * Create a few different contexts (with different mm) and write - * through each ctx/mm using the GPU making sure those writes end - * up in the expected pages of our obj. - */ - - if (!DRIVER_CAPS(i915)->has_logical_contexts) - return 0; - - for_each_engine(engine, i915, id) { - struct drm_i915_gem_object *obj = NULL; - unsigned long ncontexts, ndwords, dw; - struct igt_live_test t; - struct drm_file *file; - IGT_TIMEOUT(end_time); - LIST_HEAD(objects); - - if (!intel_engine_can_store_dword(engine)) - continue; - - if (!engine->context_size) - continue; /* No logical context support in HW */ - - file = mock_file(i915); - if (IS_ERR(file)) - return PTR_ERR(file); - - mutex_lock(&i915->drm.struct_mutex); - - err = igt_live_test_begin(&t, i915, __func__, engine->name); - if (err) - goto out_unlock; - - ncontexts = 0; - ndwords = 0; - dw = 0; - while (!time_after(jiffies, end_time)) { - struct i915_gem_context *ctx; - intel_wakeref_t wakeref; - - ctx = live_context(i915, file); - if (IS_ERR(ctx)) { - err = PTR_ERR(ctx); - goto out_unlock; - } - - if (!obj) { - obj = create_test_object(ctx, file, &objects); - if (IS_ERR(obj)) { - err = PTR_ERR(obj); - goto out_unlock; - } - } - - with_intel_runtime_pm(i915, wakeref) - err = gpu_fill(obj, ctx, engine, dw); - if (err) { - pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n", - ndwords, dw, max_dwords(obj), - engine->name, ctx->hw_id, - yesno(!!ctx->ppgtt), err); - goto out_unlock; - } - - if (++dw == max_dwords(obj)) { - obj = NULL; - dw = 0; - } - - ndwords++; - ncontexts++; - } - - pr_info("Submitted %lu contexts to %s, filling %lu dwords\n", - ncontexts, engine->name, ndwords); - - ncontexts = dw = 0; - list_for_each_entry(obj, &objects, st_link) { - unsigned int rem = - min_t(unsigned int, ndwords - dw, max_dwords(obj)); - - err = cpu_check(obj, ncontexts++, rem); - if (err) - break; - - dw += rem; - } - -out_unlock: - if (igt_live_test_end(&t)) - err = -EIO; - mutex_unlock(&i915->drm.struct_mutex); - - mock_file_free(i915, file); - if (err) - return err; - } - - return 0; -} - -static int igt_shared_ctx_exec(void *arg) -{ - struct drm_i915_private *i915 = arg; - struct i915_gem_context *parent; - struct intel_engine_cs *engine; - enum intel_engine_id id; - struct igt_live_test t; - struct drm_file *file; - int err = 0; - - /* - * Create a few different contexts with the same mm and write - * through each ctx using the GPU making sure those writes end - * up in the expected pages of our obj. 
- */ - if (!DRIVER_CAPS(i915)->has_logical_contexts) - return 0; - - file = mock_file(i915); - if (IS_ERR(file)) - return PTR_ERR(file); - - mutex_lock(&i915->drm.struct_mutex); - - parent = live_context(i915, file); - if (IS_ERR(parent)) { - err = PTR_ERR(parent); - goto out_unlock; - } - - if (!parent->ppgtt) { /* not full-ppgtt; nothing to share */ - err = 0; - goto out_unlock; - } - - err = igt_live_test_begin(&t, i915, __func__, ""); - if (err) - goto out_unlock; - - for_each_engine(engine, i915, id) { - unsigned long ncontexts, ndwords, dw; - struct drm_i915_gem_object *obj = NULL; - IGT_TIMEOUT(end_time); - LIST_HEAD(objects); - - if (!intel_engine_can_store_dword(engine)) - continue; - - dw = 0; - ndwords = 0; - ncontexts = 0; - while (!time_after(jiffies, end_time)) { - struct i915_gem_context *ctx; - intel_wakeref_t wakeref; - - ctx = kernel_context(i915); - if (IS_ERR(ctx)) { - err = PTR_ERR(ctx); - goto out_test; - } - - __assign_ppgtt(ctx, parent->ppgtt); - - if (!obj) { - obj = create_test_object(parent, file, &objects); - if (IS_ERR(obj)) { - err = PTR_ERR(obj); - kernel_context_close(ctx); - goto out_test; - } - } - - err = 0; - with_intel_runtime_pm(i915, wakeref) - err = gpu_fill(obj, ctx, engine, dw); - if (err) { - pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n", - ndwords, dw, max_dwords(obj), - engine->name, ctx->hw_id, - yesno(!!ctx->ppgtt), err); - kernel_context_close(ctx); - goto out_test; - } - - if (++dw == max_dwords(obj)) { - obj = NULL; - dw = 0; - } - - ndwords++; - ncontexts++; - - kernel_context_close(ctx); - } - pr_info("Submitted %lu contexts to %s, filling %lu dwords\n", - ncontexts, engine->name, ndwords); - - ncontexts = dw = 0; - list_for_each_entry(obj, &objects, st_link) { - unsigned int rem = - min_t(unsigned int, ndwords - dw, max_dwords(obj)); - - err = cpu_check(obj, ncontexts++, rem); - if (err) - goto out_test; - - dw += rem; - } - } -out_test: - if (igt_live_test_end(&t)) - err = -EIO; -out_unlock: - mutex_unlock(&i915->drm.struct_mutex); - - mock_file_free(i915, file); - return err; -} - -static struct i915_vma *rpcs_query_batch(struct i915_vma *vma) -{ - struct drm_i915_gem_object *obj; - u32 *cmd; - int err; - - if (INTEL_GEN(vma->vm->i915) < 8) - return ERR_PTR(-EINVAL); - - obj = i915_gem_object_create_internal(vma->vm->i915, PAGE_SIZE); - if (IS_ERR(obj)) - return ERR_CAST(obj); - - cmd = i915_gem_object_pin_map(obj, I915_MAP_WB); - if (IS_ERR(cmd)) { - err = PTR_ERR(cmd); - goto err; - } - - *cmd++ = MI_STORE_REGISTER_MEM_GEN8; - *cmd++ = i915_mmio_reg_offset(GEN8_R_PWR_CLK_STATE); - *cmd++ = lower_32_bits(vma->node.start); - *cmd++ = upper_32_bits(vma->node.start); - *cmd = MI_BATCH_BUFFER_END; - - __i915_gem_object_flush_map(obj, 0, 64); - i915_gem_object_unpin_map(obj); - - vma = i915_vma_instance(obj, vma->vm, NULL); - if (IS_ERR(vma)) { - err = PTR_ERR(vma); - goto err; - } - - err = i915_vma_pin(vma, 0, 0, PIN_USER); - if (err) - goto err; - - return vma; - -err: - i915_gem_object_put(obj); - return ERR_PTR(err); -} - -static int -emit_rpcs_query(struct drm_i915_gem_object *obj, - struct intel_context *ce, - struct i915_request **rq_out) -{ - struct i915_request *rq; - struct i915_vma *batch; - struct i915_vma *vma; - int err; - - GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine)); - - vma = i915_vma_instance(obj, &ce->gem_context->ppgtt->vm, NULL); - if (IS_ERR(vma)) - return PTR_ERR(vma); - - err = i915_gem_object_set_to_gtt_domain(obj, false); - if (err) - return err; - - err 
= i915_vma_pin(vma, 0, 0, PIN_USER); - if (err) - return err; - - batch = rpcs_query_batch(vma); - if (IS_ERR(batch)) { - err = PTR_ERR(batch); - goto err_vma; - } - - rq = i915_request_create(ce); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto err_batch; - } - - err = rq->engine->emit_bb_start(rq, - batch->node.start, batch->node.size, - 0); - if (err) - goto err_request; - - err = i915_vma_move_to_active(batch, rq, 0); - if (err) - goto skip_request; - - err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); - if (err) - goto skip_request; - - i915_gem_object_set_active_reference(batch->obj); - i915_vma_unpin(batch); - i915_vma_close(batch); - - i915_vma_unpin(vma); - - *rq_out = i915_request_get(rq); - - i915_request_add(rq); - - return 0; - -skip_request: - i915_request_skip(rq, err); -err_request: - i915_request_add(rq); -err_batch: - i915_vma_unpin(batch); -err_vma: - i915_vma_unpin(vma); - - return err; -} - -#define TEST_IDLE BIT(0) -#define TEST_BUSY BIT(1) -#define TEST_RESET BIT(2) - -static int -__sseu_prepare(struct drm_i915_private *i915, - const char *name, - unsigned int flags, - struct intel_context *ce, - struct igt_spinner **spin) -{ - struct i915_request *rq; - int ret; - - *spin = NULL; - if (!(flags & (TEST_BUSY | TEST_RESET))) - return 0; - - *spin = kzalloc(sizeof(**spin), GFP_KERNEL); - if (!*spin) - return -ENOMEM; - - ret = igt_spinner_init(*spin, i915); - if (ret) - goto err_free; - - rq = igt_spinner_create_request(*spin, - ce->gem_context, - ce->engine, - MI_NOOP); - if (IS_ERR(rq)) { - ret = PTR_ERR(rq); - goto err_fini; - } - - i915_request_add(rq); - - if (!igt_wait_for_spinner(*spin, rq)) { - pr_err("%s: Spinner failed to start!\n", name); - ret = -ETIMEDOUT; - goto err_end; - } - - return 0; - -err_end: - igt_spinner_end(*spin); -err_fini: - igt_spinner_fini(*spin); -err_free: - kfree(fetch_and_zero(spin)); - return ret; -} - -static int -__read_slice_count(struct drm_i915_private *i915, - struct intel_context *ce, - struct drm_i915_gem_object *obj, - struct igt_spinner *spin, - u32 *rpcs) -{ - struct i915_request *rq = NULL; - u32 s_mask, s_shift; - unsigned int cnt; - u32 *buf, val; - long ret; - - ret = emit_rpcs_query(obj, ce, &rq); - if (ret) - return ret; - - if (spin) - igt_spinner_end(spin); - - ret = i915_request_wait(rq, I915_WAIT_LOCKED, MAX_SCHEDULE_TIMEOUT); - i915_request_put(rq); - if (ret < 0) - return ret; - - buf = i915_gem_object_pin_map(obj, I915_MAP_WB); - if (IS_ERR(buf)) { - ret = PTR_ERR(buf); - return ret; - } - - if (INTEL_GEN(i915) >= 11) { - s_mask = GEN11_RPCS_S_CNT_MASK; - s_shift = GEN11_RPCS_S_CNT_SHIFT; - } else { - s_mask = GEN8_RPCS_S_CNT_MASK; - s_shift = GEN8_RPCS_S_CNT_SHIFT; - } - - val = *buf; - cnt = (val & s_mask) >> s_shift; - *rpcs = val; - - i915_gem_object_unpin_map(obj); - - return cnt; -} - -static int -__check_rpcs(const char *name, u32 rpcs, int slices, unsigned int expected, - const char *prefix, const char *suffix) -{ - if (slices == expected) - return 0; - - if (slices < 0) { - pr_err("%s: %s read slice count failed with %d%s\n", - name, prefix, slices, suffix); - return slices; - } - - pr_err("%s: %s slice count %d is not %u%s\n", - name, prefix, slices, expected, suffix); - - pr_info("RPCS=0x%x; %u%sx%u%s\n", - rpcs, slices, - (rpcs & GEN8_RPCS_S_CNT_ENABLE) ? "*" : "", - (rpcs & GEN8_RPCS_SS_CNT_MASK) >> GEN8_RPCS_SS_CNT_SHIFT, - (rpcs & GEN8_RPCS_SS_CNT_ENABLE) ? 
"*" : ""); - - return -EINVAL; -} - -static int -__sseu_finish(struct drm_i915_private *i915, - const char *name, - unsigned int flags, - struct intel_context *ce, - struct drm_i915_gem_object *obj, - unsigned int expected, - struct igt_spinner *spin) -{ - unsigned int slices = hweight32(ce->engine->sseu.slice_mask); - u32 rpcs = 0; - int ret = 0; - - if (flags & TEST_RESET) { - ret = i915_reset_engine(ce->engine, "sseu"); - if (ret) - goto out; - } - - ret = __read_slice_count(i915, ce, obj, - flags & TEST_RESET ? NULL : spin, &rpcs); - ret = __check_rpcs(name, rpcs, ret, expected, "Context", "!"); - if (ret) - goto out; - - ret = __read_slice_count(i915, ce->engine->kernel_context, obj, - NULL, &rpcs); - ret = __check_rpcs(name, rpcs, ret, slices, "Kernel context", "!"); - -out: - if (spin) - igt_spinner_end(spin); - - if ((flags & TEST_IDLE) && ret == 0) { - ret = i915_gem_wait_for_idle(i915, - I915_WAIT_LOCKED, - MAX_SCHEDULE_TIMEOUT); - if (ret) - return ret; - - ret = __read_slice_count(i915, ce, obj, NULL, &rpcs); - ret = __check_rpcs(name, rpcs, ret, expected, - "Context", " after idle!"); - } - - return ret; -} - -static int -__sseu_test(struct drm_i915_private *i915, - const char *name, - unsigned int flags, - struct intel_context *ce, - struct drm_i915_gem_object *obj, - struct intel_sseu sseu) -{ - struct igt_spinner *spin = NULL; - int ret; - - ret = __sseu_prepare(i915, name, flags, ce, &spin); - if (ret) - return ret; - - ret = __intel_context_reconfigure_sseu(ce, sseu); - if (ret) - goto out_spin; - - ret = __sseu_finish(i915, name, flags, ce, obj, - hweight32(sseu.slice_mask), spin); - -out_spin: - if (spin) { - igt_spinner_end(spin); - igt_spinner_fini(spin); - kfree(spin); - } - return ret; -} - -static int -__igt_ctx_sseu(struct drm_i915_private *i915, - const char *name, - unsigned int flags) -{ - struct intel_engine_cs *engine = i915->engine[RCS0]; - struct intel_sseu default_sseu = engine->sseu; - struct drm_i915_gem_object *obj; - struct i915_gem_context *ctx; - struct intel_context *ce; - struct intel_sseu pg_sseu; - intel_wakeref_t wakeref; - struct drm_file *file; - int ret; - - if (INTEL_GEN(i915) < 9) - return 0; - - if (!RUNTIME_INFO(i915)->sseu.has_slice_pg) - return 0; - - if (hweight32(default_sseu.slice_mask) < 2) - return 0; - - /* - * Gen11 VME friendly power-gated configuration with half enabled - * sub-slices. - */ - pg_sseu = default_sseu; - pg_sseu.slice_mask = 1; - pg_sseu.subslice_mask = - ~(~0 << (hweight32(default_sseu.subslice_mask) / 2)); - - pr_info("SSEU subtest '%s', flags=%x, def_slices=%u, pg_slices=%u\n", - name, flags, hweight32(default_sseu.slice_mask), - hweight32(pg_sseu.slice_mask)); - - file = mock_file(i915); - if (IS_ERR(file)) - return PTR_ERR(file); - - if (flags & TEST_RESET) - igt_global_reset_lock(i915); - - mutex_lock(&i915->drm.struct_mutex); - - ctx = live_context(i915, file); - if (IS_ERR(ctx)) { - ret = PTR_ERR(ctx); - goto out_unlock; - } - i915_gem_context_clear_bannable(ctx); /* to reset and beyond! */ - - obj = i915_gem_object_create_internal(i915, PAGE_SIZE); - if (IS_ERR(obj)) { - ret = PTR_ERR(obj); - goto out_unlock; - } - - wakeref = intel_runtime_pm_get(i915); - - ce = i915_gem_context_get_engine(ctx, RCS0); - if (IS_ERR(ce)) { - ret = PTR_ERR(ce); - goto out_rpm; - } - - ret = intel_context_pin(ce); - if (ret) - goto out_context; - - /* First set the default mask. */ - ret = __sseu_test(i915, name, flags, ce, obj, default_sseu); - if (ret) - goto out_fail; - - /* Then set a power-gated configuration. 
*/ - ret = __sseu_test(i915, name, flags, ce, obj, pg_sseu); - if (ret) - goto out_fail; - - /* Back to defaults. */ - ret = __sseu_test(i915, name, flags, ce, obj, default_sseu); - if (ret) - goto out_fail; - - /* One last power-gated configuration for the road. */ - ret = __sseu_test(i915, name, flags, ce, obj, pg_sseu); - if (ret) - goto out_fail; - -out_fail: - if (igt_flush_test(i915, I915_WAIT_LOCKED)) - ret = -EIO; - - intel_context_unpin(ce); -out_context: - intel_context_put(ce); -out_rpm: - intel_runtime_pm_put(i915, wakeref); - i915_gem_object_put(obj); - -out_unlock: - mutex_unlock(&i915->drm.struct_mutex); - - if (flags & TEST_RESET) - igt_global_reset_unlock(i915); - - mock_file_free(i915, file); - - if (ret) - pr_err("%s: Failed with %d!\n", name, ret); - - return ret; -} - -static int igt_ctx_sseu(void *arg) -{ - struct { - const char *name; - unsigned int flags; - } *phase, phases[] = { - { .name = "basic", .flags = 0 }, - { .name = "idle", .flags = TEST_IDLE }, - { .name = "busy", .flags = TEST_BUSY }, - { .name = "busy-reset", .flags = TEST_BUSY | TEST_RESET }, - { .name = "busy-idle", .flags = TEST_BUSY | TEST_IDLE }, - { .name = "reset-idle", .flags = TEST_RESET | TEST_IDLE }, - }; - unsigned int i; - int ret = 0; - - for (i = 0, phase = phases; ret == 0 && i < ARRAY_SIZE(phases); - i++, phase++) - ret = __igt_ctx_sseu(arg, phase->name, phase->flags); - - return ret; -} - -static int igt_ctx_readonly(void *arg) -{ - struct drm_i915_private *i915 = arg; - struct drm_i915_gem_object *obj = NULL; - struct i915_gem_context *ctx; - struct i915_hw_ppgtt *ppgtt; - unsigned long idx, ndwords, dw; - struct igt_live_test t; - struct drm_file *file; - I915_RND_STATE(prng); - IGT_TIMEOUT(end_time); - LIST_HEAD(objects); - int err = -ENODEV; - - /* - * Create a few read-only objects (with the occasional writable object) - * and try to write into these object checking that the GPU discards - * any write to a read-only object. - */ - - file = mock_file(i915); - if (IS_ERR(file)) - return PTR_ERR(file); - - mutex_lock(&i915->drm.struct_mutex); - - err = igt_live_test_begin(&t, i915, __func__, ""); - if (err) - goto out_unlock; - - ctx = live_context(i915, file); - if (IS_ERR(ctx)) { - err = PTR_ERR(ctx); - goto out_unlock; - } - - ppgtt = ctx->ppgtt ?: i915->mm.aliasing_ppgtt; - if (!ppgtt || !ppgtt->vm.has_read_only) { - err = 0; - goto out_unlock; - } - - ndwords = 0; - dw = 0; - while (!time_after(jiffies, end_time)) { - struct intel_engine_cs *engine; - unsigned int id; - - for_each_engine(engine, i915, id) { - intel_wakeref_t wakeref; - - if (!intel_engine_can_store_dword(engine)) - continue; - - if (!obj) { - obj = create_test_object(ctx, file, &objects); - if (IS_ERR(obj)) { - err = PTR_ERR(obj); - goto out_unlock; - } - - if (prandom_u32_state(&prng) & 1) - i915_gem_object_set_readonly(obj); - } - - err = 0; - with_intel_runtime_pm(i915, wakeref) - err = gpu_fill(obj, ctx, engine, dw); - if (err) { - pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? 
%s], err=%d\n", - ndwords, dw, max_dwords(obj), - engine->name, ctx->hw_id, - yesno(!!ctx->ppgtt), err); - goto out_unlock; - } - - if (++dw == max_dwords(obj)) { - obj = NULL; - dw = 0; - } - ndwords++; - } - } - pr_info("Submitted %lu dwords (across %u engines)\n", - ndwords, RUNTIME_INFO(i915)->num_engines); - - dw = 0; - idx = 0; - list_for_each_entry(obj, &objects, st_link) { - unsigned int rem = - min_t(unsigned int, ndwords - dw, max_dwords(obj)); - unsigned int num_writes; - - num_writes = rem; - if (i915_gem_object_is_readonly(obj)) - num_writes = 0; - - err = cpu_check(obj, idx++, num_writes); - if (err) - break; - - dw += rem; - } - -out_unlock: - if (igt_live_test_end(&t)) - err = -EIO; - mutex_unlock(&i915->drm.struct_mutex); - - mock_file_free(i915, file); - return err; -} - -static int check_scratch(struct i915_gem_context *ctx, u64 offset) -{ - struct drm_mm_node *node = - __drm_mm_interval_first(&ctx->ppgtt->vm.mm, - offset, offset + sizeof(u32) - 1); - if (!node || node->start > offset) - return 0; - - GEM_BUG_ON(offset >= node->start + node->size); - - pr_err("Target offset 0x%08x_%08x overlaps with a node in the mm!\n", - upper_32_bits(offset), lower_32_bits(offset)); - return -EINVAL; -} - -static int write_to_scratch(struct i915_gem_context *ctx, - struct intel_engine_cs *engine, - u64 offset, u32 value) -{ - struct drm_i915_private *i915 = ctx->i915; - struct drm_i915_gem_object *obj; - struct i915_request *rq; - struct i915_vma *vma; - u32 *cmd; - int err; - - GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE); - - obj = i915_gem_object_create_internal(i915, PAGE_SIZE); - if (IS_ERR(obj)) - return PTR_ERR(obj); - - cmd = i915_gem_object_pin_map(obj, I915_MAP_WB); - if (IS_ERR(cmd)) { - err = PTR_ERR(cmd); - goto err; - } - - *cmd++ = MI_STORE_DWORD_IMM_GEN4; - if (INTEL_GEN(i915) >= 8) { - *cmd++ = lower_32_bits(offset); - *cmd++ = upper_32_bits(offset); - } else { - *cmd++ = 0; - *cmd++ = offset; - } - *cmd++ = value; - *cmd = MI_BATCH_BUFFER_END; - __i915_gem_object_flush_map(obj, 0, 64); - i915_gem_object_unpin_map(obj); - - vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL); - if (IS_ERR(vma)) { - err = PTR_ERR(vma); - goto err; - } - - err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED); - if (err) - goto err; - - err = check_scratch(ctx, offset); - if (err) - goto err_unpin; - - rq = igt_request_alloc(ctx, engine); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto err_unpin; - } - - err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0); - if (err) - goto err_request; - - err = i915_vma_move_to_active(vma, rq, 0); - if (err) - goto skip_request; - - i915_gem_object_set_active_reference(obj); - i915_vma_unpin(vma); - i915_vma_close(vma); - - i915_request_add(rq); - - return 0; - -skip_request: - i915_request_skip(rq, err); -err_request: - i915_request_add(rq); -err_unpin: - i915_vma_unpin(vma); -err: - i915_gem_object_put(obj); - return err; -} - -static int read_from_scratch(struct i915_gem_context *ctx, - struct intel_engine_cs *engine, - u64 offset, u32 *value) -{ - struct drm_i915_private *i915 = ctx->i915; - struct drm_i915_gem_object *obj; - const u32 RCS_GPR0 = 0x2600; /* not all engines have their own GPR! 
*/ - const u32 result = 0x100; - struct i915_request *rq; - struct i915_vma *vma; - u32 *cmd; - int err; - - GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE); - - obj = i915_gem_object_create_internal(i915, PAGE_SIZE); - if (IS_ERR(obj)) - return PTR_ERR(obj); - - cmd = i915_gem_object_pin_map(obj, I915_MAP_WB); - if (IS_ERR(cmd)) { - err = PTR_ERR(cmd); - goto err; - } - - memset(cmd, POISON_INUSE, PAGE_SIZE); - if (INTEL_GEN(i915) >= 8) { - *cmd++ = MI_LOAD_REGISTER_MEM_GEN8; - *cmd++ = RCS_GPR0; - *cmd++ = lower_32_bits(offset); - *cmd++ = upper_32_bits(offset); - *cmd++ = MI_STORE_REGISTER_MEM_GEN8; - *cmd++ = RCS_GPR0; - *cmd++ = result; - *cmd++ = 0; - } else { - *cmd++ = MI_LOAD_REGISTER_MEM; - *cmd++ = RCS_GPR0; - *cmd++ = offset; - *cmd++ = MI_STORE_REGISTER_MEM; - *cmd++ = RCS_GPR0; - *cmd++ = result; - } - *cmd = MI_BATCH_BUFFER_END; - - i915_gem_object_flush_map(obj); - i915_gem_object_unpin_map(obj); - - vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL); - if (IS_ERR(vma)) { - err = PTR_ERR(vma); - goto err; - } - - err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED); - if (err) - goto err; - - err = check_scratch(ctx, offset); - if (err) - goto err_unpin; - - rq = igt_request_alloc(ctx, engine); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto err_unpin; - } - - err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0); - if (err) - goto err_request; - - err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); - if (err) - goto skip_request; - - i915_vma_unpin(vma); - i915_vma_close(vma); - - i915_request_add(rq); - - err = i915_gem_object_set_to_cpu_domain(obj, false); - if (err) - goto err; - - cmd = i915_gem_object_pin_map(obj, I915_MAP_WB); - if (IS_ERR(cmd)) { - err = PTR_ERR(cmd); - goto err; - } - - *value = cmd[result / sizeof(*cmd)]; - i915_gem_object_unpin_map(obj); - i915_gem_object_put(obj); - - return 0; - -skip_request: - i915_request_skip(rq, err); -err_request: - i915_request_add(rq); -err_unpin: - i915_vma_unpin(vma); -err: - i915_gem_object_put(obj); - return err; -} - -static int igt_vm_isolation(void *arg) -{ - struct drm_i915_private *i915 = arg; - struct i915_gem_context *ctx_a, *ctx_b; - struct intel_engine_cs *engine; - intel_wakeref_t wakeref; - struct igt_live_test t; - struct drm_file *file; - I915_RND_STATE(prng); - unsigned long count; - unsigned int id; - u64 vm_total; - int err; - - if (INTEL_GEN(i915) < 7) - return 0; - - /* - * The simple goal here is that a write into one context is not - * observed in a second (separate page tables and scratch). 
- */ - - file = mock_file(i915); - if (IS_ERR(file)) - return PTR_ERR(file); - - mutex_lock(&i915->drm.struct_mutex); - - err = igt_live_test_begin(&t, i915, __func__, ""); - if (err) - goto out_unlock; - - ctx_a = live_context(i915, file); - if (IS_ERR(ctx_a)) { - err = PTR_ERR(ctx_a); - goto out_unlock; - } - - ctx_b = live_context(i915, file); - if (IS_ERR(ctx_b)) { - err = PTR_ERR(ctx_b); - goto out_unlock; - } - - /* We can only test vm isolation, if the vm are distinct */ - if (ctx_a->ppgtt == ctx_b->ppgtt) - goto out_unlock; - - vm_total = ctx_a->ppgtt->vm.total; - GEM_BUG_ON(ctx_b->ppgtt->vm.total != vm_total); - vm_total -= I915_GTT_PAGE_SIZE; - - wakeref = intel_runtime_pm_get(i915); - - count = 0; - for_each_engine(engine, i915, id) { - IGT_TIMEOUT(end_time); - unsigned long this = 0; - - if (!intel_engine_can_store_dword(engine)) - continue; - - while (!__igt_timeout(end_time, NULL)) { - u32 value = 0xc5c5c5c5; - u64 offset; - - div64_u64_rem(i915_prandom_u64_state(&prng), - vm_total, &offset); - offset &= -sizeof(u32); - offset += I915_GTT_PAGE_SIZE; - - err = write_to_scratch(ctx_a, engine, - offset, 0xdeadbeef); - if (err == 0) - err = read_from_scratch(ctx_b, engine, - offset, &value); - if (err) - goto out_rpm; - - if (value) { - pr_err("%s: Read %08x from scratch (offset 0x%08x_%08x), after %lu reads!\n", - engine->name, value, - upper_32_bits(offset), - lower_32_bits(offset), - this); - err = -EINVAL; - goto out_rpm; - } - - this++; - } - count += this; - } - pr_info("Checked %lu scratch offsets across %d engines\n", - count, RUNTIME_INFO(i915)->num_engines); - -out_rpm: - intel_runtime_pm_put(i915, wakeref); -out_unlock: - if (igt_live_test_end(&t)) - err = -EIO; - mutex_unlock(&i915->drm.struct_mutex); - - mock_file_free(i915, file); - return err; -} - -static __maybe_unused const char * -__engine_name(struct drm_i915_private *i915, intel_engine_mask_t engines) -{ - struct intel_engine_cs *engine; - intel_engine_mask_t tmp; - - if (engines == ALL_ENGINES) - return "all"; - - for_each_engine_masked(engine, i915, engines, tmp) - return engine->name; - - return "none"; -} - -static void mock_barrier_task(void *data) -{ - unsigned int *counter = data; - - ++*counter; -} - -static int mock_context_barrier(void *arg) -{ -#undef pr_fmt -#define pr_fmt(x) "context_barrier_task():" # x - struct drm_i915_private *i915 = arg; - struct i915_gem_context *ctx; - struct i915_request *rq; - unsigned int counter; - int err; - - /* - * The context barrier provides us with a callback after it emits - * a request; useful for retiring old state after loading new. 
- */ - - mutex_lock(&i915->drm.struct_mutex); - - ctx = mock_context(i915, "mock"); - if (!ctx) { - err = -ENOMEM; - goto unlock; - } - - counter = 0; - err = context_barrier_task(ctx, 0, - NULL, mock_barrier_task, &counter); - if (err) { - pr_err("Failed at line %d, err=%d\n", __LINE__, err); - goto out; - } - if (counter == 0) { - pr_err("Did not retire immediately with 0 engines\n"); - err = -EINVAL; - goto out; - } - - counter = 0; - err = context_barrier_task(ctx, ALL_ENGINES, - NULL, mock_barrier_task, &counter); - if (err) { - pr_err("Failed at line %d, err=%d\n", __LINE__, err); - goto out; - } - if (counter == 0) { - pr_err("Did not retire immediately for all unused engines\n"); - err = -EINVAL; - goto out; - } - - rq = igt_request_alloc(ctx, i915->engine[RCS0]); - if (IS_ERR(rq)) { - pr_err("Request allocation failed!\n"); - goto out; - } - i915_request_add(rq); - - counter = 0; - context_barrier_inject_fault = BIT(RCS0); - err = context_barrier_task(ctx, ALL_ENGINES, - NULL, mock_barrier_task, &counter); - context_barrier_inject_fault = 0; - if (err == -ENXIO) - err = 0; - else - pr_err("Did not hit fault injection!\n"); - if (counter != 0) { - pr_err("Invoked callback on error!\n"); - err = -EIO; - } - if (err) - goto out; - - counter = 0; - err = context_barrier_task(ctx, ALL_ENGINES, - NULL, mock_barrier_task, &counter); - if (err) { - pr_err("Failed at line %d, err=%d\n", __LINE__, err); - goto out; - } - mock_device_flush(i915); - if (counter == 0) { - pr_err("Did not retire on each active engines\n"); - err = -EINVAL; - goto out; - } - -out: - mock_context_close(ctx); -unlock: - mutex_unlock(&i915->drm.struct_mutex); - return err; -#undef pr_fmt -#define pr_fmt(x) x -} - -int i915_gem_context_mock_selftests(void) -{ - static const struct i915_subtest tests[] = { - SUBTEST(mock_context_barrier), - }; - struct drm_i915_private *i915; - int err; - - i915 = mock_gem_device(); - if (!i915) - return -ENOMEM; - - err = i915_subtests(tests, i915); - - drm_dev_put(&i915->drm); - return err; -} - -int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv) -{ - static const struct i915_subtest tests[] = { - SUBTEST(live_nop_switch), - SUBTEST(igt_ctx_exec), - SUBTEST(igt_ctx_readonly), - SUBTEST(igt_ctx_sseu), - SUBTEST(igt_shared_ctx_exec), - SUBTEST(igt_vm_isolation), - }; - - if (i915_terminally_wedged(dev_priv)) - return 0; - - return i915_subtests(tests, dev_priv); -} diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c deleted file mode 100644 index cc65a503e2f0..000000000000 --- a/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c +++ /dev/null @@ -1,404 +0,0 @@ -/* - * Copyright © 2016 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - * - */ - -#include "../i915_selftest.h" - -#include "mock_gem_device.h" -#include "mock_dmabuf.h" - -static int igt_dmabuf_export(void *arg) -{ - struct drm_i915_private *i915 = arg; - struct drm_i915_gem_object *obj; - struct dma_buf *dmabuf; - - obj = i915_gem_object_create_shmem(i915, PAGE_SIZE); - if (IS_ERR(obj)) - return PTR_ERR(obj); - - dmabuf = i915_gem_prime_export(&i915->drm, &obj->base, 0); - i915_gem_object_put(obj); - if (IS_ERR(dmabuf)) { - pr_err("i915_gem_prime_export failed with err=%d\n", - (int)PTR_ERR(dmabuf)); - return PTR_ERR(dmabuf); - } - - dma_buf_put(dmabuf); - return 0; -} - -static int igt_dmabuf_import_self(void *arg) -{ - struct drm_i915_private *i915 = arg; - struct drm_i915_gem_object *obj; - struct drm_gem_object *import; - struct dma_buf *dmabuf; - int err; - - obj = i915_gem_object_create_shmem(i915, PAGE_SIZE); - if (IS_ERR(obj)) - return PTR_ERR(obj); - - dmabuf = i915_gem_prime_export(&i915->drm, &obj->base, 0); - if (IS_ERR(dmabuf)) { - pr_err("i915_gem_prime_export failed with err=%d\n", - (int)PTR_ERR(dmabuf)); - err = PTR_ERR(dmabuf); - goto out; - } - - import = i915_gem_prime_import(&i915->drm, dmabuf); - if (IS_ERR(import)) { - pr_err("i915_gem_prime_import failed with err=%d\n", - (int)PTR_ERR(import)); - err = PTR_ERR(import); - goto out_dmabuf; - } - - if (import != &obj->base) { - pr_err("i915_gem_prime_import created a new object!\n"); - err = -EINVAL; - goto out_import; - } - - err = 0; -out_import: - i915_gem_object_put(to_intel_bo(import)); -out_dmabuf: - dma_buf_put(dmabuf); -out: - i915_gem_object_put(obj); - return err; -} - -static int igt_dmabuf_import(void *arg) -{ - struct drm_i915_private *i915 = arg; - struct drm_i915_gem_object *obj; - struct dma_buf *dmabuf; - void *obj_map, *dma_map; - u32 pattern[] = { 0, 0xaa, 0xcc, 0x55, 0xff }; - int err, i; - - dmabuf = mock_dmabuf(1); - if (IS_ERR(dmabuf)) - return PTR_ERR(dmabuf); - - obj = to_intel_bo(i915_gem_prime_import(&i915->drm, dmabuf)); - if (IS_ERR(obj)) { - pr_err("i915_gem_prime_import failed with err=%d\n", - (int)PTR_ERR(obj)); - err = PTR_ERR(obj); - goto out_dmabuf; - } - - if (obj->base.dev != &i915->drm) { - pr_err("i915_gem_prime_import created a non-i915 object!\n"); - err = -EINVAL; - goto out_obj; - } - - if (obj->base.size != PAGE_SIZE) { - pr_err("i915_gem_prime_import is wrong size found %lld, expected %ld\n", - (long long)obj->base.size, PAGE_SIZE); - err = -EINVAL; - goto out_obj; - } - - dma_map = dma_buf_vmap(dmabuf); - if (!dma_map) { - pr_err("dma_buf_vmap failed\n"); - err = -ENOMEM; - goto out_obj; - } - - if (0) { /* Can not yet map dmabuf */ - obj_map = i915_gem_object_pin_map(obj, I915_MAP_WB); - if (IS_ERR(obj_map)) { - err = PTR_ERR(obj_map); - pr_err("i915_gem_object_pin_map failed with err=%d\n", err); - goto out_dma_map; - } - - for (i = 0; i < ARRAY_SIZE(pattern); i++) { - memset(dma_map, pattern[i], PAGE_SIZE); - if (memchr_inv(obj_map, pattern[i], PAGE_SIZE)) { - err = -EINVAL; - pr_err("imported vmap not all set to %x!\n", pattern[i]); - 
i915_gem_object_unpin_map(obj); - goto out_dma_map; - } - } - - for (i = 0; i < ARRAY_SIZE(pattern); i++) { - memset(obj_map, pattern[i], PAGE_SIZE); - if (memchr_inv(dma_map, pattern[i], PAGE_SIZE)) { - err = -EINVAL; - pr_err("exported vmap not all set to %x!\n", pattern[i]); - i915_gem_object_unpin_map(obj); - goto out_dma_map; - } - } - - i915_gem_object_unpin_map(obj); - } - - err = 0; -out_dma_map: - dma_buf_vunmap(dmabuf, dma_map); -out_obj: - i915_gem_object_put(obj); -out_dmabuf: - dma_buf_put(dmabuf); - return err; -} - -static int igt_dmabuf_import_ownership(void *arg) -{ - struct drm_i915_private *i915 = arg; - struct drm_i915_gem_object *obj; - struct dma_buf *dmabuf; - void *ptr; - int err; - - dmabuf = mock_dmabuf(1); - if (IS_ERR(dmabuf)) - return PTR_ERR(dmabuf); - - ptr = dma_buf_vmap(dmabuf); - if (!ptr) { - pr_err("dma_buf_vmap failed\n"); - err = -ENOMEM; - goto err_dmabuf; - } - - memset(ptr, 0xc5, PAGE_SIZE); - dma_buf_vunmap(dmabuf, ptr); - - obj = to_intel_bo(i915_gem_prime_import(&i915->drm, dmabuf)); - if (IS_ERR(obj)) { - pr_err("i915_gem_prime_import failed with err=%d\n", - (int)PTR_ERR(obj)); - err = PTR_ERR(obj); - goto err_dmabuf; - } - - dma_buf_put(dmabuf); - - err = i915_gem_object_pin_pages(obj); - if (err) { - pr_err("i915_gem_object_pin_pages failed with err=%d\n", err); - goto out_obj; - } - - err = 0; - i915_gem_object_unpin_pages(obj); -out_obj: - i915_gem_object_put(obj); - return err; - -err_dmabuf: - dma_buf_put(dmabuf); - return err; -} - -static int igt_dmabuf_export_vmap(void *arg) -{ - struct drm_i915_private *i915 = arg; - struct drm_i915_gem_object *obj; - struct dma_buf *dmabuf; - void *ptr; - int err; - - obj = i915_gem_object_create_shmem(i915, PAGE_SIZE); - if (IS_ERR(obj)) - return PTR_ERR(obj); - - dmabuf = i915_gem_prime_export(&i915->drm, &obj->base, 0); - if (IS_ERR(dmabuf)) { - pr_err("i915_gem_prime_export failed with err=%d\n", - (int)PTR_ERR(dmabuf)); - err = PTR_ERR(dmabuf); - goto err_obj; - } - i915_gem_object_put(obj); - - ptr = dma_buf_vmap(dmabuf); - if (!ptr) { - pr_err("dma_buf_vmap failed\n"); - err = -ENOMEM; - goto out; - } - - if (memchr_inv(ptr, 0, dmabuf->size)) { - pr_err("Exported object not initialiased to zero!\n"); - err = -EINVAL; - goto out; - } - - memset(ptr, 0xc5, dmabuf->size); - - err = 0; - dma_buf_vunmap(dmabuf, ptr); -out: - dma_buf_put(dmabuf); - return err; - -err_obj: - i915_gem_object_put(obj); - return err; -} - -static int igt_dmabuf_export_kmap(void *arg) -{ - struct drm_i915_private *i915 = arg; - struct drm_i915_gem_object *obj; - struct dma_buf *dmabuf; - void *ptr; - int err; - - obj = i915_gem_object_create_shmem(i915, 2 * PAGE_SIZE); - if (IS_ERR(obj)) - return PTR_ERR(obj); - - dmabuf = i915_gem_prime_export(&i915->drm, &obj->base, 0); - i915_gem_object_put(obj); - if (IS_ERR(dmabuf)) { - err = PTR_ERR(dmabuf); - pr_err("i915_gem_prime_export failed with err=%d\n", err); - return err; - } - - ptr = dma_buf_kmap(dmabuf, 0); - if (!ptr) { - pr_err("dma_buf_kmap failed\n"); - err = -ENOMEM; - goto err; - } - - if (memchr_inv(ptr, 0, PAGE_SIZE)) { - dma_buf_kunmap(dmabuf, 0, ptr); - pr_err("Exported page[0] not initialiased to zero!\n"); - err = -EINVAL; - goto err; - } - - memset(ptr, 0xc5, PAGE_SIZE); - dma_buf_kunmap(dmabuf, 0, ptr); - - ptr = i915_gem_object_pin_map(obj, I915_MAP_WB); - if (IS_ERR(ptr)) { - err = PTR_ERR(ptr); - pr_err("i915_gem_object_pin_map failed with err=%d\n", err); - goto err; - } - memset(ptr + PAGE_SIZE, 0xaa, PAGE_SIZE); - i915_gem_object_flush_map(obj); - 
i915_gem_object_unpin_map(obj); - - ptr = dma_buf_kmap(dmabuf, 1); - if (!ptr) { - pr_err("dma_buf_kmap failed\n"); - err = -ENOMEM; - goto err; - } - - if (memchr_inv(ptr, 0xaa, PAGE_SIZE)) { - dma_buf_kunmap(dmabuf, 1, ptr); - pr_err("Exported page[1] not set to 0xaa!\n"); - err = -EINVAL; - goto err; - } - - memset(ptr, 0xc5, PAGE_SIZE); - dma_buf_kunmap(dmabuf, 1, ptr); - - ptr = dma_buf_kmap(dmabuf, 0); - if (!ptr) { - pr_err("dma_buf_kmap failed\n"); - err = -ENOMEM; - goto err; - } - if (memchr_inv(ptr, 0xc5, PAGE_SIZE)) { - dma_buf_kunmap(dmabuf, 0, ptr); - pr_err("Exported page[0] did not retain 0xc5!\n"); - err = -EINVAL; - goto err; - } - dma_buf_kunmap(dmabuf, 0, ptr); - - ptr = dma_buf_kmap(dmabuf, 2); - if (ptr) { - pr_err("Erroneously kmapped beyond the end of the object!\n"); - dma_buf_kunmap(dmabuf, 2, ptr); - err = -EINVAL; - goto err; - } - - ptr = dma_buf_kmap(dmabuf, -1); - if (ptr) { - pr_err("Erroneously kmapped before the start of the object!\n"); - dma_buf_kunmap(dmabuf, -1, ptr); - err = -EINVAL; - goto err; - } - - err = 0; -err: - dma_buf_put(dmabuf); - return err; -} - -int i915_gem_dmabuf_mock_selftests(void) -{ - static const struct i915_subtest tests[] = { - SUBTEST(igt_dmabuf_export), - SUBTEST(igt_dmabuf_import_self), - SUBTEST(igt_dmabuf_import), - SUBTEST(igt_dmabuf_import_ownership), - SUBTEST(igt_dmabuf_export_vmap), - SUBTEST(igt_dmabuf_export_kmap), - }; - struct drm_i915_private *i915; - int err; - - i915 = mock_gem_device(); - if (!i915) - return -ENOMEM; - - err = i915_subtests(tests, i915); - - drm_dev_put(&i915->drm); - return err; -} - -int i915_gem_dmabuf_live_selftests(struct drm_i915_private *i915) -{ - static const struct i915_subtest tests[] = { - SUBTEST(igt_dmabuf_export), - }; - - return i915_subtests(tests, i915); -} diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c index 4fc6e5445dd1..1d8235303edf 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c @@ -22,11 +22,13 @@ * */ -#include "../i915_selftest.h" +#include "gem/i915_gem_pm.h" +#include "gem/selftests/igt_gem_utils.h" +#include "gem/selftests/mock_context.h" + +#include "i915_selftest.h" -#include "igt_gem_utils.h" #include "lib_sw_fence.h" -#include "mock_context.h" #include "mock_drm.h" #include "mock_gem_device.h" diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c index 9cca66e4420a..f1e95eaf6923 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c @@ -25,10 +25,11 @@ #include #include -#include "../i915_selftest.h" +#include "gem/selftests/mock_context.h" + #include "i915_random.h" +#include "i915_selftest.h" -#include "mock_context.h" #include "mock_drm.h" #include "mock_gem_device.h" diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/selftests/i915_gem_object.c deleted file mode 100644 index a3dd2f1be95b..000000000000 --- a/drivers/gpu/drm/i915/selftests/i915_gem_object.c +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Copyright © 2016 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit 
persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - * - */ - -#include "../i915_selftest.h" - -#include "igt_flush_test.h" -#include "mock_gem_device.h" -#include "huge_gem_object.h" - -static int igt_gem_object(void *arg) -{ - struct drm_i915_private *i915 = arg; - struct drm_i915_gem_object *obj; - int err = -ENOMEM; - - /* Basic test to ensure we can create an object */ - - obj = i915_gem_object_create_shmem(i915, PAGE_SIZE); - if (IS_ERR(obj)) { - err = PTR_ERR(obj); - pr_err("i915_gem_object_create failed, err=%d\n", err); - goto out; - } - - err = 0; - i915_gem_object_put(obj); -out: - return err; -} - -static int igt_gem_huge(void *arg) -{ - const unsigned int nreal = 509; /* just to be awkward */ - struct drm_i915_private *i915 = arg; - struct drm_i915_gem_object *obj; - unsigned int n; - int err; - - /* Basic sanitycheck of our huge fake object allocation */ - - obj = huge_gem_object(i915, - nreal * PAGE_SIZE, - i915->ggtt.vm.total + PAGE_SIZE); - if (IS_ERR(obj)) - return PTR_ERR(obj); - - err = i915_gem_object_pin_pages(obj); - if (err) { - pr_err("Failed to allocate %u pages (%lu total), err=%d\n", - nreal, obj->base.size / PAGE_SIZE, err); - goto out; - } - - for (n = 0; n < obj->base.size / PAGE_SIZE; n++) { - if (i915_gem_object_get_page(obj, n) != - i915_gem_object_get_page(obj, n % nreal)) { - pr_err("Page lookup mismatch at index %u [%u]\n", - n, n % nreal); - err = -EINVAL; - goto out_unpin; - } - } - -out_unpin: - i915_gem_object_unpin_pages(obj); -out: - i915_gem_object_put(obj); - return err; -} - -int i915_gem_object_mock_selftests(void) -{ - static const struct i915_subtest tests[] = { - SUBTEST(igt_gem_object), - }; - struct drm_i915_private *i915; - int err; - - i915 = mock_gem_device(); - if (!i915) - return -ENOMEM; - - err = i915_subtests(tests, i915); - - drm_dev_put(&i915->drm); - return err; -} - -int i915_gem_object_live_selftests(struct drm_i915_private *i915) -{ - static const struct i915_subtest tests[] = { - SUBTEST(igt_gem_huge), - }; - - return i915_subtests(tests, i915); -} diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index b60591531e4a..4fd5356c6577 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -24,12 +24,14 @@ #include -#include "../i915_selftest.h" +#include "gem/i915_gem_pm.h" +#include "gem/selftests/mock_context.h" + #include "i915_random.h" +#include "i915_selftest.h" #include "igt_live_test.h" #include "lib_sw_fence.h" -#include "mock_context.h" #include "mock_drm.h" #include "mock_gem_device.h" diff --git a/drivers/gpu/drm/i915/selftests/i915_timeline.c b/drivers/gpu/drm/i915/selftests/i915_timeline.c index ff9ebe50fae8..acb2cc5136b7 100644 --- a/drivers/gpu/drm/i915/selftests/i915_timeline.c +++ 
b/drivers/gpu/drm/i915/selftests/i915_timeline.c @@ -6,8 +6,10 @@ #include -#include "../i915_selftest.h" +#include "gem/i915_gem_pm.h" + #include "i915_random.h" +#include "i915_selftest.h" #include "igt_flush_test.h" #include "mock_gem_device.h" diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c index 0027c1fac336..425b76133850 100644 --- a/drivers/gpu/drm/i915/selftests/i915_vma.c +++ b/drivers/gpu/drm/i915/selftests/i915_vma.c @@ -24,10 +24,11 @@ #include -#include "../i915_selftest.h" +#include "gem/selftests/mock_context.h" + +#include "i915_selftest.h" #include "mock_gem_device.h" -#include "mock_context.h" #include "mock_gtt.h" static bool assert_vma(struct i915_vma *vma, diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.c b/drivers/gpu/drm/i915/selftests/igt_flush_test.c index e42f3c58536a..5bfd1b2626a2 100644 --- a/drivers/gpu/drm/i915/selftests/igt_flush_test.c +++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.c @@ -4,9 +4,11 @@ * Copyright © 2018 Intel Corporation */ -#include "../i915_drv.h" +#include "gem/i915_gem_context.h" + +#include "i915_drv.h" +#include "i915_selftest.h" -#include "../i915_selftest.h" #include "igt_flush_test.h" int igt_flush_test(struct drm_i915_private *i915, unsigned int flags) diff --git a/drivers/gpu/drm/i915/selftests/igt_gem_utils.c b/drivers/gpu/drm/i915/selftests/igt_gem_utils.c deleted file mode 100644 index 16891b1a3e50..000000000000 --- a/drivers/gpu/drm/i915/selftests/igt_gem_utils.c +++ /dev/null @@ -1,34 +0,0 @@ -/* - * SPDX-License-Identifier: MIT - * - * Copyright © 2018 Intel Corporation - */ - -#include "igt_gem_utils.h" - -#include "gt/intel_context.h" - -#include "../i915_gem_context.h" -#include "../i915_gem_pm.h" -#include "../i915_request.h" - -struct i915_request * -igt_request_alloc(struct i915_gem_context *ctx, struct intel_engine_cs *engine) -{ - struct intel_context *ce; - struct i915_request *rq; - - /* - * Pinning the contexts may generate requests in order to acquire - * GGTT space, so do this first before we reserve a seqno for - * ourselves. 
- */ - ce = i915_gem_context_get_engine(ctx, engine->id); - if (IS_ERR(ce)) - return ERR_CAST(ce); - - rq = intel_context_create_request(ce); - intel_context_put(ce); - - return rq; -} diff --git a/drivers/gpu/drm/i915/selftests/igt_gem_utils.h b/drivers/gpu/drm/i915/selftests/igt_gem_utils.h deleted file mode 100644 index 0f17251cf75d..000000000000 --- a/drivers/gpu/drm/i915/selftests/igt_gem_utils.h +++ /dev/null @@ -1,17 +0,0 @@ -/* - * SPDX-License-Identifier: MIT - * - * Copyright © 2018 Intel Corporation - */ - -#ifndef __IGT_GEM_UTILS_H__ -#define __IGT_GEM_UTILS_H__ - -struct i915_request; -struct i915_gem_context; -struct intel_engine_cs; - -struct i915_request * -igt_request_alloc(struct i915_gem_context *ctx, struct intel_engine_cs *engine); - -#endif /* __IGT_GEM_UTILS_H__ */ diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c index ece8a8a0d3b0..38d6f1b10c54 100644 --- a/drivers/gpu/drm/i915/selftests/igt_spinner.c +++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c @@ -4,7 +4,8 @@ * Copyright © 2018 Intel Corporation */ -#include "igt_gem_utils.h" +#include "gem/selftests/igt_gem_utils.h" + #include "igt_spinner.h" int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915) diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.h b/drivers/gpu/drm/i915/selftests/igt_spinner.h index d312e7cdab68..34a88ac9b47a 100644 --- a/drivers/gpu/drm/i915/selftests/igt_spinner.h +++ b/drivers/gpu/drm/i915/selftests/igt_spinner.h @@ -7,13 +7,12 @@ #ifndef __I915_SELFTESTS_IGT_SPINNER_H__ #define __I915_SELFTESTS_IGT_SPINNER_H__ -#include "../i915_selftest.h" - +#include "gem/i915_gem_context.h" #include "gt/intel_engine.h" -#include "../i915_drv.h" -#include "../i915_request.h" -#include "../i915_gem_context.h" +#include "i915_drv.h" +#include "i915_request.h" +#include "i915_selftest.h" struct igt_spinner { struct drm_i915_private *i915; diff --git a/drivers/gpu/drm/i915/selftests/intel_guc.c b/drivers/gpu/drm/i915/selftests/intel_guc.c index b05a21eaa8f4..7fd0321e0947 100644 --- a/drivers/gpu/drm/i915/selftests/intel_guc.c +++ b/drivers/gpu/drm/i915/selftests/intel_guc.c @@ -22,7 +22,8 @@ * */ -#include "../i915_selftest.h" +#include "i915_selftest.h" +#include "gem/i915_gem_pm.h" /* max doorbell number + negative test for each client type */ #define ATTEMPTS (GUC_NUM_DOORBELLS + GUC_CLIENT_PRIORITY_NUM) diff --git a/drivers/gpu/drm/i915/selftests/mock_context.c b/drivers/gpu/drm/i915/selftests/mock_context.c deleted file mode 100644 index 10e67c931ed1..000000000000 --- a/drivers/gpu/drm/i915/selftests/mock_context.c +++ /dev/null @@ -1,129 +0,0 @@ -/* - * Copyright © 2016 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - * - */ - -#include "mock_context.h" -#include "mock_gtt.h" - -struct i915_gem_context * -mock_context(struct drm_i915_private *i915, - const char *name) -{ - struct i915_gem_context *ctx; - struct i915_gem_engines *e; - int ret; - - ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); - if (!ctx) - return NULL; - - kref_init(&ctx->ref); - INIT_LIST_HEAD(&ctx->link); - ctx->i915 = i915; - - mutex_init(&ctx->engines_mutex); - e = default_engines(ctx); - if (IS_ERR(e)) - goto err_free; - RCU_INIT_POINTER(ctx->engines, e); - - INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL); - INIT_LIST_HEAD(&ctx->handles_list); - INIT_LIST_HEAD(&ctx->hw_id_link); - mutex_init(&ctx->mutex); - - ret = i915_gem_context_pin_hw_id(ctx); - if (ret < 0) - goto err_engines; - - if (name) { - struct i915_hw_ppgtt *ppgtt; - - ctx->name = kstrdup(name, GFP_KERNEL); - if (!ctx->name) - goto err_put; - - ppgtt = mock_ppgtt(i915, name); - if (!ppgtt) - goto err_put; - - __set_ppgtt(ctx, ppgtt); - } - - return ctx; - -err_engines: - free_engines(rcu_access_pointer(ctx->engines)); -err_free: - kfree(ctx); - return NULL; - -err_put: - i915_gem_context_set_closed(ctx); - i915_gem_context_put(ctx); - return NULL; -} - -void mock_context_close(struct i915_gem_context *ctx) -{ - context_close(ctx); -} - -void mock_init_contexts(struct drm_i915_private *i915) -{ - init_contexts(i915); -} - -struct i915_gem_context * -live_context(struct drm_i915_private *i915, struct drm_file *file) -{ - struct i915_gem_context *ctx; - int err; - - lockdep_assert_held(&i915->drm.struct_mutex); - - ctx = i915_gem_create_context(i915, 0); - if (IS_ERR(ctx)) - return ctx; - - err = gem_context_register(ctx, file->driver_priv); - if (err < 0) - goto err_ctx; - - return ctx; - -err_ctx: - context_close(ctx); - return ERR_PTR(err); -} - -struct i915_gem_context * -kernel_context(struct drm_i915_private *i915) -{ - return i915_gem_context_create_kernel(i915, I915_PRIORITY_NORMAL); -} - -void kernel_context_close(struct i915_gem_context *ctx) -{ - context_close(ctx); -} diff --git a/drivers/gpu/drm/i915/selftests/mock_context.h b/drivers/gpu/drm/i915/selftests/mock_context.h deleted file mode 100644 index 29b9d60a158b..000000000000 --- a/drivers/gpu/drm/i915/selftests/mock_context.h +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright © 2016 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - * - */ - -#ifndef __MOCK_CONTEXT_H -#define __MOCK_CONTEXT_H - -void mock_init_contexts(struct drm_i915_private *i915); - -struct i915_gem_context * -mock_context(struct drm_i915_private *i915, - const char *name); - -void mock_context_close(struct i915_gem_context *ctx); - -struct i915_gem_context * -live_context(struct drm_i915_private *i915, struct drm_file *file); - -struct i915_gem_context *kernel_context(struct drm_i915_private *i915); -void kernel_context_close(struct i915_gem_context *ctx); - -#endif /* !__MOCK_CONTEXT_H */ diff --git a/drivers/gpu/drm/i915/selftests/mock_dmabuf.c b/drivers/gpu/drm/i915/selftests/mock_dmabuf.c deleted file mode 100644 index ca682caf1062..000000000000 --- a/drivers/gpu/drm/i915/selftests/mock_dmabuf.c +++ /dev/null @@ -1,162 +0,0 @@ -/* - * Copyright © 2016 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. 
- * - */ - -#include "mock_dmabuf.h" - -static struct sg_table *mock_map_dma_buf(struct dma_buf_attachment *attachment, - enum dma_data_direction dir) -{ - struct mock_dmabuf *mock = to_mock(attachment->dmabuf); - struct sg_table *st; - struct scatterlist *sg; - int i, err; - - st = kmalloc(sizeof(*st), GFP_KERNEL); - if (!st) - return ERR_PTR(-ENOMEM); - - err = sg_alloc_table(st, mock->npages, GFP_KERNEL); - if (err) - goto err_free; - - sg = st->sgl; - for (i = 0; i < mock->npages; i++) { - sg_set_page(sg, mock->pages[i], PAGE_SIZE, 0); - sg = sg_next(sg); - } - - if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) { - err = -ENOMEM; - goto err_st; - } - - return st; - -err_st: - sg_free_table(st); -err_free: - kfree(st); - return ERR_PTR(err); -} - -static void mock_unmap_dma_buf(struct dma_buf_attachment *attachment, - struct sg_table *st, - enum dma_data_direction dir) -{ - dma_unmap_sg(attachment->dev, st->sgl, st->nents, dir); - sg_free_table(st); - kfree(st); -} - -static void mock_dmabuf_release(struct dma_buf *dma_buf) -{ - struct mock_dmabuf *mock = to_mock(dma_buf); - int i; - - for (i = 0; i < mock->npages; i++) - put_page(mock->pages[i]); - - kfree(mock); -} - -static void *mock_dmabuf_vmap(struct dma_buf *dma_buf) -{ - struct mock_dmabuf *mock = to_mock(dma_buf); - - return vm_map_ram(mock->pages, mock->npages, 0, PAGE_KERNEL); -} - -static void mock_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr) -{ - struct mock_dmabuf *mock = to_mock(dma_buf); - - vm_unmap_ram(vaddr, mock->npages); -} - -static void *mock_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num) -{ - struct mock_dmabuf *mock = to_mock(dma_buf); - - return kmap(mock->pages[page_num]); -} - -static void mock_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr) -{ - struct mock_dmabuf *mock = to_mock(dma_buf); - - return kunmap(mock->pages[page_num]); -} - -static int mock_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma) -{ - return -ENODEV; -} - -static const struct dma_buf_ops mock_dmabuf_ops = { - .map_dma_buf = mock_map_dma_buf, - .unmap_dma_buf = mock_unmap_dma_buf, - .release = mock_dmabuf_release, - .map = mock_dmabuf_kmap, - .unmap = mock_dmabuf_kunmap, - .mmap = mock_dmabuf_mmap, - .vmap = mock_dmabuf_vmap, - .vunmap = mock_dmabuf_vunmap, -}; - -static struct dma_buf *mock_dmabuf(int npages) -{ - struct mock_dmabuf *mock; - DEFINE_DMA_BUF_EXPORT_INFO(exp_info); - struct dma_buf *dmabuf; - int i; - - mock = kmalloc(sizeof(*mock) + npages * sizeof(struct page *), - GFP_KERNEL); - if (!mock) - return ERR_PTR(-ENOMEM); - - mock->npages = npages; - for (i = 0; i < npages; i++) { - mock->pages[i] = alloc_page(GFP_KERNEL); - if (!mock->pages[i]) - goto err; - } - - exp_info.ops = &mock_dmabuf_ops; - exp_info.size = npages * PAGE_SIZE; - exp_info.flags = O_CLOEXEC; - exp_info.priv = mock; - - dmabuf = dma_buf_export(&exp_info); - if (IS_ERR(dmabuf)) - goto err; - - return dmabuf; - -err: - while (i--) - put_page(mock->pages[i]); - kfree(mock); - return ERR_PTR(-ENOMEM); -} diff --git a/drivers/gpu/drm/i915/selftests/mock_dmabuf.h b/drivers/gpu/drm/i915/selftests/mock_dmabuf.h deleted file mode 100644 index ec80613159b9..000000000000 --- a/drivers/gpu/drm/i915/selftests/mock_dmabuf.h +++ /dev/null @@ -1,41 +0,0 @@ - -/* - * Copyright © 2016 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without 
restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - * - */ - -#ifndef __MOCK_DMABUF_H__ -#define __MOCK_DMABUF_H__ - -#include - -struct mock_dmabuf { - int npages; - struct page *pages[]; -}; - -static struct mock_dmabuf *to_mock(struct dma_buf *buf) -{ - return buf->priv; -} - -#endif /* !__MOCK_DMABUF_H__ */ diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index 9fd02025d382..e25b74a27f83 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -27,13 +27,14 @@ #include "gt/mock_engine.h" -#include "mock_context.h" #include "mock_request.h" #include "mock_gem_device.h" -#include "mock_gem_object.h" #include "mock_gtt.h" #include "mock_uncore.h" +#include "gem/selftests/mock_context.h" +#include "gem/selftests/mock_gem_object.h" + void mock_device_flush(struct drm_i915_private *i915) { struct intel_engine_cs *engine; diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_object.h b/drivers/gpu/drm/i915/selftests/mock_gem_object.h deleted file mode 100644 index 20acdbee7bd0..000000000000 --- a/drivers/gpu/drm/i915/selftests/mock_gem_object.h +++ /dev/null @@ -1,9 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __MOCK_GEM_OBJECT_H__ -#define __MOCK_GEM_OBJECT_H__ - -struct mock_object { - struct drm_i915_gem_object base; -}; - -#endif /* !__MOCK_GEM_OBJECT_H__ */ diff --git a/drivers/gpu/drm/i915/selftests/mock_request.c b/drivers/gpu/drm/i915/selftests/mock_request.c index b99f7576153c..9390fc09984b 100644 --- a/drivers/gpu/drm/i915/selftests/mock_request.c +++ b/drivers/gpu/drm/i915/selftests/mock_request.c @@ -22,9 +22,9 @@ * */ +#include "gem/selftests/igt_gem_utils.h" #include "gt/mock_engine.h" -#include "igt_gem_utils.h" #include "mock_request.h" struct i915_request * -- cgit v1.2.3 From 37d63f8fdb4941b6a82ef9e59bee62a494225c7e Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 28 May 2019 10:29:50 +0100 Subject: drm/i915: Pull scatterlist utils out of i915_gem.h Out scatterlist utility routines can be pulled out of i915_gem.h for a bit more decluttering. v2: Push I915_GTT_PAGE_SIZE out of i915_scatterlist itself and into the caller. 
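For example (an illustrative sketch, not part of this patch; the helper name and the zeroing loop are made up), a caller that only needs to walk the CPU pages backing an object can now pull in just the new header:

#include <linux/highmem.h>
#include <linux/string.h>

#include "i915_scatterlist.h"

/* Sketch: clear every CPU page backing an object's sg_table. */
static unsigned long zero_sgt_pages(struct sg_table *sgt)
{
        struct sgt_iter iter;
        struct page *page;
        unsigned long n = 0;

        /* for_each_sgt_page() hands back one CPU page per iteration. */
        for_each_sgt_page(page, iter, sgt) {
                void *vaddr = kmap_atomic(page);

                memset(vaddr, 0, PAGE_SIZE);
                kunmap_atomic(vaddr);
                n++;
        }

        return n;
}

The DMA-address walker keeps its old spelling: i915_gem_gtt.h now defines for_each_sgt_dma() on top of __for_each_sgt_dma() with I915_GTT_PAGE_SIZE as the step, which is the v2 change noted above.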
Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190528092956.14910-9-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/Makefile | 1 + drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c | 1 + drivers/gpu/drm/i915/gem/i915_gem_internal.c | 1 + drivers/gpu/drm/i915/gem/i915_gem_pages.c | 1 + drivers/gpu/drm/i915/gem/i915_gem_phys.c | 1 + drivers/gpu/drm/i915/gem/i915_gem_shmem.c | 1 + drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 1 + .../gpu/drm/i915/gem/selftests/huge_gem_object.c | 2 + .../gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c | 1 + drivers/gpu/drm/i915/i915_drv.h | 110 ------------------ drivers/gpu/drm/i915/i915_gem.c | 30 +---- drivers/gpu/drm/i915/i915_gem_fence_reg.c | 2 + drivers/gpu/drm/i915/i915_gem_gtt.c | 3 +- drivers/gpu/drm/i915/i915_gem_gtt.h | 4 +- drivers/gpu/drm/i915/i915_gpu_error.c | 1 + drivers/gpu/drm/i915/i915_scatterlist.c | 39 +++++++ drivers/gpu/drm/i915/i915_scatterlist.h | 127 +++++++++++++++++++++ drivers/gpu/drm/i915/selftests/i915_vma.c | 1 + drivers/gpu/drm/i915/selftests/scatterlist.c | 3 +- 19 files changed, 188 insertions(+), 142 deletions(-) create mode 100644 drivers/gpu/drm/i915/i915_scatterlist.c create mode 100644 drivers/gpu/drm/i915/i915_scatterlist.h (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 3f3d378f467d..2d5bf69dbc4b 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -44,6 +44,7 @@ i915-y += i915_drv.o \ i915_irq.o \ i915_params.o \ i915_pci.o \ + i915_scatterlist.o \ i915_suspend.o \ i915_sysfs.o \ intel_csr.o \ diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c index 600fc926f81e..625397deb701 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c @@ -10,6 +10,7 @@ #include "i915_drv.h" #include "i915_gem_object.h" +#include "i915_scatterlist.h" static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf) { diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c index 85a05a2435e9..0c41e04ab8fa 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c @@ -13,6 +13,7 @@ #include "i915_drv.h" #include "i915_gem.h" #include "i915_gem_object.h" +#include "i915_scatterlist.h" #include "i915_utils.h" #define QUIET (__GFP_NORETRY | __GFP_NOWARN) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c index 3879b3669dea..e53860147f21 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c @@ -6,6 +6,7 @@ #include "i915_drv.h" #include "i915_gem_object.h" +#include "i915_scatterlist.h" void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, struct sg_table *pages, diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c index 1c0ce69f765b..2deac933cf59 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c @@ -15,6 +15,7 @@ #include "i915_drv.h" #include "i915_gem_object.h" +#include "i915_scatterlist.h" static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj) { diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c index 568164ca66fd..665f22ebf8e8 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c 
@@ -9,6 +9,7 @@ #include "i915_drv.h" #include "i915_gem_object.h" +#include "i915_scatterlist.h" /* * Move pages to appropriate lru and release the pagevec, decrementing the diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c index ccac73b72597..cfa990edb351 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c @@ -14,6 +14,7 @@ #include "i915_gem_ioctls.h" #include "i915_gem_object.h" +#include "i915_scatterlist.h" #include "i915_trace.h" #include "intel_drv.h" diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c index 824f3761314c..3c5d17b2b670 100644 --- a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c +++ b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c @@ -4,6 +4,8 @@ * Copyright © 2016 Intel Corporation */ +#include "i915_scatterlist.h" + #include "huge_gem_object.h" static void huge_free_pages(struct drm_i915_gem_object *obj, diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c index b7431712de66..e3a64edef918 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c @@ -4,6 +4,7 @@ * Copyright © 2016 Intel Corporation */ +#include "i915_drv.h" #include "i915_selftest.h" #include "mock_dmabuf.h" diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 38da46e773a3..e26d622b1e41 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2169,111 +2169,6 @@ enum hdmi_force_audio { GENMASK(INTEL_FRONTBUFFER_BITS_PER_PIPE * ((pipe) + 1) - 1, \ INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)) -/* - * Optimised SGL iterator for GEM objects - */ -static __always_inline struct sgt_iter { - struct scatterlist *sgp; - union { - unsigned long pfn; - dma_addr_t dma; - }; - unsigned int curr; - unsigned int max; -} __sgt_iter(struct scatterlist *sgl, bool dma) { - struct sgt_iter s = { .sgp = sgl }; - - if (s.sgp) { - s.max = s.curr = s.sgp->offset; - s.max += s.sgp->length; - if (dma) - s.dma = sg_dma_address(s.sgp); - else - s.pfn = page_to_pfn(sg_page(s.sgp)); - } - - return s; -} - -static inline struct scatterlist *____sg_next(struct scatterlist *sg) -{ - ++sg; - if (unlikely(sg_is_chain(sg))) - sg = sg_chain_ptr(sg); - return sg; -} - -/** - * __sg_next - return the next scatterlist entry in a list - * @sg: The current sg entry - * - * Description: - * If the entry is the last, return NULL; otherwise, step to the next - * element in the array (@sg@+1). If that's a chain pointer, follow it; - * otherwise just return the pointer to the current element. - **/ -static inline struct scatterlist *__sg_next(struct scatterlist *sg) -{ - return sg_is_last(sg) ? NULL : ____sg_next(sg); -} - -/** - * for_each_sgt_dma - iterate over the DMA addresses of the given sg_table - * @__dmap: DMA address (output) - * @__iter: 'struct sgt_iter' (iterator state, internal) - * @__sgt: sg_table to iterate over (input) - */ -#define for_each_sgt_dma(__dmap, __iter, __sgt) \ - for ((__iter) = __sgt_iter((__sgt)->sgl, true); \ - ((__dmap) = (__iter).dma + (__iter).curr); \ - (((__iter).curr += I915_GTT_PAGE_SIZE) >= (__iter).max) ? 
\ - (__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0 : 0) - -/** - * for_each_sgt_page - iterate over the pages of the given sg_table - * @__pp: page pointer (output) - * @__iter: 'struct sgt_iter' (iterator state, internal) - * @__sgt: sg_table to iterate over (input) - */ -#define for_each_sgt_page(__pp, __iter, __sgt) \ - for ((__iter) = __sgt_iter((__sgt)->sgl, false); \ - ((__pp) = (__iter).pfn == 0 ? NULL : \ - pfn_to_page((__iter).pfn + ((__iter).curr >> PAGE_SHIFT))); \ - (((__iter).curr += PAGE_SIZE) >= (__iter).max) ? \ - (__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0 : 0) - -bool i915_sg_trim(struct sg_table *orig_st); - -static inline unsigned int i915_sg_page_sizes(struct scatterlist *sg) -{ - unsigned int page_sizes; - - page_sizes = 0; - while (sg) { - GEM_BUG_ON(sg->offset); - GEM_BUG_ON(!IS_ALIGNED(sg->length, PAGE_SIZE)); - page_sizes |= sg->length; - sg = __sg_next(sg); - } - - return page_sizes; -} - -static inline unsigned int i915_sg_segment_size(void) -{ - unsigned int size = swiotlb_max_segment(); - - if (size == 0) - return SCATTERLIST_MAX_SEGMENT; - - size = rounddown(size, PAGE_SIZE); - /* swiotlb_max_segment_size can return 1 byte when it means one page. */ - if (size < PAGE_SIZE) - size = PAGE_SIZE; - - return size; -} - #define INTEL_INFO(dev_priv) (&(dev_priv)->__info) #define RUNTIME_INFO(dev_priv) (&(dev_priv)->__runtime) #define DRIVER_CAPS(dev_priv) (&(dev_priv)->caps) @@ -2809,11 +2704,6 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj); void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv); -static inline int __sg_page_count(const struct scatterlist *sg) -{ - return sg->length >> PAGE_SHIFT; -} - static inline int __must_check i915_mutex_lock_interruptible(struct drm_device *dev) { diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 096e31e3df92..cfe0d9ff4e5d 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -50,6 +50,7 @@ #include "gt/intel_workarounds.h" #include "i915_drv.h" +#include "i915_scatterlist.h" #include "i915_trace.h" #include "i915_vgpu.h" @@ -1085,34 +1086,6 @@ void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv) } } -bool i915_sg_trim(struct sg_table *orig_st) -{ - struct sg_table new_st; - struct scatterlist *sg, *new_sg; - unsigned int i; - - if (orig_st->nents == orig_st->orig_nents) - return false; - - if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL | __GFP_NOWARN)) - return false; - - new_sg = new_st.sgl; - for_each_sg(orig_st->sgl, sg, orig_st->nents, i) { - sg_set_page(new_sg, sg_page(sg), sg->length, 0); - sg_dma_address(new_sg) = sg_dma_address(sg); - sg_dma_len(new_sg) = sg_dma_len(sg); - - new_sg = sg_next(new_sg); - } - GEM_BUG_ON(new_sg); /* Should walk exactly nents and hit the end */ - - sg_free_table(orig_st); - - *orig_st = new_st; - return true; -} - static unsigned long to_wait_timeout(s64 timeout_ns) { if (timeout_ns < 0) @@ -2370,7 +2343,6 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old, } #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) -#include "selftests/scatterlist.c" #include "selftests/mock_gem_device.c" #include "selftests/i915_gem.c" #endif diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c index 3084f52e3372..2e9e32330aaa 100644 --- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c +++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c @@ -22,7 +22,9 @@ */ #include + #include "i915_drv.h" +#include "i915_scatterlist.h" /** * DOC: fence 
register handling diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index bc5ddeb845b0..ccc4bdf356c1 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -36,8 +36,9 @@ #include #include "i915_drv.h" -#include "i915_vgpu.h" +#include "i915_scatterlist.h" #include "i915_trace.h" +#include "i915_vgpu.h" #include "intel_drv.h" #include "intel_frontbuffer.h" diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index 811fa05c0322..73b6608740f2 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -40,6 +40,7 @@ #include "gt/intel_reset.h" #include "i915_request.h" +#include "i915_scatterlist.h" #include "i915_selftest.h" #include "i915_timeline.h" @@ -162,7 +163,8 @@ typedef u64 gen8_ppgtt_pml4e_t; #define GEN8_PDE_IPS_64K BIT(11) #define GEN8_PDE_PS_2M BIT(7) -struct sg_table; +#define for_each_sgt_dma(__dmap, __iter, __sgt) \ + __for_each_sgt_dma(__dmap, __iter, __sgt, I915_GTT_PAGE_SIZE) struct intel_remapped_plane_info { /* in gtt pages */ diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index c86865a34972..707811256501 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -40,6 +40,7 @@ #include "i915_drv.h" #include "i915_gpu_error.h" +#include "i915_scatterlist.h" #include "intel_atomic.h" #include "intel_csr.h" #include "intel_overlay.h" diff --git a/drivers/gpu/drm/i915/i915_scatterlist.c b/drivers/gpu/drm/i915/i915_scatterlist.c new file mode 100644 index 000000000000..cc6b3846a8c7 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_scatterlist.c @@ -0,0 +1,39 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2016 Intel Corporation + */ + +#include "i915_scatterlist.h" + +bool i915_sg_trim(struct sg_table *orig_st) +{ + struct sg_table new_st; + struct scatterlist *sg, *new_sg; + unsigned int i; + + if (orig_st->nents == orig_st->orig_nents) + return false; + + if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL | __GFP_NOWARN)) + return false; + + new_sg = new_st.sgl; + for_each_sg(orig_st->sgl, sg, orig_st->nents, i) { + sg_set_page(new_sg, sg_page(sg), sg->length, 0); + sg_dma_address(new_sg) = sg_dma_address(sg); + sg_dma_len(new_sg) = sg_dma_len(sg); + + new_sg = sg_next(new_sg); + } + GEM_BUG_ON(new_sg); /* Should walk exactly nents and hit the end */ + + sg_free_table(orig_st); + + *orig_st = new_st; + return true; +} + +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) +#include "selftests/scatterlist.c" +#endif diff --git a/drivers/gpu/drm/i915/i915_scatterlist.h b/drivers/gpu/drm/i915/i915_scatterlist.h new file mode 100644 index 000000000000..6617963df9ed --- /dev/null +++ b/drivers/gpu/drm/i915/i915_scatterlist.h @@ -0,0 +1,127 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2016 Intel Corporation + */ + +#ifndef I915_SCATTERLIST_H +#define I915_SCATTERLIST_H + +#include +#include +#include + +#include "i915_gem.h" + +/* + * Optimised SGL iterator for GEM objects + */ +static __always_inline struct sgt_iter { + struct scatterlist *sgp; + union { + unsigned long pfn; + dma_addr_t dma; + }; + unsigned int curr; + unsigned int max; +} __sgt_iter(struct scatterlist *sgl, bool dma) { + struct sgt_iter s = { .sgp = sgl }; + + if (s.sgp) { + s.max = s.curr = s.sgp->offset; + s.max += s.sgp->length; + if (dma) + s.dma = sg_dma_address(s.sgp); + else + s.pfn = page_to_pfn(sg_page(s.sgp)); + } + + return s; +} + +static inline int 
__sg_page_count(const struct scatterlist *sg) +{ + return sg->length >> PAGE_SHIFT; +} + +static inline struct scatterlist *____sg_next(struct scatterlist *sg) +{ + ++sg; + if (unlikely(sg_is_chain(sg))) + sg = sg_chain_ptr(sg); + return sg; +} + +/** + * __sg_next - return the next scatterlist entry in a list + * @sg: The current sg entry + * + * Description: + * If the entry is the last, return NULL; otherwise, step to the next + * element in the array (@sg@+1). If that's a chain pointer, follow it; + * otherwise just return the pointer to the current element. + **/ +static inline struct scatterlist *__sg_next(struct scatterlist *sg) +{ + return sg_is_last(sg) ? NULL : ____sg_next(sg); +} + +/** + * __for_each_sgt_dma - iterate over the DMA addresses of the given sg_table + * @__dmap: DMA address (output) + * @__iter: 'struct sgt_iter' (iterator state, internal) + * @__sgt: sg_table to iterate over (input) + * @__step: step size + */ +#define __for_each_sgt_dma(__dmap, __iter, __sgt, __step) \ + for ((__iter) = __sgt_iter((__sgt)->sgl, true); \ + ((__dmap) = (__iter).dma + (__iter).curr); \ + (((__iter).curr += (__step)) >= (__iter).max) ? \ + (__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0 : 0) + +/** + * for_each_sgt_page - iterate over the pages of the given sg_table + * @__pp: page pointer (output) + * @__iter: 'struct sgt_iter' (iterator state, internal) + * @__sgt: sg_table to iterate over (input) + */ +#define for_each_sgt_page(__pp, __iter, __sgt) \ + for ((__iter) = __sgt_iter((__sgt)->sgl, false); \ + ((__pp) = (__iter).pfn == 0 ? NULL : \ + pfn_to_page((__iter).pfn + ((__iter).curr >> PAGE_SHIFT))); \ + (((__iter).curr += PAGE_SIZE) >= (__iter).max) ? \ + (__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0 : 0) + +static inline unsigned int i915_sg_page_sizes(struct scatterlist *sg) +{ + unsigned int page_sizes; + + page_sizes = 0; + while (sg) { + GEM_BUG_ON(sg->offset); + GEM_BUG_ON(!IS_ALIGNED(sg->length, PAGE_SIZE)); + page_sizes |= sg->length; + sg = __sg_next(sg); + } + + return page_sizes; +} + +static inline unsigned int i915_sg_segment_size(void) +{ + unsigned int size = swiotlb_max_segment(); + + if (size == 0) + return SCATTERLIST_MAX_SEGMENT; + + size = rounddown(size, PAGE_SIZE); + /* swiotlb_max_segment_size can return 1 byte when it means one page. 
*/ + if (size < PAGE_SIZE) + size = PAGE_SIZE; + + return size; +} + +bool i915_sg_trim(struct sg_table *orig_st); + +#endif diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c index 425b76133850..6919207883f6 100644 --- a/drivers/gpu/drm/i915/selftests/i915_vma.c +++ b/drivers/gpu/drm/i915/selftests/i915_vma.c @@ -26,6 +26,7 @@ #include "gem/selftests/mock_context.h" +#include "i915_scatterlist.h" #include "i915_selftest.h" #include "mock_gem_device.h" diff --git a/drivers/gpu/drm/i915/selftests/scatterlist.c b/drivers/gpu/drm/i915/selftests/scatterlist.c index cd6d2a16071f..d599186d5b71 100644 --- a/drivers/gpu/drm/i915/selftests/scatterlist.c +++ b/drivers/gpu/drm/i915/selftests/scatterlist.c @@ -24,7 +24,8 @@ #include #include -#include "../i915_selftest.h" +#include "i915_selftest.h" +#include "i915_utils.h" #define PFN_BIAS (1 << 10) -- cgit v1.2.3 From 6951e5893b4821f68a48022842f67c3033ca7b30 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 28 May 2019 10:29:51 +0100 Subject: drm/i915: Move GEM object domain management from struct_mutex to local Use the per-object local lock to control the cache domain of the individual GEM objects, not struct_mutex. This is a huge leap forward for us in terms of object-level synchronisation; execbuffers are coordinated using the ww_mutex and pread/pwrite is finally fully serialised again. Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190528092956.14910-10-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/Makefile | 1 + drivers/gpu/drm/i915/gem/i915_gem_clflush.c | 4 +- drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c | 10 +- drivers/gpu/drm/i915/gem/i915_gem_domain.c | 70 ++++++------ drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 123 ++++++++++++++------- drivers/gpu/drm/i915/gem/i915_gem_fence.c | 96 ++++++++++++++++ drivers/gpu/drm/i915/gem/i915_gem_object.c | 2 + drivers/gpu/drm/i915/gem/i915_gem_object.h | 14 +++ drivers/gpu/drm/i915/gem/i915_gem_pm.c | 7 +- drivers/gpu/drm/i915/gem/selftests/huge_pages.c | 12 +- .../drm/i915/gem/selftests/i915_gem_coherency.c | 12 ++ .../gpu/drm/i915/gem/selftests/i915_gem_context.c | 20 ++++ drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c | 6 + drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c | 4 +- drivers/gpu/drm/i915/gt/selftest_hangcheck.c | 4 + drivers/gpu/drm/i915/gt/selftest_lrc.c | 2 + drivers/gpu/drm/i915/gt/selftest_workarounds.c | 6 + drivers/gpu/drm/i915/gvt/cmd_parser.c | 2 + drivers/gpu/drm/i915/gvt/scheduler.c | 8 +- drivers/gpu/drm/i915/i915_cmd_parser.c | 23 ++-- drivers/gpu/drm/i915/i915_gem.c | 122 +++++++++++--------- drivers/gpu/drm/i915/i915_gem_gtt.c | 5 +- drivers/gpu/drm/i915/i915_gem_render_state.c | 2 + drivers/gpu/drm/i915/i915_vma.c | 8 +- drivers/gpu/drm/i915/i915_vma.h | 12 ++ drivers/gpu/drm/i915/intel_display.c | 5 + drivers/gpu/drm/i915/intel_guc_log.c | 6 +- drivers/gpu/drm/i915/intel_overlay.c | 25 +++-- drivers/gpu/drm/i915/intel_uc_fw.c | 6 +- drivers/gpu/drm/i915/selftests/i915_request.c | 4 + drivers/gpu/drm/i915/selftests/i915_vma.c | 2 + drivers/gpu/drm/i915/selftests/igt_spinner.c | 2 + 32 files changed, 445 insertions(+), 180 deletions(-) create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_fence.c (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 2d5bf69dbc4b..44d6e0bcf6a6 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -93,6 +93,7 @@ gem-y += \ 
gem/i915_gem_dmabuf.o \ gem/i915_gem_domain.o \ gem/i915_gem_execbuffer.o \ + gem/i915_gem_fence.o \ gem/i915_gem_internal.o \ gem/i915_gem_object.o \ gem/i915_gem_mman.o \ diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c index 45d238d784fc..537aa2337cc8 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c @@ -95,6 +95,8 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, { struct clflush *clflush; + assert_object_held(obj); + /* * Stolen memory is always coherent with the GPU as it is explicitly * marked as wc by the system, or the system is cache-coherent. @@ -144,9 +146,7 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, true, I915_FENCE_TIMEOUT, I915_FENCE_GFP); - reservation_object_lock(obj->resv, NULL); reservation_object_add_excl_fence(obj->resv, &clflush->dma); - reservation_object_unlock(obj->resv); i915_sw_fence_commit(&clflush->wait); } else if (obj->mm.pages) { diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c index 625397deb701..a93e233cfaa9 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c @@ -151,7 +151,6 @@ static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct * static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction) { struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf); - struct drm_device *dev = obj->base.dev; bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE); int err; @@ -159,12 +158,12 @@ static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_dire if (err) return err; - err = i915_mutex_lock_interruptible(dev); + err = i915_gem_object_lock_interruptible(obj); if (err) goto out; err = i915_gem_object_set_to_cpu_domain(obj, write); - mutex_unlock(&dev->struct_mutex); + i915_gem_object_unlock(obj); out: i915_gem_object_unpin_pages(obj); @@ -174,19 +173,18 @@ out: static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction) { struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf); - struct drm_device *dev = obj->base.dev; int err; err = i915_gem_object_pin_pages(obj); if (err) return err; - err = i915_mutex_lock_interruptible(dev); + err = i915_gem_object_lock_interruptible(obj); if (err) goto out; err = i915_gem_object_set_to_gtt_domain(obj, false); - mutex_unlock(&dev->struct_mutex); + i915_gem_object_unlock(obj); out: i915_gem_object_unpin_pages(obj); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c index bbc7fb758186..cce96e6c6e52 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c @@ -29,9 +29,9 @@ void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj) if (!READ_ONCE(obj->pin_global)) return; - mutex_lock(&obj->base.dev->struct_mutex); + i915_gem_object_lock(obj); __i915_gem_object_flush_for_display(obj); - mutex_unlock(&obj->base.dev->struct_mutex); + i915_gem_object_unlock(obj); } /** @@ -47,11 +47,10 @@ i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write) { int ret; - lockdep_assert_held(&obj->base.dev->struct_mutex); + assert_object_held(obj); ret = i915_gem_object_wait(obj, I915_WAIT_INTERRUPTIBLE | - I915_WAIT_LOCKED | (write ? 
I915_WAIT_ALL : 0), MAX_SCHEDULE_TIMEOUT); if (ret) @@ -109,11 +108,10 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) { int ret; - lockdep_assert_held(&obj->base.dev->struct_mutex); + assert_object_held(obj); ret = i915_gem_object_wait(obj, I915_WAIT_INTERRUPTIBLE | - I915_WAIT_LOCKED | (write ? I915_WAIT_ALL : 0), MAX_SCHEDULE_TIMEOUT); if (ret) @@ -179,7 +177,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, struct i915_vma *vma; int ret; - lockdep_assert_held(&obj->base.dev->struct_mutex); + assert_object_held(obj); if (obj->cache_level == cache_level) return 0; @@ -228,7 +226,6 @@ restart: */ ret = i915_gem_object_wait(obj, I915_WAIT_INTERRUPTIBLE | - I915_WAIT_LOCKED | I915_WAIT_ALL, MAX_SCHEDULE_TIMEOUT); if (ret) @@ -372,12 +369,16 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, if (ret) goto out; - ret = i915_mutex_lock_interruptible(dev); + ret = mutex_lock_interruptible(&i915->drm.struct_mutex); if (ret) goto out; - ret = i915_gem_object_set_cache_level(obj, level); - mutex_unlock(&dev->struct_mutex); + ret = i915_gem_object_lock_interruptible(obj); + if (ret == 0) { + ret = i915_gem_object_set_cache_level(obj, level); + i915_gem_object_unlock(obj); + } + mutex_unlock(&i915->drm.struct_mutex); out: i915_gem_object_put(obj); @@ -399,7 +400,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, struct i915_vma *vma; int ret; - lockdep_assert_held(&obj->base.dev->struct_mutex); + assert_object_held(obj); /* Mark the global pin early so that we account for the * display coherency whilst setting up the cache domains. @@ -484,16 +485,18 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj) void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma) { - lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); + struct drm_i915_gem_object *obj = vma->obj; + + assert_object_held(obj); - if (WARN_ON(vma->obj->pin_global == 0)) + if (WARN_ON(obj->pin_global == 0)) return; - if (--vma->obj->pin_global == 0) + if (--obj->pin_global == 0) vma->display_alignment = I915_GTT_MIN_ALIGNMENT; /* Bump the LRU to try and avoid premature eviction whilst flipping */ - i915_gem_object_bump_inactive_ggtt(vma->obj); + i915_gem_object_bump_inactive_ggtt(obj); i915_vma_unpin(vma); } @@ -511,11 +514,10 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write) { int ret; - lockdep_assert_held(&obj->base.dev->struct_mutex); + assert_object_held(obj); ret = i915_gem_object_wait(obj, I915_WAIT_INTERRUPTIBLE | - I915_WAIT_LOCKED | (write ? 
I915_WAIT_ALL : 0), MAX_SCHEDULE_TIMEOUT); if (ret) @@ -637,7 +639,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, if (err) goto out; - err = i915_mutex_lock_interruptible(dev); + err = i915_gem_object_lock_interruptible(obj); if (err) goto out_unpin; @@ -651,7 +653,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, /* And bump the LRU for this access */ i915_gem_object_bump_inactive_ggtt(obj); - mutex_unlock(&dev->struct_mutex); + i915_gem_object_unlock(obj); if (write_domain != 0) intel_fb_obj_invalidate(obj, @@ -674,22 +676,23 @@ int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj, { int ret; - lockdep_assert_held(&obj->base.dev->struct_mutex); - *needs_clflush = 0; if (!i915_gem_object_has_struct_page(obj)) return -ENODEV; + ret = i915_gem_object_lock_interruptible(obj); + if (ret) + return ret; + ret = i915_gem_object_wait(obj, - I915_WAIT_INTERRUPTIBLE | - I915_WAIT_LOCKED, + I915_WAIT_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); if (ret) - return ret; + goto err_unlock; ret = i915_gem_object_pin_pages(obj); if (ret) - return ret; + goto err_unlock; if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ || !static_cpu_has(X86_FEATURE_CLFLUSH)) { @@ -717,6 +720,8 @@ out: err_unpin: i915_gem_object_unpin_pages(obj); +err_unlock: + i915_gem_object_unlock(obj); return ret; } @@ -725,23 +730,24 @@ int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj, { int ret; - lockdep_assert_held(&obj->base.dev->struct_mutex); - *needs_clflush = 0; if (!i915_gem_object_has_struct_page(obj)) return -ENODEV; + ret = i915_gem_object_lock_interruptible(obj); + if (ret) + return ret; + ret = i915_gem_object_wait(obj, I915_WAIT_INTERRUPTIBLE | - I915_WAIT_LOCKED | I915_WAIT_ALL, MAX_SCHEDULE_TIMEOUT); if (ret) - return ret; + goto err_unlock; ret = i915_gem_object_pin_pages(obj); if (ret) - return ret; + goto err_unlock; if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE || !static_cpu_has(X86_FEATURE_CLFLUSH)) { @@ -778,5 +784,7 @@ out: err_unpin: i915_gem_object_unpin_pages(obj); +err_unlock: + i915_gem_object_unlock(obj); return ret; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index 09e64bf33842..ed522fdfbe7f 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -1075,7 +1075,9 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj, if (use_cpu_reloc(cache, obj)) return NULL; + i915_gem_object_lock(obj); err = i915_gem_object_set_to_gtt_domain(obj, true); + i915_gem_object_unlock(obj); if (err) return ERR_PTR(err); @@ -1164,6 +1166,26 @@ static void clflush_write32(u32 *addr, u32 value, unsigned int flushes) *addr = value; } +static int reloc_move_to_gpu(struct i915_request *rq, struct i915_vma *vma) +{ + struct drm_i915_gem_object *obj = vma->obj; + int err; + + i915_vma_lock(vma); + + if (obj->cache_dirty & ~obj->cache_coherent) + i915_gem_clflush_object(obj, 0); + obj->write_domain = 0; + + err = i915_request_await_object(rq, vma->obj, true); + if (err == 0) + err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); + + i915_vma_unlock(vma); + + return err; +} + static int __reloc_gpu_alloc(struct i915_execbuffer *eb, struct i915_vma *vma, unsigned int len) @@ -1175,15 +1197,6 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb, u32 *cmd; int err; - if (DBG_FORCE_RELOC == FORCE_GPU_RELOC) { - obj = vma->obj; - if (obj->cache_dirty & ~obj->cache_coherent) - i915_gem_clflush_object(obj, 0); - 
obj->write_domain = 0; - } - - GEM_BUG_ON(vma->obj->write_domain & I915_GEM_DOMAIN_CPU); - obj = i915_gem_batch_pool_get(&eb->engine->batch_pool, PAGE_SIZE); if (IS_ERR(obj)) return PTR_ERR(obj); @@ -1212,7 +1225,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb, goto err_unpin; } - err = i915_request_await_object(rq, vma->obj, true); + err = reloc_move_to_gpu(rq, vma); if (err) goto err_request; @@ -1220,14 +1233,12 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb, batch->node.start, PAGE_SIZE, cache->gen > 5 ? 0 : I915_DISPATCH_SECURE); if (err) - goto err_request; + goto skip_request; + i915_vma_lock(batch); GEM_BUG_ON(!reservation_object_test_signaled_rcu(batch->resv, true)); err = i915_vma_move_to_active(batch, rq, 0); - if (err) - goto skip_request; - - err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); + i915_vma_unlock(batch); if (err) goto skip_request; @@ -1837,24 +1848,59 @@ slow: static int eb_move_to_gpu(struct i915_execbuffer *eb) { const unsigned int count = eb->buffer_count; + struct ww_acquire_ctx acquire; unsigned int i; - int err; + int err = 0; + + ww_acquire_init(&acquire, &reservation_ww_class); for (i = 0; i < count; i++) { + struct i915_vma *vma = eb->vma[i]; + + err = ww_mutex_lock_interruptible(&vma->resv->lock, &acquire); + if (!err) + continue; + + GEM_BUG_ON(err == -EALREADY); /* No duplicate vma */ + + if (err == -EDEADLK) { + GEM_BUG_ON(i == 0); + do { + int j = i - 1; + + ww_mutex_unlock(&eb->vma[j]->resv->lock); + + swap(eb->flags[i], eb->flags[j]); + swap(eb->vma[i], eb->vma[j]); + eb->vma[i]->exec_flags = &eb->flags[i]; + } while (--i); + GEM_BUG_ON(vma != eb->vma[0]); + vma->exec_flags = &eb->flags[0]; + + err = ww_mutex_lock_slow_interruptible(&vma->resv->lock, + &acquire); + } + if (err) + break; + } + ww_acquire_done(&acquire); + + while (i--) { unsigned int flags = eb->flags[i]; struct i915_vma *vma = eb->vma[i]; struct drm_i915_gem_object *obj = vma->obj; + assert_vma_held(vma); + if (flags & EXEC_OBJECT_CAPTURE) { struct i915_capture_list *capture; capture = kmalloc(sizeof(*capture), GFP_KERNEL); - if (unlikely(!capture)) - return -ENOMEM; - - capture->next = eb->request->capture_list; - capture->vma = eb->vma[i]; - eb->request->capture_list = capture; + if (capture) { + capture->next = eb->request->capture_list; + capture->vma = vma; + eb->request->capture_list = capture; + } } /* @@ -1874,24 +1920,15 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb) flags &= ~EXEC_OBJECT_ASYNC; } - if (flags & EXEC_OBJECT_ASYNC) - continue; - - err = i915_request_await_object - (eb->request, obj, flags & EXEC_OBJECT_WRITE); - if (err) - return err; - } + if (err == 0 && !(flags & EXEC_OBJECT_ASYNC)) { + err = i915_request_await_object + (eb->request, obj, flags & EXEC_OBJECT_WRITE); + } - for (i = 0; i < count; i++) { - unsigned int flags = eb->flags[i]; - struct i915_vma *vma = eb->vma[i]; + if (err == 0) + err = i915_vma_move_to_active(vma, eb->request, flags); - err = i915_vma_move_to_active(vma, eb->request, flags); - if (unlikely(err)) { - i915_request_skip(eb->request, err); - return err; - } + i915_vma_unlock(vma); __eb_unreserve_vma(vma, flags); vma->exec_flags = NULL; @@ -1899,12 +1936,20 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb) if (unlikely(flags & __EXEC_OBJECT_HAS_REF)) i915_vma_put(vma); } + ww_acquire_fini(&acquire); + + if (unlikely(err)) + goto err_skip; + eb->exec = NULL; /* Unconditionally flush any chipset caches (for streaming writes). 
*/ i915_gem_chipset_flush(eb->i915); - return 0; + +err_skip: + i915_request_skip(eb->request, err); + return err; } static bool i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_fence.c b/drivers/gpu/drm/i915/gem/i915_gem_fence.c new file mode 100644 index 000000000000..57dbc0862713 --- /dev/null +++ b/drivers/gpu/drm/i915/gem/i915_gem_fence.c @@ -0,0 +1,96 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#include "i915_drv.h" +#include "i915_gem_object.h" + +struct stub_fence { + struct dma_fence dma; + struct i915_sw_fence chain; +}; + +static int __i915_sw_fence_call +stub_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) +{ + struct stub_fence *stub = container_of(fence, typeof(*stub), chain); + + switch (state) { + case FENCE_COMPLETE: + dma_fence_signal(&stub->dma); + break; + + case FENCE_FREE: + dma_fence_put(&stub->dma); + break; + } + + return NOTIFY_DONE; +} + +static const char *stub_driver_name(struct dma_fence *fence) +{ + return DRIVER_NAME; +} + +static const char *stub_timeline_name(struct dma_fence *fence) +{ + return "object"; +} + +static void stub_release(struct dma_fence *fence) +{ + struct stub_fence *stub = container_of(fence, typeof(*stub), dma); + + i915_sw_fence_fini(&stub->chain); + + BUILD_BUG_ON(offsetof(typeof(*stub), dma)); + dma_fence_free(&stub->dma); +} + +static const struct dma_fence_ops stub_fence_ops = { + .get_driver_name = stub_driver_name, + .get_timeline_name = stub_timeline_name, + .release = stub_release, +}; + +struct dma_fence * +i915_gem_object_lock_fence(struct drm_i915_gem_object *obj) +{ + struct stub_fence *stub; + + assert_object_held(obj); + + stub = kmalloc(sizeof(*stub), GFP_KERNEL); + if (!stub) + return NULL; + + i915_sw_fence_init(&stub->chain, stub_notify); + dma_fence_init(&stub->dma, &stub_fence_ops, &stub->chain.wait.lock, + to_i915(obj->base.dev)->mm.unordered_timeline, + 0); + + if (i915_sw_fence_await_reservation(&stub->chain, + obj->resv, NULL, + true, I915_FENCE_TIMEOUT, + I915_FENCE_GFP) < 0) + goto err; + + reservation_object_add_excl_fence(obj->resv, &stub->dma); + + return &stub->dma; + +err: + stub_release(&stub->dma); + return NULL; +} + +void i915_gem_object_unlock_fence(struct drm_i915_gem_object *obj, + struct dma_fence *fence) +{ + struct stub_fence *stub = container_of(fence, typeof(*stub), dma); + + i915_sw_fence_commit(&stub->chain); +} diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c index 457e694a5c3f..a6a3452d2b3e 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c @@ -378,6 +378,8 @@ i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj, struct drm_i915_private *dev_priv = to_i915(obj->base.dev); struct i915_vma *vma; + assert_object_held(obj); + if (!(obj->write_domain & flush_domains)) return; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h index 8cf082abb0ab..b0488517e945 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h @@ -99,16 +99,29 @@ i915_gem_object_put(struct drm_i915_gem_object *obj) __drm_gem_object_put(&obj->base); } +#define assert_object_held(obj) reservation_object_assert_held((obj)->resv) + static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj) { reservation_object_lock(obj->resv, NULL); } +static inline int 
+i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj) +{ + return reservation_object_lock_interruptible(obj->resv, NULL); +} + static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj) { reservation_object_unlock(obj->resv); } +struct dma_fence * +i915_gem_object_lock_fence(struct drm_i915_gem_object *obj); +void i915_gem_object_unlock_fence(struct drm_i915_gem_object *obj, + struct dma_fence *fence); + static inline void i915_gem_object_set_readonly(struct drm_i915_gem_object *obj) { @@ -372,6 +385,7 @@ static inline void i915_gem_object_finish_access(struct drm_i915_gem_object *obj) { i915_gem_object_unpin_pages(obj); + i915_gem_object_unlock(obj); } static inline struct intel_engine_cs * diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c index ad662e558dfb..11890e96ed65 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c @@ -187,12 +187,13 @@ void i915_gem_suspend_late(struct drm_i915_private *i915) * machine in an unusable condition. */ - mutex_lock(&i915->drm.struct_mutex); for (phase = phases; *phase; phase++) { - list_for_each_entry(obj, *phase, mm.link) + list_for_each_entry(obj, *phase, mm.link) { + i915_gem_object_lock(obj); WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false)); + i915_gem_object_unlock(obj); + } } - mutex_unlock(&i915->drm.struct_mutex); intel_uc_sanitize(i915); i915_gem_sanitize(i915); diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c index 7b437f06a9be..465e0e1d4aa3 100644 --- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c @@ -960,10 +960,6 @@ static int gpu_write(struct i915_vma *vma, GEM_BUG_ON(!intel_engine_can_store_dword(engine)); - err = i915_gem_object_set_to_gtt_domain(vma->obj, true); - if (err) - return err; - batch = gpu_write_dw(vma, dword * sizeof(u32), value); if (IS_ERR(batch)) return PTR_ERR(batch); @@ -974,13 +970,19 @@ static int gpu_write(struct i915_vma *vma, goto err_batch; } + i915_vma_lock(batch); err = i915_vma_move_to_active(batch, rq, 0); + i915_vma_unlock(batch); if (err) goto err_request; i915_gem_object_set_active_reference(batch->obj); - err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); + i915_vma_lock(vma); + err = i915_gem_object_set_to_gtt_domain(vma->obj, false); + if (err == 0) + err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); + i915_vma_unlock(vma); if (err) goto err_request; diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c index 5495875b48b3..b5c5dd034d5c 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c @@ -78,7 +78,9 @@ static int gtt_set(struct drm_i915_gem_object *obj, u32 __iomem *map; int err; + i915_gem_object_lock(obj); err = i915_gem_object_set_to_gtt_domain(obj, true); + i915_gem_object_unlock(obj); if (err) return err; @@ -105,7 +107,9 @@ static int gtt_get(struct drm_i915_gem_object *obj, u32 __iomem *map; int err; + i915_gem_object_lock(obj); err = i915_gem_object_set_to_gtt_domain(obj, false); + i915_gem_object_unlock(obj); if (err) return err; @@ -131,7 +135,9 @@ static int wc_set(struct drm_i915_gem_object *obj, u32 *map; int err; + i915_gem_object_lock(obj); err = i915_gem_object_set_to_wc_domain(obj, true); + i915_gem_object_unlock(obj); if (err) return err; @@ -152,7 +158,9 @@ static int 
wc_get(struct drm_i915_gem_object *obj, u32 *map; int err; + i915_gem_object_lock(obj); err = i915_gem_object_set_to_wc_domain(obj, false); + i915_gem_object_unlock(obj); if (err) return err; @@ -176,7 +184,9 @@ static int gpu_set(struct drm_i915_gem_object *obj, u32 *cs; int err; + i915_gem_object_lock(obj); err = i915_gem_object_set_to_gtt_domain(obj, true); + i915_gem_object_unlock(obj); if (err) return err; @@ -215,7 +225,9 @@ static int gpu_set(struct drm_i915_gem_object *obj, } intel_ring_advance(rq, cs); + i915_vma_lock(vma); err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); + i915_vma_unlock(vma); i915_vma_unpin(vma); i915_request_add(rq); diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c index 653ae08a277f..72eedd6c2a0a 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c @@ -209,7 +209,9 @@ gpu_fill_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 value) i915_gem_object_flush_map(obj); i915_gem_object_unpin_map(obj); + i915_gem_object_lock(obj); err = i915_gem_object_set_to_gtt_domain(obj, false); + i915_gem_object_unlock(obj); if (err) goto err; @@ -261,7 +263,9 @@ static int gpu_fill(struct drm_i915_gem_object *obj, if (IS_ERR(vma)) return PTR_ERR(vma); + i915_gem_object_lock(obj); err = i915_gem_object_set_to_gtt_domain(obj, false); + i915_gem_object_unlock(obj); if (err) return err; @@ -302,11 +306,15 @@ static int gpu_fill(struct drm_i915_gem_object *obj, if (err) goto err_request; + i915_vma_lock(batch); err = i915_vma_move_to_active(batch, rq, 0); + i915_vma_unlock(batch); if (err) goto skip_request; + i915_vma_lock(vma); err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); + i915_vma_unlock(vma); if (err) goto skip_request; @@ -754,7 +762,9 @@ emit_rpcs_query(struct drm_i915_gem_object *obj, if (IS_ERR(vma)) return PTR_ERR(vma); + i915_gem_object_lock(obj); err = i915_gem_object_set_to_gtt_domain(obj, false); + i915_gem_object_unlock(obj); if (err) return err; @@ -780,11 +790,15 @@ emit_rpcs_query(struct drm_i915_gem_object *obj, if (err) goto err_request; + i915_vma_lock(batch); err = i915_vma_move_to_active(batch, rq, 0); + i915_vma_unlock(batch); if (err) goto skip_request; + i915_vma_lock(vma); err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); + i915_vma_unlock(vma); if (err) goto skip_request; @@ -1345,7 +1359,9 @@ static int write_to_scratch(struct i915_gem_context *ctx, if (err) goto err_request; + i915_vma_lock(vma); err = i915_vma_move_to_active(vma, rq, 0); + i915_vma_unlock(vma); if (err) goto skip_request; @@ -1440,7 +1456,9 @@ static int read_from_scratch(struct i915_gem_context *ctx, if (err) goto err_request; + i915_vma_lock(vma); err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); + i915_vma_unlock(vma); if (err) goto skip_request; @@ -1449,7 +1467,9 @@ static int read_from_scratch(struct i915_gem_context *ctx, i915_request_add(rq); + i915_gem_object_lock(obj); err = i915_gem_object_set_to_cpu_domain(obj, false); + i915_gem_object_unlock(obj); if (err) goto err; diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c index 12c90d8fe0fb..297f8864d392 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c @@ -110,7 +110,9 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj, GEM_BUG_ON(view.partial.size > nreal); cond_resched(); + 
i915_gem_object_lock(obj); err = i915_gem_object_set_to_gtt_domain(obj, true); + i915_gem_object_unlock(obj); if (err) { pr_err("Failed to flush to GTT write domain; err=%d\n", err); @@ -142,7 +144,9 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj, if (offset >= obj->base.size) continue; + i915_gem_object_lock(obj); i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU); + i915_gem_object_unlock(obj); p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT); cpu = kmap(p) + offset_in_page(offset); @@ -344,7 +348,9 @@ static int make_obj_busy(struct drm_i915_gem_object *obj) return PTR_ERR(rq); } + i915_vma_lock(vma); err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); + i915_vma_unlock(vma); i915_request_add(rq); diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c index ed64012e5d24..94a15e3f6db8 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c @@ -46,9 +46,9 @@ static int mock_phys_object(void *arg) } /* Make the object dirty so that put_pages must do copy back the data */ - mutex_lock(&i915->drm.struct_mutex); + i915_gem_object_lock(obj); err = i915_gem_object_set_to_gtt_domain(obj, true); - mutex_unlock(&i915->drm.struct_mutex); + i915_gem_object_unlock(obj); if (err) { pr_err("i915_gem_object_set_to_gtt_domain failed with err=%d\n", err); diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c index 690d77f5ecf6..c3fa10fd9383 100644 --- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c +++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c @@ -117,7 +117,9 @@ static int move_to_active(struct i915_vma *vma, { int err; + i915_vma_lock(vma); err = i915_vma_move_to_active(vma, rq, flags); + i915_vma_unlock(vma); if (err) return err; @@ -1252,7 +1254,9 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915, } } + i915_vma_lock(arg.vma); err = i915_vma_move_to_active(arg.vma, rq, flags); + i915_vma_unlock(arg.vma); if (flags & EXEC_OBJECT_NEEDS_FENCE) i915_vma_unpin_fence(arg.vma); diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c index dfacc46ae7d3..d162b0d3ec8e 100644 --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c @@ -1108,11 +1108,13 @@ static int smoke_submit(struct preempt_smoke *smoke, } if (vma) { + i915_vma_lock(vma); err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0); if (!err) err = i915_vma_move_to_active(vma, rq, 0); + i915_vma_unlock(vma); } i915_request_add(rq); diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c index 9040cae38fc5..38a69acf60ae 100644 --- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c +++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c @@ -118,7 +118,9 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine) goto err_pin; } + i915_vma_lock(vma); err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); + i915_vma_unlock(vma); if (err) goto err_req; @@ -195,8 +197,10 @@ static int check_whitelist(struct i915_gem_context *ctx, return PTR_ERR(results); err = 0; + i915_gem_object_lock(results); igt_wedge_on_timeout(&wedge, ctx->i915, HZ / 5) /* a safety net! 
*/ err = i915_gem_object_set_to_cpu_domain(results, false); + i915_gem_object_unlock(results); if (i915_terminally_wedged(ctx->i915)) err = -EIO; if (err) @@ -367,7 +371,9 @@ static struct i915_vma *create_batch(struct i915_gem_context *ctx) if (err) goto err_obj; + i915_gem_object_lock(obj); err = i915_gem_object_set_to_wc_domain(obj, true); + i915_gem_object_unlock(obj); if (err) goto err_obj; diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index e3608b170105..75cb98da6cc8 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -2844,7 +2844,9 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx) goto put_obj; } + i915_gem_object_lock(obj); ret = i915_gem_object_set_to_cpu_domain(obj, false); + i915_gem_object_unlock(obj); if (ret) { gvt_vgpu_err("failed to set shadow indirect ctx to CPU\n"); goto unmap_src; diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index d66bf77f55fd..8a00e2d0c81c 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -509,18 +509,18 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload) } ret = i915_gem_object_set_to_gtt_domain(bb->obj, - false); + false); if (ret) goto err; - i915_gem_object_finish_access(bb->obj); - bb->accessing = false; - ret = i915_vma_move_to_active(bb->vma, workload->req, 0); if (ret) goto err; + + i915_gem_object_finish_access(bb->obj); + bb->accessing = false; } } return 0; diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c index c893bd4eb2c8..a28bcd2d7c09 100644 --- a/drivers/gpu/drm/i915/i915_cmd_parser.c +++ b/drivers/gpu/drm/i915/i915_cmd_parser.c @@ -1058,19 +1058,20 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj, void *dst, *src; int ret; - ret = i915_gem_object_prepare_read(src_obj, &src_needs_clflush); + ret = i915_gem_object_prepare_write(dst_obj, &dst_needs_clflush); if (ret) return ERR_PTR(ret); - ret = i915_gem_object_prepare_write(dst_obj, &dst_needs_clflush); - if (ret) { - dst = ERR_PTR(ret); - goto unpin_src; - } - dst = i915_gem_object_pin_map(dst_obj, I915_MAP_FORCE_WB); + i915_gem_object_finish_access(dst_obj); if (IS_ERR(dst)) - goto unpin_dst; + return dst; + + ret = i915_gem_object_prepare_read(src_obj, &src_needs_clflush); + if (ret) { + i915_gem_object_unpin_map(dst_obj); + return ERR_PTR(ret); + } src = ERR_PTR(-ENODEV); if (src_needs_clflush && @@ -1116,13 +1117,11 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj, } } + i915_gem_object_finish_access(src_obj); + /* dst_obj is returned with vmap pinned */ *needs_clflush_after = dst_needs_clflush & CLFLUSH_AFTER; -unpin_dst: - i915_gem_object_finish_access(dst_obj); -unpin_src: - i915_gem_object_finish_access(src_obj); return dst; } diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index cfe0d9ff4e5d..993489d0fb2e 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -104,19 +104,10 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj) { struct i915_vma *vma; LIST_HEAD(still_in_list); - int ret; + int ret = 0; lockdep_assert_held(&obj->base.dev->struct_mutex); - /* Closed vma are removed from the obj->vma_list - but they may - * still have an active binding on the object. To remove those we - * must wait for all rendering to complete to the object (as unbinding - * must anyway), and retire the requests. 
- */ - ret = i915_gem_object_set_to_cpu_domain(obj, false); - if (ret) - return ret; - spin_lock(&obj->vma.lock); while (!ret && (vma = list_first_entry_or_null(&obj->vma.list, struct i915_vma, @@ -139,29 +130,17 @@ i915_gem_object_wait_fence(struct dma_fence *fence, unsigned int flags, long timeout) { - struct i915_request *rq; - BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1); if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) return timeout; - if (!dma_fence_is_i915(fence)) - return dma_fence_wait_timeout(fence, - flags & I915_WAIT_INTERRUPTIBLE, - timeout); + if (dma_fence_is_i915(fence)) + return i915_request_wait(to_request(fence), flags, timeout); - rq = to_request(fence); - if (i915_request_completed(rq)) - goto out; - - timeout = i915_request_wait(rq, flags, timeout); - -out: - if (flags & I915_WAIT_LOCKED && i915_request_completed(rq)) - i915_request_retire_upto(rq); - - return timeout; + return dma_fence_wait_timeout(fence, + flags & I915_WAIT_INTERRUPTIBLE, + timeout); } static long @@ -487,21 +466,22 @@ static int i915_gem_shmem_pread(struct drm_i915_gem_object *obj, struct drm_i915_gem_pread *args) { - char __user *user_data; - u64 remain; unsigned int needs_clflush; unsigned int idx, offset; + struct dma_fence *fence; + char __user *user_data; + u64 remain; int ret; - ret = mutex_lock_interruptible(&obj->base.dev->struct_mutex); - if (ret) - return ret; - ret = i915_gem_object_prepare_read(obj, &needs_clflush); - mutex_unlock(&obj->base.dev->struct_mutex); if (ret) return ret; + fence = i915_gem_object_lock_fence(obj); + i915_gem_object_finish_access(obj); + if (!fence) + return -ENOMEM; + remain = args->size; user_data = u64_to_user_ptr(args->data_ptr); offset = offset_in_page(args->offset); @@ -519,7 +499,7 @@ i915_gem_shmem_pread(struct drm_i915_gem_object *obj, offset = 0; } - i915_gem_object_finish_access(obj); + i915_gem_object_unlock_fence(obj, fence); return ret; } @@ -555,8 +535,9 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj, struct i915_ggtt *ggtt = &i915->ggtt; intel_wakeref_t wakeref; struct drm_mm_node node; - struct i915_vma *vma; + struct dma_fence *fence; void __user *user_data; + struct i915_vma *vma; u64 remain, offset; int ret; @@ -585,11 +566,24 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj, GEM_BUG_ON(!node.allocated); } - ret = i915_gem_object_set_to_gtt_domain(obj, false); + mutex_unlock(&i915->drm.struct_mutex); + + ret = i915_gem_object_lock_interruptible(obj); if (ret) goto out_unpin; - mutex_unlock(&i915->drm.struct_mutex); + ret = i915_gem_object_set_to_gtt_domain(obj, false); + if (ret) { + i915_gem_object_unlock(obj); + goto out_unpin; + } + + fence = i915_gem_object_lock_fence(obj); + i915_gem_object_unlock(obj); + if (!fence) { + ret = -ENOMEM; + goto out_unpin; + } user_data = u64_to_user_ptr(args->data_ptr); remain = args->size; @@ -627,8 +621,9 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj, offset += page_length; } - mutex_lock(&i915->drm.struct_mutex); + i915_gem_object_unlock_fence(obj, fence); out_unpin: + mutex_lock(&i915->drm.struct_mutex); if (node.allocated) { wmb(); ggtt->vm.clear_range(&ggtt->vm, node.start, node.size); @@ -739,6 +734,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj, struct i915_ggtt *ggtt = &i915->ggtt; intel_wakeref_t wakeref; struct drm_mm_node node; + struct dma_fence *fence; struct i915_vma *vma; u64 remain, offset; void __user *user_data; @@ -786,11 +782,24 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj, GEM_BUG_ON(!node.allocated); } - ret = 
i915_gem_object_set_to_gtt_domain(obj, true); + mutex_unlock(&i915->drm.struct_mutex); + + ret = i915_gem_object_lock_interruptible(obj); if (ret) goto out_unpin; - mutex_unlock(&i915->drm.struct_mutex); + ret = i915_gem_object_set_to_gtt_domain(obj, true); + if (ret) { + i915_gem_object_unlock(obj); + goto out_unpin; + } + + fence = i915_gem_object_lock_fence(obj); + i915_gem_object_unlock(obj); + if (!fence) { + ret = -ENOMEM; + goto out_unpin; + } intel_fb_obj_invalidate(obj, ORIGIN_CPU); @@ -835,8 +844,9 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj, } intel_fb_obj_flush(obj, ORIGIN_CPU); - mutex_lock(&i915->drm.struct_mutex); + i915_gem_object_unlock_fence(obj, fence); out_unpin: + mutex_lock(&i915->drm.struct_mutex); if (node.allocated) { wmb(); ggtt->vm.clear_range(&ggtt->vm, node.start, node.size); @@ -882,23 +892,23 @@ static int i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj, const struct drm_i915_gem_pwrite *args) { - struct drm_i915_private *i915 = to_i915(obj->base.dev); - void __user *user_data; - u64 remain; unsigned int partial_cacheline_write; unsigned int needs_clflush; unsigned int offset, idx; + struct dma_fence *fence; + void __user *user_data; + u64 remain; int ret; - ret = mutex_lock_interruptible(&i915->drm.struct_mutex); - if (ret) - return ret; - ret = i915_gem_object_prepare_write(obj, &needs_clflush); - mutex_unlock(&i915->drm.struct_mutex); if (ret) return ret; + fence = i915_gem_object_lock_fence(obj); + i915_gem_object_finish_access(obj); + if (!fence) + return -ENOMEM; + /* If we don't overwrite a cacheline completely we need to be * careful to have up-to-date data by first clflushing. Don't * overcomplicate things and flush the entire patch. @@ -926,7 +936,8 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj, } intel_fb_obj_flush(obj, ORIGIN_CPU); - i915_gem_object_finish_access(obj); + i915_gem_object_unlock_fence(obj, fence); + return ret; } @@ -1805,7 +1816,9 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915) if (err) goto err_active; + i915_gem_object_lock(state->obj); err = i915_gem_object_set_to_cpu_domain(state->obj, false); + i915_gem_object_unlock(state->obj); if (err) goto err_active; @@ -2256,12 +2269,13 @@ int i915_gem_freeze_late(struct drm_i915_private *i915) i915_gem_shrink(i915, -1UL, NULL, I915_SHRINK_UNBOUND); i915_gem_drain_freed_objects(i915); - mutex_lock(&i915->drm.struct_mutex); for (phase = phases; *phase; phase++) { - list_for_each_entry(obj, *phase, mm.link) + list_for_each_entry(obj, *phase, mm.link) { + i915_gem_object_lock(obj); WARN_ON(i915_gem_object_set_to_cpu_domain(obj, true)); + i915_gem_object_unlock(obj); + } } - mutex_unlock(&i915->drm.struct_mutex); return 0; } diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index ccc4bdf356c1..7496cce0d798 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -3578,8 +3578,11 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv) WARN_ON(i915_vma_bind(vma, obj ? 
obj->cache_level : 0, PIN_UPDATE)); - if (obj) + if (obj) { + i915_gem_object_lock(obj); WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false)); + i915_gem_object_unlock(obj); + } lock: mutex_lock(&ggtt->vm.mutex); diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c index f3b42b026fff..706ed71468e8 100644 --- a/drivers/gpu/drm/i915/i915_gem_render_state.c +++ b/drivers/gpu/drm/i915/i915_gem_render_state.c @@ -222,7 +222,9 @@ int i915_gem_render_state_emit(struct i915_request *rq) goto err_unpin; } + i915_vma_lock(so.vma); err = i915_vma_move_to_active(so.vma, rq, 0); + i915_vma_unlock(so.vma); err_unpin: i915_vma_unpin(so.vma); err_vma: diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index cf405ffda045..db94d7b6c5a6 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -840,13 +840,14 @@ void i915_vma_destroy(struct i915_vma *vma) { lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); - GEM_BUG_ON(i915_vma_is_active(vma)); GEM_BUG_ON(i915_vma_is_pinned(vma)); if (i915_vma_is_closed(vma)) list_del(&vma->closed_link); WARN_ON(i915_vma_unbind(vma)); + GEM_BUG_ON(i915_vma_is_active(vma)); + __i915_vma_destroy(vma); } @@ -908,12 +909,10 @@ static void export_fence(struct i915_vma *vma, * handle an error right now. Worst case should be missed * synchronisation leading to rendering corruption. */ - reservation_object_lock(resv, NULL); if (flags & EXEC_OBJECT_WRITE) reservation_object_add_excl_fence(resv, &rq->fence); else if (reservation_object_reserve_shared(resv, 1) == 0) reservation_object_add_shared_fence(resv, &rq->fence); - reservation_object_unlock(resv); } int i915_vma_move_to_active(struct i915_vma *vma, @@ -922,7 +921,8 @@ int i915_vma_move_to_active(struct i915_vma *vma, { struct drm_i915_gem_object *obj = vma->obj; - lockdep_assert_held(&rq->i915->drm.struct_mutex); + assert_vma_held(vma); + assert_object_held(obj); GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); /* diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h index 754a762d90b4..2657c99fe187 100644 --- a/drivers/gpu/drm/i915/i915_vma.h +++ b/drivers/gpu/drm/i915/i915_vma.h @@ -298,6 +298,18 @@ void i915_vma_close(struct i915_vma *vma); void i915_vma_reopen(struct i915_vma *vma); void i915_vma_destroy(struct i915_vma *vma); +#define assert_vma_held(vma) reservation_object_assert_held((vma)->resv) + +static inline void i915_vma_lock(struct i915_vma *vma) +{ + reservation_object_lock(vma->resv, NULL); +} + +static inline void i915_vma_unlock(struct i915_vma *vma) +{ + reservation_object_unlock(vma->resv); +} + int __i915_vma_do_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags); static inline int __must_check diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index f454cf2450b5..0e3abc7f65e3 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2113,6 +2113,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, * pin/unpin/fence and not more. 
*/ wakeref = intel_runtime_pm_get(dev_priv); + i915_gem_object_lock(obj); atomic_inc(&dev_priv->gpu_error.pending_fb_pin); @@ -2167,6 +2168,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, err: atomic_dec(&dev_priv->gpu_error.pending_fb_pin); + i915_gem_object_unlock(obj); intel_runtime_pm_put(dev_priv, wakeref); return vma; } @@ -2175,9 +2177,12 @@ void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags) { lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); + i915_gem_object_lock(vma->obj); if (flags & PLANE_HAS_FENCE) i915_vma_unpin_fence(vma); i915_gem_object_unpin_from_display_plane(vma); + i915_gem_object_unlock(vma->obj); + i915_vma_put(vma); } diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c index 7146524264dd..67eadc82c396 100644 --- a/drivers/gpu/drm/i915/intel_guc_log.c +++ b/drivers/gpu/drm/i915/intel_guc_log.c @@ -343,8 +343,6 @@ static void capture_logs_work(struct work_struct *work) static int guc_log_map(struct intel_guc_log *log) { - struct intel_guc *guc = log_to_guc(log); - struct drm_i915_private *dev_priv = guc_to_i915(guc); void *vaddr; int ret; @@ -353,9 +351,9 @@ static int guc_log_map(struct intel_guc_log *log) if (!log->vma) return -ENODEV; - mutex_lock(&dev_priv->drm.struct_mutex); + i915_gem_object_lock(log->vma->obj); ret = i915_gem_object_set_to_wc_domain(log->vma->obj, true); - mutex_unlock(&dev_priv->drm.struct_mutex); + i915_gem_object_unlock(log->vma->obj); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 80dcd879fc58..a2ac06a08715 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -765,8 +765,10 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, atomic_inc(&dev_priv->gpu_error.pending_fb_pin); + i915_gem_object_lock(new_bo); vma = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL, PIN_MAPPABLE); + i915_gem_object_unlock(new_bo); if (IS_ERR(vma)) { ret = PTR_ERR(vma); goto out_pin_section; @@ -1305,15 +1307,20 @@ out_unlock: static int get_registers(struct intel_overlay *overlay, bool use_phys) { + struct drm_i915_private *i915 = overlay->i915; struct drm_i915_gem_object *obj; struct i915_vma *vma; int err; - obj = i915_gem_object_create_stolen(overlay->i915, PAGE_SIZE); + mutex_lock(&i915->drm.struct_mutex); + + obj = i915_gem_object_create_stolen(i915, PAGE_SIZE); if (obj == NULL) - obj = i915_gem_object_create_internal(overlay->i915, PAGE_SIZE); - if (IS_ERR(obj)) - return PTR_ERR(obj); + obj = i915_gem_object_create_internal(i915, PAGE_SIZE); + if (IS_ERR(obj)) { + err = PTR_ERR(obj); + goto err_unlock; + } vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE); if (IS_ERR(vma)) { @@ -1334,10 +1341,13 @@ static int get_registers(struct intel_overlay *overlay, bool use_phys) } overlay->reg_bo = obj; + mutex_unlock(&i915->drm.struct_mutex); return 0; err_put_bo: i915_gem_object_put(obj); +err_unlock: + mutex_unlock(&i915->drm.struct_mutex); return err; } @@ -1363,18 +1373,16 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv) INIT_ACTIVE_REQUEST(&overlay->last_flip); - mutex_lock(&dev_priv->drm.struct_mutex); - ret = get_registers(overlay, OVERLAY_NEEDS_PHYSICAL(dev_priv)); if (ret) goto out_free; + i915_gem_object_lock(overlay->reg_bo); ret = i915_gem_object_set_to_gtt_domain(overlay->reg_bo, true); + i915_gem_object_unlock(overlay->reg_bo); if (ret) goto out_reg_bo; - mutex_unlock(&dev_priv->drm.struct_mutex); - memset_io(overlay->regs, 0, 
sizeof(struct overlay_registers)); update_polyphase_filter(overlay->regs); update_reg_attrs(overlay, overlay->regs); @@ -1386,7 +1394,6 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv) out_reg_bo: i915_gem_object_put(overlay->reg_bo); out_free: - mutex_unlock(&dev_priv->drm.struct_mutex); kfree(overlay); } diff --git a/drivers/gpu/drm/i915/intel_uc_fw.c b/drivers/gpu/drm/i915/intel_uc_fw.c index ec1e8c4deb4d..f342ddd47df8 100644 --- a/drivers/gpu/drm/i915/intel_uc_fw.c +++ b/drivers/gpu/drm/i915/intel_uc_fw.c @@ -246,15 +246,13 @@ int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, intel_uc_fw_type_repr(uc_fw->type), intel_uc_fw_status_repr(uc_fw->load_status)); - intel_uc_fw_ggtt_bind(uc_fw); - /* Call custom loader */ + intel_uc_fw_ggtt_bind(uc_fw); err = xfer(uc_fw); + intel_uc_fw_ggtt_unbind(uc_fw); if (err) goto fail; - intel_uc_fw_ggtt_unbind(uc_fw); - uc_fw->load_status = INTEL_UC_FIRMWARE_SUCCESS; DRM_DEBUG_DRIVER("%s fw load %s\n", intel_uc_fw_type_repr(uc_fw->type), diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index 4fd5356c6577..2c5479ca1f69 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -874,7 +874,9 @@ static int live_all_engines(void *arg) i915_gem_object_set_active_reference(batch->obj); } + i915_vma_lock(batch); err = i915_vma_move_to_active(batch, request[id], 0); + i915_vma_unlock(batch); GEM_BUG_ON(err); i915_request_get(request[id]); @@ -989,7 +991,9 @@ static int live_sequential_engines(void *arg) GEM_BUG_ON(err); request[id]->batch = batch; + i915_vma_lock(batch); err = i915_vma_move_to_active(batch, request[id], 0); + i915_vma_unlock(batch); GEM_BUG_ON(err); i915_gem_object_set_active_reference(batch->obj); diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c index 6919207883f6..6f3e41c0cb3f 100644 --- a/drivers/gpu/drm/i915/selftests/i915_vma.c +++ b/drivers/gpu/drm/i915/selftests/i915_vma.c @@ -886,7 +886,9 @@ static int igt_vma_remapped_gtt(void *arg) unsigned int x, y; int err; + i915_gem_object_lock(obj); err = i915_gem_object_set_to_gtt_domain(obj, true); + i915_gem_object_unlock(obj); if (err) goto out; diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c index 38d6f1b10c54..15c0f0af9658 100644 --- a/drivers/gpu/drm/i915/selftests/igt_spinner.c +++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c @@ -76,7 +76,9 @@ static int move_to_active(struct i915_vma *vma, { int err; + i915_vma_lock(vma); err = i915_vma_move_to_active(vma, rq, flags); + i915_vma_unlock(vma); if (err) return err; -- cgit v1.2.3 From d45a1a533499d3ef1f3d9b750de93306f0c3dbe8 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 28 May 2019 10:29:52 +0100 Subject: drm/i915: Move GEM object waiting to its own file Continuing the decluttering of i915_gem.c by moving the object wait decomposition into its own file. 
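For context, a minimal caller-side sketch (illustration only, not part of this patch; the helper name example_wait_for_idle is invented for the example): after this move the wait entry points are declared in gem/i915_gem_object.h, and the wait is decomposed against the object's reservation_object, so callers no longer pass I915_WAIT_LOCKED:

	#include "gem/i915_gem_object.h"

	/*
	 * Sketch: wait interruptibly for all outstanding rendering to @obj
	 * (reads and writes), with no upper bound on the wait time.
	 */
	static int example_wait_for_idle(struct drm_i915_gem_object *obj)
	{
		return i915_gem_object_wait(obj,
					    I915_WAIT_INTERRUPTIBLE |
					    I915_WAIT_ALL,
					    MAX_SCHEDULE_TIMEOUT);
	}

As with the code being moved, i915_gem_object_wait() returns 0 on success or a negative error code.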
Signed-off-by: Chris Wilson Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190528092956.14910-11-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/Makefile | 1 + drivers/gpu/drm/i915/gem/i915_gem_object.h | 8 + drivers/gpu/drm/i915/gem/i915_gem_wait.c | 277 +++++++++++++++++++++++++++++ drivers/gpu/drm/i915/i915_drv.h | 7 - drivers/gpu/drm/i915/i915_gem.c | 254 -------------------------- drivers/gpu/drm/i915/i915_utils.h | 10 -- 6 files changed, 286 insertions(+), 271 deletions(-) create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_wait.c (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 44d6e0bcf6a6..a4ea6f6b2ab7 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -105,6 +105,7 @@ gem-y += \ gem/i915_gem_stolen.o \ gem/i915_gem_tiling.o \ gem/i915_gem_userptr.o \ + gem/i915_gem_wait.o \ gem/i915_gemfs.o i915-y += \ $(gem-y) \ diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h index b0488517e945..5233ec3a056d 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h @@ -441,4 +441,12 @@ static inline void __start_cpu_write(struct drm_i915_gem_object *obj) obj->cache_dirty = true; } +int i915_gem_object_wait(struct drm_i915_gem_object *obj, + unsigned int flags, + long timeout); +int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj, + unsigned int flags, + const struct i915_sched_attr *attr); +#define I915_PRIORITY_DISPLAY I915_USER_PRIORITY(I915_PRIORITY_MAX) + #endif diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c new file mode 100644 index 000000000000..fed5c751ef37 --- /dev/null +++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c @@ -0,0 +1,277 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2016 Intel Corporation + */ + +#include +#include + +#include "gt/intel_engine.h" + +#include "i915_gem_ioctls.h" +#include "i915_gem_object.h" + +static long +i915_gem_object_wait_fence(struct dma_fence *fence, + unsigned int flags, + long timeout) +{ + BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1); + + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) + return timeout; + + if (dma_fence_is_i915(fence)) + return i915_request_wait(to_request(fence), flags, timeout); + + return dma_fence_wait_timeout(fence, + flags & I915_WAIT_INTERRUPTIBLE, + timeout); +} + +static long +i915_gem_object_wait_reservation(struct reservation_object *resv, + unsigned int flags, + long timeout) +{ + unsigned int seq = __read_seqcount_begin(&resv->seq); + struct dma_fence *excl; + bool prune_fences = false; + + if (flags & I915_WAIT_ALL) { + struct dma_fence **shared; + unsigned int count, i; + int ret; + + ret = reservation_object_get_fences_rcu(resv, + &excl, &count, &shared); + if (ret) + return ret; + + for (i = 0; i < count; i++) { + timeout = i915_gem_object_wait_fence(shared[i], + flags, timeout); + if (timeout < 0) + break; + + dma_fence_put(shared[i]); + } + + for (; i < count; i++) + dma_fence_put(shared[i]); + kfree(shared); + + /* + * If both shared fences and an exclusive fence exist, + * then by construction the shared fences must be later + * than the exclusive fence. If we successfully wait for + * all the shared fences, we know that the exclusive fence + * must all be signaled. If all the shared fences are + * signaled, we can prune the array and recover the + * floating references on the fences/requests. 
+ */ + prune_fences = count && timeout >= 0; + } else { + excl = reservation_object_get_excl_rcu(resv); + } + + if (excl && timeout >= 0) + timeout = i915_gem_object_wait_fence(excl, flags, timeout); + + dma_fence_put(excl); + + /* + * Opportunistically prune the fences iff we know they have *all* been + * signaled and that the reservation object has not been changed (i.e. + * no new fences have been added). + */ + if (prune_fences && !__read_seqcount_retry(&resv->seq, seq)) { + if (reservation_object_trylock(resv)) { + if (!__read_seqcount_retry(&resv->seq, seq)) + reservation_object_add_excl_fence(resv, NULL); + reservation_object_unlock(resv); + } + } + + return timeout; +} + +static void __fence_set_priority(struct dma_fence *fence, + const struct i915_sched_attr *attr) +{ + struct i915_request *rq; + struct intel_engine_cs *engine; + + if (dma_fence_is_signaled(fence) || !dma_fence_is_i915(fence)) + return; + + rq = to_request(fence); + engine = rq->engine; + + local_bh_disable(); + rcu_read_lock(); /* RCU serialisation for set-wedged protection */ + if (engine->schedule) + engine->schedule(rq, attr); + rcu_read_unlock(); + local_bh_enable(); /* kick the tasklets if queues were reprioritised */ +} + +static void fence_set_priority(struct dma_fence *fence, + const struct i915_sched_attr *attr) +{ + /* Recurse once into a fence-array */ + if (dma_fence_is_array(fence)) { + struct dma_fence_array *array = to_dma_fence_array(fence); + int i; + + for (i = 0; i < array->num_fences; i++) + __fence_set_priority(array->fences[i], attr); + } else { + __fence_set_priority(fence, attr); + } +} + +int +i915_gem_object_wait_priority(struct drm_i915_gem_object *obj, + unsigned int flags, + const struct i915_sched_attr *attr) +{ + struct dma_fence *excl; + + if (flags & I915_WAIT_ALL) { + struct dma_fence **shared; + unsigned int count, i; + int ret; + + ret = reservation_object_get_fences_rcu(obj->resv, + &excl, &count, &shared); + if (ret) + return ret; + + for (i = 0; i < count; i++) { + fence_set_priority(shared[i], attr); + dma_fence_put(shared[i]); + } + + kfree(shared); + } else { + excl = reservation_object_get_excl_rcu(obj->resv); + } + + if (excl) { + fence_set_priority(excl, attr); + dma_fence_put(excl); + } + return 0; +} + +/** + * Waits for rendering to the object to be completed + * @obj: i915 gem object + * @flags: how to wait (under a lock, for all rendering or just for writes etc) + * @timeout: how long to wait + */ +int +i915_gem_object_wait(struct drm_i915_gem_object *obj, + unsigned int flags, + long timeout) +{ + might_sleep(); + GEM_BUG_ON(timeout < 0); + + timeout = i915_gem_object_wait_reservation(obj->resv, flags, timeout); + return timeout < 0 ? timeout : 0; +} + +static inline unsigned long nsecs_to_jiffies_timeout(const u64 n) +{ + /* nsecs_to_jiffies64() does not guard against overflow */ + if (NSEC_PER_SEC % HZ && + div_u64(n, NSEC_PER_SEC) >= MAX_JIFFY_OFFSET / HZ) + return MAX_JIFFY_OFFSET; + + return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1); +} + +static unsigned long to_wait_timeout(s64 timeout_ns) +{ + if (timeout_ns < 0) + return MAX_SCHEDULE_TIMEOUT; + + if (timeout_ns == 0) + return 0; + + return nsecs_to_jiffies_timeout(timeout_ns); +} + +/** + * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT + * @dev: drm device pointer + * @data: ioctl data blob + * @file: drm file pointer + * + * Returns 0 if successful, else an error is returned with the remaining time in + * the timeout parameter. 
+ * -ETIME: object is still busy after timeout + * -ERESTARTSYS: signal interrupted the wait + * -ENONENT: object doesn't exist + * Also possible, but rare: + * -EAGAIN: incomplete, restart syscall + * -ENOMEM: damn + * -ENODEV: Internal IRQ fail + * -E?: The add request failed + * + * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any + * non-zero timeout parameter the wait ioctl will wait for the given number of + * nanoseconds on an object becoming unbusy. Since the wait itself does so + * without holding struct_mutex the object may become re-busied before this + * function completes. A similar but shorter * race condition exists in the busy + * ioctl + */ +int +i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) +{ + struct drm_i915_gem_wait *args = data; + struct drm_i915_gem_object *obj; + ktime_t start; + long ret; + + if (args->flags != 0) + return -EINVAL; + + obj = i915_gem_object_lookup(file, args->bo_handle); + if (!obj) + return -ENOENT; + + start = ktime_get(); + + ret = i915_gem_object_wait(obj, + I915_WAIT_INTERRUPTIBLE | + I915_WAIT_PRIORITY | + I915_WAIT_ALL, + to_wait_timeout(args->timeout_ns)); + + if (args->timeout_ns > 0) { + args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start)); + if (args->timeout_ns < 0) + args->timeout_ns = 0; + + /* + * Apparently ktime isn't accurate enough and occasionally has a + * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch + * things up to make the test happy. We allow up to 1 jiffy. + * + * This is a regression from the timespec->ktime conversion. + */ + if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns)) + args->timeout_ns = 0; + + /* Asked to wait beyond the jiffie/scheduler precision? */ + if (ret == -ETIME && args->timeout_ns) + ret = -EAGAIN; + } + + i915_gem_object_put(obj); + return ret; +} diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index e26d622b1e41..652b5edefaae 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2758,13 +2758,6 @@ void i915_gem_suspend(struct drm_i915_private *dev_priv); void i915_gem_suspend_late(struct drm_i915_private *dev_priv); void i915_gem_resume(struct drm_i915_private *dev_priv); vm_fault_t i915_gem_fault(struct vm_fault *vmf); -int i915_gem_object_wait(struct drm_i915_gem_object *obj, - unsigned int flags, - long timeout); -int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj, - unsigned int flags, - const struct i915_sched_attr *attr); -#define I915_PRIORITY_DISPLAY I915_USER_PRIORITY(I915_PRIORITY_MAX) int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file); void i915_gem_release(struct drm_device *dev, struct drm_file *file); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 993489d0fb2e..a460563e4bb5 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -125,178 +125,6 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj) return ret; } -static long -i915_gem_object_wait_fence(struct dma_fence *fence, - unsigned int flags, - long timeout) -{ - BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1); - - if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) - return timeout; - - if (dma_fence_is_i915(fence)) - return i915_request_wait(to_request(fence), flags, timeout); - - return dma_fence_wait_timeout(fence, - flags & I915_WAIT_INTERRUPTIBLE, - timeout); -} - -static long -i915_gem_object_wait_reservation(struct reservation_object *resv, - unsigned int 
flags, - long timeout) -{ - unsigned int seq = __read_seqcount_begin(&resv->seq); - struct dma_fence *excl; - bool prune_fences = false; - - if (flags & I915_WAIT_ALL) { - struct dma_fence **shared; - unsigned int count, i; - int ret; - - ret = reservation_object_get_fences_rcu(resv, - &excl, &count, &shared); - if (ret) - return ret; - - for (i = 0; i < count; i++) { - timeout = i915_gem_object_wait_fence(shared[i], - flags, timeout); - if (timeout < 0) - break; - - dma_fence_put(shared[i]); - } - - for (; i < count; i++) - dma_fence_put(shared[i]); - kfree(shared); - - /* - * If both shared fences and an exclusive fence exist, - * then by construction the shared fences must be later - * than the exclusive fence. If we successfully wait for - * all the shared fences, we know that the exclusive fence - * must all be signaled. If all the shared fences are - * signaled, we can prune the array and recover the - * floating references on the fences/requests. - */ - prune_fences = count && timeout >= 0; - } else { - excl = reservation_object_get_excl_rcu(resv); - } - - if (excl && timeout >= 0) - timeout = i915_gem_object_wait_fence(excl, flags, timeout); - - dma_fence_put(excl); - - /* - * Opportunistically prune the fences iff we know they have *all* been - * signaled and that the reservation object has not been changed (i.e. - * no new fences have been added). - */ - if (prune_fences && !__read_seqcount_retry(&resv->seq, seq)) { - if (reservation_object_trylock(resv)) { - if (!__read_seqcount_retry(&resv->seq, seq)) - reservation_object_add_excl_fence(resv, NULL); - reservation_object_unlock(resv); - } - } - - return timeout; -} - -static void __fence_set_priority(struct dma_fence *fence, - const struct i915_sched_attr *attr) -{ - struct i915_request *rq; - struct intel_engine_cs *engine; - - if (dma_fence_is_signaled(fence) || !dma_fence_is_i915(fence)) - return; - - rq = to_request(fence); - engine = rq->engine; - - local_bh_disable(); - rcu_read_lock(); /* RCU serialisation for set-wedged protection */ - if (engine->schedule) - engine->schedule(rq, attr); - rcu_read_unlock(); - local_bh_enable(); /* kick the tasklets if queues were reprioritised */ -} - -static void fence_set_priority(struct dma_fence *fence, - const struct i915_sched_attr *attr) -{ - /* Recurse once into a fence-array */ - if (dma_fence_is_array(fence)) { - struct dma_fence_array *array = to_dma_fence_array(fence); - int i; - - for (i = 0; i < array->num_fences; i++) - __fence_set_priority(array->fences[i], attr); - } else { - __fence_set_priority(fence, attr); - } -} - -int -i915_gem_object_wait_priority(struct drm_i915_gem_object *obj, - unsigned int flags, - const struct i915_sched_attr *attr) -{ - struct dma_fence *excl; - - if (flags & I915_WAIT_ALL) { - struct dma_fence **shared; - unsigned int count, i; - int ret; - - ret = reservation_object_get_fences_rcu(obj->resv, - &excl, &count, &shared); - if (ret) - return ret; - - for (i = 0; i < count; i++) { - fence_set_priority(shared[i], attr); - dma_fence_put(shared[i]); - } - - kfree(shared); - } else { - excl = reservation_object_get_excl_rcu(obj->resv); - } - - if (excl) { - fence_set_priority(excl, attr); - dma_fence_put(excl); - } - return 0; -} - -/** - * Waits for rendering to the object to be completed - * @obj: i915 gem object - * @flags: how to wait (under a lock, for all rendering or just for writes etc) - * @timeout: how long to wait - */ -int -i915_gem_object_wait(struct drm_i915_gem_object *obj, - unsigned int flags, - long timeout) -{ - might_sleep(); - 
GEM_BUG_ON(timeout < 0); - - timeout = i915_gem_object_wait_reservation(obj->resv, flags, timeout); - return timeout < 0 ? timeout : 0; -} - static int i915_gem_phys_pwrite(struct drm_i915_gem_object *obj, struct drm_i915_gem_pwrite *args, @@ -1097,88 +925,6 @@ void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv) } } -static unsigned long to_wait_timeout(s64 timeout_ns) -{ - if (timeout_ns < 0) - return MAX_SCHEDULE_TIMEOUT; - - if (timeout_ns == 0) - return 0; - - return nsecs_to_jiffies_timeout(timeout_ns); -} - -/** - * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT - * @dev: drm device pointer - * @data: ioctl data blob - * @file: drm file pointer - * - * Returns 0 if successful, else an error is returned with the remaining time in - * the timeout parameter. - * -ETIME: object is still busy after timeout - * -ERESTARTSYS: signal interrupted the wait - * -ENONENT: object doesn't exist - * Also possible, but rare: - * -EAGAIN: incomplete, restart syscall - * -ENOMEM: damn - * -ENODEV: Internal IRQ fail - * -E?: The add request failed - * - * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any - * non-zero timeout parameter the wait ioctl will wait for the given number of - * nanoseconds on an object becoming unbusy. Since the wait itself does so - * without holding struct_mutex the object may become re-busied before this - * function completes. A similar but shorter * race condition exists in the busy - * ioctl - */ -int -i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) -{ - struct drm_i915_gem_wait *args = data; - struct drm_i915_gem_object *obj; - ktime_t start; - long ret; - - if (args->flags != 0) - return -EINVAL; - - obj = i915_gem_object_lookup(file, args->bo_handle); - if (!obj) - return -ENOENT; - - start = ktime_get(); - - ret = i915_gem_object_wait(obj, - I915_WAIT_INTERRUPTIBLE | - I915_WAIT_PRIORITY | - I915_WAIT_ALL, - to_wait_timeout(args->timeout_ns)); - - if (args->timeout_ns > 0) { - args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start)); - if (args->timeout_ns < 0) - args->timeout_ns = 0; - - /* - * Apparently ktime isn't accurate enough and occasionally has a - * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch - * things up to make the test happy. We allow up to 1 jiffy. - * - * This is a regression from the timespec->ktime conversion. - */ - if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns)) - args->timeout_ns = 0; - - /* Asked to wait beyond the jiffie/scheduler precision? 
*/ - if (ret == -ETIME && args->timeout_ns) - ret = -EAGAIN; - } - - i915_gem_object_put(obj); - return ret; -} - static int wait_for_engines(struct drm_i915_private *i915) { if (wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT)) { diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h index e52866084891..2987219a6300 100644 --- a/drivers/gpu/drm/i915/i915_utils.h +++ b/drivers/gpu/drm/i915/i915_utils.h @@ -220,16 +220,6 @@ static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m) return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1); } -static inline unsigned long nsecs_to_jiffies_timeout(const u64 n) -{ - /* nsecs_to_jiffies64() does not guard against overflow */ - if (NSEC_PER_SEC % HZ && - div_u64(n, NSEC_PER_SEC) >= MAX_JIFFY_OFFSET / HZ) - return MAX_JIFFY_OFFSET; - - return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1); -} - /* * If you need to wait X milliseconds between events A and B, but event B * doesn't happen exactly after event A, you record the timestamp (jiffies) of -- cgit v1.2.3 From 3f43c8767ed7d32a0fcd3f531eda6808e4de4f4e Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 28 May 2019 10:29:53 +0100 Subject: drm/i915: Move GEM object busy checking to its own file Continuing the decluttering of i915_gem.c by moving the object busy checking into its own file. Signed-off-by: Chris Wilson Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190528092956.14910-12-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/Makefile | 1 + drivers/gpu/drm/i915/gem/i915_gem_busy.c | 138 +++++++++++++++++++++++++++++++ drivers/gpu/drm/i915/i915_gem.c | 128 ---------------------------- 3 files changed, 139 insertions(+), 128 deletions(-) create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_busy.c (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index a4ea6f6b2ab7..e1ed526b96bf 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -88,6 +88,7 @@ i915-y += $(gt-y) # GEM (Graphics Execution Management) code obj-y += gem/ gem-y += \ + gem/i915_gem_busy.o \ gem/i915_gem_clflush.o \ gem/i915_gem_context.o \ gem/i915_gem_dmabuf.o \ diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c new file mode 100644 index 000000000000..5a5eda3003e9 --- /dev/null +++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c @@ -0,0 +1,138 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2014-2016 Intel Corporation + */ + +#include "gt/intel_engine.h" + +#include "i915_gem_ioctls.h" +#include "i915_gem_object.h" + +static __always_inline u32 __busy_read_flag(u8 id) +{ + if (id == (u8)I915_ENGINE_CLASS_INVALID) + return 0xffff0000u; + + GEM_BUG_ON(id >= 16); + return 0x10000u << id; +} + +static __always_inline u32 __busy_write_id(u8 id) +{ + /* + * The uABI guarantees an active writer is also amongst the read + * engines. This would be true if we accessed the activity tracking + * under the lock, but as we perform the lookup of the object and + * its activity locklessly we can not guarantee that the last_write + * being active implies that we have set the same engine flag from + * last_read - hence we always set both read and write busy for + * last_write. 
+ */ + if (id == (u8)I915_ENGINE_CLASS_INVALID) + return 0xffffffffu; + + return (id + 1) | __busy_read_flag(id); +} + +static __always_inline unsigned int +__busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u8 id)) +{ + const struct i915_request *rq; + + /* + * We have to check the current hw status of the fence as the uABI + * guarantees forward progress. We could rely on the idle worker + * to eventually flush us, but to minimise latency just ask the + * hardware. + * + * Note we only report on the status of native fences. + */ + if (!dma_fence_is_i915(fence)) + return 0; + + /* opencode to_request() in order to avoid const warnings */ + rq = container_of(fence, const struct i915_request, fence); + if (i915_request_completed(rq)) + return 0; + + /* Beware type-expansion follies! */ + BUILD_BUG_ON(!typecheck(u8, rq->engine->uabi_class)); + return flag(rq->engine->uabi_class); +} + +static __always_inline unsigned int +busy_check_reader(const struct dma_fence *fence) +{ + return __busy_set_if_active(fence, __busy_read_flag); +} + +static __always_inline unsigned int +busy_check_writer(const struct dma_fence *fence) +{ + if (!fence) + return 0; + + return __busy_set_if_active(fence, __busy_write_id); +} + +int +i915_gem_busy_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_i915_gem_busy *args = data; + struct drm_i915_gem_object *obj; + struct reservation_object_list *list; + unsigned int seq; + int err; + + err = -ENOENT; + rcu_read_lock(); + obj = i915_gem_object_lookup_rcu(file, args->handle); + if (!obj) + goto out; + + /* + * A discrepancy here is that we do not report the status of + * non-i915 fences, i.e. even though we may report the object as idle, + * a call to set-domain may still stall waiting for foreign rendering. + * This also means that wait-ioctl may report an object as busy, + * where busy-ioctl considers it idle. + * + * We trade the ability to warn of foreign fences to report on which + * i915 engines are active for the object. + * + * Alternatively, we can trade that extra information on read/write + * activity with + * args->busy = + * !reservation_object_test_signaled_rcu(obj->resv, true); + * to report the overall busyness. This is what the wait-ioctl does. + * + */ +retry: + seq = raw_read_seqcount(&obj->resv->seq); + + /* Translate the exclusive fence to the READ *and* WRITE engine */ + args->busy = busy_check_writer(rcu_dereference(obj->resv->fence_excl)); + + /* Translate shared fences to READ set of engines */ + list = rcu_dereference(obj->resv->fence); + if (list) { + unsigned int shared_count = list->shared_count, i; + + for (i = 0; i < shared_count; ++i) { + struct dma_fence *fence = + rcu_dereference(list->shared[i]); + + args->busy |= busy_check_reader(fence); + } + } + + if (args->busy && read_seqcount_retry(&obj->resv->seq, seq)) + goto retry; + + err = 0; +out: + rcu_read_unlock(); + return err; +} diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index a460563e4bb5..6af448736030 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1142,134 +1142,6 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, return vma; } -static __always_inline u32 __busy_read_flag(u8 id) -{ - if (id == (u8)I915_ENGINE_CLASS_INVALID) - return 0xffff0000u; - - GEM_BUG_ON(id >= 16); - return 0x10000u << id; -} - -static __always_inline u32 __busy_write_id(u8 id) -{ - /* - * The uABI guarantees an active writer is also amongst the read - * engines. 
This would be true if we accessed the activity tracking - * under the lock, but as we perform the lookup of the object and - * its activity locklessly we can not guarantee that the last_write - * being active implies that we have set the same engine flag from - * last_read - hence we always set both read and write busy for - * last_write. - */ - if (id == (u8)I915_ENGINE_CLASS_INVALID) - return 0xffffffffu; - - return (id + 1) | __busy_read_flag(id); -} - -static __always_inline unsigned int -__busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u8 id)) -{ - const struct i915_request *rq; - - /* - * We have to check the current hw status of the fence as the uABI - * guarantees forward progress. We could rely on the idle worker - * to eventually flush us, but to minimise latency just ask the - * hardware. - * - * Note we only report on the status of native fences. - */ - if (!dma_fence_is_i915(fence)) - return 0; - - /* opencode to_request() in order to avoid const warnings */ - rq = container_of(fence, const struct i915_request, fence); - if (i915_request_completed(rq)) - return 0; - - /* Beware type-expansion follies! */ - BUILD_BUG_ON(!typecheck(u8, rq->engine->uabi_class)); - return flag(rq->engine->uabi_class); -} - -static __always_inline unsigned int -busy_check_reader(const struct dma_fence *fence) -{ - return __busy_set_if_active(fence, __busy_read_flag); -} - -static __always_inline unsigned int -busy_check_writer(const struct dma_fence *fence) -{ - if (!fence) - return 0; - - return __busy_set_if_active(fence, __busy_write_id); -} - -int -i915_gem_busy_ioctl(struct drm_device *dev, void *data, - struct drm_file *file) -{ - struct drm_i915_gem_busy *args = data; - struct drm_i915_gem_object *obj; - struct reservation_object_list *list; - unsigned int seq; - int err; - - err = -ENOENT; - rcu_read_lock(); - obj = i915_gem_object_lookup_rcu(file, args->handle); - if (!obj) - goto out; - - /* - * A discrepancy here is that we do not report the status of - * non-i915 fences, i.e. even though we may report the object as idle, - * a call to set-domain may still stall waiting for foreign rendering. - * This also means that wait-ioctl may report an object as busy, - * where busy-ioctl considers it idle. - * - * We trade the ability to warn of foreign fences to report on which - * i915 engines are active for the object. - * - * Alternatively, we can trade that extra information on read/write - * activity with - * args->busy = - * !reservation_object_test_signaled_rcu(obj->resv, true); - * to report the overall busyness. This is what the wait-ioctl does. 
- * - */ -retry: - seq = raw_read_seqcount(&obj->resv->seq); - - /* Translate the exclusive fence to the READ *and* WRITE engine */ - args->busy = busy_check_writer(rcu_dereference(obj->resv->fence_excl)); - - /* Translate shared fences to READ set of engines */ - list = rcu_dereference(obj->resv->fence); - if (list) { - unsigned int shared_count = list->shared_count, i; - - for (i = 0; i < shared_count; ++i) { - struct dma_fence *fence = - rcu_dereference(list->shared[i]); - - args->busy |= busy_check_reader(fence); - } - } - - if (args->busy && read_seqcount_retry(&obj->resv->seq, seq)) - goto retry; - - err = 0; -out: - rcu_read_unlock(); - return err; -} - int i915_gem_throttle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) -- cgit v1.2.3 From 446e2d16a131ed2319ba92d9aa61db5164aa25d2 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 28 May 2019 10:29:54 +0100 Subject: drm/i915: Move GEM client throttling to its own file Continuing the decluttering of i915_gem.c by moving the client self throttling into its own file. Signed-off-by: Chris Wilson Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190528092956.14910-13-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/Makefile | 1 + drivers/gpu/drm/i915/gem/i915_gem_throttle.c | 73 ++++++++++++++++++++++++++++ drivers/gpu/drm/i915/i915_drv.h | 6 --- drivers/gpu/drm/i915/i915_gem.c | 58 ---------------------- 4 files changed, 74 insertions(+), 64 deletions(-) create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_throttle.c (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index e1ed526b96bf..83588e9840f8 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -104,6 +104,7 @@ gem-y += \ gem/i915_gem_shmem.o \ gem/i915_gem_shrinker.o \ gem/i915_gem_stolen.o \ + gem/i915_gem_throttle.o \ gem/i915_gem_tiling.o \ gem/i915_gem_userptr.o \ gem/i915_gem_wait.o \ diff --git a/drivers/gpu/drm/i915/gem/i915_gem_throttle.c b/drivers/gpu/drm/i915/gem/i915_gem_throttle.c new file mode 100644 index 000000000000..adb3074d9ce2 --- /dev/null +++ b/drivers/gpu/drm/i915/gem/i915_gem_throttle.c @@ -0,0 +1,73 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2014-2016 Intel Corporation + */ + +#include + +#include + +#include "i915_drv.h" +#include "i915_gem_ioctls.h" +#include "i915_gem_object.h" + +/* + * 20ms is a fairly arbitrary limit (greater than the average frame time) + * chosen to prevent the CPU getting more than a frame ahead of the GPU + * (when using lax throttling for the frontbuffer). We also use it to + * offer free GPU waitboosts for severely congested workloads. + */ +#define DRM_I915_THROTTLE_JIFFIES msecs_to_jiffies(20) + +/* + * Throttle our rendering by waiting until the ring has completed our requests + * emitted over 20 msec ago. + * + * Note that if we were to use the current jiffies each time around the loop, + * we wouldn't escape the function with any frames outstanding if the time to + * render a frame was over 20ms. + * + * This should get us reasonable parallelism between CPU and GPU but also + * relatively low latency when blocking on a particular request to finish. 
+ */ +int +i915_gem_throttle_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_i915_file_private *file_priv = file->driver_priv; + unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES; + struct i915_request *request, *target = NULL; + long ret; + + /* ABI: return -EIO if already wedged */ + ret = i915_terminally_wedged(to_i915(dev)); + if (ret) + return ret; + + spin_lock(&file_priv->mm.lock); + list_for_each_entry(request, &file_priv->mm.request_list, client_link) { + if (time_after_eq(request->emitted_jiffies, recent_enough)) + break; + + if (target) { + list_del(&target->client_link); + target->file_priv = NULL; + } + + target = request; + } + if (target) + i915_request_get(target); + spin_unlock(&file_priv->mm.lock); + + if (!target) + return 0; + + ret = i915_request_wait(target, + I915_WAIT_INTERRUPTIBLE, + MAX_SCHEDULE_TIMEOUT); + i915_request_put(target); + + return ret < 0 ? ret : 0; +} diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 652b5edefaae..4eb8f6a181c1 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -213,12 +213,6 @@ struct drm_i915_file_private { struct { spinlock_t lock; struct list_head request_list; -/* 20ms is a fairly arbitrary limit (greater than the average frame time) - * chosen to prevent the CPU getting more than a frame ahead of the GPU - * (when using lax throttling for the frontbuffer). We also use it to - * offer free GPU waitboosts for severely congested workloads. - */ -#define DRM_I915_THROTTLE_JIFFIES msecs_to_jiffies(20) } mm; struct idr context_idr; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 6af448736030..e5aafbeb1d19 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1012,57 +1012,6 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915, return 0; } -/* Throttle our rendering by waiting until the ring has completed our requests - * emitted over 20 msec ago. - * - * Note that if we were to use the current jiffies each time around the loop, - * we wouldn't escape the function with any frames outstanding if the time to - * render a frame was over 20ms. - * - * This should get us reasonable parallelism between CPU and GPU but also - * relatively low latency when blocking on a particular request to finish. - */ -static int -i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) -{ - struct drm_i915_private *dev_priv = to_i915(dev); - struct drm_i915_file_private *file_priv = file->driver_priv; - unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES; - struct i915_request *request, *target = NULL; - long ret; - - /* ABI: return -EIO if already wedged */ - ret = i915_terminally_wedged(dev_priv); - if (ret) - return ret; - - spin_lock(&file_priv->mm.lock); - list_for_each_entry(request, &file_priv->mm.request_list, client_link) { - if (time_after_eq(request->emitted_jiffies, recent_enough)) - break; - - if (target) { - list_del(&target->client_link); - target->file_priv = NULL; - } - - target = request; - } - if (target) - i915_request_get(target); - spin_unlock(&file_priv->mm.lock); - - if (target == NULL) - return 0; - - ret = i915_request_wait(target, - I915_WAIT_INTERRUPTIBLE, - MAX_SCHEDULE_TIMEOUT); - i915_request_put(target); - - return ret < 0 ? 
ret : 0; -} - struct i915_vma * i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, const struct i915_ggtt_view *view, @@ -1142,13 +1091,6 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, return vma; } -int -i915_gem_throttle_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - return i915_gem_ring_throttle(dev, file_priv); -} - int i915_gem_madvise_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) -- cgit v1.2.3 From 754f7a0b2a13fa601c4ea494f726214c07b81e08 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 28 May 2019 10:29:55 +0100 Subject: drm/i915: Rename intel_context.active to .inflight Rename the engine this HW context is currently active upon (that we are flying upon) to disambiguate between the mixture of different active terms (and prevent conflict in future patches). Signed-off-by: Chris Wilson Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190528092956.14910-14-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_context_types.h | 2 +- drivers/gpu/drm/i915/gt/intel_lrc.c | 22 +++++++++++----------- 2 files changed, 12 insertions(+), 12 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h index 963a312430e6..825fcf0ac9c4 100644 --- a/drivers/gpu/drm/i915/gt/intel_context_types.h +++ b/drivers/gpu/drm/i915/gt/intel_context_types.h @@ -37,7 +37,7 @@ struct intel_context { struct i915_gem_context *gem_context; struct intel_engine_cs *engine; - struct intel_engine_cs *active; + struct intel_engine_cs *inflight; struct list_head signal_link; struct list_head signals; diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 448f3c0d8704..fed704802c57 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -447,7 +447,7 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine) __i915_request_unsubmit(rq); unwind_wa_tail(rq); - GEM_BUG_ON(rq->hw_context->active); + GEM_BUG_ON(rq->hw_context->inflight); /* * Push the request back into the queue for later resubmission. @@ -516,11 +516,11 @@ execlists_user_end(struct intel_engine_execlists *execlists) static inline void execlists_context_schedule_in(struct i915_request *rq) { - GEM_BUG_ON(rq->hw_context->active); + GEM_BUG_ON(rq->hw_context->inflight); execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN); intel_engine_context_in(rq->engine); - rq->hw_context->active = rq->engine; + rq->hw_context->inflight = rq->engine; } static void kick_siblings(struct i915_request *rq) @@ -535,7 +535,7 @@ static void kick_siblings(struct i915_request *rq) static inline void execlists_context_schedule_out(struct i915_request *rq, unsigned long status) { - rq->hw_context->active = NULL; + rq->hw_context->inflight = NULL; intel_engine_context_out(rq->engine); execlists_context_status_change(rq, status); trace_i915_request_out(rq); @@ -778,7 +778,7 @@ static bool virtual_matches(const struct virtual_engine *ve, const struct i915_request *rq, const struct intel_engine_cs *engine) { - const struct intel_engine_cs *active; + const struct intel_engine_cs *inflight; if (!(rq->execution_mask & engine->mask)) /* We peeked too soon! */ return false; @@ -792,8 +792,8 @@ static bool virtual_matches(const struct virtual_engine *ve, * we reuse the register offsets). This is a very small * hystersis on the greedy seelction algorithm. 
*/ - active = READ_ONCE(ve->context.active); - if (active && active != engine) + inflight = READ_ONCE(ve->context.inflight); + if (inflight && inflight != engine) return false; return true; @@ -981,7 +981,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine) u32 *regs = ve->context.lrc_reg_state; unsigned int n; - GEM_BUG_ON(READ_ONCE(ve->context.active)); + GEM_BUG_ON(READ_ONCE(ve->context.inflight)); virtual_update_register_offsets(regs, engine); if (!list_empty(&ve->context.signals)) @@ -1459,7 +1459,7 @@ static void execlists_context_unpin(struct intel_context *ce) * had the chance to run yet; let it run before we teardown the * reference it may use. */ - engine = READ_ONCE(ce->active); + engine = READ_ONCE(ce->inflight); if (unlikely(engine)) { unsigned long flags; @@ -1467,7 +1467,7 @@ static void execlists_context_unpin(struct intel_context *ce) process_csb(engine); spin_unlock_irqrestore(&engine->timeline.lock, flags); - GEM_BUG_ON(READ_ONCE(ce->active)); + GEM_BUG_ON(READ_ONCE(ce->inflight)); } i915_gem_context_unpin_hw_id(ce->gem_context); @@ -3062,7 +3062,7 @@ static void virtual_context_destroy(struct kref *kref) unsigned int n; GEM_BUG_ON(ve->request); - GEM_BUG_ON(ve->context.active); + GEM_BUG_ON(ve->context.inflight); for (n = 0; n < ve->num_siblings; n++) { struct intel_engine_cs *sibling = ve->siblings[n]; -- cgit v1.2.3 From c017cf6b1a5c7a218f7171bb8061132d9a23a918 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 28 May 2019 10:29:56 +0100 Subject: drm/i915: Drop the deferred active reference An old optimisation to reduce the number of atomics per batch sadly relies on struct_mutex for coordination. In order to remove struct_mutex from serialising object/context closing, always taking and releasing an active reference on first use / last use greatly simplifies the locking. 
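As a rough sketch of the new rule (illustrative only; the real code is in the
i915_vma.c hunk below), the object now simply pins itself with an ordinary
reference for as long as it has active vmas, with no I915_BO_ACTIVE_REF
bookkeeping under struct_mutex:

	/* first use: i915_vma_move_to_active() takes the active reference */
	if (!vma->active.count && !obj->active_count++)
		i915_gem_object_get(obj);

	/* last use: __i915_vma_retire() drops it again */
	i915_gem_object_put(obj);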
Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190528092956.14910-15-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_context.c | 2 +- drivers/gpu/drm/i915/gem/i915_gem_object.c | 13 +----------- drivers/gpu/drm/i915/gem/i915_gem_object.h | 24 +--------------------- drivers/gpu/drm/i915/gem/i915_gem_object_types.h | 8 -------- drivers/gpu/drm/i915/gem/selftests/huge_pages.c | 3 +-- .../drm/i915/gem/selftests/i915_gem_coherency.c | 4 ++-- .../gpu/drm/i915/gem/selftests/i915_gem_context.c | 11 +++++----- drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c | 2 +- drivers/gpu/drm/i915/gt/intel_engine_cs.c | 2 +- drivers/gpu/drm/i915/gt/intel_ringbuffer.c | 3 +-- drivers/gpu/drm/i915/gt/selftest_hangcheck.c | 9 +------- drivers/gpu/drm/i915/gt/selftest_workarounds.c | 3 --- drivers/gpu/drm/i915/gvt/scheduler.c | 2 +- drivers/gpu/drm/i915/i915_gem_batch_pool.c | 2 +- drivers/gpu/drm/i915/i915_gem_render_state.c | 2 +- drivers/gpu/drm/i915/i915_vma.c | 15 ++++++-------- drivers/gpu/drm/i915/selftests/i915_request.c | 8 -------- drivers/gpu/drm/i915/selftests/igt_spinner.c | 9 +------- 18 files changed, 26 insertions(+), 96 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index 5dcdf6540f43..08721ef62e4e 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -112,7 +112,7 @@ static void lut_close(struct i915_gem_context *ctx) radix_tree_iter_delete(&ctx->handles_vma, &iter, slot); vma->open_count--; - __i915_gem_object_release_unless_active(vma->obj); + i915_vma_put(vma); } rcu_read_unlock(); } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c index a6a3452d2b3e..f064876f1214 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c @@ -155,7 +155,7 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file) list_del(&lut->ctx_link); i915_lut_handle_free(lut); - __i915_gem_object_release_unless_active(obj); + i915_gem_object_put(obj); } mutex_unlock(&i915->drm.struct_mutex); @@ -347,17 +347,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj) call_rcu(&obj->rcu, __i915_gem_free_object_rcu); } -void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj) -{ - lockdep_assert_held(&obj->base.dev->struct_mutex); - - if (!i915_gem_object_has_active_reference(obj) && - i915_gem_object_is_active(obj)) - i915_gem_object_set_active_reference(obj); - else - i915_gem_object_put(obj); -} - static inline enum fb_op_origin fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain) { diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h index 5233ec3a056d..7cb1871d7128 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h @@ -161,31 +161,9 @@ i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj) static inline bool i915_gem_object_is_active(const struct drm_i915_gem_object *obj) { - return obj->active_count; + return READ_ONCE(obj->active_count); } -static inline bool -i915_gem_object_has_active_reference(const struct drm_i915_gem_object *obj) -{ - return test_bit(I915_BO_ACTIVE_REF, &obj->flags); -} - -static inline void -i915_gem_object_set_active_reference(struct drm_i915_gem_object *obj) -{ - 
lockdep_assert_held(&obj->base.dev->struct_mutex); - __set_bit(I915_BO_ACTIVE_REF, &obj->flags); -} - -static inline void -i915_gem_object_clear_active_reference(struct drm_i915_gem_object *obj) -{ - lockdep_assert_held(&obj->base.dev->struct_mutex); - __clear_bit(I915_BO_ACTIVE_REF, &obj->flags); -} - -void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj); - static inline bool i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj) { diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h index df8e29ee3943..67a992d6ee0c 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h @@ -120,14 +120,6 @@ struct drm_i915_gem_object { struct list_head batch_pool_link; I915_SELFTEST_DECLARE(struct list_head st_link); - unsigned long flags; - - /** - * Have we taken a reference for the object for incomplete GPU - * activity? - */ -#define I915_BO_ACTIVE_REF 0 - /* * Is the object to be mapped as read-only to the GPU * Only honoured if hardware has relevant pte bit diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c index 465e0e1d4aa3..ec2985c0a92e 100644 --- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c @@ -976,8 +976,6 @@ static int gpu_write(struct i915_vma *vma, if (err) goto err_request; - i915_gem_object_set_active_reference(batch->obj); - i915_vma_lock(vma); err = i915_gem_object_set_to_gtt_domain(vma->obj, false); if (err == 0) @@ -996,6 +994,7 @@ err_request: err_batch: i915_vma_unpin(batch); i915_vma_close(batch); + i915_vma_put(batch); return err; } diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c index b5c5dd034d5c..c72e17da090c 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c @@ -365,7 +365,7 @@ static int igt_gem_coherency(void *arg) } } - __i915_gem_object_release_unless_active(obj); + i915_gem_object_put(obj); } } } @@ -377,7 +377,7 @@ unlock: return err; put_object: - __i915_gem_object_release_unless_active(obj); + i915_gem_object_put(obj); goto unlock; } diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c index 72eedd6c2a0a..1bc3b8026400 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c @@ -318,14 +318,14 @@ static int gpu_fill(struct drm_i915_gem_object *obj, if (err) goto skip_request; - i915_gem_object_set_active_reference(batch->obj); + i915_request_add(rq); + i915_vma_unpin(batch); i915_vma_close(batch); + i915_vma_put(batch); i915_vma_unpin(vma); - i915_request_add(rq); - return 0; skip_request: @@ -802,9 +802,9 @@ emit_rpcs_query(struct drm_i915_gem_object *obj, if (err) goto skip_request; - i915_gem_object_set_active_reference(batch->obj); i915_vma_unpin(batch); i915_vma_close(batch); + i915_vma_put(batch); i915_vma_unpin(vma); @@ -820,6 +820,7 @@ err_request: i915_request_add(rq); err_batch: i915_vma_unpin(batch); + i915_vma_put(batch); err_vma: i915_vma_unpin(vma); @@ -1365,9 +1366,9 @@ static int write_to_scratch(struct i915_gem_context *ctx, if (err) goto skip_request; - i915_gem_object_set_active_reference(obj); i915_vma_unpin(vma); i915_vma_close(vma); + i915_vma_put(vma); i915_request_add(rq); diff 
--git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c index 297f8864d392..5db3327958fb 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c @@ -354,8 +354,8 @@ static int make_obj_busy(struct drm_i915_gem_object *obj) i915_request_add(rq); - __i915_gem_object_release_unless_active(obj); i915_vma_unpin(vma); + i915_gem_object_put(obj); /* leave it only alive via its active ref */ return err; } diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index 672dde71a46c..6b838948ba24 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -527,7 +527,7 @@ static void cleanup_status_page(struct intel_engine_cs *engine) i915_vma_unpin(vma); i915_gem_object_unpin_map(vma->obj); - __i915_gem_object_release_unless_active(vma->obj); + i915_gem_object_put(vma->obj); } static int pin_ggtt_status_page(struct intel_engine_cs *engine, diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c index 66d5a52d505c..ff58d658e3e2 100644 --- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c @@ -1302,10 +1302,9 @@ intel_engine_create_ring(struct intel_engine_cs *engine, void intel_ring_free(struct kref *ref) { struct intel_ring *ring = container_of(ref, typeof(*ring), ref); - struct drm_i915_gem_object *obj = ring->vma->obj; i915_vma_close(ring->vma); - __i915_gem_object_release_unless_active(obj); + i915_vma_put(ring->vma); i915_timeline_put(ring->timeline); kfree(ring); diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c index c3fa10fd9383..3be67e561c26 100644 --- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c +++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c @@ -120,15 +120,8 @@ static int move_to_active(struct i915_vma *vma, i915_vma_lock(vma); err = i915_vma_move_to_active(vma, rq, flags); i915_vma_unlock(vma); - if (err) - return err; - - if (!i915_gem_object_has_active_reference(vma->obj)) { - i915_gem_object_get(vma->obj); - i915_gem_object_set_active_reference(vma->obj); - } - return 0; + return err; } static struct i915_request * diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c index 38a69acf60ae..2cb1519fde42 100644 --- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c +++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c @@ -142,9 +142,6 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine) } intel_ring_advance(rq, cs); - i915_gem_object_get(result); - i915_gem_object_set_active_reference(result); - i915_request_add(rq); i915_vma_unpin(vma); diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 8a00e2d0c81c..a09ad98a3f43 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -600,7 +600,7 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload) i915_vma_unpin(bb->vma); i915_vma_close(bb->vma); } - __i915_gem_object_release_unless_active(bb->obj); + i915_gem_object_put(bb->obj); } list_del(&bb->list); kfree(bb); diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.c b/drivers/gpu/drm/i915/i915_gem_batch_pool.c index f3890b664e3f..56adfdcaed3e 100644 --- a/drivers/gpu/drm/i915/i915_gem_batch_pool.c +++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.c @@ -55,7 +55,7 
@@ void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool) list_for_each_entry_safe(obj, next, &pool->cache_list[n], batch_pool_link) - __i915_gem_object_release_unless_active(obj); + i915_gem_object_put(obj); INIT_LIST_HEAD(&pool->cache_list[n]); } diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c index 706ed71468e8..4ee032072d4f 100644 --- a/drivers/gpu/drm/i915/i915_gem_render_state.c +++ b/drivers/gpu/drm/i915/i915_gem_render_state.c @@ -230,6 +230,6 @@ err_unpin: err_vma: i915_vma_close(so.vma); err_obj: - __i915_gem_object_release_unless_active(so.obj); + i915_gem_object_put(so.obj); return err; } diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index db94d7b6c5a6..59a2f6af6103 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -112,10 +112,7 @@ static void __i915_vma_retire(struct i915_active *ref) */ obj_bump_mru(obj); - if (i915_gem_object_has_active_reference(obj)) { - i915_gem_object_clear_active_reference(obj); - i915_gem_object_put(obj); - } + i915_gem_object_put(obj); /* and drop the active reference */ } static struct i915_vma * @@ -443,7 +440,7 @@ void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags) if (flags & I915_VMA_RELEASE_MAP) i915_gem_object_unpin_map(obj); - __i915_gem_object_release_unless_active(obj); + i915_gem_object_put(obj); } bool i915_vma_misplaced(const struct i915_vma *vma, @@ -933,12 +930,12 @@ int i915_vma_move_to_active(struct i915_vma *vma, * add the active reference first and queue for it to be dropped * *last*. */ - if (!vma->active.count) - obj->active_count++; + if (!vma->active.count && !obj->active_count++) + i915_gem_object_get(obj); /* once more for the active ref */ if (unlikely(i915_active_ref(&vma->active, rq->fence.context, rq))) { - if (!vma->active.count) - obj->active_count--; + if (!vma->active.count && !--obj->active_count) + i915_gem_object_put(obj); return -ENOMEM; } diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index 2c5479ca1f69..dfaa5bc52ecc 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -869,11 +869,6 @@ static int live_all_engines(void *arg) GEM_BUG_ON(err); request[id]->batch = batch; - if (!i915_gem_object_has_active_reference(batch->obj)) { - i915_gem_object_get(batch->obj); - i915_gem_object_set_active_reference(batch->obj); - } - i915_vma_lock(batch); err = i915_vma_move_to_active(batch, request[id], 0); i915_vma_unlock(batch); @@ -996,9 +991,6 @@ static int live_sequential_engines(void *arg) i915_vma_unlock(batch); GEM_BUG_ON(err); - i915_gem_object_set_active_reference(batch->obj); - i915_vma_get(batch); - i915_request_get(request[id]); i915_request_add(request[id]); diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c index 15c0f0af9658..3ea77c0ca678 100644 --- a/drivers/gpu/drm/i915/selftests/igt_spinner.c +++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c @@ -79,15 +79,8 @@ static int move_to_active(struct i915_vma *vma, i915_vma_lock(vma); err = i915_vma_move_to_active(vma, rq, flags); i915_vma_unlock(vma); - if (err) - return err; - - if (!i915_gem_object_has_active_reference(vma->obj)) { - i915_gem_object_get(vma->obj); - i915_gem_object_set_active_reference(vma->obj); - } - return 0; + return err; } struct i915_request * -- cgit v1.2.3 From 638d87c4a70e497465cc489e03310de39efa0f82 Mon Sep 
17 00:00:00 2001 From: Ville Syrjälä Date: Mon, 13 May 2019 16:39:03 +0300 Subject: drm/i915: Update pipe gamma enable bits when C8 planes are getting enabled/disabled MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When the first C8 plane gets enabled, or the last one gets disabled we may need to enable/disable the pipe gamma for the other active planes. Check for that and run through the normal intel_color_check() path. Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190513133904.20374-2-ville.syrjala@linux.intel.com Reviewed-by: Maarten Lankhorst --- drivers/gpu/drm/i915/intel_display.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 0e3abc7f65e3..4cbea30439ba 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -11507,6 +11507,17 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state) return 0; } +static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc); + struct intel_atomic_state *state = + to_intel_atomic_state(new_crtc_state->base.state); + const struct intel_crtc_state *old_crtc_state = + intel_atomic_get_old_crtc_state(state, crtc); + + return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes; +} + static int intel_crtc_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *crtc_state) { @@ -11530,6 +11541,13 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc, return ret; } + /* + * May need to update pipe gamma enable bits + * when C8 planes are getting enabled/disabled. + */ + if (c8_planes_changed(pipe_config)) + crtc_state->color_mgmt_changed = true; + if (mode_changed || pipe_config->update_pipe || crtc_state->color_mgmt_changed) { ret = intel_color_check(pipe_config); -- cgit v1.2.3 From be8a4b2d319994af690e0f636a3a7be9dd43184d Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Mon, 13 May 2019 16:39:04 +0300 Subject: drm/i915: Add debugs for the C8 vs. legacy LUT case MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Leave a hint in dmesg when we reject a configuration attempting to use C8 planes without the legacy LUT loaded. 
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190513133904.20374-3-ville.syrjala@linux.intel.com Reviewed-by: Maarten Lankhorst --- drivers/gpu/drm/i915/intel_color.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c index 962db1236970..45649904ba5c 100644 --- a/drivers/gpu/drm/i915/intel_color.c +++ b/drivers/gpu/drm/i915/intel_color.c @@ -959,8 +959,10 @@ static int check_luts(const struct intel_crtc_state *crtc_state) return 0; /* C8 relies on its palette being stored in the legacy LUT */ - if (crtc_state->c8_planes) + if (crtc_state->c8_planes) { + DRM_DEBUG_KMS("C8 pixelformat requires the legacy LUT\n"); return -EINVAL; + } degamma_length = INTEL_INFO(dev_priv)->color.degamma_lut_size; gamma_length = INTEL_INFO(dev_priv)->color.gamma_lut_size; -- cgit v1.2.3 From bd41ca49a2735eb2f016a3fa4fe9716b56b55d71 Mon Sep 17 00:00:00 2001 From: Stuart Summers Date: Fri, 24 May 2019 08:40:18 -0700 Subject: drm/i915: Use local variable for SSEU info in GETPARAM ioctl In the GETPARAM ioctl handler, use a local variable to consolidate usage of SSEU runtime info. v2: add const to sseu_dev_info variable Cc: Daniele Ceraolo Spurio Reviewed-by: Daniele Ceraolo Spurio Signed-off-by: Stuart Summers Signed-off-by: Manasi Navare Link: https://patchwork.freedesktop.org/patch/msgid/20190524154022.13575-2-stuart.summers@intel.com --- drivers/gpu/drm/i915/i915_drv.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 5ca1594f3075..a75917489438 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -332,6 +332,7 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data, { struct drm_i915_private *dev_priv = to_i915(dev); struct pci_dev *pdev = dev_priv->drm.pdev; + const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; drm_i915_getparam_t *param = data; int value; @@ -385,12 +386,12 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data, value = i915_cmd_parser_get_version(dev_priv); break; case I915_PARAM_SUBSLICE_TOTAL: - value = sseu_subslice_total(&RUNTIME_INFO(dev_priv)->sseu); + value = sseu_subslice_total(sseu); if (!value) return -ENODEV; break; case I915_PARAM_EU_TOTAL: - value = RUNTIME_INFO(dev_priv)->sseu.eu_total; + value = sseu->eu_total; if (!value) return -ENODEV; break; @@ -407,7 +408,7 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data, value = HAS_POOLED_EU(dev_priv); break; case I915_PARAM_MIN_EU_IN_POOL: - value = RUNTIME_INFO(dev_priv)->sseu.min_eu_in_pool; + value = sseu->min_eu_in_pool; break; case I915_PARAM_HUC_STATUS: value = intel_huc_check_status(&dev_priv->huc); @@ -458,12 +459,12 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data, value = intel_engines_has_context_isolation(dev_priv); break; case I915_PARAM_SLICE_MASK: - value = RUNTIME_INFO(dev_priv)->sseu.slice_mask; + value = sseu->slice_mask; if (!value) return -ENODEV; break; case I915_PARAM_SUBSLICE_MASK: - value = RUNTIME_INFO(dev_priv)->sseu.subslice_mask[0]; + value = sseu->subslice_mask[0]; if (!value) return -ENODEV; break; -- cgit v1.2.3 From 135a63b68250b8ab2ed854f0c907d5ff2a56f09e Mon Sep 17 00:00:00 2001 From: Stuart Summers Date: Fri, 24 May 2019 08:40:19 -0700 Subject: drm/i915: Add macro for SSEU stride calculation Subslice 
stride and EU stride are calculated multiple times in i915_query. Move this calculation to a macro to reduce code duplication. v2: update headers in intel_sseu.h v3: use GEN_SSEU_STRIDE for stride calculations in intel_sseu.h apply s/bits/max_entries/ to GEN_SSEU_STRIDE parameter Cc: Daniele Ceraolo Spurio Reviewed-by: Daniele Ceraolo Spurio Signed-off-by: Stuart Summers Signed-off-by: Manasi Navare Link: https://patchwork.freedesktop.org/patch/msgid/20190524154022.13575-3-stuart.summers@intel.com --- drivers/gpu/drm/i915/gt/intel_sseu.h | 2 ++ drivers/gpu/drm/i915/i915_query.c | 17 ++++++++--------- drivers/gpu/drm/i915/intel_device_info.h | 9 +++------ 3 files changed, 13 insertions(+), 15 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.h b/drivers/gpu/drm/i915/gt/intel_sseu.h index 73bc824094e8..d20b7f96907d 100644 --- a/drivers/gpu/drm/i915/gt/intel_sseu.h +++ b/drivers/gpu/drm/i915/gt/intel_sseu.h @@ -8,11 +8,13 @@ #define __INTEL_SSEU_H__ #include +#include struct drm_i915_private; #define GEN_MAX_SLICES (6) /* CNL upper bound */ #define GEN_MAX_SUBSLICES (8) /* ICL upper bound */ +#define GEN_SSEU_STRIDE(max_entries) DIV_ROUND_UP(max_entries, BITS_PER_BYTE) struct sseu_dev_info { u8 slice_mask; diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c index 414d0a6d1f70..7b7016171057 100644 --- a/drivers/gpu/drm/i915/i915_query.c +++ b/drivers/gpu/drm/i915/i915_query.c @@ -37,6 +37,8 @@ static int query_topology_info(struct drm_i915_private *dev_priv, const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; struct drm_i915_query_topology_info topo; u32 slice_length, subslice_length, eu_length, total_length; + u8 subslice_stride = GEN_SSEU_STRIDE(sseu->max_subslices); + u8 eu_stride = GEN_SSEU_STRIDE(sseu->max_eus_per_subslice); int ret; if (query_item->flags != 0) @@ -48,12 +50,10 @@ static int query_topology_info(struct drm_i915_private *dev_priv, BUILD_BUG_ON(sizeof(u8) != sizeof(sseu->slice_mask)); slice_length = sizeof(sseu->slice_mask); - subslice_length = sseu->max_slices * - DIV_ROUND_UP(sseu->max_subslices, BITS_PER_BYTE); - eu_length = sseu->max_slices * sseu->max_subslices * - DIV_ROUND_UP(sseu->max_eus_per_subslice, BITS_PER_BYTE); - - total_length = sizeof(topo) + slice_length + subslice_length + eu_length; + subslice_length = sseu->max_slices * subslice_stride; + eu_length = sseu->max_slices * sseu->max_subslices * eu_stride; + total_length = sizeof(topo) + slice_length + subslice_length + + eu_length; ret = copy_query_item(&topo, sizeof(topo), total_length, query_item); @@ -69,10 +69,9 @@ static int query_topology_info(struct drm_i915_private *dev_priv, topo.max_eus_per_subslice = sseu->max_eus_per_subslice; topo.subslice_offset = slice_length; - topo.subslice_stride = DIV_ROUND_UP(sseu->max_subslices, BITS_PER_BYTE); + topo.subslice_stride = subslice_stride; topo.eu_offset = slice_length + subslice_length; - topo.eu_stride = - DIV_ROUND_UP(sseu->max_eus_per_subslice, BITS_PER_BYTE); + topo.eu_stride = eu_stride; if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr), &topo, sizeof(topo))) diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h index 5a2e17d6146b..9d43f7edfd63 100644 --- a/drivers/gpu/drm/i915/intel_device_info.h +++ b/drivers/gpu/drm/i915/intel_device_info.h @@ -231,8 +231,7 @@ static inline unsigned int sseu_subslice_total(const struct sseu_dev_info *sseu) static inline int sseu_eu_idx(const struct sseu_dev_info *sseu, int slice, 
int subslice) { - int subslice_stride = DIV_ROUND_UP(sseu->max_eus_per_subslice, - BITS_PER_BYTE); + int subslice_stride = GEN_SSEU_STRIDE(sseu->max_eus_per_subslice); int slice_stride = sseu->max_subslices * subslice_stride; return slice * slice_stride + subslice * subslice_stride; @@ -244,8 +243,7 @@ static inline u16 sseu_get_eus(const struct sseu_dev_info *sseu, int i, offset = sseu_eu_idx(sseu, slice, subslice); u16 eu_mask = 0; - for (i = 0; - i < DIV_ROUND_UP(sseu->max_eus_per_subslice, BITS_PER_BYTE); i++) { + for (i = 0; i < GEN_SSEU_STRIDE(sseu->max_eus_per_subslice); i++) { eu_mask |= ((u16) sseu->eu_mask[offset + i]) << (i * BITS_PER_BYTE); } @@ -258,8 +256,7 @@ static inline void sseu_set_eus(struct sseu_dev_info *sseu, { int i, offset = sseu_eu_idx(sseu, slice, subslice); - for (i = 0; - i < DIV_ROUND_UP(sseu->max_eus_per_subslice, BITS_PER_BYTE); i++) { + for (i = 0; i < GEN_SSEU_STRIDE(sseu->max_eus_per_subslice); i++) { sseu->eu_mask[offset + i] = (eu_mask >> (BITS_PER_BYTE * i)) & 0xff; } -- cgit v1.2.3 From b5ab1abe8df2cf38c94ffee46f0c8a377e01edb2 Mon Sep 17 00:00:00 2001 From: Stuart Summers Date: Fri, 24 May 2019 08:40:20 -0700 Subject: drm/i915: Move calculation of subslices per slice to new function Add a new function to return the number of subslices per slice to consolidate code usage. v2: rebase on changes to move sseu struct to intel_sseu.h v3: add intel_* prefix to sseu_subslices_per_slice Cc: Daniele Ceraolo Spurio Reviewed-by: Daniele Ceraolo Spurio Signed-off-by: Stuart Summers Signed-off-by: Manasi Navare Link: https://patchwork.freedesktop.org/patch/msgid/20190524154022.13575-4-stuart.summers@intel.com --- drivers/gpu/drm/i915/gt/intel_sseu.h | 6 ++++++ drivers/gpu/drm/i915/i915_debugfs.c | 2 +- drivers/gpu/drm/i915/intel_device_info.c | 4 ++-- 3 files changed, 9 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.h b/drivers/gpu/drm/i915/gt/intel_sseu.h index d20b7f96907d..9618dff46d83 100644 --- a/drivers/gpu/drm/i915/gt/intel_sseu.h +++ b/drivers/gpu/drm/i915/gt/intel_sseu.h @@ -63,6 +63,12 @@ intel_sseu_from_device_info(const struct sseu_dev_info *sseu) return value; } +static inline unsigned int +intel_sseu_subslices_per_slice(const struct sseu_dev_info *sseu, u8 slice) +{ + return hweight8(sseu->subslice_mask[slice]); +} + u32 intel_sseu_make_rpcs(struct drm_i915_private *i915, const struct intel_sseu *req_sseu); diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 7ab8340af991..293c3e3e465c 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -4203,7 +4203,7 @@ static void i915_print_sseu_info(struct seq_file *m, bool is_available_info, sseu_subslice_total(sseu)); for (s = 0; s < fls(sseu->slice_mask); s++) { seq_printf(m, " %s Slice%i subslices: %u\n", type, - s, hweight8(sseu->subslice_mask[s])); + s, intel_sseu_subslices_per_slice(sseu, s)); } seq_printf(m, " %s EU Total: %u\n", type, sseu->eu_total); diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index 6af480b95bc6..9d6b9c45bc5e 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -93,7 +93,7 @@ static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p) drm_printf(p, "subslice total: %u\n", sseu_subslice_total(sseu)); for (s = 0; s < sseu->max_slices; s++) { drm_printf(p, "slice%d: %u subslices, mask=%04x\n", - s, 
hweight8(sseu->subslice_mask[s]), + s, intel_sseu_subslices_per_slice(sseu, s), sseu->subslice_mask[s]); } drm_printf(p, "EU total: %u\n", sseu->eu_total); @@ -126,7 +126,7 @@ void intel_device_info_dump_topology(const struct sseu_dev_info *sseu, for (s = 0; s < sseu->max_slices; s++) { drm_printf(p, "slice%d: %u subslice(s) (0x%hhx):\n", - s, hweight8(sseu->subslice_mask[s]), + s, intel_sseu_subslices_per_slice(sseu, s), sseu->subslice_mask[s]); for (ss = 0; ss < sseu->max_subslices; ss++) { -- cgit v1.2.3 From 0040fd19e7285f096523c14daef131a81468a6ea Mon Sep 17 00:00:00 2001 From: Stuart Summers Date: Fri, 24 May 2019 08:40:21 -0700 Subject: drm/i915: Refactor sseu helper functions Move functions to intel_sseu.h and remove inline qualifier. Additionally, ensure these are all prefixed with intel_sseu_* to match the convention of other functions in i915. v2: fix spacing from checkpatch warning v3: squash helper function changes into a single patch break 80 character line to fix checkpatch warning move get/set_eus helpers to intel_device_info.c v4: Remove intel_ prefix from static functions in intel_device_info.c and correctly copy changes to stride calculation in those functions. Acked-by: Jani Nikula Cc: Daniele Ceraolo Spurio Reviewed-by: Daniele Ceraolo Spurio Signed-off-by: Stuart Summers Signed-off-by: Manasi Navare Link: https://patchwork.freedesktop.org/patch/msgid/20190524154022.13575-5-stuart.summers@intel.com --- drivers/gpu/drm/i915/gt/intel_sseu.c | 17 +++++++++ drivers/gpu/drm/i915/gt/intel_sseu.h | 10 +++--- drivers/gpu/drm/i915/i915_debugfs.c | 4 +-- drivers/gpu/drm/i915/i915_drv.c | 2 +- drivers/gpu/drm/i915/intel_device_info.c | 60 ++++++++++++++++++++++++++------ drivers/gpu/drm/i915/intel_device_info.h | 44 ----------------------- 6 files changed, 74 insertions(+), 63 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c index 7f448f3bea0b..a0756f006f5f 100644 --- a/drivers/gpu/drm/i915/gt/intel_sseu.c +++ b/drivers/gpu/drm/i915/gt/intel_sseu.c @@ -8,6 +8,23 @@ #include "intel_lrc_reg.h" #include "intel_sseu.h" +unsigned int +intel_sseu_subslice_total(const struct sseu_dev_info *sseu) +{ + unsigned int i, total = 0; + + for (i = 0; i < ARRAY_SIZE(sseu->subslice_mask); i++) + total += hweight8(sseu->subslice_mask[i]); + + return total; +} + +unsigned int +intel_sseu_subslices_per_slice(const struct sseu_dev_info *sseu, u8 slice) +{ + return hweight8(sseu->subslice_mask[slice]); +} + u32 intel_sseu_make_rpcs(struct drm_i915_private *i915, const struct intel_sseu *req_sseu) { diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.h b/drivers/gpu/drm/i915/gt/intel_sseu.h index 9618dff46d83..b50d0401a4e2 100644 --- a/drivers/gpu/drm/i915/gt/intel_sseu.h +++ b/drivers/gpu/drm/i915/gt/intel_sseu.h @@ -63,11 +63,11 @@ intel_sseu_from_device_info(const struct sseu_dev_info *sseu) return value; } -static inline unsigned int -intel_sseu_subslices_per_slice(const struct sseu_dev_info *sseu, u8 slice) -{ - return hweight8(sseu->subslice_mask[slice]); -} +unsigned int +intel_sseu_subslice_total(const struct sseu_dev_info *sseu); + +unsigned int +intel_sseu_subslices_per_slice(const struct sseu_dev_info *sseu, u8 slice); u32 intel_sseu_make_rpcs(struct drm_i915_private *i915, const struct intel_sseu *req_sseu); diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 293c3e3e465c..74afdeff2245 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ 
b/drivers/gpu/drm/i915/i915_debugfs.c @@ -4176,7 +4176,7 @@ static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv, RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s]; } sseu->eu_total = sseu->eu_per_subslice * - sseu_subslice_total(sseu); + intel_sseu_subslice_total(sseu); /* subtract fused off EU(s) from enabled slice(s) */ for (s = 0; s < fls(sseu->slice_mask); s++) { @@ -4200,7 +4200,7 @@ static void i915_print_sseu_info(struct seq_file *m, bool is_available_info, seq_printf(m, " %s Slice Total: %u\n", type, hweight8(sseu->slice_mask)); seq_printf(m, " %s Subslice Total: %u\n", type, - sseu_subslice_total(sseu)); + intel_sseu_subslice_total(sseu)); for (s = 0; s < fls(sseu->slice_mask); s++) { seq_printf(m, " %s Slice%i subslices: %u\n", type, s, intel_sseu_subslices_per_slice(sseu, s)); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index a75917489438..6a8b31f631ac 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -386,7 +386,7 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data, value = i915_cmd_parser_get_version(dev_priv); break; case I915_PARAM_SUBSLICE_TOTAL: - value = sseu_subslice_total(sseu); + value = intel_sseu_subslice_total(sseu); if (!value) return -ENODEV; break; diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index 9d6b9c45bc5e..97f742530fa1 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -90,7 +90,7 @@ static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p) drm_printf(p, "slice total: %u, mask=%04x\n", hweight8(sseu->slice_mask), sseu->slice_mask); - drm_printf(p, "subslice total: %u\n", sseu_subslice_total(sseu)); + drm_printf(p, "subslice total: %u\n", intel_sseu_subslice_total(sseu)); for (s = 0; s < sseu->max_slices; s++) { drm_printf(p, "slice%d: %u subslices, mask=%04x\n", s, intel_sseu_subslices_per_slice(sseu, s), @@ -114,6 +114,40 @@ void intel_device_info_dump_runtime(const struct intel_runtime_info *info, info->cs_timestamp_frequency_khz); } +static int sseu_eu_idx(const struct sseu_dev_info *sseu, int slice, + int subslice) +{ + int subslice_stride = GEN_SSEU_STRIDE(sseu->max_eus_per_subslice); + int slice_stride = sseu->max_subslices * subslice_stride; + + return slice * slice_stride + subslice * subslice_stride; +} + +static u16 sseu_get_eus(const struct sseu_dev_info *sseu, int slice, + int subslice) +{ + int i, offset = sseu_eu_idx(sseu, slice, subslice); + u16 eu_mask = 0; + + for (i = 0; i < GEN_SSEU_STRIDE(sseu->max_eus_per_subslice); i++) { + eu_mask |= ((u16)sseu->eu_mask[offset + i]) << + (i * BITS_PER_BYTE); + } + + return eu_mask; +} + +static void sseu_set_eus(struct sseu_dev_info *sseu, int slice, int subslice, + u16 eu_mask) +{ + int i, offset = sseu_eu_idx(sseu, slice, subslice); + + for (i = 0; i < GEN_SSEU_STRIDE(sseu->max_eus_per_subslice); i++) { + sseu->eu_mask[offset + i] = + (eu_mask >> (BITS_PER_BYTE * i)) & 0xff; + } +} + void intel_device_info_dump_topology(const struct sseu_dev_info *sseu, struct drm_printer *p) { @@ -260,9 +294,10 @@ static void gen10_sseu_info_init(struct drm_i915_private *dev_priv) * EU in any one subslice may be fused off for die * recovery. */ - sseu->eu_per_subslice = sseu_subslice_total(sseu) ? + sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ? 
DIV_ROUND_UP(sseu->eu_total, - sseu_subslice_total(sseu)) : 0; + intel_sseu_subslice_total(sseu)) : + 0; /* No restrictions on Power Gating */ sseu->has_slice_pg = 1; @@ -310,8 +345,9 @@ static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv) * CHV expected to always have a uniform distribution of EU * across subslices. */ - sseu->eu_per_subslice = sseu_subslice_total(sseu) ? - sseu->eu_total / sseu_subslice_total(sseu) : + sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ? + sseu->eu_total / + intel_sseu_subslice_total(sseu) : 0; /* * CHV supports subslice power gating on devices with more than @@ -319,7 +355,7 @@ static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv) * more than one EU pair per subslice. */ sseu->has_slice_pg = 0; - sseu->has_subslice_pg = sseu_subslice_total(sseu) > 1; + sseu->has_subslice_pg = intel_sseu_subslice_total(sseu) > 1; sseu->has_eu_pg = (sseu->eu_per_subslice > 2); } @@ -393,9 +429,10 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv) * recovery. BXT is expected to be perfectly uniform in EU * distribution. */ - sseu->eu_per_subslice = sseu_subslice_total(sseu) ? + sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ? DIV_ROUND_UP(sseu->eu_total, - sseu_subslice_total(sseu)) : 0; + intel_sseu_subslice_total(sseu)) : + 0; /* * SKL+ supports slice power gating on devices with more than * one slice, and supports EU power gating on devices with @@ -407,7 +444,7 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv) sseu->has_slice_pg = !IS_GEN9_LP(dev_priv) && hweight8(sseu->slice_mask) > 1; sseu->has_subslice_pg = - IS_GEN9_LP(dev_priv) && sseu_subslice_total(sseu) > 1; + IS_GEN9_LP(dev_priv) && intel_sseu_subslice_total(sseu) > 1; sseu->has_eu_pg = sseu->eu_per_subslice > 2; if (IS_GEN9_LP(dev_priv)) { @@ -496,9 +533,10 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv) * subslices with the exception that any one EU in any one subslice may * be fused off for die recovery. */ - sseu->eu_per_subslice = sseu_subslice_total(sseu) ? + sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ? 
DIV_ROUND_UP(sseu->eu_total, - sseu_subslice_total(sseu)) : 0; + intel_sseu_subslice_total(sseu)) : + 0; /* * BDW supports slice power gating on devices with more than diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h index 9d43f7edfd63..6412a9c72898 100644 --- a/drivers/gpu/drm/i915/intel_device_info.h +++ b/drivers/gpu/drm/i915/intel_device_info.h @@ -218,50 +218,6 @@ struct intel_driver_caps { bool has_logical_contexts:1; }; -static inline unsigned int sseu_subslice_total(const struct sseu_dev_info *sseu) -{ - unsigned int i, total = 0; - - for (i = 0; i < ARRAY_SIZE(sseu->subslice_mask); i++) - total += hweight8(sseu->subslice_mask[i]); - - return total; -} - -static inline int sseu_eu_idx(const struct sseu_dev_info *sseu, - int slice, int subslice) -{ - int subslice_stride = GEN_SSEU_STRIDE(sseu->max_eus_per_subslice); - int slice_stride = sseu->max_subslices * subslice_stride; - - return slice * slice_stride + subslice * subslice_stride; -} - -static inline u16 sseu_get_eus(const struct sseu_dev_info *sseu, - int slice, int subslice) -{ - int i, offset = sseu_eu_idx(sseu, slice, subslice); - u16 eu_mask = 0; - - for (i = 0; i < GEN_SSEU_STRIDE(sseu->max_eus_per_subslice); i++) { - eu_mask |= ((u16) sseu->eu_mask[offset + i]) << - (i * BITS_PER_BYTE); - } - - return eu_mask; -} - -static inline void sseu_set_eus(struct sseu_dev_info *sseu, - int slice, int subslice, u16 eu_mask) -{ - int i, offset = sseu_eu_idx(sseu, slice, subslice); - - for (i = 0; i < GEN_SSEU_STRIDE(sseu->max_eus_per_subslice); i++) { - sseu->eu_mask[offset + i] = - (eu_mask >> (BITS_PER_BYTE * i)) & 0xff; - } -} - const char *intel_platform_name(enum intel_platform platform); void intel_device_info_subplatform_init(struct drm_i915_private *dev_priv); -- cgit v1.2.3 From 1ac159e23c2c033f1fcbf7d60286b90335a4e9b2 Mon Sep 17 00:00:00 2001 From: Stuart Summers Date: Fri, 24 May 2019 08:40:22 -0700 Subject: drm/i915: Expand subslice mask Currently, the subslice_mask runtime parameter is stored as an array of subslices per slice. Expand the subslice mask array to better match what is presented to userspace through the I915_QUERY_TOPOLOGY_INFO ioctl. 
The index into this array is then calculated: slice * subslice stride + subslice index / 8 v2: fix spacing in set_sseu_info args use set_sseu_info to initialize sseu data when building device status in debugfs rename variables in intel_engine_types.h to avoid checkpatch warnings v3: update headers in intel_sseu.h v4: add const to some sseu_dev_info variables use sseu->eu_stride for EU stride calculations v5: address review comments from Tvrtko and Daniele v6: remove extra space in intel_sseu_get_subslices return the correct subslice enable in for_each_instdone add GEM_BUG_ON to ensure user doesn't pass invalid ss_mask size use printk formatted string for subslice mask v7: remove string.h header and rebase Cc: Daniele Ceraolo Spurio Cc: Lionel Landwerlin Acked-by: Lionel Landwerlin Reviewed-by: Daniele Ceraolo Spurio Signed-off-by: Stuart Summers Signed-off-by: Manasi Navare Link: https://patchwork.freedesktop.org/patch/msgid/20190524154022.13575-6-stuart.summers@intel.com --- drivers/gpu/drm/i915/gt/intel_engine_cs.c | 24 +++++- drivers/gpu/drm/i915/gt/intel_engine_types.h | 30 +++---- drivers/gpu/drm/i915/gt/intel_hangcheck.c | 3 +- drivers/gpu/drm/i915/gt/intel_sseu.c | 47 ++++++++++- drivers/gpu/drm/i915/gt/intel_sseu.h | 27 +++++- drivers/gpu/drm/i915/gt/intel_workarounds.c | 2 +- drivers/gpu/drm/i915/i915_debugfs.c | 40 ++++----- drivers/gpu/drm/i915/i915_drv.c | 6 +- drivers/gpu/drm/i915/i915_gpu_error.c | 5 +- drivers/gpu/drm/i915/i915_query.c | 10 +-- drivers/gpu/drm/i915/intel_device_info.c | 122 ++++++++++++--------------- 11 files changed, 200 insertions(+), 116 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index 6b838948ba24..158722b50691 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -953,12 +953,30 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type) } } +static inline u32 +intel_sseu_fls_subslice(const struct sseu_dev_info *sseu, u32 slice) +{ + u32 subslice; + int i; + + for (i = sseu->ss_stride - 1; i >= 0; i--) { + subslice = fls(sseu->subslice_mask[slice * sseu->ss_stride + + i]); + if (subslice) { + subslice += i * BITS_PER_BYTE; + break; + } + } + + return subslice; +} + u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv) { const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; u32 mcr_s_ss_select; u32 slice = fls(sseu->slice_mask); - u32 subslice = fls(sseu->subslice_mask[slice]); + u32 subslice = intel_sseu_fls_subslice(sseu, slice); if (IS_GEN(dev_priv, 10)) mcr_s_ss_select = GEN8_MCR_SLICE(slice) | @@ -1034,6 +1052,7 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine, struct intel_instdone *instdone) { struct drm_i915_private *dev_priv = engine->i915; + const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; struct intel_uncore *uncore = engine->uncore; u32 mmio_base = engine->mmio_base; int slice; @@ -1051,7 +1070,8 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine, instdone->slice_common = intel_uncore_read(uncore, GEN7_SC_INSTDONE); - for_each_instdone_slice_subslice(dev_priv, slice, subslice) { + for_each_instdone_slice_subslice(dev_priv, sseu, slice, + subslice) { instdone->sampler[slice][subslice] = read_subslice_reg(dev_priv, slice, subslice, GEN7_SAMPLER_INSTDONE); diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h index 01223864237a..4f311ceeab89 100644 --- 
a/drivers/gpu/drm/i915/gt/intel_engine_types.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h @@ -552,20 +552,20 @@ intel_engine_is_virtual(const struct intel_engine_cs *engine) return engine->flags & I915_ENGINE_IS_VIRTUAL; } -#define instdone_slice_mask(dev_priv__) \ - (IS_GEN(dev_priv__, 7) ? \ - 1 : RUNTIME_INFO(dev_priv__)->sseu.slice_mask) - -#define instdone_subslice_mask(dev_priv__) \ - (IS_GEN(dev_priv__, 7) ? \ - 1 : RUNTIME_INFO(dev_priv__)->sseu.subslice_mask[0]) - -#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \ - for ((slice__) = 0, (subslice__) = 0; \ - (slice__) < I915_MAX_SLICES; \ - (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \ - (slice__) += ((subslice__) == 0)) \ - for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \ - (BIT(subslice__) & instdone_subslice_mask(dev_priv__))) +#define instdone_has_slice(dev_priv___, sseu___, slice___) \ + ((IS_GEN(dev_priv___, 7) ? 1 : ((sseu___)->slice_mask)) & \ + BIT(slice___)) + +#define instdone_has_subslice(dev_priv__, sseu__, slice__, subslice__) \ + (IS_GEN(dev_priv__, 7) ? (1 & BIT(subslice__)) : \ + intel_sseu_has_subslice(sseu__, slice__, subslice__)) + +#define for_each_instdone_slice_subslice(dev_priv_, sseu_, slice_, subslice_) \ + for ((slice_) = 0, (subslice_) = 0; (slice_) < I915_MAX_SLICES; \ + (subslice_) = ((subslice_) + 1) % I915_MAX_SUBSLICES, \ + (slice_) += ((subslice_) == 0)) \ + for_each_if((instdone_has_slice(dev_priv_, sseu_, slice_)) && \ + (instdone_has_subslice(dev_priv_, sseu_, slice_, \ + subslice_))) #endif /* __INTEL_ENGINE_TYPES_H__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_hangcheck.c b/drivers/gpu/drm/i915/gt/intel_hangcheck.c index 3a4d09b80fa0..b19cd4cdcb5c 100644 --- a/drivers/gpu/drm/i915/gt/intel_hangcheck.c +++ b/drivers/gpu/drm/i915/gt/intel_hangcheck.c @@ -51,6 +51,7 @@ static bool instdone_unchanged(u32 current_instdone, u32 *old_instdone) static bool subunits_stuck(struct intel_engine_cs *engine) { struct drm_i915_private *dev_priv = engine->i915; + const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; struct intel_instdone instdone; struct intel_instdone *accu_instdone = &engine->hangcheck.instdone; bool stuck; @@ -72,7 +73,7 @@ static bool subunits_stuck(struct intel_engine_cs *engine) stuck &= instdone_unchanged(instdone.slice_common, &accu_instdone->slice_common); - for_each_instdone_slice_subslice(dev_priv, slice, subslice) { + for_each_instdone_slice_subslice(dev_priv, sseu, slice, subslice) { stuck &= instdone_unchanged(instdone.sampler[slice][subslice], &accu_instdone->sampler[slice][subslice]); stuck &= instdone_unchanged(instdone.row[slice][subslice], diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c index a0756f006f5f..763b811f2c9d 100644 --- a/drivers/gpu/drm/i915/gt/intel_sseu.c +++ b/drivers/gpu/drm/i915/gt/intel_sseu.c @@ -8,6 +8,17 @@ #include "intel_lrc_reg.h" #include "intel_sseu.h" +void intel_sseu_set_info(struct sseu_dev_info *sseu, u8 max_slices, + u8 max_subslices, u8 max_eus_per_subslice) +{ + sseu->max_slices = max_slices; + sseu->max_subslices = max_subslices; + sseu->max_eus_per_subslice = max_eus_per_subslice; + + sseu->ss_stride = GEN_SSEU_STRIDE(sseu->max_subslices); + sseu->eu_stride = GEN_SSEU_STRIDE(sseu->max_eus_per_subslice); +} + unsigned int intel_sseu_subslice_total(const struct sseu_dev_info *sseu) { @@ -19,10 +30,44 @@ intel_sseu_subslice_total(const struct sseu_dev_info *sseu) return total; } +void 
intel_sseu_copy_subslices(const struct sseu_dev_info *sseu, int slice, + u8 *to_mask) +{ + int offset = slice * sseu->ss_stride; + + memcpy(&to_mask[offset], &sseu->subslice_mask[offset], sseu->ss_stride); +} + +u32 intel_sseu_get_subslices(const struct sseu_dev_info *sseu, u8 slice) +{ + int i, offset = slice * sseu->ss_stride; + u32 mask; + + GEM_BUG_ON(slice >= sseu->max_slices); + + GEM_BUG_ON(sseu->ss_stride > sizeof(mask)); + + for (i = 0; i < sseu->ss_stride; i++) + mask |= (u32)sseu->subslice_mask[offset + i] << + i * BITS_PER_BYTE; + + return mask; +} + +void intel_sseu_set_subslices(struct sseu_dev_info *sseu, int slice, + u32 ss_mask) +{ + int i, offset = slice * sseu->ss_stride; + + for (i = 0; i < sseu->ss_stride; i++) + sseu->subslice_mask[offset + i] = + (ss_mask >> (BITS_PER_BYTE * i)) & 0xff; +} + unsigned int intel_sseu_subslices_per_slice(const struct sseu_dev_info *sseu, u8 slice) { - return hweight8(sseu->subslice_mask[slice]); + return hweight32(intel_sseu_get_subslices(sseu, slice)); } u32 intel_sseu_make_rpcs(struct drm_i915_private *i915, diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.h b/drivers/gpu/drm/i915/gt/intel_sseu.h index b50d0401a4e2..eee21d9f320e 100644 --- a/drivers/gpu/drm/i915/gt/intel_sseu.h +++ b/drivers/gpu/drm/i915/gt/intel_sseu.h @@ -15,10 +15,11 @@ struct drm_i915_private; #define GEN_MAX_SLICES (6) /* CNL upper bound */ #define GEN_MAX_SUBSLICES (8) /* ICL upper bound */ #define GEN_SSEU_STRIDE(max_entries) DIV_ROUND_UP(max_entries, BITS_PER_BYTE) +#define GEN_MAX_SUBSLICE_STRIDE GEN_SSEU_STRIDE(GEN_MAX_SUBSLICES) struct sseu_dev_info { u8 slice_mask; - u8 subslice_mask[GEN_MAX_SLICES]; + u8 subslice_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICE_STRIDE]; u16 eu_total; u8 eu_per_subslice; u8 min_eu_in_pool; @@ -33,6 +34,9 @@ struct sseu_dev_info { u8 max_subslices; u8 max_eus_per_subslice; + u8 ss_stride; + u8 eu_stride; + /* We don't have more than 8 eus per subslice at the moment and as we * store eus enabled using bits, no need to multiply by eus per * subslice. 
@@ -63,12 +67,33 @@ intel_sseu_from_device_info(const struct sseu_dev_info *sseu) return value; } +static inline bool +intel_sseu_has_subslice(const struct sseu_dev_info *sseu, int slice, + int subslice) +{ + u8 mask = sseu->subslice_mask[slice * sseu->ss_stride + + subslice / BITS_PER_BYTE]; + + return mask & BIT(subslice % BITS_PER_BYTE); +} + +void intel_sseu_set_info(struct sseu_dev_info *sseu, u8 max_slices, + u8 max_subslices, u8 max_eus_per_subslice); + unsigned int intel_sseu_subslice_total(const struct sseu_dev_info *sseu); unsigned int intel_sseu_subslices_per_slice(const struct sseu_dev_info *sseu, u8 slice); +void intel_sseu_copy_subslices(const struct sseu_dev_info *sseu, int slice, + u8 *to_mask); + +u32 intel_sseu_get_subslices(const struct sseu_dev_info *sseu, u8 slice); + +void intel_sseu_set_subslices(struct sseu_dev_info *sseu, int slice, + u32 ss_mask); + u32 intel_sseu_make_rpcs(struct drm_i915_private *i915, const struct intel_sseu *req_sseu); diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index 133d069244f4..fbc853085809 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -784,7 +784,7 @@ wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal) u32 slice = fls(sseu->slice_mask); u32 fuse3 = intel_uncore_read(&i915->uncore, GEN10_MIRROR_FUSE3); - u8 ss_mask = sseu->subslice_mask[slice]; + u32 ss_mask = intel_sseu_get_subslices(sseu, slice); u8 enabled_mask = (ss_mask | ss_mask >> GEN10_L3BANK_PAIR_COUNT) & GEN10_L3BANK_MASK; diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 74afdeff2245..e415d7ef90f2 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1271,6 +1271,7 @@ static void i915_instdone_info(struct drm_i915_private *dev_priv, struct seq_file *m, struct intel_instdone *instdone) { + struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; int slice; int subslice; @@ -1286,11 +1287,11 @@ static void i915_instdone_info(struct drm_i915_private *dev_priv, if (INTEL_GEN(dev_priv) <= 6) return; - for_each_instdone_slice_subslice(dev_priv, slice, subslice) + for_each_instdone_slice_subslice(dev_priv, sseu, slice, subslice) seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n", slice, subslice, instdone->sampler[slice][subslice]); - for_each_instdone_slice_subslice(dev_priv, slice, subslice) + for_each_instdone_slice_subslice(dev_priv, sseu, slice, subslice) seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n", slice, subslice, instdone->row[slice][subslice]); } @@ -4084,7 +4085,7 @@ static void gen10_sseu_device_status(struct drm_i915_private *dev_priv, continue; sseu->slice_mask |= BIT(s); - sseu->subslice_mask[s] = info->sseu.subslice_mask[s]; + intel_sseu_copy_subslices(&info->sseu, s, sseu->subslice_mask); for (ss = 0; ss < info->sseu.max_subslices; ss++) { unsigned int eu_cnt; @@ -4135,18 +4136,21 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv, sseu->slice_mask |= BIT(s); if (IS_GEN9_BC(dev_priv)) - sseu->subslice_mask[s] = - RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s]; + intel_sseu_copy_subslices(&info->sseu, s, + sseu->subslice_mask); for (ss = 0; ss < info->sseu.max_subslices; ss++) { unsigned int eu_cnt; + u8 ss_idx = s * info->sseu.ss_stride + + ss / BITS_PER_BYTE; if (IS_GEN9_LP(dev_priv)) { if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss)))) /* skip disabled subslice */ continue; - sseu->subslice_mask[s] |= BIT(ss); + 
sseu->subslice_mask[ss_idx] |= + BIT(ss % BITS_PER_BYTE); } eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] & @@ -4163,25 +4167,23 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv, static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv, struct sseu_dev_info *sseu) { + struct intel_runtime_info *info = RUNTIME_INFO(dev_priv); u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO); int s; sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK; if (sseu->slice_mask) { - sseu->eu_per_subslice = - RUNTIME_INFO(dev_priv)->sseu.eu_per_subslice; - for (s = 0; s < fls(sseu->slice_mask); s++) { - sseu->subslice_mask[s] = - RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s]; - } + sseu->eu_per_subslice = info->sseu.eu_per_subslice; + for (s = 0; s < fls(sseu->slice_mask); s++) + intel_sseu_copy_subslices(&info->sseu, s, + sseu->subslice_mask); sseu->eu_total = sseu->eu_per_subslice * intel_sseu_subslice_total(sseu); /* subtract fused off EU(s) from enabled slice(s) */ for (s = 0; s < fls(sseu->slice_mask); s++) { - u8 subslice_7eu = - RUNTIME_INFO(dev_priv)->sseu.subslice_7eu[s]; + u8 subslice_7eu = info->sseu.subslice_7eu[s]; sseu->eu_total -= hweight8(subslice_7eu); } @@ -4228,6 +4230,7 @@ static void i915_print_sseu_info(struct seq_file *m, bool is_available_info, static int i915_sseu_status(struct seq_file *m, void *unused) { struct drm_i915_private *dev_priv = node_to_i915(m->private); + const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv); struct sseu_dev_info sseu; intel_wakeref_t wakeref; @@ -4235,14 +4238,13 @@ static int i915_sseu_status(struct seq_file *m, void *unused) return -ENODEV; seq_puts(m, "SSEU Device Info\n"); - i915_print_sseu_info(m, true, &RUNTIME_INFO(dev_priv)->sseu); + i915_print_sseu_info(m, true, &info->sseu); seq_puts(m, "SSEU Device Status\n"); memset(&sseu, 0, sizeof(sseu)); - sseu.max_slices = RUNTIME_INFO(dev_priv)->sseu.max_slices; - sseu.max_subslices = RUNTIME_INFO(dev_priv)->sseu.max_subslices; - sseu.max_eus_per_subslice = - RUNTIME_INFO(dev_priv)->sseu.max_eus_per_subslice; + intel_sseu_set_info(&sseu, info->sseu.max_slices, + info->sseu.max_subslices, + info->sseu.max_eus_per_subslice); with_intel_runtime_pm(dev_priv, wakeref) { if (IS_CHERRYVIEW(dev_priv)) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 6a8b31f631ac..85e77889e6a6 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -334,7 +334,7 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data, struct pci_dev *pdev = dev_priv->drm.pdev; const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; drm_i915_getparam_t *param = data; - int value; + int value = 0; switch (param->param) { case I915_PARAM_IRQ_ACTIVE: @@ -464,7 +464,9 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data, return -ENODEV; break; case I915_PARAM_SUBSLICE_MASK: - value = sseu->subslice_mask[0]; + /* Only copy bits from the first slice */ + memcpy(&value, sseu->subslice_mask, + min(sseu->ss_stride, (u8)sizeof(value))); if (!value) return -ENODEV; break; diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 707811256501..4b23e9e4bae6 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -411,6 +411,7 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m, static void error_print_instdone(struct drm_i915_error_state_buf *m, const struct drm_i915_error_engine *ee) { + struct sseu_dev_info *sseu = 
&RUNTIME_INFO(m->i915)->sseu; int slice; int subslice; @@ -426,12 +427,12 @@ static void error_print_instdone(struct drm_i915_error_state_buf *m, if (INTEL_GEN(m->i915) <= 6) return; - for_each_instdone_slice_subslice(m->i915, slice, subslice) + for_each_instdone_slice_subslice(m->i915, sseu, slice, subslice) err_printf(m, " SAMPLER_INSTDONE[%d][%d]: 0x%08x\n", slice, subslice, ee->instdone.sampler[slice][subslice]); - for_each_instdone_slice_subslice(m->i915, slice, subslice) + for_each_instdone_slice_subslice(m->i915, sseu, slice, subslice) err_printf(m, " ROW_INSTDONE[%d][%d]: 0x%08x\n", slice, subslice, ee->instdone.row[slice][subslice]); diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c index 7b7016171057..ac8ac59c4860 100644 --- a/drivers/gpu/drm/i915/i915_query.c +++ b/drivers/gpu/drm/i915/i915_query.c @@ -37,8 +37,6 @@ static int query_topology_info(struct drm_i915_private *dev_priv, const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; struct drm_i915_query_topology_info topo; u32 slice_length, subslice_length, eu_length, total_length; - u8 subslice_stride = GEN_SSEU_STRIDE(sseu->max_subslices); - u8 eu_stride = GEN_SSEU_STRIDE(sseu->max_eus_per_subslice); int ret; if (query_item->flags != 0) @@ -50,8 +48,8 @@ static int query_topology_info(struct drm_i915_private *dev_priv, BUILD_BUG_ON(sizeof(u8) != sizeof(sseu->slice_mask)); slice_length = sizeof(sseu->slice_mask); - subslice_length = sseu->max_slices * subslice_stride; - eu_length = sseu->max_slices * sseu->max_subslices * eu_stride; + subslice_length = sseu->max_slices * sseu->ss_stride; + eu_length = sseu->max_slices * sseu->max_subslices * sseu->eu_stride; total_length = sizeof(topo) + slice_length + subslice_length + eu_length; @@ -69,9 +67,9 @@ static int query_topology_info(struct drm_i915_private *dev_priv, topo.max_eus_per_subslice = sseu->max_eus_per_subslice; topo.subslice_offset = slice_length; - topo.subslice_stride = subslice_stride; + topo.subslice_stride = sseu->ss_stride; topo.eu_offset = slice_length + subslice_length; - topo.eu_stride = eu_stride; + topo.eu_stride = sseu->eu_stride; if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr), &topo, sizeof(topo))) diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index 97f742530fa1..3625f777f3a3 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -92,9 +92,9 @@ static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p) hweight8(sseu->slice_mask), sseu->slice_mask); drm_printf(p, "subslice total: %u\n", intel_sseu_subslice_total(sseu)); for (s = 0; s < sseu->max_slices; s++) { - drm_printf(p, "slice%d: %u subslices, mask=%04x\n", + drm_printf(p, "slice%d: %u subslices, mask=%08x\n", s, intel_sseu_subslices_per_slice(sseu, s), - sseu->subslice_mask[s]); + intel_sseu_get_subslices(sseu, s)); } drm_printf(p, "EU total: %u\n", sseu->eu_total); drm_printf(p, "EU per subslice: %u\n", sseu->eu_per_subslice); @@ -117,10 +117,9 @@ void intel_device_info_dump_runtime(const struct intel_runtime_info *info, static int sseu_eu_idx(const struct sseu_dev_info *sseu, int slice, int subslice) { - int subslice_stride = GEN_SSEU_STRIDE(sseu->max_eus_per_subslice); - int slice_stride = sseu->max_subslices * subslice_stride; + int slice_stride = sseu->max_subslices * sseu->eu_stride; - return slice * slice_stride + subslice * subslice_stride; + return slice * slice_stride + subslice * sseu->eu_stride; } static u16 
sseu_get_eus(const struct sseu_dev_info *sseu, int slice, @@ -129,7 +128,7 @@ static u16 sseu_get_eus(const struct sseu_dev_info *sseu, int slice, int i, offset = sseu_eu_idx(sseu, slice, subslice); u16 eu_mask = 0; - for (i = 0; i < GEN_SSEU_STRIDE(sseu->max_eus_per_subslice); i++) { + for (i = 0; i < sseu->eu_stride; i++) { eu_mask |= ((u16)sseu->eu_mask[offset + i]) << (i * BITS_PER_BYTE); } @@ -142,7 +141,7 @@ static void sseu_set_eus(struct sseu_dev_info *sseu, int slice, int subslice, { int i, offset = sseu_eu_idx(sseu, slice, subslice); - for (i = 0; i < GEN_SSEU_STRIDE(sseu->max_eus_per_subslice); i++) { + for (i = 0; i < sseu->eu_stride; i++) { sseu->eu_mask[offset + i] = (eu_mask >> (BITS_PER_BYTE * i)) & 0xff; } @@ -159,9 +158,9 @@ void intel_device_info_dump_topology(const struct sseu_dev_info *sseu, } for (s = 0; s < sseu->max_slices; s++) { - drm_printf(p, "slice%d: %u subslice(s) (0x%hhx):\n", + drm_printf(p, "slice%d: %u subslice(s) (0x%08x):\n", s, intel_sseu_subslices_per_slice(sseu, s), - sseu->subslice_mask[s]); + intel_sseu_get_subslices(sseu, s)); for (ss = 0; ss < sseu->max_subslices; ss++) { u16 enabled_eus = sseu_get_eus(sseu, s, ss); @@ -190,15 +189,10 @@ static void gen11_sseu_info_init(struct drm_i915_private *dev_priv) u8 eu_en; int s; - if (IS_ELKHARTLAKE(dev_priv)) { - sseu->max_slices = 1; - sseu->max_subslices = 4; - sseu->max_eus_per_subslice = 8; - } else { - sseu->max_slices = 1; - sseu->max_subslices = 8; - sseu->max_eus_per_subslice = 8; - } + if (IS_ELKHARTLAKE(dev_priv)) + intel_sseu_set_info(sseu, 1, 4, 8); + else + intel_sseu_set_info(sseu, 1, 8, 8); s_en = I915_READ(GEN11_GT_SLICE_ENABLE) & GEN11_GT_S_ENA_MASK; ss_en = ~I915_READ(GEN11_GT_SUBSLICE_DISABLE); @@ -207,15 +201,15 @@ static void gen11_sseu_info_init(struct drm_i915_private *dev_priv) for (s = 0; s < sseu->max_slices; s++) { if (s_en & BIT(s)) { - int ss_idx = sseu->max_subslices * s; int ss; sseu->slice_mask |= BIT(s); - sseu->subslice_mask[s] = (ss_en >> ss_idx) & ss_en_mask; - for (ss = 0; ss < sseu->max_subslices; ss++) { - if (sseu->subslice_mask[s] & BIT(ss)) + + intel_sseu_set_subslices(sseu, s, ss_en_mask); + + for (ss = 0; ss < sseu->max_subslices; ss++) + if (intel_sseu_has_subslice(sseu, s, ss)) sseu_set_eus(sseu, s, ss, eu_en); - } } } sseu->eu_per_subslice = hweight8(eu_en); @@ -235,23 +229,10 @@ static void gen10_sseu_info_init(struct drm_i915_private *dev_priv) const int eu_mask = 0xff; u32 subslice_mask, eu_en; + intel_sseu_set_info(sseu, 6, 4, 8); + sseu->slice_mask = (fuse2 & GEN10_F2_S_ENA_MASK) >> GEN10_F2_S_ENA_SHIFT; - sseu->max_slices = 6; - sseu->max_subslices = 4; - sseu->max_eus_per_subslice = 8; - - subslice_mask = (1 << 4) - 1; - subslice_mask &= ~((fuse2 & GEN10_F2_SS_DIS_MASK) >> - GEN10_F2_SS_DIS_SHIFT); - - /* - * Slice0 can have up to 3 subslices, but there are only 2 in - * slice1/2. - */ - sseu->subslice_mask[0] = subslice_mask; - for (s = 1; s < sseu->max_slices; s++) - sseu->subslice_mask[s] = subslice_mask & 0x3; /* Slice0 */ eu_en = ~I915_READ(GEN8_EU_DISABLE0); @@ -276,14 +257,22 @@ static void gen10_sseu_info_init(struct drm_i915_private *dev_priv) eu_en = ~I915_READ(GEN10_EU_DISABLE3); sseu_set_eus(sseu, 5, 1, eu_en & eu_mask); - /* Do a second pass where we mark the subslices disabled if all their - * eus are off. 
- */ + subslice_mask = (1 << 4) - 1; + subslice_mask &= ~((fuse2 & GEN10_F2_SS_DIS_MASK) >> + GEN10_F2_SS_DIS_SHIFT); + for (s = 0; s < sseu->max_slices; s++) { for (ss = 0; ss < sseu->max_subslices; ss++) { if (sseu_get_eus(sseu, s, ss) == 0) - sseu->subslice_mask[s] &= ~BIT(ss); + subslice_mask &= ~BIT(ss); } + + /* + * Slice0 can have up to 3 subslices, but there are only 2 in + * slice1/2. + */ + intel_sseu_set_subslices(sseu, s, s == 0 ? subslice_mask : + subslice_mask & 0x3); } sseu->eu_total = compute_eu_total(sseu); @@ -309,13 +298,12 @@ static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv) { struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; u32 fuse; + u8 subslice_mask; fuse = I915_READ(CHV_FUSE_GT); sseu->slice_mask = BIT(0); - sseu->max_slices = 1; - sseu->max_subslices = 2; - sseu->max_eus_per_subslice = 8; + intel_sseu_set_info(sseu, 1, 2, 8); if (!(fuse & CHV_FGT_DISABLE_SS0)) { u8 disabled_mask = @@ -324,7 +312,7 @@ static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv) (((fuse & CHV_FGT_EU_DIS_SS0_R1_MASK) >> CHV_FGT_EU_DIS_SS0_R1_SHIFT) << 4); - sseu->subslice_mask[0] |= BIT(0); + subslice_mask |= BIT(0); sseu_set_eus(sseu, 0, 0, ~disabled_mask); } @@ -335,10 +323,12 @@ static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv) (((fuse & CHV_FGT_EU_DIS_SS1_R1_MASK) >> CHV_FGT_EU_DIS_SS1_R1_SHIFT) << 4); - sseu->subslice_mask[0] |= BIT(1); + subslice_mask |= BIT(1); sseu_set_eus(sseu, 0, 1, ~disabled_mask); } + intel_sseu_set_subslices(sseu, 0, subslice_mask); + sseu->eu_total = compute_eu_total(sseu); /* @@ -371,9 +361,8 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv) sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT; /* BXT has a single slice and at most 3 subslices. */ - sseu->max_slices = IS_GEN9_LP(dev_priv) ? 1 : 3; - sseu->max_subslices = IS_GEN9_LP(dev_priv) ? 3 : 4; - sseu->max_eus_per_subslice = 8; + intel_sseu_set_info(sseu, IS_GEN9_LP(dev_priv) ? 1 : 3, + IS_GEN9_LP(dev_priv) ? 3 : 4, 8); /* * The subslice disable field is global, i.e. it applies @@ -392,14 +381,14 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv) /* skip disabled slice */ continue; - sseu->subslice_mask[s] = subslice_mask; + intel_sseu_set_subslices(sseu, s, subslice_mask); eu_disable = I915_READ(GEN9_EU_DISABLE(s)); for (ss = 0; ss < sseu->max_subslices; ss++) { int eu_per_ss; u8 eu_disabled_mask; - if (!(sseu->subslice_mask[s] & BIT(ss))) + if (!intel_sseu_has_subslice(sseu, s, ss)) /* skip disabled subslice */ continue; @@ -472,9 +461,7 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv) fuse2 = I915_READ(GEN8_FUSE2); sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT; - sseu->max_slices = 3; - sseu->max_subslices = 3; - sseu->max_eus_per_subslice = 8; + intel_sseu_set_info(sseu, 3, 3, 8); /* * The subslice disable field is global, i.e. 
it applies @@ -501,18 +488,19 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv) /* skip disabled slice */ continue; - sseu->subslice_mask[s] = subslice_mask; + intel_sseu_set_subslices(sseu, s, subslice_mask); for (ss = 0; ss < sseu->max_subslices; ss++) { u8 eu_disabled_mask; u32 n_disabled; - if (!(sseu->subslice_mask[s] & BIT(ss))) + if (!intel_sseu_has_subslice(sseu, s, ss)) /* skip disabled subslice */ continue; eu_disabled_mask = - eu_disable[s] >> (ss * sseu->max_eus_per_subslice); + eu_disable[s] >> + (ss * sseu->max_eus_per_subslice); sseu_set_eus(sseu, s, ss, ~eu_disabled_mask); @@ -552,6 +540,7 @@ static void haswell_sseu_info_init(struct drm_i915_private *dev_priv) struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; u32 fuse1; int s, ss; + u32 subslice_mask; /* * There isn't a register to tell us how many slices/subslices. We @@ -563,22 +552,18 @@ static void haswell_sseu_info_init(struct drm_i915_private *dev_priv) /* fall through */ case 1: sseu->slice_mask = BIT(0); - sseu->subslice_mask[0] = BIT(0); + subslice_mask = BIT(0); break; case 2: sseu->slice_mask = BIT(0); - sseu->subslice_mask[0] = BIT(0) | BIT(1); + subslice_mask = BIT(0) | BIT(1); break; case 3: sseu->slice_mask = BIT(0) | BIT(1); - sseu->subslice_mask[0] = BIT(0) | BIT(1); - sseu->subslice_mask[1] = BIT(0) | BIT(1); + subslice_mask = BIT(0) | BIT(1); break; } - sseu->max_slices = hweight8(sseu->slice_mask); - sseu->max_subslices = hweight8(sseu->subslice_mask[0]); - fuse1 = I915_READ(HSW_PAVP_FUSE1); switch ((fuse1 & HSW_F1_EU_DIS_MASK) >> HSW_F1_EU_DIS_SHIFT) { default: @@ -595,9 +580,14 @@ static void haswell_sseu_info_init(struct drm_i915_private *dev_priv) sseu->eu_per_subslice = 6; break; } - sseu->max_eus_per_subslice = sseu->eu_per_subslice; + + intel_sseu_set_info(sseu, hweight8(sseu->slice_mask), + hweight8(subslice_mask), + sseu->eu_per_subslice); for (s = 0; s < sseu->max_slices; s++) { + intel_sseu_set_subslices(sseu, s, subslice_mask); + for (ss = 0; ss < sseu->max_subslices; ss++) { sseu_set_eus(sseu, s, ss, (1UL << sseu->eu_per_subslice) - 1); -- cgit v1.2.3 From 2e2f08d02dd282751e8d329f373dd85d49ae7f90 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 28 May 2019 08:53:54 +0100 Subject: drm/i915: Take a runtime pm wakeref for atomic commits MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Before we start prepping the system for an atomic modeset, wake the device up. We then keep track of this wakeref until we complete the atomic commit, so we hold keep the device awake for all potential HW access, and do not allow the device to sleep with a pending modeset. 
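
As a rough illustration of the pattern described above (a stand-alone sketch with stubbed pm_get()/pm_put() helpers standing in for the intel_runtime_pm_get()/intel_runtime_pm_put() calls shown in the diff below -- not the i915 implementation itself):

	#include <assert.h>
	#include <stdio.h>

	static int pm_refs;				/* stands in for the runtime-PM refcount */

	static int pm_get(void)  { return ++pm_refs; }	/* device may not runtime-suspend while held */
	static void pm_put(void) { --pm_refs; }

	struct atomic_state { int wakeref; };

	static int commit(struct atomic_state *state, int prepare_fails)
	{
		state->wakeref = pm_get();	/* wake the device before any commit preparation */

		if (prepare_fails) {
			pm_put();		/* every error path drops the reference */
			return -1;
		}

		/* ... hardware programming runs here with the device guaranteed awake ... */

		pm_put();			/* released only once the commit has completed */
		return 0;
	}

	int main(void)
	{
		struct atomic_state st = { 0 };

		commit(&st, 0);
		commit(&st, 1);
		assert(pm_refs == 0);		/* balanced on both the success and failure paths */
		printf("wakeref balanced\n");
		return 0;
	}
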
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=110771 Signed-off-by: Chris Wilson Cc: Ville Syrjälä Cc: Maarten Lankhorst Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190528075354.22341-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/intel_display.c | 5 +++++ drivers/gpu/drm/i915/intel_drv.h | 2 ++ 2 files changed, 7 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 4cbea30439ba..c3e2b1178d55 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -13834,6 +13834,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore); intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref); } + intel_runtime_pm_put(dev_priv, intel_state->wakeref); /* * Defer the cleanup of the old state to a separate worker to not @@ -13912,6 +13913,8 @@ static int intel_atomic_commit(struct drm_device *dev, struct drm_i915_private *dev_priv = to_i915(dev); int ret = 0; + intel_state->wakeref = intel_runtime_pm_get(dev_priv); + drm_atomic_state_get(state); i915_sw_fence_init(&intel_state->commit_ready, intel_atomic_commit_ready); @@ -13948,6 +13951,7 @@ static int intel_atomic_commit(struct drm_device *dev, if (ret) { DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret); i915_sw_fence_commit(&intel_state->commit_ready); + intel_runtime_pm_put(dev_priv, intel_state->wakeref); return ret; } @@ -13959,6 +13963,7 @@ static int intel_atomic_commit(struct drm_device *dev, i915_sw_fence_commit(&intel_state->commit_ready); drm_atomic_helper_cleanup_planes(dev, state); + intel_runtime_pm_put(dev_priv, intel_state->wakeref); return ret; } dev_priv->wm.distrust_bios_wm = false; diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 952eabb1919a..0dcc03592d6e 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -420,6 +420,8 @@ struct dpll { struct intel_atomic_state { struct drm_atomic_state base; + intel_wakeref_t wakeref; + struct { /* * Logical state of cdclk (used for all scaling, watermark, -- cgit v1.2.3 From 81a04d2e9091832710549176db0e46d1a666f9ae Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 29 May 2019 14:03:55 +0300 Subject: drm/i915: selftest_lrc: Check the correct variable We should check "request[n]" instead of just "request". 
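
The same bug class is easy to reproduce outside the driver; a minimal stand-alone sketch (plain malloc() in place of i915_request_create() and hypothetical names, not the selftest code itself):

	#include <stdio.h>
	#include <stdlib.h>

	#define COUNT 4

	int main(void)
	{
		char *request[COUNT];
		int n;

		for (n = 0; n < COUNT; n++) {
			request[n] = malloc(16);
			if (!request)		/* wrong: the array itself is never NULL, */
				return 1;	/* so a failed allocation slips through    */
			if (!request[n])	/* right: test the element just created    */
				return 1;
		}

		for (n = 0; n < COUNT; n++)
			free(request[n]);

		printf("all %d requests created\n", COUNT);
		return 0;
	}
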
Fixes: 78e41ddd2198 ("drm/i915: Apply an execution_mask to the virtual_engine") Signed-off-by: Dan Carpenter Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190529110355.GA19119@mwanda --- drivers/gpu/drm/i915/gt/selftest_lrc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c index d162b0d3ec8e..06593254b7d6 100644 --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c @@ -1531,8 +1531,8 @@ static int mask_virtual_engine(struct drm_i915_private *i915, for (n = 0; n < nsibling; n++) { request[n] = i915_request_create(ve); - if (IS_ERR(request)) { - err = PTR_ERR(request); + if (IS_ERR(request[n])) { + err = PTR_ERR(request[n]); nsibling = n; goto out; } -- cgit v1.2.3 From 0c1f845772e5d6d1b1739cb938403527bc6b29e1 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 28 May 2019 16:40:53 +0100 Subject: drm/i915: Avoid refcount_inc on known zero count In intel_wakeref_auto, we use refcount_inc_not_zero to detect the first use and initialise the timer. On doing so, we have to avoid using refcount_inc on that zero count as the debug code flags that as an error: refcount_t: increment on 0; use-after-free. Rearrange the code so that if we know the count is 0 and we are initialising, we explicitly set it to 1. Fixes: b27e35ae5b18 ("drm/i915: Keep user GGTT alive for a minimum of 250ms") Signed-off-by: Chris Wilson Cc: Mika Kuoppala Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190528154053.22004-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/intel_wakeref.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_wakeref.c b/drivers/gpu/drm/i915/intel_wakeref.c index c2dda5a375f0..c25ba1b5e8ba 100644 --- a/drivers/gpu/drm/i915/intel_wakeref.c +++ b/drivers/gpu/drm/i915/intel_wakeref.c @@ -114,11 +114,11 @@ void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout) if (!refcount_inc_not_zero(&wf->count)) { spin_lock_irqsave(&wf->lock, flags); - if (!refcount_read(&wf->count)) { + if (!refcount_inc_not_zero(&wf->count)) { GEM_BUG_ON(wf->wakeref); wf->wakeref = intel_runtime_pm_get_if_in_use(wf->i915); + refcount_set(&wf->count, 1); } - refcount_inc(&wf->count); spin_unlock_irqrestore(&wf->lock, flags); } -- cgit v1.2.3 From a10f361d176ce53c72d5b1b2e2913a1a222ee393 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Wed, 29 May 2019 11:21:50 +0300 Subject: Revert "drm/i915: Expand subslice mask" This reverts commit 1ac159e23c2c ("drm/i915: Expand subslice mask"), which kills ICL due to GEM_BUG_ON() sanity checks before CI even gets a chance to do anything. The commit exposes an issue in commit 1e40d4aea57b ("drm/i915/cnl: Implement WaProgramMgsrForCorrectSliceSpecificMmioReads"), which will also need to be addressed. There's a proposed fix [1], but considering the seeming uncertainty with the fix as well as the size of the regressing commit (in this context, the one that actually brings down ICL), this warrants a revert to get ICL working, and gives us time to get all of this right without rushing. Even if this means shooting the messenger. <3>[ 9.426327] intel_sseu_get_subslices:46 GEM_BUG_ON(slice >= sseu->max_slices) <4>[ 9.426355] ------------[ cut here ]------------ <2>[ 9.426357] kernel BUG at drivers/gpu/drm/i915/gt/intel_sseu.c:46! 
<4>[ 9.426371] invalid opcode: 0000 [#1] PREEMPT SMP NOPTI <4>[ 9.426377] CPU: 1 PID: 364 Comm: systemd-udevd Not tainted 5.2.0-rc2-CI-CI_DRM_6159+ #1 <4>[ 9.426385] Hardware name: Intel Corporation Ice Lake Client Platform/IceLake U DDR4 SODIMM PD RVP TLC, BIOS ICLSFWR1.R00.3183.A00.1905020411 05/02/2019 <4>[ 9.426444] RIP: 0010:intel_sseu_get_subslices+0x8a/0xe0 [i915] <4>[ 9.426452] Code: d5 76 b7 e0 48 8b 35 9d 24 21 00 49 c7 c0 07 f0 72 a0 b9 2e 00 00 00 48 c7 c2 00 8e 6d a0 48 c7 c7 a5 14 5b a0 e8 36 3c be e0 <0f> 0b 48 c7 c1 80 d5 6f a0 ba 30 00 00 00 48 c7 c6 00 8e 6d a0 48 <4>[ 9.426468] RSP: 0018:ffffc9000037b9c8 EFLAGS: 00010282 <4>[ 9.426475] RAX: 000000000000000f RBX: 0000000000000000 RCX: 0000000000000000 <4>[ 9.426482] RDX: 0000000000000001 RSI: 0000000000000008 RDI: ffff88849e346f98 <4>[ 9.426490] RBP: ffff88848a200000 R08: 0000000000000004 R09: ffff88849d50b000 <4>[ 9.426497] R10: 0000000000000000 R11: ffff88849e346f98 R12: ffff88848a209e78 <4>[ 9.426505] R13: 0000000003000000 R14: ffff88848a20b1a8 R15: 0000000000000000 <4>[ 9.426513] FS: 00007f73d5ae8680(0000) GS:ffff88849fc80000(0000) knlGS:0000000000000000 <4>[ 9.426521] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 <4>[ 9.426527] CR2: 0000561417b01260 CR3: 0000000494764003 CR4: 0000000000760ee0 <4>[ 9.426535] PKRU: 55555554 <4>[ 9.426538] Call Trace: <4>[ 9.426585] wa_init_mcr+0xd5/0x110 [i915] <4>[ 9.426597] ? lock_acquire+0xa6/0x1c0 <4>[ 9.426645] icl_gt_workarounds_init+0x21/0x1a0 [i915] <4>[ 9.426694] ? i915_driver_load+0xfcf/0x18a0 [i915] <4>[ 9.426739] gt_init_workarounds+0x14c/0x230 [i915] <4>[ 9.426748] ? _raw_spin_unlock_irq+0x24/0x50 <4>[ 9.426789] intel_gt_init_workarounds+0x1b/0x30 [i915] <4>[ 9.426835] i915_driver_load+0xfd7/0x18a0 [i915] <4>[ 9.426843] ? lock_acquire+0xa6/0x1c0 <4>[ 9.426850] ? __pm_runtime_resume+0x4f/0x80 <4>[ 9.426857] ? _raw_spin_unlock_irqrestore+0x4c/0x60 <4>[ 9.426863] ? _raw_spin_unlock_irqrestore+0x4c/0x60 <4>[ 9.426870] ? lockdep_hardirqs_on+0xe3/0x1b0 <4>[ 9.426915] i915_pci_probe+0x29/0xa0 [i915] <4>[ 9.426923] pci_device_probe+0x9e/0x120 <4>[ 9.426930] really_probe+0xea/0x3c0 <4>[ 9.426936] driver_probe_device+0x10b/0x120 <4>[ 9.426942] device_driver_attach+0x4a/0x50 <4>[ 9.426948] __driver_attach+0x97/0x130 <4>[ 9.426954] ? device_driver_attach+0x50/0x50 <4>[ 9.426960] bus_for_each_dev+0x74/0xc0 <4>[ 9.426966] bus_add_driver+0x13f/0x210 <4>[ 9.426971] ? 0xffffffffa083b000 <4>[ 9.426976] driver_register+0x56/0xe0 <4>[ 9.426982] ? 0xffffffffa083b000 <4>[ 9.426987] do_one_initcall+0x58/0x300 <4>[ 9.426994] ? do_init_module+0x1d/0x1f6 <4>[ 9.427001] ? rcu_read_lock_sched_held+0x6f/0x80 <4>[ 9.427007] ? kmem_cache_alloc_trace+0x261/0x290 <4>[ 9.427014] do_init_module+0x56/0x1f6 <4>[ 9.427020] load_module+0x24d1/0x2990 <4>[ 9.427032] ? 
__se_sys_finit_module+0xd3/0xf0 <4>[ 9.427037] __se_sys_finit_module+0xd3/0xf0 <4>[ 9.427047] do_syscall_64+0x55/0x1c0 <4>[ 9.427053] entry_SYSCALL_64_after_hwframe+0x49/0xbe <4>[ 9.427059] RIP: 0033:0x7f73d5609839 <4>[ 9.427064] Code: 00 f3 c3 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 40 00 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d 1f f6 2c 00 f7 d8 64 89 01 48 <4>[ 9.427082] RSP: 002b:00007ffdf34477b8 EFLAGS: 00000246 ORIG_RAX: 0000000000000139 <4>[ 9.427091] RAX: ffffffffffffffda RBX: 00005559fd5d7b40 RCX: 00007f73d5609839 <4>[ 9.427099] RDX: 0000000000000000 RSI: 00007f73d52e8145 RDI: 000000000000000f <4>[ 9.427106] RBP: 00007f73d52e8145 R08: 0000000000000000 R09: 00007ffdf34478d0 <4>[ 9.427114] R10: 000000000000000f R11: 0000000000000246 R12: 0000000000000000 <4>[ 9.427121] R13: 00005559fd5c90f0 R14: 0000000000020000 R15: 00005559fd5d7b40 <4>[ 9.427131] Modules linked in: i915(+) mei_hdcp x86_pkg_temp_thermal coretemp snd_hda_intel crct10dif_pclmul crc32_pclmul snd_hda_codec snd_hwdep e1000e snd_hda_core ghash_clmulni_intel ptp snd_pcm cdc_ether usbnet mii pps_core mei_me mei prime_numbers btusb btrtl btbcm btintel bluetooth ecdh_generic ecc <4>[ 9.427254] ---[ end trace af3eeb543bd66e66 ]--- [1] http://patchwork.freedesktop.org/patch/msgid/20190528200655.11605-1-chris@chris-wilson.co.uk References: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6159/fi-icl-u2/pstore0-1517155098_Oops_1.log References: 1e40d4aea57b ("drm/i915/cnl: Implement WaProgramMgsrForCorrectSliceSpecificMmioReads") Fixes: 1ac159e23c2c ("drm/i915: Expand subslice mask") Cc: Chris Wilson Cc: Daniele Ceraolo Spurio Cc: Joonas Lahtinen Cc: Lionel Landwerlin Cc: Manasi Navare Cc: Michel Thierry Cc: Mika Kuoppala Cc: Oscar Mateo Cc: Stuart Summers Cc: Tvrtko Ursulin Cc: Yunwei Zhang Acked-by: Daniel Vetter Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190529082150.31526-1-jani.nikula@intel.com --- drivers/gpu/drm/i915/gt/intel_engine_cs.c | 24 +----- drivers/gpu/drm/i915/gt/intel_engine_types.h | 30 +++---- drivers/gpu/drm/i915/gt/intel_hangcheck.c | 3 +- drivers/gpu/drm/i915/gt/intel_sseu.c | 47 +---------- drivers/gpu/drm/i915/gt/intel_sseu.h | 27 +----- drivers/gpu/drm/i915/gt/intel_workarounds.c | 2 +- drivers/gpu/drm/i915/i915_debugfs.c | 40 +++++---- drivers/gpu/drm/i915/i915_drv.c | 6 +- drivers/gpu/drm/i915/i915_gpu_error.c | 5 +- drivers/gpu/drm/i915/i915_query.c | 10 ++- drivers/gpu/drm/i915/intel_device_info.c | 122 +++++++++++++++------------ 11 files changed, 116 insertions(+), 200 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index 158722b50691..6b838948ba24 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -953,30 +953,12 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type) } } -static inline u32 -intel_sseu_fls_subslice(const struct sseu_dev_info *sseu, u32 slice) -{ - u32 subslice; - int i; - - for (i = sseu->ss_stride - 1; i >= 0; i--) { - subslice = fls(sseu->subslice_mask[slice * sseu->ss_stride + - i]); - if (subslice) { - subslice += i * BITS_PER_BYTE; - break; - } - } - - return subslice; -} - u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv) { const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; u32 mcr_s_ss_select; u32 slice = fls(sseu->slice_mask); - u32 subslice = 
intel_sseu_fls_subslice(sseu, slice); + u32 subslice = fls(sseu->subslice_mask[slice]); if (IS_GEN(dev_priv, 10)) mcr_s_ss_select = GEN8_MCR_SLICE(slice) | @@ -1052,7 +1034,6 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine, struct intel_instdone *instdone) { struct drm_i915_private *dev_priv = engine->i915; - const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; struct intel_uncore *uncore = engine->uncore; u32 mmio_base = engine->mmio_base; int slice; @@ -1070,8 +1051,7 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine, instdone->slice_common = intel_uncore_read(uncore, GEN7_SC_INSTDONE); - for_each_instdone_slice_subslice(dev_priv, sseu, slice, - subslice) { + for_each_instdone_slice_subslice(dev_priv, slice, subslice) { instdone->sampler[slice][subslice] = read_subslice_reg(dev_priv, slice, subslice, GEN7_SAMPLER_INSTDONE); diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h index 4f311ceeab89..01223864237a 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h @@ -552,20 +552,20 @@ intel_engine_is_virtual(const struct intel_engine_cs *engine) return engine->flags & I915_ENGINE_IS_VIRTUAL; } -#define instdone_has_slice(dev_priv___, sseu___, slice___) \ - ((IS_GEN(dev_priv___, 7) ? 1 : ((sseu___)->slice_mask)) & \ - BIT(slice___)) - -#define instdone_has_subslice(dev_priv__, sseu__, slice__, subslice__) \ - (IS_GEN(dev_priv__, 7) ? (1 & BIT(subslice__)) : \ - intel_sseu_has_subslice(sseu__, slice__, subslice__)) - -#define for_each_instdone_slice_subslice(dev_priv_, sseu_, slice_, subslice_) \ - for ((slice_) = 0, (subslice_) = 0; (slice_) < I915_MAX_SLICES; \ - (subslice_) = ((subslice_) + 1) % I915_MAX_SUBSLICES, \ - (slice_) += ((subslice_) == 0)) \ - for_each_if((instdone_has_slice(dev_priv_, sseu_, slice_)) && \ - (instdone_has_subslice(dev_priv_, sseu_, slice_, \ - subslice_))) +#define instdone_slice_mask(dev_priv__) \ + (IS_GEN(dev_priv__, 7) ? \ + 1 : RUNTIME_INFO(dev_priv__)->sseu.slice_mask) + +#define instdone_subslice_mask(dev_priv__) \ + (IS_GEN(dev_priv__, 7) ? \ + 1 : RUNTIME_INFO(dev_priv__)->sseu.subslice_mask[0]) + +#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \ + for ((slice__) = 0, (subslice__) = 0; \ + (slice__) < I915_MAX_SLICES; \ + (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? 
(subslice__) + 1 : 0, \ + (slice__) += ((subslice__) == 0)) \ + for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \ + (BIT(subslice__) & instdone_subslice_mask(dev_priv__))) #endif /* __INTEL_ENGINE_TYPES_H__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_hangcheck.c b/drivers/gpu/drm/i915/gt/intel_hangcheck.c index b19cd4cdcb5c..3a4d09b80fa0 100644 --- a/drivers/gpu/drm/i915/gt/intel_hangcheck.c +++ b/drivers/gpu/drm/i915/gt/intel_hangcheck.c @@ -51,7 +51,6 @@ static bool instdone_unchanged(u32 current_instdone, u32 *old_instdone) static bool subunits_stuck(struct intel_engine_cs *engine) { struct drm_i915_private *dev_priv = engine->i915; - const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; struct intel_instdone instdone; struct intel_instdone *accu_instdone = &engine->hangcheck.instdone; bool stuck; @@ -73,7 +72,7 @@ static bool subunits_stuck(struct intel_engine_cs *engine) stuck &= instdone_unchanged(instdone.slice_common, &accu_instdone->slice_common); - for_each_instdone_slice_subslice(dev_priv, sseu, slice, subslice) { + for_each_instdone_slice_subslice(dev_priv, slice, subslice) { stuck &= instdone_unchanged(instdone.sampler[slice][subslice], &accu_instdone->sampler[slice][subslice]); stuck &= instdone_unchanged(instdone.row[slice][subslice], diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c index 763b811f2c9d..a0756f006f5f 100644 --- a/drivers/gpu/drm/i915/gt/intel_sseu.c +++ b/drivers/gpu/drm/i915/gt/intel_sseu.c @@ -8,17 +8,6 @@ #include "intel_lrc_reg.h" #include "intel_sseu.h" -void intel_sseu_set_info(struct sseu_dev_info *sseu, u8 max_slices, - u8 max_subslices, u8 max_eus_per_subslice) -{ - sseu->max_slices = max_slices; - sseu->max_subslices = max_subslices; - sseu->max_eus_per_subslice = max_eus_per_subslice; - - sseu->ss_stride = GEN_SSEU_STRIDE(sseu->max_subslices); - sseu->eu_stride = GEN_SSEU_STRIDE(sseu->max_eus_per_subslice); -} - unsigned int intel_sseu_subslice_total(const struct sseu_dev_info *sseu) { @@ -30,44 +19,10 @@ intel_sseu_subslice_total(const struct sseu_dev_info *sseu) return total; } -void intel_sseu_copy_subslices(const struct sseu_dev_info *sseu, int slice, - u8 *to_mask) -{ - int offset = slice * sseu->ss_stride; - - memcpy(&to_mask[offset], &sseu->subslice_mask[offset], sseu->ss_stride); -} - -u32 intel_sseu_get_subslices(const struct sseu_dev_info *sseu, u8 slice) -{ - int i, offset = slice * sseu->ss_stride; - u32 mask; - - GEM_BUG_ON(slice >= sseu->max_slices); - - GEM_BUG_ON(sseu->ss_stride > sizeof(mask)); - - for (i = 0; i < sseu->ss_stride; i++) - mask |= (u32)sseu->subslice_mask[offset + i] << - i * BITS_PER_BYTE; - - return mask; -} - -void intel_sseu_set_subslices(struct sseu_dev_info *sseu, int slice, - u32 ss_mask) -{ - int i, offset = slice * sseu->ss_stride; - - for (i = 0; i < sseu->ss_stride; i++) - sseu->subslice_mask[offset + i] = - (ss_mask >> (BITS_PER_BYTE * i)) & 0xff; -} - unsigned int intel_sseu_subslices_per_slice(const struct sseu_dev_info *sseu, u8 slice) { - return hweight32(intel_sseu_get_subslices(sseu, slice)); + return hweight8(sseu->subslice_mask[slice]); } u32 intel_sseu_make_rpcs(struct drm_i915_private *i915, diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.h b/drivers/gpu/drm/i915/gt/intel_sseu.h index eee21d9f320e..b50d0401a4e2 100644 --- a/drivers/gpu/drm/i915/gt/intel_sseu.h +++ b/drivers/gpu/drm/i915/gt/intel_sseu.h @@ -15,11 +15,10 @@ struct drm_i915_private; #define GEN_MAX_SLICES (6) /* CNL upper bound */ #define GEN_MAX_SUBSLICES (8) /* 
ICL upper bound */ #define GEN_SSEU_STRIDE(max_entries) DIV_ROUND_UP(max_entries, BITS_PER_BYTE) -#define GEN_MAX_SUBSLICE_STRIDE GEN_SSEU_STRIDE(GEN_MAX_SUBSLICES) struct sseu_dev_info { u8 slice_mask; - u8 subslice_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICE_STRIDE]; + u8 subslice_mask[GEN_MAX_SLICES]; u16 eu_total; u8 eu_per_subslice; u8 min_eu_in_pool; @@ -34,9 +33,6 @@ struct sseu_dev_info { u8 max_subslices; u8 max_eus_per_subslice; - u8 ss_stride; - u8 eu_stride; - /* We don't have more than 8 eus per subslice at the moment and as we * store eus enabled using bits, no need to multiply by eus per * subslice. @@ -67,33 +63,12 @@ intel_sseu_from_device_info(const struct sseu_dev_info *sseu) return value; } -static inline bool -intel_sseu_has_subslice(const struct sseu_dev_info *sseu, int slice, - int subslice) -{ - u8 mask = sseu->subslice_mask[slice * sseu->ss_stride + - subslice / BITS_PER_BYTE]; - - return mask & BIT(subslice % BITS_PER_BYTE); -} - -void intel_sseu_set_info(struct sseu_dev_info *sseu, u8 max_slices, - u8 max_subslices, u8 max_eus_per_subslice); - unsigned int intel_sseu_subslice_total(const struct sseu_dev_info *sseu); unsigned int intel_sseu_subslices_per_slice(const struct sseu_dev_info *sseu, u8 slice); -void intel_sseu_copy_subslices(const struct sseu_dev_info *sseu, int slice, - u8 *to_mask); - -u32 intel_sseu_get_subslices(const struct sseu_dev_info *sseu, u8 slice); - -void intel_sseu_set_subslices(struct sseu_dev_info *sseu, int slice, - u32 ss_mask); - u32 intel_sseu_make_rpcs(struct drm_i915_private *i915, const struct intel_sseu *req_sseu); diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index fbc853085809..133d069244f4 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -784,7 +784,7 @@ wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal) u32 slice = fls(sseu->slice_mask); u32 fuse3 = intel_uncore_read(&i915->uncore, GEN10_MIRROR_FUSE3); - u32 ss_mask = intel_sseu_get_subslices(sseu, slice); + u8 ss_mask = sseu->subslice_mask[slice]; u8 enabled_mask = (ss_mask | ss_mask >> GEN10_L3BANK_PAIR_COUNT) & GEN10_L3BANK_MASK; diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index e415d7ef90f2..74afdeff2245 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1271,7 +1271,6 @@ static void i915_instdone_info(struct drm_i915_private *dev_priv, struct seq_file *m, struct intel_instdone *instdone) { - struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; int slice; int subslice; @@ -1287,11 +1286,11 @@ static void i915_instdone_info(struct drm_i915_private *dev_priv, if (INTEL_GEN(dev_priv) <= 6) return; - for_each_instdone_slice_subslice(dev_priv, sseu, slice, subslice) + for_each_instdone_slice_subslice(dev_priv, slice, subslice) seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n", slice, subslice, instdone->sampler[slice][subslice]); - for_each_instdone_slice_subslice(dev_priv, sseu, slice, subslice) + for_each_instdone_slice_subslice(dev_priv, slice, subslice) seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n", slice, subslice, instdone->row[slice][subslice]); } @@ -4085,7 +4084,7 @@ static void gen10_sseu_device_status(struct drm_i915_private *dev_priv, continue; sseu->slice_mask |= BIT(s); - intel_sseu_copy_subslices(&info->sseu, s, sseu->subslice_mask); + sseu->subslice_mask[s] = info->sseu.subslice_mask[s]; for (ss = 0; ss < info->sseu.max_subslices; 
ss++) { unsigned int eu_cnt; @@ -4136,21 +4135,18 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv, sseu->slice_mask |= BIT(s); if (IS_GEN9_BC(dev_priv)) - intel_sseu_copy_subslices(&info->sseu, s, - sseu->subslice_mask); + sseu->subslice_mask[s] = + RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s]; for (ss = 0; ss < info->sseu.max_subslices; ss++) { unsigned int eu_cnt; - u8 ss_idx = s * info->sseu.ss_stride + - ss / BITS_PER_BYTE; if (IS_GEN9_LP(dev_priv)) { if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss)))) /* skip disabled subslice */ continue; - sseu->subslice_mask[ss_idx] |= - BIT(ss % BITS_PER_BYTE); + sseu->subslice_mask[s] |= BIT(ss); } eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] & @@ -4167,23 +4163,25 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv, static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv, struct sseu_dev_info *sseu) { - struct intel_runtime_info *info = RUNTIME_INFO(dev_priv); u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO); int s; sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK; if (sseu->slice_mask) { - sseu->eu_per_subslice = info->sseu.eu_per_subslice; - for (s = 0; s < fls(sseu->slice_mask); s++) - intel_sseu_copy_subslices(&info->sseu, s, - sseu->subslice_mask); + sseu->eu_per_subslice = + RUNTIME_INFO(dev_priv)->sseu.eu_per_subslice; + for (s = 0; s < fls(sseu->slice_mask); s++) { + sseu->subslice_mask[s] = + RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s]; + } sseu->eu_total = sseu->eu_per_subslice * intel_sseu_subslice_total(sseu); /* subtract fused off EU(s) from enabled slice(s) */ for (s = 0; s < fls(sseu->slice_mask); s++) { - u8 subslice_7eu = info->sseu.subslice_7eu[s]; + u8 subslice_7eu = + RUNTIME_INFO(dev_priv)->sseu.subslice_7eu[s]; sseu->eu_total -= hweight8(subslice_7eu); } @@ -4230,7 +4228,6 @@ static void i915_print_sseu_info(struct seq_file *m, bool is_available_info, static int i915_sseu_status(struct seq_file *m, void *unused) { struct drm_i915_private *dev_priv = node_to_i915(m->private); - const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv); struct sseu_dev_info sseu; intel_wakeref_t wakeref; @@ -4238,13 +4235,14 @@ static int i915_sseu_status(struct seq_file *m, void *unused) return -ENODEV; seq_puts(m, "SSEU Device Info\n"); - i915_print_sseu_info(m, true, &info->sseu); + i915_print_sseu_info(m, true, &RUNTIME_INFO(dev_priv)->sseu); seq_puts(m, "SSEU Device Status\n"); memset(&sseu, 0, sizeof(sseu)); - intel_sseu_set_info(&sseu, info->sseu.max_slices, - info->sseu.max_subslices, - info->sseu.max_eus_per_subslice); + sseu.max_slices = RUNTIME_INFO(dev_priv)->sseu.max_slices; + sseu.max_subslices = RUNTIME_INFO(dev_priv)->sseu.max_subslices; + sseu.max_eus_per_subslice = + RUNTIME_INFO(dev_priv)->sseu.max_eus_per_subslice; with_intel_runtime_pm(dev_priv, wakeref) { if (IS_CHERRYVIEW(dev_priv)) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 85e77889e6a6..6a8b31f631ac 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -334,7 +334,7 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data, struct pci_dev *pdev = dev_priv->drm.pdev; const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; drm_i915_getparam_t *param = data; - int value = 0; + int value; switch (param->param) { case I915_PARAM_IRQ_ACTIVE: @@ -464,9 +464,7 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data, return -ENODEV; break; case I915_PARAM_SUBSLICE_MASK: - /* Only copy bits from the first 
slice */ - memcpy(&value, sseu->subslice_mask, - min(sseu->ss_stride, (u8)sizeof(value))); + value = sseu->subslice_mask[0]; if (!value) return -ENODEV; break; diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 4b23e9e4bae6..707811256501 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -411,7 +411,6 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m, static void error_print_instdone(struct drm_i915_error_state_buf *m, const struct drm_i915_error_engine *ee) { - struct sseu_dev_info *sseu = &RUNTIME_INFO(m->i915)->sseu; int slice; int subslice; @@ -427,12 +426,12 @@ static void error_print_instdone(struct drm_i915_error_state_buf *m, if (INTEL_GEN(m->i915) <= 6) return; - for_each_instdone_slice_subslice(m->i915, sseu, slice, subslice) + for_each_instdone_slice_subslice(m->i915, slice, subslice) err_printf(m, " SAMPLER_INSTDONE[%d][%d]: 0x%08x\n", slice, subslice, ee->instdone.sampler[slice][subslice]); - for_each_instdone_slice_subslice(m->i915, sseu, slice, subslice) + for_each_instdone_slice_subslice(m->i915, slice, subslice) err_printf(m, " ROW_INSTDONE[%d][%d]: 0x%08x\n", slice, subslice, ee->instdone.row[slice][subslice]); diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c index ac8ac59c4860..7b7016171057 100644 --- a/drivers/gpu/drm/i915/i915_query.c +++ b/drivers/gpu/drm/i915/i915_query.c @@ -37,6 +37,8 @@ static int query_topology_info(struct drm_i915_private *dev_priv, const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; struct drm_i915_query_topology_info topo; u32 slice_length, subslice_length, eu_length, total_length; + u8 subslice_stride = GEN_SSEU_STRIDE(sseu->max_subslices); + u8 eu_stride = GEN_SSEU_STRIDE(sseu->max_eus_per_subslice); int ret; if (query_item->flags != 0) @@ -48,8 +50,8 @@ static int query_topology_info(struct drm_i915_private *dev_priv, BUILD_BUG_ON(sizeof(u8) != sizeof(sseu->slice_mask)); slice_length = sizeof(sseu->slice_mask); - subslice_length = sseu->max_slices * sseu->ss_stride; - eu_length = sseu->max_slices * sseu->max_subslices * sseu->eu_stride; + subslice_length = sseu->max_slices * subslice_stride; + eu_length = sseu->max_slices * sseu->max_subslices * eu_stride; total_length = sizeof(topo) + slice_length + subslice_length + eu_length; @@ -67,9 +69,9 @@ static int query_topology_info(struct drm_i915_private *dev_priv, topo.max_eus_per_subslice = sseu->max_eus_per_subslice; topo.subslice_offset = slice_length; - topo.subslice_stride = sseu->ss_stride; + topo.subslice_stride = subslice_stride; topo.eu_offset = slice_length + subslice_length; - topo.eu_stride = sseu->eu_stride; + topo.eu_stride = eu_stride; if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr), &topo, sizeof(topo))) diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index 3625f777f3a3..97f742530fa1 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -92,9 +92,9 @@ static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p) hweight8(sseu->slice_mask), sseu->slice_mask); drm_printf(p, "subslice total: %u\n", intel_sseu_subslice_total(sseu)); for (s = 0; s < sseu->max_slices; s++) { - drm_printf(p, "slice%d: %u subslices, mask=%08x\n", + drm_printf(p, "slice%d: %u subslices, mask=%04x\n", s, intel_sseu_subslices_per_slice(sseu, s), - intel_sseu_get_subslices(sseu, s)); + sseu->subslice_mask[s]); } drm_printf(p, 
"EU total: %u\n", sseu->eu_total); drm_printf(p, "EU per subslice: %u\n", sseu->eu_per_subslice); @@ -117,9 +117,10 @@ void intel_device_info_dump_runtime(const struct intel_runtime_info *info, static int sseu_eu_idx(const struct sseu_dev_info *sseu, int slice, int subslice) { - int slice_stride = sseu->max_subslices * sseu->eu_stride; + int subslice_stride = GEN_SSEU_STRIDE(sseu->max_eus_per_subslice); + int slice_stride = sseu->max_subslices * subslice_stride; - return slice * slice_stride + subslice * sseu->eu_stride; + return slice * slice_stride + subslice * subslice_stride; } static u16 sseu_get_eus(const struct sseu_dev_info *sseu, int slice, @@ -128,7 +129,7 @@ static u16 sseu_get_eus(const struct sseu_dev_info *sseu, int slice, int i, offset = sseu_eu_idx(sseu, slice, subslice); u16 eu_mask = 0; - for (i = 0; i < sseu->eu_stride; i++) { + for (i = 0; i < GEN_SSEU_STRIDE(sseu->max_eus_per_subslice); i++) { eu_mask |= ((u16)sseu->eu_mask[offset + i]) << (i * BITS_PER_BYTE); } @@ -141,7 +142,7 @@ static void sseu_set_eus(struct sseu_dev_info *sseu, int slice, int subslice, { int i, offset = sseu_eu_idx(sseu, slice, subslice); - for (i = 0; i < sseu->eu_stride; i++) { + for (i = 0; i < GEN_SSEU_STRIDE(sseu->max_eus_per_subslice); i++) { sseu->eu_mask[offset + i] = (eu_mask >> (BITS_PER_BYTE * i)) & 0xff; } @@ -158,9 +159,9 @@ void intel_device_info_dump_topology(const struct sseu_dev_info *sseu, } for (s = 0; s < sseu->max_slices; s++) { - drm_printf(p, "slice%d: %u subslice(s) (0x%08x):\n", + drm_printf(p, "slice%d: %u subslice(s) (0x%hhx):\n", s, intel_sseu_subslices_per_slice(sseu, s), - intel_sseu_get_subslices(sseu, s)); + sseu->subslice_mask[s]); for (ss = 0; ss < sseu->max_subslices; ss++) { u16 enabled_eus = sseu_get_eus(sseu, s, ss); @@ -189,10 +190,15 @@ static void gen11_sseu_info_init(struct drm_i915_private *dev_priv) u8 eu_en; int s; - if (IS_ELKHARTLAKE(dev_priv)) - intel_sseu_set_info(sseu, 1, 4, 8); - else - intel_sseu_set_info(sseu, 1, 8, 8); + if (IS_ELKHARTLAKE(dev_priv)) { + sseu->max_slices = 1; + sseu->max_subslices = 4; + sseu->max_eus_per_subslice = 8; + } else { + sseu->max_slices = 1; + sseu->max_subslices = 8; + sseu->max_eus_per_subslice = 8; + } s_en = I915_READ(GEN11_GT_SLICE_ENABLE) & GEN11_GT_S_ENA_MASK; ss_en = ~I915_READ(GEN11_GT_SUBSLICE_DISABLE); @@ -201,15 +207,15 @@ static void gen11_sseu_info_init(struct drm_i915_private *dev_priv) for (s = 0; s < sseu->max_slices; s++) { if (s_en & BIT(s)) { + int ss_idx = sseu->max_subslices * s; int ss; sseu->slice_mask |= BIT(s); - - intel_sseu_set_subslices(sseu, s, ss_en_mask); - - for (ss = 0; ss < sseu->max_subslices; ss++) - if (intel_sseu_has_subslice(sseu, s, ss)) + sseu->subslice_mask[s] = (ss_en >> ss_idx) & ss_en_mask; + for (ss = 0; ss < sseu->max_subslices; ss++) { + if (sseu->subslice_mask[s] & BIT(ss)) sseu_set_eus(sseu, s, ss, eu_en); + } } } sseu->eu_per_subslice = hweight8(eu_en); @@ -229,10 +235,23 @@ static void gen10_sseu_info_init(struct drm_i915_private *dev_priv) const int eu_mask = 0xff; u32 subslice_mask, eu_en; - intel_sseu_set_info(sseu, 6, 4, 8); - sseu->slice_mask = (fuse2 & GEN10_F2_S_ENA_MASK) >> GEN10_F2_S_ENA_SHIFT; + sseu->max_slices = 6; + sseu->max_subslices = 4; + sseu->max_eus_per_subslice = 8; + + subslice_mask = (1 << 4) - 1; + subslice_mask &= ~((fuse2 & GEN10_F2_SS_DIS_MASK) >> + GEN10_F2_SS_DIS_SHIFT); + + /* + * Slice0 can have up to 3 subslices, but there are only 2 in + * slice1/2. 
+ */ + sseu->subslice_mask[0] = subslice_mask; + for (s = 1; s < sseu->max_slices; s++) + sseu->subslice_mask[s] = subslice_mask & 0x3; /* Slice0 */ eu_en = ~I915_READ(GEN8_EU_DISABLE0); @@ -257,22 +276,14 @@ static void gen10_sseu_info_init(struct drm_i915_private *dev_priv) eu_en = ~I915_READ(GEN10_EU_DISABLE3); sseu_set_eus(sseu, 5, 1, eu_en & eu_mask); - subslice_mask = (1 << 4) - 1; - subslice_mask &= ~((fuse2 & GEN10_F2_SS_DIS_MASK) >> - GEN10_F2_SS_DIS_SHIFT); - + /* Do a second pass where we mark the subslices disabled if all their + * eus are off. + */ for (s = 0; s < sseu->max_slices; s++) { for (ss = 0; ss < sseu->max_subslices; ss++) { if (sseu_get_eus(sseu, s, ss) == 0) - subslice_mask &= ~BIT(ss); + sseu->subslice_mask[s] &= ~BIT(ss); } - - /* - * Slice0 can have up to 3 subslices, but there are only 2 in - * slice1/2. - */ - intel_sseu_set_subslices(sseu, s, s == 0 ? subslice_mask : - subslice_mask & 0x3); } sseu->eu_total = compute_eu_total(sseu); @@ -298,12 +309,13 @@ static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv) { struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; u32 fuse; - u8 subslice_mask; fuse = I915_READ(CHV_FUSE_GT); sseu->slice_mask = BIT(0); - intel_sseu_set_info(sseu, 1, 2, 8); + sseu->max_slices = 1; + sseu->max_subslices = 2; + sseu->max_eus_per_subslice = 8; if (!(fuse & CHV_FGT_DISABLE_SS0)) { u8 disabled_mask = @@ -312,7 +324,7 @@ static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv) (((fuse & CHV_FGT_EU_DIS_SS0_R1_MASK) >> CHV_FGT_EU_DIS_SS0_R1_SHIFT) << 4); - subslice_mask |= BIT(0); + sseu->subslice_mask[0] |= BIT(0); sseu_set_eus(sseu, 0, 0, ~disabled_mask); } @@ -323,12 +335,10 @@ static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv) (((fuse & CHV_FGT_EU_DIS_SS1_R1_MASK) >> CHV_FGT_EU_DIS_SS1_R1_SHIFT) << 4); - subslice_mask |= BIT(1); + sseu->subslice_mask[0] |= BIT(1); sseu_set_eus(sseu, 0, 1, ~disabled_mask); } - intel_sseu_set_subslices(sseu, 0, subslice_mask); - sseu->eu_total = compute_eu_total(sseu); /* @@ -361,8 +371,9 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv) sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT; /* BXT has a single slice and at most 3 subslices. */ - intel_sseu_set_info(sseu, IS_GEN9_LP(dev_priv) ? 1 : 3, - IS_GEN9_LP(dev_priv) ? 3 : 4, 8); + sseu->max_slices = IS_GEN9_LP(dev_priv) ? 1 : 3; + sseu->max_subslices = IS_GEN9_LP(dev_priv) ? 3 : 4; + sseu->max_eus_per_subslice = 8; /* * The subslice disable field is global, i.e. it applies @@ -381,14 +392,14 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv) /* skip disabled slice */ continue; - intel_sseu_set_subslices(sseu, s, subslice_mask); + sseu->subslice_mask[s] = subslice_mask; eu_disable = I915_READ(GEN9_EU_DISABLE(s)); for (ss = 0; ss < sseu->max_subslices; ss++) { int eu_per_ss; u8 eu_disabled_mask; - if (!intel_sseu_has_subslice(sseu, s, ss)) + if (!(sseu->subslice_mask[s] & BIT(ss))) /* skip disabled subslice */ continue; @@ -461,7 +472,9 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv) fuse2 = I915_READ(GEN8_FUSE2); sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT; - intel_sseu_set_info(sseu, 3, 3, 8); + sseu->max_slices = 3; + sseu->max_subslices = 3; + sseu->max_eus_per_subslice = 8; /* * The subslice disable field is global, i.e. 
it applies @@ -488,19 +501,18 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv) /* skip disabled slice */ continue; - intel_sseu_set_subslices(sseu, s, subslice_mask); + sseu->subslice_mask[s] = subslice_mask; for (ss = 0; ss < sseu->max_subslices; ss++) { u8 eu_disabled_mask; u32 n_disabled; - if (!intel_sseu_has_subslice(sseu, s, ss)) + if (!(sseu->subslice_mask[s] & BIT(ss))) /* skip disabled subslice */ continue; eu_disabled_mask = - eu_disable[s] >> - (ss * sseu->max_eus_per_subslice); + eu_disable[s] >> (ss * sseu->max_eus_per_subslice); sseu_set_eus(sseu, s, ss, ~eu_disabled_mask); @@ -540,7 +552,6 @@ static void haswell_sseu_info_init(struct drm_i915_private *dev_priv) struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; u32 fuse1; int s, ss; - u32 subslice_mask; /* * There isn't a register to tell us how many slices/subslices. We @@ -552,18 +563,22 @@ static void haswell_sseu_info_init(struct drm_i915_private *dev_priv) /* fall through */ case 1: sseu->slice_mask = BIT(0); - subslice_mask = BIT(0); + sseu->subslice_mask[0] = BIT(0); break; case 2: sseu->slice_mask = BIT(0); - subslice_mask = BIT(0) | BIT(1); + sseu->subslice_mask[0] = BIT(0) | BIT(1); break; case 3: sseu->slice_mask = BIT(0) | BIT(1); - subslice_mask = BIT(0) | BIT(1); + sseu->subslice_mask[0] = BIT(0) | BIT(1); + sseu->subslice_mask[1] = BIT(0) | BIT(1); break; } + sseu->max_slices = hweight8(sseu->slice_mask); + sseu->max_subslices = hweight8(sseu->subslice_mask[0]); + fuse1 = I915_READ(HSW_PAVP_FUSE1); switch ((fuse1 & HSW_F1_EU_DIS_MASK) >> HSW_F1_EU_DIS_SHIFT) { default: @@ -580,14 +595,9 @@ static void haswell_sseu_info_init(struct drm_i915_private *dev_priv) sseu->eu_per_subslice = 6; break; } - - intel_sseu_set_info(sseu, hweight8(sseu->slice_mask), - hweight8(subslice_mask), - sseu->eu_per_subslice); + sseu->max_eus_per_subslice = sseu->eu_per_subslice; for (s = 0; s < sseu->max_slices; s++) { - intel_sseu_set_subslices(sseu, s, subslice_mask); - for (ss = 0; ss < sseu->max_subslices; ss++) { sseu_set_eus(sseu, s, ss, (1UL << sseu->eu_per_subslice) - 1); -- cgit v1.2.3 From 7f5f228008e4d6f59212b0ee693cbc36212096ba Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 29 May 2019 10:34:07 +0100 Subject: drm/i915/gtt: Avoid overflowing the WC stash An interesting issue that cropped up when allowing the pagetables to be allocated and freed concurrently (i.e. removing their grandiose struct_mutex guard) was that we would overflow the page stash. This happens when we have multiple allocators grabbing WC pages such that we fill the vm's local page stash and then, when we free another page, the page stash is already full and we overflow. The fix is quite simple: check for a full page stash before adding another. This results in us keeping a vm local page stash around for much longer, which is both a blessing and a curse.
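As a minimal standalone sketch of the failure mode and the fix (toy types and helper names, not the driver's pagevec API), the point is that the push path must guarantee free space before adding, rather than releasing once only after an add has already overflowed:

    #include <assert.h>

    #define STASH_CAPACITY 15                     /* stands in for PAGEVEC_SIZE */

    struct toy_stash {
        unsigned int nr;
        void *pages[STASH_CAPACITY];
    };

    /* Hypothetical helper: hand every stashed page back to the system. */
    static void toy_stash_release(struct toy_stash *stash)
    {
        /* freeing of the pages themselves is elided */
        stash->nr = 0;
    }

    static void toy_stash_push(struct toy_stash *stash, void *page)
    {
        /* make room before pushing; a single post-add check can overflow */
        while (stash->nr >= STASH_CAPACITY)
            toy_stash_release(stash);

        assert(stash->nr < STASH_CAPACITY);
        stash->pages[stash->nr++] = page;
    }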
Signed-off-by: Chris Wilson Cc: Matthew Auld Cc: Joonas Lahtinen Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190529093407.31697-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem_gtt.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 7496cce0d798..5b1a501c4159 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -341,11 +341,11 @@ static struct page *stash_pop_page(struct pagestash *stash) static void stash_push_pagevec(struct pagestash *stash, struct pagevec *pvec) { - int nr; + unsigned int nr; spin_lock_nested(&stash->lock, SINGLE_DEPTH_NESTING); - nr = min_t(int, pvec->nr, pagevec_space(&stash->pvec)); + nr = min_t(typeof(nr), pvec->nr, pagevec_space(&stash->pvec)); memcpy(stash->pvec.pages + stash->pvec.nr, pvec->pages + pvec->nr - nr, sizeof(pvec->pages[0]) * nr); @@ -399,7 +399,8 @@ static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp) page = stack.pages[--stack.nr]; /* Merge spare WC pages to the global stash */ - stash_push_pagevec(&vm->i915->mm.wc_stash, &stack); + if (stack.nr) + stash_push_pagevec(&vm->i915->mm.wc_stash, &stack); /* Push any surplus WC pages onto the local VM stash */ if (stack.nr) @@ -469,8 +470,10 @@ static void vm_free_page(struct i915_address_space *vm, struct page *page) */ might_sleep(); spin_lock(&vm->free_pages.lock); - if (!pagevec_add(&vm->free_pages.pvec, page)) + while (!pagevec_space(&vm->free_pages.pvec)) vm_free_pages_release(vm, false); + GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec) >= PAGEVEC_SIZE); + pagevec_add(&vm->free_pages.pvec, page); spin_unlock(&vm->free_pages.lock); } -- cgit v1.2.3 From 0a4a6e74e7335d8c29eed70398505b5396695cc4 Mon Sep 17 00:00:00 2001 From: Matthew Auld Date: Wed, 29 May 2019 13:31:07 +0100 Subject: drm/i915/gtt: grab wakeref in gen6_alloc_va_range Some steps in gen6_alloc_va_range require the HW to be awake, so ideally we should be grabbing the wakeref ourselves and not relying on the caller already holding it for us. 
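The shape of the change is the usual acquire/release bracket around the code that needs the hardware awake; a rough sketch with stand-in names (not the driver's real runtime-pm API) is below, the important detail being that the unwind path drops the wakeref as well:

    typedef unsigned long toy_wakeref_t;

    /* stand-ins for the runtime-pm get/put pair */
    static toy_wakeref_t toy_pm_get(void) { return 1; }
    static void toy_pm_put(toy_wakeref_t wakeref) { (void)wakeref; }

    static int toy_alloc_va_range(void)
    {
        toy_wakeref_t wakeref = toy_pm_get();   /* HW stays awake from here on */
        int err = 0;

        /* ... allocate and program the page tables (elided) ... */

        if (err) {
            toy_pm_put(wakeref);                /* the error path releases it too */
            return err;
        }

        toy_pm_put(wakeref);
        return 0;
    }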
Suggested-by: Chris Wilson Signed-off-by: Matthew Auld Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190529123108.24422-1-matthew.auld@intel.com --- drivers/gpu/drm/i915/i915_gem_gtt.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 5b1a501c4159..ca8a69e8b098 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -1749,10 +1749,13 @@ static int gen6_alloc_va_range(struct i915_address_space *vm, { struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm)); struct i915_page_table *pt; + intel_wakeref_t wakeref; u64 from = start; unsigned int pde; bool flush = false; + wakeref = intel_runtime_pm_get(vm->i915); + gen6_for_each_pde(pt, &ppgtt->base.pd, start, length, pde) { const unsigned int count = gen6_pte_count(start, length); @@ -1778,12 +1781,15 @@ static int gen6_alloc_va_range(struct i915_address_space *vm, if (flush) { mark_tlbs_dirty(&ppgtt->base); - gen6_ggtt_invalidate(ppgtt->base.vm.i915); + gen6_ggtt_invalidate(vm->i915); } + intel_runtime_pm_put(vm->i915, wakeref); + return 0; unwind_out: + intel_runtime_pm_put(vm->i915, wakeref); gen6_ppgtt_clear_range(vm, from, start - from); return -ENOMEM; } -- cgit v1.2.3 From 6501aa4e3a45075360e72784a48fcd5c32a4eb24 Mon Sep 17 00:00:00 2001 From: Matthew Auld Date: Wed, 29 May 2019 13:31:08 +0100 Subject: drm/i915: add in-kernel blitter client The plan is to use the blitter engine for async object clearing when using local memory, but before we can move the worker to get_pages() we have to first tame some more of our struct_mutex usage. With this in mind we should be able to upstream the object clearing as some selftests, which should serve as a guinea pig for the ongoing locking rework and upcoming async get_pages() framework. 
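For orientation, the two entry points added below are driven roughly as the accompanying selftests do (a usage outline only; locking and error handling are elided):

    /* synchronous helper: emit the fill on the supplied context's engine */
    err = i915_gem_object_fill_blt(obj, ce, val);

    /* async helper: queue a worker that fills the pages and signals a fence */
    err = i915_gem_schedule_fill_pages_blt(obj, ce, obj->mm.pages,
                                           &obj->mm.page_sizes, val);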
Signed-off-by: Matthew Auld Cc: CQ Tang Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190529123108.24422-2-matthew.auld@intel.com --- drivers/gpu/drm/i915/Makefile | 2 + drivers/gpu/drm/i915/gem/i915_gem_client_blt.c | 306 +++++++++++++++++++++ drivers/gpu/drm/i915/gem/i915_gem_client_blt.h | 21 ++ drivers/gpu/drm/i915/gem/i915_gem_object_blt.c | 109 ++++++++ drivers/gpu/drm/i915/gem/i915_gem_object_blt.h | 24 ++ .../drm/i915/gem/selftests/i915_gem_client_blt.c | 127 +++++++++ .../drm/i915/gem/selftests/i915_gem_object_blt.c | 110 ++++++++ drivers/gpu/drm/i915/gt/intel_gpu_commands.h | 1 + .../gpu/drm/i915/selftests/i915_live_selftests.h | 2 + 9 files changed, 702 insertions(+) create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_client_blt.h create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_object_blt.c create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_object_blt.h create mode 100644 drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c create mode 100644 drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 83588e9840f8..a7850bbffbe0 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -90,6 +90,7 @@ obj-y += gem/ gem-y += \ gem/i915_gem_busy.o \ gem/i915_gem_clflush.o \ + gem/i915_gem_client_blt.o \ gem/i915_gem_context.o \ gem/i915_gem_dmabuf.o \ gem/i915_gem_domain.o \ @@ -97,6 +98,7 @@ gem-y += \ gem/i915_gem_fence.o \ gem/i915_gem_internal.o \ gem/i915_gem_object.o \ + gem/i915_gem_object_blt.o \ gem/i915_gem_mman.o \ gem/i915_gem_pages.o \ gem/i915_gem_phys.o \ diff --git a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c new file mode 100644 index 000000000000..4899ca1dd76c --- /dev/null +++ b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c @@ -0,0 +1,306 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2019 Intel Corporation + */ +#include "i915_gem_client_blt.h" + +#include "i915_gem_object_blt.h" +#include "intel_drv.h" + +struct i915_sleeve { + struct i915_vma *vma; + struct drm_i915_gem_object *obj; + struct sg_table *pages; + struct i915_page_sizes page_sizes; +}; + +static int vma_set_pages(struct i915_vma *vma) +{ + struct i915_sleeve *sleeve = vma->private; + + vma->pages = sleeve->pages; + vma->page_sizes = sleeve->page_sizes; + + return 0; +} + +static void vma_clear_pages(struct i915_vma *vma) +{ + GEM_BUG_ON(!vma->pages); + vma->pages = NULL; +} + +static int vma_bind(struct i915_vma *vma, + enum i915_cache_level cache_level, + u32 flags) +{ + return vma->vm->vma_ops.bind_vma(vma, cache_level, flags); +} + +static void vma_unbind(struct i915_vma *vma) +{ + vma->vm->vma_ops.unbind_vma(vma); +} + +static const struct i915_vma_ops proxy_vma_ops = { + .set_pages = vma_set_pages, + .clear_pages = vma_clear_pages, + .bind_vma = vma_bind, + .unbind_vma = vma_unbind, +}; + +static struct i915_sleeve *create_sleeve(struct i915_address_space *vm, + struct drm_i915_gem_object *obj, + struct sg_table *pages, + struct i915_page_sizes *page_sizes) +{ + struct i915_sleeve *sleeve; + struct i915_vma *vma; + int err; + + sleeve = kzalloc(sizeof(*sleeve), GFP_KERNEL); + if (!sleeve) + return ERR_PTR(-ENOMEM); + + vma = i915_vma_instance(obj, vm, NULL); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto err_free; + } + + vma->private = sleeve; + vma->ops = 
&proxy_vma_ops; + + sleeve->vma = vma; + sleeve->obj = i915_gem_object_get(obj); + sleeve->pages = pages; + sleeve->page_sizes = *page_sizes; + + return sleeve; + +err_free: + kfree(sleeve); + return ERR_PTR(err); +} + +static void destroy_sleeve(struct i915_sleeve *sleeve) +{ + i915_gem_object_put(sleeve->obj); + kfree(sleeve); +} + +struct clear_pages_work { + struct dma_fence dma; + struct dma_fence_cb cb; + struct i915_sw_fence wait; + struct work_struct work; + struct irq_work irq_work; + struct i915_sleeve *sleeve; + struct intel_context *ce; + u32 value; +}; + +static const char *clear_pages_work_driver_name(struct dma_fence *fence) +{ + return DRIVER_NAME; +} + +static const char *clear_pages_work_timeline_name(struct dma_fence *fence) +{ + return "clear"; +} + +static void clear_pages_work_release(struct dma_fence *fence) +{ + struct clear_pages_work *w = container_of(fence, typeof(*w), dma); + + destroy_sleeve(w->sleeve); + + i915_sw_fence_fini(&w->wait); + + BUILD_BUG_ON(offsetof(typeof(*w), dma)); + dma_fence_free(&w->dma); +} + +static const struct dma_fence_ops clear_pages_work_ops = { + .get_driver_name = clear_pages_work_driver_name, + .get_timeline_name = clear_pages_work_timeline_name, + .release = clear_pages_work_release, +}; + +static void clear_pages_signal_irq_worker(struct irq_work *work) +{ + struct clear_pages_work *w = container_of(work, typeof(*w), irq_work); + + dma_fence_signal(&w->dma); + dma_fence_put(&w->dma); +} + +static void clear_pages_dma_fence_cb(struct dma_fence *fence, + struct dma_fence_cb *cb) +{ + struct clear_pages_work *w = container_of(cb, typeof(*w), cb); + + if (fence->error) + dma_fence_set_error(&w->dma, fence->error); + + /* + * Push the signalling of the fence into yet another worker to avoid + * the nightmare locking around the fence spinlock. 
+ */ + irq_work_queue(&w->irq_work); +} + +static void clear_pages_worker(struct work_struct *work) +{ + struct clear_pages_work *w = container_of(work, typeof(*w), work); + struct drm_i915_private *i915 = w->ce->gem_context->i915; + struct drm_i915_gem_object *obj = w->sleeve->obj; + struct i915_vma *vma = w->sleeve->vma; + struct i915_request *rq; + int err = w->dma.error; + + if (unlikely(err)) + goto out_signal; + + if (obj->cache_dirty) { + obj->write_domain = 0; + if (i915_gem_object_has_struct_page(obj)) + drm_clflush_sg(w->sleeve->pages); + obj->cache_dirty = false; + } + + /* XXX: we need to kill this */ + mutex_lock(&i915->drm.struct_mutex); + err = i915_vma_pin(vma, 0, 0, PIN_USER); + if (unlikely(err)) + goto out_unlock; + + rq = i915_request_create(w->ce); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto out_unpin; + } + + /* There's no way the fence has signalled */ + if (dma_fence_add_callback(&rq->fence, &w->cb, + clear_pages_dma_fence_cb)) + GEM_BUG_ON(1); + + if (w->ce->engine->emit_init_breadcrumb) { + err = w->ce->engine->emit_init_breadcrumb(rq); + if (unlikely(err)) + goto out_request; + } + + /* XXX: more feverish nightmares await */ + i915_vma_lock(vma); + err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); + i915_vma_unlock(vma); + if (err) + goto out_request; + + err = intel_emit_vma_fill_blt(rq, vma, w->value); +out_request: + if (unlikely(err)) { + i915_request_skip(rq, err); + err = 0; + } + + i915_request_add(rq); +out_unpin: + i915_vma_unpin(vma); +out_unlock: + mutex_unlock(&i915->drm.struct_mutex); +out_signal: + if (unlikely(err)) { + dma_fence_set_error(&w->dma, err); + dma_fence_signal(&w->dma); + dma_fence_put(&w->dma); + } +} + +static int __i915_sw_fence_call +clear_pages_work_notify(struct i915_sw_fence *fence, + enum i915_sw_fence_notify state) +{ + struct clear_pages_work *w = container_of(fence, typeof(*w), wait); + + switch (state) { + case FENCE_COMPLETE: + schedule_work(&w->work); + break; + + case FENCE_FREE: + dma_fence_put(&w->dma); + break; + } + + return NOTIFY_DONE; +} + +static DEFINE_SPINLOCK(fence_lock); + +/* XXX: better name please */ +int i915_gem_schedule_fill_pages_blt(struct drm_i915_gem_object *obj, + struct intel_context *ce, + struct sg_table *pages, + struct i915_page_sizes *page_sizes, + u32 value) +{ + struct drm_i915_private *i915 = to_i915(obj->base.dev); + struct i915_gem_context *ctx = ce->gem_context; + struct i915_address_space *vm; + struct clear_pages_work *work; + struct i915_sleeve *sleeve; + int err; + + vm = ctx->ppgtt ? 
&ctx->ppgtt->vm : &i915->ggtt.vm; + + sleeve = create_sleeve(vm, obj, pages, page_sizes); + if (IS_ERR(sleeve)) + return PTR_ERR(sleeve); + + work = kmalloc(sizeof(*work), GFP_KERNEL); + if (!work) { + destroy_sleeve(sleeve); + return -ENOMEM; + } + + work->value = value; + work->sleeve = sleeve; + work->ce = ce; + + INIT_WORK(&work->work, clear_pages_worker); + + init_irq_work(&work->irq_work, clear_pages_signal_irq_worker); + + dma_fence_init(&work->dma, + &clear_pages_work_ops, + &fence_lock, + i915->mm.unordered_timeline, + 0); + i915_sw_fence_init(&work->wait, clear_pages_work_notify); + + i915_gem_object_lock(obj); + err = i915_sw_fence_await_reservation(&work->wait, + obj->resv, NULL, + true, I915_FENCE_TIMEOUT, + I915_FENCE_GFP); + if (err < 0) { + dma_fence_set_error(&work->dma, err); + } else { + reservation_object_add_excl_fence(obj->resv, &work->dma); + err = 0; + } + i915_gem_object_unlock(obj); + + dma_fence_get(&work->dma); + i915_sw_fence_commit(&work->wait); + + return err; +} + +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) +#include "selftests/i915_gem_client_blt.c" +#endif diff --git a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.h b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.h new file mode 100644 index 000000000000..3dbd28c22ff5 --- /dev/null +++ b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2019 Intel Corporation + */ +#ifndef __I915_GEM_CLIENT_BLT_H__ +#define __I915_GEM_CLIENT_BLT_H__ + +#include + +struct drm_i915_gem_object; +struct i915_page_sizes; +struct intel_context; +struct sg_table; + +int i915_gem_schedule_fill_pages_blt(struct drm_i915_gem_object *obj, + struct intel_context *ce, + struct sg_table *pages, + struct i915_page_sizes *page_sizes, + u32 value); + +#endif diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c new file mode 100644 index 000000000000..fc8ee7ef3d69 --- /dev/null +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c @@ -0,0 +1,109 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2019 Intel Corporation + */ + +#include "i915_gem_object_blt.h" + +#include "i915_gem_clflush.h" +#include "intel_drv.h" + +int intel_emit_vma_fill_blt(struct i915_request *rq, + struct i915_vma *vma, + u32 value) +{ + u32 *cs; + + cs = intel_ring_begin(rq, 8); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + if (INTEL_GEN(rq->i915) >= 8) { + *cs++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (7 - 2); + *cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE; + *cs++ = 0; + *cs++ = vma->size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4; + *cs++ = lower_32_bits(vma->node.start); + *cs++ = upper_32_bits(vma->node.start); + *cs++ = value; + *cs++ = MI_NOOP; + } else { + *cs++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (6 - 2); + *cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE; + *cs++ = 0; + *cs++ = vma->size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4; + *cs++ = vma->node.start; + *cs++ = value; + *cs++ = MI_NOOP; + *cs++ = MI_NOOP; + } + + intel_ring_advance(rq, cs); + + return 0; +} + +int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj, + struct intel_context *ce, + u32 value) +{ + struct drm_i915_private *i915 = to_i915(obj->base.dev); + struct i915_gem_context *ctx = ce->gem_context; + struct i915_address_space *vm; + struct i915_request *rq; + struct i915_vma *vma; + int err; + + /* XXX: ce->vm please */ + vm = ctx->ppgtt ? 
&ctx->ppgtt->vm : &i915->ggtt.vm; + + vma = i915_vma_instance(obj, vm, NULL); + if (IS_ERR(vma)) + return PTR_ERR(vma); + + err = i915_vma_pin(vma, 0, 0, PIN_USER); + if (unlikely(err)) + return err; + + if (obj->cache_dirty & ~obj->cache_coherent) { + i915_gem_object_lock(obj); + i915_gem_clflush_object(obj, 0); + i915_gem_object_unlock(obj); + } + + rq = i915_request_create(ce); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto out_unpin; + } + + err = i915_request_await_object(rq, obj, true); + if (unlikely(err)) + goto out_request; + + if (ce->engine->emit_init_breadcrumb) { + err = ce->engine->emit_init_breadcrumb(rq); + if (unlikely(err)) + goto out_request; + } + + i915_vma_lock(vma); + err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); + i915_vma_unlock(vma); + if (unlikely(err)) + goto out_request; + + err = intel_emit_vma_fill_blt(rq, vma, value); +out_request: + if (unlikely(err)) + i915_request_skip(rq, err); + + i915_request_add(rq); +out_unpin: + i915_vma_unpin(vma); + return err; +} + +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) +#include "selftests/i915_gem_object_blt.c" +#endif diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.h b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.h new file mode 100644 index 000000000000..7ec7de6ac0c0 --- /dev/null +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2019 Intel Corporation + */ + +#ifndef __I915_GEM_OBJECT_BLT_H__ +#define __I915_GEM_OBJECT_BLT_H__ + +#include + +struct drm_i915_gem_object; +struct intel_context; +struct i915_request; +struct i915_vma; + +int intel_emit_vma_fill_blt(struct i915_request *rq, + struct i915_vma *vma, + u32 value); + +int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj, + struct intel_context *ce, + u32 value); + +#endif diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c new file mode 100644 index 000000000000..f3a5eb807c1c --- /dev/null +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c @@ -0,0 +1,127 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2019 Intel Corporation + */ + +#include "i915_selftest.h" + +#include "selftests/igt_flush_test.h" +#include "selftests/mock_drm.h" +#include "mock_context.h" + +static int igt_client_fill(void *arg) +{ + struct intel_context *ce = arg; + struct drm_i915_private *i915 = ce->gem_context->i915; + struct drm_i915_gem_object *obj; + struct rnd_state prng; + IGT_TIMEOUT(end); + u32 *vaddr; + int err = 0; + + prandom_seed_state(&prng, i915_selftest.random_seed); + + do { + u32 sz = prandom_u32_state(&prng) % SZ_32M; + u32 val = prandom_u32_state(&prng); + u32 i; + + sz = round_up(sz, PAGE_SIZE); + + pr_debug("%s with sz=%x, val=%x\n", __func__, sz, val); + + obj = i915_gem_object_create_internal(i915, sz); + if (IS_ERR(obj)) { + err = PTR_ERR(obj); + goto err_flush; + } + + vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB); + if (IS_ERR(vaddr)) { + err = PTR_ERR(vaddr); + goto err_put; + } + + /* + * XXX: The goal is move this to get_pages, so try to dirty the + * CPU cache first to check that we do the required clflush + * before scheduling the blt for !llc platforms. This matches + * some version of reality where at get_pages the pages + * themselves may not yet be coherent with the GPU(swap-in). If + * we are missing the flush then we should see the stale cache + * values after we do the set_to_cpu_domain and pick it up as a + * test failure. 
+ */ + memset32(vaddr, val ^ 0xdeadbeaf, obj->base.size / sizeof(u32)); + + if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)) + obj->cache_dirty = true; + + err = i915_gem_schedule_fill_pages_blt(obj, ce, obj->mm.pages, + &obj->mm.page_sizes, + val); + if (err) + goto err_unpin; + + /* + * XXX: For now do the wait without the object resv lock to + * ensure we don't deadlock. + */ + err = i915_gem_object_wait(obj, + I915_WAIT_INTERRUPTIBLE | + I915_WAIT_ALL, + MAX_SCHEDULE_TIMEOUT); + if (err) + goto err_unpin; + + i915_gem_object_lock(obj); + err = i915_gem_object_set_to_cpu_domain(obj, false); + i915_gem_object_unlock(obj); + if (err) + goto err_unpin; + + for (i = 0; i < obj->base.size / sizeof(u32); ++i) { + if (vaddr[i] != val) { + pr_err("vaddr[%u]=%x, expected=%x\n", i, + vaddr[i], val); + err = -EINVAL; + goto err_unpin; + } + } + + i915_gem_object_unpin_map(obj); + i915_gem_object_put(obj); + } while (!time_after(jiffies, end)); + + goto err_flush; + +err_unpin: + i915_gem_object_unpin_map(obj); +err_put: + i915_gem_object_put(obj); +err_flush: + mutex_lock(&i915->drm.struct_mutex); + if (igt_flush_test(i915, I915_WAIT_LOCKED)) + err = -EIO; + mutex_unlock(&i915->drm.struct_mutex); + + if (err == -ENOMEM) + err = 0; + + return err; +} + +int i915_gem_client_blt_live_selftests(struct drm_i915_private *i915) +{ + static const struct i915_subtest tests[] = { + SUBTEST(igt_client_fill), + }; + + if (i915_terminally_wedged(i915)) + return 0; + + if (!HAS_ENGINE(i915, BCS0)) + return 0; + + return i915_subtests(tests, i915->engine[BCS0]->kernel_context); +} diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c new file mode 100644 index 000000000000..8de568d2c792 --- /dev/null +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c @@ -0,0 +1,110 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2019 Intel Corporation + */ + +#include "i915_selftest.h" + +#include "selftests/igt_flush_test.h" +#include "selftests/mock_drm.h" +#include "mock_context.h" + +static int igt_fill_blt(void *arg) +{ + struct intel_context *ce = arg; + struct drm_i915_private *i915 = ce->gem_context->i915; + struct drm_i915_gem_object *obj; + struct rnd_state prng; + IGT_TIMEOUT(end); + u32 *vaddr; + int err = 0; + + prandom_seed_state(&prng, i915_selftest.random_seed); + + do { + u32 sz = prandom_u32_state(&prng) % SZ_32M; + u32 val = prandom_u32_state(&prng); + u32 i; + + sz = round_up(sz, PAGE_SIZE); + + pr_debug("%s with sz=%x, val=%x\n", __func__, sz, val); + + obj = i915_gem_object_create_internal(i915, sz); + if (IS_ERR(obj)) { + err = PTR_ERR(vaddr); + goto err_flush; + } + + vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB); + if (IS_ERR(vaddr)) { + err = PTR_ERR(vaddr); + goto err_put; + } + + /* + * Make sure the potentially async clflush does its job, if + * required. 
+ */ + memset32(vaddr, val ^ 0xdeadbeaf, obj->base.size / sizeof(u32)); + + if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)) + obj->cache_dirty = true; + + mutex_lock(&i915->drm.struct_mutex); + err = i915_gem_object_fill_blt(obj, ce, val); + mutex_unlock(&i915->drm.struct_mutex); + if (err) + goto err_unpin; + + i915_gem_object_lock(obj); + err = i915_gem_object_set_to_cpu_domain(obj, false); + i915_gem_object_unlock(obj); + if (err) + goto err_unpin; + + for (i = 0; i < obj->base.size / sizeof(u32); ++i) { + if (vaddr[i] != val) { + pr_err("vaddr[%u]=%x, expected=%x\n", i, + vaddr[i], val); + err = -EINVAL; + goto err_unpin; + } + } + + i915_gem_object_unpin_map(obj); + i915_gem_object_put(obj); + } while (!time_after(jiffies, end)); + + goto err_flush; + +err_unpin: + i915_gem_object_unpin_map(obj); +err_put: + i915_gem_object_put(obj); +err_flush: + mutex_lock(&i915->drm.struct_mutex); + if (igt_flush_test(i915, I915_WAIT_LOCKED)) + err = -EIO; + mutex_unlock(&i915->drm.struct_mutex); + + if (err == -ENOMEM) + err = 0; + + return err; +} + +int i915_gem_object_blt_live_selftests(struct drm_i915_private *i915) +{ + static const struct i915_subtest tests[] = { + SUBTEST(igt_fill_blt), + }; + + if (i915_terminally_wedged(i915)) + return 0; + + if (!HAS_ENGINE(i915, BCS0)) + return 0; + + return i915_subtests(tests, i915->engine[BCS0]->kernel_context); +} diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h index a34ece53a771..eec31e36aca7 100644 --- a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h +++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h @@ -180,6 +180,7 @@ #define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2) #define COLOR_BLT_CMD (2<<29 | 0x40<<22 | (5-2)) +#define XY_COLOR_BLT_CMD (2 << 29 | 0x50 << 22) #define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4) #define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6) #define XY_MONO_SRC_COPY_IMM_BLT ((2<<29)|(0x71<<22)|5) diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h index 9bda36a598b3..d5dc4427d664 100644 --- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h +++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h @@ -25,6 +25,8 @@ selftest(gem, i915_gem_live_selftests) selftest(evict, i915_gem_evict_live_selftests) selftest(hugepages, i915_gem_huge_page_live_selftests) selftest(contexts, i915_gem_context_live_selftests) +selftest(blt, i915_gem_object_blt_live_selftests) +selftest(client, i915_gem_client_blt_live_selftests) selftest(reset, intel_reset_live_selftests) selftest(hangcheck, intel_hangcheck_live_selftests) selftest(execlists, intel_execlists_live_selftests) -- cgit v1.2.3 From 47bc28d7ee6d8378ba4451c43885cb3241302243 Mon Sep 17 00:00:00 2001 From: Janusz Krzysztofik Date: Thu, 30 May 2019 15:31:05 +0200 Subject: drm/i915: Split off pci_driver.remove() tail to drm_driver.release() In order to support driver hot unbind, some cleanup operations, now performed on PCI driver remove, must be called later, after all device file descriptors are closed. Split out those operations from the tail of pci_driver.remove() callback and put them into drm_driver.release() which is called as soon as all references to the driver are put. As a result, those cleanups will be now run on last drm_dev_put(), either still called from pci_driver.remove() if all device file descriptors are already closed, or on last drm_release() file operation. 
Signed-off-by: Janusz Krzysztofik Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190530133105.30467-1-janusz.krzysztofik@linux.intel.com --- drivers/gpu/drm/i915/i915_drv.c | 17 +++++++++++++---- drivers/gpu/drm/i915/i915_drv.h | 1 + drivers/gpu/drm/i915/i915_gem.c | 10 +++++++++- 3 files changed, 23 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 6a8b31f631ac..1af6751e1b36 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -742,6 +742,7 @@ static int i915_load_modeset_init(struct drm_device *dev) cleanup_gem: i915_gem_suspend(dev_priv); + i915_gem_fini_hw(dev_priv); i915_gem_fini(dev_priv); cleanup_modeset: intel_modeset_cleanup(dev); @@ -1690,7 +1691,6 @@ static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv) pci_disable_msi(pdev); pm_qos_remove_request(&dev_priv->pm_qos); - i915_ggtt_cleanup_hw(dev_priv); } /** @@ -1914,6 +1914,7 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent) out_cleanup_hw: i915_driver_cleanup_hw(dev_priv); + i915_ggtt_cleanup_hw(dev_priv); out_cleanup_mmio: i915_driver_cleanup_mmio(dev_priv); out_runtime_pm_put: @@ -1965,21 +1966,29 @@ void i915_driver_unload(struct drm_device *dev) cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); i915_reset_error_state(dev_priv); - i915_gem_fini(dev_priv); + i915_gem_fini_hw(dev_priv); intel_power_domains_fini_hw(dev_priv); i915_driver_cleanup_hw(dev_priv); - i915_driver_cleanup_mmio(dev_priv); enable_rpm_wakeref_asserts(dev_priv); - intel_runtime_pm_cleanup(dev_priv); } static void i915_driver_release(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); + disable_rpm_wakeref_asserts(dev_priv); + + i915_gem_fini(dev_priv); + + i915_ggtt_cleanup_hw(dev_priv); + i915_driver_cleanup_mmio(dev_priv); + + enable_rpm_wakeref_asserts(dev_priv); + intel_runtime_pm_cleanup(dev_priv); + i915_driver_cleanup_early(dev_priv); i915_driver_destroy(dev_priv); } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 4eb8f6a181c1..0dee503a5b68 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2745,6 +2745,7 @@ void i915_gem_init_mmio(struct drm_i915_private *i915); int __must_check i915_gem_init(struct drm_i915_private *dev_priv); int __must_check i915_gem_init_hw(struct drm_i915_private *dev_priv); void i915_gem_init_swizzling(struct drm_i915_private *dev_priv); +void i915_gem_fini_hw(struct drm_i915_private *dev_priv); void i915_gem_fini(struct drm_i915_private *dev_priv); int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv, unsigned int flags, long timeout); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index e5aafbeb1d19..5c6d94fe1ca2 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1664,7 +1664,7 @@ err_uc_misc: return ret; } -void i915_gem_fini(struct drm_i915_private *dev_priv) +void i915_gem_fini_hw(struct drm_i915_private *dev_priv) { GEM_BUG_ON(dev_priv->gt.awake); @@ -1679,6 +1679,14 @@ void i915_gem_fini(struct drm_i915_private *dev_priv) mutex_lock(&dev_priv->drm.struct_mutex); intel_uc_fini_hw(dev_priv); intel_uc_fini(dev_priv); + mutex_unlock(&dev_priv->drm.struct_mutex); + + i915_gem_drain_freed_objects(dev_priv); +} + +void i915_gem_fini(struct drm_i915_private *dev_priv) +{ + mutex_lock(&dev_priv->drm.struct_mutex); 
intel_engines_cleanup(dev_priv); i915_gem_contexts_fini(dev_priv); i915_gem_fini_scratch(dev_priv); -- cgit v1.2.3 From f2d1315895ca5a4747abd3f6ece66fc25613405a Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 30 May 2019 09:23:58 +0100 Subject: drm/i915: Drop check for non-NULL entry in llist_for_each_entry_safe Since the next entry is an offset from a pointer, it can not be NULL. For simplicity, drop the extra conditional before calling cond_resched() Reported-by: Dan Carpenter Signed-off-by: Chris Wilson Reviewed-by: Joonas Lahtinen Link: https://patchwork.freedesktop.org/patch/msgid/20190530082358.13663-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_object.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c index f064876f1214..55e79fdb81aa 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c @@ -248,8 +248,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915, GEM_BUG_ON(!atomic_read(&i915->mm.free_count)); atomic_dec(&i915->mm.free_count); - if (on) - cond_resched(); + cond_resched(); } intel_runtime_pm_put(i915, wakeref); } -- cgit v1.2.3 From 1fbf9d81429af2a7333b72877cc01af1a7612d35 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 30 May 2019 13:13:11 +0100 Subject: drm/i915: Make default value for i915.mmio_debug a compile time option The normal behaviour is to periodically check for a mmio access error, and once detected enable mmio access checking. However this is useless if the error only occurs once during module load, and so we may miss such errors in CI. To allow ourselves to catch them, allow CI to opt into always enabling mmio debugging. Signed-off-by: Chris Wilson Cc: Mika Kuoppala Reviewed-by: Joonas Lahtinen Link: https://patchwork.freedesktop.org/patch/msgid/20190530121311.6794-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/Kconfig.debug | 14 ++++++++++++++ drivers/gpu/drm/i915/i915_params.h | 2 +- 2 files changed, 15 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug index 04b686d2c2d0..09aa0f4c8bf1 100644 --- a/drivers/gpu/drm/i915/Kconfig.debug +++ b/drivers/gpu/drm/i915/Kconfig.debug @@ -32,6 +32,7 @@ config DRM_I915_DEBUG select DRM_I915_SW_FENCE_DEBUG_OBJECTS select DRM_I915_SELFTEST select DRM_I915_DEBUG_RUNTIME_PM + select DRM_I915_DEBUG_MMIO default n help Choose this option to turn on extra driver debugging that may affect @@ -41,6 +42,19 @@ config DRM_I915_DEBUG If in doubt, say "N". +config DRM_I915_DEBUG_MMIO + bool "Always insert extra checks around mmio access by default" + default n + help + By default, always enables the extra sanity checks (extra register + reads) around every mmio (register) access that will slow the system + down. This sets the default value of i915.mmio_debug to -1 and can + be overridden at module load. + + Recommended for driver developers only. + + If in doubt, say "N". 
+ config DRM_I915_DEBUG_GEM bool "Insert extra checks into the GEM internals" default n diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h index 3f14e9881a0d..cf9d0be49a38 100644 --- a/drivers/gpu/drm/i915/i915_params.h +++ b/drivers/gpu/drm/i915/i915_params.h @@ -59,7 +59,7 @@ struct drm_printer; param(char *, guc_firmware_path, NULL) \ param(char *, huc_firmware_path, NULL) \ param(char *, dmc_firmware_path, NULL) \ - param(int, mmio_debug, 0) \ + param(int, mmio_debug, -IS_ENABLED(CONFIG_DRM_I915_DEBUG_MMIO)) \ param(int, edp_vswing, 0) \ param(int, reset, 2) \ param(unsigned int, inject_load_failure, 0) \ -- cgit v1.2.3 From fd1e194f4869dca12ee0cba7c25621d436030a9e Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Fri, 31 May 2019 11:32:01 +0100 Subject: drm/i915: fix use of uninitialized pointer vaddr The assignment of err is using the incorrect pointer vaddr that has not been initialized. Fix this by using the correct pointer obj instead. Addresses-Coverity: ("Uninitialized pointer read") Fixes: 6501aa4e3a45 ("drm/i915: add in-kernel blitter client") Signed-off-by: Colin Ian King Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190531103201.10124-1-colin.king@canonical.com --- drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c index 8de568d2c792..e23d8c9e9298 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c @@ -32,7 +32,7 @@ static int igt_fill_blt(void *arg) obj = i915_gem_object_create_internal(i915, sz); if (IS_ERR(obj)) { - err = PTR_ERR(vaddr); + err = PTR_ERR(obj); goto err_flush; } -- cgit v1.2.3 From 7ef5ef5cdead61b8bc17493aae565962611a2918 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Mon, 6 May 2019 16:48:01 +0300 Subject: drm/i915: add force_probe module parameter to replace alpha_support The i915.alpha_support module parameter has caused some confusion along the way. Add new i915.force_probe parameter to specify PCI IDs of devices to probe, when the devices are recognized but not automatically probed by the driver. The name is intended to reflect what the parameter effectively does, avoiding any overloaded semantics of "alpha" and "support". The parameter supports "" to disable, "<pci-id>,[<pci-id>,...]" to enable force probe for one or more devices, and "*" to enable force probe for all known devices. Also add new CONFIG_DRM_I915_FORCE_PROBE config option to replace the DRM_I915_ALPHA_SUPPORT option. This defaults to "*" if DRM_I915_ALPHA_SUPPORT=y. Instead of replacing i915.alpha_support immediately, let the two coexist for a while, with a deprecation message, for a transition period.
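In practice the new knob can be set from the module command line or baked in via Kconfig; an illustrative example, reusing the placeholder device IDs from the help text below:

    # probe one unsupported device, several, or everything
    modprobe i915 force_probe=4500
    modprobe i915 force_probe=4500,4571
    modprobe i915 force_probe='*'

    # or make it the built-in default
    CONFIG_DRM_I915_FORCE_PROBE="4500"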
Cc: Joonas Lahtinen Cc: Rodrigo Vivi Reviewed-by: Rodrigo Vivi Acked-by: Joonas Lahtinen Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190506134801.28751-1-jani.nikula@intel.com --- drivers/gpu/drm/i915/Kconfig | 29 +++++++++++------- drivers/gpu/drm/i915/i915_drv.h | 2 -- drivers/gpu/drm/i915/i915_params.c | 7 +++-- drivers/gpu/drm/i915/i915_params.h | 1 + drivers/gpu/drm/i915/i915_pci.c | 51 ++++++++++++++++++++++++++++---- drivers/gpu/drm/i915/intel_device_info.h | 2 +- 6 files changed, 72 insertions(+), 20 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig index 978cb39a47a8..0d21402945ab 100644 --- a/drivers/gpu/drm/i915/Kconfig +++ b/drivers/gpu/drm/i915/Kconfig @@ -45,19 +45,28 @@ config DRM_I915 config DRM_I915_ALPHA_SUPPORT bool "Enable alpha quality support for new Intel hardware by default" depends on DRM_I915 - default n help - Choose this option if you have new Intel hardware and want to enable - the alpha quality i915 driver support for the hardware in this kernel - version. You can also enable the support at runtime using the module - parameter i915.alpha_support=1; this option changes the default for - that module parameter. + This option is deprecated. Use DRM_I915_FORCE_PROBE option instead. - It is recommended to upgrade to a kernel version with proper support - as soon as it is available. Generally fixes for platforms with alpha - support are not backported to older kernels. +config DRM_I915_FORCE_PROBE + string "Force probe driver for selected new Intel hardware" + depends on DRM_I915 + default "*" if DRM_I915_ALPHA_SUPPORT + help + This is the default value for the i915.force_probe module + parameter. Using the module parameter overrides this option. - If in doubt, say "N". + Force probe the driver for new Intel graphics devices that are + recognized but not properly supported by this kernel version. It is + recommended to upgrade to a kernel version with proper support as soon + as it is available. + + Use "" to disable force probe. If in doubt, use this. + + Use "<pci-id>[,<pci-id>,...]" to force probe the driver for listed + devices. For example, "4500" or "4500,4571". + + Use "*" to force probe the driver for all known devices. config DRM_I915_CAPTURE_ERROR bool "Enable capturing GPU state following a hang" diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 0dee503a5b68..8dcc30c1846a 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2340,8 +2340,6 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define IS_ICL_WITH_PORT_F(dev_priv) \ IS_SUBPLATFORM(dev_priv, INTEL_ICELAKE, INTEL_SUBPLATFORM_PORTF) -#define IS_ALPHA_SUPPORT(intel_info) ((intel_info)->is_alpha_support) - #define SKL_REVID_A0 0x0 #define SKL_REVID_B0 0x1 #define SKL_REVID_C0 0x2 diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c index b5be0abbba35..5b07766a1c26 100644 --- a/drivers/gpu/drm/i915/i915_params.c +++ b/drivers/gpu/drm/i915/i915_params.c @@ -87,9 +87,12 @@ i915_param_named_unsafe(enable_psr, int, 0600, "(0=disabled, 1=enabled) " "Default: -1 (use per-chip default)"); +i915_param_named_unsafe(force_probe, charp, 0400, + "Force probe the driver for specified devices. " + "See CONFIG_DRM_I915_FORCE_PROBE for details."); + i915_param_named_unsafe(alpha_support, bool, 0400, - "Enable alpha quality driver support for latest hardware. " - "See also CONFIG_DRM_I915_ALPHA_SUPPORT."); + "Deprecated.
See i915.force_probe."); i915_param_named_unsafe(disable_power_well, int, 0400, "Disable display power wells when possible " diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h index cf9d0be49a38..a4770ce46bd2 100644 --- a/drivers/gpu/drm/i915/i915_params.h +++ b/drivers/gpu/drm/i915/i915_params.h @@ -64,6 +64,7 @@ struct drm_printer; param(int, reset, 2) \ param(unsigned int, inject_load_failure, 0) \ param(int, fastboot, -1) \ + param(char *, force_probe, CONFIG_DRM_I915_FORCE_PROBE) \ /* leave bools at the end to not create holes */ \ param(bool, alpha_support, IS_ENABLED(CONFIG_DRM_I915_ALPHA_SUPPORT)) \ param(bool, enable_hangcheck, true) \ diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index fc66d7f348fc..e761ea86b481 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -760,7 +760,7 @@ static const struct intel_device_info intel_icelake_11_info = { static const struct intel_device_info intel_elkhartlake_info = { GEN11_FEATURES, PLATFORM(INTEL_ELKHARTLAKE), - .is_alpha_support = 1, + .require_force_probe = 1, .engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0), .ppgtt_size = 36, }; @@ -854,16 +854,57 @@ static void i915_pci_remove(struct pci_dev *pdev) pci_set_drvdata(pdev, NULL); } +/* is device_id present in comma separated list of ids */ +static bool force_probe(u16 device_id, const char *devices) +{ + char *s, *p, *tok; + bool ret; + + /* FIXME: transitional */ + if (i915_modparams.alpha_support) { + DRM_INFO("i915.alpha_support is deprecated, use i915.force_probe=%04x instead\n", + device_id); + return true; + } + + if (!devices || !*devices) + return false; + + /* match everything */ + if (strcmp(devices, "*") == 0) + return true; + + s = kstrdup(devices, GFP_KERNEL); + if (!s) + return false; + + for (p = s, ret = false; (tok = strsep(&p, ",")) != NULL; ) { + u16 val; + + if (kstrtou16(tok, 16, &val) == 0 && val == device_id) { + ret = true; + break; + } + } + + kfree(s); + + return ret; +} + static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct intel_device_info *intel_info = (struct intel_device_info *) ent->driver_data; int err; - if (IS_ALPHA_SUPPORT(intel_info) && !i915_modparams.alpha_support) { - DRM_INFO("The driver support for your hardware in this kernel version is alpha quality\n" - "See CONFIG_DRM_I915_ALPHA_SUPPORT or i915.alpha_support module parameter\n" - "to enable support in this kernel version, or check for kernel updates.\n"); + if (intel_info->require_force_probe && + !force_probe(pdev->device, i915_modparams.force_probe)) { + DRM_INFO("Your graphics device %04x is not properly supported by the driver in this\n" + "kernel version. 
To force driver probe anyway, use i915.force_probe=%04x\n" + "module parameter or CONFIG_DRM_I915_FORCE_PROBE=%04x configuration option,\n" + "or (recommended) check for kernel updates.\n", + pdev->device, pdev->device, pdev->device); return -ENODEV; } diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h index 6412a9c72898..d67dedf0cbd8 100644 --- a/drivers/gpu/drm/i915/intel_device_info.h +++ b/drivers/gpu/drm/i915/intel_device_info.h @@ -105,7 +105,7 @@ enum intel_ppgtt_type { #define DEV_INFO_FOR_EACH_FLAG(func) \ func(is_mobile); \ func(is_lp); \ - func(is_alpha_support); \ + func(require_force_probe); \ /* Keep has_* in alphabetical order */ \ func(has_64bit_reloc); \ func(gpu_reset_clobbers_display); \ -- cgit v1.2.3 From 3b4fa9640ccded07fff6d563d3ac1b2f3f111d97 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 30 May 2019 21:34:59 +0100 Subject: drm/i915: Track the purgeable objects on a separate eviction list Currently the purgeable objects, I915_MADV_DONTNEED, are mixed in the normal bound/unbound lists. Every shrinker pass starts with an attempt to purge from this set of unneeded objects, which entails us doing a walk over both lists looking for any candidates. If there are none, and since we are shrinking we can reasonably assume that the lists are full!, this becomes a very slow futile walk. If we separate out the purgeable objects into own list, this search then becomes its own phase that is preferentially handled during shrinking. Instead the cost becomes that we then need to filter the purgeable list if we want to distinguish between bound and unbound objects. Signed-off-by: Chris Wilson Cc: Joonas Lahtinen Cc: Matthew Auld Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190530203500.26272-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_domain.c | 14 +++++++++----- drivers/gpu/drm/i915/gem/i915_gem_object.c | 11 ++++++++++- drivers/gpu/drm/i915/gem/i915_gem_pm.c | 1 + drivers/gpu/drm/i915/gem/i915_gem_shmem.c | 4 +--- drivers/gpu/drm/i915/gem/i915_gem_shrinker.c | 22 ++++++++++------------ drivers/gpu/drm/i915/i915_drv.h | 16 ++++++++++------ drivers/gpu/drm/i915/i915_gem.c | 20 ++++++++++++++++++-- drivers/gpu/drm/i915/i915_vma.c | 3 ++- 8 files changed, 61 insertions(+), 30 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c index cce96e6c6e52..52b73e90c9f4 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c @@ -462,7 +462,6 @@ err_unpin_global: static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj) { struct drm_i915_private *i915 = to_i915(obj->base.dev); - struct list_head *list; struct i915_vma *vma; GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); @@ -476,10 +475,15 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj) } mutex_unlock(&i915->ggtt.vm.mutex); - spin_lock(&i915->mm.obj_lock); - list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list; - list_move_tail(&obj->mm.link, list); - spin_unlock(&i915->mm.obj_lock); + if (obj->mm.madv == I915_MADV_WILLNEED) { + struct list_head *list; + + spin_lock(&i915->mm.obj_lock); + list = obj->bind_count ? 
+ &i915->mm.bound_list : &i915->mm.unbound_list; + list_move_tail(&obj->mm.link, list); + spin_unlock(&i915->mm.obj_lock); + } } void diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c index 55e79fdb81aa..1ec60be06755 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c @@ -333,9 +333,18 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj) if (obj->mm.quirked) __i915_gem_object_unpin_pages(obj); - if (discard_backing_storage(obj)) + if (discard_backing_storage(obj)) { + struct drm_i915_private *i915 = to_i915(obj->base.dev); + obj->mm.madv = I915_MADV_DONTNEED; + if (i915_gem_object_has_pages(obj)) { + spin_lock(&i915->mm.obj_lock); + list_move_tail(&obj->mm.link, &i915->mm.purge_list); + spin_unlock(&i915->mm.obj_lock); + } + } + /* * Before we free the object, make sure any pure RCU-only * read-side critical sections are complete, e.g. diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c index 11890e96ed65..89bb6d822f6e 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c @@ -164,6 +164,7 @@ void i915_gem_suspend_late(struct drm_i915_private *i915) struct list_head *phases[] = { &i915->mm.unbound_list, &i915->mm.bound_list, + &i915->mm.purge_list, NULL }, **phase; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c index 665f22ebf8e8..19d9ecdb2894 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c @@ -80,9 +80,7 @@ rebuild_st: sg_page_sizes = 0; for (i = 0; i < page_count; i++) { const unsigned int shrink[] = { - (I915_SHRINK_BOUND | - I915_SHRINK_UNBOUND | - I915_SHRINK_PURGEABLE), + I915_SHRINK_BOUND | I915_SHRINK_UNBOUND, 0, }, *s = shrink; gfp_t gfp = noreclaim; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c index cd42299f019a..6a93e326abf3 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c @@ -144,6 +144,7 @@ i915_gem_shrink(struct drm_i915_private *i915, struct list_head *list; unsigned int bit; } phases[] = { + { &i915->mm.purge_list, ~0u }, { &i915->mm.unbound_list, I915_SHRINK_UNBOUND }, { &i915->mm.bound_list, I915_SHRINK_BOUND }, { NULL, 0 }, @@ -226,10 +227,6 @@ i915_gem_shrink(struct drm_i915_private *i915, mm.link))) { list_move_tail(&obj->mm.link, &still_in_list); - if (flags & I915_SHRINK_PURGEABLE && - obj->mm.madv != I915_MADV_DONTNEED) - continue; - if (flags & I915_SHRINK_VMAPS && !is_vmalloc_addr(obj->mm.mapping)) continue; @@ -239,6 +236,10 @@ i915_gem_shrink(struct drm_i915_private *i915, i915_gem_object_is_framebuffer(obj))) continue; + if (!(flags & I915_SHRINK_BOUND) && + READ_ONCE(obj->bind_count)) + continue; + if (!can_release_pages(obj)) continue; @@ -324,6 +325,11 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc) count += obj->base.size >> PAGE_SHIFT; num_objects++; } + list_for_each_entry(obj, &i915->mm.purge_list, mm.link) + if (!i915_gem_object_is_active(obj) && can_release_pages(obj)) { + count += obj->base.size >> PAGE_SHIFT; + num_objects++; + } spin_unlock(&i915->mm.obj_lock); /* Update our preferred vmscan batch size for the next pass. 
@@ -361,15 +367,7 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc) &sc->nr_scanned, I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | - I915_SHRINK_PURGEABLE | I915_SHRINK_WRITEBACK); - if (sc->nr_scanned < sc->nr_to_scan) - freed += i915_gem_shrink(i915, - sc->nr_to_scan - sc->nr_scanned, - &sc->nr_scanned, - I915_SHRINK_BOUND | - I915_SHRINK_UNBOUND | - I915_SHRINK_WRITEBACK); if (sc->nr_scanned < sc->nr_to_scan && current_is_kswapd()) { intel_wakeref_t wakeref; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 8dcc30c1846a..dc21955891c7 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -864,6 +864,10 @@ struct i915_gem_mm { * not actually have any pages attached. */ struct list_head unbound_list; + /** + * List of objects which are purgeable. May be active. + */ + struct list_head purge_list; /** List of all objects in gtt_space, currently mmaped by userspace. * All objects within this list must also be on bound_list. @@ -2865,12 +2869,12 @@ unsigned long i915_gem_shrink(struct drm_i915_private *i915, unsigned long target, unsigned long *nr_scanned, unsigned flags); -#define I915_SHRINK_PURGEABLE BIT(0) -#define I915_SHRINK_UNBOUND BIT(1) -#define I915_SHRINK_BOUND BIT(2) -#define I915_SHRINK_ACTIVE BIT(3) -#define I915_SHRINK_VMAPS BIT(4) -#define I915_SHRINK_WRITEBACK BIT(5) +#define I915_SHRINK_UNBOUND BIT(0) +#define I915_SHRINK_BOUND BIT(1) +#define I915_SHRINK_ACTIVE BIT(2) +#define I915_SHRINK_VMAPS BIT(3) +#define I915_SHRINK_WRITEBACK BIT(4) + unsigned long i915_gem_shrink_all(struct drm_i915_private *i915); void i915_gem_shrinker_register(struct drm_i915_private *i915); void i915_gem_shrinker_unregister(struct drm_i915_private *i915); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 5c6d94fe1ca2..1362a8803d2a 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1095,7 +1095,7 @@ int i915_gem_madvise_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *i915 = to_i915(dev); struct drm_i915_gem_madvise *args = data; struct drm_i915_gem_object *obj; int err; @@ -1118,7 +1118,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, if (i915_gem_object_has_pages(obj) && i915_gem_object_is_tiled(obj) && - dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) { + i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) { if (obj->mm.madv == I915_MADV_WILLNEED) { GEM_BUG_ON(!obj->mm.quirked); __i915_gem_object_unpin_pages(obj); @@ -1134,6 +1134,20 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, if (obj->mm.madv != __I915_MADV_PURGED) obj->mm.madv = args->madv; + if (i915_gem_object_has_pages(obj)) { + struct list_head *list; + + spin_lock(&i915->mm.obj_lock); + if (obj->mm.madv != I915_MADV_WILLNEED) + list = &i915->mm.purge_list; + else if (obj->bind_count) + list = &i915->mm.bound_list; + else + list = &i915->mm.unbound_list; + list_move_tail(&obj->mm.link, list); + spin_unlock(&i915->mm.obj_lock); + } + /* if the object is no longer attached, discard its backing storage */ if (obj->mm.madv == I915_MADV_DONTNEED && !i915_gem_object_has_pages(obj)) @@ -1750,6 +1764,7 @@ static void i915_gem_init__mm(struct drm_i915_private *i915) init_llist_head(&i915->mm.free_list); + INIT_LIST_HEAD(&i915->mm.purge_list); INIT_LIST_HEAD(&i915->mm.unbound_list); INIT_LIST_HEAD(&i915->mm.bound_list); INIT_LIST_HEAD(&i915->mm.fence_list); @@ -1844,6 
+1859,7 @@ int i915_gem_freeze_late(struct drm_i915_private *i915) i915_gem_object_unlock(obj); } } + GEM_BUG_ON(!list_empty(&i915->mm.purge_list)); return 0; } diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 59a2f6af6103..f640caec4bae 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -717,7 +717,8 @@ i915_vma_remove(struct i915_vma *vma) struct drm_i915_gem_object *obj = vma->obj; spin_lock(&i915->mm.obj_lock); - if (--obj->bind_count == 0) + if (--obj->bind_count == 0 && + obj->mm.madv == I915_MADV_WILLNEED) list_move_tail(&obj->mm.link, &i915->mm.unbound_list); spin_unlock(&i915->mm.obj_lock); -- cgit v1.2.3 From d82b4b26218d359eeba3f401c9fc649388641b1a Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 30 May 2019 21:35:00 +0100 Subject: drm/i915: Report all objects with allocated pages to the shrinker Currently, we try to report to the shrinker the precise number of objects (pages) that are available to be reaped at this moment. This requires searching all objects with allocated pages to see if they fulfill the search criteria, and this count is performed quite frequently. (The shrinker tries to free ~128 pages on each invocation, before which we count all the objects; counting takes longer than unbinding the objects!) If we take the pragmatic view that with sufficient desire, all objects are eventually reapable (they become inactive, or no longer used as framebuffer etc), we can simply return the count of pinned pages maintained during get_pages/put_pages rather than walk the lists every time. The downside is that we may (slightly) over-report the number of objects/pages we could shrink and so penalize ourselves by shrinking more than required. This is mitigated by keeping the order in which we shrink objects such that we avoid penalizing active and frequently used objects, and if memory is so tight that we need to free them we would need to anyway. v2: Only expose shrinkable objects to the shrinker; a small reduction in not considering stolen and foreign objects. 
v3: Restore the tracking from a "backup" copy from before the gem/ split Signed-off-by: Chris Wilson Cc: Joonas Lahtinen Cc: Matthew Auld Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190530203500.26272-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_domain.c | 3 +- drivers/gpu/drm/i915/gem/i915_gem_object.c | 33 ++++------------ drivers/gpu/drm/i915/gem/i915_gem_pages.c | 20 +++++++--- drivers/gpu/drm/i915/gem/i915_gem_shrinker.c | 28 +++----------- drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 2 +- drivers/gpu/drm/i915/i915_debugfs.c | 58 ++-------------------------- drivers/gpu/drm/i915/i915_drv.h | 7 ++-- drivers/gpu/drm/i915/i915_gem.c | 23 +++++------ drivers/gpu/drm/i915/i915_vma.c | 16 ++++++-- 9 files changed, 62 insertions(+), 128 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c index 52b73e90c9f4..e5deae62681f 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c @@ -475,7 +475,8 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj) } mutex_unlock(&i915->ggtt.vm.mutex); - if (obj->mm.madv == I915_MADV_WILLNEED) { + if (i915_gem_object_is_shrinkable(obj) && + obj->mm.madv == I915_MADV_WILLNEED) { struct list_head *list; spin_lock(&i915->mm.obj_lock); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c index 1ec60be06755..b840cf179bbe 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c @@ -44,25 +44,6 @@ void i915_gem_object_free(struct drm_i915_gem_object *obj) return kmem_cache_free(global.slab_objects, obj); } -/* some bookkeeping */ -static void i915_gem_info_add_obj(struct drm_i915_private *i915, - u64 size) -{ - spin_lock(&i915->mm.object_stat_lock); - i915->mm.object_count++; - i915->mm.object_memory += size; - spin_unlock(&i915->mm.object_stat_lock); -} - -static void i915_gem_info_remove_obj(struct drm_i915_private *i915, - u64 size) -{ - spin_lock(&i915->mm.object_stat_lock); - i915->mm.object_count--; - i915->mm.object_memory -= size; - spin_unlock(&i915->mm.object_stat_lock); -} - static void frontbuffer_retire(struct i915_active_request *active, struct i915_request *request) @@ -98,8 +79,6 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj, obj->mm.madv = I915_MADV_WILLNEED; INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN); mutex_init(&obj->mm.get_page.lock); - - i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size); } /** @@ -163,11 +142,14 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file) static bool discard_backing_storage(struct drm_i915_gem_object *obj) { - /* If we are the last user of the backing storage (be it shmemfs + /* + * If we are the last user of the backing storage (be it shmemfs * pages or stolen etc), we know that the pages are going to be * immediately released. In this case, we can then skip copying * back the contents from the GPU. */ + if (!i915_gem_object_is_shrinkable(obj)) + return false; if (obj->mm.madv != I915_MADV_WILLNEED) return false; @@ -208,13 +190,15 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915, GEM_BUG_ON(!list_empty(&obj->vma.list)); GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma.tree)); - /* This serializes freeing with the shrinker. Since the free + /* + * This serializes freeing with the shrinker. 
Since the free * is delayed, first by RCU then by the workqueue, we want the * shrinker to be able to free pages of unreferenced objects, * or else we may oom whilst there are plenty of deferred * freed objects. */ - if (i915_gem_object_has_pages(obj)) { + if (i915_gem_object_has_pages(obj) && + i915_gem_object_is_shrinkable(obj)) { spin_lock(&i915->mm.obj_lock); list_del_init(&obj->mm.link); spin_unlock(&i915->mm.obj_lock); @@ -240,7 +224,6 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915, reservation_object_fini(&obj->__builtin_resv); drm_gem_object_release(&obj->base); - i915_gem_info_remove_obj(i915, obj->base.size); bitmap_free(obj->bit_17); i915_gem_object_free(obj); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c index e53860147f21..7e64fd6bc19b 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c @@ -56,9 +56,13 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, } GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg)); - spin_lock(&i915->mm.obj_lock); - list_add(&obj->mm.link, &i915->mm.unbound_list); - spin_unlock(&i915->mm.obj_lock); + if (i915_gem_object_is_shrinkable(obj)) { + spin_lock(&i915->mm.obj_lock); + i915->mm.shrink_count++; + i915->mm.shrink_memory += obj->base.size; + list_add(&obj->mm.link, &i915->mm.unbound_list); + spin_unlock(&i915->mm.obj_lock); + } } int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj) @@ -146,9 +150,13 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj) if (IS_ERR_OR_NULL(pages)) return pages; - spin_lock(&i915->mm.obj_lock); - list_del(&obj->mm.link); - spin_unlock(&i915->mm.obj_lock); + if (i915_gem_object_is_shrinkable(obj)) { + spin_lock(&i915->mm.obj_lock); + list_del(&obj->mm.link); + i915->mm.shrink_count--; + i915->mm.shrink_memory -= obj->base.size; + spin_unlock(&i915->mm.obj_lock); + } if (obj->mm.mapping) { void *ptr; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c index 6a93e326abf3..d71e630c6fb8 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c @@ -309,30 +309,14 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc) { struct drm_i915_private *i915 = container_of(shrinker, struct drm_i915_private, mm.shrinker); - struct drm_i915_gem_object *obj; - unsigned long num_objects = 0; - unsigned long count = 0; + unsigned long num_objects; + unsigned long count; - spin_lock(&i915->mm.obj_lock); - list_for_each_entry(obj, &i915->mm.unbound_list, mm.link) - if (can_release_pages(obj)) { - count += obj->base.size >> PAGE_SHIFT; - num_objects++; - } + count = READ_ONCE(i915->mm.shrink_memory) >> PAGE_SHIFT; + num_objects = READ_ONCE(i915->mm.shrink_count); - list_for_each_entry(obj, &i915->mm.bound_list, mm.link) - if (!i915_gem_object_is_active(obj) && can_release_pages(obj)) { - count += obj->base.size >> PAGE_SHIFT; - num_objects++; - } - list_for_each_entry(obj, &i915->mm.purge_list, mm.link) - if (!i915_gem_object_is_active(obj) && can_release_pages(obj)) { - count += obj->base.size >> PAGE_SHIFT; - num_objects++; - } - spin_unlock(&i915->mm.obj_lock); - - /* Update our preferred vmscan batch size for the next pass. + /* + * Update our preferred vmscan batch size for the next pass. * Our rough guess for an effective batch size is roughly 2 * available GEM objects worth of pages. 
That is we don't want * the shrinker to fire, until it is worth the cost of freeing an diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c index 9080a736663a..84d4f549eb21 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c @@ -690,7 +690,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv mutex_unlock(&ggtt->vm.mutex); spin_lock(&dev_priv->mm.obj_lock); - list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list); + GEM_BUG_ON(i915_gem_object_is_shrinkable(obj)); obj->bind_count++; spin_unlock(&dev_priv->mm.obj_lock); diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 74afdeff2245..f212241a2758 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -271,7 +271,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data) unsigned long total, count, n; int ret; - total = READ_ONCE(dev_priv->mm.object_count); + total = READ_ONCE(dev_priv->mm.shrink_count); objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL); if (!objects) return -ENOMEM; @@ -460,9 +460,9 @@ static int i915_gem_object_info(struct seq_file *m, void *data) char buf[80]; int ret; - seq_printf(m, "%u objects, %llu bytes\n", - dev_priv->mm.object_count, - dev_priv->mm.object_memory); + seq_printf(m, "%u shrinkable objects, %llu bytes\n", + dev_priv->mm.shrink_count, + dev_priv->mm.shrink_memory); size = count = 0; mapped_size = mapped_count = 0; @@ -552,55 +552,6 @@ static int i915_gem_object_info(struct seq_file *m, void *data) return 0; } -static int i915_gem_gtt_info(struct seq_file *m, void *data) -{ - struct drm_info_node *node = m->private; - struct drm_i915_private *dev_priv = node_to_i915(node); - struct drm_device *dev = &dev_priv->drm; - struct drm_i915_gem_object **objects; - struct drm_i915_gem_object *obj; - u64 total_obj_size, total_gtt_size; - unsigned long nobject, n; - int count, ret; - - nobject = READ_ONCE(dev_priv->mm.object_count); - objects = kvmalloc_array(nobject, sizeof(*objects), GFP_KERNEL); - if (!objects) - return -ENOMEM; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; - - count = 0; - spin_lock(&dev_priv->mm.obj_lock); - list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) { - objects[count++] = obj; - if (count == nobject) - break; - } - spin_unlock(&dev_priv->mm.obj_lock); - - total_obj_size = total_gtt_size = 0; - for (n = 0; n < count; n++) { - obj = objects[n]; - - seq_puts(m, " "); - describe_obj(m, obj); - seq_putc(m, '\n'); - total_obj_size += obj->base.size; - total_gtt_size += i915_gem_obj_total_ggtt_size(obj); - } - - mutex_unlock(&dev->struct_mutex); - - seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n", - count, total_obj_size, total_gtt_size); - kvfree(objects); - - return 0; -} - static int i915_gem_batch_pool_info(struct seq_file *m, void *data) { struct drm_i915_private *dev_priv = node_to_i915(m->private); @@ -4582,7 +4533,6 @@ static const struct file_operations i915_fifo_underrun_reset_ops = { static const struct drm_info_list i915_debugfs_list[] = { {"i915_capabilities", i915_capabilities, 0}, {"i915_gem_objects", i915_gem_object_info, 0}, - {"i915_gem_gtt", i915_gem_gtt_info, 0}, {"i915_gem_stolen", i915_gem_stolen_list_info }, {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0}, {"i915_gem_interrupt", i915_interrupt_info, 0}, diff --git a/drivers/gpu/drm/i915/i915_drv.h 
b/drivers/gpu/drm/i915/i915_drv.h index dc21955891c7..76f2bf90ed86 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -926,10 +926,9 @@ struct i915_gem_mm { /** Bit 6 swizzling required for Y tiling */ u32 bit_6_swizzle_y; - /* accounting, useful for userland debugging */ - spinlock_t object_stat_lock; - u64 object_memory; - u32 object_count; + /* shrinker accounting, also useful for userland debugging */ + u64 shrink_memory; + u32 shrink_count; }; #define I915_IDLE_ENGINES_TIMEOUT (200) /* in ms */ diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 1362a8803d2a..4739a6307c32 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1137,15 +1137,17 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, if (i915_gem_object_has_pages(obj)) { struct list_head *list; - spin_lock(&i915->mm.obj_lock); - if (obj->mm.madv != I915_MADV_WILLNEED) - list = &i915->mm.purge_list; - else if (obj->bind_count) - list = &i915->mm.bound_list; - else - list = &i915->mm.unbound_list; - list_move_tail(&obj->mm.link, list); - spin_unlock(&i915->mm.obj_lock); + if (i915_gem_object_is_shrinkable(obj)) { + spin_lock(&i915->mm.obj_lock); + if (obj->mm.madv != I915_MADV_WILLNEED) + list = &i915->mm.purge_list; + else if (obj->bind_count) + list = &i915->mm.bound_list; + else + list = &i915->mm.unbound_list; + list_move_tail(&obj->mm.link, list); + spin_unlock(&i915->mm.obj_lock); + } } /* if the object is no longer attached, discard its backing storage */ @@ -1758,7 +1760,6 @@ i915_gem_load_init_fences(struct drm_i915_private *dev_priv) static void i915_gem_init__mm(struct drm_i915_private *i915) { - spin_lock_init(&i915->mm.object_stat_lock); spin_lock_init(&i915->mm.obj_lock); spin_lock_init(&i915->mm.free_lock); @@ -1808,7 +1809,7 @@ void i915_gem_cleanup_early(struct drm_i915_private *dev_priv) i915_gem_drain_freed_objects(dev_priv); GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list)); GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count)); - WARN_ON(dev_priv->mm.object_count); + WARN_ON(dev_priv->mm.shrink_count); cleanup_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu); diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index f640caec4bae..b7fb7d216f77 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -110,7 +110,8 @@ static void __i915_vma_retire(struct i915_active *ref) * so that we don't steal from recently used but inactive objects * (unless we are forced to ofc!) 
*/ - obj_bump_mru(obj); + if (i915_gem_object_is_shrinkable(obj)) + obj_bump_mru(obj); i915_gem_object_put(obj); /* and drop the active reference */ } @@ -677,11 +678,14 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) struct drm_i915_gem_object *obj = vma->obj; spin_lock(&dev_priv->mm.obj_lock); - list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list); - obj->bind_count++; - spin_unlock(&dev_priv->mm.obj_lock); + if (i915_gem_object_is_shrinkable(obj)) + list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list); + + obj->bind_count++; assert_bind_count(obj); + + spin_unlock(&dev_priv->mm.obj_lock); } return 0; @@ -717,9 +721,13 @@ i915_vma_remove(struct i915_vma *vma) struct drm_i915_gem_object *obj = vma->obj; spin_lock(&i915->mm.obj_lock); + + GEM_BUG_ON(obj->bind_count == 0); if (--obj->bind_count == 0 && + i915_gem_object_is_shrinkable(obj) && obj->mm.madv == I915_MADV_WILLNEED) list_move_tail(&obj->mm.link, &i915->mm.unbound_list); + spin_unlock(&i915->mm.obj_lock); /* -- cgit v1.2.3 From 912348b64d091a53c6c25c2a5486bae966f8b379 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Fri, 31 May 2019 11:26:26 +0300 Subject: drm/i915/icl: Ensure port A combo PHY HW state is correct MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Make sure the HW state of the port A combo PHY is correct wrt. the IREFGEN setting. This will force a reprogramming during init or a WARN during uninit if the setting is incorrect. On my ICL RVP I haven't seen this check failing and leading to a forced reinit/WARN, but let's add it still for consistency. Cc: Ville Syrjälä Signed-off-by: Imre Deak Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190531082626.30640-1-imre.deak@intel.com --- drivers/gpu/drm/i915/intel_combo_phy.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_combo_phy.c b/drivers/gpu/drm/i915/intel_combo_phy.c index 98213cc58736..841708da5a56 100644 --- a/drivers/gpu/drm/i915/intel_combo_phy.c +++ b/drivers/gpu/drm/i915/intel_combo_phy.c @@ -198,6 +198,10 @@ static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv, ret = cnl_verify_procmon_ref_values(dev_priv, port); + if (port == PORT_A) + ret &= check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW8(port), + IREFGEN, IREFGEN); + ret &= check_phy_reg(dev_priv, port, ICL_PORT_CL_DW5(port), CL_POWER_DOWN_ENABLE, CL_POWER_DOWN_ENABLE); -- cgit v1.2.3 From 7645b19d9bdd13ef3e32ab3a040b017f9d71f911 Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Fri, 31 May 2019 15:24:08 -0700 Subject: drm/i915: extract intel_display_power.h/c from intel_runtime_pm.h/c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Keep all the device-level PM management in intel_runtime_pm.h/c and move all the display specific bits into their own file. Also add the new header to Makefile.header-test. Apart from the giant code move, the only difference is with the intel_runtime__raw() functions, which are now exposed in the header. The _put() version is also not conditionally compiled anymore since it is ok to always pass the wakeref taken from the _get() to __intel_runtime_pm_put (it is -1 if tracking is disabled). 
Suggested-by: Chris Wilson Signed-off-by: Daniele Ceraolo Spurio Cc: Chris Wilson Cc: Imre Deak Cc: Ville Syrjälä Cc: Jani Nikula Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190531222409.9177-2-daniele.ceraolospurio@intel.com --- drivers/gpu/drm/i915/Makefile | 1 + drivers/gpu/drm/i915/Makefile.header-test | 1 + drivers/gpu/drm/i915/i915_drv.h | 1 + drivers/gpu/drm/i915/intel_display_power.c | 4611 ++++++++++++++++++++++++++ drivers/gpu/drm/i915/intel_display_power.h | 96 + drivers/gpu/drm/i915/intel_runtime_pm.c | 4791 +--------------------------- drivers/gpu/drm/i915/intel_runtime_pm.h | 83 +- 7 files changed, 4811 insertions(+), 4773 deletions(-) create mode 100644 drivers/gpu/drm/i915/intel_display_power.c create mode 100644 drivers/gpu/drm/i915/intel_display_power.h (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index a7850bbffbe0..c0a7b2994077 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -159,6 +159,7 @@ i915-y += intel_audio.o \ intel_combo_phy.o \ intel_connector.o \ intel_display.o \ + intel_display_power.o \ intel_dpio_phy.o \ intel_dpll_mgr.o \ intel_fbc.o \ diff --git a/drivers/gpu/drm/i915/Makefile.header-test b/drivers/gpu/drm/i915/Makefile.header-test index e01cd91dc1c8..6ef3b647ac65 100644 --- a/drivers/gpu/drm/i915/Makefile.header-test +++ b/drivers/gpu/drm/i915/Makefile.header-test @@ -25,6 +25,7 @@ header_test := \ intel_crt.h \ intel_csr.h \ intel_ddi.h \ + intel_display_power.h \ intel_dp.h \ intel_dp_aux_backlight.h \ intel_dp_link_training.h \ diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 76f2bf90ed86..fd5450576728 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -70,6 +70,7 @@ #include "intel_bios.h" #include "intel_device_info.h" #include "intel_display.h" +#include "intel_display_power.h" #include "intel_dpll_mgr.h" #include "intel_frontbuffer.h" #include "intel_opregion.h" diff --git a/drivers/gpu/drm/i915/intel_display_power.c b/drivers/gpu/drm/i915/intel_display_power.c new file mode 100644 index 000000000000..278a7edc94f5 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_display_power.c @@ -0,0 +1,4611 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2019 Intel Corporation + */ + +#include + +#include "i915_drv.h" +#include "i915_irq.h" +#include "intel_cdclk.h" +#include "intel_combo_phy.h" +#include "intel_crt.h" +#include "intel_csr.h" +#include "intel_dp.h" +#include "intel_dpio_phy.h" +#include "intel_drv.h" +#include "intel_hotplug.h" +#include "intel_sideband.h" + +bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv, + enum i915_power_well_id power_well_id); + +const char * +intel_display_power_domain_str(enum intel_display_power_domain domain) +{ + switch (domain) { + case POWER_DOMAIN_DISPLAY_CORE: + return "DISPLAY_CORE"; + case POWER_DOMAIN_PIPE_A: + return "PIPE_A"; + case POWER_DOMAIN_PIPE_B: + return "PIPE_B"; + case POWER_DOMAIN_PIPE_C: + return "PIPE_C"; + case POWER_DOMAIN_PIPE_A_PANEL_FITTER: + return "PIPE_A_PANEL_FITTER"; + case POWER_DOMAIN_PIPE_B_PANEL_FITTER: + return "PIPE_B_PANEL_FITTER"; + case POWER_DOMAIN_PIPE_C_PANEL_FITTER: + return "PIPE_C_PANEL_FITTER"; + case POWER_DOMAIN_TRANSCODER_A: + return "TRANSCODER_A"; + case POWER_DOMAIN_TRANSCODER_B: + return "TRANSCODER_B"; + case POWER_DOMAIN_TRANSCODER_C: + return "TRANSCODER_C"; + case POWER_DOMAIN_TRANSCODER_EDP: + 
return "TRANSCODER_EDP"; + case POWER_DOMAIN_TRANSCODER_EDP_VDSC: + return "TRANSCODER_EDP_VDSC"; + case POWER_DOMAIN_TRANSCODER_DSI_A: + return "TRANSCODER_DSI_A"; + case POWER_DOMAIN_TRANSCODER_DSI_C: + return "TRANSCODER_DSI_C"; + case POWER_DOMAIN_PORT_DDI_A_LANES: + return "PORT_DDI_A_LANES"; + case POWER_DOMAIN_PORT_DDI_B_LANES: + return "PORT_DDI_B_LANES"; + case POWER_DOMAIN_PORT_DDI_C_LANES: + return "PORT_DDI_C_LANES"; + case POWER_DOMAIN_PORT_DDI_D_LANES: + return "PORT_DDI_D_LANES"; + case POWER_DOMAIN_PORT_DDI_E_LANES: + return "PORT_DDI_E_LANES"; + case POWER_DOMAIN_PORT_DDI_F_LANES: + return "PORT_DDI_F_LANES"; + case POWER_DOMAIN_PORT_DDI_A_IO: + return "PORT_DDI_A_IO"; + case POWER_DOMAIN_PORT_DDI_B_IO: + return "PORT_DDI_B_IO"; + case POWER_DOMAIN_PORT_DDI_C_IO: + return "PORT_DDI_C_IO"; + case POWER_DOMAIN_PORT_DDI_D_IO: + return "PORT_DDI_D_IO"; + case POWER_DOMAIN_PORT_DDI_E_IO: + return "PORT_DDI_E_IO"; + case POWER_DOMAIN_PORT_DDI_F_IO: + return "PORT_DDI_F_IO"; + case POWER_DOMAIN_PORT_DSI: + return "PORT_DSI"; + case POWER_DOMAIN_PORT_CRT: + return "PORT_CRT"; + case POWER_DOMAIN_PORT_OTHER: + return "PORT_OTHER"; + case POWER_DOMAIN_VGA: + return "VGA"; + case POWER_DOMAIN_AUDIO: + return "AUDIO"; + case POWER_DOMAIN_AUX_A: + return "AUX_A"; + case POWER_DOMAIN_AUX_B: + return "AUX_B"; + case POWER_DOMAIN_AUX_C: + return "AUX_C"; + case POWER_DOMAIN_AUX_D: + return "AUX_D"; + case POWER_DOMAIN_AUX_E: + return "AUX_E"; + case POWER_DOMAIN_AUX_F: + return "AUX_F"; + case POWER_DOMAIN_AUX_IO_A: + return "AUX_IO_A"; + case POWER_DOMAIN_AUX_TBT1: + return "AUX_TBT1"; + case POWER_DOMAIN_AUX_TBT2: + return "AUX_TBT2"; + case POWER_DOMAIN_AUX_TBT3: + return "AUX_TBT3"; + case POWER_DOMAIN_AUX_TBT4: + return "AUX_TBT4"; + case POWER_DOMAIN_GMBUS: + return "GMBUS"; + case POWER_DOMAIN_INIT: + return "INIT"; + case POWER_DOMAIN_MODESET: + return "MODESET"; + case POWER_DOMAIN_GT_IRQ: + return "GT_IRQ"; + default: + MISSING_CASE(domain); + return "?"; + } +} + +static void intel_power_well_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + DRM_DEBUG_KMS("enabling %s\n", power_well->desc->name); + power_well->desc->ops->enable(dev_priv, power_well); + power_well->hw_enabled = true; +} + +static void intel_power_well_disable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + DRM_DEBUG_KMS("disabling %s\n", power_well->desc->name); + power_well->hw_enabled = false; + power_well->desc->ops->disable(dev_priv, power_well); +} + +static void intel_power_well_get(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + if (!power_well->count++) + intel_power_well_enable(dev_priv, power_well); +} + +static void intel_power_well_put(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + WARN(!power_well->count, "Use count on power well %s is already zero", + power_well->desc->name); + + if (!--power_well->count) + intel_power_well_disable(dev_priv, power_well); +} + +/** + * __intel_display_power_is_enabled - unlocked check for a power domain + * @dev_priv: i915 device instance + * @domain: power domain to check + * + * This is the unlocked version of intel_display_power_is_enabled() and should + * only be used from error capture and recovery code where deadlocks are + * possible. + * + * Returns: + * True when the power domain is enabled, false otherwise. 
+ */ +bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv, + enum intel_display_power_domain domain) +{ + struct i915_power_well *power_well; + bool is_enabled; + + if (dev_priv->runtime_pm.suspended) + return false; + + is_enabled = true; + + for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) { + if (power_well->desc->always_on) + continue; + + if (!power_well->hw_enabled) { + is_enabled = false; + break; + } + } + + return is_enabled; +} + +/** + * intel_display_power_is_enabled - check for a power domain + * @dev_priv: i915 device instance + * @domain: power domain to check + * + * This function can be used to check the hw power domain state. It is mostly + * used in hardware state readout functions. Everywhere else code should rely + * upon explicit power domain reference counting to ensure that the hardware + * block is powered up before accessing it. + * + * Callers must hold the relevant modesetting locks to ensure that concurrent + * threads can't disable the power well while the caller tries to read a few + * registers. + * + * Returns: + * True when the power domain is enabled, false otherwise. + */ +bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv, + enum intel_display_power_domain domain) +{ + struct i915_power_domains *power_domains; + bool ret; + + power_domains = &dev_priv->power_domains; + + mutex_lock(&power_domains->lock); + ret = __intel_display_power_is_enabled(dev_priv, domain); + mutex_unlock(&power_domains->lock); + + return ret; +} + +/* + * Starting with Haswell, we have a "Power Down Well" that can be turned off + * when not needed anymore. We have 4 registers that can request the power well + * to be enabled, and it will only be disabled if none of the registers is + * requesting it to be enabled. + */ +static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv, + u8 irq_pipe_mask, bool has_vga) +{ + struct pci_dev *pdev = dev_priv->drm.pdev; + + /* + * After we re-enable the power well, if we touch VGA register 0x3d5 + * we'll get unclaimed register interrupts. This stops after we write + * anything to the VGA MSR register. The vgacon module uses this + * register all the time, so if we unbind our driver and, as a + * consequence, bind vgacon, we'll get stuck in an infinite loop at + * console_unlock(). So make here we touch the VGA MSR register, making + * sure vgacon can keep working normally without triggering interrupts + * and error messages. + */ + if (has_vga) { + vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO); + outb(inb(VGA_MSR_READ), VGA_MSR_WRITE); + vga_put(pdev, VGA_RSRC_LEGACY_IO); + } + + if (irq_pipe_mask) + gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask); +} + +static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv, + u8 irq_pipe_mask) +{ + if (irq_pipe_mask) + gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask); +} + +static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; + int pw_idx = power_well->desc->hsw.idx; + + /* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. 
*/ + WARN_ON(intel_wait_for_register(&dev_priv->uncore, + regs->driver, + HSW_PWR_WELL_CTL_STATE(pw_idx), + HSW_PWR_WELL_CTL_STATE(pw_idx), + 1)); +} + +static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv, + const struct i915_power_well_regs *regs, + int pw_idx) +{ + u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx); + u32 ret; + + ret = I915_READ(regs->bios) & req_mask ? 1 : 0; + ret |= I915_READ(regs->driver) & req_mask ? 2 : 0; + if (regs->kvmr.reg) + ret |= I915_READ(regs->kvmr) & req_mask ? 4 : 0; + ret |= I915_READ(regs->debug) & req_mask ? 8 : 0; + + return ret; +} + +static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; + int pw_idx = power_well->desc->hsw.idx; + bool disabled; + u32 reqs; + + /* + * Bspec doesn't require waiting for PWs to get disabled, but still do + * this for paranoia. The known cases where a PW will be forced on: + * - a KVMR request on any power well via the KVMR request register + * - a DMC request on PW1 and MISC_IO power wells via the BIOS and + * DEBUG request registers + * Skip the wait in case any of the request bits are set and print a + * diagnostic message. + */ + wait_for((disabled = !(I915_READ(regs->driver) & + HSW_PWR_WELL_CTL_STATE(pw_idx))) || + (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1); + if (disabled) + return; + + DRM_DEBUG_KMS("%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n", + power_well->desc->name, + !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8)); +} + +static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv, + enum skl_power_gate pg) +{ + /* Timeout 5us for PG#0, for other PGs 1us */ + WARN_ON(intel_wait_for_register(&dev_priv->uncore, SKL_FUSE_STATUS, + SKL_FUSE_PG_DIST_STATUS(pg), + SKL_FUSE_PG_DIST_STATUS(pg), 1)); +} + +static void hsw_power_well_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; + int pw_idx = power_well->desc->hsw.idx; + bool wait_fuses = power_well->desc->hsw.has_fuses; + enum skl_power_gate uninitialized_var(pg); + u32 val; + + if (wait_fuses) { + pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) : + SKL_PW_CTL_IDX_TO_PG(pw_idx); + /* + * For PW1 we have to wait both for the PW0/PG0 fuse state + * before enabling the power well and PW1/PG1's own fuse + * state after the enabling. For all other power wells with + * fuses we only have to wait for that PW/PG's fuse state + * after the enabling. 
+ */ + if (pg == SKL_PG1) + gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0); + } + + val = I915_READ(regs->driver); + I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx)); + hsw_wait_for_power_well_enable(dev_priv, power_well); + + /* Display WA #1178: cnl */ + if (IS_CANNONLAKE(dev_priv) && + pw_idx >= GLK_PW_CTL_IDX_AUX_B && + pw_idx <= CNL_PW_CTL_IDX_AUX_F) { + val = I915_READ(CNL_AUX_ANAOVRD1(pw_idx)); + val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS; + I915_WRITE(CNL_AUX_ANAOVRD1(pw_idx), val); + } + + if (wait_fuses) + gen9_wait_for_power_well_fuses(dev_priv, pg); + + hsw_power_well_post_enable(dev_priv, + power_well->desc->hsw.irq_pipe_mask, + power_well->desc->hsw.has_vga); +} + +static void hsw_power_well_disable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; + int pw_idx = power_well->desc->hsw.idx; + u32 val; + + hsw_power_well_pre_disable(dev_priv, + power_well->desc->hsw.irq_pipe_mask); + + val = I915_READ(regs->driver); + I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx)); + hsw_wait_for_power_well_disable(dev_priv, power_well); +} + +#define ICL_AUX_PW_TO_PORT(pw_idx) ((pw_idx) - ICL_PW_CTL_IDX_AUX_A) + +static void +icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; + int pw_idx = power_well->desc->hsw.idx; + enum port port = ICL_AUX_PW_TO_PORT(pw_idx); + u32 val; + + val = I915_READ(regs->driver); + I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx)); + + val = I915_READ(ICL_PORT_CL_DW12(port)); + I915_WRITE(ICL_PORT_CL_DW12(port), val | ICL_LANE_ENABLE_AUX); + + hsw_wait_for_power_well_enable(dev_priv, power_well); + + /* Display WA #1178: icl */ + if (IS_ICELAKE(dev_priv) && + pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B && + !intel_bios_is_port_edp(dev_priv, port)) { + val = I915_READ(ICL_AUX_ANAOVRD1(pw_idx)); + val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS; + I915_WRITE(ICL_AUX_ANAOVRD1(pw_idx), val); + } +} + +static void +icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; + int pw_idx = power_well->desc->hsw.idx; + enum port port = ICL_AUX_PW_TO_PORT(pw_idx); + u32 val; + + val = I915_READ(ICL_PORT_CL_DW12(port)); + I915_WRITE(ICL_PORT_CL_DW12(port), val & ~ICL_LANE_ENABLE_AUX); + + val = I915_READ(regs->driver); + I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx)); + + hsw_wait_for_power_well_disable(dev_priv, power_well); +} + +#define ICL_AUX_PW_TO_CH(pw_idx) \ + ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A) + +static void +icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + enum aux_ch aux_ch = ICL_AUX_PW_TO_CH(power_well->desc->hsw.idx); + u32 val; + + val = I915_READ(DP_AUX_CH_CTL(aux_ch)); + val &= ~DP_AUX_CH_CTL_TBT_IO; + if (power_well->desc->hsw.is_tc_tbt) + val |= DP_AUX_CH_CTL_TBT_IO; + I915_WRITE(DP_AUX_CH_CTL(aux_ch), val); + + hsw_power_well_enable(dev_priv, power_well); +} + +/* + * We should only use the power well if we explicitly asked the hardware to + * enable it, so check if it's enabled and also check if we've requested it to + * be enabled. 
+ */ +static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; + enum i915_power_well_id id = power_well->desc->id; + int pw_idx = power_well->desc->hsw.idx; + u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) | + HSW_PWR_WELL_CTL_STATE(pw_idx); + u32 val; + + val = I915_READ(regs->driver); + + /* + * On GEN9 big core due to a DMC bug the driver's request bits for PW1 + * and the MISC_IO PW will be not restored, so check instead for the + * BIOS's own request bits, which are forced-on for these power wells + * when exiting DC5/6. + */ + if (IS_GEN(dev_priv, 9) && !IS_GEN9_LP(dev_priv) && + (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO)) + val |= I915_READ(regs->bios); + + return (val & mask) == mask; +} + +static void assert_can_enable_dc9(struct drm_i915_private *dev_priv) +{ + WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9), + "DC9 already programmed to be enabled.\n"); + WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5, + "DC5 still not disabled to enable DC9.\n"); + WARN_ONCE(I915_READ(HSW_PWR_WELL_CTL2) & + HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2), + "Power well 2 on.\n"); + WARN_ONCE(intel_irqs_enabled(dev_priv), + "Interrupts not disabled yet.\n"); + + /* + * TODO: check for the following to verify the conditions to enter DC9 + * state are satisfied: + * 1] Check relevant display engine registers to verify if mode set + * disable sequence was followed. + * 2] Check if display uninitialize sequence is initialized. + */ +} + +static void assert_can_disable_dc9(struct drm_i915_private *dev_priv) +{ + WARN_ONCE(intel_irqs_enabled(dev_priv), + "Interrupts not disabled yet.\n"); + WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5, + "DC5 still not disabled.\n"); + + /* + * TODO: check for the following to verify DC9 state was indeed + * entered before programming to disable it: + * 1] Check relevant display engine registers to verify if mode + * set disable sequence was followed. + * 2] Check if display uninitialize sequence is initialized. + */ +} + +static void gen9_write_dc_state(struct drm_i915_private *dev_priv, + u32 state) +{ + int rewrites = 0; + int rereads = 0; + u32 v; + + I915_WRITE(DC_STATE_EN, state); + + /* It has been observed that disabling the dc6 state sometimes + * doesn't stick and dmc keeps returning old value. Make sure + * the write really sticks enough times and also force rewrite until + * we are confident that state is exactly what we want. 
+ */ + do { + v = I915_READ(DC_STATE_EN); + + if (v != state) { + I915_WRITE(DC_STATE_EN, state); + rewrites++; + rereads = 0; + } else if (rereads++ > 5) { + break; + } + + } while (rewrites < 100); + + if (v != state) + DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n", + state, v); + + /* Most of the times we need one retry, avoid spam */ + if (rewrites > 1) + DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n", + state, rewrites); +} + +static u32 gen9_dc_mask(struct drm_i915_private *dev_priv) +{ + u32 mask; + + mask = DC_STATE_EN_UPTO_DC5; + if (INTEL_GEN(dev_priv) >= 11) + mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9; + else if (IS_GEN9_LP(dev_priv)) + mask |= DC_STATE_EN_DC9; + else + mask |= DC_STATE_EN_UPTO_DC6; + + return mask; +} + +void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv) +{ + u32 val; + + val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv); + + DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n", + dev_priv->csr.dc_state, val); + dev_priv->csr.dc_state = val; +} + +/** + * gen9_set_dc_state - set target display C power state + * @dev_priv: i915 device instance + * @state: target DC power state + * - DC_STATE_DISABLE + * - DC_STATE_EN_UPTO_DC5 + * - DC_STATE_EN_UPTO_DC6 + * - DC_STATE_EN_DC9 + * + * Signal to DMC firmware/HW the target DC power state passed in @state. + * DMC/HW can turn off individual display clocks and power rails when entering + * a deeper DC power state (higher in number) and turns these back when exiting + * that state to a shallower power state (lower in number). The HW will decide + * when to actually enter a given state on an on-demand basis, for instance + * depending on the active state of display pipes. The state of display + * registers backed by affected power rails are saved/restored as needed. + * + * Based on the above enabling a deeper DC power state is asynchronous wrt. + * enabling it. Disabling a deeper power state is synchronous: for instance + * setting %DC_STATE_DISABLE won't complete until all HW resources are turned + * back on and register state is restored. This is guaranteed by the MMIO write + * to DC_STATE_EN blocking until the state is restored. + */ +static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state) +{ + u32 val; + u32 mask; + + if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask)) + state &= dev_priv->csr.allowed_dc_mask; + + val = I915_READ(DC_STATE_EN); + mask = gen9_dc_mask(dev_priv); + DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n", + val & mask, state); + + /* Check if DMC is ignoring our DC state requests */ + if ((val & mask) != dev_priv->csr.dc_state) + DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n", + dev_priv->csr.dc_state, val & mask); + + val &= ~mask; + val |= state; + + gen9_write_dc_state(dev_priv, val); + + dev_priv->csr.dc_state = val & mask; +} + +void bxt_enable_dc9(struct drm_i915_private *dev_priv) +{ + assert_can_enable_dc9(dev_priv); + + DRM_DEBUG_KMS("Enabling DC9\n"); + /* + * Power sequencer reset is not needed on + * platforms with South Display Engine on PCH, + * because PPS registers are always on. 
+ */ + if (!HAS_PCH_SPLIT(dev_priv)) + intel_power_sequencer_reset(dev_priv); + gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9); +} + +void bxt_disable_dc9(struct drm_i915_private *dev_priv) +{ + assert_can_disable_dc9(dev_priv); + + DRM_DEBUG_KMS("Disabling DC9\n"); + + gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); + + intel_pps_unlock_regs_wa(dev_priv); +} + +static void assert_csr_loaded(struct drm_i915_private *dev_priv) +{ + WARN_ONCE(!I915_READ(CSR_PROGRAM(0)), + "CSR program storage start is NULL\n"); + WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n"); + WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n"); +} + +static struct i915_power_well * +lookup_power_well(struct drm_i915_private *dev_priv, + enum i915_power_well_id power_well_id) +{ + struct i915_power_well *power_well; + + for_each_power_well(dev_priv, power_well) + if (power_well->desc->id == power_well_id) + return power_well; + + /* + * It's not feasible to add error checking code to the callers since + * this condition really shouldn't happen and it doesn't even make sense + * to abort things like display initialization sequences. Just return + * the first power well and hope the WARN gets reported so we can fix + * our driver. + */ + WARN(1, "Power well %d not defined for this platform\n", power_well_id); + return &dev_priv->power_domains.power_wells[0]; +} + +static void assert_can_enable_dc5(struct drm_i915_private *dev_priv) +{ + bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv, + SKL_DISP_PW_2); + + WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n"); + + WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5), + "DC5 already programmed to be enabled.\n"); + assert_rpm_wakelock_held(dev_priv); + + assert_csr_loaded(dev_priv); +} + +void gen9_enable_dc5(struct drm_i915_private *dev_priv) +{ + assert_can_enable_dc5(dev_priv); + + DRM_DEBUG_KMS("Enabling DC5\n"); + + /* Wa Display #1183: skl,kbl,cfl */ + if (IS_GEN9_BC(dev_priv)) + I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) | + SKL_SELECT_ALTERNATE_DC_EXIT); + + gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5); +} + +static void assert_can_enable_dc6(struct drm_i915_private *dev_priv) +{ + WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE, + "Backlight is not disabled.\n"); + WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6), + "DC6 already programmed to be enabled.\n"); + + assert_csr_loaded(dev_priv); +} + +void skl_enable_dc6(struct drm_i915_private *dev_priv) +{ + assert_can_enable_dc6(dev_priv); + + DRM_DEBUG_KMS("Enabling DC6\n"); + + /* Wa Display #1183: skl,kbl,cfl */ + if (IS_GEN9_BC(dev_priv)) + I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) | + SKL_SELECT_ALTERNATE_DC_EXIT); + + gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); +} + +static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; + int pw_idx = power_well->desc->hsw.idx; + u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx); + u32 bios_req = I915_READ(regs->bios); + + /* Take over the request bit if set by BIOS. 
*/ + if (bios_req & mask) { + u32 drv_req = I915_READ(regs->driver); + + if (!(drv_req & mask)) + I915_WRITE(regs->driver, drv_req | mask); + I915_WRITE(regs->bios, bios_req & ~mask); + } +} + +static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy); +} + +static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy); +} + +static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy); +} + +static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv) +{ + struct i915_power_well *power_well; + + power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A); + if (power_well->count > 0) + bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy); + + power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC); + if (power_well->count > 0) + bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy); + + if (IS_GEMINILAKE(dev_priv)) { + power_well = lookup_power_well(dev_priv, + GLK_DISP_PW_DPIO_CMN_C); + if (power_well->count > 0) + bxt_ddi_phy_verify_state(dev_priv, + power_well->desc->bxt.phy); + } +} + +static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0; +} + +static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv) +{ + u32 tmp = I915_READ(DBUF_CTL); + + WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) != + (DBUF_POWER_STATE | DBUF_POWER_REQUEST), + "Unexpected DBuf power power state (0x%08x)\n", tmp); +} + +static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + struct intel_cdclk_state cdclk_state = {}; + + gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); + + dev_priv->display.get_cdclk(dev_priv, &cdclk_state); + /* Can't read out voltage_level so can't use intel_cdclk_changed() */ + WARN_ON(intel_cdclk_needs_modeset(&dev_priv->cdclk.hw, &cdclk_state)); + + gen9_assert_dbuf_enabled(dev_priv); + + if (IS_GEN9_LP(dev_priv)) + bxt_verify_ddi_phy_power_wells(dev_priv); + + if (INTEL_GEN(dev_priv) >= 11) + /* + * DMC retains HW context only for port A, the other combo + * PHY's HW context for port B is lost after DC transitions, + * so we need to restore it manually. 
+ */ + intel_combo_phy_init(dev_priv); +} + +static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + if (!dev_priv->csr.dmc_payload) + return; + + if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6) + skl_enable_dc6(dev_priv); + else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5) + gen9_enable_dc5(dev_priv); +} + +static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ +} + +static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ +} + +static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + return true; +} + +static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + if ((I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0) + i830_enable_pipe(dev_priv, PIPE_A); + if ((I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0) + i830_enable_pipe(dev_priv, PIPE_B); +} + +static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + i830_disable_pipe(dev_priv, PIPE_B); + i830_disable_pipe(dev_priv, PIPE_A); +} + +static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + return I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE && + I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE; +} + +static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + if (power_well->count > 0) + i830_pipes_power_well_enable(dev_priv, power_well); + else + i830_pipes_power_well_disable(dev_priv, power_well); +} + +static void vlv_set_power_well(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well, bool enable) +{ + int pw_idx = power_well->desc->vlv.idx; + u32 mask; + u32 state; + u32 ctrl; + + mask = PUNIT_PWRGT_MASK(pw_idx); + state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) : + PUNIT_PWRGT_PWR_GATE(pw_idx); + + vlv_punit_get(dev_priv); + +#define COND \ + ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state) + + if (COND) + goto out; + + ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL); + ctrl &= ~mask; + ctrl |= state; + vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl); + + if (wait_for(COND, 100)) + DRM_ERROR("timeout setting power well state %08x (%08x)\n", + state, + vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL)); + +#undef COND + +out: + vlv_punit_put(dev_priv); +} + +static void vlv_power_well_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + vlv_set_power_well(dev_priv, power_well, true); +} + +static void vlv_power_well_disable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + vlv_set_power_well(dev_priv, power_well, false); +} + +static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + int pw_idx = power_well->desc->vlv.idx; + bool enabled = false; + u32 mask; + u32 state; + u32 ctrl; + + mask = PUNIT_PWRGT_MASK(pw_idx); + ctrl = PUNIT_PWRGT_PWR_ON(pw_idx); + + vlv_punit_get(dev_priv); + + state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask; + /* + * We only ever set the power-on and power-gate states, anything + * else is unexpected. 
+ */ + WARN_ON(state != PUNIT_PWRGT_PWR_ON(pw_idx) && + state != PUNIT_PWRGT_PWR_GATE(pw_idx)); + if (state == ctrl) + enabled = true; + + /* + * A transient state at this point would mean some unexpected party + * is poking at the power controls too. + */ + ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask; + WARN_ON(ctrl != state); + + vlv_punit_put(dev_priv); + + return enabled; +} + +static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv) +{ + u32 val; + + /* + * On driver load, a pipe may be active and driving a DSI display. + * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck + * (and never recovering) in this case. intel_dsi_post_disable() will + * clear it when we turn off the display. + */ + val = I915_READ(DSPCLK_GATE_D); + val &= DPOUNIT_CLOCK_GATE_DISABLE; + val |= VRHUNIT_CLOCK_GATE_DISABLE; + I915_WRITE(DSPCLK_GATE_D, val); + + /* + * Disable trickle feed and enable pnd deadline calculation + */ + I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE); + I915_WRITE(CBR1_VLV, 0); + + WARN_ON(dev_priv->rawclk_freq == 0); + + I915_WRITE(RAWCLK_FREQ_VLV, + DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000)); +} + +static void vlv_display_power_well_init(struct drm_i915_private *dev_priv) +{ + struct intel_encoder *encoder; + enum pipe pipe; + + /* + * Enable the CRI clock source so we can get at the + * display and the reference clock for VGA + * hotplug / manual detection. Supposedly DSI also + * needs the ref clock up and running. + * + * CHV DPLL B/C have some issues if VGA mode is enabled. + */ + for_each_pipe(dev_priv, pipe) { + u32 val = I915_READ(DPLL(pipe)); + + val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; + if (pipe != PIPE_A) + val |= DPLL_INTEGRATED_CRI_CLK_VLV; + + I915_WRITE(DPLL(pipe), val); + } + + vlv_init_display_clock_gating(dev_priv); + + spin_lock_irq(&dev_priv->irq_lock); + valleyview_enable_display_irqs(dev_priv); + spin_unlock_irq(&dev_priv->irq_lock); + + /* + * During driver initialization/resume we can avoid restoring the + * part of the HW/SW state that will be inited anyway explicitly. 
+ */ + if (dev_priv->power_domains.initializing) + return; + + intel_hpd_init(dev_priv); + + /* Re-enable the ADPA, if we have one */ + for_each_intel_encoder(&dev_priv->drm, encoder) { + if (encoder->type == INTEL_OUTPUT_ANALOG) + intel_crt_reset(&encoder->base); + } + + i915_redisable_vga_power_on(dev_priv); + + intel_pps_unlock_regs_wa(dev_priv); +} + +static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv) +{ + spin_lock_irq(&dev_priv->irq_lock); + valleyview_disable_display_irqs(dev_priv); + spin_unlock_irq(&dev_priv->irq_lock); + + /* make sure we're done processing display irqs */ + synchronize_irq(dev_priv->drm.irq); + + intel_power_sequencer_reset(dev_priv); + + /* Prevent us from re-enabling polling on accident in late suspend */ + if (!dev_priv->drm.dev->power.is_suspended) + intel_hpd_poll_init(dev_priv); +} + +static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + vlv_set_power_well(dev_priv, power_well, true); + + vlv_display_power_well_init(dev_priv); +} + +static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + vlv_display_power_well_deinit(dev_priv); + + vlv_set_power_well(dev_priv, power_well, false); +} + +static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + /* since ref/cri clock was enabled */ + udelay(1); /* >10ns for cmnreset, >0ns for sidereset */ + + vlv_set_power_well(dev_priv, power_well, true); + + /* + * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx - + * 6. De-assert cmn_reset/side_reset. Same as VLV X0. + * a. GUnit 0x2110 bit[0] set to 1 (def 0) + * b. The other bits such as sfr settings / modesel may all + * be set to 0. + * + * This should only be done on init and resume from S3 with + * both PLLs disabled, or we risk losing DPIO and PLL + * synchronization. + */ + I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST); +} + +static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + enum pipe pipe; + + for_each_pipe(dev_priv, pipe) + assert_pll_disabled(dev_priv, pipe); + + /* Assert common reset */ + I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST); + + vlv_set_power_well(dev_priv, power_well, false); +} + +#define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0)) + +#define BITS_SET(val, bits) (((val) & (bits)) == (bits)) + +static void assert_chv_phy_status(struct drm_i915_private *dev_priv) +{ + struct i915_power_well *cmn_bc = + lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC); + struct i915_power_well *cmn_d = + lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D); + u32 phy_control = dev_priv->chv_phy_control; + u32 phy_status = 0; + u32 phy_status_mask = 0xffffffff; + + /* + * The BIOS can leave the PHY is some weird state + * where it doesn't fully power down some parts. + * Disable the asserts until the PHY has been fully + * reset (ie. the power well has been disabled at + * least once). 
+ */ + if (!dev_priv->chv_phy_assert[DPIO_PHY0]) + phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) | + PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) | + PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) | + PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) | + PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) | + PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1)); + + if (!dev_priv->chv_phy_assert[DPIO_PHY1]) + phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) | + PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) | + PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1)); + + if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) { + phy_status |= PHY_POWERGOOD(DPIO_PHY0); + + /* this assumes override is only used to enable lanes */ + if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0) + phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0); + + if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0) + phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1); + + /* CL1 is on whenever anything is on in either channel */ + if (BITS_SET(phy_control, + PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) | + PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1))) + phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0); + + /* + * The DPLLB check accounts for the pipe B + port A usage + * with CL2 powered up but all the lanes in the second channel + * powered down. + */ + if (BITS_SET(phy_control, + PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) && + (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0) + phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1); + + if (BITS_SET(phy_control, + PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0))) + phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0); + if (BITS_SET(phy_control, + PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0))) + phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1); + + if (BITS_SET(phy_control, + PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1))) + phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0); + if (BITS_SET(phy_control, + PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1))) + phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1); + } + + if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) { + phy_status |= PHY_POWERGOOD(DPIO_PHY1); + + /* this assumes override is only used to enable lanes */ + if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0) + phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0); + + if (BITS_SET(phy_control, + PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0))) + phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0); + + if (BITS_SET(phy_control, + PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0))) + phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0); + if (BITS_SET(phy_control, + PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0))) + phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1); + } + + phy_status &= phy_status_mask; + + /* + * The PHY may be busy with some initial calibration and whatnot, + * so the power state can take a while to actually change. 
+ */ + if (intel_wait_for_register(&dev_priv->uncore, + DISPLAY_PHY_STATUS, + phy_status_mask, + phy_status, + 10)) + DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n", + I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask, + phy_status, dev_priv->chv_phy_control); +} + +#undef BITS_SET + +static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + enum dpio_phy phy; + enum pipe pipe; + u32 tmp; + + WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC && + power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D); + + if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) { + pipe = PIPE_A; + phy = DPIO_PHY0; + } else { + pipe = PIPE_C; + phy = DPIO_PHY1; + } + + /* since ref/cri clock was enabled */ + udelay(1); /* >10ns for cmnreset, >0ns for sidereset */ + vlv_set_power_well(dev_priv, power_well, true); + + /* Poll for phypwrgood signal */ + if (intel_wait_for_register(&dev_priv->uncore, + DISPLAY_PHY_STATUS, + PHY_POWERGOOD(phy), + PHY_POWERGOOD(phy), + 1)) + DRM_ERROR("Display PHY %d is not power up\n", phy); + + vlv_dpio_get(dev_priv); + + /* Enable dynamic power down */ + tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28); + tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN | + DPIO_SUS_CLK_CONFIG_GATE_CLKREQ; + vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp); + + if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) { + tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1); + tmp |= DPIO_DYNPWRDOWNEN_CH1; + vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp); + } else { + /* + * Force the non-existing CL2 off. BXT does this + * too, so maybe it saves some power even though + * CL2 doesn't exist? + */ + tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30); + tmp |= DPIO_CL2_LDOFUSE_PWRENB; + vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp); + } + + vlv_dpio_put(dev_priv); + + dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy); + I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control); + + DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n", + phy, dev_priv->chv_phy_control); + + assert_chv_phy_status(dev_priv); +} + +static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + enum dpio_phy phy; + + WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC && + power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D); + + if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) { + phy = DPIO_PHY0; + assert_pll_disabled(dev_priv, PIPE_A); + assert_pll_disabled(dev_priv, PIPE_B); + } else { + phy = DPIO_PHY1; + assert_pll_disabled(dev_priv, PIPE_C); + } + + dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy); + I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control); + + vlv_set_power_well(dev_priv, power_well, false); + + DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n", + phy, dev_priv->chv_phy_control); + + /* PHY is fully reset now, so we can enable the PHY state asserts */ + dev_priv->chv_phy_assert[phy] = true; + + assert_chv_phy_status(dev_priv); +} + +static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy, + enum dpio_channel ch, bool override, unsigned int mask) +{ + enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C; + u32 reg, val, expected, actual; + + /* + * The BIOS can leave the PHY is some weird state + * where it doesn't fully power down some parts. + * Disable the asserts until the PHY has been fully + * reset (ie. 
the power well has been disabled at + * least once). + */ + if (!dev_priv->chv_phy_assert[phy]) + return; + + if (ch == DPIO_CH0) + reg = _CHV_CMN_DW0_CH0; + else + reg = _CHV_CMN_DW6_CH1; + + vlv_dpio_get(dev_priv); + val = vlv_dpio_read(dev_priv, pipe, reg); + vlv_dpio_put(dev_priv); + + /* + * This assumes !override is only used when the port is disabled. + * All lanes should power down even without the override when + * the port is disabled. + */ + if (!override || mask == 0xf) { + expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN; + /* + * If CH1 common lane is not active anymore + * (eg. for pipe B DPLL) the entire channel will + * shut down, which causes the common lane registers + * to read as 0. That means we can't actually check + * the lane power down status bits, but as the entire + * register reads as 0 it's a good indication that the + * channel is indeed entirely powered down. + */ + if (ch == DPIO_CH1 && val == 0) + expected = 0; + } else if (mask != 0x0) { + expected = DPIO_ANYDL_POWERDOWN; + } else { + expected = 0; + } + + if (ch == DPIO_CH0) + actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0; + else + actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1; + actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN; + + WARN(actual != expected, + "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n", + !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN), + !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN), + reg, val); +} + +bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy, + enum dpio_channel ch, bool override) +{ + struct i915_power_domains *power_domains = &dev_priv->power_domains; + bool was_override; + + mutex_lock(&power_domains->lock); + + was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); + + if (override == was_override) + goto out; + + if (override) + dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); + else + dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); + + I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control); + + DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n", + phy, ch, dev_priv->chv_phy_control); + + assert_chv_phy_status(dev_priv); + +out: + mutex_unlock(&power_domains->lock); + + return was_override; +} + +void chv_phy_powergate_lanes(struct intel_encoder *encoder, + bool override, unsigned int mask) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct i915_power_domains *power_domains = &dev_priv->power_domains; + enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base)); + enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base)); + + mutex_lock(&power_domains->lock); + + dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch); + dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch); + + if (override) + dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); + else + dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); + + I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control); + + DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n", + phy, ch, mask, dev_priv->chv_phy_control); + + assert_chv_phy_status(dev_priv); + + assert_chv_phy_powergate(dev_priv, phy, ch, override, mask); + + mutex_unlock(&power_domains->lock); +} + +static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv, + struct i915_power_well 
*power_well) +{ + enum pipe pipe = PIPE_A; + bool enabled; + u32 state, ctrl; + + vlv_punit_get(dev_priv); + + state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe); + /* + * We only ever set the power-on and power-gate states, anything + * else is unexpected. + */ + WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe)); + enabled = state == DP_SSS_PWR_ON(pipe); + + /* + * A transient state at this point would mean some unexpected party + * is poking at the power controls too. + */ + ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe); + WARN_ON(ctrl << 16 != state); + + vlv_punit_put(dev_priv); + + return enabled; +} + +static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well, + bool enable) +{ + enum pipe pipe = PIPE_A; + u32 state; + u32 ctrl; + + state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe); + + vlv_punit_get(dev_priv); + +#define COND \ + ((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state) + + if (COND) + goto out; + + ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM); + ctrl &= ~DP_SSC_MASK(pipe); + ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe); + vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl); + + if (wait_for(COND, 100)) + DRM_ERROR("timeout setting power well state %08x (%08x)\n", + state, + vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM)); + +#undef COND + +out: + vlv_punit_put(dev_priv); +} + +static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + chv_set_pipe_power_well(dev_priv, power_well, true); + + vlv_display_power_well_init(dev_priv); +} + +static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + vlv_display_power_well_deinit(dev_priv); + + chv_set_pipe_power_well(dev_priv, power_well, false); +} + +static u64 __async_put_domains_mask(struct i915_power_domains *power_domains) +{ + return power_domains->async_put_domains[0] | + power_domains->async_put_domains[1]; +} + +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) + +static bool +assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains) +{ + return !WARN_ON(power_domains->async_put_domains[0] & + power_domains->async_put_domains[1]); +} + +static bool +__async_put_domains_state_ok(struct i915_power_domains *power_domains) +{ + enum intel_display_power_domain domain; + bool err = false; + + err |= !assert_async_put_domain_masks_disjoint(power_domains); + err |= WARN_ON(!!power_domains->async_put_wakeref != + !!__async_put_domains_mask(power_domains)); + + for_each_power_domain(domain, __async_put_domains_mask(power_domains)) + err |= WARN_ON(power_domains->domain_use_count[domain] != 1); + + return !err; +} + +static void print_power_domains(struct i915_power_domains *power_domains, + const char *prefix, u64 mask) +{ + enum intel_display_power_domain domain; + + DRM_DEBUG_DRIVER("%s (%lu):\n", prefix, hweight64(mask)); + for_each_power_domain(domain, mask) + DRM_DEBUG_DRIVER("%s use_count %d\n", + intel_display_power_domain_str(domain), + power_domains->domain_use_count[domain]); +} + +static void +print_async_put_domains_state(struct i915_power_domains *power_domains) +{ + DRM_DEBUG_DRIVER("async_put_wakeref %u\n", + power_domains->async_put_wakeref); + + print_power_domains(power_domains, "async_put_domains[0]", + power_domains->async_put_domains[0]); + print_power_domains(power_domains, "async_put_domains[1]", + 
power_domains->async_put_domains[1]); +} + +static void +verify_async_put_domains_state(struct i915_power_domains *power_domains) +{ + if (!__async_put_domains_state_ok(power_domains)) + print_async_put_domains_state(power_domains); +} + +#else + +static void +assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains) +{ +} + +static void +verify_async_put_domains_state(struct i915_power_domains *power_domains) +{ +} + +#endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */ + +static u64 async_put_domains_mask(struct i915_power_domains *power_domains) +{ + assert_async_put_domain_masks_disjoint(power_domains); + + return __async_put_domains_mask(power_domains); +} + +static void +async_put_domains_clear_domain(struct i915_power_domains *power_domains, + enum intel_display_power_domain domain) +{ + assert_async_put_domain_masks_disjoint(power_domains); + + power_domains->async_put_domains[0] &= ~BIT_ULL(domain); + power_domains->async_put_domains[1] &= ~BIT_ULL(domain); +} + +static bool +intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv, + enum intel_display_power_domain domain) +{ + struct i915_power_domains *power_domains = &dev_priv->power_domains; + bool ret = false; + + if (!(async_put_domains_mask(power_domains) & BIT_ULL(domain))) + goto out_verify; + + async_put_domains_clear_domain(power_domains, domain); + + ret = true; + + if (async_put_domains_mask(power_domains)) + goto out_verify; + + cancel_delayed_work(&power_domains->async_put_work); + intel_runtime_pm_put_raw(dev_priv, + fetch_and_zero(&power_domains->async_put_wakeref)); +out_verify: + verify_async_put_domains_state(power_domains); + + return ret; +} + +static void +__intel_display_power_get_domain(struct drm_i915_private *dev_priv, + enum intel_display_power_domain domain) +{ + struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_well *power_well; + + if (intel_display_power_grab_async_put_ref(dev_priv, domain)) + return; + + for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain)) + intel_power_well_get(dev_priv, power_well); + + power_domains->domain_use_count[domain]++; +} + +/** + * intel_display_power_get - grab a power domain reference + * @dev_priv: i915 device instance + * @domain: power domain to reference + * + * This function grabs a power domain reference for @domain and ensures that the + * power domain and all its parents are powered up. Therefore users should only + * grab a reference to the innermost power domain they need. + * + * Any power domain reference obtained by this function must have a symmetric + * call to intel_display_power_put() to release the reference again. + */ +intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv, + enum intel_display_power_domain domain) +{ + struct i915_power_domains *power_domains = &dev_priv->power_domains; + intel_wakeref_t wakeref = intel_runtime_pm_get(dev_priv); + + mutex_lock(&power_domains->lock); + __intel_display_power_get_domain(dev_priv, domain); + mutex_unlock(&power_domains->lock); + + return wakeref; +} + +/** + * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain + * @dev_priv: i915 device instance + * @domain: power domain to reference + * + * This function grabs a power domain reference for @domain and ensures that the + * power domain and all its parents are powered up. Therefore users should only + * grab a reference to the innermost power domain they need. 
+ * + * Any power domain reference obtained by this function must have a symmetric + * call to intel_display_power_put() to release the reference again. + */ +intel_wakeref_t +intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv, + enum intel_display_power_domain domain) +{ + struct i915_power_domains *power_domains = &dev_priv->power_domains; + intel_wakeref_t wakeref; + bool is_enabled; + + wakeref = intel_runtime_pm_get_if_in_use(dev_priv); + if (!wakeref) + return false; + + mutex_lock(&power_domains->lock); + + if (__intel_display_power_is_enabled(dev_priv, domain)) { + __intel_display_power_get_domain(dev_priv, domain); + is_enabled = true; + } else { + is_enabled = false; + } + + mutex_unlock(&power_domains->lock); + + if (!is_enabled) { + intel_runtime_pm_put(dev_priv, wakeref); + wakeref = 0; + } + + return wakeref; +} + +static void +__intel_display_power_put_domain(struct drm_i915_private *dev_priv, + enum intel_display_power_domain domain) +{ + struct i915_power_domains *power_domains; + struct i915_power_well *power_well; + const char *name = intel_display_power_domain_str(domain); + + power_domains = &dev_priv->power_domains; + + WARN(!power_domains->domain_use_count[domain], + "Use count on domain %s is already zero\n", + name); + WARN(async_put_domains_mask(power_domains) & BIT_ULL(domain), + "Async disabling of domain %s is pending\n", + name); + + power_domains->domain_use_count[domain]--; + + for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) + intel_power_well_put(dev_priv, power_well); +} + +static void __intel_display_power_put(struct drm_i915_private *dev_priv, + enum intel_display_power_domain domain) +{ + struct i915_power_domains *power_domains = &dev_priv->power_domains; + + mutex_lock(&power_domains->lock); + __intel_display_power_put_domain(dev_priv, domain); + mutex_unlock(&power_domains->lock); +} + +/** + * intel_display_power_put_unchecked - release an unchecked power domain reference + * @dev_priv: i915 device instance + * @domain: power domain to reference + * + * This function drops the power domain reference obtained by + * intel_display_power_get() and might power down the corresponding hardware + * block right away if this is the last reference. + * + * This function exists only for historical reasons and should be avoided in + * new code, as the correctness of its use cannot be checked. Always use + * intel_display_power_put() instead. + */ +void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv, + enum intel_display_power_domain domain) +{ + __intel_display_power_put(dev_priv, domain); + intel_runtime_pm_put_unchecked(dev_priv); +} + +static void +queue_async_put_domains_work(struct i915_power_domains *power_domains, + intel_wakeref_t wakeref) +{ + WARN_ON(power_domains->async_put_wakeref); + power_domains->async_put_wakeref = wakeref; + WARN_ON(!queue_delayed_work(system_unbound_wq, + &power_domains->async_put_work, + msecs_to_jiffies(100))); +} + +static void +release_async_put_domains(struct i915_power_domains *power_domains, u64 mask) +{ + struct drm_i915_private *dev_priv = + container_of(power_domains, struct drm_i915_private, + power_domains); + enum intel_display_power_domain domain; + intel_wakeref_t wakeref; + + /* + * The caller must hold already raw wakeref, upgrade that to a proper + * wakeref to make the state checker happy about the HW access during + * power well disabling. 
+ */ + assert_rpm_raw_wakeref_held(dev_priv); + wakeref = intel_runtime_pm_get(dev_priv); + + for_each_power_domain(domain, mask) { + /* Clear before put, so put's sanity check is happy. */ + async_put_domains_clear_domain(power_domains, domain); + __intel_display_power_put_domain(dev_priv, domain); + } + + intel_runtime_pm_put(dev_priv, wakeref); +} + +static void +intel_display_power_put_async_work(struct work_struct *work) +{ + struct drm_i915_private *dev_priv = + container_of(work, struct drm_i915_private, + power_domains.async_put_work.work); + struct i915_power_domains *power_domains = &dev_priv->power_domains; + intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(dev_priv); + intel_wakeref_t old_work_wakeref = 0; + + mutex_lock(&power_domains->lock); + + /* + * Bail out if all the domain refs pending to be released were grabbed + * by subsequent gets or a flush_work. + */ + old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref); + if (!old_work_wakeref) + goto out_verify; + + release_async_put_domains(power_domains, + power_domains->async_put_domains[0]); + + /* Requeue the work if more domains were async put meanwhile. */ + if (power_domains->async_put_domains[1]) { + power_domains->async_put_domains[0] = + fetch_and_zero(&power_domains->async_put_domains[1]); + queue_async_put_domains_work(power_domains, + fetch_and_zero(&new_work_wakeref)); + } + +out_verify: + verify_async_put_domains_state(power_domains); + + mutex_unlock(&power_domains->lock); + + if (old_work_wakeref) + intel_runtime_pm_put_raw(dev_priv, old_work_wakeref); + if (new_work_wakeref) + intel_runtime_pm_put_raw(dev_priv, new_work_wakeref); +} + +/** + * intel_display_power_put_async - release a power domain reference asynchronously + * @i915: i915 device instance + * @domain: power domain to reference + * @wakeref: wakeref acquired for the reference that is being released + * + * This function drops the power domain reference obtained by + * intel_display_power_get*() and schedules a work to power down the + * corresponding hardware block if this is the last reference. + */ +void __intel_display_power_put_async(struct drm_i915_private *i915, + enum intel_display_power_domain domain, + intel_wakeref_t wakeref) +{ + struct i915_power_domains *power_domains = &i915->power_domains; + intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(i915); + + mutex_lock(&power_domains->lock); + + if (power_domains->domain_use_count[domain] > 1) { + __intel_display_power_put_domain(i915, domain); + + goto out_verify; + } + + WARN_ON(power_domains->domain_use_count[domain] != 1); + + /* Let a pending work requeue itself or queue a new one. */ + if (power_domains->async_put_wakeref) { + power_domains->async_put_domains[1] |= BIT_ULL(domain); + } else { + power_domains->async_put_domains[0] |= BIT_ULL(domain); + queue_async_put_domains_work(power_domains, + fetch_and_zero(&work_wakeref)); + } + +out_verify: + verify_async_put_domains_state(power_domains); + + mutex_unlock(&power_domains->lock); + + if (work_wakeref) + intel_runtime_pm_put_raw(i915, work_wakeref); + + intel_runtime_pm_put(i915, wakeref); +} + +/** + * intel_display_power_flush_work - flushes the async display power disabling work + * @i915: i915 device instance + * + * Flushes any pending work that was scheduled by a preceding + * intel_display_power_put_async() call, completing the disabling of the + * corresponding power domains. 
+ * + * Note that the work handler function may still be running after this + * function returns; to ensure that the work handler isn't running use + * intel_display_power_flush_work_sync() instead. + */ +void intel_display_power_flush_work(struct drm_i915_private *i915) +{ + struct i915_power_domains *power_domains = &i915->power_domains; + intel_wakeref_t work_wakeref; + + mutex_lock(&power_domains->lock); + + work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref); + if (!work_wakeref) + goto out_verify; + + release_async_put_domains(power_domains, + async_put_domains_mask(power_domains)); + cancel_delayed_work(&power_domains->async_put_work); + +out_verify: + verify_async_put_domains_state(power_domains); + + mutex_unlock(&power_domains->lock); + + if (work_wakeref) + intel_runtime_pm_put_raw(i915, work_wakeref); +} + +/** + * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work + * @i915: i915 device instance + * + * Like intel_display_power_flush_work(), but also ensure that the work + * handler function is not running any more when this function returns. + */ +static void +intel_display_power_flush_work_sync(struct drm_i915_private *i915) +{ + struct i915_power_domains *power_domains = &i915->power_domains; + + intel_display_power_flush_work(i915); + cancel_delayed_work_sync(&power_domains->async_put_work); + + verify_async_put_domains_state(power_domains); + + WARN_ON(power_domains->async_put_wakeref); +} + +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) +/** + * intel_display_power_put - release a power domain reference + * @dev_priv: i915 device instance + * @domain: power domain to reference + * @wakeref: wakeref acquired for the reference that is being released + * + * This function drops the power domain reference obtained by + * intel_display_power_get() and might power down the corresponding hardware + * block right away if this is the last reference. 
+ */ +void intel_display_power_put(struct drm_i915_private *dev_priv, + enum intel_display_power_domain domain, + intel_wakeref_t wakeref) +{ + __intel_display_power_put(dev_priv, domain); + intel_runtime_pm_put(dev_priv, wakeref); +} +#endif + +#define I830_PIPES_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PIPE_A) | \ + BIT_ULL(POWER_DOMAIN_PIPE_B) | \ + BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +#define VLV_DISPLAY_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) | \ + BIT_ULL(POWER_DOMAIN_PIPE_A) | \ + BIT_ULL(POWER_DOMAIN_PIPE_B) | \ + BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DSI) | \ + BIT_ULL(POWER_DOMAIN_PORT_CRT) | \ + BIT_ULL(POWER_DOMAIN_VGA) | \ + BIT_ULL(POWER_DOMAIN_AUDIO) | \ + BIT_ULL(POWER_DOMAIN_AUX_B) | \ + BIT_ULL(POWER_DOMAIN_AUX_C) | \ + BIT_ULL(POWER_DOMAIN_GMBUS) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +#define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_CRT) | \ + BIT_ULL(POWER_DOMAIN_AUX_B) | \ + BIT_ULL(POWER_DOMAIN_AUX_C) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ + BIT_ULL(POWER_DOMAIN_AUX_B) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ + BIT_ULL(POWER_DOMAIN_AUX_B) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ + BIT_ULL(POWER_DOMAIN_AUX_C) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ + BIT_ULL(POWER_DOMAIN_AUX_C) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +#define CHV_DISPLAY_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) | \ + BIT_ULL(POWER_DOMAIN_PIPE_A) | \ + BIT_ULL(POWER_DOMAIN_PIPE_B) | \ + BIT_ULL(POWER_DOMAIN_PIPE_C) | \ + BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DSI) | \ + BIT_ULL(POWER_DOMAIN_VGA) | \ + BIT_ULL(POWER_DOMAIN_AUDIO) | \ + BIT_ULL(POWER_DOMAIN_AUX_B) | \ + BIT_ULL(POWER_DOMAIN_AUX_C) | \ + BIT_ULL(POWER_DOMAIN_AUX_D) | \ + BIT_ULL(POWER_DOMAIN_GMBUS) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +#define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ + BIT_ULL(POWER_DOMAIN_AUX_B) | \ + BIT_ULL(POWER_DOMAIN_AUX_C) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +#define CHV_DPIO_CMN_D_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ + BIT_ULL(POWER_DOMAIN_AUX_D) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +#define HSW_DISPLAY_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PIPE_B) | \ + BIT_ULL(POWER_DOMAIN_PIPE_C) | \ + 
BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \ + BIT_ULL(POWER_DOMAIN_VGA) | \ + BIT_ULL(POWER_DOMAIN_AUDIO) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +#define BDW_DISPLAY_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PIPE_B) | \ + BIT_ULL(POWER_DOMAIN_PIPE_C) | \ + BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \ + BIT_ULL(POWER_DOMAIN_VGA) | \ + BIT_ULL(POWER_DOMAIN_AUDIO) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ + BIT_ULL(POWER_DOMAIN_PIPE_B) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ + BIT_ULL(POWER_DOMAIN_PIPE_C) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ + BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \ + BIT_ULL(POWER_DOMAIN_AUX_B) | \ + BIT_ULL(POWER_DOMAIN_AUX_C) | \ + BIT_ULL(POWER_DOMAIN_AUX_D) | \ + BIT_ULL(POWER_DOMAIN_AUDIO) | \ + BIT_ULL(POWER_DOMAIN_VGA) | \ + BIT_ULL(POWER_DOMAIN_INIT)) +#define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) | \ + BIT_ULL(POWER_DOMAIN_INIT)) +#define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \ + BIT_ULL(POWER_DOMAIN_INIT)) +#define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \ + BIT_ULL(POWER_DOMAIN_INIT)) +#define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \ + BIT_ULL(POWER_DOMAIN_INIT)) +#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ + SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ + BIT_ULL(POWER_DOMAIN_GT_IRQ) | \ + BIT_ULL(POWER_DOMAIN_MODESET) | \ + BIT_ULL(POWER_DOMAIN_AUX_A) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ + BIT_ULL(POWER_DOMAIN_PIPE_B) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ + BIT_ULL(POWER_DOMAIN_PIPE_C) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ + BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ + BIT_ULL(POWER_DOMAIN_AUX_B) | \ + BIT_ULL(POWER_DOMAIN_AUX_C) | \ + BIT_ULL(POWER_DOMAIN_AUDIO) | \ + BIT_ULL(POWER_DOMAIN_VGA) | \ + BIT_ULL(POWER_DOMAIN_INIT)) +#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS ( \ + BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ + BIT_ULL(POWER_DOMAIN_GT_IRQ) | \ + BIT_ULL(POWER_DOMAIN_MODESET) | \ + BIT_ULL(POWER_DOMAIN_AUX_A) | \ + BIT_ULL(POWER_DOMAIN_GMBUS) | \ + BIT_ULL(POWER_DOMAIN_INIT)) +#define BXT_DPIO_CMN_A_POWER_DOMAINS ( \ + 
BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \ + BIT_ULL(POWER_DOMAIN_AUX_A) | \ + BIT_ULL(POWER_DOMAIN_INIT)) +#define BXT_DPIO_CMN_BC_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ + BIT_ULL(POWER_DOMAIN_AUX_B) | \ + BIT_ULL(POWER_DOMAIN_AUX_C) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +#define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ + BIT_ULL(POWER_DOMAIN_PIPE_B) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ + BIT_ULL(POWER_DOMAIN_PIPE_C) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ + BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ + BIT_ULL(POWER_DOMAIN_AUX_B) | \ + BIT_ULL(POWER_DOMAIN_AUX_C) | \ + BIT_ULL(POWER_DOMAIN_AUDIO) | \ + BIT_ULL(POWER_DOMAIN_VGA) | \ + BIT_ULL(POWER_DOMAIN_INIT)) +#define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO)) +#define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO)) +#define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO)) +#define GLK_DPIO_CMN_A_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \ + BIT_ULL(POWER_DOMAIN_AUX_A) | \ + BIT_ULL(POWER_DOMAIN_INIT)) +#define GLK_DPIO_CMN_B_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ + BIT_ULL(POWER_DOMAIN_AUX_B) | \ + BIT_ULL(POWER_DOMAIN_INIT)) +#define GLK_DPIO_CMN_C_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ + BIT_ULL(POWER_DOMAIN_AUX_C) | \ + BIT_ULL(POWER_DOMAIN_INIT)) +#define GLK_DISPLAY_AUX_A_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_A) | \ + BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \ + BIT_ULL(POWER_DOMAIN_INIT)) +#define GLK_DISPLAY_AUX_B_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_B) | \ + BIT_ULL(POWER_DOMAIN_INIT)) +#define GLK_DISPLAY_AUX_C_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_C) | \ + BIT_ULL(POWER_DOMAIN_INIT)) +#define GLK_DISPLAY_DC_OFF_POWER_DOMAINS ( \ + GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ + BIT_ULL(POWER_DOMAIN_GT_IRQ) | \ + BIT_ULL(POWER_DOMAIN_MODESET) | \ + BIT_ULL(POWER_DOMAIN_AUX_A) | \ + BIT_ULL(POWER_DOMAIN_GMBUS) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +#define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ + BIT_ULL(POWER_DOMAIN_PIPE_B) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ + BIT_ULL(POWER_DOMAIN_PIPE_C) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ + BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \ + BIT_ULL(POWER_DOMAIN_AUX_B) | \ + BIT_ULL(POWER_DOMAIN_AUX_C) | \ + BIT_ULL(POWER_DOMAIN_AUX_D) | \ + BIT_ULL(POWER_DOMAIN_AUX_F) | \ + BIT_ULL(POWER_DOMAIN_AUDIO) | \ + BIT_ULL(POWER_DOMAIN_VGA) | \ + BIT_ULL(POWER_DOMAIN_INIT)) +#define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \ + BIT_ULL(POWER_DOMAIN_INIT)) +#define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \ + BIT_ULL(POWER_DOMAIN_INIT)) +#define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \ + BIT_ULL(POWER_DOMAIN_INIT)) +#define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \ + BIT_ULL(POWER_DOMAIN_INIT)) +#define CNL_DISPLAY_AUX_A_POWER_DOMAINS ( \ + 
BIT_ULL(POWER_DOMAIN_AUX_A) | \ + BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \ + BIT_ULL(POWER_DOMAIN_INIT)) +#define CNL_DISPLAY_AUX_B_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_B) | \ + BIT_ULL(POWER_DOMAIN_INIT)) +#define CNL_DISPLAY_AUX_C_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_C) | \ + BIT_ULL(POWER_DOMAIN_INIT)) +#define CNL_DISPLAY_AUX_D_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_D) | \ + BIT_ULL(POWER_DOMAIN_INIT)) +#define CNL_DISPLAY_AUX_F_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_F) | \ + BIT_ULL(POWER_DOMAIN_INIT)) +#define CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) | \ + BIT_ULL(POWER_DOMAIN_INIT)) +#define CNL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ + CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ + BIT_ULL(POWER_DOMAIN_GT_IRQ) | \ + BIT_ULL(POWER_DOMAIN_MODESET) | \ + BIT_ULL(POWER_DOMAIN_AUX_A) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +/* + * ICL PW_0/PG_0 domains (HW/DMC control): + * - PCI + * - clocks except port PLL + * - central power except FBC + * - shared functions except pipe interrupts, pipe MBUS, DBUF registers + * ICL PW_1/PG_1 domains (HW/DMC control): + * - DBUF function + * - PIPE_A and its planes, except VGA + * - transcoder EDP + PSR + * - transcoder DSI + * - DDI_A + * - FBC + */ +#define ICL_PW_4_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PIPE_C) | \ + BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + /* VDSC/joining */ +#define ICL_PW_3_POWER_DOMAINS ( \ + ICL_PW_4_POWER_DOMAINS | \ + BIT_ULL(POWER_DOMAIN_PIPE_B) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ + BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) | \ + BIT_ULL(POWER_DOMAIN_AUX_B) | \ + BIT_ULL(POWER_DOMAIN_AUX_C) | \ + BIT_ULL(POWER_DOMAIN_AUX_D) | \ + BIT_ULL(POWER_DOMAIN_AUX_E) | \ + BIT_ULL(POWER_DOMAIN_AUX_F) | \ + BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \ + BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \ + BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \ + BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \ + BIT_ULL(POWER_DOMAIN_VGA) | \ + BIT_ULL(POWER_DOMAIN_AUDIO) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + /* + * - transcoder WD + * - KVMR (HW control) + */ +#define ICL_PW_2_POWER_DOMAINS ( \ + ICL_PW_3_POWER_DOMAINS | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_EDP_VDSC) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + /* + * - KVMR (HW control) + */ +#define ICL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ + ICL_PW_2_POWER_DOMAINS | \ + BIT_ULL(POWER_DOMAIN_MODESET) | \ + BIT_ULL(POWER_DOMAIN_AUX_A) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +#define ICL_DDI_IO_A_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO)) +#define ICL_DDI_IO_B_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO)) +#define ICL_DDI_IO_C_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO)) +#define ICL_DDI_IO_D_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO)) +#define ICL_DDI_IO_E_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO)) +#define ICL_DDI_IO_F_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO)) + +#define ICL_AUX_A_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \ + BIT_ULL(POWER_DOMAIN_AUX_A)) +#define 
ICL_AUX_B_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_B)) +#define ICL_AUX_C_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_C)) +#define ICL_AUX_D_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_D)) +#define ICL_AUX_E_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_E)) +#define ICL_AUX_F_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_F)) +#define ICL_AUX_TBT1_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_TBT1)) +#define ICL_AUX_TBT2_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_TBT2)) +#define ICL_AUX_TBT3_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_TBT3)) +#define ICL_AUX_TBT4_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_TBT4)) + +static const struct i915_power_well_ops i9xx_always_on_power_well_ops = { + .sync_hw = i9xx_power_well_sync_hw_noop, + .enable = i9xx_always_on_power_well_noop, + .disable = i9xx_always_on_power_well_noop, + .is_enabled = i9xx_always_on_power_well_enabled, +}; + +static const struct i915_power_well_ops chv_pipe_power_well_ops = { + .sync_hw = i9xx_power_well_sync_hw_noop, + .enable = chv_pipe_power_well_enable, + .disable = chv_pipe_power_well_disable, + .is_enabled = chv_pipe_power_well_enabled, +}; + +static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = { + .sync_hw = i9xx_power_well_sync_hw_noop, + .enable = chv_dpio_cmn_power_well_enable, + .disable = chv_dpio_cmn_power_well_disable, + .is_enabled = vlv_power_well_enabled, +}; + +static const struct i915_power_well_desc i9xx_always_on_power_well[] = { + { + .name = "always-on", + .always_on = true, + .domains = POWER_DOMAIN_MASK, + .ops = &i9xx_always_on_power_well_ops, + .id = DISP_PW_ID_NONE, + }, +}; + +static const struct i915_power_well_ops i830_pipes_power_well_ops = { + .sync_hw = i830_pipes_power_well_sync_hw, + .enable = i830_pipes_power_well_enable, + .disable = i830_pipes_power_well_disable, + .is_enabled = i830_pipes_power_well_enabled, +}; + +static const struct i915_power_well_desc i830_power_wells[] = { + { + .name = "always-on", + .always_on = true, + .domains = POWER_DOMAIN_MASK, + .ops = &i9xx_always_on_power_well_ops, + .id = DISP_PW_ID_NONE, + }, + { + .name = "pipes", + .domains = I830_PIPES_POWER_DOMAINS, + .ops = &i830_pipes_power_well_ops, + .id = DISP_PW_ID_NONE, + }, +}; + +static const struct i915_power_well_ops hsw_power_well_ops = { + .sync_hw = hsw_power_well_sync_hw, + .enable = hsw_power_well_enable, + .disable = hsw_power_well_disable, + .is_enabled = hsw_power_well_enabled, +}; + +static const struct i915_power_well_ops gen9_dc_off_power_well_ops = { + .sync_hw = i9xx_power_well_sync_hw_noop, + .enable = gen9_dc_off_power_well_enable, + .disable = gen9_dc_off_power_well_disable, + .is_enabled = gen9_dc_off_power_well_enabled, +}; + +static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = { + .sync_hw = i9xx_power_well_sync_hw_noop, + .enable = bxt_dpio_cmn_power_well_enable, + .disable = bxt_dpio_cmn_power_well_disable, + .is_enabled = bxt_dpio_cmn_power_well_enabled, +}; + +static const struct i915_power_well_regs hsw_power_well_regs = { + .bios = HSW_PWR_WELL_CTL1, + .driver = HSW_PWR_WELL_CTL2, + .kvmr = HSW_PWR_WELL_CTL3, + .debug = HSW_PWR_WELL_CTL4, +}; + +static const struct i915_power_well_desc hsw_power_wells[] = { + { + .name = "always-on", + .always_on = true, + .domains = POWER_DOMAIN_MASK, + .ops = &i9xx_always_on_power_well_ops, + .id = DISP_PW_ID_NONE, + }, + { + .name = "display", + .domains = HSW_DISPLAY_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = HSW_DISP_PW_GLOBAL, + { + .hsw.regs = 
&hsw_power_well_regs, + .hsw.idx = HSW_PW_CTL_IDX_GLOBAL, + .hsw.has_vga = true, + }, + }, +}; + +static const struct i915_power_well_desc bdw_power_wells[] = { + { + .name = "always-on", + .always_on = true, + .domains = POWER_DOMAIN_MASK, + .ops = &i9xx_always_on_power_well_ops, + .id = DISP_PW_ID_NONE, + }, + { + .name = "display", + .domains = BDW_DISPLAY_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = HSW_DISP_PW_GLOBAL, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = HSW_PW_CTL_IDX_GLOBAL, + .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), + .hsw.has_vga = true, + }, + }, +}; + +static const struct i915_power_well_ops vlv_display_power_well_ops = { + .sync_hw = i9xx_power_well_sync_hw_noop, + .enable = vlv_display_power_well_enable, + .disable = vlv_display_power_well_disable, + .is_enabled = vlv_power_well_enabled, +}; + +static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = { + .sync_hw = i9xx_power_well_sync_hw_noop, + .enable = vlv_dpio_cmn_power_well_enable, + .disable = vlv_dpio_cmn_power_well_disable, + .is_enabled = vlv_power_well_enabled, +}; + +static const struct i915_power_well_ops vlv_dpio_power_well_ops = { + .sync_hw = i9xx_power_well_sync_hw_noop, + .enable = vlv_power_well_enable, + .disable = vlv_power_well_disable, + .is_enabled = vlv_power_well_enabled, +}; + +static const struct i915_power_well_desc vlv_power_wells[] = { + { + .name = "always-on", + .always_on = true, + .domains = POWER_DOMAIN_MASK, + .ops = &i9xx_always_on_power_well_ops, + .id = DISP_PW_ID_NONE, + }, + { + .name = "display", + .domains = VLV_DISPLAY_POWER_DOMAINS, + .ops = &vlv_display_power_well_ops, + .id = VLV_DISP_PW_DISP2D, + { + .vlv.idx = PUNIT_PWGT_IDX_DISP2D, + }, + }, + { + .name = "dpio-tx-b-01", + .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | + VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | + VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | + VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, + .ops = &vlv_dpio_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01, + }, + }, + { + .name = "dpio-tx-b-23", + .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | + VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | + VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | + VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, + .ops = &vlv_dpio_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23, + }, + }, + { + .name = "dpio-tx-c-01", + .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | + VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | + VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | + VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, + .ops = &vlv_dpio_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01, + }, + }, + { + .name = "dpio-tx-c-23", + .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | + VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | + VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | + VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, + .ops = &vlv_dpio_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23, + }, + }, + { + .name = "dpio-common", + .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS, + .ops = &vlv_dpio_cmn_power_well_ops, + .id = VLV_DISP_PW_DPIO_CMN_BC, + { + .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC, + }, + }, +}; + +static const struct i915_power_well_desc chv_power_wells[] = { + { + .name = "always-on", + .always_on = true, + .domains = POWER_DOMAIN_MASK, + .ops = &i9xx_always_on_power_well_ops, + .id = DISP_PW_ID_NONE, + }, + { + .name = "display", + /* + * Pipe A power well is the new disp2d well. 
Pipe B and C + * power wells don't actually exist. Pipe A power well is + * required for any pipe to work. + */ + .domains = CHV_DISPLAY_POWER_DOMAINS, + .ops = &chv_pipe_power_well_ops, + .id = DISP_PW_ID_NONE, + }, + { + .name = "dpio-common-bc", + .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS, + .ops = &chv_dpio_cmn_power_well_ops, + .id = VLV_DISP_PW_DPIO_CMN_BC, + { + .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC, + }, + }, + { + .name = "dpio-common-d", + .domains = CHV_DPIO_CMN_D_POWER_DOMAINS, + .ops = &chv_dpio_cmn_power_well_ops, + .id = CHV_DISP_PW_DPIO_CMN_D, + { + .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D, + }, + }, +}; + +bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv, + enum i915_power_well_id power_well_id) +{ + struct i915_power_well *power_well; + bool ret; + + power_well = lookup_power_well(dev_priv, power_well_id); + ret = power_well->desc->ops->is_enabled(dev_priv, power_well); + + return ret; +} + +static const struct i915_power_well_desc skl_power_wells[] = { + { + .name = "always-on", + .always_on = true, + .domains = POWER_DOMAIN_MASK, + .ops = &i9xx_always_on_power_well_ops, + .id = DISP_PW_ID_NONE, + }, + { + .name = "power well 1", + /* Handled by the DMC firmware */ + .always_on = true, + .domains = 0, + .ops = &hsw_power_well_ops, + .id = SKL_DISP_PW_1, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = SKL_PW_CTL_IDX_PW_1, + .hsw.has_fuses = true, + }, + }, + { + .name = "MISC IO power well", + /* Handled by the DMC firmware */ + .always_on = true, + .domains = 0, + .ops = &hsw_power_well_ops, + .id = SKL_DISP_PW_MISC_IO, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = SKL_PW_CTL_IDX_MISC_IO, + }, + }, + { + .name = "DC off", + .domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS, + .ops = &gen9_dc_off_power_well_ops, + .id = DISP_PW_ID_NONE, + }, + { + .name = "power well 2", + .domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = SKL_DISP_PW_2, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = SKL_PW_CTL_IDX_PW_2, + .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), + .hsw.has_vga = true, + .hsw.has_fuses = true, + }, + }, + { + .name = "DDI A/E IO power well", + .domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = SKL_PW_CTL_IDX_DDI_A_E, + }, + }, + { + .name = "DDI B IO power well", + .domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = SKL_PW_CTL_IDX_DDI_B, + }, + }, + { + .name = "DDI C IO power well", + .domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = SKL_PW_CTL_IDX_DDI_C, + }, + }, + { + .name = "DDI D IO power well", + .domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = SKL_PW_CTL_IDX_DDI_D, + }, + }, +}; + +static const struct i915_power_well_desc bxt_power_wells[] = { + { + .name = "always-on", + .always_on = true, + .domains = POWER_DOMAIN_MASK, + .ops = &i9xx_always_on_power_well_ops, + .id = DISP_PW_ID_NONE, + }, + { + .name = "power well 1", + /* Handled by the DMC firmware */ + .always_on = true, + .domains = 0, + .ops = &hsw_power_well_ops, + .id = SKL_DISP_PW_1, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = SKL_PW_CTL_IDX_PW_1, + .hsw.has_fuses = true, + }, + }, + { + .name = "DC off", 
+ .domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS, + .ops = &gen9_dc_off_power_well_ops, + .id = DISP_PW_ID_NONE, + }, + { + .name = "power well 2", + .domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = SKL_DISP_PW_2, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = SKL_PW_CTL_IDX_PW_2, + .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), + .hsw.has_vga = true, + .hsw.has_fuses = true, + }, + }, + { + .name = "dpio-common-a", + .domains = BXT_DPIO_CMN_A_POWER_DOMAINS, + .ops = &bxt_dpio_cmn_power_well_ops, + .id = BXT_DISP_PW_DPIO_CMN_A, + { + .bxt.phy = DPIO_PHY1, + }, + }, + { + .name = "dpio-common-bc", + .domains = BXT_DPIO_CMN_BC_POWER_DOMAINS, + .ops = &bxt_dpio_cmn_power_well_ops, + .id = VLV_DISP_PW_DPIO_CMN_BC, + { + .bxt.phy = DPIO_PHY0, + }, + }, +}; + +static const struct i915_power_well_desc glk_power_wells[] = { + { + .name = "always-on", + .always_on = true, + .domains = POWER_DOMAIN_MASK, + .ops = &i9xx_always_on_power_well_ops, + .id = DISP_PW_ID_NONE, + }, + { + .name = "power well 1", + /* Handled by the DMC firmware */ + .always_on = true, + .domains = 0, + .ops = &hsw_power_well_ops, + .id = SKL_DISP_PW_1, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = SKL_PW_CTL_IDX_PW_1, + .hsw.has_fuses = true, + }, + }, + { + .name = "DC off", + .domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS, + .ops = &gen9_dc_off_power_well_ops, + .id = DISP_PW_ID_NONE, + }, + { + .name = "power well 2", + .domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = SKL_DISP_PW_2, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = SKL_PW_CTL_IDX_PW_2, + .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), + .hsw.has_vga = true, + .hsw.has_fuses = true, + }, + }, + { + .name = "dpio-common-a", + .domains = GLK_DPIO_CMN_A_POWER_DOMAINS, + .ops = &bxt_dpio_cmn_power_well_ops, + .id = BXT_DISP_PW_DPIO_CMN_A, + { + .bxt.phy = DPIO_PHY1, + }, + }, + { + .name = "dpio-common-b", + .domains = GLK_DPIO_CMN_B_POWER_DOMAINS, + .ops = &bxt_dpio_cmn_power_well_ops, + .id = VLV_DISP_PW_DPIO_CMN_BC, + { + .bxt.phy = DPIO_PHY0, + }, + }, + { + .name = "dpio-common-c", + .domains = GLK_DPIO_CMN_C_POWER_DOMAINS, + .ops = &bxt_dpio_cmn_power_well_ops, + .id = GLK_DISP_PW_DPIO_CMN_C, + { + .bxt.phy = DPIO_PHY2, + }, + }, + { + .name = "AUX A", + .domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = GLK_PW_CTL_IDX_AUX_A, + }, + }, + { + .name = "AUX B", + .domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = GLK_PW_CTL_IDX_AUX_B, + }, + }, + { + .name = "AUX C", + .domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = GLK_PW_CTL_IDX_AUX_C, + }, + }, + { + .name = "DDI A IO power well", + .domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = GLK_PW_CTL_IDX_DDI_A, + }, + }, + { + .name = "DDI B IO power well", + .domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = SKL_PW_CTL_IDX_DDI_B, + }, + }, + { + .name = "DDI C IO power well", + .domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &hsw_power_well_regs, + 
.hsw.idx = SKL_PW_CTL_IDX_DDI_C, + }, + }, +}; + +static const struct i915_power_well_desc cnl_power_wells[] = { + { + .name = "always-on", + .always_on = true, + .domains = POWER_DOMAIN_MASK, + .ops = &i9xx_always_on_power_well_ops, + .id = DISP_PW_ID_NONE, + }, + { + .name = "power well 1", + /* Handled by the DMC firmware */ + .always_on = true, + .domains = 0, + .ops = &hsw_power_well_ops, + .id = SKL_DISP_PW_1, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = SKL_PW_CTL_IDX_PW_1, + .hsw.has_fuses = true, + }, + }, + { + .name = "AUX A", + .domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = GLK_PW_CTL_IDX_AUX_A, + }, + }, + { + .name = "AUX B", + .domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = GLK_PW_CTL_IDX_AUX_B, + }, + }, + { + .name = "AUX C", + .domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = GLK_PW_CTL_IDX_AUX_C, + }, + }, + { + .name = "AUX D", + .domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = CNL_PW_CTL_IDX_AUX_D, + }, + }, + { + .name = "DC off", + .domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS, + .ops = &gen9_dc_off_power_well_ops, + .id = DISP_PW_ID_NONE, + }, + { + .name = "power well 2", + .domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = SKL_DISP_PW_2, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = SKL_PW_CTL_IDX_PW_2, + .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), + .hsw.has_vga = true, + .hsw.has_fuses = true, + }, + }, + { + .name = "DDI A IO power well", + .domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = GLK_PW_CTL_IDX_DDI_A, + }, + }, + { + .name = "DDI B IO power well", + .domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = SKL_PW_CTL_IDX_DDI_B, + }, + }, + { + .name = "DDI C IO power well", + .domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = SKL_PW_CTL_IDX_DDI_C, + }, + }, + { + .name = "DDI D IO power well", + .domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = SKL_PW_CTL_IDX_DDI_D, + }, + }, + { + .name = "DDI F IO power well", + .domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = CNL_PW_CTL_IDX_DDI_F, + }, + }, + { + .name = "AUX F", + .domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = CNL_PW_CTL_IDX_AUX_F, + }, + }, +}; + +static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = { + .sync_hw = hsw_power_well_sync_hw, + .enable = icl_combo_phy_aux_power_well_enable, + .disable = icl_combo_phy_aux_power_well_disable, + .is_enabled = hsw_power_well_enabled, +}; + +static const struct i915_power_well_ops icl_tc_phy_aux_power_well_ops = { + .sync_hw = hsw_power_well_sync_hw, + .enable = 
icl_tc_phy_aux_power_well_enable, + .disable = hsw_power_well_disable, + .is_enabled = hsw_power_well_enabled, +}; + +static const struct i915_power_well_regs icl_aux_power_well_regs = { + .bios = ICL_PWR_WELL_CTL_AUX1, + .driver = ICL_PWR_WELL_CTL_AUX2, + .debug = ICL_PWR_WELL_CTL_AUX4, +}; + +static const struct i915_power_well_regs icl_ddi_power_well_regs = { + .bios = ICL_PWR_WELL_CTL_DDI1, + .driver = ICL_PWR_WELL_CTL_DDI2, + .debug = ICL_PWR_WELL_CTL_DDI4, +}; + +static const struct i915_power_well_desc icl_power_wells[] = { + { + .name = "always-on", + .always_on = true, + .domains = POWER_DOMAIN_MASK, + .ops = &i9xx_always_on_power_well_ops, + .id = DISP_PW_ID_NONE, + }, + { + .name = "power well 1", + /* Handled by the DMC firmware */ + .always_on = true, + .domains = 0, + .ops = &hsw_power_well_ops, + .id = SKL_DISP_PW_1, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_PW_1, + .hsw.has_fuses = true, + }, + }, + { + .name = "DC off", + .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS, + .ops = &gen9_dc_off_power_well_ops, + .id = DISP_PW_ID_NONE, + }, + { + .name = "power well 2", + .domains = ICL_PW_2_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = SKL_DISP_PW_2, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_PW_2, + .hsw.has_fuses = true, + }, + }, + { + .name = "power well 3", + .domains = ICL_PW_3_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_PW_3, + .hsw.irq_pipe_mask = BIT(PIPE_B), + .hsw.has_vga = true, + .hsw.has_fuses = true, + }, + }, + { + .name = "DDI A IO", + .domains = ICL_DDI_IO_A_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_ddi_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_DDI_A, + }, + }, + { + .name = "DDI B IO", + .domains = ICL_DDI_IO_B_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_ddi_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_DDI_B, + }, + }, + { + .name = "DDI C IO", + .domains = ICL_DDI_IO_C_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_ddi_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_DDI_C, + }, + }, + { + .name = "DDI D IO", + .domains = ICL_DDI_IO_D_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_ddi_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_DDI_D, + }, + }, + { + .name = "DDI E IO", + .domains = ICL_DDI_IO_E_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_ddi_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_DDI_E, + }, + }, + { + .name = "DDI F IO", + .domains = ICL_DDI_IO_F_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_ddi_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_DDI_F, + }, + }, + { + .name = "AUX A", + .domains = ICL_AUX_A_IO_POWER_DOMAINS, + .ops = &icl_combo_phy_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_AUX_A, + }, + }, + { + .name = "AUX B", + .domains = ICL_AUX_B_IO_POWER_DOMAINS, + .ops = &icl_combo_phy_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_AUX_B, + }, + }, + { + .name = "AUX C", + .domains = ICL_AUX_C_IO_POWER_DOMAINS, + .ops = &icl_tc_phy_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_AUX_C, + .hsw.is_tc_tbt = 
false, + }, + }, + { + .name = "AUX D", + .domains = ICL_AUX_D_IO_POWER_DOMAINS, + .ops = &icl_tc_phy_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_AUX_D, + .hsw.is_tc_tbt = false, + }, + }, + { + .name = "AUX E", + .domains = ICL_AUX_E_IO_POWER_DOMAINS, + .ops = &icl_tc_phy_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_AUX_E, + .hsw.is_tc_tbt = false, + }, + }, + { + .name = "AUX F", + .domains = ICL_AUX_F_IO_POWER_DOMAINS, + .ops = &icl_tc_phy_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_AUX_F, + .hsw.is_tc_tbt = false, + }, + }, + { + .name = "AUX TBT1", + .domains = ICL_AUX_TBT1_IO_POWER_DOMAINS, + .ops = &icl_tc_phy_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1, + .hsw.is_tc_tbt = true, + }, + }, + { + .name = "AUX TBT2", + .domains = ICL_AUX_TBT2_IO_POWER_DOMAINS, + .ops = &icl_tc_phy_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2, + .hsw.is_tc_tbt = true, + }, + }, + { + .name = "AUX TBT3", + .domains = ICL_AUX_TBT3_IO_POWER_DOMAINS, + .ops = &icl_tc_phy_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3, + .hsw.is_tc_tbt = true, + }, + }, + { + .name = "AUX TBT4", + .domains = ICL_AUX_TBT4_IO_POWER_DOMAINS, + .ops = &icl_tc_phy_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4, + .hsw.is_tc_tbt = true, + }, + }, + { + .name = "power well 4", + .domains = ICL_PW_4_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_PW_4, + .hsw.has_fuses = true, + .hsw.irq_pipe_mask = BIT(PIPE_C), + }, + }, +}; + +static int +sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv, + int disable_power_well) +{ + if (disable_power_well >= 0) + return !!disable_power_well; + + return 1; +} + +static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv, + int enable_dc) +{ + u32 mask; + int requested_dc; + int max_dc; + + if (INTEL_GEN(dev_priv) >= 11) { + max_dc = 2; + /* + * DC9 has a separate HW flow from the rest of the DC states, + * not depending on the DMC firmware. It's needed by system + * suspend/resume, so allow it unconditionally. 
+ */ + mask = DC_STATE_EN_DC9; + } else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv)) { + max_dc = 2; + mask = 0; + } else if (IS_GEN9_LP(dev_priv)) { + max_dc = 1; + mask = DC_STATE_EN_DC9; + } else { + max_dc = 0; + mask = 0; + } + + if (!i915_modparams.disable_power_well) + max_dc = 0; + + if (enable_dc >= 0 && enable_dc <= max_dc) { + requested_dc = enable_dc; + } else if (enable_dc == -1) { + requested_dc = max_dc; + } else if (enable_dc > max_dc && enable_dc <= 2) { + DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n", + enable_dc, max_dc); + requested_dc = max_dc; + } else { + DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc); + requested_dc = max_dc; + } + + if (requested_dc > 1) + mask |= DC_STATE_EN_UPTO_DC6; + if (requested_dc > 0) + mask |= DC_STATE_EN_UPTO_DC5; + + DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask); + + return mask; +} + +static int +__set_power_wells(struct i915_power_domains *power_domains, + const struct i915_power_well_desc *power_well_descs, + int power_well_count) +{ + u64 power_well_ids = 0; + int i; + + power_domains->power_well_count = power_well_count; + power_domains->power_wells = + kcalloc(power_well_count, + sizeof(*power_domains->power_wells), + GFP_KERNEL); + if (!power_domains->power_wells) + return -ENOMEM; + + for (i = 0; i < power_well_count; i++) { + enum i915_power_well_id id = power_well_descs[i].id; + + power_domains->power_wells[i].desc = &power_well_descs[i]; + + if (id == DISP_PW_ID_NONE) + continue; + + WARN_ON(id >= sizeof(power_well_ids) * 8); + WARN_ON(power_well_ids & BIT_ULL(id)); + power_well_ids |= BIT_ULL(id); + } + + return 0; +} + +#define set_power_wells(power_domains, __power_well_descs) \ + __set_power_wells(power_domains, __power_well_descs, \ + ARRAY_SIZE(__power_well_descs)) + +/** + * intel_power_domains_init - initializes the power domain structures + * @dev_priv: i915 device instance + * + * Initializes the power domain structures for @dev_priv depending upon the + * supported platform. + */ +int intel_power_domains_init(struct drm_i915_private *dev_priv) +{ + struct i915_power_domains *power_domains = &dev_priv->power_domains; + int err; + + i915_modparams.disable_power_well = + sanitize_disable_power_well_option(dev_priv, + i915_modparams.disable_power_well); + dev_priv->csr.allowed_dc_mask = + get_allowed_dc_mask(dev_priv, i915_modparams.enable_dc); + + BUILD_BUG_ON(POWER_DOMAIN_NUM > 64); + + mutex_init(&power_domains->lock); + + INIT_DELAYED_WORK(&power_domains->async_put_work, + intel_display_power_put_async_work); + + /* + * The enabling order will be from lower to higher indexed wells, + * the disabling order is reversed. + */ + if (IS_GEN(dev_priv, 11)) { + err = set_power_wells(power_domains, icl_power_wells); + } else if (IS_CANNONLAKE(dev_priv)) { + err = set_power_wells(power_domains, cnl_power_wells); + + /* + * DDI and Aux IO are getting enabled for all ports + * regardless the presence or use. So, in order to avoid + * timeouts, lets remove them from the list + * for the SKUs without port F. 
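A quick worked example of the DC mask computation above, for illustration only (all names are taken from the code in this hunk):

	/* i915.enable_dc=-1 (auto), gen11+, power wells left enabled */
	max_dc = 2;				/* INTEL_GEN(dev_priv) >= 11 branch */
	requested_dc = max_dc;			/* the enable_dc == -1 case */
	mask = DC_STATE_EN_DC9 |		/* allowed unconditionally on gen11+ */
	       DC_STATE_EN_UPTO_DC6 |		/* requested_dc > 1 */
	       DC_STATE_EN_UPTO_DC5;		/* requested_dc > 0 */

With i915.enable_dc=1 the DC6 bit would be dropped, and with i915.disable_power_well=0 max_dc is forced to 0 so only the DC9 bit remains on gen11+.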
+ */ + if (!IS_CNL_WITH_PORT_F(dev_priv)) + power_domains->power_well_count -= 2; + } else if (IS_GEMINILAKE(dev_priv)) { + err = set_power_wells(power_domains, glk_power_wells); + } else if (IS_BROXTON(dev_priv)) { + err = set_power_wells(power_domains, bxt_power_wells); + } else if (IS_GEN9_BC(dev_priv)) { + err = set_power_wells(power_domains, skl_power_wells); + } else if (IS_CHERRYVIEW(dev_priv)) { + err = set_power_wells(power_domains, chv_power_wells); + } else if (IS_BROADWELL(dev_priv)) { + err = set_power_wells(power_domains, bdw_power_wells); + } else if (IS_HASWELL(dev_priv)) { + err = set_power_wells(power_domains, hsw_power_wells); + } else if (IS_VALLEYVIEW(dev_priv)) { + err = set_power_wells(power_domains, vlv_power_wells); + } else if (IS_I830(dev_priv)) { + err = set_power_wells(power_domains, i830_power_wells); + } else { + err = set_power_wells(power_domains, i9xx_always_on_power_well); + } + + return err; +} + +/** + * intel_power_domains_cleanup - clean up power domains resources + * @dev_priv: i915 device instance + * + * Release any resources acquired by intel_power_domains_init() + */ +void intel_power_domains_cleanup(struct drm_i915_private *dev_priv) +{ + kfree(dev_priv->power_domains.power_wells); +} + +static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv) +{ + struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_well *power_well; + + mutex_lock(&power_domains->lock); + for_each_power_well(dev_priv, power_well) { + power_well->desc->ops->sync_hw(dev_priv, power_well); + power_well->hw_enabled = + power_well->desc->ops->is_enabled(dev_priv, power_well); + } + mutex_unlock(&power_domains->lock); +} + +static inline +bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv, + i915_reg_t reg, bool enable) +{ + u32 val, status; + + val = I915_READ(reg); + val = enable ? (val | DBUF_POWER_REQUEST) : (val & ~DBUF_POWER_REQUEST); + I915_WRITE(reg, val); + POSTING_READ(reg); + udelay(10); + + status = I915_READ(reg) & DBUF_POWER_STATE; + if ((enable && !status) || (!enable && status)) { + DRM_ERROR("DBus power %s timeout!\n", + enable ? 
"enable" : "disable"); + return false; + } + return true; +} + +static void gen9_dbuf_enable(struct drm_i915_private *dev_priv) +{ + intel_dbuf_slice_set(dev_priv, DBUF_CTL, true); +} + +static void gen9_dbuf_disable(struct drm_i915_private *dev_priv) +{ + intel_dbuf_slice_set(dev_priv, DBUF_CTL, false); +} + +static u8 intel_dbuf_max_slices(struct drm_i915_private *dev_priv) +{ + if (INTEL_GEN(dev_priv) < 11) + return 1; + return 2; +} + +void icl_dbuf_slices_update(struct drm_i915_private *dev_priv, + u8 req_slices) +{ + const u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices; + bool ret; + + if (req_slices > intel_dbuf_max_slices(dev_priv)) { + DRM_ERROR("Invalid number of dbuf slices requested\n"); + return; + } + + if (req_slices == hw_enabled_slices || req_slices == 0) + return; + + if (req_slices > hw_enabled_slices) + ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true); + else + ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, false); + + if (ret) + dev_priv->wm.skl_hw.ddb.enabled_slices = req_slices; +} + +static void icl_dbuf_enable(struct drm_i915_private *dev_priv) +{ + I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) | DBUF_POWER_REQUEST); + I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) | DBUF_POWER_REQUEST); + POSTING_READ(DBUF_CTL_S2); + + udelay(10); + + if (!(I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) || + !(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE)) + DRM_ERROR("DBuf power enable timeout\n"); + else + /* + * FIXME: for now pretend that we only have 1 slice, see + * intel_enabled_dbuf_slices_num(). + */ + dev_priv->wm.skl_hw.ddb.enabled_slices = 1; +} + +static void icl_dbuf_disable(struct drm_i915_private *dev_priv) +{ + I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) & ~DBUF_POWER_REQUEST); + I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) & ~DBUF_POWER_REQUEST); + POSTING_READ(DBUF_CTL_S2); + + udelay(10); + + if ((I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) || + (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE)) + DRM_ERROR("DBuf power disable timeout!\n"); + else + /* + * FIXME: for now pretend that the first slice is always + * enabled, see intel_enabled_dbuf_slices_num(). + */ + dev_priv->wm.skl_hw.ddb.enabled_slices = 1; +} + +static void icl_mbus_init(struct drm_i915_private *dev_priv) +{ + u32 val; + + val = MBUS_ABOX_BT_CREDIT_POOL1(16) | + MBUS_ABOX_BT_CREDIT_POOL2(16) | + MBUS_ABOX_B_CREDIT(1) | + MBUS_ABOX_BW_CREDIT(1); + + I915_WRITE(MBUS_ABOX_CTL, val); +} + +static void hsw_assert_cdclk(struct drm_i915_private *dev_priv) +{ + u32 val = I915_READ(LCPLL_CTL); + + /* + * The LCPLL register should be turned on by the BIOS. For now + * let's just check its state and print errors in case + * something is wrong. Don't even try to turn it on. 
+ */ + + if (val & LCPLL_CD_SOURCE_FCLK) + DRM_ERROR("CDCLK source is not LCPLL\n"); + + if (val & LCPLL_PLL_DISABLE) + DRM_ERROR("LCPLL is disabled\n"); +} + +static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) +{ + struct drm_device *dev = &dev_priv->drm; + struct intel_crtc *crtc; + + for_each_intel_crtc(dev, crtc) + I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n", + pipe_name(crtc->pipe)); + + I915_STATE_WARN(I915_READ(HSW_PWR_WELL_CTL2), + "Display power well on\n"); + I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, + "SPLL enabled\n"); + I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, + "WRPLL1 enabled\n"); + I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, + "WRPLL2 enabled\n"); + I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON, + "Panel power on\n"); + I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE, + "CPU PWM1 enabled\n"); + if (IS_HASWELL(dev_priv)) + I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE, + "CPU PWM2 enabled\n"); + I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE, + "PCH PWM1 enabled\n"); + I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE, + "Utility pin enabled\n"); + I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, + "PCH GTC enabled\n"); + + /* + * In theory we can still leave IRQs enabled, as long as only the HPD + * interrupts remain enabled. We used to check for that, but since it's + * gen-specific and since we only disable LCPLL after we fully disable + * the interrupts, the check below should be enough. + */ + I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n"); +} + +static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv) +{ + if (IS_HASWELL(dev_priv)) + return I915_READ(D_COMP_HSW); + else + return I915_READ(D_COMP_BDW); +} + +static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val) +{ + if (IS_HASWELL(dev_priv)) { + if (sandybridge_pcode_write(dev_priv, + GEN6_PCODE_WRITE_D_COMP, val)) + DRM_DEBUG_KMS("Failed to write to D_COMP\n"); + } else { + I915_WRITE(D_COMP_BDW, val); + POSTING_READ(D_COMP_BDW); + } +} + +/* + * This function implements pieces of two sequences from BSpec: + * - Sequence for display software to disable LCPLL + * - Sequence for display software to allow package C8+ + * The steps implemented here are just the steps that actually touch the LCPLL + * register. Callers should take care of disabling all the display engine + * functions, doing the mode unset, fixing interrupts, etc. 
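In practice the two LCPLL helpers below are only used as a pair by the package C8 entry/exit paths further down in this file; roughly:

	/* entering PC8+ (hsw_enable_pc8()) */
	lpt_disable_clkout_dp(dev_priv);
	hsw_disable_lcpll(dev_priv, true, true);	/* CDCLK to FCLK, allow power down */

	/* leaving PC8+ (hsw_disable_pc8()) */
	hsw_restore_lcpll(dev_priv);
	intel_init_pch_refclk(dev_priv);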
+ */ +static void hsw_disable_lcpll(struct drm_i915_private *dev_priv, + bool switch_to_fclk, bool allow_power_down) +{ + u32 val; + + assert_can_disable_lcpll(dev_priv); + + val = I915_READ(LCPLL_CTL); + + if (switch_to_fclk) { + val |= LCPLL_CD_SOURCE_FCLK; + I915_WRITE(LCPLL_CTL, val); + + if (wait_for_us(I915_READ(LCPLL_CTL) & + LCPLL_CD_SOURCE_FCLK_DONE, 1)) + DRM_ERROR("Switching to FCLK failed\n"); + + val = I915_READ(LCPLL_CTL); + } + + val |= LCPLL_PLL_DISABLE; + I915_WRITE(LCPLL_CTL, val); + POSTING_READ(LCPLL_CTL); + + if (intel_wait_for_register(&dev_priv->uncore, LCPLL_CTL, + LCPLL_PLL_LOCK, 0, 1)) + DRM_ERROR("LCPLL still locked\n"); + + val = hsw_read_dcomp(dev_priv); + val |= D_COMP_COMP_DISABLE; + hsw_write_dcomp(dev_priv, val); + ndelay(100); + + if (wait_for((hsw_read_dcomp(dev_priv) & + D_COMP_RCOMP_IN_PROGRESS) == 0, 1)) + DRM_ERROR("D_COMP RCOMP still in progress\n"); + + if (allow_power_down) { + val = I915_READ(LCPLL_CTL); + val |= LCPLL_POWER_DOWN_ALLOW; + I915_WRITE(LCPLL_CTL, val); + POSTING_READ(LCPLL_CTL); + } +} + +/* + * Fully restores LCPLL, disallowing power down and switching back to LCPLL + * source. + */ +static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) +{ + u32 val; + + val = I915_READ(LCPLL_CTL); + + if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK | + LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK) + return; + + /* + * Make sure we're not on PC8 state before disabling PC8, otherwise + * we'll hang the machine. To prevent PC8 state, just enable force_wake. + */ + intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); + + if (val & LCPLL_POWER_DOWN_ALLOW) { + val &= ~LCPLL_POWER_DOWN_ALLOW; + I915_WRITE(LCPLL_CTL, val); + POSTING_READ(LCPLL_CTL); + } + + val = hsw_read_dcomp(dev_priv); + val |= D_COMP_COMP_FORCE; + val &= ~D_COMP_COMP_DISABLE; + hsw_write_dcomp(dev_priv, val); + + val = I915_READ(LCPLL_CTL); + val &= ~LCPLL_PLL_DISABLE; + I915_WRITE(LCPLL_CTL, val); + + if (intel_wait_for_register(&dev_priv->uncore, LCPLL_CTL, + LCPLL_PLL_LOCK, LCPLL_PLL_LOCK, 5)) + DRM_ERROR("LCPLL not locked yet\n"); + + if (val & LCPLL_CD_SOURCE_FCLK) { + val = I915_READ(LCPLL_CTL); + val &= ~LCPLL_CD_SOURCE_FCLK; + I915_WRITE(LCPLL_CTL, val); + + if (wait_for_us((I915_READ(LCPLL_CTL) & + LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1)) + DRM_ERROR("Switching back to LCPLL failed\n"); + } + + intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); + + intel_update_cdclk(dev_priv); + intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK"); +} + +/* + * Package states C8 and deeper are really deep PC states that can only be + * reached when all the devices on the system allow it, so even if the graphics + * device allows PC8+, it doesn't mean the system will actually get to these + * states. Our driver only allows PC8+ when going into runtime PM. + * + * The requirements for PC8+ are that all the outputs are disabled, the power + * well is disabled and most interrupts are disabled, and these are also + * requirements for runtime PM. When these conditions are met, we manually do + * the other conditions: disable the interrupts, clocks and switch LCPLL refclk + * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard + * hang the machine. + * + * When we really reach PC8 or deeper states (not just when we allow it) we lose + * the state of some registers, so when we come back from PC8+ we need to + * restore this state. 
We don't get into PC8+ if we're not in RC6, so we don't + * need to take care of the registers kept by RC6. Notice that this happens even + * if we don't put the device in PCI D3 state (which is what currently happens + * because of the runtime PM support). + * + * For more, read "Display Sequences for Package C8" on the hardware + * documentation. + */ +void hsw_enable_pc8(struct drm_i915_private *dev_priv) +{ + u32 val; + + DRM_DEBUG_KMS("Enabling package C8+\n"); + + if (HAS_PCH_LPT_LP(dev_priv)) { + val = I915_READ(SOUTH_DSPCLK_GATE_D); + val &= ~PCH_LP_PARTITION_LEVEL_DISABLE; + I915_WRITE(SOUTH_DSPCLK_GATE_D, val); + } + + lpt_disable_clkout_dp(dev_priv); + hsw_disable_lcpll(dev_priv, true, true); +} + +void hsw_disable_pc8(struct drm_i915_private *dev_priv) +{ + u32 val; + + DRM_DEBUG_KMS("Disabling package C8+\n"); + + hsw_restore_lcpll(dev_priv); + intel_init_pch_refclk(dev_priv); + + if (HAS_PCH_LPT_LP(dev_priv)) { + val = I915_READ(SOUTH_DSPCLK_GATE_D); + val |= PCH_LP_PARTITION_LEVEL_DISABLE; + I915_WRITE(SOUTH_DSPCLK_GATE_D, val); + } +} + +static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv, + bool enable) +{ + i915_reg_t reg; + u32 reset_bits, val; + + if (IS_IVYBRIDGE(dev_priv)) { + reg = GEN7_MSG_CTL; + reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK; + } else { + reg = HSW_NDE_RSTWRN_OPT; + reset_bits = RESET_PCH_HANDSHAKE_ENABLE; + } + + val = I915_READ(reg); + + if (enable) + val |= reset_bits; + else + val &= ~reset_bits; + + I915_WRITE(reg, val); +} + +static void skl_display_core_init(struct drm_i915_private *dev_priv, + bool resume) +{ + struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_well *well; + + gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); + + /* enable PCH reset handshake */ + intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv)); + + /* enable PG1 and Misc I/O */ + mutex_lock(&power_domains->lock); + + well = lookup_power_well(dev_priv, SKL_DISP_PW_1); + intel_power_well_enable(dev_priv, well); + + well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO); + intel_power_well_enable(dev_priv, well); + + mutex_unlock(&power_domains->lock); + + intel_cdclk_init(dev_priv); + + gen9_dbuf_enable(dev_priv); + + if (resume && dev_priv->csr.dmc_payload) + intel_csr_load_program(dev_priv); +} + +static void skl_display_core_uninit(struct drm_i915_private *dev_priv) +{ + struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_well *well; + + gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); + + gen9_dbuf_disable(dev_priv); + + intel_cdclk_uninit(dev_priv); + + /* The spec doesn't call for removing the reset handshake flag */ + /* disable PG1 and Misc I/O */ + + mutex_lock(&power_domains->lock); + + /* + * BSpec says to keep the MISC IO power well enabled here, only + * remove our request for power well 1. + * Note that even though the driver's request is removed power well 1 + * may stay enabled after this due to DMC's own request on it. 
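The *_display_core_init() flavours in this file (SKL, BXT, CNL and ICL) all follow the same outline; schematically:

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));	/* BXT passes false */
	/* CNL/ICL additionally call intel_combo_phy_init() here */
	/* enable PW1 (and MISC IO on SKL) under power_domains->lock */
	intel_cdclk_init(dev_priv);
	gen9_dbuf_enable(dev_priv);		/* icl_dbuf_enable() on gen11 */
	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);

The matching *_display_core_uninit() paths undo the same steps in reverse, ending with the 10 us delay after dropping the driver's PW1 request.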
+ */ + well = lookup_power_well(dev_priv, SKL_DISP_PW_1); + intel_power_well_disable(dev_priv, well); + + mutex_unlock(&power_domains->lock); + + usleep_range(10, 30); /* 10 us delay per Bspec */ +} + +void bxt_display_core_init(struct drm_i915_private *dev_priv, + bool resume) +{ + struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_well *well; + + gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); + + /* + * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT + * or else the reset will hang because there is no PCH to respond. + * Move the handshake programming to initialization sequence. + * Previously was left up to BIOS. + */ + intel_pch_reset_handshake(dev_priv, false); + + /* Enable PG1 */ + mutex_lock(&power_domains->lock); + + well = lookup_power_well(dev_priv, SKL_DISP_PW_1); + intel_power_well_enable(dev_priv, well); + + mutex_unlock(&power_domains->lock); + + intel_cdclk_init(dev_priv); + + gen9_dbuf_enable(dev_priv); + + if (resume && dev_priv->csr.dmc_payload) + intel_csr_load_program(dev_priv); +} + +void bxt_display_core_uninit(struct drm_i915_private *dev_priv) +{ + struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_well *well; + + gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); + + gen9_dbuf_disable(dev_priv); + + intel_cdclk_uninit(dev_priv); + + /* The spec doesn't call for removing the reset handshake flag */ + + /* + * Disable PW1 (PG1). + * Note that even though the driver's request is removed power well 1 + * may stay enabled after this due to DMC's own request on it. + */ + mutex_lock(&power_domains->lock); + + well = lookup_power_well(dev_priv, SKL_DISP_PW_1); + intel_power_well_disable(dev_priv, well); + + mutex_unlock(&power_domains->lock); + + usleep_range(10, 30); /* 10 us delay per Bspec */ +} + +static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume) +{ + struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_well *well; + + gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); + + /* 1. Enable PCH Reset Handshake */ + intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv)); + + /* 2-3. */ + intel_combo_phy_init(dev_priv); + + /* + * 4. Enable Power Well 1 (PG1). + * The AUX IO power wells will be enabled on demand. + */ + mutex_lock(&power_domains->lock); + well = lookup_power_well(dev_priv, SKL_DISP_PW_1); + intel_power_well_enable(dev_priv, well); + mutex_unlock(&power_domains->lock); + + /* 5. Enable CD clock */ + intel_cdclk_init(dev_priv); + + /* 6. Enable DBUF */ + gen9_dbuf_enable(dev_priv); + + if (resume && dev_priv->csr.dmc_payload) + intel_csr_load_program(dev_priv); +} + +static void cnl_display_core_uninit(struct drm_i915_private *dev_priv) +{ + struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_well *well; + + gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); + + /* 1. Disable all display engine functions -> aready done */ + + /* 2. Disable DBUF */ + gen9_dbuf_disable(dev_priv); + + /* 3. Disable CD clock */ + intel_cdclk_uninit(dev_priv); + + /* + * 4. Disable Power Well 1 (PG1). + * The AUX IO power wells are toggled on demand, so they are already + * disabled at this point. + */ + mutex_lock(&power_domains->lock); + well = lookup_power_well(dev_priv, SKL_DISP_PW_1); + intel_power_well_disable(dev_priv, well); + mutex_unlock(&power_domains->lock); + + usleep_range(10, 30); /* 10 us delay per Bspec */ + + /* 5. 
*/ + intel_combo_phy_uninit(dev_priv); +} + +void icl_display_core_init(struct drm_i915_private *dev_priv, + bool resume) +{ + struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_well *well; + + gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); + + /* 1. Enable PCH reset handshake. */ + intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv)); + + /* 2. Initialize all combo phys */ + intel_combo_phy_init(dev_priv); + + /* + * 3. Enable Power Well 1 (PG1). + * The AUX IO power wells will be enabled on demand. + */ + mutex_lock(&power_domains->lock); + well = lookup_power_well(dev_priv, SKL_DISP_PW_1); + intel_power_well_enable(dev_priv, well); + mutex_unlock(&power_domains->lock); + + /* 4. Enable CDCLK. */ + intel_cdclk_init(dev_priv); + + /* 5. Enable DBUF. */ + icl_dbuf_enable(dev_priv); + + /* 6. Setup MBUS. */ + icl_mbus_init(dev_priv); + + if (resume && dev_priv->csr.dmc_payload) + intel_csr_load_program(dev_priv); +} + +void icl_display_core_uninit(struct drm_i915_private *dev_priv) +{ + struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_well *well; + + gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); + + /* 1. Disable all display engine functions -> aready done */ + + /* 2. Disable DBUF */ + icl_dbuf_disable(dev_priv); + + /* 3. Disable CD clock */ + intel_cdclk_uninit(dev_priv); + + /* + * 4. Disable Power Well 1 (PG1). + * The AUX IO power wells are toggled on demand, so they are already + * disabled at this point. + */ + mutex_lock(&power_domains->lock); + well = lookup_power_well(dev_priv, SKL_DISP_PW_1); + intel_power_well_disable(dev_priv, well); + mutex_unlock(&power_domains->lock); + + /* 5. */ + intel_combo_phy_uninit(dev_priv); +} + +static void chv_phy_control_init(struct drm_i915_private *dev_priv) +{ + struct i915_power_well *cmn_bc = + lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC); + struct i915_power_well *cmn_d = + lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D); + + /* + * DISPLAY_PHY_CONTROL can get corrupted if read. As a + * workaround never ever read DISPLAY_PHY_CONTROL, and + * instead maintain a shadow copy ourselves. Use the actual + * power well state and lane status to reconstruct the + * expected initial value. + */ + dev_priv->chv_phy_control = + PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) | + PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) | + PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) | + PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) | + PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0); + + /* + * If all lanes are disabled we leave the override disabled + * with all power down bits cleared to match the state we + * would use after disabling the port. Otherwise enable the + * override and set the lane powerdown bits accding to the + * current lane status. 
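Because reading DISPLAY_PHY_CONTROL can corrupt it, later updates go through the dev_priv->chv_phy_control shadow and write the whole value back. The helpers that do this, chv_phy_powergate_lanes()/chv_phy_powergate_ch(), are declared in the new header but not part of this hunk; the pattern is along these lines (sketch, phy/ch/mask are placeholders):

	dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
	dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);
	I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);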
+ */ + if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) { + u32 status = I915_READ(DPLL(PIPE_A)); + unsigned int mask; + + mask = status & DPLL_PORTB_READY_MASK; + if (mask == 0xf) + mask = 0x0; + else + dev_priv->chv_phy_control |= + PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0); + + dev_priv->chv_phy_control |= + PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0); + + mask = (status & DPLL_PORTC_READY_MASK) >> 4; + if (mask == 0xf) + mask = 0x0; + else + dev_priv->chv_phy_control |= + PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1); + + dev_priv->chv_phy_control |= + PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1); + + dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0); + + dev_priv->chv_phy_assert[DPIO_PHY0] = false; + } else { + dev_priv->chv_phy_assert[DPIO_PHY0] = true; + } + + if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) { + u32 status = I915_READ(DPIO_PHY_STATUS); + unsigned int mask; + + mask = status & DPLL_PORTD_READY_MASK; + + if (mask == 0xf) + mask = 0x0; + else + dev_priv->chv_phy_control |= + PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0); + + dev_priv->chv_phy_control |= + PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0); + + dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1); + + dev_priv->chv_phy_assert[DPIO_PHY1] = false; + } else { + dev_priv->chv_phy_assert[DPIO_PHY1] = true; + } + + I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control); + + DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n", + dev_priv->chv_phy_control); +} + +static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv) +{ + struct i915_power_well *cmn = + lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC); + struct i915_power_well *disp2d = + lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D); + + /* If the display might be already active skip this */ + if (cmn->desc->ops->is_enabled(dev_priv, cmn) && + disp2d->desc->ops->is_enabled(dev_priv, disp2d) && + I915_READ(DPIO_CTL) & DPIO_CMNRST) + return; + + DRM_DEBUG_KMS("toggling display PHY side reset\n"); + + /* cmnlane needs DPLL registers */ + disp2d->desc->ops->enable(dev_priv, disp2d); + + /* + * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx: + * Need to assert and de-assert PHY SB reset by gating the + * common lane power, then un-gating it. + * Simply ungating isn't enough to reset the PHY enough to get + * ports and lanes running. 
+ */ + cmn->desc->ops->disable(dev_priv, cmn); +} + +static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0) +{ + bool ret; + + vlv_punit_get(dev_priv); + ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE; + vlv_punit_put(dev_priv); + + return ret; +} + +static void assert_ved_power_gated(struct drm_i915_private *dev_priv) +{ + WARN(!vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0), + "VED not power gated\n"); +} + +static void assert_isp_power_gated(struct drm_i915_private *dev_priv) +{ + static const struct pci_device_id isp_ids[] = { + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)}, + {} + }; + + WARN(!pci_dev_present(isp_ids) && + !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0), + "ISP not power gated\n"); +} + +static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv); + +/** + * intel_power_domains_init_hw - initialize hardware power domain state + * @i915: i915 device instance + * @resume: Called from resume code paths or not + * + * This function initializes the hardware power domain state and enables all + * power wells belonging to the INIT power domain. Power wells in other + * domains (and not in the INIT domain) are referenced or disabled by + * intel_modeset_readout_hw_state(). After that the reference count of each + * power well must match its HW enabled state, see + * intel_power_domains_verify_state(). + * + * It will return with power domains disabled (to be enabled later by + * intel_power_domains_enable()) and must be paired with + * intel_power_domains_fini_hw(). + */ +void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume) +{ + struct i915_power_domains *power_domains = &i915->power_domains; + + power_domains->initializing = true; + + if (INTEL_GEN(i915) >= 11) { + icl_display_core_init(i915, resume); + } else if (IS_CANNONLAKE(i915)) { + cnl_display_core_init(i915, resume); + } else if (IS_GEN9_BC(i915)) { + skl_display_core_init(i915, resume); + } else if (IS_GEN9_LP(i915)) { + bxt_display_core_init(i915, resume); + } else if (IS_CHERRYVIEW(i915)) { + mutex_lock(&power_domains->lock); + chv_phy_control_init(i915); + mutex_unlock(&power_domains->lock); + assert_isp_power_gated(i915); + } else if (IS_VALLEYVIEW(i915)) { + mutex_lock(&power_domains->lock); + vlv_cmnlane_wa(i915); + mutex_unlock(&power_domains->lock); + assert_ved_power_gated(i915); + assert_isp_power_gated(i915); + } else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) { + hsw_assert_cdclk(i915); + intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915)); + } else if (IS_IVYBRIDGE(i915)) { + intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915)); + } + + /* + * Keep all power wells enabled for any dependent HW access during + * initialization and to make sure we keep BIOS enabled display HW + * resources powered until display HW readout is complete. We drop + * this reference in intel_power_domains_enable(). + */ + power_domains->wakeref = + intel_display_power_get(i915, POWER_DOMAIN_INIT); + + /* Disable power support if the user asked so. */ + if (!i915_modparams.disable_power_well) + intel_display_power_get(i915, POWER_DOMAIN_INIT); + intel_power_domains_sync_hw(i915); + + power_domains->initializing = false; +} + +/** + * intel_power_domains_fini_hw - deinitialize hw power domain state + * @i915: i915 device instance + * + * De-initializes the display power domain HW state. It also ensures that the + * device stays powered up so that the driver can be reloaded. 
+ * + * It must be called with power domains already disabled (after a call to + * intel_power_domains_disable()) and must be paired with + * intel_power_domains_init_hw(). + */ +void intel_power_domains_fini_hw(struct drm_i915_private *i915) +{ + intel_wakeref_t wakeref __maybe_unused = + fetch_and_zero(&i915->power_domains.wakeref); + + /* Remove the refcount we took to keep power well support disabled. */ + if (!i915_modparams.disable_power_well) + intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT); + + intel_display_power_flush_work_sync(i915); + + intel_power_domains_verify_state(i915); + + /* Keep the power well enabled, but cancel its rpm wakeref. */ + intel_runtime_pm_put(i915, wakeref); +} + +/** + * intel_power_domains_enable - enable toggling of display power wells + * @i915: i915 device instance + * + * Enable the ondemand enabling/disabling of the display power wells. Note that + * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled + * only at specific points of the display modeset sequence, thus they are not + * affected by the intel_power_domains_enable()/disable() calls. The purpose + * of these function is to keep the rest of power wells enabled until the end + * of display HW readout (which will acquire the power references reflecting + * the current HW state). + */ +void intel_power_domains_enable(struct drm_i915_private *i915) +{ + intel_wakeref_t wakeref __maybe_unused = + fetch_and_zero(&i915->power_domains.wakeref); + + intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref); + intel_power_domains_verify_state(i915); +} + +/** + * intel_power_domains_disable - disable toggling of display power wells + * @i915: i915 device instance + * + * Disable the ondemand enabling/disabling of the display power wells. See + * intel_power_domains_enable() for which power wells this call controls. + */ +void intel_power_domains_disable(struct drm_i915_private *i915) +{ + struct i915_power_domains *power_domains = &i915->power_domains; + + WARN_ON(power_domains->wakeref); + power_domains->wakeref = + intel_display_power_get(i915, POWER_DOMAIN_INIT); + + intel_power_domains_verify_state(i915); +} + +/** + * intel_power_domains_suspend - suspend power domain state + * @i915: i915 device instance + * @suspend_mode: specifies the target suspend state (idle, mem, hibernation) + * + * This function prepares the hardware power domain state before entering + * system suspend. + * + * It must be called with power domains already disabled (after a call to + * intel_power_domains_disable()) and paired with intel_power_domains_resume(). + */ +void intel_power_domains_suspend(struct drm_i915_private *i915, + enum i915_drm_suspend_mode suspend_mode) +{ + struct i915_power_domains *power_domains = &i915->power_domains; + intel_wakeref_t wakeref __maybe_unused = + fetch_and_zero(&power_domains->wakeref); + + intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref); + + /* + * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9 + * support don't manually deinit the power domains. This also means the + * CSR/DMC firmware will stay active, it will power down any HW + * resources as required and also enable deeper system power states + * that would be blocked if the firmware was inactive. 
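Taken together, the entry points above are meant to be called by the driver core in a fixed order; roughly:

	intel_power_domains_init(i915);
	intel_power_domains_init_hw(i915, false);	/* takes the POWER_DOMAIN_INIT wakeref */
	/* ... display HW state readout ... */
	intel_power_domains_enable(i915);		/* drops it, wells may now toggle */

	/* and on the way down: */
	intel_power_domains_disable(i915);		/* re-takes the INIT wakeref */
	intel_power_domains_suspend(i915, suspend_mode);	/* or intel_power_domains_fini_hw() on unload */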
+ */ + if (!(i915->csr.allowed_dc_mask & DC_STATE_EN_DC9) && + suspend_mode == I915_DRM_SUSPEND_IDLE && + i915->csr.dmc_payload) { + intel_display_power_flush_work(i915); + intel_power_domains_verify_state(i915); + return; + } + + /* + * Even if power well support was disabled we still want to disable + * power wells if power domains must be deinitialized for suspend. + */ + if (!i915_modparams.disable_power_well) + intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT); + + intel_display_power_flush_work(i915); + intel_power_domains_verify_state(i915); + + if (INTEL_GEN(i915) >= 11) + icl_display_core_uninit(i915); + else if (IS_CANNONLAKE(i915)) + cnl_display_core_uninit(i915); + else if (IS_GEN9_BC(i915)) + skl_display_core_uninit(i915); + else if (IS_GEN9_LP(i915)) + bxt_display_core_uninit(i915); + + power_domains->display_core_suspended = true; +} + +/** + * intel_power_domains_resume - resume power domain state + * @i915: i915 device instance + * + * This function resume the hardware power domain state during system resume. + * + * It will return with power domain support disabled (to be enabled later by + * intel_power_domains_enable()) and must be paired with + * intel_power_domains_suspend(). + */ +void intel_power_domains_resume(struct drm_i915_private *i915) +{ + struct i915_power_domains *power_domains = &i915->power_domains; + + if (power_domains->display_core_suspended) { + intel_power_domains_init_hw(i915, true); + power_domains->display_core_suspended = false; + } else { + WARN_ON(power_domains->wakeref); + power_domains->wakeref = + intel_display_power_get(i915, POWER_DOMAIN_INIT); + } + + intel_power_domains_verify_state(i915); +} + +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) + +static void intel_power_domains_dump_info(struct drm_i915_private *i915) +{ + struct i915_power_domains *power_domains = &i915->power_domains; + struct i915_power_well *power_well; + + for_each_power_well(i915, power_well) { + enum intel_display_power_domain domain; + + DRM_DEBUG_DRIVER("%-25s %d\n", + power_well->desc->name, power_well->count); + + for_each_power_domain(domain, power_well->desc->domains) + DRM_DEBUG_DRIVER(" %-23s %d\n", + intel_display_power_domain_str(domain), + power_domains->domain_use_count[domain]); + } +} + +/** + * intel_power_domains_verify_state - verify the HW/SW state for all power wells + * @i915: i915 device instance + * + * Verify if the reference count of each power well matches its HW enabled + * state and the total refcount of the domains it belongs to. This must be + * called after modeset HW state sanitization, which is responsible for + * acquiring reference counts for any power wells in use and disabling the + * ones left on by BIOS but not required by any active output. 
+ */ +static void intel_power_domains_verify_state(struct drm_i915_private *i915) +{ + struct i915_power_domains *power_domains = &i915->power_domains; + struct i915_power_well *power_well; + bool dump_domain_info; + + mutex_lock(&power_domains->lock); + + verify_async_put_domains_state(power_domains); + + dump_domain_info = false; + for_each_power_well(i915, power_well) { + enum intel_display_power_domain domain; + int domains_count; + bool enabled; + + enabled = power_well->desc->ops->is_enabled(i915, power_well); + if ((power_well->count || power_well->desc->always_on) != + enabled) + DRM_ERROR("power well %s state mismatch (refcount %d/enabled %d)", + power_well->desc->name, + power_well->count, enabled); + + domains_count = 0; + for_each_power_domain(domain, power_well->desc->domains) + domains_count += power_domains->domain_use_count[domain]; + + if (power_well->count != domains_count) { + DRM_ERROR("power well %s refcount/domain refcount mismatch " + "(refcount %d/domains refcount %d)\n", + power_well->desc->name, power_well->count, + domains_count); + dump_domain_info = true; + } + } + + if (dump_domain_info) { + static bool dumped; + + if (!dumped) { + intel_power_domains_dump_info(i915); + dumped = true; + } + } + + mutex_unlock(&power_domains->lock); +} + +#else + +static void intel_power_domains_verify_state(struct drm_i915_private *i915) +{ +} + +#endif diff --git a/drivers/gpu/drm/i915/intel_display_power.h b/drivers/gpu/drm/i915/intel_display_power.h new file mode 100644 index 000000000000..95fcbe8e2346 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_display_power.h @@ -0,0 +1,96 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2019 Intel Corporation + */ + +#ifndef __INTEL_DISPLAY_POWER_H__ +#define __INTEL_DISPLAY_POWER_H__ + +#include "intel_display.h" +#include "intel_runtime_pm.h" + +struct drm_i915_private; +struct intel_encoder; + +void skl_enable_dc6(struct drm_i915_private *dev_priv); +void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv); +void bxt_enable_dc9(struct drm_i915_private *dev_priv); +void bxt_disable_dc9(struct drm_i915_private *dev_priv); +void gen9_enable_dc5(struct drm_i915_private *dev_priv); + +int intel_power_domains_init(struct drm_i915_private *dev_priv); +void intel_power_domains_cleanup(struct drm_i915_private *dev_priv); +void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume); +void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv); +void icl_display_core_init(struct drm_i915_private *dev_priv, bool resume); +void icl_display_core_uninit(struct drm_i915_private *dev_priv); +void intel_power_domains_enable(struct drm_i915_private *dev_priv); +void intel_power_domains_disable(struct drm_i915_private *dev_priv); +void intel_power_domains_suspend(struct drm_i915_private *dev_priv, + enum i915_drm_suspend_mode); +void intel_power_domains_resume(struct drm_i915_private *dev_priv); +void hsw_enable_pc8(struct drm_i915_private *dev_priv); +void hsw_disable_pc8(struct drm_i915_private *dev_priv); +void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume); +void bxt_display_core_uninit(struct drm_i915_private *dev_priv); + +const char * +intel_display_power_domain_str(enum intel_display_power_domain domain); + +bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv, + enum intel_display_power_domain domain); +bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv, + enum intel_display_power_domain domain); +intel_wakeref_t 
intel_display_power_get(struct drm_i915_private *dev_priv, + enum intel_display_power_domain domain); +intel_wakeref_t +intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv, + enum intel_display_power_domain domain); +void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv, + enum intel_display_power_domain domain); +void __intel_display_power_put_async(struct drm_i915_private *i915, + enum intel_display_power_domain domain, + intel_wakeref_t wakeref); +void intel_display_power_flush_work(struct drm_i915_private *i915); +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) +void intel_display_power_put(struct drm_i915_private *dev_priv, + enum intel_display_power_domain domain, + intel_wakeref_t wakeref); +static inline void +intel_display_power_put_async(struct drm_i915_private *i915, + enum intel_display_power_domain domain, + intel_wakeref_t wakeref) +{ + __intel_display_power_put_async(i915, domain, wakeref); +} +#else +static inline void +intel_display_power_put(struct drm_i915_private *i915, + enum intel_display_power_domain domain, + intel_wakeref_t wakeref) +{ + intel_display_power_put_unchecked(i915, domain); +} + +static inline void +intel_display_power_put_async(struct drm_i915_private *i915, + enum intel_display_power_domain domain, + intel_wakeref_t wakeref) +{ + __intel_display_power_put_async(i915, domain, -1); +} +#endif + +#define with_intel_display_power(i915, domain, wf) \ + for ((wf) = intel_display_power_get((i915), (domain)); (wf); \ + intel_display_power_put_async((i915), (domain), (wf)), (wf) = 0) + +void icl_dbuf_slices_update(struct drm_i915_private *dev_priv, + u8 req_slices); + +void chv_phy_powergate_lanes(struct intel_encoder *encoder, + bool override, unsigned int mask); +bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy, + enum dpio_channel ch, bool override); + +#endif /* __INTEL_DISPLAY_POWER_H__ */ diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 12f5b669f20e..3bdeea596ad5 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -32,16 +32,6 @@ #include #include "i915_drv.h" -#include "i915_irq.h" -#include "intel_cdclk.h" -#include "intel_combo_phy.h" -#include "intel_crt.h" -#include "intel_csr.h" -#include "intel_dp.h" -#include "intel_dpio_phy.h" -#include "intel_drv.h" -#include "intel_hotplug.h" -#include "intel_sideband.h" /** * DOC: runtime pm @@ -60,22 +50,6 @@ * present for a given platform. 
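The with_intel_display_power() helper added in the header above scopes a display power reference over a statement block; typical use looks like this (sketch, the wakeref variable name is arbitrary):

	intel_wakeref_t wakeref;

	with_intel_display_power(dev_priv, POWER_DOMAIN_AUDIO, wakeref) {
		/* registers behind the AUDIO well may be accessed here */
	}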
*/ -static intel_wakeref_t intel_runtime_pm_get_raw(struct drm_i915_private *i915); -static void -__intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref, - bool wakelock); - -#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) -static void -intel_runtime_pm_put_raw(struct drm_i915_private *i915, intel_wakeref_t wref); -#else -static inline void intel_runtime_pm_put_raw(struct drm_i915_private *i915, - intel_wakeref_t wref) -{ - __intel_runtime_pm_put(i915, -1, false); -} -#endif - #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) #include @@ -391,4727 +365,160 @@ intel_runtime_pm_release(struct drm_i915_private *i915, int wakelock) __intel_wakeref_dec_and_check_tracking(i915); } -bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv, - enum i915_power_well_id power_well_id); - -const char * -intel_display_power_domain_str(enum intel_display_power_domain domain) -{ - switch (domain) { - case POWER_DOMAIN_DISPLAY_CORE: - return "DISPLAY_CORE"; - case POWER_DOMAIN_PIPE_A: - return "PIPE_A"; - case POWER_DOMAIN_PIPE_B: - return "PIPE_B"; - case POWER_DOMAIN_PIPE_C: - return "PIPE_C"; - case POWER_DOMAIN_PIPE_A_PANEL_FITTER: - return "PIPE_A_PANEL_FITTER"; - case POWER_DOMAIN_PIPE_B_PANEL_FITTER: - return "PIPE_B_PANEL_FITTER"; - case POWER_DOMAIN_PIPE_C_PANEL_FITTER: - return "PIPE_C_PANEL_FITTER"; - case POWER_DOMAIN_TRANSCODER_A: - return "TRANSCODER_A"; - case POWER_DOMAIN_TRANSCODER_B: - return "TRANSCODER_B"; - case POWER_DOMAIN_TRANSCODER_C: - return "TRANSCODER_C"; - case POWER_DOMAIN_TRANSCODER_EDP: - return "TRANSCODER_EDP"; - case POWER_DOMAIN_TRANSCODER_EDP_VDSC: - return "TRANSCODER_EDP_VDSC"; - case POWER_DOMAIN_TRANSCODER_DSI_A: - return "TRANSCODER_DSI_A"; - case POWER_DOMAIN_TRANSCODER_DSI_C: - return "TRANSCODER_DSI_C"; - case POWER_DOMAIN_PORT_DDI_A_LANES: - return "PORT_DDI_A_LANES"; - case POWER_DOMAIN_PORT_DDI_B_LANES: - return "PORT_DDI_B_LANES"; - case POWER_DOMAIN_PORT_DDI_C_LANES: - return "PORT_DDI_C_LANES"; - case POWER_DOMAIN_PORT_DDI_D_LANES: - return "PORT_DDI_D_LANES"; - case POWER_DOMAIN_PORT_DDI_E_LANES: - return "PORT_DDI_E_LANES"; - case POWER_DOMAIN_PORT_DDI_F_LANES: - return "PORT_DDI_F_LANES"; - case POWER_DOMAIN_PORT_DDI_A_IO: - return "PORT_DDI_A_IO"; - case POWER_DOMAIN_PORT_DDI_B_IO: - return "PORT_DDI_B_IO"; - case POWER_DOMAIN_PORT_DDI_C_IO: - return "PORT_DDI_C_IO"; - case POWER_DOMAIN_PORT_DDI_D_IO: - return "PORT_DDI_D_IO"; - case POWER_DOMAIN_PORT_DDI_E_IO: - return "PORT_DDI_E_IO"; - case POWER_DOMAIN_PORT_DDI_F_IO: - return "PORT_DDI_F_IO"; - case POWER_DOMAIN_PORT_DSI: - return "PORT_DSI"; - case POWER_DOMAIN_PORT_CRT: - return "PORT_CRT"; - case POWER_DOMAIN_PORT_OTHER: - return "PORT_OTHER"; - case POWER_DOMAIN_VGA: - return "VGA"; - case POWER_DOMAIN_AUDIO: - return "AUDIO"; - case POWER_DOMAIN_AUX_A: - return "AUX_A"; - case POWER_DOMAIN_AUX_B: - return "AUX_B"; - case POWER_DOMAIN_AUX_C: - return "AUX_C"; - case POWER_DOMAIN_AUX_D: - return "AUX_D"; - case POWER_DOMAIN_AUX_E: - return "AUX_E"; - case POWER_DOMAIN_AUX_F: - return "AUX_F"; - case POWER_DOMAIN_AUX_IO_A: - return "AUX_IO_A"; - case POWER_DOMAIN_AUX_TBT1: - return "AUX_TBT1"; - case POWER_DOMAIN_AUX_TBT2: - return "AUX_TBT2"; - case POWER_DOMAIN_AUX_TBT3: - return "AUX_TBT3"; - case POWER_DOMAIN_AUX_TBT4: - return "AUX_TBT4"; - case POWER_DOMAIN_GMBUS: - return "GMBUS"; - case POWER_DOMAIN_INIT: - return "INIT"; - case POWER_DOMAIN_MODESET: - return "MODESET"; - case POWER_DOMAIN_GT_IRQ: - return "GT_IRQ"; - default: - MISSING_CASE(domain); 
- return "?"; - } -} - -static void intel_power_well_enable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - DRM_DEBUG_KMS("enabling %s\n", power_well->desc->name); - power_well->desc->ops->enable(dev_priv, power_well); - power_well->hw_enabled = true; -} - -static void intel_power_well_disable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) +static intel_wakeref_t __intel_runtime_pm_get(struct drm_i915_private *i915, + bool wakelock) { - DRM_DEBUG_KMS("disabling %s\n", power_well->desc->name); - power_well->hw_enabled = false; - power_well->desc->ops->disable(dev_priv, power_well); -} + struct pci_dev *pdev = i915->drm.pdev; + struct device *kdev = &pdev->dev; + int ret; -static void intel_power_well_get(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - if (!power_well->count++) - intel_power_well_enable(dev_priv, power_well); -} + ret = pm_runtime_get_sync(kdev); + WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret); -static void intel_power_well_put(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - WARN(!power_well->count, "Use count on power well %s is already zero", - power_well->desc->name); + intel_runtime_pm_acquire(i915, wakelock); - if (!--power_well->count) - intel_power_well_disable(dev_priv, power_well); + return track_intel_runtime_pm_wakeref(i915); } /** - * __intel_display_power_is_enabled - unlocked check for a power domain - * @dev_priv: i915 device instance - * @domain: power domain to check + * intel_runtime_pm_get_raw - grab a raw runtime pm reference + * @i915: i915 device instance * * This is the unlocked version of intel_display_power_is_enabled() and should * only be used from error capture and recovery code where deadlocks are * possible. + * This function grabs a device-level runtime pm reference (mostly used for + * asynchronous PM management from display code) and ensures that it is powered + * up. Raw references are not considered during wakelock assert checks. * * Returns: * True when the power domain is enabled, false otherwise. + * Any runtime pm reference obtained by this function must have a symmetric + * call to intel_runtime_pm_put_raw() to release the reference again. + * + * Returns: the wakeref cookie to pass to intel_runtime_pm_put_raw(), evaluates + * as True if the wakeref was acquired, or False otherwise. */ -bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv, - enum intel_display_power_domain domain) -{ - struct i915_power_well *power_well; - bool is_enabled; - - if (dev_priv->runtime_pm.suspended) - return false; - - is_enabled = true; - - for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) { - if (power_well->desc->always_on) - continue; - - if (!power_well->hw_enabled) { - is_enabled = false; - break; - } - } - return is_enabled; +intel_wakeref_t intel_runtime_pm_get_raw(struct drm_i915_private *i915) +{ + return __intel_runtime_pm_get(i915, false); } /** - * intel_display_power_is_enabled - check for a power domain - * @dev_priv: i915 device instance - * @domain: power domain to check + * intel_runtime_pm_get - grab a runtime pm reference + * @i915: i915 device instance * - * This function can be used to check the hw power domain state. It is mostly - * used in hardware state readout functions. Everywhere else code should rely - * upon explicit power domain reference counting to ensure that the hardware - * block is powered up before accessing it. 
+ * This function grabs a device-level runtime pm reference (mostly used for GEM + * code to ensure the GTT or GT is on) and ensures that it is powered up. * - * Callers must hold the relevant modesetting locks to ensure that concurrent - * threads can't disable the power well while the caller tries to read a few - * registers. + * Any runtime pm reference obtained by this function must have a symmetric + * call to intel_runtime_pm_put() to release the reference again. * - * Returns: - * True when the power domain is enabled, false otherwise. + * Returns: the wakeref cookie to pass to intel_runtime_pm_put() */ -bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv, - enum intel_display_power_domain domain) +intel_wakeref_t intel_runtime_pm_get(struct drm_i915_private *i915) { - struct i915_power_domains *power_domains; - bool ret; - - power_domains = &dev_priv->power_domains; - - mutex_lock(&power_domains->lock); - ret = __intel_display_power_is_enabled(dev_priv, domain); - mutex_unlock(&power_domains->lock); - - return ret; + return __intel_runtime_pm_get(i915, true); } -/* - * Starting with Haswell, we have a "Power Down Well" that can be turned off - * when not needed anymore. We have 4 registers that can request the power well - * to be enabled, and it will only be disabled if none of the registers is - * requesting it to be enabled. +/** + * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use + * @i915: i915 device instance + * + * This function grabs a device-level runtime pm reference if the device is + * already in use and ensures that it is powered up. It is illegal to try + * and access the HW should intel_runtime_pm_get_if_in_use() report failure. + * + * Any runtime pm reference obtained by this function must have a symmetric + * call to intel_runtime_pm_put() to release the reference again. + * + * Returns: the wakeref cookie to pass to intel_runtime_pm_put(), evaluates + * as True if the wakeref was acquired, or False otherwise. */ -static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv, - u8 irq_pipe_mask, bool has_vga) -{ - struct pci_dev *pdev = dev_priv->drm.pdev; - - /* - * After we re-enable the power well, if we touch VGA register 0x3d5 - * we'll get unclaimed register interrupts. This stops after we write - * anything to the VGA MSR register. The vgacon module uses this - * register all the time, so if we unbind our driver and, as a - * consequence, bind vgacon, we'll get stuck in an infinite loop at - * console_unlock(). So make here we touch the VGA MSR register, making - * sure vgacon can keep working normally without triggering interrupts - * and error messages. - */ - if (has_vga) { - vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO); - outb(inb(VGA_MSR_READ), VGA_MSR_WRITE); - vga_put(pdev, VGA_RSRC_LEGACY_IO); - } - - if (irq_pipe_mask) - gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask); -} - -static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv, - u8 irq_pipe_mask) -{ - if (irq_pipe_mask) - gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask); -} - - -static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; - int pw_idx = power_well->desc->hsw.idx; - - /* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. 
*/ - WARN_ON(intel_wait_for_register(&dev_priv->uncore, - regs->driver, - HSW_PWR_WELL_CTL_STATE(pw_idx), - HSW_PWR_WELL_CTL_STATE(pw_idx), - 1)); -} - -static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv, - const struct i915_power_well_regs *regs, - int pw_idx) -{ - u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx); - u32 ret; - - ret = I915_READ(regs->bios) & req_mask ? 1 : 0; - ret |= I915_READ(regs->driver) & req_mask ? 2 : 0; - if (regs->kvmr.reg) - ret |= I915_READ(regs->kvmr) & req_mask ? 4 : 0; - ret |= I915_READ(regs->debug) & req_mask ? 8 : 0; - - return ret; -} - -static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; - int pw_idx = power_well->desc->hsw.idx; - bool disabled; - u32 reqs; - - /* - * Bspec doesn't require waiting for PWs to get disabled, but still do - * this for paranoia. The known cases where a PW will be forced on: - * - a KVMR request on any power well via the KVMR request register - * - a DMC request on PW1 and MISC_IO power wells via the BIOS and - * DEBUG request registers - * Skip the wait in case any of the request bits are set and print a - * diagnostic message. - */ - wait_for((disabled = !(I915_READ(regs->driver) & - HSW_PWR_WELL_CTL_STATE(pw_idx))) || - (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1); - if (disabled) - return; - - DRM_DEBUG_KMS("%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n", - power_well->desc->name, - !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8)); -} - -static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv, - enum skl_power_gate pg) -{ - /* Timeout 5us for PG#0, for other PGs 1us */ - WARN_ON(intel_wait_for_register(&dev_priv->uncore, SKL_FUSE_STATUS, - SKL_FUSE_PG_DIST_STATUS(pg), - SKL_FUSE_PG_DIST_STATUS(pg), 1)); -} - -static void hsw_power_well_enable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) +intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915) { - const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; - int pw_idx = power_well->desc->hsw.idx; - bool wait_fuses = power_well->desc->hsw.has_fuses; - enum skl_power_gate uninitialized_var(pg); - u32 val; + if (IS_ENABLED(CONFIG_PM)) { + struct pci_dev *pdev = i915->drm.pdev; + struct device *kdev = &pdev->dev; - if (wait_fuses) { - pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) : - SKL_PW_CTL_IDX_TO_PG(pw_idx); /* - * For PW1 we have to wait both for the PW0/PG0 fuse state - * before enabling the power well and PW1/PG1's own fuse - * state after the enabling. For all other power wells with - * fuses we only have to wait for that PW/PG's fuse state - * after the enabling. + * In cases runtime PM is disabled by the RPM core and we get + * an -EINVAL return value we are not supposed to call this + * function, since the power state is undefined. This applies + * atm to the late/early system suspend/resume handlers. 
*/ - if (pg == SKL_PG1) - gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0); - } - - val = I915_READ(regs->driver); - I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx)); - hsw_wait_for_power_well_enable(dev_priv, power_well); - - /* Display WA #1178: cnl */ - if (IS_CANNONLAKE(dev_priv) && - pw_idx >= GLK_PW_CTL_IDX_AUX_B && - pw_idx <= CNL_PW_CTL_IDX_AUX_F) { - val = I915_READ(CNL_AUX_ANAOVRD1(pw_idx)); - val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS; - I915_WRITE(CNL_AUX_ANAOVRD1(pw_idx), val); - } - - if (wait_fuses) - gen9_wait_for_power_well_fuses(dev_priv, pg); - - hsw_power_well_post_enable(dev_priv, - power_well->desc->hsw.irq_pipe_mask, - power_well->desc->hsw.has_vga); -} - -static void hsw_power_well_disable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; - int pw_idx = power_well->desc->hsw.idx; - u32 val; - - hsw_power_well_pre_disable(dev_priv, - power_well->desc->hsw.irq_pipe_mask); - - val = I915_READ(regs->driver); - I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx)); - hsw_wait_for_power_well_disable(dev_priv, power_well); -} - -#define ICL_AUX_PW_TO_PORT(pw_idx) ((pw_idx) - ICL_PW_CTL_IDX_AUX_A) - -static void -icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; - int pw_idx = power_well->desc->hsw.idx; - enum port port = ICL_AUX_PW_TO_PORT(pw_idx); - u32 val; - - val = I915_READ(regs->driver); - I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx)); - - val = I915_READ(ICL_PORT_CL_DW12(port)); - I915_WRITE(ICL_PORT_CL_DW12(port), val | ICL_LANE_ENABLE_AUX); - - hsw_wait_for_power_well_enable(dev_priv, power_well); - - /* Display WA #1178: icl */ - if (IS_ICELAKE(dev_priv) && - pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B && - !intel_bios_is_port_edp(dev_priv, port)) { - val = I915_READ(ICL_AUX_ANAOVRD1(pw_idx)); - val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS; - I915_WRITE(ICL_AUX_ANAOVRD1(pw_idx), val); + if (pm_runtime_get_if_in_use(kdev) <= 0) + return 0; } -} - -static void -icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; - int pw_idx = power_well->desc->hsw.idx; - enum port port = ICL_AUX_PW_TO_PORT(pw_idx); - u32 val; - - val = I915_READ(ICL_PORT_CL_DW12(port)); - I915_WRITE(ICL_PORT_CL_DW12(port), val & ~ICL_LANE_ENABLE_AUX); - - val = I915_READ(regs->driver); - I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx)); - - hsw_wait_for_power_well_disable(dev_priv, power_well); -} - -#define ICL_AUX_PW_TO_CH(pw_idx) \ - ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A) - -static void -icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - enum aux_ch aux_ch = ICL_AUX_PW_TO_CH(power_well->desc->hsw.idx); - u32 val; - - val = I915_READ(DP_AUX_CH_CTL(aux_ch)); - val &= ~DP_AUX_CH_CTL_TBT_IO; - if (power_well->desc->hsw.is_tc_tbt) - val |= DP_AUX_CH_CTL_TBT_IO; - I915_WRITE(DP_AUX_CH_CTL(aux_ch), val); - - hsw_power_well_enable(dev_priv, power_well); -} - -/* - * We should only use the power well if we explicitly asked the hardware to - * enable it, so check if it's enabled and also check if we've requested it to - * be enabled. 
- */ -static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; - enum i915_power_well_id id = power_well->desc->id; - int pw_idx = power_well->desc->hsw.idx; - u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) | - HSW_PWR_WELL_CTL_STATE(pw_idx); - u32 val; - - val = I915_READ(regs->driver); - - /* - * On GEN9 big core due to a DMC bug the driver's request bits for PW1 - * and the MISC_IO PW will be not restored, so check instead for the - * BIOS's own request bits, which are forced-on for these power wells - * when exiting DC5/6. - */ - if (IS_GEN(dev_priv, 9) && !IS_GEN9_LP(dev_priv) && - (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO)) - val |= I915_READ(regs->bios); - - return (val & mask) == mask; -} - -static void assert_can_enable_dc9(struct drm_i915_private *dev_priv) -{ - WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9), - "DC9 already programmed to be enabled.\n"); - WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5, - "DC5 still not disabled to enable DC9.\n"); - WARN_ONCE(I915_READ(HSW_PWR_WELL_CTL2) & - HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2), - "Power well 2 on.\n"); - WARN_ONCE(intel_irqs_enabled(dev_priv), - "Interrupts not disabled yet.\n"); - - /* - * TODO: check for the following to verify the conditions to enter DC9 - * state are satisfied: - * 1] Check relevant display engine registers to verify if mode set - * disable sequence was followed. - * 2] Check if display uninitialize sequence is initialized. - */ -} - -static void assert_can_disable_dc9(struct drm_i915_private *dev_priv) -{ - WARN_ONCE(intel_irqs_enabled(dev_priv), - "Interrupts not disabled yet.\n"); - WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5, - "DC5 still not disabled.\n"); - - /* - * TODO: check for the following to verify DC9 state was indeed - * entered before programming to disable it: - * 1] Check relevant display engine registers to verify if mode - * set disable sequence was followed. - * 2] Check if display uninitialize sequence is initialized. - */ -} - -static void gen9_write_dc_state(struct drm_i915_private *dev_priv, - u32 state) -{ - int rewrites = 0; - int rereads = 0; - u32 v; - - I915_WRITE(DC_STATE_EN, state); - - /* It has been observed that disabling the dc6 state sometimes - * doesn't stick and dmc keeps returning old value. Make sure - * the write really sticks enough times and also force rewrite until - * we are confident that state is exactly what we want. 
- */ - do { - v = I915_READ(DC_STATE_EN); - - if (v != state) { - I915_WRITE(DC_STATE_EN, state); - rewrites++; - rereads = 0; - } else if (rereads++ > 5) { - break; - } - - } while (rewrites < 100); - if (v != state) - DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n", - state, v); - - /* Most of the times we need one retry, avoid spam */ - if (rewrites > 1) - DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n", - state, rewrites); -} - -static u32 gen9_dc_mask(struct drm_i915_private *dev_priv) -{ - u32 mask; - - mask = DC_STATE_EN_UPTO_DC5; - if (INTEL_GEN(dev_priv) >= 11) - mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9; - else if (IS_GEN9_LP(dev_priv)) - mask |= DC_STATE_EN_DC9; - else - mask |= DC_STATE_EN_UPTO_DC6; - - return mask; -} - -void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv) -{ - u32 val; - - val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv); + intel_runtime_pm_acquire(i915, true); - DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n", - dev_priv->csr.dc_state, val); - dev_priv->csr.dc_state = val; + return track_intel_runtime_pm_wakeref(i915); } /** - * gen9_set_dc_state - set target display C power state - * @dev_priv: i915 device instance - * @state: target DC power state - * - DC_STATE_DISABLE - * - DC_STATE_EN_UPTO_DC5 - * - DC_STATE_EN_UPTO_DC6 - * - DC_STATE_EN_DC9 + * intel_runtime_pm_get_noresume - grab a runtime pm reference + * @i915: i915 device instance + * + * This function grabs a device-level runtime pm reference (mostly used for GEM + * code to ensure the GTT or GT is on). + * + * It will _not_ power up the device but instead only check that it's powered + * on. Therefore it is only valid to call this functions from contexts where + * the device is known to be powered up and where trying to power it up would + * result in hilarity and deadlocks. That pretty much means only the system + * suspend/resume code where this is used to grab runtime pm references for + * delayed setup down in work items. * - * Signal to DMC firmware/HW the target DC power state passed in @state. - * DMC/HW can turn off individual display clocks and power rails when entering - * a deeper DC power state (higher in number) and turns these back when exiting - * that state to a shallower power state (lower in number). The HW will decide - * when to actually enter a given state on an on-demand basis, for instance - * depending on the active state of display pipes. The state of display - * registers backed by affected power rails are saved/restored as needed. + * Any runtime pm reference obtained by this function must have a symmetric + * call to intel_runtime_pm_put() to release the reference again. * - * Based on the above enabling a deeper DC power state is asynchronous wrt. - * enabling it. Disabling a deeper power state is synchronous: for instance - * setting %DC_STATE_DISABLE won't complete until all HW resources are turned - * back on and register state is restored. This is guaranteed by the MMIO write - * to DC_STATE_EN blocking until the state is restored. 
+ * Returns: the wakeref cookie to pass to intel_runtime_pm_put() */ -static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state) -{ - u32 val; - u32 mask; - - if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask)) - state &= dev_priv->csr.allowed_dc_mask; - - val = I915_READ(DC_STATE_EN); - mask = gen9_dc_mask(dev_priv); - DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n", - val & mask, state); - - /* Check if DMC is ignoring our DC state requests */ - if ((val & mask) != dev_priv->csr.dc_state) - DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n", - dev_priv->csr.dc_state, val & mask); - - val &= ~mask; - val |= state; - - gen9_write_dc_state(dev_priv, val); - - dev_priv->csr.dc_state = val & mask; -} - -void bxt_enable_dc9(struct drm_i915_private *dev_priv) -{ - assert_can_enable_dc9(dev_priv); - - DRM_DEBUG_KMS("Enabling DC9\n"); - /* - * Power sequencer reset is not needed on - * platforms with South Display Engine on PCH, - * because PPS registers are always on. - */ - if (!HAS_PCH_SPLIT(dev_priv)) - intel_power_sequencer_reset(dev_priv); - gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9); -} - -void bxt_disable_dc9(struct drm_i915_private *dev_priv) +intel_wakeref_t intel_runtime_pm_get_noresume(struct drm_i915_private *i915) { - assert_can_disable_dc9(dev_priv); + struct pci_dev *pdev = i915->drm.pdev; + struct device *kdev = &pdev->dev; - DRM_DEBUG_KMS("Disabling DC9\n"); + assert_rpm_wakelock_held(i915); + pm_runtime_get_noresume(kdev); - gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); + intel_runtime_pm_acquire(i915, true); - intel_pps_unlock_regs_wa(dev_priv); + return track_intel_runtime_pm_wakeref(i915); } -static void assert_csr_loaded(struct drm_i915_private *dev_priv) +static void __intel_runtime_pm_put(struct drm_i915_private *i915, + intel_wakeref_t wref, + bool wakelock) { - WARN_ONCE(!I915_READ(CSR_PROGRAM(0)), - "CSR program storage start is NULL\n"); - WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n"); - WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n"); -} + struct pci_dev *pdev = i915->drm.pdev; + struct device *kdev = &pdev->dev; -static struct i915_power_well * -lookup_power_well(struct drm_i915_private *dev_priv, - enum i915_power_well_id power_well_id) -{ - struct i915_power_well *power_well; + untrack_intel_runtime_pm_wakeref(i915, wref); - for_each_power_well(dev_priv, power_well) - if (power_well->desc->id == power_well_id) - return power_well; + intel_runtime_pm_release(i915, wakelock); - /* - * It's not feasible to add error checking code to the callers since - * this condition really shouldn't happen and it doesn't even make sense - * to abort things like display initialization sequences. Just return - * the first power well and hope the WARN gets reported so we can fix - * our driver. - */ - WARN(1, "Power well %d not defined for this platform\n", power_well_id); - return &dev_priv->power_domains.power_wells[0]; + pm_runtime_mark_last_busy(kdev); + pm_runtime_put_autosuspend(kdev); } -static void assert_can_enable_dc5(struct drm_i915_private *dev_priv) +/** + * intel_runtime_pm_put_raw - release a raw runtime pm reference + * @i915: i915 device instance + * @wref: wakeref acquired for the reference that is being released + * + * This function drops the device-level runtime pm reference obtained by + * intel_runtime_pm_get_raw() and might power down the corresponding + * hardware block right away if this is the last reference. 
+ */ +void +intel_runtime_pm_put_raw(struct drm_i915_private *i915, intel_wakeref_t wref) { - bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv, - SKL_DISP_PW_2); - - WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n"); - - WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5), - "DC5 already programmed to be enabled.\n"); - assert_rpm_wakelock_held(dev_priv); - - assert_csr_loaded(dev_priv); + __intel_runtime_pm_put(i915, wref, false); } -void gen9_enable_dc5(struct drm_i915_private *dev_priv) -{ - assert_can_enable_dc5(dev_priv); - - DRM_DEBUG_KMS("Enabling DC5\n"); - - /* Wa Display #1183: skl,kbl,cfl */ - if (IS_GEN9_BC(dev_priv)) - I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) | - SKL_SELECT_ALTERNATE_DC_EXIT); - - gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5); -} - -static void assert_can_enable_dc6(struct drm_i915_private *dev_priv) -{ - WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE, - "Backlight is not disabled.\n"); - WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6), - "DC6 already programmed to be enabled.\n"); - - assert_csr_loaded(dev_priv); -} - -void skl_enable_dc6(struct drm_i915_private *dev_priv) -{ - assert_can_enable_dc6(dev_priv); - - DRM_DEBUG_KMS("Enabling DC6\n"); - - /* Wa Display #1183: skl,kbl,cfl */ - if (IS_GEN9_BC(dev_priv)) - I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) | - SKL_SELECT_ALTERNATE_DC_EXIT); - - gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); -} - -static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; - int pw_idx = power_well->desc->hsw.idx; - u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx); - u32 bios_req = I915_READ(regs->bios); - - /* Take over the request bit if set by BIOS. 
*/ - if (bios_req & mask) { - u32 drv_req = I915_READ(regs->driver); - - if (!(drv_req & mask)) - I915_WRITE(regs->driver, drv_req | mask); - I915_WRITE(regs->bios, bios_req & ~mask); - } -} - -static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy); -} - -static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy); -} - -static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy); -} - -static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv) -{ - struct i915_power_well *power_well; - - power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A); - if (power_well->count > 0) - bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy); - - power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC); - if (power_well->count > 0) - bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy); - - if (IS_GEMINILAKE(dev_priv)) { - power_well = lookup_power_well(dev_priv, - GLK_DISP_PW_DPIO_CMN_C); - if (power_well->count > 0) - bxt_ddi_phy_verify_state(dev_priv, - power_well->desc->bxt.phy); - } -} - -static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0; -} - -static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv) -{ - u32 tmp = I915_READ(DBUF_CTL); - - WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) != - (DBUF_POWER_STATE | DBUF_POWER_REQUEST), - "Unexpected DBuf power power state (0x%08x)\n", tmp); -} - -static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - struct intel_cdclk_state cdclk_state = {}; - - gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); - - dev_priv->display.get_cdclk(dev_priv, &cdclk_state); - /* Can't read out voltage_level so can't use intel_cdclk_changed() */ - WARN_ON(intel_cdclk_needs_modeset(&dev_priv->cdclk.hw, &cdclk_state)); - - gen9_assert_dbuf_enabled(dev_priv); - - if (IS_GEN9_LP(dev_priv)) - bxt_verify_ddi_phy_power_wells(dev_priv); - - if (INTEL_GEN(dev_priv) >= 11) - /* - * DMC retains HW context only for port A, the other combo - * PHY's HW context for port B is lost after DC transitions, - * so we need to restore it manually. 
- */ - intel_combo_phy_init(dev_priv); -} - -static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - if (!dev_priv->csr.dmc_payload) - return; - - if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6) - skl_enable_dc6(dev_priv); - else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5) - gen9_enable_dc5(dev_priv); -} - -static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ -} - -static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ -} - -static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - return true; -} - -static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - if ((I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0) - i830_enable_pipe(dev_priv, PIPE_A); - if ((I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0) - i830_enable_pipe(dev_priv, PIPE_B); -} - -static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - i830_disable_pipe(dev_priv, PIPE_B); - i830_disable_pipe(dev_priv, PIPE_A); -} - -static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - return I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE && - I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE; -} - -static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - if (power_well->count > 0) - i830_pipes_power_well_enable(dev_priv, power_well); - else - i830_pipes_power_well_disable(dev_priv, power_well); -} - -static void vlv_set_power_well(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well, bool enable) -{ - int pw_idx = power_well->desc->vlv.idx; - u32 mask; - u32 state; - u32 ctrl; - - mask = PUNIT_PWRGT_MASK(pw_idx); - state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) : - PUNIT_PWRGT_PWR_GATE(pw_idx); - - vlv_punit_get(dev_priv); - -#define COND \ - ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state) - - if (COND) - goto out; - - ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL); - ctrl &= ~mask; - ctrl |= state; - vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl); - - if (wait_for(COND, 100)) - DRM_ERROR("timeout setting power well state %08x (%08x)\n", - state, - vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL)); - -#undef COND - -out: - vlv_punit_put(dev_priv); -} - -static void vlv_power_well_enable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - vlv_set_power_well(dev_priv, power_well, true); -} - -static void vlv_power_well_disable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - vlv_set_power_well(dev_priv, power_well, false); -} - -static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - int pw_idx = power_well->desc->vlv.idx; - bool enabled = false; - u32 mask; - u32 state; - u32 ctrl; - - mask = PUNIT_PWRGT_MASK(pw_idx); - ctrl = PUNIT_PWRGT_PWR_ON(pw_idx); - - vlv_punit_get(dev_priv); - - state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask; - /* - * We only ever set the power-on and power-gate states, anything - * else is unexpected. 
- */ - WARN_ON(state != PUNIT_PWRGT_PWR_ON(pw_idx) && - state != PUNIT_PWRGT_PWR_GATE(pw_idx)); - if (state == ctrl) - enabled = true; - - /* - * A transient state at this point would mean some unexpected party - * is poking at the power controls too. - */ - ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask; - WARN_ON(ctrl != state); - - vlv_punit_put(dev_priv); - - return enabled; -} - -static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv) -{ - u32 val; - - /* - * On driver load, a pipe may be active and driving a DSI display. - * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck - * (and never recovering) in this case. intel_dsi_post_disable() will - * clear it when we turn off the display. - */ - val = I915_READ(DSPCLK_GATE_D); - val &= DPOUNIT_CLOCK_GATE_DISABLE; - val |= VRHUNIT_CLOCK_GATE_DISABLE; - I915_WRITE(DSPCLK_GATE_D, val); - - /* - * Disable trickle feed and enable pnd deadline calculation - */ - I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE); - I915_WRITE(CBR1_VLV, 0); - - WARN_ON(dev_priv->rawclk_freq == 0); - - I915_WRITE(RAWCLK_FREQ_VLV, - DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000)); -} - -static void vlv_display_power_well_init(struct drm_i915_private *dev_priv) -{ - struct intel_encoder *encoder; - enum pipe pipe; - - /* - * Enable the CRI clock source so we can get at the - * display and the reference clock for VGA - * hotplug / manual detection. Supposedly DSI also - * needs the ref clock up and running. - * - * CHV DPLL B/C have some issues if VGA mode is enabled. - */ - for_each_pipe(dev_priv, pipe) { - u32 val = I915_READ(DPLL(pipe)); - - val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; - if (pipe != PIPE_A) - val |= DPLL_INTEGRATED_CRI_CLK_VLV; - - I915_WRITE(DPLL(pipe), val); - } - - vlv_init_display_clock_gating(dev_priv); - - spin_lock_irq(&dev_priv->irq_lock); - valleyview_enable_display_irqs(dev_priv); - spin_unlock_irq(&dev_priv->irq_lock); - - /* - * During driver initialization/resume we can avoid restoring the - * part of the HW/SW state that will be inited anyway explicitly. 
- */ - if (dev_priv->power_domains.initializing) - return; - - intel_hpd_init(dev_priv); - - /* Re-enable the ADPA, if we have one */ - for_each_intel_encoder(&dev_priv->drm, encoder) { - if (encoder->type == INTEL_OUTPUT_ANALOG) - intel_crt_reset(&encoder->base); - } - - i915_redisable_vga_power_on(dev_priv); - - intel_pps_unlock_regs_wa(dev_priv); -} - -static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv) -{ - spin_lock_irq(&dev_priv->irq_lock); - valleyview_disable_display_irqs(dev_priv); - spin_unlock_irq(&dev_priv->irq_lock); - - /* make sure we're done processing display irqs */ - synchronize_irq(dev_priv->drm.irq); - - intel_power_sequencer_reset(dev_priv); - - /* Prevent us from re-enabling polling on accident in late suspend */ - if (!dev_priv->drm.dev->power.is_suspended) - intel_hpd_poll_init(dev_priv); -} - -static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - vlv_set_power_well(dev_priv, power_well, true); - - vlv_display_power_well_init(dev_priv); -} - -static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - vlv_display_power_well_deinit(dev_priv); - - vlv_set_power_well(dev_priv, power_well, false); -} - -static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - /* since ref/cri clock was enabled */ - udelay(1); /* >10ns for cmnreset, >0ns for sidereset */ - - vlv_set_power_well(dev_priv, power_well, true); - - /* - * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx - - * 6. De-assert cmn_reset/side_reset. Same as VLV X0. - * a. GUnit 0x2110 bit[0] set to 1 (def 0) - * b. The other bits such as sfr settings / modesel may all - * be set to 0. - * - * This should only be done on init and resume from S3 with - * both PLLs disabled, or we risk losing DPIO and PLL - * synchronization. - */ - I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST); -} - -static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - enum pipe pipe; - - for_each_pipe(dev_priv, pipe) - assert_pll_disabled(dev_priv, pipe); - - /* Assert common reset */ - I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST); - - vlv_set_power_well(dev_priv, power_well, false); -} - -#define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0)) - -#define BITS_SET(val, bits) (((val) & (bits)) == (bits)) - -static void assert_chv_phy_status(struct drm_i915_private *dev_priv) -{ - struct i915_power_well *cmn_bc = - lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC); - struct i915_power_well *cmn_d = - lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D); - u32 phy_control = dev_priv->chv_phy_control; - u32 phy_status = 0; - u32 phy_status_mask = 0xffffffff; - - /* - * The BIOS can leave the PHY is some weird state - * where it doesn't fully power down some parts. - * Disable the asserts until the PHY has been fully - * reset (ie. the power well has been disabled at - * least once). 
- */ - if (!dev_priv->chv_phy_assert[DPIO_PHY0]) - phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) | - PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) | - PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) | - PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) | - PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) | - PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1)); - - if (!dev_priv->chv_phy_assert[DPIO_PHY1]) - phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) | - PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) | - PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1)); - - if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) { - phy_status |= PHY_POWERGOOD(DPIO_PHY0); - - /* this assumes override is only used to enable lanes */ - if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0) - phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0); - - if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0) - phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1); - - /* CL1 is on whenever anything is on in either channel */ - if (BITS_SET(phy_control, - PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) | - PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1))) - phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0); - - /* - * The DPLLB check accounts for the pipe B + port A usage - * with CL2 powered up but all the lanes in the second channel - * powered down. - */ - if (BITS_SET(phy_control, - PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) && - (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0) - phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1); - - if (BITS_SET(phy_control, - PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0))) - phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0); - if (BITS_SET(phy_control, - PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0))) - phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1); - - if (BITS_SET(phy_control, - PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1))) - phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0); - if (BITS_SET(phy_control, - PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1))) - phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1); - } - - if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) { - phy_status |= PHY_POWERGOOD(DPIO_PHY1); - - /* this assumes override is only used to enable lanes */ - if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0) - phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0); - - if (BITS_SET(phy_control, - PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0))) - phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0); - - if (BITS_SET(phy_control, - PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0))) - phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0); - if (BITS_SET(phy_control, - PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0))) - phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1); - } - - phy_status &= phy_status_mask; - - /* - * The PHY may be busy with some initial calibration and whatnot, - * so the power state can take a while to actually change. 
- */ - if (intel_wait_for_register(&dev_priv->uncore, - DISPLAY_PHY_STATUS, - phy_status_mask, - phy_status, - 10)) - DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n", - I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask, - phy_status, dev_priv->chv_phy_control); -} - -#undef BITS_SET - -static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - enum dpio_phy phy; - enum pipe pipe; - u32 tmp; - - WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC && - power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D); - - if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) { - pipe = PIPE_A; - phy = DPIO_PHY0; - } else { - pipe = PIPE_C; - phy = DPIO_PHY1; - } - - /* since ref/cri clock was enabled */ - udelay(1); /* >10ns for cmnreset, >0ns for sidereset */ - vlv_set_power_well(dev_priv, power_well, true); - - /* Poll for phypwrgood signal */ - if (intel_wait_for_register(&dev_priv->uncore, - DISPLAY_PHY_STATUS, - PHY_POWERGOOD(phy), - PHY_POWERGOOD(phy), - 1)) - DRM_ERROR("Display PHY %d is not power up\n", phy); - - vlv_dpio_get(dev_priv); - - /* Enable dynamic power down */ - tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28); - tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN | - DPIO_SUS_CLK_CONFIG_GATE_CLKREQ; - vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp); - - if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) { - tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1); - tmp |= DPIO_DYNPWRDOWNEN_CH1; - vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp); - } else { - /* - * Force the non-existing CL2 off. BXT does this - * too, so maybe it saves some power even though - * CL2 doesn't exist? - */ - tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30); - tmp |= DPIO_CL2_LDOFUSE_PWRENB; - vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp); - } - - vlv_dpio_put(dev_priv); - - dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy); - I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control); - - DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n", - phy, dev_priv->chv_phy_control); - - assert_chv_phy_status(dev_priv); -} - -static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - enum dpio_phy phy; - - WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC && - power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D); - - if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) { - phy = DPIO_PHY0; - assert_pll_disabled(dev_priv, PIPE_A); - assert_pll_disabled(dev_priv, PIPE_B); - } else { - phy = DPIO_PHY1; - assert_pll_disabled(dev_priv, PIPE_C); - } - - dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy); - I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control); - - vlv_set_power_well(dev_priv, power_well, false); - - DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n", - phy, dev_priv->chv_phy_control); - - /* PHY is fully reset now, so we can enable the PHY state asserts */ - dev_priv->chv_phy_assert[phy] = true; - - assert_chv_phy_status(dev_priv); -} - -static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy, - enum dpio_channel ch, bool override, unsigned int mask) -{ - enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C; - u32 reg, val, expected, actual; - - /* - * The BIOS can leave the PHY is some weird state - * where it doesn't fully power down some parts. - * Disable the asserts until the PHY has been fully - * reset (ie. 
the power well has been disabled at - * least once). - */ - if (!dev_priv->chv_phy_assert[phy]) - return; - - if (ch == DPIO_CH0) - reg = _CHV_CMN_DW0_CH0; - else - reg = _CHV_CMN_DW6_CH1; - - vlv_dpio_get(dev_priv); - val = vlv_dpio_read(dev_priv, pipe, reg); - vlv_dpio_put(dev_priv); - - /* - * This assumes !override is only used when the port is disabled. - * All lanes should power down even without the override when - * the port is disabled. - */ - if (!override || mask == 0xf) { - expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN; - /* - * If CH1 common lane is not active anymore - * (eg. for pipe B DPLL) the entire channel will - * shut down, which causes the common lane registers - * to read as 0. That means we can't actually check - * the lane power down status bits, but as the entire - * register reads as 0 it's a good indication that the - * channel is indeed entirely powered down. - */ - if (ch == DPIO_CH1 && val == 0) - expected = 0; - } else if (mask != 0x0) { - expected = DPIO_ANYDL_POWERDOWN; - } else { - expected = 0; - } - - if (ch == DPIO_CH0) - actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0; - else - actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1; - actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN; - - WARN(actual != expected, - "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n", - !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN), - !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN), - reg, val); -} - -bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy, - enum dpio_channel ch, bool override) -{ - struct i915_power_domains *power_domains = &dev_priv->power_domains; - bool was_override; - - mutex_lock(&power_domains->lock); - - was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); - - if (override == was_override) - goto out; - - if (override) - dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); - else - dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); - - I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control); - - DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n", - phy, ch, dev_priv->chv_phy_control); - - assert_chv_phy_status(dev_priv); - -out: - mutex_unlock(&power_domains->lock); - - return was_override; -} - -void chv_phy_powergate_lanes(struct intel_encoder *encoder, - bool override, unsigned int mask) -{ - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct i915_power_domains *power_domains = &dev_priv->power_domains; - enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(&encoder->base)); - enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base)); - - mutex_lock(&power_domains->lock); - - dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch); - dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch); - - if (override) - dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); - else - dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); - - I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control); - - DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n", - phy, ch, mask, dev_priv->chv_phy_control); - - assert_chv_phy_status(dev_priv); - - assert_chv_phy_powergate(dev_priv, phy, ch, override, mask); - - mutex_unlock(&power_domains->lock); -} - -static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv, - struct i915_power_well 
*power_well) -{ - enum pipe pipe = PIPE_A; - bool enabled; - u32 state, ctrl; - - vlv_punit_get(dev_priv); - - state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe); - /* - * We only ever set the power-on and power-gate states, anything - * else is unexpected. - */ - WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe)); - enabled = state == DP_SSS_PWR_ON(pipe); - - /* - * A transient state at this point would mean some unexpected party - * is poking at the power controls too. - */ - ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe); - WARN_ON(ctrl << 16 != state); - - vlv_punit_put(dev_priv); - - return enabled; -} - -static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well, - bool enable) -{ - enum pipe pipe = PIPE_A; - u32 state; - u32 ctrl; - - state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe); - - vlv_punit_get(dev_priv); - -#define COND \ - ((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state) - - if (COND) - goto out; - - ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM); - ctrl &= ~DP_SSC_MASK(pipe); - ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe); - vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl); - - if (wait_for(COND, 100)) - DRM_ERROR("timeout setting power well state %08x (%08x)\n", - state, - vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM)); - -#undef COND - -out: - vlv_punit_put(dev_priv); -} - -static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - chv_set_pipe_power_well(dev_priv, power_well, true); - - vlv_display_power_well_init(dev_priv); -} - -static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - vlv_display_power_well_deinit(dev_priv); - - chv_set_pipe_power_well(dev_priv, power_well, false); -} - -static u64 __async_put_domains_mask(struct i915_power_domains *power_domains) -{ - return power_domains->async_put_domains[0] | - power_domains->async_put_domains[1]; -} - -#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) - -static bool -assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains) -{ - return !WARN_ON(power_domains->async_put_domains[0] & - power_domains->async_put_domains[1]); -} - -static bool -__async_put_domains_state_ok(struct i915_power_domains *power_domains) -{ - enum intel_display_power_domain domain; - bool err = false; - - err |= !assert_async_put_domain_masks_disjoint(power_domains); - err |= WARN_ON(!!power_domains->async_put_wakeref != - !!__async_put_domains_mask(power_domains)); - - for_each_power_domain(domain, __async_put_domains_mask(power_domains)) - err |= WARN_ON(power_domains->domain_use_count[domain] != 1); - - return !err; -} - -static void print_power_domains(struct i915_power_domains *power_domains, - const char *prefix, u64 mask) -{ - enum intel_display_power_domain domain; - - DRM_DEBUG_DRIVER("%s (%lu):\n", prefix, hweight64(mask)); - for_each_power_domain(domain, mask) - DRM_DEBUG_DRIVER("%s use_count %d\n", - intel_display_power_domain_str(domain), - power_domains->domain_use_count[domain]); -} - -static void -print_async_put_domains_state(struct i915_power_domains *power_domains) -{ - DRM_DEBUG_DRIVER("async_put_wakeref %u\n", - power_domains->async_put_wakeref); - - print_power_domains(power_domains, "async_put_domains[0]", - power_domains->async_put_domains[0]); - print_power_domains(power_domains, "async_put_domains[1]", - 
power_domains->async_put_domains[1]); -} - -static void -verify_async_put_domains_state(struct i915_power_domains *power_domains) -{ - if (!__async_put_domains_state_ok(power_domains)) - print_async_put_domains_state(power_domains); -} - -#else - -static void -assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains) -{ -} - -static void -verify_async_put_domains_state(struct i915_power_domains *power_domains) -{ -} - -#endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */ - -static u64 async_put_domains_mask(struct i915_power_domains *power_domains) -{ - assert_async_put_domain_masks_disjoint(power_domains); - - return __async_put_domains_mask(power_domains); -} - -static void -async_put_domains_clear_domain(struct i915_power_domains *power_domains, - enum intel_display_power_domain domain) -{ - assert_async_put_domain_masks_disjoint(power_domains); - - power_domains->async_put_domains[0] &= ~BIT_ULL(domain); - power_domains->async_put_domains[1] &= ~BIT_ULL(domain); -} - -static bool -intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv, - enum intel_display_power_domain domain) -{ - struct i915_power_domains *power_domains = &dev_priv->power_domains; - bool ret = false; - - if (!(async_put_domains_mask(power_domains) & BIT_ULL(domain))) - goto out_verify; - - async_put_domains_clear_domain(power_domains, domain); - - ret = true; - - if (async_put_domains_mask(power_domains)) - goto out_verify; - - cancel_delayed_work(&power_domains->async_put_work); - intel_runtime_pm_put_raw(dev_priv, - fetch_and_zero(&power_domains->async_put_wakeref)); -out_verify: - verify_async_put_domains_state(power_domains); - - return ret; -} - -static void -__intel_display_power_get_domain(struct drm_i915_private *dev_priv, - enum intel_display_power_domain domain) -{ - struct i915_power_domains *power_domains = &dev_priv->power_domains; - struct i915_power_well *power_well; - - if (intel_display_power_grab_async_put_ref(dev_priv, domain)) - return; - - for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain)) - intel_power_well_get(dev_priv, power_well); - - power_domains->domain_use_count[domain]++; -} - -/** - * intel_display_power_get - grab a power domain reference - * @dev_priv: i915 device instance - * @domain: power domain to reference - * - * This function grabs a power domain reference for @domain and ensures that the - * power domain and all its parents are powered up. Therefore users should only - * grab a reference to the innermost power domain they need. - * - * Any power domain reference obtained by this function must have a symmetric - * call to intel_display_power_put() to release the reference again. - */ -intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv, - enum intel_display_power_domain domain) -{ - struct i915_power_domains *power_domains = &dev_priv->power_domains; - intel_wakeref_t wakeref = intel_runtime_pm_get(dev_priv); - - mutex_lock(&power_domains->lock); - __intel_display_power_get_domain(dev_priv, domain); - mutex_unlock(&power_domains->lock); - - return wakeref; -} - -/** - * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain - * @dev_priv: i915 device instance - * @domain: power domain to reference - * - * This function grabs a power domain reference for @domain and ensures that the - * power domain and all its parents are powered up. Therefore users should only - * grab a reference to the innermost power domain they need. 
- * - * Any power domain reference obtained by this function must have a symmetric - * call to intel_display_power_put() to release the reference again. - */ -intel_wakeref_t -intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv, - enum intel_display_power_domain domain) -{ - struct i915_power_domains *power_domains = &dev_priv->power_domains; - intel_wakeref_t wakeref; - bool is_enabled; - - wakeref = intel_runtime_pm_get_if_in_use(dev_priv); - if (!wakeref) - return false; - - mutex_lock(&power_domains->lock); - - if (__intel_display_power_is_enabled(dev_priv, domain)) { - __intel_display_power_get_domain(dev_priv, domain); - is_enabled = true; - } else { - is_enabled = false; - } - - mutex_unlock(&power_domains->lock); - - if (!is_enabled) { - intel_runtime_pm_put(dev_priv, wakeref); - wakeref = 0; - } - - return wakeref; -} - -static void -__intel_display_power_put_domain(struct drm_i915_private *dev_priv, - enum intel_display_power_domain domain) -{ - struct i915_power_domains *power_domains; - struct i915_power_well *power_well; - const char *name = intel_display_power_domain_str(domain); - - power_domains = &dev_priv->power_domains; - - WARN(!power_domains->domain_use_count[domain], - "Use count on domain %s is already zero\n", - name); - WARN(async_put_domains_mask(power_domains) & BIT_ULL(domain), - "Async disabling of domain %s is pending\n", - name); - - power_domains->domain_use_count[domain]--; - - for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) - intel_power_well_put(dev_priv, power_well); -} - -static void __intel_display_power_put(struct drm_i915_private *dev_priv, - enum intel_display_power_domain domain) -{ - struct i915_power_domains *power_domains = &dev_priv->power_domains; - - mutex_lock(&power_domains->lock); - __intel_display_power_put_domain(dev_priv, domain); - mutex_unlock(&power_domains->lock); -} - -/** - * intel_display_power_put_unchecked - release an unchecked power domain reference - * @dev_priv: i915 device instance - * @domain: power domain to reference - * - * This function drops the power domain reference obtained by - * intel_display_power_get() and might power down the corresponding hardware - * block right away if this is the last reference. - * - * This function exists only for historical reasons and should be avoided in - * new code, as the correctness of its use cannot be checked. Always use - * intel_display_power_put() instead. - */ -void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv, - enum intel_display_power_domain domain) -{ - __intel_display_power_put(dev_priv, domain); - intel_runtime_pm_put_unchecked(dev_priv); -} - -static void -queue_async_put_domains_work(struct i915_power_domains *power_domains, - intel_wakeref_t wakeref) -{ - WARN_ON(power_domains->async_put_wakeref); - power_domains->async_put_wakeref = wakeref; - WARN_ON(!queue_delayed_work(system_unbound_wq, - &power_domains->async_put_work, - msecs_to_jiffies(100))); -} - -static void -release_async_put_domains(struct i915_power_domains *power_domains, u64 mask) -{ - struct drm_i915_private *dev_priv = - container_of(power_domains, struct drm_i915_private, - power_domains); - enum intel_display_power_domain domain; - intel_wakeref_t wakeref; - - /* - * The caller must hold already raw wakeref, upgrade that to a proper - * wakeref to make the state checker happy about the HW access during - * power well disabling. 
- */ - assert_rpm_raw_wakeref_held(dev_priv); - wakeref = intel_runtime_pm_get(dev_priv); - - for_each_power_domain(domain, mask) { - /* Clear before put, so put's sanity check is happy. */ - async_put_domains_clear_domain(power_domains, domain); - __intel_display_power_put_domain(dev_priv, domain); - } - - intel_runtime_pm_put(dev_priv, wakeref); -} - -static void -intel_display_power_put_async_work(struct work_struct *work) -{ - struct drm_i915_private *dev_priv = - container_of(work, struct drm_i915_private, - power_domains.async_put_work.work); - struct i915_power_domains *power_domains = &dev_priv->power_domains; - intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(dev_priv); - intel_wakeref_t old_work_wakeref = 0; - - mutex_lock(&power_domains->lock); - - /* - * Bail out if all the domain refs pending to be released were grabbed - * by subsequent gets or a flush_work. - */ - old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref); - if (!old_work_wakeref) - goto out_verify; - - release_async_put_domains(power_domains, - power_domains->async_put_domains[0]); - - /* Requeue the work if more domains were async put meanwhile. */ - if (power_domains->async_put_domains[1]) { - power_domains->async_put_domains[0] = - fetch_and_zero(&power_domains->async_put_domains[1]); - queue_async_put_domains_work(power_domains, - fetch_and_zero(&new_work_wakeref)); - } - -out_verify: - verify_async_put_domains_state(power_domains); - - mutex_unlock(&power_domains->lock); - - if (old_work_wakeref) - intel_runtime_pm_put_raw(dev_priv, old_work_wakeref); - if (new_work_wakeref) - intel_runtime_pm_put_raw(dev_priv, new_work_wakeref); -} - -/** - * intel_display_power_put_async - release a power domain reference asynchronously - * @i915: i915 device instance - * @domain: power domain to reference - * @wakeref: wakeref acquired for the reference that is being released - * - * This function drops the power domain reference obtained by - * intel_display_power_get*() and schedules a work to power down the - * corresponding hardware block if this is the last reference. - */ -void __intel_display_power_put_async(struct drm_i915_private *i915, - enum intel_display_power_domain domain, - intel_wakeref_t wakeref) -{ - struct i915_power_domains *power_domains = &i915->power_domains; - intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(i915); - - mutex_lock(&power_domains->lock); - - if (power_domains->domain_use_count[domain] > 1) { - __intel_display_power_put_domain(i915, domain); - - goto out_verify; - } - - WARN_ON(power_domains->domain_use_count[domain] != 1); - - /* Let a pending work requeue itself or queue a new one. */ - if (power_domains->async_put_wakeref) { - power_domains->async_put_domains[1] |= BIT_ULL(domain); - } else { - power_domains->async_put_domains[0] |= BIT_ULL(domain); - queue_async_put_domains_work(power_domains, - fetch_and_zero(&work_wakeref)); - } - -out_verify: - verify_async_put_domains_state(power_domains); - - mutex_unlock(&power_domains->lock); - - if (work_wakeref) - intel_runtime_pm_put_raw(i915, work_wakeref); - - intel_runtime_pm_put(i915, wakeref); -} - -/** - * intel_display_power_flush_work - flushes the async display power disabling work - * @i915: i915 device instance - * - * Flushes any pending work that was scheduled by a preceding - * intel_display_power_put_async() call, completing the disabling of the - * corresponding power domains. 
- * - * Note that the work handler function may still be running after this - * function returns; to ensure that the work handler isn't running use - * intel_display_power_flush_work_sync() instead. - */ -void intel_display_power_flush_work(struct drm_i915_private *i915) -{ - struct i915_power_domains *power_domains = &i915->power_domains; - intel_wakeref_t work_wakeref; - - mutex_lock(&power_domains->lock); - - work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref); - if (!work_wakeref) - goto out_verify; - - release_async_put_domains(power_domains, - async_put_domains_mask(power_domains)); - cancel_delayed_work(&power_domains->async_put_work); - -out_verify: - verify_async_put_domains_state(power_domains); - - mutex_unlock(&power_domains->lock); - - if (work_wakeref) - intel_runtime_pm_put_raw(i915, work_wakeref); -} - -/** - * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work - * @i915: i915 device instance - * - * Like intel_display_power_flush_work(), but also ensure that the work - * handler function is not running any more when this function returns. - */ -static void -intel_display_power_flush_work_sync(struct drm_i915_private *i915) -{ - struct i915_power_domains *power_domains = &i915->power_domains; - - intel_display_power_flush_work(i915); - cancel_delayed_work_sync(&power_domains->async_put_work); - - verify_async_put_domains_state(power_domains); - - WARN_ON(power_domains->async_put_wakeref); -} - -#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) -/** - * intel_display_power_put - release a power domain reference - * @dev_priv: i915 device instance - * @domain: power domain to reference - * @wakeref: wakeref acquired for the reference that is being released - * - * This function drops the power domain reference obtained by - * intel_display_power_get() and might power down the corresponding hardware - * block right away if this is the last reference. 
- */ -void intel_display_power_put(struct drm_i915_private *dev_priv, - enum intel_display_power_domain domain, - intel_wakeref_t wakeref) -{ - __intel_display_power_put(dev_priv, domain); - intel_runtime_pm_put(dev_priv, wakeref); -} -#endif - -#define I830_PIPES_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PIPE_A) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B) | \ - BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define VLV_DISPLAY_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) | \ - BIT_ULL(POWER_DOMAIN_PIPE_A) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B) | \ - BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DSI) | \ - BIT_ULL(POWER_DOMAIN_PORT_CRT) | \ - BIT_ULL(POWER_DOMAIN_VGA) | \ - BIT_ULL(POWER_DOMAIN_AUDIO) | \ - BIT_ULL(POWER_DOMAIN_AUX_B) | \ - BIT_ULL(POWER_DOMAIN_AUX_C) | \ - BIT_ULL(POWER_DOMAIN_GMBUS) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_CRT) | \ - BIT_ULL(POWER_DOMAIN_AUX_B) | \ - BIT_ULL(POWER_DOMAIN_AUX_C) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ - BIT_ULL(POWER_DOMAIN_AUX_B) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ - BIT_ULL(POWER_DOMAIN_AUX_B) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ - BIT_ULL(POWER_DOMAIN_AUX_C) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ - BIT_ULL(POWER_DOMAIN_AUX_C) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define CHV_DISPLAY_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) | \ - BIT_ULL(POWER_DOMAIN_PIPE_A) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B) | \ - BIT_ULL(POWER_DOMAIN_PIPE_C) | \ - BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DSI) | \ - BIT_ULL(POWER_DOMAIN_VGA) | \ - BIT_ULL(POWER_DOMAIN_AUDIO) | \ - BIT_ULL(POWER_DOMAIN_AUX_B) | \ - BIT_ULL(POWER_DOMAIN_AUX_C) | \ - BIT_ULL(POWER_DOMAIN_AUX_D) | \ - BIT_ULL(POWER_DOMAIN_GMBUS) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ - BIT_ULL(POWER_DOMAIN_AUX_B) | \ - BIT_ULL(POWER_DOMAIN_AUX_C) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define CHV_DPIO_CMN_D_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ - BIT_ULL(POWER_DOMAIN_AUX_D) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define HSW_DISPLAY_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PIPE_B) | \ - BIT_ULL(POWER_DOMAIN_PIPE_C) | \ - 
BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \ - BIT_ULL(POWER_DOMAIN_VGA) | \ - BIT_ULL(POWER_DOMAIN_AUDIO) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define BDW_DISPLAY_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PIPE_B) | \ - BIT_ULL(POWER_DOMAIN_PIPE_C) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \ - BIT_ULL(POWER_DOMAIN_VGA) | \ - BIT_ULL(POWER_DOMAIN_AUDIO) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ - BIT_ULL(POWER_DOMAIN_PIPE_C) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \ - BIT_ULL(POWER_DOMAIN_AUX_B) | \ - BIT_ULL(POWER_DOMAIN_AUX_C) | \ - BIT_ULL(POWER_DOMAIN_AUX_D) | \ - BIT_ULL(POWER_DOMAIN_AUDIO) | \ - BIT_ULL(POWER_DOMAIN_VGA) | \ - BIT_ULL(POWER_DOMAIN_INIT)) -#define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) | \ - BIT_ULL(POWER_DOMAIN_INIT)) -#define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \ - BIT_ULL(POWER_DOMAIN_INIT)) -#define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \ - BIT_ULL(POWER_DOMAIN_INIT)) -#define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \ - BIT_ULL(POWER_DOMAIN_INIT)) -#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ - SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ - BIT_ULL(POWER_DOMAIN_GT_IRQ) | \ - BIT_ULL(POWER_DOMAIN_MODESET) | \ - BIT_ULL(POWER_DOMAIN_AUX_A) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ - BIT_ULL(POWER_DOMAIN_PIPE_C) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ - BIT_ULL(POWER_DOMAIN_AUX_B) | \ - BIT_ULL(POWER_DOMAIN_AUX_C) | \ - BIT_ULL(POWER_DOMAIN_AUDIO) | \ - BIT_ULL(POWER_DOMAIN_VGA) | \ - BIT_ULL(POWER_DOMAIN_INIT)) -#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS ( \ - BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ - BIT_ULL(POWER_DOMAIN_GT_IRQ) | \ - BIT_ULL(POWER_DOMAIN_MODESET) | \ - BIT_ULL(POWER_DOMAIN_AUX_A) | \ - BIT_ULL(POWER_DOMAIN_GMBUS) | \ - BIT_ULL(POWER_DOMAIN_INIT)) -#define BXT_DPIO_CMN_A_POWER_DOMAINS ( \ - 
BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \ - BIT_ULL(POWER_DOMAIN_AUX_A) | \ - BIT_ULL(POWER_DOMAIN_INIT)) -#define BXT_DPIO_CMN_BC_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ - BIT_ULL(POWER_DOMAIN_AUX_B) | \ - BIT_ULL(POWER_DOMAIN_AUX_C) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ - BIT_ULL(POWER_DOMAIN_PIPE_C) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ - BIT_ULL(POWER_DOMAIN_AUX_B) | \ - BIT_ULL(POWER_DOMAIN_AUX_C) | \ - BIT_ULL(POWER_DOMAIN_AUDIO) | \ - BIT_ULL(POWER_DOMAIN_VGA) | \ - BIT_ULL(POWER_DOMAIN_INIT)) -#define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO)) -#define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO)) -#define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO)) -#define GLK_DPIO_CMN_A_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \ - BIT_ULL(POWER_DOMAIN_AUX_A) | \ - BIT_ULL(POWER_DOMAIN_INIT)) -#define GLK_DPIO_CMN_B_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ - BIT_ULL(POWER_DOMAIN_AUX_B) | \ - BIT_ULL(POWER_DOMAIN_INIT)) -#define GLK_DPIO_CMN_C_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ - BIT_ULL(POWER_DOMAIN_AUX_C) | \ - BIT_ULL(POWER_DOMAIN_INIT)) -#define GLK_DISPLAY_AUX_A_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_A) | \ - BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \ - BIT_ULL(POWER_DOMAIN_INIT)) -#define GLK_DISPLAY_AUX_B_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_B) | \ - BIT_ULL(POWER_DOMAIN_INIT)) -#define GLK_DISPLAY_AUX_C_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_C) | \ - BIT_ULL(POWER_DOMAIN_INIT)) -#define GLK_DISPLAY_DC_OFF_POWER_DOMAINS ( \ - GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ - BIT_ULL(POWER_DOMAIN_GT_IRQ) | \ - BIT_ULL(POWER_DOMAIN_MODESET) | \ - BIT_ULL(POWER_DOMAIN_AUX_A) | \ - BIT_ULL(POWER_DOMAIN_GMBUS) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ - BIT_ULL(POWER_DOMAIN_PIPE_C) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \ - BIT_ULL(POWER_DOMAIN_AUX_B) | \ - BIT_ULL(POWER_DOMAIN_AUX_C) | \ - BIT_ULL(POWER_DOMAIN_AUX_D) | \ - BIT_ULL(POWER_DOMAIN_AUX_F) | \ - BIT_ULL(POWER_DOMAIN_AUDIO) | \ - BIT_ULL(POWER_DOMAIN_VGA) | \ - BIT_ULL(POWER_DOMAIN_INIT)) -#define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \ - BIT_ULL(POWER_DOMAIN_INIT)) -#define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \ - BIT_ULL(POWER_DOMAIN_INIT)) -#define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \ - BIT_ULL(POWER_DOMAIN_INIT)) -#define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \ - BIT_ULL(POWER_DOMAIN_INIT)) -#define CNL_DISPLAY_AUX_A_POWER_DOMAINS ( \ - 
BIT_ULL(POWER_DOMAIN_AUX_A) | \ - BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \ - BIT_ULL(POWER_DOMAIN_INIT)) -#define CNL_DISPLAY_AUX_B_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_B) | \ - BIT_ULL(POWER_DOMAIN_INIT)) -#define CNL_DISPLAY_AUX_C_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_C) | \ - BIT_ULL(POWER_DOMAIN_INIT)) -#define CNL_DISPLAY_AUX_D_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_D) | \ - BIT_ULL(POWER_DOMAIN_INIT)) -#define CNL_DISPLAY_AUX_F_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_F) | \ - BIT_ULL(POWER_DOMAIN_INIT)) -#define CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) | \ - BIT_ULL(POWER_DOMAIN_INIT)) -#define CNL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ - CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ - BIT_ULL(POWER_DOMAIN_GT_IRQ) | \ - BIT_ULL(POWER_DOMAIN_MODESET) | \ - BIT_ULL(POWER_DOMAIN_AUX_A) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -/* - * ICL PW_0/PG_0 domains (HW/DMC control): - * - PCI - * - clocks except port PLL - * - central power except FBC - * - shared functions except pipe interrupts, pipe MBUS, DBUF registers - * ICL PW_1/PG_1 domains (HW/DMC control): - * - DBUF function - * - PIPE_A and its planes, except VGA - * - transcoder EDP + PSR - * - transcoder DSI - * - DDI_A - * - FBC - */ -#define ICL_PW_4_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PIPE_C) | \ - BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - /* VDSC/joining */ -#define ICL_PW_3_POWER_DOMAINS ( \ - ICL_PW_4_POWER_DOMAINS | \ - BIT_ULL(POWER_DOMAIN_PIPE_B) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ - BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) | \ - BIT_ULL(POWER_DOMAIN_AUX_B) | \ - BIT_ULL(POWER_DOMAIN_AUX_C) | \ - BIT_ULL(POWER_DOMAIN_AUX_D) | \ - BIT_ULL(POWER_DOMAIN_AUX_E) | \ - BIT_ULL(POWER_DOMAIN_AUX_F) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \ - BIT_ULL(POWER_DOMAIN_VGA) | \ - BIT_ULL(POWER_DOMAIN_AUDIO) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - /* - * - transcoder WD - * - KVMR (HW control) - */ -#define ICL_PW_2_POWER_DOMAINS ( \ - ICL_PW_3_POWER_DOMAINS | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_EDP_VDSC) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - /* - * - KVMR (HW control) - */ -#define ICL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ - ICL_PW_2_POWER_DOMAINS | \ - BIT_ULL(POWER_DOMAIN_MODESET) | \ - BIT_ULL(POWER_DOMAIN_AUX_A) | \ - BIT_ULL(POWER_DOMAIN_INIT)) - -#define ICL_DDI_IO_A_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO)) -#define ICL_DDI_IO_B_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO)) -#define ICL_DDI_IO_C_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO)) -#define ICL_DDI_IO_D_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO)) -#define ICL_DDI_IO_E_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO)) -#define ICL_DDI_IO_F_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO)) - -#define ICL_AUX_A_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \ - BIT_ULL(POWER_DOMAIN_AUX_A)) -#define 
ICL_AUX_B_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_B)) -#define ICL_AUX_C_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_C)) -#define ICL_AUX_D_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_D)) -#define ICL_AUX_E_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_E)) -#define ICL_AUX_F_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_F)) -#define ICL_AUX_TBT1_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_TBT1)) -#define ICL_AUX_TBT2_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_TBT2)) -#define ICL_AUX_TBT3_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_TBT3)) -#define ICL_AUX_TBT4_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_TBT4)) - -static const struct i915_power_well_ops i9xx_always_on_power_well_ops = { - .sync_hw = i9xx_power_well_sync_hw_noop, - .enable = i9xx_always_on_power_well_noop, - .disable = i9xx_always_on_power_well_noop, - .is_enabled = i9xx_always_on_power_well_enabled, -}; - -static const struct i915_power_well_ops chv_pipe_power_well_ops = { - .sync_hw = i9xx_power_well_sync_hw_noop, - .enable = chv_pipe_power_well_enable, - .disable = chv_pipe_power_well_disable, - .is_enabled = chv_pipe_power_well_enabled, -}; - -static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = { - .sync_hw = i9xx_power_well_sync_hw_noop, - .enable = chv_dpio_cmn_power_well_enable, - .disable = chv_dpio_cmn_power_well_disable, - .is_enabled = vlv_power_well_enabled, -}; - -static const struct i915_power_well_desc i9xx_always_on_power_well[] = { - { - .name = "always-on", - .always_on = true, - .domains = POWER_DOMAIN_MASK, - .ops = &i9xx_always_on_power_well_ops, - .id = DISP_PW_ID_NONE, - }, -}; - -static const struct i915_power_well_ops i830_pipes_power_well_ops = { - .sync_hw = i830_pipes_power_well_sync_hw, - .enable = i830_pipes_power_well_enable, - .disable = i830_pipes_power_well_disable, - .is_enabled = i830_pipes_power_well_enabled, -}; - -static const struct i915_power_well_desc i830_power_wells[] = { - { - .name = "always-on", - .always_on = true, - .domains = POWER_DOMAIN_MASK, - .ops = &i9xx_always_on_power_well_ops, - .id = DISP_PW_ID_NONE, - }, - { - .name = "pipes", - .domains = I830_PIPES_POWER_DOMAINS, - .ops = &i830_pipes_power_well_ops, - .id = DISP_PW_ID_NONE, - }, -}; - -static const struct i915_power_well_ops hsw_power_well_ops = { - .sync_hw = hsw_power_well_sync_hw, - .enable = hsw_power_well_enable, - .disable = hsw_power_well_disable, - .is_enabled = hsw_power_well_enabled, -}; - -static const struct i915_power_well_ops gen9_dc_off_power_well_ops = { - .sync_hw = i9xx_power_well_sync_hw_noop, - .enable = gen9_dc_off_power_well_enable, - .disable = gen9_dc_off_power_well_disable, - .is_enabled = gen9_dc_off_power_well_enabled, -}; - -static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = { - .sync_hw = i9xx_power_well_sync_hw_noop, - .enable = bxt_dpio_cmn_power_well_enable, - .disable = bxt_dpio_cmn_power_well_disable, - .is_enabled = bxt_dpio_cmn_power_well_enabled, -}; - -static const struct i915_power_well_regs hsw_power_well_regs = { - .bios = HSW_PWR_WELL_CTL1, - .driver = HSW_PWR_WELL_CTL2, - .kvmr = HSW_PWR_WELL_CTL3, - .debug = HSW_PWR_WELL_CTL4, -}; - -static const struct i915_power_well_desc hsw_power_wells[] = { - { - .name = "always-on", - .always_on = true, - .domains = POWER_DOMAIN_MASK, - .ops = &i9xx_always_on_power_well_ops, - .id = DISP_PW_ID_NONE, - }, - { - .name = "display", - .domains = HSW_DISPLAY_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = HSW_DISP_PW_GLOBAL, - { - .hsw.regs = 
&hsw_power_well_regs, - .hsw.idx = HSW_PW_CTL_IDX_GLOBAL, - .hsw.has_vga = true, - }, - }, -}; - -static const struct i915_power_well_desc bdw_power_wells[] = { - { - .name = "always-on", - .always_on = true, - .domains = POWER_DOMAIN_MASK, - .ops = &i9xx_always_on_power_well_ops, - .id = DISP_PW_ID_NONE, - }, - { - .name = "display", - .domains = BDW_DISPLAY_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = HSW_DISP_PW_GLOBAL, - { - .hsw.regs = &hsw_power_well_regs, - .hsw.idx = HSW_PW_CTL_IDX_GLOBAL, - .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), - .hsw.has_vga = true, - }, - }, -}; - -static const struct i915_power_well_ops vlv_display_power_well_ops = { - .sync_hw = i9xx_power_well_sync_hw_noop, - .enable = vlv_display_power_well_enable, - .disable = vlv_display_power_well_disable, - .is_enabled = vlv_power_well_enabled, -}; - -static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = { - .sync_hw = i9xx_power_well_sync_hw_noop, - .enable = vlv_dpio_cmn_power_well_enable, - .disable = vlv_dpio_cmn_power_well_disable, - .is_enabled = vlv_power_well_enabled, -}; - -static const struct i915_power_well_ops vlv_dpio_power_well_ops = { - .sync_hw = i9xx_power_well_sync_hw_noop, - .enable = vlv_power_well_enable, - .disable = vlv_power_well_disable, - .is_enabled = vlv_power_well_enabled, -}; - -static const struct i915_power_well_desc vlv_power_wells[] = { - { - .name = "always-on", - .always_on = true, - .domains = POWER_DOMAIN_MASK, - .ops = &i9xx_always_on_power_well_ops, - .id = DISP_PW_ID_NONE, - }, - { - .name = "display", - .domains = VLV_DISPLAY_POWER_DOMAINS, - .ops = &vlv_display_power_well_ops, - .id = VLV_DISP_PW_DISP2D, - { - .vlv.idx = PUNIT_PWGT_IDX_DISP2D, - }, - }, - { - .name = "dpio-tx-b-01", - .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | - VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | - VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | - VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, - .ops = &vlv_dpio_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01, - }, - }, - { - .name = "dpio-tx-b-23", - .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | - VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | - VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | - VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, - .ops = &vlv_dpio_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23, - }, - }, - { - .name = "dpio-tx-c-01", - .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | - VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | - VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | - VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, - .ops = &vlv_dpio_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01, - }, - }, - { - .name = "dpio-tx-c-23", - .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | - VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | - VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | - VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, - .ops = &vlv_dpio_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23, - }, - }, - { - .name = "dpio-common", - .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS, - .ops = &vlv_dpio_cmn_power_well_ops, - .id = VLV_DISP_PW_DPIO_CMN_BC, - { - .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC, - }, - }, -}; - -static const struct i915_power_well_desc chv_power_wells[] = { - { - .name = "always-on", - .always_on = true, - .domains = POWER_DOMAIN_MASK, - .ops = &i9xx_always_on_power_well_ops, - .id = DISP_PW_ID_NONE, - }, - { - .name = "display", - /* - * Pipe A power well is the new disp2d well. 
Pipe B and C - * power wells don't actually exist. Pipe A power well is - * required for any pipe to work. - */ - .domains = CHV_DISPLAY_POWER_DOMAINS, - .ops = &chv_pipe_power_well_ops, - .id = DISP_PW_ID_NONE, - }, - { - .name = "dpio-common-bc", - .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS, - .ops = &chv_dpio_cmn_power_well_ops, - .id = VLV_DISP_PW_DPIO_CMN_BC, - { - .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC, - }, - }, - { - .name = "dpio-common-d", - .domains = CHV_DPIO_CMN_D_POWER_DOMAINS, - .ops = &chv_dpio_cmn_power_well_ops, - .id = CHV_DISP_PW_DPIO_CMN_D, - { - .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D, - }, - }, -}; - -bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv, - enum i915_power_well_id power_well_id) -{ - struct i915_power_well *power_well; - bool ret; - - power_well = lookup_power_well(dev_priv, power_well_id); - ret = power_well->desc->ops->is_enabled(dev_priv, power_well); - - return ret; -} - -static const struct i915_power_well_desc skl_power_wells[] = { - { - .name = "always-on", - .always_on = true, - .domains = POWER_DOMAIN_MASK, - .ops = &i9xx_always_on_power_well_ops, - .id = DISP_PW_ID_NONE, - }, - { - .name = "power well 1", - /* Handled by the DMC firmware */ - .always_on = true, - .domains = 0, - .ops = &hsw_power_well_ops, - .id = SKL_DISP_PW_1, - { - .hsw.regs = &hsw_power_well_regs, - .hsw.idx = SKL_PW_CTL_IDX_PW_1, - .hsw.has_fuses = true, - }, - }, - { - .name = "MISC IO power well", - /* Handled by the DMC firmware */ - .always_on = true, - .domains = 0, - .ops = &hsw_power_well_ops, - .id = SKL_DISP_PW_MISC_IO, - { - .hsw.regs = &hsw_power_well_regs, - .hsw.idx = SKL_PW_CTL_IDX_MISC_IO, - }, - }, - { - .name = "DC off", - .domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS, - .ops = &gen9_dc_off_power_well_ops, - .id = DISP_PW_ID_NONE, - }, - { - .name = "power well 2", - .domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = SKL_DISP_PW_2, - { - .hsw.regs = &hsw_power_well_regs, - .hsw.idx = SKL_PW_CTL_IDX_PW_2, - .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), - .hsw.has_vga = true, - .hsw.has_fuses = true, - }, - }, - { - .name = "DDI A/E IO power well", - .domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.regs = &hsw_power_well_regs, - .hsw.idx = SKL_PW_CTL_IDX_DDI_A_E, - }, - }, - { - .name = "DDI B IO power well", - .domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.regs = &hsw_power_well_regs, - .hsw.idx = SKL_PW_CTL_IDX_DDI_B, - }, - }, - { - .name = "DDI C IO power well", - .domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.regs = &hsw_power_well_regs, - .hsw.idx = SKL_PW_CTL_IDX_DDI_C, - }, - }, - { - .name = "DDI D IO power well", - .domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.regs = &hsw_power_well_regs, - .hsw.idx = SKL_PW_CTL_IDX_DDI_D, - }, - }, -}; - -static const struct i915_power_well_desc bxt_power_wells[] = { - { - .name = "always-on", - .always_on = true, - .domains = POWER_DOMAIN_MASK, - .ops = &i9xx_always_on_power_well_ops, - .id = DISP_PW_ID_NONE, - }, - { - .name = "power well 1", - /* Handled by the DMC firmware */ - .always_on = true, - .domains = 0, - .ops = &hsw_power_well_ops, - .id = SKL_DISP_PW_1, - { - .hsw.regs = &hsw_power_well_regs, - .hsw.idx = SKL_PW_CTL_IDX_PW_1, - .hsw.has_fuses = true, - }, - }, - { - .name = "DC off", 
- .domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS, - .ops = &gen9_dc_off_power_well_ops, - .id = DISP_PW_ID_NONE, - }, - { - .name = "power well 2", - .domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = SKL_DISP_PW_2, - { - .hsw.regs = &hsw_power_well_regs, - .hsw.idx = SKL_PW_CTL_IDX_PW_2, - .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), - .hsw.has_vga = true, - .hsw.has_fuses = true, - }, - }, - { - .name = "dpio-common-a", - .domains = BXT_DPIO_CMN_A_POWER_DOMAINS, - .ops = &bxt_dpio_cmn_power_well_ops, - .id = BXT_DISP_PW_DPIO_CMN_A, - { - .bxt.phy = DPIO_PHY1, - }, - }, - { - .name = "dpio-common-bc", - .domains = BXT_DPIO_CMN_BC_POWER_DOMAINS, - .ops = &bxt_dpio_cmn_power_well_ops, - .id = VLV_DISP_PW_DPIO_CMN_BC, - { - .bxt.phy = DPIO_PHY0, - }, - }, -}; - -static const struct i915_power_well_desc glk_power_wells[] = { - { - .name = "always-on", - .always_on = true, - .domains = POWER_DOMAIN_MASK, - .ops = &i9xx_always_on_power_well_ops, - .id = DISP_PW_ID_NONE, - }, - { - .name = "power well 1", - /* Handled by the DMC firmware */ - .always_on = true, - .domains = 0, - .ops = &hsw_power_well_ops, - .id = SKL_DISP_PW_1, - { - .hsw.regs = &hsw_power_well_regs, - .hsw.idx = SKL_PW_CTL_IDX_PW_1, - .hsw.has_fuses = true, - }, - }, - { - .name = "DC off", - .domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS, - .ops = &gen9_dc_off_power_well_ops, - .id = DISP_PW_ID_NONE, - }, - { - .name = "power well 2", - .domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = SKL_DISP_PW_2, - { - .hsw.regs = &hsw_power_well_regs, - .hsw.idx = SKL_PW_CTL_IDX_PW_2, - .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), - .hsw.has_vga = true, - .hsw.has_fuses = true, - }, - }, - { - .name = "dpio-common-a", - .domains = GLK_DPIO_CMN_A_POWER_DOMAINS, - .ops = &bxt_dpio_cmn_power_well_ops, - .id = BXT_DISP_PW_DPIO_CMN_A, - { - .bxt.phy = DPIO_PHY1, - }, - }, - { - .name = "dpio-common-b", - .domains = GLK_DPIO_CMN_B_POWER_DOMAINS, - .ops = &bxt_dpio_cmn_power_well_ops, - .id = VLV_DISP_PW_DPIO_CMN_BC, - { - .bxt.phy = DPIO_PHY0, - }, - }, - { - .name = "dpio-common-c", - .domains = GLK_DPIO_CMN_C_POWER_DOMAINS, - .ops = &bxt_dpio_cmn_power_well_ops, - .id = GLK_DISP_PW_DPIO_CMN_C, - { - .bxt.phy = DPIO_PHY2, - }, - }, - { - .name = "AUX A", - .domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.regs = &hsw_power_well_regs, - .hsw.idx = GLK_PW_CTL_IDX_AUX_A, - }, - }, - { - .name = "AUX B", - .domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.regs = &hsw_power_well_regs, - .hsw.idx = GLK_PW_CTL_IDX_AUX_B, - }, - }, - { - .name = "AUX C", - .domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.regs = &hsw_power_well_regs, - .hsw.idx = GLK_PW_CTL_IDX_AUX_C, - }, - }, - { - .name = "DDI A IO power well", - .domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.regs = &hsw_power_well_regs, - .hsw.idx = GLK_PW_CTL_IDX_DDI_A, - }, - }, - { - .name = "DDI B IO power well", - .domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.regs = &hsw_power_well_regs, - .hsw.idx = SKL_PW_CTL_IDX_DDI_B, - }, - }, - { - .name = "DDI C IO power well", - .domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.regs = &hsw_power_well_regs, - 
.hsw.idx = SKL_PW_CTL_IDX_DDI_C, - }, - }, -}; - -static const struct i915_power_well_desc cnl_power_wells[] = { - { - .name = "always-on", - .always_on = true, - .domains = POWER_DOMAIN_MASK, - .ops = &i9xx_always_on_power_well_ops, - .id = DISP_PW_ID_NONE, - }, - { - .name = "power well 1", - /* Handled by the DMC firmware */ - .always_on = true, - .domains = 0, - .ops = &hsw_power_well_ops, - .id = SKL_DISP_PW_1, - { - .hsw.regs = &hsw_power_well_regs, - .hsw.idx = SKL_PW_CTL_IDX_PW_1, - .hsw.has_fuses = true, - }, - }, - { - .name = "AUX A", - .domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.regs = &hsw_power_well_regs, - .hsw.idx = GLK_PW_CTL_IDX_AUX_A, - }, - }, - { - .name = "AUX B", - .domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.regs = &hsw_power_well_regs, - .hsw.idx = GLK_PW_CTL_IDX_AUX_B, - }, - }, - { - .name = "AUX C", - .domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.regs = &hsw_power_well_regs, - .hsw.idx = GLK_PW_CTL_IDX_AUX_C, - }, - }, - { - .name = "AUX D", - .domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.regs = &hsw_power_well_regs, - .hsw.idx = CNL_PW_CTL_IDX_AUX_D, - }, - }, - { - .name = "DC off", - .domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS, - .ops = &gen9_dc_off_power_well_ops, - .id = DISP_PW_ID_NONE, - }, - { - .name = "power well 2", - .domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = SKL_DISP_PW_2, - { - .hsw.regs = &hsw_power_well_regs, - .hsw.idx = SKL_PW_CTL_IDX_PW_2, - .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), - .hsw.has_vga = true, - .hsw.has_fuses = true, - }, - }, - { - .name = "DDI A IO power well", - .domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.regs = &hsw_power_well_regs, - .hsw.idx = GLK_PW_CTL_IDX_DDI_A, - }, - }, - { - .name = "DDI B IO power well", - .domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.regs = &hsw_power_well_regs, - .hsw.idx = SKL_PW_CTL_IDX_DDI_B, - }, - }, - { - .name = "DDI C IO power well", - .domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.regs = &hsw_power_well_regs, - .hsw.idx = SKL_PW_CTL_IDX_DDI_C, - }, - }, - { - .name = "DDI D IO power well", - .domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.regs = &hsw_power_well_regs, - .hsw.idx = SKL_PW_CTL_IDX_DDI_D, - }, - }, - { - .name = "DDI F IO power well", - .domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.regs = &hsw_power_well_regs, - .hsw.idx = CNL_PW_CTL_IDX_DDI_F, - }, - }, - { - .name = "AUX F", - .domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.regs = &hsw_power_well_regs, - .hsw.idx = CNL_PW_CTL_IDX_AUX_F, - }, - }, -}; - -static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = { - .sync_hw = hsw_power_well_sync_hw, - .enable = icl_combo_phy_aux_power_well_enable, - .disable = icl_combo_phy_aux_power_well_disable, - .is_enabled = hsw_power_well_enabled, -}; - -static const struct i915_power_well_ops icl_tc_phy_aux_power_well_ops = { - .sync_hw = hsw_power_well_sync_hw, - .enable = 
icl_tc_phy_aux_power_well_enable, - .disable = hsw_power_well_disable, - .is_enabled = hsw_power_well_enabled, -}; - -static const struct i915_power_well_regs icl_aux_power_well_regs = { - .bios = ICL_PWR_WELL_CTL_AUX1, - .driver = ICL_PWR_WELL_CTL_AUX2, - .debug = ICL_PWR_WELL_CTL_AUX4, -}; - -static const struct i915_power_well_regs icl_ddi_power_well_regs = { - .bios = ICL_PWR_WELL_CTL_DDI1, - .driver = ICL_PWR_WELL_CTL_DDI2, - .debug = ICL_PWR_WELL_CTL_DDI4, -}; - -static const struct i915_power_well_desc icl_power_wells[] = { - { - .name = "always-on", - .always_on = true, - .domains = POWER_DOMAIN_MASK, - .ops = &i9xx_always_on_power_well_ops, - .id = DISP_PW_ID_NONE, - }, - { - .name = "power well 1", - /* Handled by the DMC firmware */ - .always_on = true, - .domains = 0, - .ops = &hsw_power_well_ops, - .id = SKL_DISP_PW_1, - { - .hsw.regs = &hsw_power_well_regs, - .hsw.idx = ICL_PW_CTL_IDX_PW_1, - .hsw.has_fuses = true, - }, - }, - { - .name = "DC off", - .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS, - .ops = &gen9_dc_off_power_well_ops, - .id = DISP_PW_ID_NONE, - }, - { - .name = "power well 2", - .domains = ICL_PW_2_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = SKL_DISP_PW_2, - { - .hsw.regs = &hsw_power_well_regs, - .hsw.idx = ICL_PW_CTL_IDX_PW_2, - .hsw.has_fuses = true, - }, - }, - { - .name = "power well 3", - .domains = ICL_PW_3_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.regs = &hsw_power_well_regs, - .hsw.idx = ICL_PW_CTL_IDX_PW_3, - .hsw.irq_pipe_mask = BIT(PIPE_B), - .hsw.has_vga = true, - .hsw.has_fuses = true, - }, - }, - { - .name = "DDI A IO", - .domains = ICL_DDI_IO_A_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.regs = &icl_ddi_power_well_regs, - .hsw.idx = ICL_PW_CTL_IDX_DDI_A, - }, - }, - { - .name = "DDI B IO", - .domains = ICL_DDI_IO_B_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.regs = &icl_ddi_power_well_regs, - .hsw.idx = ICL_PW_CTL_IDX_DDI_B, - }, - }, - { - .name = "DDI C IO", - .domains = ICL_DDI_IO_C_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.regs = &icl_ddi_power_well_regs, - .hsw.idx = ICL_PW_CTL_IDX_DDI_C, - }, - }, - { - .name = "DDI D IO", - .domains = ICL_DDI_IO_D_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.regs = &icl_ddi_power_well_regs, - .hsw.idx = ICL_PW_CTL_IDX_DDI_D, - }, - }, - { - .name = "DDI E IO", - .domains = ICL_DDI_IO_E_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.regs = &icl_ddi_power_well_regs, - .hsw.idx = ICL_PW_CTL_IDX_DDI_E, - }, - }, - { - .name = "DDI F IO", - .domains = ICL_DDI_IO_F_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.regs = &icl_ddi_power_well_regs, - .hsw.idx = ICL_PW_CTL_IDX_DDI_F, - }, - }, - { - .name = "AUX A", - .domains = ICL_AUX_A_IO_POWER_DOMAINS, - .ops = &icl_combo_phy_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.regs = &icl_aux_power_well_regs, - .hsw.idx = ICL_PW_CTL_IDX_AUX_A, - }, - }, - { - .name = "AUX B", - .domains = ICL_AUX_B_IO_POWER_DOMAINS, - .ops = &icl_combo_phy_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.regs = &icl_aux_power_well_regs, - .hsw.idx = ICL_PW_CTL_IDX_AUX_B, - }, - }, - { - .name = "AUX C", - .domains = ICL_AUX_C_IO_POWER_DOMAINS, - .ops = &icl_tc_phy_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.regs = &icl_aux_power_well_regs, - .hsw.idx = ICL_PW_CTL_IDX_AUX_C, - .hsw.is_tc_tbt = 
false, - }, - }, - { - .name = "AUX D", - .domains = ICL_AUX_D_IO_POWER_DOMAINS, - .ops = &icl_tc_phy_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.regs = &icl_aux_power_well_regs, - .hsw.idx = ICL_PW_CTL_IDX_AUX_D, - .hsw.is_tc_tbt = false, - }, - }, - { - .name = "AUX E", - .domains = ICL_AUX_E_IO_POWER_DOMAINS, - .ops = &icl_tc_phy_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.regs = &icl_aux_power_well_regs, - .hsw.idx = ICL_PW_CTL_IDX_AUX_E, - .hsw.is_tc_tbt = false, - }, - }, - { - .name = "AUX F", - .domains = ICL_AUX_F_IO_POWER_DOMAINS, - .ops = &icl_tc_phy_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.regs = &icl_aux_power_well_regs, - .hsw.idx = ICL_PW_CTL_IDX_AUX_F, - .hsw.is_tc_tbt = false, - }, - }, - { - .name = "AUX TBT1", - .domains = ICL_AUX_TBT1_IO_POWER_DOMAINS, - .ops = &icl_tc_phy_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.regs = &icl_aux_power_well_regs, - .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1, - .hsw.is_tc_tbt = true, - }, - }, - { - .name = "AUX TBT2", - .domains = ICL_AUX_TBT2_IO_POWER_DOMAINS, - .ops = &icl_tc_phy_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.regs = &icl_aux_power_well_regs, - .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2, - .hsw.is_tc_tbt = true, - }, - }, - { - .name = "AUX TBT3", - .domains = ICL_AUX_TBT3_IO_POWER_DOMAINS, - .ops = &icl_tc_phy_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.regs = &icl_aux_power_well_regs, - .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3, - .hsw.is_tc_tbt = true, - }, - }, - { - .name = "AUX TBT4", - .domains = ICL_AUX_TBT4_IO_POWER_DOMAINS, - .ops = &icl_tc_phy_aux_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.regs = &icl_aux_power_well_regs, - .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4, - .hsw.is_tc_tbt = true, - }, - }, - { - .name = "power well 4", - .domains = ICL_PW_4_POWER_DOMAINS, - .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, - { - .hsw.regs = &hsw_power_well_regs, - .hsw.idx = ICL_PW_CTL_IDX_PW_4, - .hsw.has_fuses = true, - .hsw.irq_pipe_mask = BIT(PIPE_C), - }, - }, -}; - -static int -sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv, - int disable_power_well) -{ - if (disable_power_well >= 0) - return !!disable_power_well; - - return 1; -} - -static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv, - int enable_dc) -{ - u32 mask; - int requested_dc; - int max_dc; - - if (INTEL_GEN(dev_priv) >= 11) { - max_dc = 2; - /* - * DC9 has a separate HW flow from the rest of the DC states, - * not depending on the DMC firmware. It's needed by system - * suspend/resume, so allow it unconditionally. 
-	 */
-		mask = DC_STATE_EN_DC9;
-	} else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv)) {
-		max_dc = 2;
-		mask = 0;
-	} else if (IS_GEN9_LP(dev_priv)) {
-		max_dc = 1;
-		mask = DC_STATE_EN_DC9;
-	} else {
-		max_dc = 0;
-		mask = 0;
-	}
-
-	if (!i915_modparams.disable_power_well)
-		max_dc = 0;
-
-	if (enable_dc >= 0 && enable_dc <= max_dc) {
-		requested_dc = enable_dc;
-	} else if (enable_dc == -1) {
-		requested_dc = max_dc;
-	} else if (enable_dc > max_dc && enable_dc <= 2) {
-		DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n",
-			      enable_dc, max_dc);
-		requested_dc = max_dc;
-	} else {
-		DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc);
-		requested_dc = max_dc;
-	}
-
-	if (requested_dc > 1)
-		mask |= DC_STATE_EN_UPTO_DC6;
-	if (requested_dc > 0)
-		mask |= DC_STATE_EN_UPTO_DC5;
-
-	DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask);
-
-	return mask;
-}
-
-static int
-__set_power_wells(struct i915_power_domains *power_domains,
-		  const struct i915_power_well_desc *power_well_descs,
-		  int power_well_count)
-{
-	u64 power_well_ids = 0;
-	int i;
-
-	power_domains->power_well_count = power_well_count;
-	power_domains->power_wells =
-		kcalloc(power_well_count,
-			sizeof(*power_domains->power_wells),
-			GFP_KERNEL);
-	if (!power_domains->power_wells)
-		return -ENOMEM;
-
-	for (i = 0; i < power_well_count; i++) {
-		enum i915_power_well_id id = power_well_descs[i].id;
-
-		power_domains->power_wells[i].desc = &power_well_descs[i];
-
-		if (id == DISP_PW_ID_NONE)
-			continue;
-
-		WARN_ON(id >= sizeof(power_well_ids) * 8);
-		WARN_ON(power_well_ids & BIT_ULL(id));
-		power_well_ids |= BIT_ULL(id);
-	}
-
-	return 0;
-}
-
-#define set_power_wells(power_domains, __power_well_descs) \
-	__set_power_wells(power_domains, __power_well_descs, \
-			  ARRAY_SIZE(__power_well_descs))
-
-/**
- * intel_power_domains_init - initializes the power domain structures
- * @dev_priv: i915 device instance
- *
- * Initializes the power domain structures for @dev_priv depending upon the
- * supported platform.
- */
-int intel_power_domains_init(struct drm_i915_private *dev_priv)
-{
-	struct i915_power_domains *power_domains = &dev_priv->power_domains;
-	int err;
-
-	i915_modparams.disable_power_well =
-		sanitize_disable_power_well_option(dev_priv,
-						   i915_modparams.disable_power_well);
-	dev_priv->csr.allowed_dc_mask =
-		get_allowed_dc_mask(dev_priv, i915_modparams.enable_dc);
-
-	BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);
-
-	mutex_init(&power_domains->lock);
-
-	INIT_DELAYED_WORK(&power_domains->async_put_work,
-			  intel_display_power_put_async_work);
-
-	/*
-	 * The enabling order will be from lower to higher indexed wells,
-	 * the disabling order is reversed.
-	 */
-	if (IS_GEN(dev_priv, 11)) {
-		err = set_power_wells(power_domains, icl_power_wells);
-	} else if (IS_CANNONLAKE(dev_priv)) {
-		err = set_power_wells(power_domains, cnl_power_wells);
-
-		/*
-		 * DDI and Aux IO are getting enabled for all ports
-		 * regardless the presence or use. So, in order to avoid
-		 * timeouts, lets remove them from the list
-		 * for the SKUs without port F.
- */ - if (!IS_CNL_WITH_PORT_F(dev_priv)) - power_domains->power_well_count -= 2; - } else if (IS_GEMINILAKE(dev_priv)) { - err = set_power_wells(power_domains, glk_power_wells); - } else if (IS_BROXTON(dev_priv)) { - err = set_power_wells(power_domains, bxt_power_wells); - } else if (IS_GEN9_BC(dev_priv)) { - err = set_power_wells(power_domains, skl_power_wells); - } else if (IS_CHERRYVIEW(dev_priv)) { - err = set_power_wells(power_domains, chv_power_wells); - } else if (IS_BROADWELL(dev_priv)) { - err = set_power_wells(power_domains, bdw_power_wells); - } else if (IS_HASWELL(dev_priv)) { - err = set_power_wells(power_domains, hsw_power_wells); - } else if (IS_VALLEYVIEW(dev_priv)) { - err = set_power_wells(power_domains, vlv_power_wells); - } else if (IS_I830(dev_priv)) { - err = set_power_wells(power_domains, i830_power_wells); - } else { - err = set_power_wells(power_domains, i9xx_always_on_power_well); - } - - return err; -} - -/** - * intel_power_domains_cleanup - clean up power domains resources - * @dev_priv: i915 device instance - * - * Release any resources acquired by intel_power_domains_init() - */ -void intel_power_domains_cleanup(struct drm_i915_private *dev_priv) -{ - kfree(dev_priv->power_domains.power_wells); -} - -static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv) -{ - struct i915_power_domains *power_domains = &dev_priv->power_domains; - struct i915_power_well *power_well; - - mutex_lock(&power_domains->lock); - for_each_power_well(dev_priv, power_well) { - power_well->desc->ops->sync_hw(dev_priv, power_well); - power_well->hw_enabled = - power_well->desc->ops->is_enabled(dev_priv, power_well); - } - mutex_unlock(&power_domains->lock); -} - -static inline -bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv, - i915_reg_t reg, bool enable) -{ - u32 val, status; - - val = I915_READ(reg); - val = enable ? (val | DBUF_POWER_REQUEST) : (val & ~DBUF_POWER_REQUEST); - I915_WRITE(reg, val); - POSTING_READ(reg); - udelay(10); - - status = I915_READ(reg) & DBUF_POWER_STATE; - if ((enable && !status) || (!enable && status)) { - DRM_ERROR("DBus power %s timeout!\n", - enable ? 
"enable" : "disable"); - return false; - } - return true; -} - -static void gen9_dbuf_enable(struct drm_i915_private *dev_priv) -{ - intel_dbuf_slice_set(dev_priv, DBUF_CTL, true); -} - -static void gen9_dbuf_disable(struct drm_i915_private *dev_priv) -{ - intel_dbuf_slice_set(dev_priv, DBUF_CTL, false); -} - -static u8 intel_dbuf_max_slices(struct drm_i915_private *dev_priv) -{ - if (INTEL_GEN(dev_priv) < 11) - return 1; - return 2; -} - -void icl_dbuf_slices_update(struct drm_i915_private *dev_priv, - u8 req_slices) -{ - const u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices; - bool ret; - - if (req_slices > intel_dbuf_max_slices(dev_priv)) { - DRM_ERROR("Invalid number of dbuf slices requested\n"); - return; - } - - if (req_slices == hw_enabled_slices || req_slices == 0) - return; - - if (req_slices > hw_enabled_slices) - ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true); - else - ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, false); - - if (ret) - dev_priv->wm.skl_hw.ddb.enabled_slices = req_slices; -} - -static void icl_dbuf_enable(struct drm_i915_private *dev_priv) -{ - I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) | DBUF_POWER_REQUEST); - I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) | DBUF_POWER_REQUEST); - POSTING_READ(DBUF_CTL_S2); - - udelay(10); - - if (!(I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) || - !(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE)) - DRM_ERROR("DBuf power enable timeout\n"); - else - /* - * FIXME: for now pretend that we only have 1 slice, see - * intel_enabled_dbuf_slices_num(). - */ - dev_priv->wm.skl_hw.ddb.enabled_slices = 1; -} - -static void icl_dbuf_disable(struct drm_i915_private *dev_priv) -{ - I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) & ~DBUF_POWER_REQUEST); - I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) & ~DBUF_POWER_REQUEST); - POSTING_READ(DBUF_CTL_S2); - - udelay(10); - - if ((I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) || - (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE)) - DRM_ERROR("DBuf power disable timeout!\n"); - else - /* - * FIXME: for now pretend that the first slice is always - * enabled, see intel_enabled_dbuf_slices_num(). - */ - dev_priv->wm.skl_hw.ddb.enabled_slices = 1; -} - -static void icl_mbus_init(struct drm_i915_private *dev_priv) -{ - u32 val; - - val = MBUS_ABOX_BT_CREDIT_POOL1(16) | - MBUS_ABOX_BT_CREDIT_POOL2(16) | - MBUS_ABOX_B_CREDIT(1) | - MBUS_ABOX_BW_CREDIT(1); - - I915_WRITE(MBUS_ABOX_CTL, val); -} - -static void hsw_assert_cdclk(struct drm_i915_private *dev_priv) -{ - u32 val = I915_READ(LCPLL_CTL); - - /* - * The LCPLL register should be turned on by the BIOS. For now - * let's just check its state and print errors in case - * something is wrong. Don't even try to turn it on. 
- */ - - if (val & LCPLL_CD_SOURCE_FCLK) - DRM_ERROR("CDCLK source is not LCPLL\n"); - - if (val & LCPLL_PLL_DISABLE) - DRM_ERROR("LCPLL is disabled\n"); -} - -static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) -{ - struct drm_device *dev = &dev_priv->drm; - struct intel_crtc *crtc; - - for_each_intel_crtc(dev, crtc) - I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n", - pipe_name(crtc->pipe)); - - I915_STATE_WARN(I915_READ(HSW_PWR_WELL_CTL2), - "Display power well on\n"); - I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, - "SPLL enabled\n"); - I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, - "WRPLL1 enabled\n"); - I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, - "WRPLL2 enabled\n"); - I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON, - "Panel power on\n"); - I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE, - "CPU PWM1 enabled\n"); - if (IS_HASWELL(dev_priv)) - I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE, - "CPU PWM2 enabled\n"); - I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE, - "PCH PWM1 enabled\n"); - I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE, - "Utility pin enabled\n"); - I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, - "PCH GTC enabled\n"); - - /* - * In theory we can still leave IRQs enabled, as long as only the HPD - * interrupts remain enabled. We used to check for that, but since it's - * gen-specific and since we only disable LCPLL after we fully disable - * the interrupts, the check below should be enough. - */ - I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n"); -} - -static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv) -{ - if (IS_HASWELL(dev_priv)) - return I915_READ(D_COMP_HSW); - else - return I915_READ(D_COMP_BDW); -} - -static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val) -{ - if (IS_HASWELL(dev_priv)) { - if (sandybridge_pcode_write(dev_priv, - GEN6_PCODE_WRITE_D_COMP, val)) - DRM_DEBUG_KMS("Failed to write to D_COMP\n"); - } else { - I915_WRITE(D_COMP_BDW, val); - POSTING_READ(D_COMP_BDW); - } -} - -/* - * This function implements pieces of two sequences from BSpec: - * - Sequence for display software to disable LCPLL - * - Sequence for display software to allow package C8+ - * The steps implemented here are just the steps that actually touch the LCPLL - * register. Callers should take care of disabling all the display engine - * functions, doing the mode unset, fixing interrupts, etc. 
- */ -static void hsw_disable_lcpll(struct drm_i915_private *dev_priv, - bool switch_to_fclk, bool allow_power_down) -{ - u32 val; - - assert_can_disable_lcpll(dev_priv); - - val = I915_READ(LCPLL_CTL); - - if (switch_to_fclk) { - val |= LCPLL_CD_SOURCE_FCLK; - I915_WRITE(LCPLL_CTL, val); - - if (wait_for_us(I915_READ(LCPLL_CTL) & - LCPLL_CD_SOURCE_FCLK_DONE, 1)) - DRM_ERROR("Switching to FCLK failed\n"); - - val = I915_READ(LCPLL_CTL); - } - - val |= LCPLL_PLL_DISABLE; - I915_WRITE(LCPLL_CTL, val); - POSTING_READ(LCPLL_CTL); - - if (intel_wait_for_register(&dev_priv->uncore, LCPLL_CTL, - LCPLL_PLL_LOCK, 0, 1)) - DRM_ERROR("LCPLL still locked\n"); - - val = hsw_read_dcomp(dev_priv); - val |= D_COMP_COMP_DISABLE; - hsw_write_dcomp(dev_priv, val); - ndelay(100); - - if (wait_for((hsw_read_dcomp(dev_priv) & - D_COMP_RCOMP_IN_PROGRESS) == 0, 1)) - DRM_ERROR("D_COMP RCOMP still in progress\n"); - - if (allow_power_down) { - val = I915_READ(LCPLL_CTL); - val |= LCPLL_POWER_DOWN_ALLOW; - I915_WRITE(LCPLL_CTL, val); - POSTING_READ(LCPLL_CTL); - } -} - -/* - * Fully restores LCPLL, disallowing power down and switching back to LCPLL - * source. - */ -static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) -{ - u32 val; - - val = I915_READ(LCPLL_CTL); - - if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK | - LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK) - return; - - /* - * Make sure we're not on PC8 state before disabling PC8, otherwise - * we'll hang the machine. To prevent PC8 state, just enable force_wake. - */ - intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); - - if (val & LCPLL_POWER_DOWN_ALLOW) { - val &= ~LCPLL_POWER_DOWN_ALLOW; - I915_WRITE(LCPLL_CTL, val); - POSTING_READ(LCPLL_CTL); - } - - val = hsw_read_dcomp(dev_priv); - val |= D_COMP_COMP_FORCE; - val &= ~D_COMP_COMP_DISABLE; - hsw_write_dcomp(dev_priv, val); - - val = I915_READ(LCPLL_CTL); - val &= ~LCPLL_PLL_DISABLE; - I915_WRITE(LCPLL_CTL, val); - - if (intel_wait_for_register(&dev_priv->uncore, LCPLL_CTL, - LCPLL_PLL_LOCK, LCPLL_PLL_LOCK, 5)) - DRM_ERROR("LCPLL not locked yet\n"); - - if (val & LCPLL_CD_SOURCE_FCLK) { - val = I915_READ(LCPLL_CTL); - val &= ~LCPLL_CD_SOURCE_FCLK; - I915_WRITE(LCPLL_CTL, val); - - if (wait_for_us((I915_READ(LCPLL_CTL) & - LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1)) - DRM_ERROR("Switching back to LCPLL failed\n"); - } - - intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); - - intel_update_cdclk(dev_priv); - intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK"); -} - -/* - * Package states C8 and deeper are really deep PC states that can only be - * reached when all the devices on the system allow it, so even if the graphics - * device allows PC8+, it doesn't mean the system will actually get to these - * states. Our driver only allows PC8+ when going into runtime PM. - * - * The requirements for PC8+ are that all the outputs are disabled, the power - * well is disabled and most interrupts are disabled, and these are also - * requirements for runtime PM. When these conditions are met, we manually do - * the other conditions: disable the interrupts, clocks and switch LCPLL refclk - * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard - * hang the machine. - * - * When we really reach PC8 or deeper states (not just when we allow it) we lose - * the state of some registers, so when we come back from PC8+ we need to - * restore this state. 
We don't get into PC8+ if we're not in RC6, so we don't - * need to take care of the registers kept by RC6. Notice that this happens even - * if we don't put the device in PCI D3 state (which is what currently happens - * because of the runtime PM support). - * - * For more, read "Display Sequences for Package C8" on the hardware - * documentation. - */ -void hsw_enable_pc8(struct drm_i915_private *dev_priv) -{ - u32 val; - - DRM_DEBUG_KMS("Enabling package C8+\n"); - - if (HAS_PCH_LPT_LP(dev_priv)) { - val = I915_READ(SOUTH_DSPCLK_GATE_D); - val &= ~PCH_LP_PARTITION_LEVEL_DISABLE; - I915_WRITE(SOUTH_DSPCLK_GATE_D, val); - } - - lpt_disable_clkout_dp(dev_priv); - hsw_disable_lcpll(dev_priv, true, true); -} - -void hsw_disable_pc8(struct drm_i915_private *dev_priv) -{ - u32 val; - - DRM_DEBUG_KMS("Disabling package C8+\n"); - - hsw_restore_lcpll(dev_priv); - intel_init_pch_refclk(dev_priv); - - if (HAS_PCH_LPT_LP(dev_priv)) { - val = I915_READ(SOUTH_DSPCLK_GATE_D); - val |= PCH_LP_PARTITION_LEVEL_DISABLE; - I915_WRITE(SOUTH_DSPCLK_GATE_D, val); - } -} - -static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv, - bool enable) -{ - i915_reg_t reg; - u32 reset_bits, val; - - if (IS_IVYBRIDGE(dev_priv)) { - reg = GEN7_MSG_CTL; - reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK; - } else { - reg = HSW_NDE_RSTWRN_OPT; - reset_bits = RESET_PCH_HANDSHAKE_ENABLE; - } - - val = I915_READ(reg); - - if (enable) - val |= reset_bits; - else - val &= ~reset_bits; - - I915_WRITE(reg, val); -} - -static void skl_display_core_init(struct drm_i915_private *dev_priv, - bool resume) -{ - struct i915_power_domains *power_domains = &dev_priv->power_domains; - struct i915_power_well *well; - - gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); - - /* enable PCH reset handshake */ - intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv)); - - /* enable PG1 and Misc I/O */ - mutex_lock(&power_domains->lock); - - well = lookup_power_well(dev_priv, SKL_DISP_PW_1); - intel_power_well_enable(dev_priv, well); - - well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO); - intel_power_well_enable(dev_priv, well); - - mutex_unlock(&power_domains->lock); - - intel_cdclk_init(dev_priv); - - gen9_dbuf_enable(dev_priv); - - if (resume && dev_priv->csr.dmc_payload) - intel_csr_load_program(dev_priv); -} - -static void skl_display_core_uninit(struct drm_i915_private *dev_priv) -{ - struct i915_power_domains *power_domains = &dev_priv->power_domains; - struct i915_power_well *well; - - gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); - - gen9_dbuf_disable(dev_priv); - - intel_cdclk_uninit(dev_priv); - - /* The spec doesn't call for removing the reset handshake flag */ - /* disable PG1 and Misc I/O */ - - mutex_lock(&power_domains->lock); - - /* - * BSpec says to keep the MISC IO power well enabled here, only - * remove our request for power well 1. - * Note that even though the driver's request is removed power well 1 - * may stay enabled after this due to DMC's own request on it. 
- */ - well = lookup_power_well(dev_priv, SKL_DISP_PW_1); - intel_power_well_disable(dev_priv, well); - - mutex_unlock(&power_domains->lock); - - usleep_range(10, 30); /* 10 us delay per Bspec */ -} - -void bxt_display_core_init(struct drm_i915_private *dev_priv, - bool resume) -{ - struct i915_power_domains *power_domains = &dev_priv->power_domains; - struct i915_power_well *well; - - gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); - - /* - * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT - * or else the reset will hang because there is no PCH to respond. - * Move the handshake programming to initialization sequence. - * Previously was left up to BIOS. - */ - intel_pch_reset_handshake(dev_priv, false); - - /* Enable PG1 */ - mutex_lock(&power_domains->lock); - - well = lookup_power_well(dev_priv, SKL_DISP_PW_1); - intel_power_well_enable(dev_priv, well); - - mutex_unlock(&power_domains->lock); - - intel_cdclk_init(dev_priv); - - gen9_dbuf_enable(dev_priv); - - if (resume && dev_priv->csr.dmc_payload) - intel_csr_load_program(dev_priv); -} - -void bxt_display_core_uninit(struct drm_i915_private *dev_priv) -{ - struct i915_power_domains *power_domains = &dev_priv->power_domains; - struct i915_power_well *well; - - gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); - - gen9_dbuf_disable(dev_priv); - - intel_cdclk_uninit(dev_priv); - - /* The spec doesn't call for removing the reset handshake flag */ - - /* - * Disable PW1 (PG1). - * Note that even though the driver's request is removed power well 1 - * may stay enabled after this due to DMC's own request on it. - */ - mutex_lock(&power_domains->lock); - - well = lookup_power_well(dev_priv, SKL_DISP_PW_1); - intel_power_well_disable(dev_priv, well); - - mutex_unlock(&power_domains->lock); - - usleep_range(10, 30); /* 10 us delay per Bspec */ -} - -static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume) -{ - struct i915_power_domains *power_domains = &dev_priv->power_domains; - struct i915_power_well *well; - - gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); - - /* 1. Enable PCH Reset Handshake */ - intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv)); - - /* 2-3. */ - intel_combo_phy_init(dev_priv); - - /* - * 4. Enable Power Well 1 (PG1). - * The AUX IO power wells will be enabled on demand. - */ - mutex_lock(&power_domains->lock); - well = lookup_power_well(dev_priv, SKL_DISP_PW_1); - intel_power_well_enable(dev_priv, well); - mutex_unlock(&power_domains->lock); - - /* 5. Enable CD clock */ - intel_cdclk_init(dev_priv); - - /* 6. Enable DBUF */ - gen9_dbuf_enable(dev_priv); - - if (resume && dev_priv->csr.dmc_payload) - intel_csr_load_program(dev_priv); -} - -static void cnl_display_core_uninit(struct drm_i915_private *dev_priv) -{ - struct i915_power_domains *power_domains = &dev_priv->power_domains; - struct i915_power_well *well; - - gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); - - /* 1. Disable all display engine functions -> aready done */ - - /* 2. Disable DBUF */ - gen9_dbuf_disable(dev_priv); - - /* 3. Disable CD clock */ - intel_cdclk_uninit(dev_priv); - - /* - * 4. Disable Power Well 1 (PG1). - * The AUX IO power wells are toggled on demand, so they are already - * disabled at this point. - */ - mutex_lock(&power_domains->lock); - well = lookup_power_well(dev_priv, SKL_DISP_PW_1); - intel_power_well_disable(dev_priv, well); - mutex_unlock(&power_domains->lock); - - usleep_range(10, 30); /* 10 us delay per Bspec */ - - /* 5. 
*/ - intel_combo_phy_uninit(dev_priv); -} - -void icl_display_core_init(struct drm_i915_private *dev_priv, - bool resume) -{ - struct i915_power_domains *power_domains = &dev_priv->power_domains; - struct i915_power_well *well; - - gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); - - /* 1. Enable PCH reset handshake. */ - intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv)); - - /* 2. Initialize all combo phys */ - intel_combo_phy_init(dev_priv); - - /* - * 3. Enable Power Well 1 (PG1). - * The AUX IO power wells will be enabled on demand. - */ - mutex_lock(&power_domains->lock); - well = lookup_power_well(dev_priv, SKL_DISP_PW_1); - intel_power_well_enable(dev_priv, well); - mutex_unlock(&power_domains->lock); - - /* 4. Enable CDCLK. */ - intel_cdclk_init(dev_priv); - - /* 5. Enable DBUF. */ - icl_dbuf_enable(dev_priv); - - /* 6. Setup MBUS. */ - icl_mbus_init(dev_priv); - - if (resume && dev_priv->csr.dmc_payload) - intel_csr_load_program(dev_priv); -} - -void icl_display_core_uninit(struct drm_i915_private *dev_priv) -{ - struct i915_power_domains *power_domains = &dev_priv->power_domains; - struct i915_power_well *well; - - gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); - - /* 1. Disable all display engine functions -> aready done */ - - /* 2. Disable DBUF */ - icl_dbuf_disable(dev_priv); - - /* 3. Disable CD clock */ - intel_cdclk_uninit(dev_priv); - - /* - * 4. Disable Power Well 1 (PG1). - * The AUX IO power wells are toggled on demand, so they are already - * disabled at this point. - */ - mutex_lock(&power_domains->lock); - well = lookup_power_well(dev_priv, SKL_DISP_PW_1); - intel_power_well_disable(dev_priv, well); - mutex_unlock(&power_domains->lock); - - /* 5. */ - intel_combo_phy_uninit(dev_priv); -} - -static void chv_phy_control_init(struct drm_i915_private *dev_priv) -{ - struct i915_power_well *cmn_bc = - lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC); - struct i915_power_well *cmn_d = - lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D); - - /* - * DISPLAY_PHY_CONTROL can get corrupted if read. As a - * workaround never ever read DISPLAY_PHY_CONTROL, and - * instead maintain a shadow copy ourselves. Use the actual - * power well state and lane status to reconstruct the - * expected initial value. - */ - dev_priv->chv_phy_control = - PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) | - PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) | - PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) | - PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) | - PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0); - - /* - * If all lanes are disabled we leave the override disabled - * with all power down bits cleared to match the state we - * would use after disabling the port. Otherwise enable the - * override and set the lane powerdown bits accding to the - * current lane status. 
- */ - if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) { - u32 status = I915_READ(DPLL(PIPE_A)); - unsigned int mask; - - mask = status & DPLL_PORTB_READY_MASK; - if (mask == 0xf) - mask = 0x0; - else - dev_priv->chv_phy_control |= - PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0); - - dev_priv->chv_phy_control |= - PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0); - - mask = (status & DPLL_PORTC_READY_MASK) >> 4; - if (mask == 0xf) - mask = 0x0; - else - dev_priv->chv_phy_control |= - PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1); - - dev_priv->chv_phy_control |= - PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1); - - dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0); - - dev_priv->chv_phy_assert[DPIO_PHY0] = false; - } else { - dev_priv->chv_phy_assert[DPIO_PHY0] = true; - } - - if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) { - u32 status = I915_READ(DPIO_PHY_STATUS); - unsigned int mask; - - mask = status & DPLL_PORTD_READY_MASK; - - if (mask == 0xf) - mask = 0x0; - else - dev_priv->chv_phy_control |= - PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0); - - dev_priv->chv_phy_control |= - PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0); - - dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1); - - dev_priv->chv_phy_assert[DPIO_PHY1] = false; - } else { - dev_priv->chv_phy_assert[DPIO_PHY1] = true; - } - - I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control); - - DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n", - dev_priv->chv_phy_control); -} - -static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv) -{ - struct i915_power_well *cmn = - lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC); - struct i915_power_well *disp2d = - lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D); - - /* If the display might be already active skip this */ - if (cmn->desc->ops->is_enabled(dev_priv, cmn) && - disp2d->desc->ops->is_enabled(dev_priv, disp2d) && - I915_READ(DPIO_CTL) & DPIO_CMNRST) - return; - - DRM_DEBUG_KMS("toggling display PHY side reset\n"); - - /* cmnlane needs DPLL registers */ - disp2d->desc->ops->enable(dev_priv, disp2d); - - /* - * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx: - * Need to assert and de-assert PHY SB reset by gating the - * common lane power, then un-gating it. - * Simply ungating isn't enough to reset the PHY enough to get - * ports and lanes running. 
- */ - cmn->desc->ops->disable(dev_priv, cmn); -} - -static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0) -{ - bool ret; - - vlv_punit_get(dev_priv); - ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE; - vlv_punit_put(dev_priv); - - return ret; -} - -static void assert_ved_power_gated(struct drm_i915_private *dev_priv) -{ - WARN(!vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0), - "VED not power gated\n"); -} - -static void assert_isp_power_gated(struct drm_i915_private *dev_priv) -{ - static const struct pci_device_id isp_ids[] = { - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)}, - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)}, - {} - }; - - WARN(!pci_dev_present(isp_ids) && - !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0), - "ISP not power gated\n"); -} - -static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv); - -/** - * intel_power_domains_init_hw - initialize hardware power domain state - * @i915: i915 device instance - * @resume: Called from resume code paths or not - * - * This function initializes the hardware power domain state and enables all - * power wells belonging to the INIT power domain. Power wells in other - * domains (and not in the INIT domain) are referenced or disabled by - * intel_modeset_readout_hw_state(). After that the reference count of each - * power well must match its HW enabled state, see - * intel_power_domains_verify_state(). - * - * It will return with power domains disabled (to be enabled later by - * intel_power_domains_enable()) and must be paired with - * intel_power_domains_fini_hw(). - */ -void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume) -{ - struct i915_power_domains *power_domains = &i915->power_domains; - - power_domains->initializing = true; - - if (INTEL_GEN(i915) >= 11) { - icl_display_core_init(i915, resume); - } else if (IS_CANNONLAKE(i915)) { - cnl_display_core_init(i915, resume); - } else if (IS_GEN9_BC(i915)) { - skl_display_core_init(i915, resume); - } else if (IS_GEN9_LP(i915)) { - bxt_display_core_init(i915, resume); - } else if (IS_CHERRYVIEW(i915)) { - mutex_lock(&power_domains->lock); - chv_phy_control_init(i915); - mutex_unlock(&power_domains->lock); - assert_isp_power_gated(i915); - } else if (IS_VALLEYVIEW(i915)) { - mutex_lock(&power_domains->lock); - vlv_cmnlane_wa(i915); - mutex_unlock(&power_domains->lock); - assert_ved_power_gated(i915); - assert_isp_power_gated(i915); - } else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) { - hsw_assert_cdclk(i915); - intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915)); - } else if (IS_IVYBRIDGE(i915)) { - intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915)); - } - - /* - * Keep all power wells enabled for any dependent HW access during - * initialization and to make sure we keep BIOS enabled display HW - * resources powered until display HW readout is complete. We drop - * this reference in intel_power_domains_enable(). - */ - power_domains->wakeref = - intel_display_power_get(i915, POWER_DOMAIN_INIT); - - /* Disable power support if the user asked so. */ - if (!i915_modparams.disable_power_well) - intel_display_power_get(i915, POWER_DOMAIN_INIT); - intel_power_domains_sync_hw(i915); - - power_domains->initializing = false; -} - -/** - * intel_power_domains_fini_hw - deinitialize hw power domain state - * @i915: i915 device instance - * - * De-initializes the display power domain HW state. It also ensures that the - * device stays powered up so that the driver can be reloaded. 
- * - * It must be called with power domains already disabled (after a call to - * intel_power_domains_disable()) and must be paired with - * intel_power_domains_init_hw(). - */ -void intel_power_domains_fini_hw(struct drm_i915_private *i915) -{ - intel_wakeref_t wakeref __maybe_unused = - fetch_and_zero(&i915->power_domains.wakeref); - - /* Remove the refcount we took to keep power well support disabled. */ - if (!i915_modparams.disable_power_well) - intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT); - - intel_display_power_flush_work_sync(i915); - - intel_power_domains_verify_state(i915); - - /* Keep the power well enabled, but cancel its rpm wakeref. */ - intel_runtime_pm_put(i915, wakeref); -} - -/** - * intel_power_domains_enable - enable toggling of display power wells - * @i915: i915 device instance - * - * Enable the ondemand enabling/disabling of the display power wells. Note that - * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled - * only at specific points of the display modeset sequence, thus they are not - * affected by the intel_power_domains_enable()/disable() calls. The purpose - * of these function is to keep the rest of power wells enabled until the end - * of display HW readout (which will acquire the power references reflecting - * the current HW state). - */ -void intel_power_domains_enable(struct drm_i915_private *i915) -{ - intel_wakeref_t wakeref __maybe_unused = - fetch_and_zero(&i915->power_domains.wakeref); - - intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref); - intel_power_domains_verify_state(i915); -} - -/** - * intel_power_domains_disable - disable toggling of display power wells - * @i915: i915 device instance - * - * Disable the ondemand enabling/disabling of the display power wells. See - * intel_power_domains_enable() for which power wells this call controls. - */ -void intel_power_domains_disable(struct drm_i915_private *i915) -{ - struct i915_power_domains *power_domains = &i915->power_domains; - - WARN_ON(power_domains->wakeref); - power_domains->wakeref = - intel_display_power_get(i915, POWER_DOMAIN_INIT); - - intel_power_domains_verify_state(i915); -} - -/** - * intel_power_domains_suspend - suspend power domain state - * @i915: i915 device instance - * @suspend_mode: specifies the target suspend state (idle, mem, hibernation) - * - * This function prepares the hardware power domain state before entering - * system suspend. - * - * It must be called with power domains already disabled (after a call to - * intel_power_domains_disable()) and paired with intel_power_domains_resume(). - */ -void intel_power_domains_suspend(struct drm_i915_private *i915, - enum i915_drm_suspend_mode suspend_mode) -{ - struct i915_power_domains *power_domains = &i915->power_domains; - intel_wakeref_t wakeref __maybe_unused = - fetch_and_zero(&power_domains->wakeref); - - intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref); - - /* - * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9 - * support don't manually deinit the power domains. This also means the - * CSR/DMC firmware will stay active, it will power down any HW - * resources as required and also enable deeper system power states - * that would be blocked if the firmware was inactive. 
- */ - if (!(i915->csr.allowed_dc_mask & DC_STATE_EN_DC9) && - suspend_mode == I915_DRM_SUSPEND_IDLE && - i915->csr.dmc_payload) { - intel_display_power_flush_work(i915); - intel_power_domains_verify_state(i915); - return; - } - - /* - * Even if power well support was disabled we still want to disable - * power wells if power domains must be deinitialized for suspend. - */ - if (!i915_modparams.disable_power_well) - intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT); - - intel_display_power_flush_work(i915); - intel_power_domains_verify_state(i915); - - if (INTEL_GEN(i915) >= 11) - icl_display_core_uninit(i915); - else if (IS_CANNONLAKE(i915)) - cnl_display_core_uninit(i915); - else if (IS_GEN9_BC(i915)) - skl_display_core_uninit(i915); - else if (IS_GEN9_LP(i915)) - bxt_display_core_uninit(i915); - - power_domains->display_core_suspended = true; -} - -/** - * intel_power_domains_resume - resume power domain state - * @i915: i915 device instance - * - * This function resume the hardware power domain state during system resume. - * - * It will return with power domain support disabled (to be enabled later by - * intel_power_domains_enable()) and must be paired with - * intel_power_domains_suspend(). - */ -void intel_power_domains_resume(struct drm_i915_private *i915) -{ - struct i915_power_domains *power_domains = &i915->power_domains; - - if (power_domains->display_core_suspended) { - intel_power_domains_init_hw(i915, true); - power_domains->display_core_suspended = false; - } else { - WARN_ON(power_domains->wakeref); - power_domains->wakeref = - intel_display_power_get(i915, POWER_DOMAIN_INIT); - } - - intel_power_domains_verify_state(i915); -} - -#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) - -static void intel_power_domains_dump_info(struct drm_i915_private *i915) -{ - struct i915_power_domains *power_domains = &i915->power_domains; - struct i915_power_well *power_well; - - for_each_power_well(i915, power_well) { - enum intel_display_power_domain domain; - - DRM_DEBUG_DRIVER("%-25s %d\n", - power_well->desc->name, power_well->count); - - for_each_power_domain(domain, power_well->desc->domains) - DRM_DEBUG_DRIVER(" %-23s %d\n", - intel_display_power_domain_str(domain), - power_domains->domain_use_count[domain]); - } -} - -/** - * intel_power_domains_verify_state - verify the HW/SW state for all power wells - * @i915: i915 device instance - * - * Verify if the reference count of each power well matches its HW enabled - * state and the total refcount of the domains it belongs to. This must be - * called after modeset HW state sanitization, which is responsible for - * acquiring reference counts for any power wells in use and disabling the - * ones left on by BIOS but not required by any active output. 
- */ -static void intel_power_domains_verify_state(struct drm_i915_private *i915) -{ - struct i915_power_domains *power_domains = &i915->power_domains; - struct i915_power_well *power_well; - bool dump_domain_info; - - mutex_lock(&power_domains->lock); - - verify_async_put_domains_state(power_domains); - - dump_domain_info = false; - for_each_power_well(i915, power_well) { - enum intel_display_power_domain domain; - int domains_count; - bool enabled; - - enabled = power_well->desc->ops->is_enabled(i915, power_well); - if ((power_well->count || power_well->desc->always_on) != - enabled) - DRM_ERROR("power well %s state mismatch (refcount %d/enabled %d)", - power_well->desc->name, - power_well->count, enabled); - - domains_count = 0; - for_each_power_domain(domain, power_well->desc->domains) - domains_count += power_domains->domain_use_count[domain]; - - if (power_well->count != domains_count) { - DRM_ERROR("power well %s refcount/domain refcount mismatch " - "(refcount %d/domains refcount %d)\n", - power_well->desc->name, power_well->count, - domains_count); - dump_domain_info = true; - } - } - - if (dump_domain_info) { - static bool dumped; - - if (!dumped) { - intel_power_domains_dump_info(i915); - dumped = true; - } - } - - mutex_unlock(&power_domains->lock); -} - -#else - -static void intel_power_domains_verify_state(struct drm_i915_private *i915) -{ -} - -#endif - -static intel_wakeref_t __intel_runtime_pm_get(struct drm_i915_private *i915, - bool wakelock) -{ - struct pci_dev *pdev = i915->drm.pdev; - struct device *kdev = &pdev->dev; - int ret; - - ret = pm_runtime_get_sync(kdev); - WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret); - - intel_runtime_pm_acquire(i915, wakelock); - - return track_intel_runtime_pm_wakeref(i915); -} - -static intel_wakeref_t intel_runtime_pm_get_raw(struct drm_i915_private *i915) -{ - return __intel_runtime_pm_get(i915, false); -} - -/** - * intel_runtime_pm_get - grab a runtime pm reference - * @i915: i915 device instance - * - * This function grabs a device-level runtime pm reference (mostly used for GEM - * code to ensure the GTT or GT is on) and ensures that it is powered up. - * - * Any runtime pm reference obtained by this function must have a symmetric - * call to intel_runtime_pm_put() to release the reference again. - * - * Returns: the wakeref cookie to pass to intel_runtime_pm_put() - */ -intel_wakeref_t intel_runtime_pm_get(struct drm_i915_private *i915) -{ - return __intel_runtime_pm_get(i915, true); -} - -/** - * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use - * @i915: i915 device instance - * - * This function grabs a device-level runtime pm reference if the device is - * already in use and ensures that it is powered up. It is illegal to try - * and access the HW should intel_runtime_pm_get_if_in_use() report failure. - * - * Any runtime pm reference obtained by this function must have a symmetric - * call to intel_runtime_pm_put() to release the reference again. - * - * Returns: the wakeref cookie to pass to intel_runtime_pm_put(), evaluates - * as True if the wakeref was acquired, or False otherwise. - */ -intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915) -{ - if (IS_ENABLED(CONFIG_PM)) { - struct pci_dev *pdev = i915->drm.pdev; - struct device *kdev = &pdev->dev; - - /* - * In cases runtime PM is disabled by the RPM core and we get - * an -EINVAL return value we are not supposed to call this - * function, since the power state is undefined. 
This applies - * atm to the late/early system suspend/resume handlers. - */ - if (pm_runtime_get_if_in_use(kdev) <= 0) - return 0; - } - - intel_runtime_pm_acquire(i915, true); - - return track_intel_runtime_pm_wakeref(i915); -} - -/** - * intel_runtime_pm_get_noresume - grab a runtime pm reference - * @i915: i915 device instance - * - * This function grabs a device-level runtime pm reference (mostly used for GEM - * code to ensure the GTT or GT is on). - * - * It will _not_ power up the device but instead only check that it's powered - * on. Therefore it is only valid to call this functions from contexts where - * the device is known to be powered up and where trying to power it up would - * result in hilarity and deadlocks. That pretty much means only the system - * suspend/resume code where this is used to grab runtime pm references for - * delayed setup down in work items. - * - * Any runtime pm reference obtained by this function must have a symmetric - * call to intel_runtime_pm_put() to release the reference again. - * - * Returns: the wakeref cookie to pass to intel_runtime_pm_put() - */ -intel_wakeref_t intel_runtime_pm_get_noresume(struct drm_i915_private *i915) -{ - struct pci_dev *pdev = i915->drm.pdev; - struct device *kdev = &pdev->dev; - - assert_rpm_wakelock_held(i915); - pm_runtime_get_noresume(kdev); - - intel_runtime_pm_acquire(i915, true); - - return track_intel_runtime_pm_wakeref(i915); -} - -static void __intel_runtime_pm_put(struct drm_i915_private *i915, - intel_wakeref_t wref, - bool wakelock) -{ - struct pci_dev *pdev = i915->drm.pdev; - struct device *kdev = &pdev->dev; - - untrack_intel_runtime_pm_wakeref(i915, wref); - - intel_runtime_pm_release(i915, wakelock); - - pm_runtime_mark_last_busy(kdev); - pm_runtime_put_autosuspend(kdev); -} - -#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) -static void -intel_runtime_pm_put_raw(struct drm_i915_private *i915, intel_wakeref_t wref) -{ - __intel_runtime_pm_put(i915, wref, false); -} -#endif - /** * intel_runtime_pm_put_unchecked - release an unchecked runtime pm reference * @i915: i915 device instance diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.h b/drivers/gpu/drm/i915/intel_runtime_pm.h index 0a4c4b3aee7d..a7acceb13473 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.h +++ b/drivers/gpu/drm/i915/intel_runtime_pm.h @@ -13,7 +13,6 @@ struct drm_i915_private; struct drm_printer; -struct intel_encoder; enum i915_drm_suspend_mode { I915_DRM_SUSPEND_IDLE, @@ -21,89 +20,15 @@ enum i915_drm_suspend_mode { I915_DRM_SUSPEND_HIBERNATE, }; -void skl_enable_dc6(struct drm_i915_private *dev_priv); -void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv); -void bxt_enable_dc9(struct drm_i915_private *dev_priv); -void bxt_disable_dc9(struct drm_i915_private *dev_priv); -void gen9_enable_dc5(struct drm_i915_private *dev_priv); - void intel_runtime_pm_init_early(struct drm_i915_private *dev_priv); -int intel_power_domains_init(struct drm_i915_private *); -void intel_power_domains_cleanup(struct drm_i915_private *dev_priv); -void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume); -void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv); -void icl_display_core_init(struct drm_i915_private *dev_priv, bool resume); -void icl_display_core_uninit(struct drm_i915_private *dev_priv); -void intel_power_domains_enable(struct drm_i915_private *dev_priv); -void intel_power_domains_disable(struct drm_i915_private *dev_priv); -void intel_power_domains_suspend(struct drm_i915_private *dev_priv, 
- enum i915_drm_suspend_mode); -void intel_power_domains_resume(struct drm_i915_private *dev_priv); -void hsw_enable_pc8(struct drm_i915_private *dev_priv); -void hsw_disable_pc8(struct drm_i915_private *dev_priv); -void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume); -void bxt_display_core_uninit(struct drm_i915_private *dev_priv); void intel_runtime_pm_enable(struct drm_i915_private *dev_priv); void intel_runtime_pm_disable(struct drm_i915_private *dev_priv); void intel_runtime_pm_cleanup(struct drm_i915_private *dev_priv); -const char * -intel_display_power_domain_str(enum intel_display_power_domain domain); - -bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv, - enum intel_display_power_domain domain); -bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv, - enum intel_display_power_domain domain); -intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv, - enum intel_display_power_domain domain); -intel_wakeref_t -intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv, - enum intel_display_power_domain domain); -void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv, - enum intel_display_power_domain domain); -void __intel_display_power_put_async(struct drm_i915_private *i915, - enum intel_display_power_domain domain, - intel_wakeref_t wakeref); -void intel_display_power_flush_work(struct drm_i915_private *i915); -#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) -void intel_display_power_put(struct drm_i915_private *dev_priv, - enum intel_display_power_domain domain, - intel_wakeref_t wakeref); -static inline void -intel_display_power_put_async(struct drm_i915_private *i915, - enum intel_display_power_domain domain, - intel_wakeref_t wakeref) -{ - __intel_display_power_put_async(i915, domain, wakeref); -} -#else -static inline void -intel_display_power_put(struct drm_i915_private *i915, - enum intel_display_power_domain domain, - intel_wakeref_t wakeref) -{ - intel_display_power_put_unchecked(i915, domain); -} - -static inline void -intel_display_power_put_async(struct drm_i915_private *i915, - enum intel_display_power_domain domain, - intel_wakeref_t wakeref) -{ - __intel_display_power_put_async(i915, domain, -1); -} -#endif - -#define with_intel_display_power(i915, domain, wf) \ - for ((wf) = intel_display_power_get((i915), (domain)); (wf); \ - intel_display_power_put_async((i915), (domain), (wf)), (wf) = 0) - -void icl_dbuf_slices_update(struct drm_i915_private *dev_priv, - u8 req_slices); - intel_wakeref_t intel_runtime_pm_get(struct drm_i915_private *i915); intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915); intel_wakeref_t intel_runtime_pm_get_noresume(struct drm_i915_private *i915); +intel_wakeref_t intel_runtime_pm_get_raw(struct drm_i915_private *i915); #define with_intel_runtime_pm(i915, wf) \ for ((wf) = intel_runtime_pm_get(i915); (wf); \ @@ -123,6 +48,7 @@ intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref) intel_runtime_pm_put_unchecked(i915); } #endif +void intel_runtime_pm_put_raw(struct drm_i915_private *i915, intel_wakeref_t wref); #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) void print_intel_runtime_pm_wakeref(struct drm_i915_private *i915, @@ -134,9 +60,4 @@ static inline void print_intel_runtime_pm_wakeref(struct drm_i915_private *i915, } #endif -void chv_phy_powergate_lanes(struct intel_encoder *encoder, - bool override, unsigned int mask); -bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, 
enum dpio_phy phy, - enum dpio_channel ch, bool override); - #endif /* __INTEL_RUNTIME_PM_H__ */ -- cgit v1.2.3 From 79b4df6827fe23eb7d0b4335d80c746dfda9b44a Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Fri, 31 May 2019 15:24:09 -0700 Subject: drm/i915: move more defs in intel_display_power.h MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Move over structures, enums and macros from intel_display.h and i915_drv.h to have all the display PM defines in the same header. Signed-off-by: Daniele Ceraolo Spurio Cc: Imre Deak Cc: Ville Syrjälä Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190531222409.9177-3-daniele.ceraolospurio@intel.com --- drivers/gpu/drm/i915/i915_drv.h | 110 ----------------- drivers/gpu/drm/i915/intel_display.h | 82 ------------ drivers/gpu/drm/i915/intel_display_power.h | 192 +++++++++++++++++++++++++++++ 3 files changed, 192 insertions(+), 192 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index fd5450576728..eb8917df9149 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -729,116 +729,6 @@ struct intel_ilk_power_mgmt { int r_t; }; -struct drm_i915_private; -struct i915_power_well; - -struct i915_power_well_ops { - /* - * Synchronize the well's hw state to match the current sw state, for - * example enable/disable it based on the current refcount. Called - * during driver init and resume time, possibly after first calling - * the enable/disable handlers. - */ - void (*sync_hw)(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well); - /* - * Enable the well and resources that depend on it (for example - * interrupts located on the well). Called after the 0->1 refcount - * transition. - */ - void (*enable)(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well); - /* - * Disable the well and resources that depend on it. Called after - * the 1->0 refcount transition. - */ - void (*disable)(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well); - /* Returns the hw enabled state. */ - bool (*is_enabled)(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well); -}; - -struct i915_power_well_regs { - i915_reg_t bios; - i915_reg_t driver; - i915_reg_t kvmr; - i915_reg_t debug; -}; - -/* Power well structure for haswell */ -struct i915_power_well_desc { - const char *name; - bool always_on; - u64 domains; - /* unique identifier for this power well */ - enum i915_power_well_id id; - /* - * Arbitraty data associated with this power well. Platform and power - * well specific. - */ - union { - struct { - /* - * request/status flag index in the PUNIT power well - * control/status registers. - */ - u8 idx; - } vlv; - struct { - enum dpio_phy phy; - } bxt; - struct { - const struct i915_power_well_regs *regs; - /* - * request/status flag index in the power well - * constrol/status registers. - */ - u8 idx; - /* Mask of pipes whose IRQ logic is backed by the pw */ - u8 irq_pipe_mask; - /* The pw is backing the VGA functionality */ - bool has_vga:1; - bool has_fuses:1; - /* - * The pw is for an ICL+ TypeC PHY port in - * Thunderbolt mode. 
- */ - bool is_tc_tbt:1; - } hsw; - }; - const struct i915_power_well_ops *ops; -}; - -struct i915_power_well { - const struct i915_power_well_desc *desc; - /* power well enable/disable usage count */ - int count; - /* cached hw enabled state */ - bool hw_enabled; -}; - -struct i915_power_domains { - /* - * Power wells needed for initialization at driver init and suspend - * time are on. They are kept on until after the first modeset. - */ - bool initializing; - bool display_core_suspended; - int power_well_count; - - intel_wakeref_t wakeref; - - struct mutex lock; - int domain_use_count[POWER_DOMAIN_NUM]; - - struct delayed_work async_put_work; - intel_wakeref_t async_put_wakeref; - u64 async_put_domains[2]; - - struct i915_power_well *power_wells; -}; - #define MAX_L3_SLICES 2 struct intel_l3_parity { u32 *remap_info[MAX_L3_SLICES]; diff --git a/drivers/gpu/drm/i915/intel_display.h b/drivers/gpu/drm/i915/intel_display.h index a43d54089be3..ee6b8194a459 100644 --- a/drivers/gpu/drm/i915/intel_display.h +++ b/drivers/gpu/drm/i915/intel_display.h @@ -220,64 +220,6 @@ enum aux_ch { #define aux_ch_name(a) ((a) + 'A') -enum intel_display_power_domain { - POWER_DOMAIN_DISPLAY_CORE, - POWER_DOMAIN_PIPE_A, - POWER_DOMAIN_PIPE_B, - POWER_DOMAIN_PIPE_C, - POWER_DOMAIN_PIPE_A_PANEL_FITTER, - POWER_DOMAIN_PIPE_B_PANEL_FITTER, - POWER_DOMAIN_PIPE_C_PANEL_FITTER, - POWER_DOMAIN_TRANSCODER_A, - POWER_DOMAIN_TRANSCODER_B, - POWER_DOMAIN_TRANSCODER_C, - POWER_DOMAIN_TRANSCODER_EDP, - POWER_DOMAIN_TRANSCODER_EDP_VDSC, - POWER_DOMAIN_TRANSCODER_DSI_A, - POWER_DOMAIN_TRANSCODER_DSI_C, - POWER_DOMAIN_PORT_DDI_A_LANES, - POWER_DOMAIN_PORT_DDI_B_LANES, - POWER_DOMAIN_PORT_DDI_C_LANES, - POWER_DOMAIN_PORT_DDI_D_LANES, - POWER_DOMAIN_PORT_DDI_E_LANES, - POWER_DOMAIN_PORT_DDI_F_LANES, - POWER_DOMAIN_PORT_DDI_A_IO, - POWER_DOMAIN_PORT_DDI_B_IO, - POWER_DOMAIN_PORT_DDI_C_IO, - POWER_DOMAIN_PORT_DDI_D_IO, - POWER_DOMAIN_PORT_DDI_E_IO, - POWER_DOMAIN_PORT_DDI_F_IO, - POWER_DOMAIN_PORT_DSI, - POWER_DOMAIN_PORT_CRT, - POWER_DOMAIN_PORT_OTHER, - POWER_DOMAIN_VGA, - POWER_DOMAIN_AUDIO, - POWER_DOMAIN_AUX_A, - POWER_DOMAIN_AUX_B, - POWER_DOMAIN_AUX_C, - POWER_DOMAIN_AUX_D, - POWER_DOMAIN_AUX_E, - POWER_DOMAIN_AUX_F, - POWER_DOMAIN_AUX_IO_A, - POWER_DOMAIN_AUX_TBT1, - POWER_DOMAIN_AUX_TBT2, - POWER_DOMAIN_AUX_TBT3, - POWER_DOMAIN_AUX_TBT4, - POWER_DOMAIN_GMBUS, - POWER_DOMAIN_MODESET, - POWER_DOMAIN_GT_IRQ, - POWER_DOMAIN_INIT, - - POWER_DOMAIN_NUM, -}; - -#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A) -#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \ - ((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER) -#define POWER_DOMAIN_TRANSCODER(tran) \ - ((tran) == TRANSCODER_EDP ? 
POWER_DOMAIN_TRANSCODER_EDP : \ - (tran) + POWER_DOMAIN_TRANSCODER_A) - /* Used by dp and fdi links */ struct intel_link_m_n { u32 tu; @@ -364,30 +306,6 @@ struct intel_link_m_n { list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \ for_each_if((intel_connector)->base.encoder == (__encoder)) -#define for_each_power_domain(domain, mask) \ - for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \ - for_each_if(BIT_ULL(domain) & (mask)) - -#define for_each_power_well(__dev_priv, __power_well) \ - for ((__power_well) = (__dev_priv)->power_domains.power_wells; \ - (__power_well) - (__dev_priv)->power_domains.power_wells < \ - (__dev_priv)->power_domains.power_well_count; \ - (__power_well)++) - -#define for_each_power_well_reverse(__dev_priv, __power_well) \ - for ((__power_well) = (__dev_priv)->power_domains.power_wells + \ - (__dev_priv)->power_domains.power_well_count - 1; \ - (__power_well) - (__dev_priv)->power_domains.power_wells >= 0; \ - (__power_well)--) - -#define for_each_power_domain_well(__dev_priv, __power_well, __domain_mask) \ - for_each_power_well(__dev_priv, __power_well) \ - for_each_if((__power_well)->desc->domains & (__domain_mask)) - -#define for_each_power_domain_well_reverse(__dev_priv, __power_well, __domain_mask) \ - for_each_power_well_reverse(__dev_priv, __power_well) \ - for_each_if((__power_well)->desc->domains & (__domain_mask)) - #define for_each_old_intel_plane_in_state(__state, plane, old_plane_state, __i) \ for ((__i) = 0; \ (__i) < (__state)->base.dev->mode_config.num_total_plane && \ diff --git a/drivers/gpu/drm/i915/intel_display_power.h b/drivers/gpu/drm/i915/intel_display_power.h index 95fcbe8e2346..ff57b0a7fe59 100644 --- a/drivers/gpu/drm/i915/intel_display_power.h +++ b/drivers/gpu/drm/i915/intel_display_power.h @@ -8,10 +8,202 @@ #include "intel_display.h" #include "intel_runtime_pm.h" +#include "i915_reg.h" struct drm_i915_private; struct intel_encoder; +enum intel_display_power_domain { + POWER_DOMAIN_DISPLAY_CORE, + POWER_DOMAIN_PIPE_A, + POWER_DOMAIN_PIPE_B, + POWER_DOMAIN_PIPE_C, + POWER_DOMAIN_PIPE_A_PANEL_FITTER, + POWER_DOMAIN_PIPE_B_PANEL_FITTER, + POWER_DOMAIN_PIPE_C_PANEL_FITTER, + POWER_DOMAIN_TRANSCODER_A, + POWER_DOMAIN_TRANSCODER_B, + POWER_DOMAIN_TRANSCODER_C, + POWER_DOMAIN_TRANSCODER_EDP, + POWER_DOMAIN_TRANSCODER_EDP_VDSC, + POWER_DOMAIN_TRANSCODER_DSI_A, + POWER_DOMAIN_TRANSCODER_DSI_C, + POWER_DOMAIN_PORT_DDI_A_LANES, + POWER_DOMAIN_PORT_DDI_B_LANES, + POWER_DOMAIN_PORT_DDI_C_LANES, + POWER_DOMAIN_PORT_DDI_D_LANES, + POWER_DOMAIN_PORT_DDI_E_LANES, + POWER_DOMAIN_PORT_DDI_F_LANES, + POWER_DOMAIN_PORT_DDI_A_IO, + POWER_DOMAIN_PORT_DDI_B_IO, + POWER_DOMAIN_PORT_DDI_C_IO, + POWER_DOMAIN_PORT_DDI_D_IO, + POWER_DOMAIN_PORT_DDI_E_IO, + POWER_DOMAIN_PORT_DDI_F_IO, + POWER_DOMAIN_PORT_DSI, + POWER_DOMAIN_PORT_CRT, + POWER_DOMAIN_PORT_OTHER, + POWER_DOMAIN_VGA, + POWER_DOMAIN_AUDIO, + POWER_DOMAIN_AUX_A, + POWER_DOMAIN_AUX_B, + POWER_DOMAIN_AUX_C, + POWER_DOMAIN_AUX_D, + POWER_DOMAIN_AUX_E, + POWER_DOMAIN_AUX_F, + POWER_DOMAIN_AUX_IO_A, + POWER_DOMAIN_AUX_TBT1, + POWER_DOMAIN_AUX_TBT2, + POWER_DOMAIN_AUX_TBT3, + POWER_DOMAIN_AUX_TBT4, + POWER_DOMAIN_GMBUS, + POWER_DOMAIN_MODESET, + POWER_DOMAIN_GT_IRQ, + POWER_DOMAIN_INIT, + + POWER_DOMAIN_NUM, +}; + +#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A) +#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \ + ((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER) +#define POWER_DOMAIN_TRANSCODER(tran) \ + ((tran) == TRANSCODER_EDP ? 
POWER_DOMAIN_TRANSCODER_EDP : \ + (tran) + POWER_DOMAIN_TRANSCODER_A) + +struct i915_power_well; + +struct i915_power_well_ops { + /* + * Synchronize the well's hw state to match the current sw state, for + * example enable/disable it based on the current refcount. Called + * during driver init and resume time, possibly after first calling + * the enable/disable handlers. + */ + void (*sync_hw)(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well); + /* + * Enable the well and resources that depend on it (for example + * interrupts located on the well). Called after the 0->1 refcount + * transition. + */ + void (*enable)(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well); + /* + * Disable the well and resources that depend on it. Called after + * the 1->0 refcount transition. + */ + void (*disable)(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well); + /* Returns the hw enabled state. */ + bool (*is_enabled)(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well); +}; + +struct i915_power_well_regs { + i915_reg_t bios; + i915_reg_t driver; + i915_reg_t kvmr; + i915_reg_t debug; +}; + +/* Power well structure for haswell */ +struct i915_power_well_desc { + const char *name; + bool always_on; + u64 domains; + /* unique identifier for this power well */ + enum i915_power_well_id id; + /* + * Arbitraty data associated with this power well. Platform and power + * well specific. + */ + union { + struct { + /* + * request/status flag index in the PUNIT power well + * control/status registers. + */ + u8 idx; + } vlv; + struct { + enum dpio_phy phy; + } bxt; + struct { + const struct i915_power_well_regs *regs; + /* + * request/status flag index in the power well + * constrol/status registers. + */ + u8 idx; + /* Mask of pipes whose IRQ logic is backed by the pw */ + u8 irq_pipe_mask; + /* The pw is backing the VGA functionality */ + bool has_vga:1; + bool has_fuses:1; + /* + * The pw is for an ICL+ TypeC PHY port in + * Thunderbolt mode. + */ + bool is_tc_tbt:1; + } hsw; + }; + const struct i915_power_well_ops *ops; +}; + +struct i915_power_well { + const struct i915_power_well_desc *desc; + /* power well enable/disable usage count */ + int count; + /* cached hw enabled state */ + bool hw_enabled; +}; + +struct i915_power_domains { + /* + * Power wells needed for initialization at driver init and suspend + * time are on. They are kept on until after the first modeset. 
+ */ + bool initializing; + bool display_core_suspended; + int power_well_count; + + intel_wakeref_t wakeref; + + struct mutex lock; + int domain_use_count[POWER_DOMAIN_NUM]; + + struct delayed_work async_put_work; + intel_wakeref_t async_put_wakeref; + u64 async_put_domains[2]; + + struct i915_power_well *power_wells; +}; + +#define for_each_power_domain(domain, mask) \ + for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \ + for_each_if(BIT_ULL(domain) & (mask)) + +#define for_each_power_well(__dev_priv, __power_well) \ + for ((__power_well) = (__dev_priv)->power_domains.power_wells; \ + (__power_well) - (__dev_priv)->power_domains.power_wells < \ + (__dev_priv)->power_domains.power_well_count; \ + (__power_well)++) + +#define for_each_power_well_reverse(__dev_priv, __power_well) \ + for ((__power_well) = (__dev_priv)->power_domains.power_wells + \ + (__dev_priv)->power_domains.power_well_count - 1; \ + (__power_well) - (__dev_priv)->power_domains.power_wells >= 0; \ + (__power_well)--) + +#define for_each_power_domain_well(__dev_priv, __power_well, __domain_mask) \ + for_each_power_well(__dev_priv, __power_well) \ + for_each_if((__power_well)->desc->domains & (__domain_mask)) + +#define for_each_power_domain_well_reverse(__dev_priv, __power_well, __domain_mask) \ + for_each_power_well_reverse(__dev_priv, __power_well) \ + for_each_if((__power_well)->desc->domains & (__domain_mask)) + void skl_enable_dc6(struct drm_i915_private *dev_priv); void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv); void bxt_enable_dc9(struct drm_i915_private *dev_priv); -- cgit v1.2.3 From 7d09888ead1d90c4950c1c92b67946a214dfb91a Mon Sep 17 00:00:00 2001 From: Oleg Vasilev Date: Mon, 20 May 2019 18:06:42 +0300 Subject: drm/i915: add i2c symlink under hdmi connector MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently, the i2c adapter is available only under DP connectors. Add i2c symlink under hdmi connector pointing to i2c adapter in order to make this behaviour consistent. The initial motivation was to make igt i2c subtest patch [1] work on all connectors. 
[1]: https://patchwork.freedesktop.org/series/60357/ v2: - Moved symlink remove to unregister (Ville) - Clarified commit message (Jani) - Changed WARN to DRM_ERROR (Jani) - Minor codestyle changes proposed by Jani v3: added blank line Cc: Arkadiusz Hiler Cc: Imre Deak Cc: Ville Syrjälä Cc: Jani Nikula Signed-off-by: Oleg Vasilev Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190520150642.3477-1-oleg.vasilev@intel.com --- drivers/gpu/drm/i915/intel_hdmi.c | 41 ++++++++++++++++++++++++++++++++++++++- 1 file changed, 40 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 1b4f5528cbd0..097bfa504ece 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -2704,6 +2704,36 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder, chv_phy_release_cl2_override(encoder); } +static struct i2c_adapter * +intel_hdmi_get_i2c_adapter(struct drm_connector *connector) +{ + struct drm_i915_private *dev_priv = to_i915(connector->dev); + struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); + + return intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus); +} + +static void intel_hdmi_create_i2c_symlink(struct drm_connector *connector) +{ + struct i2c_adapter *adapter = intel_hdmi_get_i2c_adapter(connector); + struct kobject *i2c_kobj = &adapter->dev.kobj; + struct kobject *connector_kobj = &connector->kdev->kobj; + int ret; + + ret = sysfs_create_link(connector_kobj, i2c_kobj, i2c_kobj->name); + if (ret) + DRM_ERROR("Failed to create i2c symlink (%d)\n", ret); +} + +static void intel_hdmi_remove_i2c_symlink(struct drm_connector *connector) +{ + struct i2c_adapter *adapter = intel_hdmi_get_i2c_adapter(connector); + struct kobject *i2c_kobj = &adapter->dev.kobj; + struct kobject *connector_kobj = &connector->kdev->kobj; + + sysfs_remove_link(connector_kobj, i2c_kobj->name); +} + static int intel_hdmi_connector_register(struct drm_connector *connector) { @@ -2715,6 +2745,8 @@ intel_hdmi_connector_register(struct drm_connector *connector) i915_debugfs_connector_add(connector); + intel_hdmi_create_i2c_symlink(connector); + return ret; } @@ -2726,6 +2758,13 @@ static void intel_hdmi_destroy(struct drm_connector *connector) intel_connector_destroy(connector); } +static void intel_hdmi_connector_unregister(struct drm_connector *connector) +{ + intel_hdmi_remove_i2c_symlink(connector); + + intel_connector_unregister(connector); +} + static const struct drm_connector_funcs intel_hdmi_connector_funcs = { .detect = intel_hdmi_detect, .force = intel_hdmi_force, @@ -2733,7 +2772,7 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = { .atomic_get_property = intel_digital_connector_atomic_get_property, .atomic_set_property = intel_digital_connector_atomic_set_property, .late_register = intel_hdmi_connector_register, - .early_unregister = intel_connector_unregister, + .early_unregister = intel_hdmi_connector_unregister, .destroy = intel_hdmi_destroy, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, .atomic_duplicate_state = intel_digital_connector_duplicate_state, -- cgit v1.2.3 From 8b67896e3ba1c0c783a970a80a0ab1677ccf8f3a Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Fri, 17 May 2019 22:31:19 +0300 Subject: drm/i915: Pass intel_atomic_state to cdclk funcs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Pass around intel_atomic_state rather than 
drm_atomic_state. This avoids some extra casts and annoing aliasing variables. Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190517193132.8140-1-ville.syrjala@linux.intel.com Reviewed-by: Maarten Lankhorst --- drivers/gpu/drm/i915/i915_drv.h | 2 +- drivers/gpu/drm/i915/intel_cdclk.c | 167 ++++++++++++++++------------------- drivers/gpu/drm/i915/intel_display.c | 2 +- 3 files changed, 79 insertions(+), 92 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index eb8917df9149..a0539b837df5 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -295,7 +295,7 @@ struct drm_i915_display_funcs { struct intel_crtc_state *cstate); int (*compute_global_watermarks)(struct intel_atomic_state *state); void (*update_wm)(struct intel_crtc *crtc); - int (*modeset_calc_cdclk)(struct drm_atomic_state *state); + int (*modeset_calc_cdclk)(struct intel_atomic_state *state); /* Returns the active state of the crtc, and if the crtc is active, * fills out the pipe-config with the hw state. */ bool (*get_pipe_config)(struct intel_crtc *, diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c index 78d9f619956c..6e712fec925f 100644 --- a/drivers/gpu/drm/i915/intel_cdclk.c +++ b/drivers/gpu/drm/i915/intel_cdclk.c @@ -2283,29 +2283,28 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state) return min_cdclk; } -static int intel_compute_min_cdclk(struct drm_atomic_state *state) +static int intel_compute_min_cdclk(struct intel_atomic_state *state) { - struct intel_atomic_state *intel_state = to_intel_atomic_state(state); - struct drm_i915_private *dev_priv = to_i915(state->dev); + struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc *crtc; struct intel_crtc_state *crtc_state; int min_cdclk, i; enum pipe pipe; - memcpy(intel_state->min_cdclk, dev_priv->min_cdclk, - sizeof(intel_state->min_cdclk)); + memcpy(state->min_cdclk, dev_priv->min_cdclk, + sizeof(state->min_cdclk)); - for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) { + for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { min_cdclk = intel_crtc_compute_min_cdclk(crtc_state); if (min_cdclk < 0) return min_cdclk; - intel_state->min_cdclk[i] = min_cdclk; + state->min_cdclk[i] = min_cdclk; } - min_cdclk = intel_state->cdclk.force_min_cdclk; + min_cdclk = state->cdclk.force_min_cdclk; for_each_pipe(dev_priv, pipe) - min_cdclk = max(intel_state->min_cdclk[pipe], min_cdclk); + min_cdclk = max(state->min_cdclk[pipe], min_cdclk); return min_cdclk; } @@ -2347,10 +2346,9 @@ static u8 cnl_compute_min_voltage_level(struct intel_atomic_state *state) return min_voltage_level; } -static int vlv_modeset_calc_cdclk(struct drm_atomic_state *state) +static int vlv_modeset_calc_cdclk(struct intel_atomic_state *state) { - struct drm_i915_private *dev_priv = to_i915(state->dev); - struct intel_atomic_state *intel_state = to_intel_atomic_state(state); + struct drm_i915_private *dev_priv = to_i915(state->base.dev); int min_cdclk, cdclk; min_cdclk = intel_compute_min_cdclk(state); @@ -2359,28 +2357,25 @@ static int vlv_modeset_calc_cdclk(struct drm_atomic_state *state) cdclk = vlv_calc_cdclk(dev_priv, min_cdclk); - intel_state->cdclk.logical.cdclk = cdclk; - intel_state->cdclk.logical.voltage_level = + state->cdclk.logical.cdclk = cdclk; + state->cdclk.logical.voltage_level = vlv_calc_voltage_level(dev_priv, cdclk); - if 
(!intel_state->active_crtcs) { - cdclk = vlv_calc_cdclk(dev_priv, - intel_state->cdclk.force_min_cdclk); + if (!state->active_crtcs) { + cdclk = vlv_calc_cdclk(dev_priv, state->cdclk.force_min_cdclk); - intel_state->cdclk.actual.cdclk = cdclk; - intel_state->cdclk.actual.voltage_level = + state->cdclk.actual.cdclk = cdclk; + state->cdclk.actual.voltage_level = vlv_calc_voltage_level(dev_priv, cdclk); } else { - intel_state->cdclk.actual = - intel_state->cdclk.logical; + state->cdclk.actual = state->cdclk.logical; } return 0; } -static int bdw_modeset_calc_cdclk(struct drm_atomic_state *state) +static int bdw_modeset_calc_cdclk(struct intel_atomic_state *state) { - struct intel_atomic_state *intel_state = to_intel_atomic_state(state); int min_cdclk, cdclk; min_cdclk = intel_compute_min_cdclk(state); @@ -2393,36 +2388,35 @@ static int bdw_modeset_calc_cdclk(struct drm_atomic_state *state) */ cdclk = bdw_calc_cdclk(min_cdclk); - intel_state->cdclk.logical.cdclk = cdclk; - intel_state->cdclk.logical.voltage_level = + state->cdclk.logical.cdclk = cdclk; + state->cdclk.logical.voltage_level = bdw_calc_voltage_level(cdclk); - if (!intel_state->active_crtcs) { - cdclk = bdw_calc_cdclk(intel_state->cdclk.force_min_cdclk); + if (!state->active_crtcs) { + cdclk = bdw_calc_cdclk(state->cdclk.force_min_cdclk); - intel_state->cdclk.actual.cdclk = cdclk; - intel_state->cdclk.actual.voltage_level = + state->cdclk.actual.cdclk = cdclk; + state->cdclk.actual.voltage_level = bdw_calc_voltage_level(cdclk); } else { - intel_state->cdclk.actual = - intel_state->cdclk.logical; + state->cdclk.actual = state->cdclk.logical; } return 0; } -static int skl_dpll0_vco(struct intel_atomic_state *intel_state) +static int skl_dpll0_vco(struct intel_atomic_state *state) { - struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev); + struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc *crtc; struct intel_crtc_state *crtc_state; int vco, i; - vco = intel_state->cdclk.logical.vco; + vco = state->cdclk.logical.vco; if (!vco) vco = dev_priv->skl_preferred_vco_freq; - for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) { + for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { if (!crtc_state->base.enable) continue; @@ -2447,16 +2441,15 @@ static int skl_dpll0_vco(struct intel_atomic_state *intel_state) return vco; } -static int skl_modeset_calc_cdclk(struct drm_atomic_state *state) +static int skl_modeset_calc_cdclk(struct intel_atomic_state *state) { - struct intel_atomic_state *intel_state = to_intel_atomic_state(state); int min_cdclk, cdclk, vco; min_cdclk = intel_compute_min_cdclk(state); if (min_cdclk < 0) return min_cdclk; - vco = skl_dpll0_vco(intel_state); + vco = skl_dpll0_vco(state); /* * FIXME should also account for plane ratio @@ -2464,30 +2457,28 @@ static int skl_modeset_calc_cdclk(struct drm_atomic_state *state) */ cdclk = skl_calc_cdclk(min_cdclk, vco); - intel_state->cdclk.logical.vco = vco; - intel_state->cdclk.logical.cdclk = cdclk; - intel_state->cdclk.logical.voltage_level = + state->cdclk.logical.vco = vco; + state->cdclk.logical.cdclk = cdclk; + state->cdclk.logical.voltage_level = skl_calc_voltage_level(cdclk); - if (!intel_state->active_crtcs) { - cdclk = skl_calc_cdclk(intel_state->cdclk.force_min_cdclk, vco); + if (!state->active_crtcs) { + cdclk = skl_calc_cdclk(state->cdclk.force_min_cdclk, vco); - intel_state->cdclk.actual.vco = vco; - intel_state->cdclk.actual.cdclk = cdclk; - intel_state->cdclk.actual.voltage_level = + 
state->cdclk.actual.vco = vco; + state->cdclk.actual.cdclk = cdclk; + state->cdclk.actual.voltage_level = skl_calc_voltage_level(cdclk); } else { - intel_state->cdclk.actual = - intel_state->cdclk.logical; + state->cdclk.actual = state->cdclk.logical; } return 0; } -static int bxt_modeset_calc_cdclk(struct drm_atomic_state *state) +static int bxt_modeset_calc_cdclk(struct intel_atomic_state *state) { - struct drm_i915_private *dev_priv = to_i915(state->dev); - struct intel_atomic_state *intel_state = to_intel_atomic_state(state); + struct drm_i915_private *dev_priv = to_i915(state->base.dev); int min_cdclk, cdclk, vco; min_cdclk = intel_compute_min_cdclk(state); @@ -2502,36 +2493,34 @@ static int bxt_modeset_calc_cdclk(struct drm_atomic_state *state) vco = bxt_de_pll_vco(dev_priv, cdclk); } - intel_state->cdclk.logical.vco = vco; - intel_state->cdclk.logical.cdclk = cdclk; - intel_state->cdclk.logical.voltage_level = + state->cdclk.logical.vco = vco; + state->cdclk.logical.cdclk = cdclk; + state->cdclk.logical.voltage_level = bxt_calc_voltage_level(cdclk); - if (!intel_state->active_crtcs) { + if (!state->active_crtcs) { if (IS_GEMINILAKE(dev_priv)) { - cdclk = glk_calc_cdclk(intel_state->cdclk.force_min_cdclk); + cdclk = glk_calc_cdclk(state->cdclk.force_min_cdclk); vco = glk_de_pll_vco(dev_priv, cdclk); } else { - cdclk = bxt_calc_cdclk(intel_state->cdclk.force_min_cdclk); + cdclk = bxt_calc_cdclk(state->cdclk.force_min_cdclk); vco = bxt_de_pll_vco(dev_priv, cdclk); } - intel_state->cdclk.actual.vco = vco; - intel_state->cdclk.actual.cdclk = cdclk; - intel_state->cdclk.actual.voltage_level = + state->cdclk.actual.vco = vco; + state->cdclk.actual.cdclk = cdclk; + state->cdclk.actual.voltage_level = bxt_calc_voltage_level(cdclk); } else { - intel_state->cdclk.actual = - intel_state->cdclk.logical; + state->cdclk.actual = state->cdclk.logical; } return 0; } -static int cnl_modeset_calc_cdclk(struct drm_atomic_state *state) +static int cnl_modeset_calc_cdclk(struct intel_atomic_state *state) { - struct drm_i915_private *dev_priv = to_i915(state->dev); - struct intel_atomic_state *intel_state = to_intel_atomic_state(state); + struct drm_i915_private *dev_priv = to_i915(state->base.dev); int min_cdclk, cdclk, vco; min_cdclk = intel_compute_min_cdclk(state); @@ -2541,33 +2530,31 @@ static int cnl_modeset_calc_cdclk(struct drm_atomic_state *state) cdclk = cnl_calc_cdclk(min_cdclk); vco = cnl_cdclk_pll_vco(dev_priv, cdclk); - intel_state->cdclk.logical.vco = vco; - intel_state->cdclk.logical.cdclk = cdclk; - intel_state->cdclk.logical.voltage_level = + state->cdclk.logical.vco = vco; + state->cdclk.logical.cdclk = cdclk; + state->cdclk.logical.voltage_level = max(cnl_calc_voltage_level(cdclk), - cnl_compute_min_voltage_level(intel_state)); + cnl_compute_min_voltage_level(state)); - if (!intel_state->active_crtcs) { - cdclk = cnl_calc_cdclk(intel_state->cdclk.force_min_cdclk); + if (!state->active_crtcs) { + cdclk = cnl_calc_cdclk(state->cdclk.force_min_cdclk); vco = cnl_cdclk_pll_vco(dev_priv, cdclk); - intel_state->cdclk.actual.vco = vco; - intel_state->cdclk.actual.cdclk = cdclk; - intel_state->cdclk.actual.voltage_level = + state->cdclk.actual.vco = vco; + state->cdclk.actual.cdclk = cdclk; + state->cdclk.actual.voltage_level = cnl_calc_voltage_level(cdclk); } else { - intel_state->cdclk.actual = - intel_state->cdclk.logical; + state->cdclk.actual = state->cdclk.logical; } return 0; } -static int icl_modeset_calc_cdclk(struct drm_atomic_state *state) +static int icl_modeset_calc_cdclk(struct 
intel_atomic_state *state) { - struct drm_i915_private *dev_priv = to_i915(state->dev); - struct intel_atomic_state *intel_state = to_intel_atomic_state(state); - unsigned int ref = intel_state->cdclk.logical.ref; + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + unsigned int ref = state->cdclk.logical.ref; int min_cdclk, cdclk, vco; min_cdclk = intel_compute_min_cdclk(state); @@ -2577,22 +2564,22 @@ static int icl_modeset_calc_cdclk(struct drm_atomic_state *state) cdclk = icl_calc_cdclk(min_cdclk, ref); vco = icl_calc_cdclk_pll_vco(dev_priv, cdclk); - intel_state->cdclk.logical.vco = vco; - intel_state->cdclk.logical.cdclk = cdclk; - intel_state->cdclk.logical.voltage_level = + state->cdclk.logical.vco = vco; + state->cdclk.logical.cdclk = cdclk; + state->cdclk.logical.voltage_level = max(icl_calc_voltage_level(cdclk), - cnl_compute_min_voltage_level(intel_state)); + cnl_compute_min_voltage_level(state)); - if (!intel_state->active_crtcs) { - cdclk = icl_calc_cdclk(intel_state->cdclk.force_min_cdclk, ref); + if (!state->active_crtcs) { + cdclk = icl_calc_cdclk(state->cdclk.force_min_cdclk, ref); vco = icl_calc_cdclk_pll_vco(dev_priv, cdclk); - intel_state->cdclk.actual.vco = vco; - intel_state->cdclk.actual.cdclk = cdclk; - intel_state->cdclk.actual.voltage_level = + state->cdclk.actual.vco = vco; + state->cdclk.actual.cdclk = cdclk; + state->cdclk.actual.voltage_level = icl_calc_voltage_level(cdclk); } else { - intel_state->cdclk.actual = intel_state->cdclk.logical; + state->cdclk.actual = state->cdclk.logical; } return 0; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index c3e2b1178d55..45f01178c6b7 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -13271,7 +13271,7 @@ static int intel_modeset_checks(struct drm_atomic_state *state) if (dev_priv->display.modeset_calc_cdclk) { enum pipe pipe; - ret = dev_priv->display.modeset_calc_cdclk(state); + ret = dev_priv->display.modeset_calc_cdclk(intel_state); if (ret < 0) return ret; -- cgit v1.2.3 From 3d51b48fd16bd5b103613e94da95f87296e716f3 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Fri, 17 May 2019 22:31:20 +0300 Subject: drm/i915: Clean up cdclk vfunc assignments MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Thanks to using the short names for platoforms all the cdclk vfunc assignemtns now fit within 80 cols. Remove the extra line wraps. 
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190517193132.8140-2-ville.syrjala@linux.intel.com Reviewed-by: Maarten Lankhorst --- drivers/gpu/drm/i915/intel_cdclk.c | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c index 6e712fec925f..6988c6cbc362 100644 --- a/drivers/gpu/drm/i915/intel_cdclk.c +++ b/drivers/gpu/drm/i915/intel_cdclk.c @@ -2801,28 +2801,22 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv) dev_priv->display.modeset_calc_cdclk = icl_modeset_calc_cdclk; } else if (IS_CANNONLAKE(dev_priv)) { dev_priv->display.set_cdclk = cnl_set_cdclk; - dev_priv->display.modeset_calc_cdclk = - cnl_modeset_calc_cdclk; + dev_priv->display.modeset_calc_cdclk = cnl_modeset_calc_cdclk; } else if (IS_GEN9_LP(dev_priv)) { dev_priv->display.set_cdclk = bxt_set_cdclk; - dev_priv->display.modeset_calc_cdclk = - bxt_modeset_calc_cdclk; + dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk; } else if (IS_GEN9_BC(dev_priv)) { dev_priv->display.set_cdclk = skl_set_cdclk; - dev_priv->display.modeset_calc_cdclk = - skl_modeset_calc_cdclk; + dev_priv->display.modeset_calc_cdclk = skl_modeset_calc_cdclk; } else if (IS_BROADWELL(dev_priv)) { dev_priv->display.set_cdclk = bdw_set_cdclk; - dev_priv->display.modeset_calc_cdclk = - bdw_modeset_calc_cdclk; + dev_priv->display.modeset_calc_cdclk = bdw_modeset_calc_cdclk; } else if (IS_CHERRYVIEW(dev_priv)) { dev_priv->display.set_cdclk = chv_set_cdclk; - dev_priv->display.modeset_calc_cdclk = - vlv_modeset_calc_cdclk; + dev_priv->display.modeset_calc_cdclk = vlv_modeset_calc_cdclk; } else if (IS_VALLEYVIEW(dev_priv)) { dev_priv->display.set_cdclk = vlv_set_cdclk; - dev_priv->display.modeset_calc_cdclk = - vlv_modeset_calc_cdclk; + dev_priv->display.modeset_calc_cdclk = vlv_modeset_calc_cdclk; } if (INTEL_GEN(dev_priv) >= 11) -- cgit v1.2.3 From 85829eb5ee1afc673befcf9ab8a4043e72df4ad2 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Fri, 17 May 2019 22:31:21 +0300 Subject: drm/i915: Pass intel_atomic state to check_digital_port_conflicts() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Pass around intel_atomic_state rather than drm_atomic_state. This avoids some extra casts and annoing aliasing variables. 
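To make the "extra casts and aliasing variables" point concrete, the pattern this series removes looks roughly like the sketch below; the function name and the checks inside it are illustrative only and are not taken verbatim from the patch. It relies on the fact that struct intel_atomic_state embeds its struct drm_atomic_state as ->base, so the base pointer stays reachable when a caller still needs it:

    /* before: take drm_atomic_state, immediately re-derive the intel type */
    static bool example_check(struct drm_atomic_state *state)
    {
            /* aliasing local that every such function had to declare */
            struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
            struct drm_i915_private *dev_priv = to_i915(state->dev);

            return INTEL_GEN(dev_priv) >= 9 && intel_state->active_crtcs;
    }

    /* after: take intel_atomic_state directly, no cast and no aliasing local */
    static bool example_check(struct intel_atomic_state *state)
    {
            struct drm_i915_private *dev_priv = to_i915(state->base.dev);

            return INTEL_GEN(dev_priv) >= 9 && state->active_crtcs;
    }
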
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190517193132.8140-3-ville.syrjala@linux.intel.com Reviewed-by: Maarten Lankhorst --- drivers/gpu/drm/i915/intel_display.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 45f01178c6b7..b628524bed5e 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -11917,9 +11917,9 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc, } } -static bool check_digital_port_conflicts(struct drm_atomic_state *state) +static bool check_digital_port_conflicts(struct intel_atomic_state *state) { - struct drm_device *dev = state->dev; + struct drm_device *dev = state->base.dev; struct drm_connector *connector; struct drm_connector_list_iter conn_iter; unsigned int used_ports = 0; @@ -11936,7 +11936,9 @@ static bool check_digital_port_conflicts(struct drm_atomic_state *state) struct drm_connector_state *connector_state; struct intel_encoder *encoder; - connector_state = drm_atomic_get_new_connector_state(state, connector); + connector_state = + drm_atomic_get_new_connector_state(&state->base, + connector); if (!connector_state) connector_state = connector->state; @@ -13235,7 +13237,7 @@ static int intel_modeset_checks(struct drm_atomic_state *state) struct drm_crtc_state *old_crtc_state, *new_crtc_state; int ret = 0, i; - if (!check_digital_port_conflicts(state)) { + if (!check_digital_port_conflicts(intel_state)) { DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n"); return -EINVAL; } -- cgit v1.2.3 From c3b1e6c67dd2d03ba326c7b47e72869a0624d1eb Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Fri, 17 May 2019 22:31:22 +0300 Subject: drm/i915: Use intel_ types in intel_modeset_clear_plls() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Pass around intel_atomic_state rather than drm_atomic_state. This avoids some extra casts and annoying aliasing variables.
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190517193132.8140-4-ville.syrjala@linux.intel.com Reviewed-by: Maarten Lankhorst --- drivers/gpu/drm/i915/intel_display.c | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index b628524bed5e..c47e827e3835 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -13087,31 +13087,30 @@ static void update_scanline_offset(const struct intel_crtc_state *crtc_state) crtc->scanline_offset = 1; } -static void intel_modeset_clear_plls(struct drm_atomic_state *state) +static void intel_modeset_clear_plls(struct intel_atomic_state *state) { - struct drm_device *dev = state->dev; - struct drm_i915_private *dev_priv = to_i915(dev); - struct drm_crtc *crtc; - struct drm_crtc_state *old_crtc_state, *new_crtc_state; + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_crtc_state *old_crtc_state, *new_crtc_state; + struct intel_crtc *crtc; int i; if (!dev_priv->display.crtc_compute_clock) return; - for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, + new_crtc_state, i) { struct intel_shared_dpll *old_dpll = - to_intel_crtc_state(old_crtc_state)->shared_dpll; + old_crtc_state->shared_dpll; - if (!needs_modeset(new_crtc_state)) + if (!needs_modeset(&new_crtc_state->base)) continue; - to_intel_crtc_state(new_crtc_state)->shared_dpll = NULL; + new_crtc_state->shared_dpll = NULL; if (!old_dpll) continue; - intel_release_shared_dpll(old_dpll, intel_crtc, state); + intel_release_shared_dpll(old_dpll, crtc, &state->base); } } @@ -13329,7 +13328,7 @@ static int intel_modeset_checks(struct drm_atomic_state *state) intel_state->cdclk.actual.voltage_level); } - intel_modeset_clear_plls(state); + intel_modeset_clear_plls(intel_state); if (IS_HASWELL(dev_priv)) return haswell_mode_set_planes_workaround(state); -- cgit v1.2.3 From bca0bfa31c1baccf4a3b7987241ab2cf85596b83 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Fri, 17 May 2019 22:31:23 +0300 Subject: drm/i915: Use intel_ types in haswell_mode_set_planes_workaround() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Pass around intel_atomic_state rather than drm_atomic_state. This avoids some extra casts and annoying aliasing variables. Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190517193132.8140-5-ville.syrjala@linux.intel.com Reviewed-by: Maarten Lankhorst --- drivers/gpu/drm/i915/intel_display.c | 40 ++++++++++++++++-------------------- 1 file changed, 18 insertions(+), 22 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index c47e827e3835..791a28e7ecbe 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -13120,29 +13120,27 @@ static void intel_modeset_clear_plls(struct intel_atomic_state *state) * multiple pipes, and planes are enabled after the pipe, we need to wait at * least 2 vblanks on the first pipe before enabling planes on the second pipe.
*/ -static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state) +static int haswell_mode_set_planes_workaround(struct intel_atomic_state *state) { - struct drm_crtc_state *crtc_state; - struct intel_crtc *intel_crtc; - struct drm_crtc *crtc; + struct intel_crtc_state *crtc_state; + struct intel_crtc *crtc; struct intel_crtc_state *first_crtc_state = NULL; struct intel_crtc_state *other_crtc_state = NULL; enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE; int i; /* look at all crtc's that are going to be enabled in during modeset */ - for_each_new_crtc_in_state(state, crtc, crtc_state, i) { - intel_crtc = to_intel_crtc(crtc); - - if (!crtc_state->active || !needs_modeset(crtc_state)) + for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { + if (!crtc_state->base.active || + !needs_modeset(&crtc_state->base)) continue; if (first_crtc_state) { - other_crtc_state = to_intel_crtc_state(crtc_state); + other_crtc_state = crtc_state; break; } else { - first_crtc_state = to_intel_crtc_state(crtc_state); - first_pipe = intel_crtc->pipe; + first_crtc_state = crtc_state; + first_pipe = crtc->pipe; } } @@ -13151,24 +13149,22 @@ static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state) return 0; /* w/a possibly needed, check how many crtc's are already enabled. */ - for_each_intel_crtc(state->dev, intel_crtc) { - struct intel_crtc_state *pipe_config; - - pipe_config = intel_atomic_get_crtc_state(state, intel_crtc); - if (IS_ERR(pipe_config)) - return PTR_ERR(pipe_config); + for_each_intel_crtc(state->base.dev, crtc) { + crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); - pipe_config->hsw_workaround_pipe = INVALID_PIPE; + crtc_state->hsw_workaround_pipe = INVALID_PIPE; - if (!pipe_config->base.active || - needs_modeset(&pipe_config->base)) + if (!crtc_state->base.active || + needs_modeset(&crtc_state->base)) continue; /* 2 or more enabled crtcs means no need for w/a */ if (enabled_pipe != INVALID_PIPE) return 0; - enabled_pipe = intel_crtc->pipe; + enabled_pipe = crtc->pipe; } if (enabled_pipe != INVALID_PIPE) @@ -13331,7 +13327,7 @@ static int intel_modeset_checks(struct drm_atomic_state *state) intel_modeset_clear_plls(intel_state); if (IS_HASWELL(dev_priv)) - return haswell_mode_set_planes_workaround(state); + return haswell_mode_set_planes_workaround(intel_state); return 0; } -- cgit v1.2.3 From 1b9994c789779fb53ca60e11fb6e67dac77d08d2 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Fri, 17 May 2019 22:31:24 +0300 Subject: drm/i915: Don't pass the crtc to intel_dump_pipe_config() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We already pass the crtc's state to intel_dump_pipe_config() so passing the crtc as well is redundant. 
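A stand-alone sketch of the same idea, with made-up names rather than the driver types: once the state carries a back-pointer to its owner, passing both is redundant and the pair could even disagree, so the helper takes only the state and derives the owner from it.

#include <stdio.h>

struct crtc { int id; };

struct crtc_state {
	struct crtc *crtc;	/* back-pointer to the owning object */
	int active;
};

/* old: both the object and its state are passed, and could disagree */
static void dump_old(const struct crtc *crtc, const struct crtc_state *state)
{
	printf("[CRTC:%d] active=%d\n", crtc->id, state->active);
}

/* new: the state alone is enough, the owner is derived from it */
static void dump_new(const struct crtc_state *state)
{
	printf("[CRTC:%d] active=%d\n", state->crtc->id, state->active);
}

int main(void)
{
	struct crtc c = { .id = 0 };
	struct crtc_state s = { .crtc = &c, .active = 1 };

	dump_old(&c, &s);
	dump_new(&s);
	return 0;
}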
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190517193132.8140-6-ville.syrjala@linux.intel.com Reviewed-by: Maarten Lankhorst --- drivers/gpu/drm/i915/intel_display.c | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 791a28e7ecbe..04fb75af48b2 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -11793,10 +11793,10 @@ static const char *output_formats(enum intel_output_format format) return output_format_str[format]; } -static void intel_dump_pipe_config(struct intel_crtc *crtc, - struct intel_crtc_state *pipe_config, +static void intel_dump_pipe_config(struct intel_crtc_state *pipe_config, const char *context) { + struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc); struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); struct drm_plane *plane; @@ -12903,10 +12903,8 @@ verify_crtc_state(struct drm_crtc *crtc, if (!intel_pipe_config_compare(dev_priv, sw_config, pipe_config, false)) { I915_STATE_WARN(1, "pipe state doesn't match!\n"); - intel_dump_pipe_config(intel_crtc, pipe_config, - "[hw state]"); - intel_dump_pipe_config(intel_crtc, sw_config, - "[sw state]"); + intel_dump_pipe_config(pipe_config, "[hw state]"); + intel_dump_pipe_config(sw_config, "[sw state]"); } } @@ -13392,8 +13390,7 @@ static int intel_atomic_check(struct drm_device *dev, if (ret == -EDEADLK) return ret; if (ret) { - intel_dump_pipe_config(to_intel_crtc(crtc), - pipe_config, "[failed]"); + intel_dump_pipe_config(pipe_config, "[failed]"); return ret; } @@ -13407,7 +13404,7 @@ static int intel_atomic_check(struct drm_device *dev, if (needs_modeset(crtc_state)) any_ms = true; - intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config, + intel_dump_pipe_config(pipe_config, needs_modeset(crtc_state) ? "[modeset]" : "[fastset]"); } @@ -16668,8 +16665,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev, for_each_intel_crtc(&dev_priv->drm, crtc) { crtc_state = to_intel_crtc_state(crtc->base.state); intel_sanitize_crtc(crtc, ctx); - intel_dump_pipe_config(crtc, crtc_state, - "[setup_hw_state]"); + intel_dump_pipe_config(crtc_state, "[setup_hw_state]"); } intel_modeset_update_connector_atomic_state(dev); -- cgit v1.2.3 From f239b7998507419a46815d6df8c17bdddc04d8c3 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Fri, 17 May 2019 22:31:25 +0300 Subject: drm/i915: Don't pass the crtc to intel_modeset_pipe_config() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We already pass the crtc's state to intel_modeset_pipe_config() so passing the crtc as well is redundant. 
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190517193132.8140-7-ville.syrjala@linux.intel.com Reviewed-by: Maarten Lankhorst --- drivers/gpu/drm/i915/intel_display.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 04fb75af48b2..107206429745 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -12017,9 +12017,9 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state) } static int -intel_modeset_pipe_config(struct drm_crtc *crtc, - struct intel_crtc_state *pipe_config) +intel_modeset_pipe_config(struct intel_crtc_state *pipe_config) { + struct drm_crtc *crtc = pipe_config->base.crtc; struct drm_atomic_state *state = pipe_config->base.state; struct intel_encoder *encoder; struct drm_connector *connector; @@ -13386,7 +13386,7 @@ static int intel_atomic_check(struct drm_device *dev, continue; } - ret = intel_modeset_pipe_config(crtc, pipe_config); + ret = intel_modeset_pipe_config(pipe_config); if (ret == -EDEADLK) return ret; if (ret) { -- cgit v1.2.3 From 5643dd9c7af4afc1ade0040ee9e23e34eea37973 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Fri, 17 May 2019 22:31:26 +0300 Subject: drm/i915: Use intel_ types in intel_modeset_checks() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Switch to using intel_ types instead of drm_ types. Avoids ugly casts and nasty aliasing variables with different types. Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190517193132.8140-8-ville.syrjala@linux.intel.com Reviewed-by: Maarten Lankhorst --- drivers/gpu/drm/i915/intel_display.c | 77 ++++++++++++++++++------------------ 1 file changed, 38 insertions(+), 39 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 107206429745..bc9db3aa5e32 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -13222,38 +13222,37 @@ static int intel_modeset_all_pipes(struct drm_atomic_state *state) return 0; } -static int intel_modeset_checks(struct drm_atomic_state *state) +static int intel_modeset_checks(struct intel_atomic_state *state) { - struct intel_atomic_state *intel_state = to_intel_atomic_state(state); - struct drm_i915_private *dev_priv = to_i915(state->dev); - struct drm_crtc *crtc; - struct drm_crtc_state *old_crtc_state, *new_crtc_state; + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_crtc_state *old_crtc_state, *new_crtc_state; + struct intel_crtc *crtc; int ret = 0, i; - if (!check_digital_port_conflicts(intel_state)) { + if (!check_digital_port_conflicts(state)) { DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n"); return -EINVAL; } /* keep the current setting */ - if (!intel_state->cdclk.force_min_cdclk_changed) - intel_state->cdclk.force_min_cdclk = - dev_priv->cdclk.force_min_cdclk; + if (!state->cdclk.force_min_cdclk_changed) + state->cdclk.force_min_cdclk = dev_priv->cdclk.force_min_cdclk; - intel_state->modeset = true; - intel_state->active_crtcs = dev_priv->active_crtcs; - intel_state->cdclk.logical = dev_priv->cdclk.logical; - intel_state->cdclk.actual = dev_priv->cdclk.actual; - intel_state->cdclk.pipe = INVALID_PIPE; + state->modeset = true; + state->active_crtcs = dev_priv->active_crtcs; + state->cdclk.logical = 
dev_priv->cdclk.logical; + state->cdclk.actual = dev_priv->cdclk.actual; + state->cdclk.pipe = INVALID_PIPE; - for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { - if (new_crtc_state->active) - intel_state->active_crtcs |= 1 << i; + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, + new_crtc_state, i) { + if (new_crtc_state->base.active) + state->active_crtcs |= 1 << i; else - intel_state->active_crtcs &= ~(1 << i); + state->active_crtcs &= ~(1 << i); - if (old_crtc_state->active != new_crtc_state->active) - intel_state->active_pipe_changes |= drm_crtc_mask(crtc); + if (old_crtc_state->base.active != new_crtc_state->base.active) + state->active_pipe_changes |= drm_crtc_mask(&crtc->base); } /* @@ -13266,7 +13265,7 @@ static int intel_modeset_checks(struct drm_atomic_state *state) if (dev_priv->display.modeset_calc_cdclk) { enum pipe pipe; - ret = dev_priv->display.modeset_calc_cdclk(intel_state); + ret = dev_priv->display.modeset_calc_cdclk(state); if (ret < 0) return ret; @@ -13276,19 +13275,19 @@ static int intel_modeset_checks(struct drm_atomic_state *state) * touching the hardware */ if (intel_cdclk_changed(&dev_priv->cdclk.logical, - &intel_state->cdclk.logical)) { - ret = intel_lock_all_pipes(state); + &state->cdclk.logical)) { + ret = intel_lock_all_pipes(&state->base); if (ret < 0) return ret; } - if (is_power_of_2(intel_state->active_crtcs)) { + if (is_power_of_2(state->active_crtcs)) { struct drm_crtc *crtc; struct drm_crtc_state *crtc_state; - pipe = ilog2(intel_state->active_crtcs); + pipe = ilog2(state->active_crtcs); crtc = &intel_get_crtc_for_pipe(dev_priv, pipe)->base; - crtc_state = drm_atomic_get_new_crtc_state(state, crtc); + crtc_state = drm_atomic_get_new_crtc_state(&state->base, crtc); if (crtc_state && needs_modeset(crtc_state)) pipe = INVALID_PIPE; } else { @@ -13299,33 +13298,33 @@ static int intel_modeset_checks(struct drm_atomic_state *state) if (pipe != INVALID_PIPE && intel_cdclk_needs_cd2x_update(dev_priv, &dev_priv->cdclk.actual, - &intel_state->cdclk.actual)) { - ret = intel_lock_all_pipes(state); + &state->cdclk.actual)) { + ret = intel_lock_all_pipes(&state->base); if (ret < 0) return ret; - intel_state->cdclk.pipe = pipe; + state->cdclk.pipe = pipe; } else if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual, - &intel_state->cdclk.actual)) { - ret = intel_modeset_all_pipes(state); + &state->cdclk.actual)) { + ret = intel_modeset_all_pipes(&state->base); if (ret < 0) return ret; - intel_state->cdclk.pipe = INVALID_PIPE; + state->cdclk.pipe = INVALID_PIPE; } DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n", - intel_state->cdclk.logical.cdclk, - intel_state->cdclk.actual.cdclk); + state->cdclk.logical.cdclk, + state->cdclk.actual.cdclk); DRM_DEBUG_KMS("New voltage level calculated to be logical %u, actual %u\n", - intel_state->cdclk.logical.voltage_level, - intel_state->cdclk.actual.voltage_level); + state->cdclk.logical.voltage_level, + state->cdclk.actual.voltage_level); } - intel_modeset_clear_plls(intel_state); + intel_modeset_clear_plls(state); if (IS_HASWELL(dev_priv)) - return haswell_mode_set_planes_workaround(intel_state); + return haswell_mode_set_planes_workaround(state); return 0; } @@ -13414,7 +13413,7 @@ static int intel_atomic_check(struct drm_device *dev, return ret; if (any_ms) { - ret = intel_modeset_checks(state); + ret = intel_modeset_checks(intel_state); if (ret) return ret; -- cgit v1.2.3 From 9a86a07c7e942c25954b9f71010af9aa6cad3997 Mon Sep 17 00:00:00 2001 From: 
Ville Syrjälä Date: Fri, 17 May 2019 22:31:27 +0300 Subject: drm/i915: Use intel_ types in intel_atomic_check() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Switch to using intel_ types instead of drm_ types. Avoids ugly casts and nasty aliasing variables with different types. Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190517193132.8140-9-ville.syrjala@linux.intel.com Reviewed-by: Maarten Lankhorst --- drivers/gpu/drm/i915/intel_display.c | 68 +++++++++++++++++------------------- 1 file changed, 32 insertions(+), 36 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index bc9db3aa5e32..7ce61ecca78b 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -13352,89 +13352,85 @@ static int calc_watermark_data(struct intel_atomic_state *state) * @state: state to validate */ static int intel_atomic_check(struct drm_device *dev, - struct drm_atomic_state *state) + struct drm_atomic_state *_state) { struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_atomic_state *intel_state = to_intel_atomic_state(state); - struct drm_crtc *crtc; - struct drm_crtc_state *old_crtc_state, *crtc_state; + struct intel_atomic_state *state = to_intel_atomic_state(_state); + struct intel_crtc_state *old_crtc_state, *new_crtc_state; + struct intel_crtc *crtc; int ret, i; - bool any_ms = intel_state->cdclk.force_min_cdclk_changed; + bool any_ms = state->cdclk.force_min_cdclk_changed; /* Catch I915_MODE_FLAG_INHERITED */ - for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, - crtc_state, i) { - if (crtc_state->mode.private_flags != - old_crtc_state->mode.private_flags) - crtc_state->mode_changed = true; + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, + new_crtc_state, i) { + if (new_crtc_state->base.mode.private_flags != + old_crtc_state->base.mode.private_flags) + new_crtc_state->base.mode_changed = true; } - ret = drm_atomic_helper_check_modeset(dev, state); + ret = drm_atomic_helper_check_modeset(dev, &state->base); if (ret) return ret; - for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, crtc_state, i) { - struct intel_crtc_state *pipe_config = - to_intel_crtc_state(crtc_state); - - if (!needs_modeset(crtc_state)) + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, + new_crtc_state, i) { + if (!needs_modeset(&new_crtc_state->base)) continue; - if (!crtc_state->enable) { + if (!new_crtc_state->base.enable) { any_ms = true; continue; } - ret = intel_modeset_pipe_config(pipe_config); + ret = intel_modeset_pipe_config(new_crtc_state); if (ret == -EDEADLK) return ret; if (ret) { - intel_dump_pipe_config(pipe_config, "[failed]"); + intel_dump_pipe_config(new_crtc_state, "[failed]"); return ret; } - if (intel_pipe_config_compare(dev_priv, - to_intel_crtc_state(old_crtc_state), - pipe_config, true)) { - crtc_state->mode_changed = false; - pipe_config->update_pipe = true; + if (intel_pipe_config_compare(dev_priv, old_crtc_state, + new_crtc_state, true)) { + new_crtc_state->base.mode_changed = false; + new_crtc_state->update_pipe = true; } - if (needs_modeset(crtc_state)) + if (needs_modeset(&new_crtc_state->base)) any_ms = true; - intel_dump_pipe_config(pipe_config, - needs_modeset(crtc_state) ? + intel_dump_pipe_config(new_crtc_state, + needs_modeset(&new_crtc_state->base) ? 
"[modeset]" : "[fastset]"); } - ret = drm_dp_mst_atomic_check(state); + ret = drm_dp_mst_atomic_check(&state->base); if (ret) return ret; if (any_ms) { - ret = intel_modeset_checks(intel_state); - + ret = intel_modeset_checks(state); if (ret) return ret; } else { - intel_state->cdclk.logical = dev_priv->cdclk.logical; + state->cdclk.logical = dev_priv->cdclk.logical; } - ret = icl_add_linked_planes(intel_state); + ret = icl_add_linked_planes(state); if (ret) return ret; - ret = drm_atomic_helper_check_planes(dev, state); + ret = drm_atomic_helper_check_planes(dev, &state->base); if (ret) return ret; - intel_fbc_choose_crtc(dev_priv, intel_state); - ret = calc_watermark_data(intel_state); + intel_fbc_choose_crtc(dev_priv, state); + ret = calc_watermark_data(state); if (ret) return ret; - ret = intel_bw_atomic_check(intel_state); + ret = intel_bw_atomic_check(state); if (ret) return ret; -- cgit v1.2.3 From a0e701041c958a6667e9bdd050f06ffb2d4de1da Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Fri, 17 May 2019 22:31:28 +0300 Subject: drm/i915: Move state dump to the end of atomic_check() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently we're dumping the crtc states before they have been fully calculated. Move the dumping to the end of .atomic_check() so we get a fully up to date dump. Let's also do the dump for fully disabled pipes, but we'll limit that to just saying that the pipe is disabled since the rest of the state is going to be nonsense in that case. Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190517193132.8140-10-ville.syrjala@linux.intel.com Reviewed-by: Maarten Lankhorst --- drivers/gpu/drm/i915/intel_display.c | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 7ce61ecca78b..b7fc89986793 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -11805,8 +11805,9 @@ static void intel_dump_pipe_config(struct intel_crtc_state *pipe_config, struct drm_framebuffer *fb; char buf[64]; - DRM_DEBUG_KMS("[CRTC:%d:%s]%s\n", - crtc->base.base.id, crtc->base.name, context); + DRM_DEBUG_KMS("[CRTC:%d:%s] enable: %s %s\n", + crtc->base.base.id, crtc->base.name, + yesno(pipe_config->base.enable), context); snprintf_output_types(buf, sizeof(buf), pipe_config->output_types); DRM_DEBUG_KMS("output_types: %s (0x%x)\n", @@ -13399,10 +13400,6 @@ static int intel_atomic_check(struct drm_device *dev, if (needs_modeset(&new_crtc_state->base)) any_ms = true; - - intel_dump_pipe_config(new_crtc_state, - needs_modeset(&new_crtc_state->base) ? - "[modeset]" : "[fastset]"); } ret = drm_dp_mst_atomic_check(&state->base); @@ -13434,6 +13431,17 @@ static int intel_atomic_check(struct drm_device *dev, if (ret) return ret; + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, + new_crtc_state, i) { + if (!needs_modeset(&new_crtc_state->base) && + !new_crtc_state->update_pipe) + continue; + + intel_dump_pipe_config(new_crtc_state, + needs_modeset(&new_crtc_state->base) ? 
+ "[modeset]" : "[fastset]"); + } + return 0; } -- cgit v1.2.3 From 64f6dbabf79ad99f073f3eb9d33481001100ef43 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Fri, 17 May 2019 22:31:29 +0300 Subject: drm/i915: Include crtc_state.active in crtc state dumps MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently we're not dumping out whether the crtc is actually active or in dpms off state. Let's include that in the dumps. And while at it compress out a few lines from the state dump. Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190517193132.8140-11-ville.syrjala@linux.intel.com Reviewed-by: Maarten Lankhorst --- drivers/gpu/drm/i915/intel_display.c | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index b7fc89986793..048cb8d7ae65 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -11810,10 +11810,9 @@ static void intel_dump_pipe_config(struct intel_crtc_state *pipe_config, yesno(pipe_config->base.enable), context); snprintf_output_types(buf, sizeof(buf), pipe_config->output_types); - DRM_DEBUG_KMS("output_types: %s (0x%x)\n", - buf, pipe_config->output_types); - - DRM_DEBUG_KMS("output format: %s\n", + DRM_DEBUG_KMS("active: %s, output_types: %s (0x%x), output format: %s\n", + yesno(pipe_config->base.active), + buf, pipe_config->output_types, output_formats(pipe_config->output_format)); DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n", @@ -11834,10 +11833,8 @@ static void intel_dump_pipe_config(struct intel_crtc_state *pipe_config, &pipe_config->dp_m2_n2); } - DRM_DEBUG_KMS("audio: %i, infoframes: %i\n", - pipe_config->has_audio, pipe_config->has_infoframe); - - DRM_DEBUG_KMS("infoframes enabled: 0x%x\n", + DRM_DEBUG_KMS("audio: %i, infoframes: %i, infoframes enabled: 0x%x\n", + pipe_config->has_audio, pipe_config->has_infoframe, pipe_config->infoframes.enable); if (pipe_config->infoframes.enable & -- cgit v1.2.3 From 2833920d0ea07f67959940547eb4e6cdccc0f9a4 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Fri, 17 May 2019 22:31:30 +0300 Subject: drm/i915: Dump failed crtc states during atomic check MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently we're only dumping the failed crtc state if intel_modeset_pipe_config() fails. Let's do the state dump if anything else fails afterwards. The downside is that we lose the immediate knowledge which crtc caused the failure (unless a lower level function indicates it with an additional debug print) but having the full state dumped seems like something that could be beneficial. 
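The resulting control flow is the usual single-exit error path. A stand-alone sketch of funnelling every failure through one label that does the dumping (made-up step functions, not the driver code), with the -EDEADLK backoff case excluded just as described above:

#include <stdio.h>
#include <errno.h>

static void dump_state(int i) { printf("[state %d] dumped\n", i); }

static int step_a(void) { return 0; }
static int step_b(void) { return -EINVAL; }	/* pretend this step fails */
static int step_c(void) { return 0; }

static int check(void)
{
	int ret, i;

	ret = step_a();
	if (ret)
		goto fail;

	ret = step_b();
	if (ret)
		goto fail;

	ret = step_c();
	if (ret)
		goto fail;

	return 0;

fail:
	/* a deadlock/backoff error is not a real failure, don't dump */
	if (ret == -EDEADLK)
		return ret;

	/* dump every state, as the failing one may not be known here */
	for (i = 0; i < 3; i++)
		dump_state(i);

	return ret;
}

int main(void)
{
	printf("check() = %d\n", check());
	return 0;
}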
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190517193132.8140-12-ville.syrjala@linux.intel.com Reviewed-by: Maarten Lankhorst --- drivers/gpu/drm/i915/intel_display.c | 36 +++++++++++++++++++++++------------- 1 file changed, 23 insertions(+), 13 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 048cb8d7ae65..240e5f1b33ad 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -13369,7 +13369,7 @@ static int intel_atomic_check(struct drm_device *dev, ret = drm_atomic_helper_check_modeset(dev, &state->base); if (ret) - return ret; + goto fail; for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { @@ -13382,12 +13382,8 @@ static int intel_atomic_check(struct drm_device *dev, } ret = intel_modeset_pipe_config(new_crtc_state); - if (ret == -EDEADLK) - return ret; - if (ret) { - intel_dump_pipe_config(new_crtc_state, "[failed]"); - return ret; - } + if (ret) + goto fail; if (intel_pipe_config_compare(dev_priv, old_crtc_state, new_crtc_state, true)) { @@ -13401,32 +13397,32 @@ static int intel_atomic_check(struct drm_device *dev, ret = drm_dp_mst_atomic_check(&state->base); if (ret) - return ret; + goto fail; if (any_ms) { ret = intel_modeset_checks(state); if (ret) - return ret; + goto fail; } else { state->cdclk.logical = dev_priv->cdclk.logical; } ret = icl_add_linked_planes(state); if (ret) - return ret; + goto fail; ret = drm_atomic_helper_check_planes(dev, &state->base); if (ret) - return ret; + goto fail; intel_fbc_choose_crtc(dev_priv, state); ret = calc_watermark_data(state); if (ret) - return ret; + goto fail; ret = intel_bw_atomic_check(state); if (ret) - return ret; + goto fail; for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { @@ -13440,6 +13436,20 @@ static int intel_atomic_check(struct drm_device *dev, } return 0; + + fail: + if (ret == -EDEADLK) + return ret; + + /* + * FIXME would probably be nice to know which crtc specifically + * caused the failure, in cases where we can pinpoint it. + */ + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, + new_crtc_state, i) + intel_dump_pipe_config(new_crtc_state, "[failed]"); + + return ret; } static int intel_atomic_prepare_commit(struct drm_device *dev, -- cgit v1.2.3 From 926878fba5d8ea9f141e60b846336b4de1a0aaf3 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Fri, 17 May 2019 22:31:31 +0300 Subject: drm/i915: Make state dumpers take a const state MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Constify a bunch of the arguments of various state dumping functions. Makes it clear they don't mutate the states. And fix up some indent fails while at it. 
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190517193132.8140-13-ville.syrjala@linux.intel.com Reviewed-by: Maarten Lankhorst --- drivers/gpu/drm/i915/intel_display.c | 20 +++++++++++--------- drivers/gpu/drm/i915/intel_dpll_mgr.c | 16 ++++++++-------- drivers/gpu/drm/i915/intel_dpll_mgr.h | 2 +- 3 files changed, 20 insertions(+), 18 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 240e5f1b33ad..35d1a69628e2 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -11705,17 +11705,19 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc, static void intel_dump_crtc_timings(const struct drm_display_mode *mode) { DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, " - "type: 0x%x flags: 0x%x\n", - mode->crtc_clock, - mode->crtc_hdisplay, mode->crtc_hsync_start, - mode->crtc_hsync_end, mode->crtc_htotal, - mode->crtc_vdisplay, mode->crtc_vsync_start, - mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags); + "type: 0x%x flags: 0x%x\n", + mode->crtc_clock, + mode->crtc_hdisplay, mode->crtc_hsync_start, + mode->crtc_hsync_end, mode->crtc_htotal, + mode->crtc_vdisplay, mode->crtc_vsync_start, + mode->crtc_vsync_end, mode->crtc_vtotal, + mode->type, mode->flags); } static inline void -intel_dump_m_n_config(struct intel_crtc_state *pipe_config, char *id, - unsigned int lane_count, struct intel_link_m_n *m_n) +intel_dump_m_n_config(const struct intel_crtc_state *pipe_config, + const char *id, unsigned int lane_count, + const struct intel_link_m_n *m_n) { DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n", id, lane_count, @@ -11793,7 +11795,7 @@ static const char *output_formats(enum intel_output_format format) return output_format_str[format]; } -static void intel_dump_pipe_config(struct intel_crtc_state *pipe_config, +static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config, const char *context) { struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc); diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c index 897d93537414..69787f259677 100644 --- a/drivers/gpu/drm/i915/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c @@ -454,7 +454,7 @@ ibx_get_dpll(struct intel_crtc_state *crtc_state, } static void ibx_dump_hw_state(struct drm_i915_private *dev_priv, - struct intel_dpll_hw_state *hw_state) + const struct intel_dpll_hw_state *hw_state) { DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, " "fp0: 0x%x, fp1: 0x%x\n", @@ -856,7 +856,7 @@ hsw_get_dpll(struct intel_crtc_state *crtc_state, } static void hsw_dump_hw_state(struct drm_i915_private *dev_priv, - struct intel_dpll_hw_state *hw_state) + const struct intel_dpll_hw_state *hw_state) { DRM_DEBUG_KMS("dpll_hw_state: wrpll: 0x%x spll: 0x%x\n", hw_state->wrpll, hw_state->spll); @@ -1425,7 +1425,7 @@ skl_get_dpll(struct intel_crtc_state *crtc_state, } static void skl_dump_hw_state(struct drm_i915_private *dev_priv, - struct intel_dpll_hw_state *hw_state) + const struct intel_dpll_hw_state *hw_state) { DRM_DEBUG_KMS("dpll_hw_state: " "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n", @@ -1857,7 +1857,7 @@ bxt_get_dpll(struct intel_crtc_state *crtc_state, } static void bxt_dump_hw_state(struct drm_i915_private *dev_priv, - struct intel_dpll_hw_state *hw_state) + const struct intel_dpll_hw_state *hw_state) { DRM_DEBUG_KMS("dpll_hw_state: ebb0: 0x%x, ebb4: 
0x%x," "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, " @@ -1888,7 +1888,7 @@ struct intel_dpll_mgr { struct intel_encoder *encoder); void (*dump_hw_state)(struct drm_i915_private *dev_priv, - struct intel_dpll_hw_state *hw_state); + const struct intel_dpll_hw_state *hw_state); }; static const struct dpll_info pch_plls[] = { @@ -2371,7 +2371,7 @@ cnl_get_dpll(struct intel_crtc_state *crtc_state, } static void cnl_dump_hw_state(struct drm_i915_private *dev_priv, - struct intel_dpll_hw_state *hw_state) + const struct intel_dpll_hw_state *hw_state) { DRM_DEBUG_KMS("dpll_hw_state: " "cfgcr0: 0x%x, cfgcr1: 0x%x\n", @@ -3171,7 +3171,7 @@ static void mg_pll_disable(struct drm_i915_private *dev_priv, } static void icl_dump_hw_state(struct drm_i915_private *dev_priv, - struct intel_dpll_hw_state *hw_state) + const struct intel_dpll_hw_state *hw_state) { DRM_DEBUG_KMS("dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, " "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, " @@ -3341,7 +3341,7 @@ void intel_release_shared_dpll(struct intel_shared_dpll *dpll, * Write the relevant values in @hw_state to dmesg using DRM_DEBUG_KMS. */ void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv, - struct intel_dpll_hw_state *hw_state) + const struct intel_dpll_hw_state *hw_state) { if (dev_priv->dpll_mgr) { dev_priv->dpll_mgr->dump_hw_state(dev_priv, hw_state); diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.h b/drivers/gpu/drm/i915/intel_dpll_mgr.h index 8835dd20f1d2..b5dd9c7ad772 100644 --- a/drivers/gpu/drm/i915/intel_dpll_mgr.h +++ b/drivers/gpu/drm/i915/intel_dpll_mgr.h @@ -343,7 +343,7 @@ void intel_shared_dpll_swap_state(struct drm_atomic_state *state); void intel_shared_dpll_init(struct drm_device *dev); void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv, - struct intel_dpll_hw_state *hw_state); + const struct intel_dpll_hw_state *hw_state); int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv); enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port); bool intel_dpll_is_combophy(enum intel_dpll_id id); -- cgit v1.2.3 From 10d75f5428fd866df36a846a3007bd39bba9c1cf Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Fri, 17 May 2019 22:31:32 +0300 Subject: drm/i915: Fix plane state dumps MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Stop dumping plane->state for planes. That is the old state most of the time and dumping stale information only serves to confuse people. Instead dump the new state just for the planes included in the operation. For now we'll include only the planes for the modeset/fastset pipes in the dumps. But probably we want to dump them all eventually, just not quite sure how to present that information nicely to the user. And while at it let's dump a few more interesting bits from the state. 
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190517193132.8140-14-ville.syrjala@linux.intel.com Reviewed-by: Maarten Lankhorst --- drivers/gpu/drm/i915/intel_display.c | 84 +++++++++++++++++++----------------- 1 file changed, 45 insertions(+), 39 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 35d1a69628e2..fc47ed0247c5 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -11795,22 +11795,50 @@ static const char *output_formats(enum intel_output_format format) return output_format_str[format]; } +static void intel_dump_plane_state(const struct intel_plane_state *plane_state) +{ + struct intel_plane *plane = to_intel_plane(plane_state->base.plane); + const struct drm_framebuffer *fb = plane_state->base.fb; + struct drm_format_name_buf format_name; + + if (!fb) { + DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [NOFB], visible: %s\n", + plane->base.base.id, plane->base.name, + yesno(plane_state->base.visible)); + return; + } + + DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s, visible: %s\n", + plane->base.base.id, plane->base.name, + fb->base.id, fb->width, fb->height, + drm_get_format_name(fb->format->format, &format_name), + yesno(plane_state->base.visible)); + DRM_DEBUG_KMS("\trotation: 0x%x, scaler: %d\n", + plane_state->base.rotation, plane_state->scaler_id); + if (plane_state->base.visible) + DRM_DEBUG_KMS("\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n", + DRM_RECT_FP_ARG(&plane_state->base.src), + DRM_RECT_ARG(&plane_state->base.dst)); +} + static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config, + struct intel_atomic_state *state, const char *context) { struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc); - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - struct drm_plane *plane; - struct intel_plane *intel_plane; - struct intel_plane_state *state; - struct drm_framebuffer *fb; + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + const struct intel_plane_state *plane_state; + struct intel_plane *plane; char buf[64]; + int i; DRM_DEBUG_KMS("[CRTC:%d:%s] enable: %s %s\n", crtc->base.base.id, crtc->base.name, yesno(pipe_config->base.enable), context); + if (!pipe_config->base.enable) + goto dump_planes; + snprintf_output_types(buf, sizeof(buf), pipe_config->output_types); DRM_DEBUG_KMS("active: %s, output_types: %s (0x%x), output format: %s\n", yesno(pipe_config->base.active), @@ -11885,35 +11913,13 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config, intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state); - DRM_DEBUG_KMS("planes on this crtc\n"); - list_for_each_entry(plane, &dev->mode_config.plane_list, head) { - struct drm_format_name_buf format_name; - intel_plane = to_intel_plane(plane); - if (intel_plane->pipe != crtc->pipe) - continue; - - state = to_intel_plane_state(plane->state); - fb = state->base.fb; - if (!fb) { - DRM_DEBUG_KMS("[PLANE:%d:%s] disabled, scaler_id = %d\n", - plane->base.id, plane->name, state->scaler_id); - continue; - } +dump_planes: + if (!state) + return; - DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d, fb = %ux%u format = %s\n", - plane->base.id, plane->name, - fb->base.id, fb->width, fb->height, - drm_get_format_name(fb->format->format, &format_name)); - if (INTEL_GEN(dev_priv) >= 9) - DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n", - 
state->scaler_id, - state->base.src.x1 >> 16, - state->base.src.y1 >> 16, - drm_rect_width(&state->base.src) >> 16, - drm_rect_height(&state->base.src) >> 16, - state->base.dst.x1, state->base.dst.y1, - drm_rect_width(&state->base.dst), - drm_rect_height(&state->base.dst)); + for_each_new_intel_plane_in_state(state, plane, plane_state, i) { + if (plane->pipe == crtc->pipe) + intel_dump_plane_state(plane_state); } } @@ -12903,8 +12909,8 @@ verify_crtc_state(struct drm_crtc *crtc, if (!intel_pipe_config_compare(dev_priv, sw_config, pipe_config, false)) { I915_STATE_WARN(1, "pipe state doesn't match!\n"); - intel_dump_pipe_config(pipe_config, "[hw state]"); - intel_dump_pipe_config(sw_config, "[sw state]"); + intel_dump_pipe_config(pipe_config, NULL, "[hw state]"); + intel_dump_pipe_config(sw_config, NULL, "[sw state]"); } } @@ -13432,7 +13438,7 @@ static int intel_atomic_check(struct drm_device *dev, !new_crtc_state->update_pipe) continue; - intel_dump_pipe_config(new_crtc_state, + intel_dump_pipe_config(new_crtc_state, state, needs_modeset(&new_crtc_state->base) ? "[modeset]" : "[fastset]"); } @@ -13449,7 +13455,7 @@ static int intel_atomic_check(struct drm_device *dev, */ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) - intel_dump_pipe_config(new_crtc_state, "[failed]"); + intel_dump_pipe_config(new_crtc_state, state, "[failed]"); return ret; } @@ -16677,7 +16683,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev, for_each_intel_crtc(&dev_priv->drm, crtc) { crtc_state = to_intel_crtc_state(crtc->base.state); intel_sanitize_crtc(crtc, ctx); - intel_dump_pipe_config(crtc_state, "[setup_hw_state]"); + intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]"); } intel_modeset_update_connector_atomic_state(dev); -- cgit v1.2.3 From 87d1372d1da3c5b3ec06c70a50811093d41c671e Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 4 Jun 2019 13:00:20 +0100 Subject: drm/i915/selftests: Flush partial-tiling object once We only need to flush the object once prior to starting the partial tiling test as inside the test we explicitly maintain coherency. 
Signed-off-by: Chris Wilson Reviewed-by: Joonas Lahtinen Link: https://patchwork.freedesktop.org/patch/msgid/20190604120022.20472-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c index 5db3327958fb..b92809418729 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c @@ -98,6 +98,14 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj, GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling); GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride); + i915_gem_object_lock(obj); + err = i915_gem_object_set_to_gtt_domain(obj, true); + i915_gem_object_unlock(obj); + if (err) { + pr_err("Failed to flush to GTT write domain; err=%d\n", err); + return err; + } + for_each_prime_number_from(page, 1, npages) { struct i915_ggtt_view view = compute_partial_view(obj, page, MIN_CHUNK_PAGES); @@ -110,15 +118,6 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj, GEM_BUG_ON(view.partial.size > nreal); cond_resched(); - i915_gem_object_lock(obj); - err = i915_gem_object_set_to_gtt_domain(obj, true); - i915_gem_object_unlock(obj); - if (err) { - pr_err("Failed to flush to GTT write domain; err=%d\n", - err); - return err; - } - vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE); if (IS_ERR(vma)) { pr_err("Failed to pin partial view: offset=%lu; err=%d\n", @@ -144,9 +143,7 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj, if (offset >= obj->base.size) continue; - i915_gem_object_lock(obj); - i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU); - i915_gem_object_unlock(obj); + i915_gem_flush_ggtt_writes(to_i915(obj->base.dev)); p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT); cpu = kmap(p) + offset_in_page(offset); -- cgit v1.2.3 From 1c8242c3a4b2c1fc36c72c5f479058629f772b65 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 4 Jun 2019 13:00:21 +0100 Subject: drm/i915: Use unchecked writes for setting up the fences As the fence registers are not part of the engine powerwells, we do not need to fiddle with forcewake in order to update a fence. Avoid using the heavyweight debug checking normal mmio writes as the checking dominates the selftest runtime and is superfluous! In the process, retire the I915_WRITE() implicit macro with the new intel_uncore_write interface. 
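As a rough stand-alone illustration of the trade-off (made-up register file and helper names, not the real uncore API): the checked accessor pays for validation and bookkeeping on every access, while the raw variant just performs the store because the caller already guarantees the register is valid and powered.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t regs[16];	/* stand-in for an mmio window */
static int checks_done;

/* checked path: validates the access and counts the bookkeeping */
static void reg_write_checked(unsigned int reg, uint32_t val)
{
	assert(reg < 16);	/* pretend this is the expensive debug check */
	checks_done++;
	regs[reg] = val;
}

/* raw path: caller guarantees the register is valid and always powered */
static void reg_write_raw(unsigned int reg, uint32_t val)
{
	regs[reg] = val;
}

int main(void)
{
	reg_write_checked(2, 0xdeadbeef);
	reg_write_raw(3, 0xcafebabe);
	printf("regs[2]=%#x regs[3]=%#x checks=%d\n",
	       (unsigned int)regs[2], (unsigned int)regs[3], checks_done);
	return 0;
}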
v2: s/unc/uncore/ Signed-off-by: Chris Wilson Reviewed-by: Joonas Lahtinen Link: https://patchwork.freedesktop.org/patch/msgid/20190604120022.20472-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem_fence_reg.c | 123 +++++++++++++++++------------- 1 file changed, 68 insertions(+), 55 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c index 2e9e32330aaa..10aa6e350bfa 100644 --- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c +++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c @@ -94,9 +94,10 @@ static void i965_write_fence_reg(struct drm_i915_fence_reg *fence, } if (!pipelined) { - struct drm_i915_private *dev_priv = fence->i915; + struct intel_uncore *uncore = &fence->i915->uncore; - /* To w/a incoherency with non-atomic 64-bit register updates, + /* + * To w/a incoherency with non-atomic 64-bit register updates, * we split the 64-bit update into two 32-bit writes. In order * for a partial fence not to be evaluated between writes, we * precede the update with write to turn off the fence register, @@ -105,12 +106,12 @@ static void i965_write_fence_reg(struct drm_i915_fence_reg *fence, * For extra levels of paranoia, we make sure each step lands * before applying the next step. */ - I915_WRITE(fence_reg_lo, 0); - POSTING_READ(fence_reg_lo); + intel_uncore_write_fw(uncore, fence_reg_lo, 0); + intel_uncore_posting_read_fw(uncore, fence_reg_lo); - I915_WRITE(fence_reg_hi, upper_32_bits(val)); - I915_WRITE(fence_reg_lo, lower_32_bits(val)); - POSTING_READ(fence_reg_lo); + intel_uncore_write_fw(uncore, fence_reg_hi, upper_32_bits(val)); + intel_uncore_write_fw(uncore, fence_reg_lo, lower_32_bits(val)); + intel_uncore_posting_read_fw(uncore, fence_reg_lo); } } @@ -146,11 +147,11 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *fence, } if (!pipelined) { - struct drm_i915_private *dev_priv = fence->i915; + struct intel_uncore *uncore = &fence->i915->uncore; i915_reg_t reg = FENCE_REG(fence->id); - I915_WRITE(reg, val); - POSTING_READ(reg); + intel_uncore_write_fw(uncore, reg, val); + intel_uncore_posting_read_fw(uncore, reg); } } @@ -178,18 +179,19 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *fence, } if (!pipelined) { - struct drm_i915_private *dev_priv = fence->i915; + struct intel_uncore *uncore = &fence->i915->uncore; i915_reg_t reg = FENCE_REG(fence->id); - I915_WRITE(reg, val); - POSTING_READ(reg); + intel_uncore_write_fw(uncore, reg, val); + intel_uncore_posting_read_fw(uncore, reg); } } static void fence_write(struct drm_i915_fence_reg *fence, struct i915_vma *vma) { - /* Previous access through the fence register is marshalled by + /* + * Previous access through the fence register is marshalled by * the mb() inside the fault handlers (i915_gem_release_mmaps) * and explicitly managed for internal users. */ @@ -201,7 +203,8 @@ static void fence_write(struct drm_i915_fence_reg *fence, else i965_write_fence_reg(fence, vma); - /* Access through the fenced region afterwards is + /* + * Access through the fenced region afterwards is * ordered by the posting reads whilst writing the registers. 
*/ @@ -308,11 +311,11 @@ int i915_vma_put_fence(struct i915_vma *vma) return fence_update(fence, NULL); } -static struct drm_i915_fence_reg *fence_find(struct drm_i915_private *dev_priv) +static struct drm_i915_fence_reg *fence_find(struct drm_i915_private *i915) { struct drm_i915_fence_reg *fence; - list_for_each_entry(fence, &dev_priv->mm.fence_list, link) { + list_for_each_entry(fence, &i915->mm.fence_list, link) { GEM_BUG_ON(fence->vma && fence->vma->fence != fence); if (fence->pin_count) @@ -322,7 +325,7 @@ static struct drm_i915_fence_reg *fence_find(struct drm_i915_private *dev_priv) } /* Wait for completion of pending flips which consume fences */ - if (intel_has_pending_fb_unpin(dev_priv)) + if (intel_has_pending_fb_unpin(i915)) return ERR_PTR(-EAGAIN); return ERR_PTR(-EDEADLK); @@ -353,7 +356,8 @@ i915_vma_pin_fence(struct i915_vma *vma) struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL; int err; - /* Note that we revoke fences on runtime suspend. Therefore the user + /* + * Note that we revoke fences on runtime suspend. Therefore the user * must keep the device awake whilst using the fence. */ assert_rpm_wakelock_held(vma->vm->i915); @@ -395,28 +399,28 @@ out_unpin: /** * i915_reserve_fence - Reserve a fence for vGPU - * @dev_priv: i915 device private + * @i915: i915 device private * * This function walks the fence regs looking for a free one and remove * it from the fence_list. It is used to reserve fence for vGPU to use. */ struct drm_i915_fence_reg * -i915_reserve_fence(struct drm_i915_private *dev_priv) +i915_reserve_fence(struct drm_i915_private *i915) { struct drm_i915_fence_reg *fence; int count; int ret; - lockdep_assert_held(&dev_priv->drm.struct_mutex); + lockdep_assert_held(&i915->drm.struct_mutex); /* Keep at least one fence available for the display engine. */ count = 0; - list_for_each_entry(fence, &dev_priv->mm.fence_list, link) + list_for_each_entry(fence, &i915->mm.fence_list, link) count += !fence->pin_count; if (count <= 1) return ERR_PTR(-ENOSPC); - fence = fence_find(dev_priv); + fence = fence_find(i915); if (IS_ERR(fence)) return fence; @@ -446,19 +450,19 @@ void i915_unreserve_fence(struct drm_i915_fence_reg *fence) /** * i915_gem_restore_fences - restore fence state - * @dev_priv: i915 device private + * @i915: i915 device private * * Restore the hw fence state to match the software tracking again, to be called * after a gpu reset and on resume. Note that on runtime suspend we only cancel * the fences, to be reacquired by the user later. */ -void i915_gem_restore_fences(struct drm_i915_private *dev_priv) +void i915_gem_restore_fences(struct drm_i915_private *i915) { int i; rcu_read_lock(); /* keep obj alive as we dereference */ - for (i = 0; i < dev_priv->num_fence_regs; i++) { - struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; + for (i = 0; i < i915->num_fence_regs; i++) { + struct drm_i915_fence_reg *reg = &i915->fence_regs[i]; struct i915_vma *vma = READ_ONCE(reg->vma); GEM_BUG_ON(vma && vma->fence != reg); @@ -525,18 +529,19 @@ void i915_gem_restore_fences(struct drm_i915_private *dev_priv) /** * i915_gem_detect_bit_6_swizzle - detect bit 6 swizzling pattern - * @dev_priv: i915 device private + * @i915: i915 device private * * Detects bit 6 swizzling of address lookup between IGD access and CPU * access through main memory. 
*/ void -i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv) +i915_gem_detect_bit_6_swizzle(struct drm_i915_private *i915) { + struct intel_uncore *uncore = &i915->uncore; u32 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; u32 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; - if (INTEL_GEN(dev_priv) >= 8 || IS_VALLEYVIEW(dev_priv)) { + if (INTEL_GEN(i915) >= 8 || IS_VALLEYVIEW(i915)) { /* * On BDW+, swizzling is not used. We leave the CPU memory * controller in charge of optimizing memory accesses without @@ -546,9 +551,9 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv) */ swizzle_x = I915_BIT_6_SWIZZLE_NONE; swizzle_y = I915_BIT_6_SWIZZLE_NONE; - } else if (INTEL_GEN(dev_priv) >= 6) { - if (dev_priv->preserve_bios_swizzle) { - if (I915_READ(DISP_ARB_CTL) & + } else if (INTEL_GEN(i915) >= 6) { + if (i915->preserve_bios_swizzle) { + if (intel_uncore_read(uncore, DISP_ARB_CTL) & DISP_TILE_SURFACE_SWIZZLING) { swizzle_x = I915_BIT_6_SWIZZLE_9_10; swizzle_y = I915_BIT_6_SWIZZLE_9; @@ -558,15 +563,17 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv) } } else { u32 dimm_c0, dimm_c1; - dimm_c0 = I915_READ(MAD_DIMM_C0); - dimm_c1 = I915_READ(MAD_DIMM_C1); + dimm_c0 = intel_uncore_read(uncore, MAD_DIMM_C0); + dimm_c1 = intel_uncore_read(uncore, MAD_DIMM_C1); dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK; dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK; - /* Enable swizzling when the channels are populated + /* + * Enable swizzling when the channels are populated * with identically sized dimms. We don't need to check * the 3rd channel because no cpu with gpu attached * ships in that configuration. Also, swizzling only - * makes sense for 2 channels anyway. */ + * makes sense for 2 channels anyway. + */ if (dimm_c0 == dimm_c1) { swizzle_x = I915_BIT_6_SWIZZLE_9_10; swizzle_y = I915_BIT_6_SWIZZLE_9; @@ -575,20 +582,23 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv) swizzle_y = I915_BIT_6_SWIZZLE_NONE; } } - } else if (IS_GEN(dev_priv, 5)) { - /* On Ironlake whatever DRAM config, GPU always do + } else if (IS_GEN(i915, 5)) { + /* + * On Ironlake whatever DRAM config, GPU always do * same swizzling setup. */ swizzle_x = I915_BIT_6_SWIZZLE_9_10; swizzle_y = I915_BIT_6_SWIZZLE_9; - } else if (IS_GEN(dev_priv, 2)) { - /* As far as we know, the 865 doesn't have these bit 6 + } else if (IS_GEN(i915, 2)) { + /* + * As far as we know, the 865 doesn't have these bit 6 * swizzling issues. */ swizzle_x = I915_BIT_6_SWIZZLE_NONE; swizzle_y = I915_BIT_6_SWIZZLE_NONE; - } else if (IS_G45(dev_priv) || IS_I965G(dev_priv) || IS_G33(dev_priv)) { - /* The 965, G33, and newer, have a very flexible memory + } else if (IS_G45(i915) || IS_I965G(i915) || IS_G33(i915)) { + /* + * The 965, G33, and newer, have a very flexible memory * configuration. It will enable dual-channel mode * (interleaving) on as much memory as it can, and the GPU * will additionally sometimes enable different bit 6 @@ -614,14 +624,16 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv) * banks of memory are paired and unswizzled on the * uneven portion, so leave that as unknown. 
*/ - if (I915_READ16(C0DRB3) == I915_READ16(C1DRB3)) { + if (intel_uncore_read(uncore, C0DRB3) == + intel_uncore_read(uncore, C1DRB3)) { swizzle_x = I915_BIT_6_SWIZZLE_9_10; swizzle_y = I915_BIT_6_SWIZZLE_9; } } else { - u32 dcc; + u32 dcc = intel_uncore_read(uncore, DCC); - /* On 9xx chipsets, channel interleave by the CPU is + /* + * On 9xx chipsets, channel interleave by the CPU is * determined by DCC. For single-channel, neither the CPU * nor the GPU do swizzling. For dual channel interleaved, * the GPU's interleave is bit 9 and 10 for X tiled, and bit @@ -629,7 +641,6 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv) * can be based on either bit 11 (haven't seen this yet) or * bit 17 (common). */ - dcc = I915_READ(DCC); switch (dcc & DCC_ADDRESSING_MODE_MASK) { case DCC_ADDRESSING_MODE_SINGLE_CHANNEL: case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC: @@ -638,7 +649,8 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv) break; case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED: if (dcc & DCC_CHANNEL_XOR_DISABLE) { - /* This is the base swizzling by the GPU for + /* + * This is the base swizzling by the GPU for * tiled buffers. */ swizzle_x = I915_BIT_6_SWIZZLE_9_10; @@ -656,8 +668,8 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv) } /* check for L-shaped memory aka modified enhanced addressing */ - if (IS_GEN(dev_priv, 4) && - !(I915_READ(DCC2) & DCC2_MODIFIED_ENHANCED_DISABLE)) { + if (IS_GEN(i915, 4) && + !(intel_uncore_read(uncore, DCC2) & DCC2_MODIFIED_ENHANCED_DISABLE)) { swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; } @@ -672,7 +684,8 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv) if (swizzle_x == I915_BIT_6_SWIZZLE_UNKNOWN || swizzle_y == I915_BIT_6_SWIZZLE_UNKNOWN) { - /* Userspace likes to explode if it sees unknown swizzling, + /* + * Userspace likes to explode if it sees unknown swizzling, * so lie. We will finish the lie when reporting through * the get-tiling-ioctl by reporting the physical swizzle * mode as unknown instead. @@ -681,13 +694,13 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv) * bit17 dependent, and so we need to also prevent the pages * from being moved. */ - dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES; + i915->quirks |= QUIRK_PIN_SWIZZLED_PAGES; swizzle_x = I915_BIT_6_SWIZZLE_NONE; swizzle_y = I915_BIT_6_SWIZZLE_NONE; } - dev_priv->mm.bit_6_swizzle_x = swizzle_x; - dev_priv->mm.bit_6_swizzle_y = swizzle_y; + i915->mm.bit_6_swizzle_x = swizzle_x; + i915->mm.bit_6_swizzle_y = swizzle_y; } /* -- cgit v1.2.3 From 59ec84eca57ab33107f1cd712a183ff5f9e234d8 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 4 Jun 2019 13:00:22 +0100 Subject: drm/i915: Use unchecked uncore writes to flush the GTT As the GTT is outside of the powerwell, we can simplify flushing the GGTT writes by using an unchecked mmio write and post. 
v2: s/unc/uncore/ Signed-off-by: Chris Wilson Reviewed-by: Joonas Lahtinen Link: https://patchwork.freedesktop.org/patch/msgid/20190604120022.20472-3-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem_gtt.c | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index ca8a69e8b098..d415438d4815 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -108,22 +108,26 @@ static int i915_get_ggtt_vma_pages(struct i915_vma *vma); -static void gen6_ggtt_invalidate(struct drm_i915_private *dev_priv) +static void gen6_ggtt_invalidate(struct drm_i915_private *i915) { + struct intel_uncore *uncore = &i915->uncore; + /* * Note that as an uncached mmio write, this will flush the * WCB of the writes into the GGTT before it triggers the invalidate. */ - I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); + intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); } -static void guc_ggtt_invalidate(struct drm_i915_private *dev_priv) +static void guc_ggtt_invalidate(struct drm_i915_private *i915) { - gen6_ggtt_invalidate(dev_priv); - I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE); + struct intel_uncore *uncore = &i915->uncore; + + gen6_ggtt_invalidate(i915); + intel_uncore_write_fw(uncore, GEN8_GTCR, GEN8_GTCR_INVALIDATE); } -static void gmch_ggtt_invalidate(struct drm_i915_private *dev_priv) +static void gmch_ggtt_invalidate(struct drm_i915_private *i915) { intel_gtt_chipset_flush(); } @@ -1347,10 +1351,10 @@ static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt) static void gen8_ppgtt_cleanup(struct i915_address_space *vm) { - struct drm_i915_private *dev_priv = vm->i915; + struct drm_i915_private *i915 = vm->i915; struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); - if (intel_vgpu_active(dev_priv)) + if (intel_vgpu_active(i915)) gen8_ppgtt_notify_vgt(ppgtt, false); if (i915_vm_is_4lvl(vm)) -- cgit v1.2.3 From 1d1b5490b91c932a75188e9acf76ead68d6e9741 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 4 Jun 2019 16:38:30 +0100 Subject: drm/i915/gtt: Replace struct_mutex serialisation for allocation Instead of relying on the caller holding struct_mutex across the allocation, push the allocation under a tree of spinlocks stored inside the page tables. Not only should this allow us to avoid struct_mutex here, but it will allow multiple users to lock independent ranges for concurrent allocations, and operate independently. This is vital for pushing the GTT manipulation into a background thread where dependency on struct_mutex is verboten, and for allowing other callers to avoid struct_mutex altogether. v2: Restore lost GEM_BUG_ON for removing too many PTE from gen6_ppgtt_clear_range. 
Signed-off-by: Chris Wilson Cc: Matthew Auld Cc: Mika Kuoppala Reviewed-by: Joonas Lahtinen Acked-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190604153830.19096-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem_gtt.c | 213 +++++++++++++++++++++++++----------- drivers/gpu/drm/i915/i915_gem_gtt.h | 9 +- 2 files changed, 153 insertions(+), 69 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index d415438d4815..56a436858043 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -659,7 +659,7 @@ static struct i915_page_table *alloc_pt(struct i915_address_space *vm) return ERR_PTR(-ENOMEM); } - pt->used_ptes = 0; + atomic_set(&pt->used_ptes, 0); return pt; } @@ -694,7 +694,8 @@ static struct i915_page_directory *alloc_pd(struct i915_address_space *vm) return ERR_PTR(-ENOMEM); } - pd->used_pdes = 0; + atomic_set(&pd->used_pdes, 0); + spin_lock_init(&pd->lock); return pd; } @@ -725,6 +726,8 @@ static int __pdp_init(struct i915_address_space *vm, memset_p((void **)pdp->page_directory, vm->scratch_pd, pdpes); + atomic_set(&pdp->used_pdpes, 0); + spin_lock_init(&pdp->lock); return 0; } @@ -779,11 +782,8 @@ static void free_pdp(struct i915_address_space *vm, static void gen8_initialize_pdp(struct i915_address_space *vm, struct i915_page_directory_pointer *pdp) { - gen8_ppgtt_pdpe_t scratch_pdpe; - - scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC); - - fill_px(vm, pdp, scratch_pdpe); + fill_px(vm, pdp, + gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC)); } static void gen8_initialize_pml4(struct i915_address_space *vm, @@ -792,6 +792,7 @@ static void gen8_initialize_pml4(struct i915_address_space *vm, fill_px(vm, pml4, gen8_pml4e_encode(px_dma(vm->scratch_pdp), I915_CACHE_LLC)); memset_p((void **)pml4->pdps, vm->scratch_pdp, GEN8_PML4ES_PER_PML4); + spin_lock_init(&pml4->lock); } /* @@ -815,17 +816,12 @@ static bool gen8_ppgtt_clear_pt(const struct i915_address_space *vm, unsigned int num_entries = gen8_pte_count(start, length); gen8_pte_t *vaddr; - GEM_BUG_ON(num_entries > pt->used_ptes); - - pt->used_ptes -= num_entries; - if (!pt->used_ptes) - return true; - vaddr = kmap_atomic_px(pt); memset64(vaddr + gen8_pte_index(start), vm->scratch_pte, num_entries); kunmap_atomic(vaddr); - return false; + GEM_BUG_ON(num_entries > atomic_read(&pt->used_ptes)); + return !atomic_sub_return(num_entries, &pt->used_ptes); } static void gen8_ppgtt_set_pde(struct i915_address_space *vm, @@ -835,8 +831,6 @@ static void gen8_ppgtt_set_pde(struct i915_address_space *vm, { gen8_pde_t *vaddr; - pd->page_table[pde] = pt; - vaddr = kmap_atomic_px(pd); vaddr[pde] = gen8_pde_encode(px_dma(pt), I915_CACHE_LLC); kunmap_atomic(vaddr); @@ -850,19 +844,28 @@ static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm, u32 pde; gen8_for_each_pde(pt, pd, start, length, pde) { + bool free = false; + GEM_BUG_ON(pt == vm->scratch_pt); if (!gen8_ppgtt_clear_pt(vm, pt, start, length)) continue; - gen8_ppgtt_set_pde(vm, pd, vm->scratch_pt, pde); - GEM_BUG_ON(!pd->used_pdes); - pd->used_pdes--; + spin_lock(&pd->lock); + if (!atomic_read(&pt->used_ptes)) { + gen8_ppgtt_set_pde(vm, pd, vm->scratch_pt, pde); + pd->page_table[pde] = vm->scratch_pt; - free_pt(vm, pt); + GEM_BUG_ON(!atomic_read(&pd->used_pdes)); + atomic_dec(&pd->used_pdes); + free = true; + } + spin_unlock(&pd->lock); + if (free) + free_pt(vm, pt); } - return !pd->used_pdes; + return 
!atomic_read(&pd->used_pdes); } static void gen8_ppgtt_set_pdpe(struct i915_address_space *vm, @@ -872,7 +875,6 @@ static void gen8_ppgtt_set_pdpe(struct i915_address_space *vm, { gen8_ppgtt_pdpe_t *vaddr; - pdp->page_directory[pdpe] = pd; if (!i915_vm_is_4lvl(vm)) return; @@ -892,19 +894,28 @@ static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm, unsigned int pdpe; gen8_for_each_pdpe(pd, pdp, start, length, pdpe) { + bool free = false; + GEM_BUG_ON(pd == vm->scratch_pd); if (!gen8_ppgtt_clear_pd(vm, pd, start, length)) continue; - gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe); - GEM_BUG_ON(!pdp->used_pdpes); - pdp->used_pdpes--; + spin_lock(&pdp->lock); + if (!atomic_read(&pd->used_pdes)) { + gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe); + pdp->page_directory[pdpe] = vm->scratch_pd; - free_pd(vm, pd); + GEM_BUG_ON(!atomic_read(&pdp->used_pdpes)); + atomic_dec(&pdp->used_pdpes); + free = true; + } + spin_unlock(&pdp->lock); + if (free) + free_pd(vm, pd); } - return !pdp->used_pdpes; + return !atomic_read(&pdp->used_pdpes); } static void gen8_ppgtt_clear_3lvl(struct i915_address_space *vm, @@ -919,8 +930,6 @@ static void gen8_ppgtt_set_pml4e(struct i915_pml4 *pml4, { gen8_ppgtt_pml4e_t *vaddr; - pml4->pdps[pml4e] = pdp; - vaddr = kmap_atomic_px(pml4); vaddr[pml4e] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC); kunmap_atomic(vaddr); @@ -941,14 +950,21 @@ static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm, GEM_BUG_ON(!i915_vm_is_4lvl(vm)); gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) { + bool free = false; GEM_BUG_ON(pdp == vm->scratch_pdp); if (!gen8_ppgtt_clear_pdp(vm, pdp, start, length)) continue; - gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e); - - free_pdp(vm, pdp); + spin_lock(&pml4->lock); + if (!atomic_read(&pdp->used_pdpes)) { + gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e); + pml4->pdps[pml4e] = vm->scratch_pdp; + free = true; + } + spin_unlock(&pml4->lock); + if (free) + free_pdp(vm, pdp); } } @@ -1373,27 +1389,38 @@ static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm, u64 from = start; unsigned int pde; + spin_lock(&pd->lock); gen8_for_each_pde(pt, pd, start, length, pde) { - int count = gen8_pte_count(start, length); + const int count = gen8_pte_count(start, length); if (pt == vm->scratch_pt) { - pd->used_pdes++; + struct i915_page_table *old; + + spin_unlock(&pd->lock); pt = alloc_pt(vm); - if (IS_ERR(pt)) { - pd->used_pdes--; + if (IS_ERR(pt)) goto unwind; - } if (count < GEN8_PTES || intel_vgpu_active(vm->i915)) gen8_initialize_pt(vm, pt); - gen8_ppgtt_set_pde(vm, pd, pt, pde); - GEM_BUG_ON(pd->used_pdes > I915_PDES); + old = cmpxchg(&pd->page_table[pde], vm->scratch_pt, pt); + if (old == vm->scratch_pt) { + gen8_ppgtt_set_pde(vm, pd, pt, pde); + atomic_inc(&pd->used_pdes); + } else { + free_pt(vm, pt); + pt = old; + } + + spin_lock(&pd->lock); } - pt->used_ptes += count; + atomic_add(count, &pt->used_ptes); } + spin_unlock(&pd->lock); + return 0; unwind: @@ -1410,35 +1437,54 @@ static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm, unsigned int pdpe; int ret; + spin_lock(&pdp->lock); gen8_for_each_pdpe(pd, pdp, start, length, pdpe) { if (pd == vm->scratch_pd) { - pdp->used_pdpes++; + struct i915_page_directory *old; + + spin_unlock(&pdp->lock); pd = alloc_pd(vm); - if (IS_ERR(pd)) { - pdp->used_pdpes--; + if (IS_ERR(pd)) goto unwind; - } gen8_initialize_pd(vm, pd); - gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe); - GEM_BUG_ON(pdp->used_pdpes > i915_pdpes_per_pdp(vm)); + + old = 
cmpxchg(&pdp->page_directory[pdpe], + vm->scratch_pd, pd); + if (old == vm->scratch_pd) { + gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe); + atomic_inc(&pdp->used_pdpes); + } else { + free_pd(vm, pd); + pd = old; + } + + spin_lock(&pdp->lock); } + atomic_inc(&pd->used_pdes); + spin_unlock(&pdp->lock); ret = gen8_ppgtt_alloc_pd(vm, pd, start, length); if (unlikely(ret)) goto unwind_pd; + + spin_lock(&pdp->lock); + atomic_dec(&pd->used_pdes); } + spin_unlock(&pdp->lock); return 0; unwind_pd: - if (!pd->used_pdes) { + spin_lock(&pdp->lock); + if (atomic_dec_and_test(&pd->used_pdes)) { gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe); - GEM_BUG_ON(!pdp->used_pdpes); - pdp->used_pdpes--; + GEM_BUG_ON(!atomic_read(&pdp->used_pdpes)); + atomic_dec(&pdp->used_pdpes); free_pd(vm, pd); } + spin_unlock(&pdp->lock); unwind: gen8_ppgtt_clear_pdp(vm, pdp, from, start - from); return -ENOMEM; @@ -1461,28 +1507,50 @@ static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm, u32 pml4e; int ret; + spin_lock(&pml4->lock); gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) { - if (pml4->pdps[pml4e] == vm->scratch_pdp) { + if (pdp == vm->scratch_pdp) { + struct i915_page_directory_pointer *old; + + spin_unlock(&pml4->lock); + pdp = alloc_pdp(vm); if (IS_ERR(pdp)) goto unwind; gen8_initialize_pdp(vm, pdp); - gen8_ppgtt_set_pml4e(pml4, pdp, pml4e); + + old = cmpxchg(&pml4->pdps[pml4e], vm->scratch_pdp, pdp); + if (old == vm->scratch_pdp) { + gen8_ppgtt_set_pml4e(pml4, pdp, pml4e); + } else { + free_pdp(vm, pdp); + pdp = old; + } + + spin_lock(&pml4->lock); } + atomic_inc(&pdp->used_pdpes); + spin_unlock(&pml4->lock); ret = gen8_ppgtt_alloc_pdp(vm, pdp, start, length); if (unlikely(ret)) goto unwind_pdp; + + spin_lock(&pml4->lock); + atomic_dec(&pdp->used_pdpes); } + spin_unlock(&pml4->lock); return 0; unwind_pdp: - if (!pdp->used_pdpes) { + spin_lock(&pml4->lock); + if (atomic_dec_and_test(&pdp->used_pdpes)) { gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e); free_pdp(vm, pdp); } + spin_unlock(&pml4->lock); unwind: gen8_ppgtt_clear_4lvl(vm, from, start - from); return -ENOMEM; @@ -1504,10 +1572,10 @@ static int gen8_preallocate_top_level_pdp(struct i915_hw_ppgtt *ppgtt) gen8_initialize_pd(vm, pd); gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe); - pdp->used_pdpes++; + atomic_inc(&pdp->used_pdpes); } - pdp->used_pdpes++; /* never remove */ + atomic_inc(&pdp->used_pdpes); /* never remove */ return 0; unwind: @@ -1516,7 +1584,7 @@ unwind: gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe); free_pd(vm, pd); } - pdp->used_pdpes = 0; + atomic_set(&pdp->used_pdpes, 0); return -ENOMEM; } @@ -1688,9 +1756,8 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm, num_entries -= count; - GEM_BUG_ON(count > pt->used_ptes); - pt->used_ptes -= count; - if (!pt->used_ptes) + GEM_BUG_ON(count > atomic_read(&pt->used_ptes)); + if (!atomic_sub_return(count, &pt->used_ptes)) ppgtt->scan_for_unused_pt = true; /* @@ -1760,28 +1827,41 @@ static int gen6_alloc_va_range(struct i915_address_space *vm, wakeref = intel_runtime_pm_get(vm->i915); + spin_lock(&ppgtt->base.pd.lock); gen6_for_each_pde(pt, &ppgtt->base.pd, start, length, pde) { const unsigned int count = gen6_pte_count(start, length); if (pt == vm->scratch_pt) { + struct i915_page_table *old; + + spin_unlock(&ppgtt->base.pd.lock); + pt = alloc_pt(vm); if (IS_ERR(pt)) goto unwind_out; gen6_initialize_pt(vm, pt); - ppgtt->base.pd.page_table[pde] = pt; - if (i915_vma_is_bound(ppgtt->vma, - I915_VMA_GLOBAL_BIND)) { - gen6_write_pde(ppgtt, pde, pt); - flush = true; + old = 
cmpxchg(&ppgtt->base.pd.page_table[pde], + vm->scratch_pt, pt); + if (old == vm->scratch_pt) { + ppgtt->base.pd.page_table[pde] = pt; + if (i915_vma_is_bound(ppgtt->vma, + I915_VMA_GLOBAL_BIND)) { + gen6_write_pde(ppgtt, pde, pt); + flush = true; + } + } else { + free_pt(vm, pt); + pt = old; } - GEM_BUG_ON(pt->used_ptes); + spin_lock(&ppgtt->base.pd.lock); } - pt->used_ptes += count; + atomic_add(count, &pt->used_ptes); } + spin_unlock(&ppgtt->base.pd.lock); if (flush) { mark_tlbs_dirty(&ppgtt->base); @@ -1822,6 +1902,7 @@ static int gen6_ppgtt_init_scratch(struct gen6_hw_ppgtt *ppgtt) gen6_initialize_pt(vm, vm->scratch_pt); gen6_for_all_pdes(unused, &ppgtt->base.pd, pde) ppgtt->base.pd.page_table[pde] = vm->scratch_pt; + spin_lock_init(&ppgtt->base.pd.lock); return 0; } @@ -1950,7 +2031,7 @@ static void pd_vma_unbind(struct i915_vma *vma) /* Free all no longer used page tables */ gen6_for_all_pdes(pt, &ppgtt->base.pd, pde) { - if (pt->used_ptes || pt == scratch_pt) + if (atomic_read(&pt->used_ptes) || pt == scratch_pt) continue; free_pt(&ppgtt->base.vm, pt); diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index 73b6608740f2..152a03560c22 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -248,25 +248,28 @@ struct i915_page_dma { struct i915_page_table { struct i915_page_dma base; - unsigned int used_ptes; + atomic_t used_ptes; }; struct i915_page_directory { struct i915_page_dma base; struct i915_page_table *page_table[I915_PDES]; /* PDEs */ - unsigned int used_pdes; + atomic_t used_pdes; + spinlock_t lock; }; struct i915_page_directory_pointer { struct i915_page_dma base; struct i915_page_directory **page_directory; - unsigned int used_pdpes; + atomic_t used_pdpes; + spinlock_t lock; }; struct i915_pml4 { struct i915_page_dma base; struct i915_page_directory_pointer *pdps[GEN8_PML4ES_PER_PML4]; + spinlock_t lock; }; struct i915_vma_ops { -- cgit v1.2.3 From 2740e81aad0f6e3ce48fea03a4cf4a4f36a717b8 Mon Sep 17 00:00:00 2001 From: Swati Sharma Date: Wed, 29 May 2019 15:20:51 +0530 Subject: drm/i915: Introduce vfunc read_luts() to create hw lut In this patch, a vfunc read_luts() is introduced to create a hw lut i.e. lut having values read from gamma/degamma registers which will later be used to compare with sw lut to validate gamma/degamma lut values. v3: -Rebase v4: -Renamed intel_get_color_config to intel_color_get_config [Jani] -Wrapped get_color_config() [Jani] v5: -Renamed intel_color_get_config() to intel_color_read_luts() -Renamed get_color_config to read_luts v6: -Renamed intel_color_read_luts() back to intel_color_get_config() [Jani and Ville] Signed-off-by: Swati Sharma Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/1559123462-7343-2-git-send-email-swati2.sharma@intel.com --- drivers/gpu/drm/i915/i915_drv.h | 1 + drivers/gpu/drm/i915/intel_color.c | 8 ++++++++ drivers/gpu/drm/i915/intel_color.h | 1 + 3 files changed, 10 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index a0539b837df5..b72ad58dd5a5 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -340,6 +340,7 @@ struct drm_i915_display_funcs { * involved with the same commit. 
*/ void (*load_luts)(const struct intel_crtc_state *crtc_state); + void (*read_luts)(struct intel_crtc_state *crtc_state); }; struct intel_csr { diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c index 45649904ba5c..0b8cf3e8c963 100644 --- a/drivers/gpu/drm/i915/intel_color.c +++ b/drivers/gpu/drm/i915/intel_color.c @@ -879,6 +879,14 @@ int intel_color_check(struct intel_crtc_state *crtc_state) return dev_priv->display.color_check(crtc_state); } +void intel_color_get_config(struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + + if (dev_priv->display.read_luts) + dev_priv->display.read_luts(crtc_state); +} + static bool need_plane_update(struct intel_plane *plane, const struct intel_crtc_state *crtc_state) { diff --git a/drivers/gpu/drm/i915/intel_color.h b/drivers/gpu/drm/i915/intel_color.h index b8a3ce609587..057e8ac63555 100644 --- a/drivers/gpu/drm/i915/intel_color.h +++ b/drivers/gpu/drm/i915/intel_color.h @@ -13,5 +13,6 @@ void intel_color_init(struct intel_crtc *crtc); int intel_color_check(struct intel_crtc_state *crtc_state); void intel_color_commit(const struct intel_crtc_state *crtc_state); void intel_color_load_luts(const struct intel_crtc_state *crtc_state); +void intel_color_get_config(struct intel_crtc_state *crtc_state); #endif /* __INTEL_COLOR_H__ */ -- cgit v1.2.3 From 3633e5116dfb01f98de4c88daf2c673c52290755 Mon Sep 17 00:00:00 2001 From: Swati Sharma Date: Wed, 29 May 2019 15:20:52 +0530 Subject: drm/i915: Enable intel_color_get_config() In this patch, intel_color_get_config() is enabled and support for read_luts() will be added platform by platform incrementally in the follow-up patches. v4: -Renamed intel_get_color_config to intel_color_get_config [Jani] -Added the user early on such that support for get_color_config() can be added platform by platform incrementally [Jani] v5: -Incorrect place for calling intel_color_get_config() in haswell_get_pipe_config() [Ville] v6: -Renamed intel_color_read_luts() to intel_color_get_config() [Jani and Ville] Signed-off-by: Swati Sharma Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/1559123462-7343-3-git-send-email-swati2.sharma@intel.com --- drivers/gpu/drm/i915/intel_display.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index fc47ed0247c5..122a075b6174 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -8661,6 +8661,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, pipe_config->cgm_mode = I915_READ(CGM_PIPE_MODE(crtc->pipe)); i9xx_get_pipe_color_config(pipe_config); + intel_color_get_config(pipe_config); if (INTEL_GEN(dev_priv) < 4) pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE; @@ -9758,6 +9759,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc, pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe)); i9xx_get_pipe_color_config(pipe_config); + intel_color_get_config(pipe_config); if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) { struct intel_shared_dpll *pll; @@ -10206,6 +10208,8 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, i9xx_get_pipe_color_config(pipe_config); } + intel_color_get_config(pipe_config); + power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe); WARN_ON(power_domain_mask & BIT_ULL(power_domain)); -- cgit v1.2.3 From 36a0f92020dc8794d3aa69b7fb4c5d2bf99b0099 
Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Fri, 31 May 2019 16:14:51 +0300 Subject: drm/i915/bios: make child device order the priority order MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Make the child device order the priority order in sanitizing DDC pin and AUX CH. First come, first served. Cc: Ville Syrjälä Reviewed-by: Ville Syrjälä Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/34ab98880386a095422521ad39f4c080eeb3989a.1559308269.git.jani.nikula@intel.com --- drivers/gpu/drm/i915/intel_bios.c | 32 +++++++++++++++----------------- 1 file changed, 15 insertions(+), 17 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index a0b708f7f384..0a1b9a4a1b71 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -1242,8 +1242,7 @@ static u8 translate_iboost(u8 val) static void sanitize_ddc_pin(struct drm_i915_private *dev_priv, enum port port) { - const struct ddi_vbt_port_info *info = - &dev_priv->vbt.ddi_port_info[port]; + struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port]; enum port p; if (!info->alternate_ddc_pin) @@ -1258,8 +1257,8 @@ static void sanitize_ddc_pin(struct drm_i915_private *dev_priv, DRM_DEBUG_KMS("port %c trying to use the same DDC pin (0x%x) as port %c, " "disabling port %c DVI/HDMI support\n", - port_name(p), i->alternate_ddc_pin, - port_name(port), port_name(p)); + port_name(port), info->alternate_ddc_pin, + port_name(p), port_name(port)); /* * If we have multiple ports supposedly sharing the @@ -1267,20 +1266,19 @@ static void sanitize_ddc_pin(struct drm_i915_private *dev_priv, * port. Otherwise they share the same ddc bin and * system couldn't communicate with them separately. * - * Due to parsing the ports in child device order, - * a later device will always clobber an earlier one. + * Give child device order the priority, first come first + * served. */ - i->supports_dvi = false; - i->supports_hdmi = false; - i->alternate_ddc_pin = 0; + info->supports_dvi = false; + info->supports_hdmi = false; + info->alternate_ddc_pin = 0; } } static void sanitize_aux_ch(struct drm_i915_private *dev_priv, enum port port) { - const struct ddi_vbt_port_info *info = - &dev_priv->vbt.ddi_port_info[port]; + struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port]; enum port p; if (!info->alternate_aux_channel) @@ -1295,8 +1293,8 @@ static void sanitize_aux_ch(struct drm_i915_private *dev_priv, DRM_DEBUG_KMS("port %c trying to use the same AUX CH (0x%x) as port %c, " "disabling port %c DP support\n", - port_name(p), i->alternate_aux_channel, - port_name(port), port_name(p)); + port_name(port), info->alternate_aux_channel, + port_name(p), port_name(port)); /* * If we have multiple ports supposedlt sharing the @@ -1304,11 +1302,11 @@ static void sanitize_aux_ch(struct drm_i915_private *dev_priv, * port. Otherwise they share the same aux channel * and system couldn't communicate with them separately. * - * Due to parsing the ports in child device order, - * a later device will always clobber an earlier one. + * Give child device order the priority, first come first + * served. 
*/ - i->supports_dp = false; - i->alternate_aux_channel = 0; + info->supports_dp = false; + info->alternate_aux_channel = 0; } } -- cgit v1.2.3 From 7679f9b8f6ee393ad204c6b2e09c7ae44a15a7c7 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Fri, 31 May 2019 16:14:52 +0300 Subject: drm/i915/bios: store child device pointer in DDI port info MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This allows us to avoid iterating the child devices in some cases. Also replace the presence bit with child device being non-NULL, and set the child device pointer last to allow us to take advantage of it in follow-up work. Cc: Ville Syrjälä Reviewed-by: Ville Syrjälä Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/ceccb75d637af3134d0328d67cbd6623932f94db.1559308269.git.jani.nikula@intel.com --- drivers/gpu/drm/i915/i915_drv.h | 4 +++- drivers/gpu/drm/i915/intel_bios.c | 10 +++++----- 2 files changed, 8 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index b72ad58dd5a5..89bf1e34feaa 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -834,6 +834,9 @@ struct i915_gem_mm { #define I915_ENGINE_WEDGED_TIMEOUT (60 * HZ) /* Reset but no recovery? */ struct ddi_vbt_port_info { + /* Non-NULL if port present. */ + const struct child_device_config *child; + int max_tmds_clock; /* @@ -844,7 +847,6 @@ struct ddi_vbt_port_info { #define HDMI_LEVEL_SHIFT_UNKNOWN 0xff u8 hdmi_level_shift; - u8 present:1; u8 supports_dvi:1; u8 supports_hdmi:1; u8 supports_dp:1; diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 0a1b9a4a1b71..325b8c8dfa5e 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -1251,7 +1251,7 @@ static void sanitize_ddc_pin(struct drm_i915_private *dev_priv, for (p = PORT_A; p < I915_MAX_PORTS; p++) { struct ddi_vbt_port_info *i = &dev_priv->vbt.ddi_port_info[p]; - if (p == port || !i->present || + if (p == port || !i->child || info->alternate_ddc_pin != i->alternate_ddc_pin) continue; @@ -1287,7 +1287,7 @@ static void sanitize_aux_ch(struct drm_i915_private *dev_priv, for (p = PORT_A; p < I915_MAX_PORTS; p++) { struct ddi_vbt_port_info *i = &dev_priv->vbt.ddi_port_info[p]; - if (p == port || !i->present || + if (p == port || !i->child || info->alternate_aux_channel != i->alternate_aux_channel) continue; @@ -1395,14 +1395,12 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, info = &dev_priv->vbt.ddi_port_info[port]; - if (info->present) { + if (info->child) { DRM_DEBUG_KMS("More than one child device for port %c in VBT, using the first.\n", port_name(port)); return; } - info->present = true; - is_dvi = child->device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING; is_dp = child->device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT; is_crt = child->device_type & DEVICE_TYPE_ANALOG_OUTPUT; @@ -1530,6 +1528,8 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, DRM_DEBUG_KMS("VBT DP max link rate for port %c: %d\n", port_name(port), info->dp_max_link_rate); } + + info->child = child; } static void parse_ddi_ports(struct drm_i915_private *dev_priv, u8 bdb_version) -- cgit v1.2.3 From cc21f01137a48919c1c5a443b341380ed3ea799a Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Fri, 31 May 2019 16:14:53 +0300 Subject: drm/i915/bios: refactor DDC pin and AUX CH sanitize functions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 
Content-Transfer-Encoding: 8bit Add separate functions to get the port by DDC pin and AUX channel. Cc: Ville Syrjälä Reviewed-by: Ville Syrjälä Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/a8b4afc08dd03e08c1403531e7f5ab33d777b1db.1559308269.git.jani.nikula@intel.com --- drivers/gpu/drm/i915/intel_bios.c | 48 +++++++++++++++++++++++++++------------ 1 file changed, 34 insertions(+), 14 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 325b8c8dfa5e..5cd4ddc8f5dd 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -1239,6 +1239,21 @@ static u8 translate_iboost(u8 val) return mapping[val]; } +static enum port get_port_by_ddc_pin(struct drm_i915_private *i915, u8 ddc_pin) +{ + const struct ddi_vbt_port_info *info; + enum port port; + + for (port = PORT_A; port < I915_MAX_PORTS; port++) { + info = &i915->vbt.ddi_port_info[port]; + + if (info->child && ddc_pin == info->alternate_ddc_pin) + return port; + } + + return PORT_NONE; +} + static void sanitize_ddc_pin(struct drm_i915_private *dev_priv, enum port port) { @@ -1248,13 +1263,8 @@ static void sanitize_ddc_pin(struct drm_i915_private *dev_priv, if (!info->alternate_ddc_pin) return; - for (p = PORT_A; p < I915_MAX_PORTS; p++) { - struct ddi_vbt_port_info *i = &dev_priv->vbt.ddi_port_info[p]; - - if (p == port || !i->child || - info->alternate_ddc_pin != i->alternate_ddc_pin) - continue; - + p = get_port_by_ddc_pin(dev_priv, info->alternate_ddc_pin); + if (p != PORT_NONE) { DRM_DEBUG_KMS("port %c trying to use the same DDC pin (0x%x) as port %c, " "disabling port %c DVI/HDMI support\n", port_name(port), info->alternate_ddc_pin, @@ -1275,6 +1285,21 @@ static void sanitize_ddc_pin(struct drm_i915_private *dev_priv, } } +static enum port get_port_by_aux_ch(struct drm_i915_private *i915, u8 aux_ch) +{ + const struct ddi_vbt_port_info *info; + enum port port; + + for (port = PORT_A; port < I915_MAX_PORTS; port++) { + info = &i915->vbt.ddi_port_info[port]; + + if (info->child && aux_ch == info->alternate_aux_channel) + return port; + } + + return PORT_NONE; +} + static void sanitize_aux_ch(struct drm_i915_private *dev_priv, enum port port) { @@ -1284,13 +1309,8 @@ static void sanitize_aux_ch(struct drm_i915_private *dev_priv, if (!info->alternate_aux_channel) return; - for (p = PORT_A; p < I915_MAX_PORTS; p++) { - struct ddi_vbt_port_info *i = &dev_priv->vbt.ddi_port_info[p]; - - if (p == port || !i->child || - info->alternate_aux_channel != i->alternate_aux_channel) - continue; - + p = get_port_by_aux_ch(dev_priv, info->alternate_aux_channel); + if (p != PORT_NONE) { DRM_DEBUG_KMS("port %c trying to use the same AUX CH (0x%x) as port %c, " "disabling port %c DP support\n", port_name(port), info->alternate_aux_channel, -- cgit v1.2.3 From c72deaa47f453fc698f7ac2bb87fd09ad96696de Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Fri, 31 May 2019 16:14:54 +0300 Subject: drm/i915/bios: use port info child pointer to determine HPD invert MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Avoid iterating the child devices. 
Cc: Ville Syrjälä Reviewed-by: Ville Syrjälä Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/841c226efa424701161dd9f1793e0cf96b45a07c.1559308269.git.jani.nikula@intel.com --- drivers/gpu/drm/i915/intel_bios.c | 39 ++++++--------------------------------- drivers/gpu/drm/i915/intel_bios.h | 2 +- 2 files changed, 7 insertions(+), 34 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 5cd4ddc8f5dd..1f34ffa96219 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -2169,49 +2169,22 @@ bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, /** * intel_bios_is_port_hpd_inverted - is HPD inverted for %port - * @dev_priv: i915 device instance + * @i915: i915 device instance * @port: port to check * * Return true if HPD should be inverted for %port. */ bool -intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv, +intel_bios_is_port_hpd_inverted(const struct drm_i915_private *i915, enum port port) { - const struct child_device_config *child; - int i; + const struct child_device_config *child = + i915->vbt.ddi_port_info[port].child; - if (WARN_ON_ONCE(!IS_GEN9_LP(dev_priv))) + if (WARN_ON_ONCE(!IS_GEN9_LP(i915))) return false; - for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { - child = dev_priv->vbt.child_dev + i; - - if (!child->hpd_invert) - continue; - - switch (child->dvo_port) { - case DVO_PORT_DPA: - case DVO_PORT_HDMIA: - if (port == PORT_A) - return true; - break; - case DVO_PORT_DPB: - case DVO_PORT_HDMIB: - if (port == PORT_B) - return true; - break; - case DVO_PORT_DPC: - case DVO_PORT_HDMIC: - if (port == PORT_C) - return true; - break; - default: - break; - } - } - - return false; + return child && child->hpd_invert; } /** diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h index 7bac53f219e1..55fb0818926c 100644 --- a/drivers/gpu/drm/i915/intel_bios.h +++ b/drivers/gpu/drm/i915/intel_bios.h @@ -235,7 +235,7 @@ bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port por bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port); bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port); bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port); -bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv, +bool intel_bios_is_port_hpd_inverted(const struct drm_i915_private *i915, enum port port); bool intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv, enum port port); -- cgit v1.2.3 From a7475e5dae8e69e7b28bee5d98fb808bc6d426a2 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Fri, 31 May 2019 16:14:55 +0300 Subject: drm/i915/bios: use port info child pointer to determine LSPCON presence MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Avoid iterating the child devices. This should be a non-functional change, but theoretically this might enable LSPCON on some extra ports with buggy VBTs. 
Cc: Ville Syrjälä Reviewed-by: Ville Syrjälä Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/4bbaff16abb3461ccb67abf1537f68bb50823390.1559308269.git.jani.nikula@intel.com --- drivers/gpu/drm/i915/intel_bios.c | 52 +++++---------------------------------- drivers/gpu/drm/i915/intel_bios.h | 2 +- 2 files changed, 7 insertions(+), 47 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 1f34ffa96219..8cff17b5f891 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -2189,59 +2189,19 @@ intel_bios_is_port_hpd_inverted(const struct drm_i915_private *i915, /** * intel_bios_is_lspcon_present - if LSPCON is attached on %port - * @dev_priv: i915 device instance + * @i915: i915 device instance * @port: port to check * * Return true if LSPCON is present on this port */ bool -intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv, - enum port port) +intel_bios_is_lspcon_present(const struct drm_i915_private *i915, + enum port port) { - const struct child_device_config *child; - int i; - - if (!HAS_LSPCON(dev_priv)) - return false; - - for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { - child = dev_priv->vbt.child_dev + i; - - if (!child->lspcon) - continue; - - switch (child->dvo_port) { - case DVO_PORT_DPA: - case DVO_PORT_HDMIA: - if (port == PORT_A) - return true; - break; - case DVO_PORT_DPB: - case DVO_PORT_HDMIB: - if (port == PORT_B) - return true; - break; - case DVO_PORT_DPC: - case DVO_PORT_HDMIC: - if (port == PORT_C) - return true; - break; - case DVO_PORT_DPD: - case DVO_PORT_HDMID: - if (port == PORT_D) - return true; - break; - case DVO_PORT_DPF: - case DVO_PORT_HDMIF: - if (port == PORT_F) - return true; - break; - default: - break; - } - } + const struct child_device_config *child = + i915->vbt.ddi_port_info[port].child; - return false; + return HAS_LSPCON(i915) && child && child->lspcon; } enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv, diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h index 55fb0818926c..4e42cfaf61a7 100644 --- a/drivers/gpu/drm/i915/intel_bios.h +++ b/drivers/gpu/drm/i915/intel_bios.h @@ -237,7 +237,7 @@ bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum por bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port); bool intel_bios_is_port_hpd_inverted(const struct drm_i915_private *i915, enum port port); -bool intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv, +bool intel_bios_is_lspcon_present(const struct drm_i915_private *i915, enum port port); enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv, enum port port); -- cgit v1.2.3 From 932cd15431567c72ccc6874e60ebe2095daf345c Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Fri, 31 May 2019 16:14:56 +0300 Subject: drm/i915/bios: clean up VBT port info debug logging MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Change the order, add some stylistic touches, and add LSPCON. 
Cc: Ville Syrjälä Reviewed-by: Ville Syrjälä Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/3c9aaedcacaeaca24b2a35bf2af680dd118823d4.1559308269.git.jani.nikula@intel.com --- drivers/gpu/drm/i915/intel_bios.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 8cff17b5f891..81ca94498178 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -1445,8 +1445,9 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, if (bdb_version >= 209) info->supports_tbt = child->tbt; - DRM_DEBUG_KMS("Port %c VBT info: DP:%d HDMI:%d DVI:%d EDP:%d CRT:%d TCUSB:%d TBT:%d\n", - port_name(port), is_dp, is_hdmi, is_dvi, is_edp, is_crt, + DRM_DEBUG_KMS("Port %c VBT info: CRT:%d DVI:%d HDMI:%d DP:%d eDP:%d LSPCON:%d USB-Type-C:%d TBT:%d\n", + port_name(port), is_crt, is_dvi, is_hdmi, is_dp, is_edp, + HAS_LSPCON(dev_priv) && child->lspcon, info->supports_typec_usb, info->supports_tbt); if (is_edp && is_dvi) -- cgit v1.2.3 From aafe16e38df2bbc021b0e4992ab4efee09303a6e Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Fri, 31 May 2019 16:14:57 +0300 Subject: drm/i915/bios: remove unused, obsolete VBT definitions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We've carried this baggage for more than a decade. Time to let it go. Cc: Ville Syrjälä Reviewed-by: Ville Syrjälä Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/00058e6507aa7a62fce1d2a6de223cbbfabb204b.1559308269.git.jani.nikula@intel.com --- drivers/gpu/drm/i915/intel_vbt_defs.h | 180 ---------------------------------- 1 file changed, 180 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h index fdbbb9a53804..3f870ac48840 100644 --- a/drivers/gpu/drm/i915/intel_vbt_defs.h +++ b/drivers/gpu/drm/i915/intel_vbt_defs.h @@ -75,25 +75,6 @@ struct bdb_header { u16 bdb_size; } __packed; -/* strictly speaking, this is a "skip" block, but it has interesting info */ -struct vbios_data { - u8 type; /* 0 == desktop, 1 == mobile */ - u8 relstage; - u8 chipset; - u8 lvds_present:1; - u8 tv_present:1; - u8 rsvd2:6; /* finish byte */ - u8 rsvd3[4]; - u8 signon[155]; - u8 copyright[61]; - u16 code_segment; - u8 dos_boot_mode; - u8 bandwidth_percent; - u8 rsvd4; /* popup memory size */ - u8 resize_pci_bios; - u8 rsvd5; /* is crt already on ddc2 */ -} __packed; - /* * There are several types of BIOS data blocks (BDBs), each block has * an ID and size in the first 3 bytes (ID in first, size in next 2). 
@@ -601,35 +582,6 @@ struct bdb_lfp_backlight_data { struct bdb_lfp_backlight_control_method backlight_control[16]; } __packed; -struct aimdb_header { - char signature[16]; - char oem_device[20]; - u16 aimdb_version; - u16 aimdb_header_size; - u16 aimdb_size; -} __packed; - -struct aimdb_block { - u8 aimdb_id; - u16 aimdb_size; -} __packed; - -struct vch_panel_data { - u16 fp_timing_offset; - u8 fp_timing_size; - u16 dvo_timing_offset; - u8 dvo_timing_size; - u16 text_fitting_offset; - u8 text_fitting_size; - u16 graphics_fitting_offset; - u8 graphics_fitting_size; -} __packed; - -struct vch_bdb_22 { - struct aimdb_block aimdb_block; - struct vch_panel_data panels[16]; -} __packed; - struct bdb_sdvo_lvds_options { u8 panel_backlight; u8 h40_set_panel_type; @@ -781,127 +733,6 @@ struct bdb_psr { struct psr_table psr_table[16]; } __packed; -/* - * Driver<->VBIOS interaction occurs through scratch bits in - * GR18 & SWF*. - */ - -/* GR18 bits are set on display switch and hotkey events */ -#define GR18_DRIVER_SWITCH_EN (1<<7) /* 0: VBIOS control, 1: driver control */ -#define GR18_HOTKEY_MASK 0x78 /* See also SWF4 15:0 */ -#define GR18_HK_NONE (0x0<<3) -#define GR18_HK_LFP_STRETCH (0x1<<3) -#define GR18_HK_TOGGLE_DISP (0x2<<3) -#define GR18_HK_DISP_SWITCH (0x4<<3) /* see SWF14 15:0 for what to enable */ -#define GR18_HK_POPUP_DISABLED (0x6<<3) -#define GR18_HK_POPUP_ENABLED (0x7<<3) -#define GR18_HK_PFIT (0x8<<3) -#define GR18_HK_APM_CHANGE (0xa<<3) -#define GR18_HK_MULTIPLE (0xc<<3) -#define GR18_USER_INT_EN (1<<2) -#define GR18_A0000_FLUSH_EN (1<<1) -#define GR18_SMM_EN (1<<0) - -/* Set by driver, cleared by VBIOS */ -#define SWF00_YRES_SHIFT 16 -#define SWF00_XRES_SHIFT 0 -#define SWF00_RES_MASK 0xffff - -/* Set by VBIOS at boot time and driver at runtime */ -#define SWF01_TV2_FORMAT_SHIFT 8 -#define SWF01_TV1_FORMAT_SHIFT 0 -#define SWF01_TV_FORMAT_MASK 0xffff - -#define SWF10_VBIOS_BLC_I2C_EN (1<<29) -#define SWF10_GTT_OVERRIDE_EN (1<<28) -#define SWF10_LFP_DPMS_OVR (1<<27) /* override DPMS on display switch */ -#define SWF10_ACTIVE_TOGGLE_LIST_MASK (7<<24) -#define SWF10_OLD_TOGGLE 0x0 -#define SWF10_TOGGLE_LIST_1 0x1 -#define SWF10_TOGGLE_LIST_2 0x2 -#define SWF10_TOGGLE_LIST_3 0x3 -#define SWF10_TOGGLE_LIST_4 0x4 -#define SWF10_PANNING_EN (1<<23) -#define SWF10_DRIVER_LOADED (1<<22) -#define SWF10_EXTENDED_DESKTOP (1<<21) -#define SWF10_EXCLUSIVE_MODE (1<<20) -#define SWF10_OVERLAY_EN (1<<19) -#define SWF10_PLANEB_HOLDOFF (1<<18) -#define SWF10_PLANEA_HOLDOFF (1<<17) -#define SWF10_VGA_HOLDOFF (1<<16) -#define SWF10_ACTIVE_DISP_MASK 0xffff -#define SWF10_PIPEB_LFP2 (1<<15) -#define SWF10_PIPEB_EFP2 (1<<14) -#define SWF10_PIPEB_TV2 (1<<13) -#define SWF10_PIPEB_CRT2 (1<<12) -#define SWF10_PIPEB_LFP (1<<11) -#define SWF10_PIPEB_EFP (1<<10) -#define SWF10_PIPEB_TV (1<<9) -#define SWF10_PIPEB_CRT (1<<8) -#define SWF10_PIPEA_LFP2 (1<<7) -#define SWF10_PIPEA_EFP2 (1<<6) -#define SWF10_PIPEA_TV2 (1<<5) -#define SWF10_PIPEA_CRT2 (1<<4) -#define SWF10_PIPEA_LFP (1<<3) -#define SWF10_PIPEA_EFP (1<<2) -#define SWF10_PIPEA_TV (1<<1) -#define SWF10_PIPEA_CRT (1<<0) - -#define SWF11_MEMORY_SIZE_SHIFT 16 -#define SWF11_SV_TEST_EN (1<<15) -#define SWF11_IS_AGP (1<<14) -#define SWF11_DISPLAY_HOLDOFF (1<<13) -#define SWF11_DPMS_REDUCED (1<<12) -#define SWF11_IS_VBE_MODE (1<<11) -#define SWF11_PIPEB_ACCESS (1<<10) /* 0 here means pipe a */ -#define SWF11_DPMS_MASK 0x07 -#define SWF11_DPMS_OFF (1<<2) -#define SWF11_DPMS_SUSPEND (1<<1) -#define SWF11_DPMS_STANDBY (1<<0) -#define SWF11_DPMS_ON 0 - -#define 
SWF14_GFX_PFIT_EN (1<<31) -#define SWF14_TEXT_PFIT_EN (1<<30) -#define SWF14_LID_STATUS_CLOSED (1<<29) /* 0 here means open */ -#define SWF14_POPUP_EN (1<<28) -#define SWF14_DISPLAY_HOLDOFF (1<<27) -#define SWF14_DISP_DETECT_EN (1<<26) -#define SWF14_DOCKING_STATUS_DOCKED (1<<25) /* 0 here means undocked */ -#define SWF14_DRIVER_STATUS (1<<24) -#define SWF14_OS_TYPE_WIN9X (1<<23) -#define SWF14_OS_TYPE_WINNT (1<<22) -/* 21:19 rsvd */ -#define SWF14_PM_TYPE_MASK 0x00070000 -#define SWF14_PM_ACPI_VIDEO (0x4 << 16) -#define SWF14_PM_ACPI (0x3 << 16) -#define SWF14_PM_APM_12 (0x2 << 16) -#define SWF14_PM_APM_11 (0x1 << 16) -#define SWF14_HK_REQUEST_MASK 0x0000ffff /* see GR18 6:3 for event type */ - /* if GR18 indicates a display switch */ -#define SWF14_DS_PIPEB_LFP2_EN (1<<15) -#define SWF14_DS_PIPEB_EFP2_EN (1<<14) -#define SWF14_DS_PIPEB_TV2_EN (1<<13) -#define SWF14_DS_PIPEB_CRT2_EN (1<<12) -#define SWF14_DS_PIPEB_LFP_EN (1<<11) -#define SWF14_DS_PIPEB_EFP_EN (1<<10) -#define SWF14_DS_PIPEB_TV_EN (1<<9) -#define SWF14_DS_PIPEB_CRT_EN (1<<8) -#define SWF14_DS_PIPEA_LFP2_EN (1<<7) -#define SWF14_DS_PIPEA_EFP2_EN (1<<6) -#define SWF14_DS_PIPEA_TV2_EN (1<<5) -#define SWF14_DS_PIPEA_CRT2_EN (1<<4) -#define SWF14_DS_PIPEA_LFP_EN (1<<3) -#define SWF14_DS_PIPEA_EFP_EN (1<<2) -#define SWF14_DS_PIPEA_TV_EN (1<<1) -#define SWF14_DS_PIPEA_CRT_EN (1<<0) - /* if GR18 indicates a panel fitting request */ -#define SWF14_PFIT_EN (1<<0) /* 0 means disable */ - /* if GR18 indicates an APM change request */ -#define SWF14_APM_HIBERNATE 0x4 -#define SWF14_APM_SUSPEND 0x3 -#define SWF14_APM_STANDBY 0x1 -#define SWF14_APM_RESTORE 0x0 - /* Block 52 contains MIPI configuration block * 6 * bdb_mipi_config, followed by 6 pps data block * block below @@ -922,15 +753,4 @@ struct bdb_mipi_sequence { u8 data[0]; } __packed; -enum mipi_gpio_pin_index { - MIPI_GPIO_UNDEFINED = 0, - MIPI_GPIO_PANEL_ENABLE, - MIPI_GPIO_BL_ENABLE, - MIPI_GPIO_PWM_ENABLE, - MIPI_GPIO_RESET_N, - MIPI_GPIO_PWR_DOWN_R, - MIPI_GPIO_STDBY_RST_N, - MIPI_GPIO_MAX -}; - #endif /* _INTEL_VBT_DEFS_H_ */ -- cgit v1.2.3 From f87f6599c8436b7e963df7a6dcedc253b4b37cd6 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Fri, 31 May 2019 16:14:58 +0300 Subject: drm/i915/bios: reserve struct bdb_ prefix for BDB blocks MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Don't use bdb_ prefixes for structs within blocks. Add a new bdb struct for SDVO panel DTDs for completeness. 
Cc: Ville Syrjälä Reviewed-by: Ville Syrjälä Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/cd062ced1bbbe07e087212b42c83e5ac64a22a49.1559308269.git.jani.nikula@intel.com --- drivers/gpu/drm/i915/intel_bios.c | 12 ++++++------ drivers/gpu/drm/i915/intel_vbt_defs.h | 20 ++++++++++++-------- 2 files changed, 18 insertions(+), 14 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 81ca94498178..b5aedb605c05 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -302,7 +302,7 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv, const struct bdb_header *bdb) { const struct bdb_lfp_backlight_data *backlight_data; - const struct bdb_lfp_backlight_data_entry *entry; + const struct lfp_backlight_data_entry *entry; int panel_type = dev_priv->vbt.panel_type; backlight_data = find_section(bdb, BDB_LVDS_BACKLIGHT); @@ -327,7 +327,7 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv, dev_priv->vbt.backlight.type = INTEL_BACKLIGHT_DISPLAY_DDI; if (bdb->version >= 191 && get_blocksize(backlight_data) >= sizeof(*backlight_data)) { - const struct bdb_lfp_backlight_control_method *method; + const struct lfp_backlight_control_method *method; method = &backlight_data->backlight_control[panel_type]; dev_priv->vbt.backlight.type = method->type; @@ -351,7 +351,7 @@ static void parse_sdvo_panel_data(struct drm_i915_private *dev_priv, const struct bdb_header *bdb) { - const struct lvds_dvo_timing *dvo_timing; + const struct bdb_sdvo_panel_dtds *dtds; struct drm_display_mode *panel_fixed_mode; int index; @@ -371,15 +371,15 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv, index = sdvo_lvds_options->panel_type; } - dvo_timing = find_section(bdb, BDB_SDVO_PANEL_DTDS); - if (!dvo_timing) + dtds = find_section(bdb, BDB_SDVO_PANEL_DTDS); + if (!dtds) return; panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL); if (!panel_fixed_mode) return; - fill_detail_timing_data(panel_fixed_mode, dvo_timing + index); + fill_detail_timing_data(panel_fixed_mode, &dtds->dtds[index]); dev_priv->vbt.sdvo_lvds_vbt_mode = panel_fixed_mode; diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h index 3f870ac48840..0566c52b058a 100644 --- a/drivers/gpu/drm/i915/intel_vbt_defs.h +++ b/drivers/gpu/drm/i915/intel_vbt_defs.h @@ -477,7 +477,7 @@ struct bdb_lvds_options { } __packed; /* LFP pointer table contains entries to the struct below */ -struct bdb_lvds_lfp_data_ptr { +struct lvds_lfp_data_ptr { u16 fp_timing_offset; /* offsets are from start of bdb */ u8 fp_table_size; u16 dvo_timing_offset; @@ -488,7 +488,7 @@ struct bdb_lvds_lfp_data_ptr { struct bdb_lvds_lfp_data_ptrs { u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */ - struct bdb_lvds_lfp_data_ptr ptr[16]; + struct lvds_lfp_data_ptr ptr[16]; } __packed; /* LFP data has 3 blocks per entry */ @@ -547,20 +547,24 @@ struct lvds_pnp_id { u8 mfg_year; } __packed; -struct bdb_lvds_lfp_data_entry { +struct lvds_lfp_data_entry { struct lvds_fp_timing fp_timing; struct lvds_dvo_timing dvo_timing; struct lvds_pnp_id pnp_id; } __packed; struct bdb_lvds_lfp_data { - struct bdb_lvds_lfp_data_entry data[16]; + struct lvds_lfp_data_entry data[16]; +} __packed; + +struct bdb_sdvo_panel_dtds { + struct lvds_dvo_timing dtds[4]; } __packed; #define BDB_BACKLIGHT_TYPE_NONE 0 #define BDB_BACKLIGHT_TYPE_PWM 2 -struct bdb_lfp_backlight_data_entry { +struct 
lfp_backlight_data_entry { u8 type:2; u8 active_low_pwm:1; u8 obsolete1:5; @@ -570,16 +574,16 @@ struct bdb_lfp_backlight_data_entry { u8 obsolete3; } __packed; -struct bdb_lfp_backlight_control_method { +struct lfp_backlight_control_method { u8 type:4; u8 controller:4; } __packed; struct bdb_lfp_backlight_data { u8 entry_size; - struct bdb_lfp_backlight_data_entry data[16]; + struct lfp_backlight_data_entry data[16]; u8 level[16]; - struct bdb_lfp_backlight_control_method backlight_control[16]; + struct lfp_backlight_control_method backlight_control[16]; } __packed; struct bdb_sdvo_lvds_options { -- cgit v1.2.3 From 231dcffc234f13ce5a0816dfecf2603543ef1d36 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Fri, 31 May 2019 16:14:59 +0300 Subject: drm/i915/bios: add BDB block comments before definitions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use the comments verbatim from the spec to help find the right block. No functional changes. Cc: Ville Syrjälä Reviewed-by: Ville Syrjälä Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/52c32be96bd605d7a9f94accbd4dbe7718849f93.1559308269.git.jani.nikula@intel.com --- drivers/gpu/drm/i915/intel_vbt_defs.h | 57 ++++++++++++++++++++++++++++++----- 1 file changed, 50 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h index 0566c52b058a..7dbe8369baf9 100644 --- a/drivers/gpu/drm/i915/intel_vbt_defs.h +++ b/drivers/gpu/drm/i915/intel_vbt_defs.h @@ -116,6 +116,10 @@ struct bdb_header { #define BDB_MIPI_SEQUENCE 53 #define BDB_SKIP 254 /* VBIOS private block, ignore */ +/* + * Block 1 - General Bit Definitions + */ + struct bdb_general_features { /* bits 1 */ u8 panel_fitting:2; @@ -157,6 +161,10 @@ struct bdb_general_features { u8 rsvd11:2; /* finish byte */ } __packed; +/* + * Block 2 - General Bytes Definition + */ + /* pre-915 */ #define GPIO_PIN_DVI_LVDS 0x03 /* "DVI/LVDS DDC GPIO pins" */ #define GPIO_PIN_ADD_I2C 0x05 /* "ADDCARD I2C GPIO pins" */ @@ -447,6 +455,10 @@ struct bdb_general_definitions { u8 devices[0]; } __packed; +/* + * Block 40 - LFP Data Block + */ + /* Mask for DRRS / Panel Channel / SSC / BLT control bits extraction */ #define MODE_MASK 0x3 @@ -476,6 +488,10 @@ struct bdb_lvds_options { u32 blt_control_type_bits; } __packed; +/* + * Block 41 - LFP Data Table Pointers + */ + /* LFP pointer table contains entries to the struct below */ struct lvds_lfp_data_ptr { u16 fp_timing_offset; /* offsets are from start of bdb */ @@ -491,6 +507,10 @@ struct bdb_lvds_lfp_data_ptrs { struct lvds_lfp_data_ptr ptr[16]; } __packed; +/* + * Block 42 - LFP Data Tables + */ + /* LFP data has 3 blocks per entry */ struct lvds_fp_timing { u16 x_res; @@ -557,10 +577,18 @@ struct bdb_lvds_lfp_data { struct lvds_lfp_data_entry data[16]; } __packed; +/* + * Block 23 - SDVO LVDS Panel DTDs + */ + struct bdb_sdvo_panel_dtds { struct lvds_dvo_timing dtds[4]; } __packed; +/* + * Block 43 - LFP Backlight Control Data Block + */ + #define BDB_BACKLIGHT_TYPE_NONE 0 #define BDB_BACKLIGHT_TYPE_PWM 2 @@ -586,6 +614,10 @@ struct bdb_lfp_backlight_data { struct lfp_backlight_control_method backlight_control[16]; } __packed; +/* + * Block 22 - SDVO LVDS General Options + */ + struct bdb_sdvo_lvds_options { u8 panel_backlight; u8 h40_set_panel_type; @@ -602,6 +634,9 @@ struct bdb_sdvo_lvds_options { u8 panel_misc_bits_4; } __packed; +/* + * Block 12 - Driver Features Data Block + */ #define 
BDB_DRIVER_FEATURE_NO_LVDS 0 #define BDB_DRIVER_FEATURE_INT_LVDS 1 @@ -662,6 +697,10 @@ struct bdb_driver_features { u16 pc_feature_valid:1; } __packed; +/* + * Block 27 - eDP VBT Block + */ + #define EDP_18BPP 0 #define EDP_24BPP 1 #define EDP_30BPP 2 @@ -714,6 +753,10 @@ struct bdb_edp { struct edp_full_link_params full_link_params[16]; /* 199 */ } __packed; +/* + * Block 9 - SRD Feature Block + */ + struct psr_table { /* Feature bits */ u8 full_link:1; @@ -737,10 +780,10 @@ struct bdb_psr { struct psr_table psr_table[16]; } __packed; -/* Block 52 contains MIPI configuration block - * 6 * bdb_mipi_config, followed by 6 pps data block - * block below +/* + * Block 52 - MIPI Configuration Block */ + #define MAX_MIPI_CONFIGURATIONS 6 struct bdb_mipi_config { @@ -748,13 +791,13 @@ struct bdb_mipi_config { struct mipi_pps_data pps[MAX_MIPI_CONFIGURATIONS]; } __packed; -/* Block 53 contains MIPI sequences as needed by the panel - * for enabling it. This block can be variable in size and - * can be maximum of 6 blocks +/* + * Block 53 - MIPI Sequence Block */ + struct bdb_mipi_sequence { u8 version; - u8 data[0]; + u8 data[0]; /* up to 6 variable length blocks */ } __packed; #endif /* _INTEL_VBT_DEFS_H_ */ -- cgit v1.2.3 From 843444ed13012c356246740926d96d7a5f9103fd Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Fri, 31 May 2019 16:15:00 +0300 Subject: drm/i915/bios: sort BDB block definitions using block ID MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Make it easier to find the right blocks. No functional changes. Cc: Ville Syrjälä Reviewed-by: Ville Syrjälä Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/56e1989940d83a670d087d531b7b6aa5dc4c0228.1559308269.git.jani.nikula@intel.com --- drivers/gpu/drm/i915/intel_vbt_defs.h | 372 +++++++++++++++++----------------- 1 file changed, 186 insertions(+), 186 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h index 7dbe8369baf9..608fa4135262 100644 --- a/drivers/gpu/drm/i915/intel_vbt_defs.h +++ b/drivers/gpu/drm/i915/intel_vbt_defs.h @@ -456,182 +456,30 @@ struct bdb_general_definitions { } __packed; /* - * Block 40 - LFP Data Block - */ - -/* Mask for DRRS / Panel Channel / SSC / BLT control bits extraction */ -#define MODE_MASK 0x3 - -struct bdb_lvds_options { - u8 panel_type; - u8 rsvd1; - /* LVDS capabilities, stored in a dword */ - u8 pfit_mode:2; - u8 pfit_text_mode_enhanced:1; - u8 pfit_gfx_mode_enhanced:1; - u8 pfit_ratio_auto:1; - u8 pixel_dither:1; - u8 lvds_edid:1; - u8 rsvd2:1; - u8 rsvd4; - /* LVDS Panel channel bits stored here */ - u32 lvds_panel_channel_bits; - /* LVDS SSC (Spread Spectrum Clock) bits stored here. 
*/ - u16 ssc_bits; - u16 ssc_freq; - u16 ssc_ddt; - /* Panel color depth defined here */ - u16 panel_color_depth; - /* LVDS panel type bits stored here */ - u32 dps_panel_type_bits; - /* LVDS backlight control type bits stored here */ - u32 blt_control_type_bits; -} __packed; - -/* - * Block 41 - LFP Data Table Pointers - */ - -/* LFP pointer table contains entries to the struct below */ -struct lvds_lfp_data_ptr { - u16 fp_timing_offset; /* offsets are from start of bdb */ - u8 fp_table_size; - u16 dvo_timing_offset; - u8 dvo_table_size; - u16 panel_pnp_id_offset; - u8 pnp_table_size; -} __packed; - -struct bdb_lvds_lfp_data_ptrs { - u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */ - struct lvds_lfp_data_ptr ptr[16]; -} __packed; - -/* - * Block 42 - LFP Data Tables - */ - -/* LFP data has 3 blocks per entry */ -struct lvds_fp_timing { - u16 x_res; - u16 y_res; - u32 lvds_reg; - u32 lvds_reg_val; - u32 pp_on_reg; - u32 pp_on_reg_val; - u32 pp_off_reg; - u32 pp_off_reg_val; - u32 pp_cycle_reg; - u32 pp_cycle_reg_val; - u32 pfit_reg; - u32 pfit_reg_val; - u16 terminator; -} __packed; - -struct lvds_dvo_timing { - u16 clock; /**< In 10khz */ - u8 hactive_lo; - u8 hblank_lo; - u8 hblank_hi:4; - u8 hactive_hi:4; - u8 vactive_lo; - u8 vblank_lo; - u8 vblank_hi:4; - u8 vactive_hi:4; - u8 hsync_off_lo; - u8 hsync_pulse_width_lo; - u8 vsync_pulse_width_lo:4; - u8 vsync_off_lo:4; - u8 vsync_pulse_width_hi:2; - u8 vsync_off_hi:2; - u8 hsync_pulse_width_hi:2; - u8 hsync_off_hi:2; - u8 himage_lo; - u8 vimage_lo; - u8 vimage_hi:4; - u8 himage_hi:4; - u8 h_border; - u8 v_border; - u8 rsvd1:3; - u8 digital:2; - u8 vsync_positive:1; - u8 hsync_positive:1; - u8 non_interlaced:1; -} __packed; - -struct lvds_pnp_id { - u16 mfg_name; - u16 product_code; - u32 serial; - u8 mfg_week; - u8 mfg_year; -} __packed; - -struct lvds_lfp_data_entry { - struct lvds_fp_timing fp_timing; - struct lvds_dvo_timing dvo_timing; - struct lvds_pnp_id pnp_id; -} __packed; - -struct bdb_lvds_lfp_data { - struct lvds_lfp_data_entry data[16]; -} __packed; - -/* - * Block 23 - SDVO LVDS Panel DTDs - */ - -struct bdb_sdvo_panel_dtds { - struct lvds_dvo_timing dtds[4]; -} __packed; - -/* - * Block 43 - LFP Backlight Control Data Block + * Block 9 - SRD Feature Block */ -#define BDB_BACKLIGHT_TYPE_NONE 0 -#define BDB_BACKLIGHT_TYPE_PWM 2 +struct psr_table { + /* Feature bits */ + u8 full_link:1; + u8 require_aux_to_wakeup:1; + u8 feature_bits_rsvd:6; -struct lfp_backlight_data_entry { - u8 type:2; - u8 active_low_pwm:1; - u8 obsolete1:5; - u16 pwm_freq_hz; - u8 min_brightness; - u8 obsolete2; - u8 obsolete3; -} __packed; + /* Wait times */ + u8 idle_frames:4; + u8 lines_to_wait:3; + u8 wait_times_rsvd:1; -struct lfp_backlight_control_method { - u8 type:4; - u8 controller:4; -} __packed; + /* TP wake up time in multiple of 100 */ + u16 tp1_wakeup_time; + u16 tp2_tp3_wakeup_time; -struct bdb_lfp_backlight_data { - u8 entry_size; - struct lfp_backlight_data_entry data[16]; - u8 level[16]; - struct lfp_backlight_control_method backlight_control[16]; + /* PSR2 TP2/TP3 wakeup time for 16 panels */ + u32 psr2_tp2_tp3_wakeup_time; } __packed; -/* - * Block 22 - SDVO LVDS General Options - */ - -struct bdb_sdvo_lvds_options { - u8 panel_backlight; - u8 h40_set_panel_type; - u8 panel_type; - u8 ssc_clk_freq; - u16 als_low_trip; - u16 als_high_trip; - u8 sclalarcoeff_tab_row_num; - u8 sclalarcoeff_tab_row_size; - u8 coefficient[8]; - u8 panel_misc_bits_1; - u8 panel_misc_bits_2; - u8 panel_misc_bits_3; - u8 panel_misc_bits_4; 
+struct bdb_psr { + struct psr_table psr_table[16]; } __packed; /* @@ -697,6 +545,65 @@ struct bdb_driver_features { u16 pc_feature_valid:1; } __packed; +/* + * Block 22 - SDVO LVDS General Options + */ + +struct bdb_sdvo_lvds_options { + u8 panel_backlight; + u8 h40_set_panel_type; + u8 panel_type; + u8 ssc_clk_freq; + u16 als_low_trip; + u16 als_high_trip; + u8 sclalarcoeff_tab_row_num; + u8 sclalarcoeff_tab_row_size; + u8 coefficient[8]; + u8 panel_misc_bits_1; + u8 panel_misc_bits_2; + u8 panel_misc_bits_3; + u8 panel_misc_bits_4; +} __packed; + +/* + * Block 23 - SDVO LVDS Panel DTDs + */ + +struct lvds_dvo_timing { + u16 clock; /**< In 10khz */ + u8 hactive_lo; + u8 hblank_lo; + u8 hblank_hi:4; + u8 hactive_hi:4; + u8 vactive_lo; + u8 vblank_lo; + u8 vblank_hi:4; + u8 vactive_hi:4; + u8 hsync_off_lo; + u8 hsync_pulse_width_lo; + u8 vsync_pulse_width_lo:4; + u8 vsync_off_lo:4; + u8 vsync_pulse_width_hi:2; + u8 vsync_off_hi:2; + u8 hsync_pulse_width_hi:2; + u8 hsync_off_hi:2; + u8 himage_lo; + u8 vimage_lo; + u8 vimage_hi:4; + u8 himage_hi:4; + u8 h_border; + u8 v_border; + u8 rsvd1:3; + u8 digital:2; + u8 vsync_positive:1; + u8 hsync_positive:1; + u8 non_interlaced:1; +} __packed; + +struct bdb_sdvo_panel_dtds { + struct lvds_dvo_timing dtds[4]; +} __packed; + /* * Block 27 - eDP VBT Block */ @@ -754,30 +661,123 @@ struct bdb_edp { } __packed; /* - * Block 9 - SRD Feature Block + * Block 40 - LFP Data Block */ -struct psr_table { - /* Feature bits */ - u8 full_link:1; - u8 require_aux_to_wakeup:1; - u8 feature_bits_rsvd:6; +/* Mask for DRRS / Panel Channel / SSC / BLT control bits extraction */ +#define MODE_MASK 0x3 - /* Wait times */ - u8 idle_frames:4; - u8 lines_to_wait:3; - u8 wait_times_rsvd:1; +struct bdb_lvds_options { + u8 panel_type; + u8 rsvd1; + /* LVDS capabilities, stored in a dword */ + u8 pfit_mode:2; + u8 pfit_text_mode_enhanced:1; + u8 pfit_gfx_mode_enhanced:1; + u8 pfit_ratio_auto:1; + u8 pixel_dither:1; + u8 lvds_edid:1; + u8 rsvd2:1; + u8 rsvd4; + /* LVDS Panel channel bits stored here */ + u32 lvds_panel_channel_bits; + /* LVDS SSC (Spread Spectrum Clock) bits stored here. 
*/ + u16 ssc_bits; + u16 ssc_freq; + u16 ssc_ddt; + /* Panel color depth defined here */ + u16 panel_color_depth; + /* LVDS panel type bits stored here */ + u32 dps_panel_type_bits; + /* LVDS backlight control type bits stored here */ + u32 blt_control_type_bits; +} __packed; - /* TP wake up time in multiple of 100 */ - u16 tp1_wakeup_time; - u16 tp2_tp3_wakeup_time; +/* + * Block 41 - LFP Data Table Pointers + */ - /* PSR2 TP2/TP3 wakeup time for 16 panels */ - u32 psr2_tp2_tp3_wakeup_time; +/* LFP pointer table contains entries to the struct below */ +struct lvds_lfp_data_ptr { + u16 fp_timing_offset; /* offsets are from start of bdb */ + u8 fp_table_size; + u16 dvo_timing_offset; + u8 dvo_table_size; + u16 panel_pnp_id_offset; + u8 pnp_table_size; } __packed; -struct bdb_psr { - struct psr_table psr_table[16]; +struct bdb_lvds_lfp_data_ptrs { + u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */ + struct lvds_lfp_data_ptr ptr[16]; +} __packed; + +/* + * Block 42 - LFP Data Tables + */ + +/* LFP data has 3 blocks per entry */ +struct lvds_fp_timing { + u16 x_res; + u16 y_res; + u32 lvds_reg; + u32 lvds_reg_val; + u32 pp_on_reg; + u32 pp_on_reg_val; + u32 pp_off_reg; + u32 pp_off_reg_val; + u32 pp_cycle_reg; + u32 pp_cycle_reg_val; + u32 pfit_reg; + u32 pfit_reg_val; + u16 terminator; +} __packed; + +struct lvds_pnp_id { + u16 mfg_name; + u16 product_code; + u32 serial; + u8 mfg_week; + u8 mfg_year; +} __packed; + +struct lvds_lfp_data_entry { + struct lvds_fp_timing fp_timing; + struct lvds_dvo_timing dvo_timing; + struct lvds_pnp_id pnp_id; +} __packed; + +struct bdb_lvds_lfp_data { + struct lvds_lfp_data_entry data[16]; +} __packed; + +/* + * Block 43 - LFP Backlight Control Data Block + */ + +#define BDB_BACKLIGHT_TYPE_NONE 0 +#define BDB_BACKLIGHT_TYPE_PWM 2 + +struct lfp_backlight_data_entry { + u8 type:2; + u8 active_low_pwm:1; + u8 obsolete1:5; + u16 pwm_freq_hz; + u8 min_brightness; + u8 obsolete2; + u8 obsolete3; +} __packed; + +struct lfp_backlight_control_method { + u8 type:4; + u8 controller:4; +} __packed; + +struct bdb_lfp_backlight_data { + u8 entry_size; + struct lfp_backlight_data_entry data[16]; + u8 level[16]; + struct lfp_backlight_control_method backlight_control[16]; } __packed; /* -- cgit v1.2.3 From 1434e1f613e855e4cef825b97abaa585840a5677 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Fri, 31 May 2019 16:15:01 +0300 Subject: drm/i915/bios: add VBT swing bit to child device definition MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit New bit to look in another BDB block for more. No functional changes. 
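
The new flag is carved out of the existing reserved bits, so the packed
layout of the child device structure does not change; that is what makes
this a pure definition update. A standalone illustration of that point
follows (the structs below are trimmed stand-ins, not the real
child_device_config, and only the flag byte is shown):

#include <stdint.h>

struct flags_before {			/* VBT < 218 */
	uint8_t lspcon:1;
	uint8_t iboost:1;
	uint8_t hpd_invert:1;
	uint8_t flag_reserved:3;
	uint8_t hdmi_support:1;
	uint8_t dp_support:1;
};

struct flags_after {			/* VBT 218+ */
	uint8_t lspcon:1;
	uint8_t iboost:1;
	uint8_t hpd_invert:1;
	uint8_t use_vbt_vswing:1;	/* the new bit */
	uint8_t flag_reserved:2;	/* shrinks by one */
	uint8_t hdmi_support:1;
	uint8_t dp_support:1;
};

/* Reusing a reserved bit must not grow the block. */
_Static_assert(sizeof(struct flags_before) == sizeof(struct flags_after),
	       "new bit must come out of flag_reserved");
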
Cc: Ville Syrjälä Reviewed-by: Ville Syrjälä Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/7ab46e3f53fd2c12cb60b9eabbebb65b27004a9e.1559308269.git.jani.nikula@intel.com --- drivers/gpu/drm/i915/intel_vbt_defs.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h index 608fa4135262..ea1697ab00d2 100644 --- a/drivers/gpu/drm/i915/intel_vbt_defs.h +++ b/drivers/gpu/drm/i915/intel_vbt_defs.h @@ -391,7 +391,8 @@ struct child_device_config { u8 lspcon:1; /* 192 */ u8 iboost:1; /* 196 */ u8 hpd_invert:1; /* 196 */ - u8 flag_reserved:3; + u8 use_vbt_vswing:1; /* 218 */ + u8 flag_reserved:2; u8 hdmi_support:1; /* 158 */ u8 dp_support:1; /* 158 */ u8 tmds_support:1; /* 158 */ -- cgit v1.2.3 From b77f9525a658c0c519ae8c18b8e4fd256900b1a2 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Fri, 31 May 2019 16:15:02 +0300 Subject: drm/i915/bios: add more LFP options MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add new fields in the LFP block. No functional changes. Cc: Ville Syrjälä Reviewed-by: Ville Syrjälä Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/9a7f41aab894d7e96d8ad4776cf14f94cfd17d04.1559308269.git.jani.nikula@intel.com --- drivers/gpu/drm/i915/intel_vbt_defs.h | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h index ea1697ab00d2..6d46ca4bf8c4 100644 --- a/drivers/gpu/drm/i915/intel_vbt_defs.h +++ b/drivers/gpu/drm/i915/intel_vbt_defs.h @@ -670,7 +670,7 @@ struct bdb_edp { struct bdb_lvds_options { u8 panel_type; - u8 rsvd1; + u8 panel_type2; /* 212 */ /* LVDS capabilities, stored in a dword */ u8 pfit_mode:2; u8 pfit_text_mode_enhanced:1; @@ -692,6 +692,9 @@ struct bdb_lvds_options { u32 dps_panel_type_bits; /* LVDS backlight control type bits stored here */ u32 blt_control_type_bits; + + u16 lcdvcc_s0_enable; /* 200 */ + u32 rotation; /* 228 */ } __packed; /* -- cgit v1.2.3 From f41c615310d2f4f20f6259ae254bc058f61ced6d Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Fri, 31 May 2019 16:15:03 +0300 Subject: drm/i915/bios: add an enum for BDB block IDs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Better grouping, better semantics for find_section(). No functional changes. 
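
Since the additions either reuse a previously reserved byte (panel_type2)
or are appended after the existing fields, older VBTs simply carry a
shorter block, and a consumer generally has to gate on the VBT version or
block size before trusting the new fields. A generic, standalone sketch of
that guard; this is illustrative only, with simplified names and layout,
and is not the driver's actual parser:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Reduced stand-in for the LFP options block: legacy fields first,
 * the new trailing fields last. */
struct lfp_options_sketch {
	uint8_t  panel_type;
	uint8_t  panel_type2;		/* meaningful from VBT 212 */
	uint32_t legacy_bits;
	uint16_t lcdvcc_s0_enable;	/* meaningful from VBT 200 */
	uint32_t rotation;		/* meaningful from VBT 228 */
};

/* Only report a trailing field when the block as found in the VBT is
 * actually large enough to contain it. */
static int read_rotation(const uint8_t *block, size_t block_size,
			 uint32_t *rotation)
{
	const size_t needed = offsetof(struct lfp_options_sketch, rotation) +
			      sizeof(*rotation);

	if (block_size < needed)
		return -1;	/* older VBT: field not present */

	memcpy(rotation, block + offsetof(struct lfp_options_sketch, rotation),
	       sizeof(*rotation));
	return 0;
}
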
Cc: Ville Syrjälä Reviewed-by: Ville Syrjälä Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/192cc8a45cb5c36ccbde25a725df135793a4263f.1559308269.git.jani.nikula@intel.com --- drivers/gpu/drm/i915/intel_bios.c | 4 +- drivers/gpu/drm/i915/intel_vbt_defs.h | 71 ++++++++++++++++++----------------- 2 files changed, 38 insertions(+), 37 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index b5aedb605c05..1c037dfa83f5 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -76,13 +76,13 @@ static u32 get_blocksize(const void *block_data) } static const void * -find_section(const void *_bdb, int section_id) +find_section(const void *_bdb, enum bdb_block_id section_id) { const struct bdb_header *bdb = _bdb; const u8 *base = _bdb; int index = 0; u32 total, current_size; - u8 current_id; + enum bdb_block_id current_id; /* skip to first section */ index += bdb->header_size; diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h index 6d46ca4bf8c4..89ef14cafb6b 100644 --- a/drivers/gpu/drm/i915/intel_vbt_defs.h +++ b/drivers/gpu/drm/i915/intel_vbt_defs.h @@ -80,41 +80,42 @@ struct bdb_header { * an ID and size in the first 3 bytes (ID in first, size in next 2). * Known types are listed below. */ -#define BDB_GENERAL_FEATURES 1 -#define BDB_GENERAL_DEFINITIONS 2 -#define BDB_OLD_TOGGLE_LIST 3 -#define BDB_MODE_SUPPORT_LIST 4 -#define BDB_GENERIC_MODE_TABLE 5 -#define BDB_EXT_MMIO_REGS 6 -#define BDB_SWF_IO 7 -#define BDB_SWF_MMIO 8 -#define BDB_PSR 9 -#define BDB_MODE_REMOVAL_TABLE 10 -#define BDB_CHILD_DEVICE_TABLE 11 -#define BDB_DRIVER_FEATURES 12 -#define BDB_DRIVER_PERSISTENCE 13 -#define BDB_EXT_TABLE_PTRS 14 -#define BDB_DOT_CLOCK_OVERRIDE 15 -#define BDB_DISPLAY_SELECT 16 -/* 17 rsvd */ -#define BDB_DRIVER_ROTATION 18 -#define BDB_DISPLAY_REMOVE 19 -#define BDB_OEM_CUSTOM 20 -#define BDB_EFP_LIST 21 /* workarounds for VGA hsync/vsync */ -#define BDB_SDVO_LVDS_OPTIONS 22 -#define BDB_SDVO_PANEL_DTDS 23 -#define BDB_SDVO_LVDS_PNP_IDS 24 -#define BDB_SDVO_LVDS_POWER_SEQ 25 -#define BDB_TV_OPTIONS 26 -#define BDB_EDP 27 -#define BDB_LVDS_OPTIONS 40 -#define BDB_LVDS_LFP_DATA_PTRS 41 -#define BDB_LVDS_LFP_DATA 42 -#define BDB_LVDS_BACKLIGHT 43 -#define BDB_LVDS_POWER 44 -#define BDB_MIPI_CONFIG 52 -#define BDB_MIPI_SEQUENCE 53 -#define BDB_SKIP 254 /* VBIOS private block, ignore */ +enum bdb_block_id { + BDB_GENERAL_FEATURES = 1, + BDB_GENERAL_DEFINITIONS = 2, + BDB_OLD_TOGGLE_LIST = 3, + BDB_MODE_SUPPORT_LIST = 4, + BDB_GENERIC_MODE_TABLE = 5, + BDB_EXT_MMIO_REGS = 6, + BDB_SWF_IO = 7, + BDB_SWF_MMIO = 8, + BDB_PSR = 9, + BDB_MODE_REMOVAL_TABLE = 10, + BDB_CHILD_DEVICE_TABLE = 11, + BDB_DRIVER_FEATURES = 12, + BDB_DRIVER_PERSISTENCE = 13, + BDB_EXT_TABLE_PTRS = 14, + BDB_DOT_CLOCK_OVERRIDE = 15, + BDB_DISPLAY_SELECT = 16, + BDB_DRIVER_ROTATION = 18, + BDB_DISPLAY_REMOVE = 19, + BDB_OEM_CUSTOM = 20, + BDB_EFP_LIST = 21, /* workarounds for VGA hsync/vsync */ + BDB_SDVO_LVDS_OPTIONS = 22, + BDB_SDVO_PANEL_DTDS = 23, + BDB_SDVO_LVDS_PNP_IDS = 24, + BDB_SDVO_LVDS_POWER_SEQ = 25, + BDB_TV_OPTIONS = 26, + BDB_EDP = 27, + BDB_LVDS_OPTIONS = 40, + BDB_LVDS_LFP_DATA_PTRS = 41, + BDB_LVDS_LFP_DATA = 42, + BDB_LVDS_BACKLIGHT = 43, + BDB_LVDS_POWER = 44, + BDB_MIPI_CONFIG = 52, + BDB_MIPI_SEQUENCE = 53, + BDB_SKIP = 254, /* VBIOS private block, ignore */ +}; /* * Block 1 - General Bit Definitions -- cgit v1.2.3 From 
2c1c55252647abd989b94f725b190c700312d053 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Fri, 24 May 2019 19:40:27 +0200 Subject: drm/i915/dsi: Use a fuzzy check for burst mode clock check MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Prior to this commit we fail to init the DSI panel on the GPD MicroPC: https://www.indiegogo.com/projects/gpd-micropc-6-inch-handheld-industry-laptop#/ The problem is intel_dsi_vbt_init() failing with the following error: *ERROR* Burst mode freq is less than computed The pclk in the VBT panel modeline is 70000, together with 24 bpp and 4 lines this results in a bitrate value of 70000 * 24 / 4 = 420000. But the target_burst_mode_freq in the VBT is 418000. This commit works around this problem by adding an intel_fuzzy_clock_check when target_burst_mode_freq < bitrate and setting target_burst_mode_freq to bitrate when that checks succeeds, fixing the panel not working. Cc: stable@vger.kernel.org Reviewed-by: Ville Syrjälä Signed-off-by: Hans de Goede Link: https://patchwork.freedesktop.org/patch/msgid/20190524174028.21659-2-hdegoede@redhat.com --- drivers/gpu/drm/i915/intel_display.c | 2 +- drivers/gpu/drm/i915/intel_drv.h | 1 + drivers/gpu/drm/i915/intel_dsi_vbt.c | 11 +++++++++++ 3 files changed, 13 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 122a075b6174..1560030563b6 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -12163,7 +12163,7 @@ encoder_retry: return 0; } -static bool intel_fuzzy_clock_check(int clock1, int clock2) +bool intel_fuzzy_clock_check(int clock1, int clock2) { int diff; diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 0dcc03592d6e..d0aeb383024a 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -1557,6 +1557,7 @@ int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe, const struct dpll *dpll); void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe); int lpt_get_iclkip(struct drm_i915_private *dev_priv); +bool intel_fuzzy_clock_check(int clock1, int clock2); /* modesetting asserts */ void assert_panel_unlocked(struct drm_i915_private *dev_priv, diff --git a/drivers/gpu/drm/i915/intel_dsi_vbt.c b/drivers/gpu/drm/i915/intel_dsi_vbt.c index fbed9064ac7e..7cdde1d04f4b 100644 --- a/drivers/gpu/drm/i915/intel_dsi_vbt.c +++ b/drivers/gpu/drm/i915/intel_dsi_vbt.c @@ -858,6 +858,17 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id) if (mipi_config->target_burst_mode_freq) { u32 bitrate = intel_dsi_bitrate(intel_dsi); + /* + * Sometimes the VBT contains a slightly lower clock, + * then the bitrate we have calculated, in this case + * just replace it with the calculated bitrate. + */ + if (mipi_config->target_burst_mode_freq < bitrate && + intel_fuzzy_clock_check( + mipi_config->target_burst_mode_freq, + bitrate)) + mipi_config->target_burst_mode_freq = bitrate; + if (mipi_config->target_burst_mode_freq < bitrate) { DRM_ERROR("Burst mode freq is less than computed\n"); return false; -- cgit v1.2.3 From b7143860634387a6bc2ca537a79caf9522e4f1a8 Mon Sep 17 00:00:00 2001 From: Matt Roper Date: Wed, 5 Jun 2019 14:18:32 -0700 Subject: drm/i915/ehl: Support HBR3 on EHL combo PHY Unlike ICL, EHL's combo PHYs can support HBR3 data rates. 
Note that this just extends the upper limit; we will continue to honor the max data rate specified in the VBT in cases where it is lower than HBR3. Signed-off-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/20190605211832.23945-1-matthew.d.roper@intel.com Reviewed-by: Manasi Navare --- drivers/gpu/drm/i915/intel_dp.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 24b56b2a76c8..b099a9dc28fd 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -332,6 +332,7 @@ static int icl_max_source_rate(struct intel_dp *intel_dp) enum port port = dig_port->base.port; if (intel_port_is_combophy(dev_priv, port) && + !IS_ELKHARTLAKE(dev_priv) && !intel_dp_is_edp(intel_dp)) return 540000; -- cgit v1.2.3 From affa22b5f0f7e9caf61887671abe38819737bf16 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Wed, 5 Jun 2019 12:56:57 +0300 Subject: drm/i915: fix documentation build warnings Just a straightforward bag of fixes for a clean htmldocs build. Reviewed-by: Mika Kuoppala Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190605095657.23601-2-jani.nikula@intel.com --- Documentation/gpu/i915.rst | 6 ------ drivers/gpu/drm/i915/i915_reg.h | 2 +- drivers/gpu/drm/i915/i915_vma.h | 2 ++ drivers/gpu/drm/i915/intel_guc_fwif.h | 2 ++ drivers/gpu/drm/i915/intel_runtime_pm.c | 2 -- 5 files changed, 5 insertions(+), 9 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/Documentation/gpu/i915.rst b/Documentation/gpu/i915.rst index f98ee95da90f..300220c4b498 100644 --- a/Documentation/gpu/i915.rst +++ b/Documentation/gpu/i915.rst @@ -475,12 +475,6 @@ i915_context_create and i915_context_free .. kernel-doc:: drivers/gpu/drm/i915/i915_trace.h :doc: i915_context_create and i915_context_free tracepoints -switch_mm ---------- - -.. kernel-doc:: drivers/gpu/drm/i915/i915_trace.h - :doc: switch_mm tracepoint - Perf ==== diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 07e3f861a92e..b7c13d5deb15 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -153,7 +153,7 @@ * REG_FIELD_PREP() - Prepare a u32 bitfield value * @__mask: shifted mask defining the field's length and position * @__val: value to put in the field - + * * Local copy of FIELD_PREP() to generate an integer constant expression, force * u32 and for consistency with REG_FIELD_GET(), REG_BIT() and REG_GENMASK(). * diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h index 2657c99fe187..bc15083bd479 100644 --- a/drivers/gpu/drm/i915/i915_vma.h +++ b/drivers/gpu/drm/i915/i915_vma.h @@ -40,6 +40,8 @@ enum i915_cache_level; /** + * DOC: Virtual Memory Address + * * A VMA represents a GEM BO that is bound into an address space. Therefore, a * VMA's presence cannot be guaranteed before binding, or after unbinding the * object into/from the address space. diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h index 3d1de288d96c..f55f3bc8524d 100644 --- a/drivers/gpu/drm/i915/intel_guc_fwif.h +++ b/drivers/gpu/drm/i915/intel_guc_fwif.h @@ -500,6 +500,8 @@ enum guc_log_buffer_type { }; /** + * struct guc_log_buffer_state - GuC log buffer state + * * Below state structure is used for coordination of retrieval of GuC firmware * logs. Separate state is maintained for each log buffer type. 
* read_ptr points to the location where i915 read last in log buffer and diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 3bdeea596ad5..af3c1ada1b2e 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -391,8 +391,6 @@ static intel_wakeref_t __intel_runtime_pm_get(struct drm_i915_private *i915, * asynchronous PM management from display code) and ensures that it is powered * up. Raw references are not considered during wakelock assert checks. * - * Returns: - * True when the power domain is enabled, false otherwise. * Any runtime pm reference obtained by this function must have a symmetric * call to intel_runtime_pm_put_raw() to release the reference again. * -- cgit v1.2.3 From 155ab8836caa69579a97a02ccafee929091170b5 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 6 Jun 2019 12:23:20 +0100 Subject: drm/i915: Move object close under its own lock Use i915_gem_object_lock() to guard the LUT and active reference to allow us to break free of struct_mutex for handling GEM_CLOSE. Testcase: igt/gem_close_race Testcase: igt/gem_exec_parallel Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190606112320.9704-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_context.c | 80 +++++++++++++---------- drivers/gpu/drm/i915/gem/i915_gem_context_types.h | 12 +--- drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 25 ++++--- drivers/gpu/drm/i915/gem/i915_gem_object.c | 38 ++++++----- drivers/gpu/drm/i915/gem/i915_gem_object_types.h | 1 - drivers/gpu/drm/i915/gem/selftests/mock_context.c | 1 - drivers/gpu/drm/i915/i915_drv.h | 4 +- drivers/gpu/drm/i915/i915_gem.c | 1 + drivers/gpu/drm/i915/i915_gem_gtt.c | 1 + drivers/gpu/drm/i915/i915_timeline.c | 13 ++-- drivers/gpu/drm/i915/i915_vma.c | 48 +++++++++----- drivers/gpu/drm/i915/i915_vma.h | 17 +++-- drivers/gpu/drm/i915/selftests/mock_gem_device.c | 1 + 13 files changed, 139 insertions(+), 103 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index 08721ef62e4e..6cac1c144c79 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -95,24 +95,45 @@ void i915_lut_handle_free(struct i915_lut_handle *lut) static void lut_close(struct i915_gem_context *ctx) { - struct i915_lut_handle *lut, *ln; struct radix_tree_iter iter; void __rcu **slot; - list_for_each_entry_safe(lut, ln, &ctx->handles_list, ctx_link) { - list_del(&lut->obj_link); - i915_lut_handle_free(lut); - } - INIT_LIST_HEAD(&ctx->handles_list); + lockdep_assert_held(&ctx->mutex); rcu_read_lock(); radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) { struct i915_vma *vma = rcu_dereference_raw(*slot); + struct drm_i915_gem_object *obj = vma->obj; + struct i915_lut_handle *lut; + + if (!kref_get_unless_zero(&obj->base.refcount)) + continue; - radix_tree_iter_delete(&ctx->handles_vma, &iter, slot); + rcu_read_unlock(); + i915_gem_object_lock(obj); + list_for_each_entry(lut, &obj->lut_list, obj_link) { + if (lut->ctx != ctx) + continue; - vma->open_count--; - i915_vma_put(vma); + if (lut->handle != iter.index) + continue; + + list_del(&lut->obj_link); + break; + } + i915_gem_object_unlock(obj); + rcu_read_lock(); + + if (&lut->obj_link != &obj->lut_list) { + i915_lut_handle_free(lut); + radix_tree_iter_delete(&ctx->handles_vma, &iter, slot); + if 
(atomic_dec_and_test(&vma->open_count) && + !i915_vma_is_ggtt(vma)) + i915_vma_close(vma); + i915_gem_object_put(obj); + } + + i915_gem_object_put(obj); } rcu_read_unlock(); } @@ -250,15 +271,9 @@ static void free_engines(struct i915_gem_engines *e) __free_engines(e, e->num_engines); } -static void free_engines_rcu(struct work_struct *wrk) +static void free_engines_rcu(struct rcu_head *rcu) { - struct i915_gem_engines *e = - container_of(wrk, struct i915_gem_engines, rcu.work); - struct drm_i915_private *i915 = e->i915; - - mutex_lock(&i915->drm.struct_mutex); - free_engines(e); - mutex_unlock(&i915->drm.struct_mutex); + free_engines(container_of(rcu, struct i915_gem_engines, rcu)); } static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx) @@ -271,7 +286,7 @@ static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx) if (!e) return ERR_PTR(-ENOMEM); - e->i915 = ctx->i915; + init_rcu_head(&e->rcu); for_each_engine(engine, ctx->i915, id) { struct intel_context *ce; @@ -359,7 +374,10 @@ void i915_gem_context_release(struct kref *ref) static void context_close(struct i915_gem_context *ctx) { + mutex_lock(&ctx->mutex); + i915_gem_context_set_closed(ctx); + ctx->file_priv = ERR_PTR(-EBADF); /* * This context will never again be assinged to HW, so we can @@ -374,7 +392,7 @@ static void context_close(struct i915_gem_context *ctx) */ lut_close(ctx); - ctx->file_priv = ERR_PTR(-EBADF); + mutex_unlock(&ctx->mutex); i915_gem_context_put(ctx); } @@ -429,7 +447,6 @@ __create_context(struct drm_i915_private *dev_priv) RCU_INIT_POINTER(ctx->engines, e); INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL); - INIT_LIST_HEAD(&ctx->handles_list); INIT_LIST_HEAD(&ctx->hw_id_link); /* NB: Mark all slices as needing a remap so that when the context first @@ -772,9 +789,7 @@ int i915_gem_context_open(struct drm_i915_private *i915, return 0; err_ctx: - mutex_lock(&i915->drm.struct_mutex); context_close(ctx); - mutex_unlock(&i915->drm.struct_mutex); err: idr_destroy(&file_priv->vm_idr); idr_destroy(&file_priv->context_idr); @@ -787,8 +802,6 @@ void i915_gem_context_close(struct drm_file *file) { struct drm_i915_file_private *file_priv = file->driver_priv; - lockdep_assert_held(&file_priv->dev_priv->drm.struct_mutex); - idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL); idr_destroy(&file_priv->context_idr); mutex_destroy(&file_priv->context_idr_lock); @@ -1093,7 +1106,9 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv, goto unlock; /* Teardown the existing obj:vma cache, it will have to be rebuilt. 
*/ + mutex_lock(&ctx->mutex); lut_close(ctx); + mutex_unlock(&ctx->mutex); old = __set_ppgtt(ctx, ppgtt); @@ -1612,7 +1627,7 @@ set_engines(struct i915_gem_context *ctx, if (!set.engines) return -ENOMEM; - set.engines->i915 = ctx->i915; + init_rcu_head(&set.engines->rcu); for (n = 0; n < num_engines; n++) { struct i915_engine_class_instance ci; struct intel_engine_cs *engine; @@ -1666,8 +1681,7 @@ replace: rcu_swap_protected(ctx->engines, set.engines, 1); mutex_unlock(&ctx->engines_mutex); - INIT_RCU_WORK(&set.engines->rcu, free_engines_rcu); - queue_rcu_work(system_wq, &set.engines->rcu); + call_rcu(&set.engines->rcu, free_engines_rcu); return 0; } @@ -1682,7 +1696,7 @@ __copy_engines(struct i915_gem_engines *e) if (!copy) return ERR_PTR(-ENOMEM); - copy->i915 = e->i915; + init_rcu_head(©->rcu); for (n = 0; n < e->num_engines; n++) { if (e->engines[n]) copy->engines[n] = intel_context_get(e->engines[n]); @@ -1769,8 +1783,7 @@ get_engines(struct i915_gem_context *ctx, args->size = size; err_free: - INIT_RCU_WORK(&e->rcu, free_engines_rcu); - queue_rcu_work(system_wq, &e->rcu); + free_engines(e); return err; } @@ -1891,7 +1904,7 @@ static int clone_engines(struct i915_gem_context *dst, if (!clone) goto err_unlock; - clone->i915 = dst->i915; + init_rcu_head(&clone->rcu); for (n = 0; n < e->num_engines; n++) { struct intel_engine_cs *engine; @@ -2163,9 +2176,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, return 0; err_ctx: - mutex_lock(&dev->struct_mutex); context_close(ext_data.ctx); - mutex_unlock(&dev->struct_mutex); return ret; } @@ -2190,10 +2201,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, if (!ctx) return -ENOENT; - mutex_lock(&dev->struct_mutex); context_close(ctx); - mutex_unlock(&dev->struct_mutex); - return 0; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h index fb965ded2508..3db7448b9732 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h @@ -30,8 +30,7 @@ struct i915_timeline; struct intel_ring; struct i915_gem_engines { - struct rcu_work rcu; - struct drm_i915_private *i915; + struct rcu_head rcu; unsigned int num_engines; struct intel_context *engines[]; }; @@ -192,17 +191,12 @@ struct i915_gem_context { /** remap_slice: Bitmask of cache lines that need remapping */ u8 remap_slice; - /** handles_vma: rbtree to look up our context specific obj/vma for + /** + * handles_vma: rbtree to look up our context specific obj/vma for * the user handle. (user handles are per fd, but the binding is * per vm, which may be one per context or shared with the global GTT) */ struct radix_tree_root handles_vma; - - /** handles_list: reverse list of all the rbtree entries in use for - * this context, which allows us to free all the allocations on - * context close. 
- */ - struct list_head handles_list; }; #endif /* __I915_GEM_CONTEXT_TYPES_H__ */ diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index ed522fdfbe7f..2c4f3229361d 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -801,9 +801,6 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb) unsigned int i, batch; int err; - if (unlikely(i915_gem_context_is_closed(eb->gem_context))) - return -ENOENT; - if (unlikely(i915_gem_context_is_banned(eb->gem_context))) return -EIO; @@ -812,6 +809,12 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb) batch = eb_batch_index(eb); + mutex_lock(&eb->gem_context->mutex); + if (unlikely(i915_gem_context_is_closed(eb->gem_context))) { + err = -ENOENT; + goto err_ctx; + } + for (i = 0; i < eb->buffer_count; i++) { u32 handle = eb->exec[i].handle; struct i915_lut_handle *lut; @@ -845,13 +848,15 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb) goto err_obj; } - /* transfer ref to ctx */ - if (!vma->open_count++) + /* transfer ref to lut */ + if (!atomic_fetch_inc(&vma->open_count)) i915_vma_reopen(vma); - list_add(&lut->obj_link, &obj->lut_list); - list_add(&lut->ctx_link, &eb->gem_context->handles_list); - lut->ctx = eb->gem_context; lut->handle = handle; + lut->ctx = eb->gem_context; + + i915_gem_object_lock(obj); + list_add(&lut->obj_link, &obj->lut_list); + i915_gem_object_unlock(obj); add_vma: err = eb_add_vma(eb, i, batch, vma); @@ -864,6 +869,8 @@ add_vma: eb_vma_misplaced(&eb->exec[i], vma, eb->flags[i])); } + mutex_unlock(&eb->gem_context->mutex); + eb->args->flags |= __EXEC_VALIDATED; return eb_reserve(eb); @@ -871,6 +878,8 @@ err_obj: i915_gem_object_put(obj); err_vma: eb->vma[i] = NULL; +err_ctx: + mutex_unlock(&eb->gem_context->mutex); return err; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c index b840cf179bbe..a0bc8f7ab780 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c @@ -105,39 +105,47 @@ void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj, void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file) { - struct drm_i915_private *i915 = to_i915(gem->dev); struct drm_i915_gem_object *obj = to_intel_bo(gem); struct drm_i915_file_private *fpriv = file->driver_priv; struct i915_lut_handle *lut, *ln; + LIST_HEAD(close); - mutex_lock(&i915->drm.struct_mutex); - + i915_gem_object_lock(obj); list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) { struct i915_gem_context *ctx = lut->ctx; - struct i915_vma *vma; - GEM_BUG_ON(ctx->file_priv == ERR_PTR(-EBADF)); if (ctx->file_priv != fpriv) continue; - vma = radix_tree_delete(&ctx->handles_vma, lut->handle); - GEM_BUG_ON(vma->obj != obj); + i915_gem_context_get(ctx); + list_move(&lut->obj_link, &close); + } + i915_gem_object_unlock(obj); - /* We allow the process to have multiple handles to the same + list_for_each_entry_safe(lut, ln, &close, obj_link) { + struct i915_gem_context *ctx = lut->ctx; + struct i915_vma *vma; + + /* + * We allow the process to have multiple handles to the same * vma, in the same fd namespace, by virtue of flink/open. 
*/ - GEM_BUG_ON(!vma->open_count); - if (!--vma->open_count && !i915_vma_is_ggtt(vma)) - i915_vma_close(vma); - list_del(&lut->obj_link); - list_del(&lut->ctx_link); + mutex_lock(&ctx->mutex); + vma = radix_tree_delete(&ctx->handles_vma, lut->handle); + if (vma) { + GEM_BUG_ON(vma->obj != obj); + GEM_BUG_ON(!atomic_read(&vma->open_count)); + if (atomic_dec_and_test(&vma->open_count) && + !i915_vma_is_ggtt(vma)) + i915_vma_close(vma); + } + mutex_unlock(&ctx->mutex); + i915_gem_context_put(lut->ctx); i915_lut_handle_free(lut); i915_gem_object_put(obj); } - - mutex_unlock(&i915->drm.struct_mutex); } static bool discard_backing_storage(struct drm_i915_gem_object *obj) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h index 67a992d6ee0c..9c161ba73558 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h @@ -24,7 +24,6 @@ struct drm_i915_gem_object; */ struct i915_lut_handle { struct list_head obj_link; - struct list_head ctx_link; struct i915_gem_context *ctx; u32 handle; }; diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.c b/drivers/gpu/drm/i915/gem/selftests/mock_context.c index 68d50da035e6..6578f2f6c3f8 100644 --- a/drivers/gpu/drm/i915/gem/selftests/mock_context.c +++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.c @@ -30,7 +30,6 @@ mock_context(struct drm_i915_private *i915, RCU_INIT_POINTER(ctx->engines, e); INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL); - INIT_LIST_HEAD(&ctx->handles_list); INIT_LIST_HEAD(&ctx->hw_id_link); mutex_init(&ctx->mutex); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 89bf1e34feaa..dfe4b11ee423 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1899,10 +1899,12 @@ struct drm_i915_private { } timelines; struct list_head active_rings; - struct list_head closed_vma; struct intel_wakeref wakeref; + struct list_head closed_vma; + spinlock_t closed_lock; /* guards the list of closed_vma */ + /** * Is the GPU currently considered idle, or busy executing * userspace requests? 
Whilst idle, we allow runtime power diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 4739a6307c32..9f2e213c6046 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1784,6 +1784,7 @@ int i915_gem_init_early(struct drm_i915_private *dev_priv) INIT_LIST_HEAD(&dev_priv->gt.active_rings); INIT_LIST_HEAD(&dev_priv->gt.closed_vma); + spin_lock_init(&dev_priv->gt.closed_lock); i915_gem_init__mm(dev_priv); i915_gem_init__pm(dev_priv); diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 56a436858043..6fcf702d7ec1 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -2074,6 +2074,7 @@ static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size) vma->ggtt_view.type = I915_GGTT_VIEW_ROTATED; /* prevent fencing */ INIT_LIST_HEAD(&vma->obj_link); + INIT_LIST_HEAD(&vma->closed_link); mutex_lock(&vma->vm->mutex); list_add(&vma->vm_link, &vma->vm->unbound_list); diff --git a/drivers/gpu/drm/i915/i915_timeline.c b/drivers/gpu/drm/i915/i915_timeline.c index 5fbea0892f33..000e1a9b6750 100644 --- a/drivers/gpu/drm/i915/i915_timeline.c +++ b/drivers/gpu/drm/i915/i915_timeline.c @@ -61,7 +61,7 @@ hwsp_alloc(struct i915_timeline *timeline, unsigned int *cacheline) BUILD_BUG_ON(BITS_PER_TYPE(u64) * CACHELINE_BYTES > PAGE_SIZE); - spin_lock(>->hwsp_lock); + spin_lock_irq(>->hwsp_lock); /* hwsp_free_list only contains HWSP that have available cachelines */ hwsp = list_first_entry_or_null(>->hwsp_free_list, @@ -69,7 +69,7 @@ hwsp_alloc(struct i915_timeline *timeline, unsigned int *cacheline) if (!hwsp) { struct i915_vma *vma; - spin_unlock(>->hwsp_lock); + spin_unlock_irq(>->hwsp_lock); hwsp = kmalloc(sizeof(*hwsp), GFP_KERNEL); if (!hwsp) @@ -86,7 +86,7 @@ hwsp_alloc(struct i915_timeline *timeline, unsigned int *cacheline) hwsp->free_bitmap = ~0ull; hwsp->gt = gt; - spin_lock(>->hwsp_lock); + spin_lock_irq(>->hwsp_lock); list_add(&hwsp->free_link, >->hwsp_free_list); } @@ -96,7 +96,7 @@ hwsp_alloc(struct i915_timeline *timeline, unsigned int *cacheline) if (!hwsp->free_bitmap) list_del(&hwsp->free_link); - spin_unlock(>->hwsp_lock); + spin_unlock_irq(>->hwsp_lock); GEM_BUG_ON(hwsp->vma->private != hwsp); return hwsp->vma; @@ -105,8 +105,9 @@ hwsp_alloc(struct i915_timeline *timeline, unsigned int *cacheline) static void __idle_hwsp_free(struct i915_timeline_hwsp *hwsp, int cacheline) { struct i915_gt_timelines *gt = hwsp->gt; + unsigned long flags; - spin_lock(>->hwsp_lock); + spin_lock_irqsave(>->hwsp_lock, flags); /* As a cacheline becomes available, publish the HWSP on the freelist */ if (!hwsp->free_bitmap) @@ -122,7 +123,7 @@ static void __idle_hwsp_free(struct i915_timeline_hwsp *hwsp, int cacheline) kfree(hwsp); } - spin_unlock(>->hwsp_lock); + spin_unlock_irqrestore(>->hwsp_lock, flags); } static void __idle_cacheline_free(struct i915_timeline_cacheline *cl) diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index b7fb7d216f77..f6ac8394da77 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -131,9 +131,6 @@ vma_create(struct drm_i915_gem_object *obj, if (vma == NULL) return ERR_PTR(-ENOMEM); - i915_active_init(vm->i915, &vma->active, __i915_vma_retire); - INIT_ACTIVE_REQUEST(&vma->last_fence); - vma->vm = vm; vma->ops = &vm->vma_ops; vma->obj = obj; @@ -141,6 +138,11 @@ vma_create(struct drm_i915_gem_object *obj, vma->size = obj->base.size; vma->display_alignment = 
I915_GTT_MIN_ALIGNMENT; + i915_active_init(vm->i915, &vma->active, __i915_vma_retire); + INIT_ACTIVE_REQUEST(&vma->last_fence); + + INIT_LIST_HEAD(&vma->closed_link); + if (view && view->type != I915_GGTT_VIEW_NORMAL) { vma->ggtt_view = *view; if (view->type == I915_GGTT_VIEW_PARTIAL) { @@ -787,10 +789,10 @@ err_unpin: void i915_vma_close(struct i915_vma *vma) { - lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); + struct drm_i915_private *i915 = vma->vm->i915; + unsigned long flags; GEM_BUG_ON(i915_vma_is_closed(vma)); - vma->flags |= I915_VMA_CLOSED; /* * We defer actually closing, unbinding and destroying the VMA until @@ -804,17 +806,26 @@ void i915_vma_close(struct i915_vma *vma) * causing us to rebind the VMA once more. This ends up being a lot * of wasted work for the steady state. */ - list_add_tail(&vma->closed_link, &vma->vm->i915->gt.closed_vma); + spin_lock_irqsave(&i915->gt.closed_lock, flags); + list_add(&vma->closed_link, &i915->gt.closed_vma); + spin_unlock_irqrestore(&i915->gt.closed_lock, flags); } -void i915_vma_reopen(struct i915_vma *vma) +static void __i915_vma_remove_closed(struct i915_vma *vma) { - lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); + struct drm_i915_private *i915 = vma->vm->i915; - if (vma->flags & I915_VMA_CLOSED) { - vma->flags &= ~I915_VMA_CLOSED; - list_del(&vma->closed_link); - } + if (!i915_vma_is_closed(vma)) + return; + + spin_lock_irq(&i915->gt.closed_lock); + list_del_init(&vma->closed_link); + spin_unlock_irq(&i915->gt.closed_lock); +} + +void i915_vma_reopen(struct i915_vma *vma) +{ + __i915_vma_remove_closed(vma); } static void __i915_vma_destroy(struct i915_vma *vma) @@ -848,8 +859,7 @@ void i915_vma_destroy(struct i915_vma *vma) GEM_BUG_ON(i915_vma_is_pinned(vma)); - if (i915_vma_is_closed(vma)) - list_del(&vma->closed_link); + __i915_vma_remove_closed(vma); WARN_ON(i915_vma_unbind(vma)); GEM_BUG_ON(i915_vma_is_active(vma)); @@ -861,12 +871,16 @@ void i915_vma_parked(struct drm_i915_private *i915) { struct i915_vma *vma, *next; + spin_lock_irq(&i915->gt.closed_lock); list_for_each_entry_safe(vma, next, &i915->gt.closed_vma, closed_link) { - GEM_BUG_ON(!i915_vma_is_closed(vma)); + list_del_init(&vma->closed_link); + spin_unlock_irq(&i915->gt.closed_lock); + i915_vma_destroy(vma); - } - GEM_BUG_ON(!list_empty(&i915->gt.closed_vma)); + spin_lock_irq(&i915->gt.closed_lock); + } + spin_unlock_irq(&i915->gt.closed_lock); } static void __i915_vma_iounmap(struct i915_vma *vma) diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h index bc15083bd479..0c57ab4fed5d 100644 --- a/drivers/gpu/drm/i915/i915_vma.h +++ b/drivers/gpu/drm/i915/i915_vma.h @@ -71,7 +71,7 @@ struct i915_vma { * handles (but same file) for execbuf, i.e. the number of aliases * that exist in the ctx->handle_vmas LUT for this vma. */ - unsigned int open_count; + atomic_t open_count; unsigned long flags; /** * How many users have pinned this object in GTT space. 
@@ -106,10 +106,9 @@ struct i915_vma { #define I915_VMA_GGTT BIT(11) #define I915_VMA_CAN_FENCE BIT(12) -#define I915_VMA_CLOSED BIT(13) -#define I915_VMA_USERFAULT_BIT 14 +#define I915_VMA_USERFAULT_BIT 13 #define I915_VMA_USERFAULT BIT(I915_VMA_USERFAULT_BIT) -#define I915_VMA_GGTT_WRITE BIT(15) +#define I915_VMA_GGTT_WRITE BIT(14) struct i915_active active; struct i915_active_request last_fence; @@ -192,11 +191,6 @@ static inline bool i915_vma_is_map_and_fenceable(const struct i915_vma *vma) return vma->flags & I915_VMA_CAN_FENCE; } -static inline bool i915_vma_is_closed(const struct i915_vma *vma) -{ - return vma->flags & I915_VMA_CLOSED; -} - static inline bool i915_vma_set_userfault(struct i915_vma *vma) { GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma)); @@ -213,6 +207,11 @@ static inline bool i915_vma_has_userfault(const struct i915_vma *vma) return test_bit(I915_VMA_USERFAULT_BIT, &vma->flags); } +static inline bool i915_vma_is_closed(const struct i915_vma *vma) +{ + return !list_empty(&vma->closed_link); +} + static inline u32 i915_ggtt_offset(const struct i915_vma *vma) { GEM_BUG_ON(!i915_vma_is_ggtt(vma)); diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index e25b74a27f83..b7f3fbb4ae89 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -203,6 +203,7 @@ struct drm_i915_private *mock_gem_device(void) INIT_LIST_HEAD(&i915->gt.active_rings); INIT_LIST_HEAD(&i915->gt.closed_vma); + spin_lock_init(&i915->gt.closed_lock); mutex_lock(&i915->drm.struct_mutex); -- cgit v1.2.3 From 1fe2d6f94f96e35f0d71721eb899f5f72d5b68bd Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 4 Jun 2019 16:24:08 +0100 Subject: drm/i915: Skip context_barrier emission for unused contexts The intent was to skip unused HW contexts by checking ce->state. However, this only works for execlists where the ppGTT pointers is stored inside the HW context. For gen7, the ppGTT is alongside the logical state and must be updated on all active engines but, crucially, only on active engines. As we need different checks, and to keep context_barrier_task() agnostic, pass in the predicate. 
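
The shape of the fix is easiest to see in isolation: the barrier helper
stays policy-free and the caller passes in its own notion of "unused".
A stripped-down, standalone sketch of that pattern follows; the names and
types are invented for illustration, and the real predicates are
skip_ppgtt_update() and skip_unused_engines() in the diff below:

#include <stdbool.h>
#include <stddef.h>

struct engine_ctx {
	bool has_state;	/* execlists: logical state (with ppGTT) allocated */
	int  pin_count;	/* gen7: ppGTT sits beside the state, matters while pinned */
};

/* The helper takes the skip rule as a parameter and stays agnostic. */
static void barrier_task(struct engine_ctx *ce, size_t count,
			 bool (*skip)(const struct engine_ctx *),
			 void (*emit)(struct engine_ctx *))
{
	for (size_t i = 0; i < count; i++) {
		if (skip && skip(&ce[i]))
			continue;
		emit(&ce[i]);
	}
}

/* Execlists-style: nothing to fix up until the HW context exists. */
static bool skip_without_state(const struct engine_ctx *ce)
{
	return !ce->has_state;
}

/* Gen7-style: only engines with the context currently pinned matter. */
static bool skip_unpinned(const struct engine_ctx *ce)
{
	return ce->pin_count == 0;
}

Either predicate can then be supplied by the caller that knows which
backend it is driving, without the helper growing backend knowledge.
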
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=110836 Fixes: 62c8e423450d ("drm/i915: Skip unused contexts for context_barrier_task()") Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Reviewed-by: Mika Kuoppala Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190604152408.24468-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_context.c | 15 ++++++++++++++- drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c | 19 +++++++++++++++---- 2 files changed, 29 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index 6cac1c144c79..dd9aa77e38ae 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -915,6 +915,7 @@ static void cb_retire(struct i915_active *base) I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault); static int context_barrier_task(struct i915_gem_context *ctx, intel_engine_mask_t engines, + bool (*skip)(struct intel_context *ce, void *data), int (*emit)(struct i915_request *rq, void *data), void (*task)(void *data), void *data) @@ -944,7 +945,10 @@ static int context_barrier_task(struct i915_gem_context *ctx, break; } - if (!(ce->engine->mask & engines) || !ce->state) + if (!(ce->engine->mask & engines)) + continue; + + if (skip && skip(ce, data)) continue; rq = intel_context_create_request(ce); @@ -1071,6 +1075,14 @@ static int emit_ppgtt_update(struct i915_request *rq, void *data) return 0; } +static bool skip_ppgtt_update(struct intel_context *ce, void *data) +{ + if (HAS_LOGICAL_RING_CONTEXTS(ce->engine->i915)) + return !ce->state; + else + return !atomic_read(&ce->pin_count); +} + static int set_ppgtt(struct drm_i915_file_private *file_priv, struct i915_gem_context *ctx, struct drm_i915_gem_context_param *args) @@ -1118,6 +1130,7 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv, * only indirectly through the context. 
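
Put differently: sample the terminal-wedge state on entry, still do the
flush and wait so outstanding requests are retired either way, and fold
the earlier failure into the reported result. A standalone sketch of that
ordering, with stubbed stand-in helpers rather than the driver's real API
(the actual change is the one-liner to switch_to_kernel_context_sync() in
the diff below):

#include <stdbool.h>

/* Stand-ins for illustration only. */
static bool terminally_wedged(void) { return false; }
static int  wait_for_idle(void)     { return 0; }

static bool kernel_context_switch_sketch(void)
{
	/* A wedge that happened before we got here must still be reported. */
	bool result = !terminally_wedged();

	/* Flush and wait regardless, so requests are retired... */
	if (wait_for_idle())
		result = false;	/* ...and a failure during our wait counts too. */

	return result;
}
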
*/ err = context_barrier_task(ctx, ALL_ENGINES, + skip_ppgtt_update, emit_ppgtt_update, set_ppgtt_barrier, old); diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c index 1bc3b8026400..41105f6ed206 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c @@ -1619,6 +1619,11 @@ __engine_name(struct drm_i915_private *i915, intel_engine_mask_t engines) return "none"; } +static bool skip_unused_engines(struct intel_context *ce, void *data) +{ + return !ce->state; +} + static void mock_barrier_task(void *data) { unsigned int *counter = data; @@ -1651,7 +1656,7 @@ static int mock_context_barrier(void *arg) counter = 0; err = context_barrier_task(ctx, 0, - NULL, mock_barrier_task, &counter); + NULL, NULL, mock_barrier_task, &counter); if (err) { pr_err("Failed at line %d, err=%d\n", __LINE__, err); goto out; @@ -1664,7 +1669,10 @@ static int mock_context_barrier(void *arg) counter = 0; err = context_barrier_task(ctx, ALL_ENGINES, - NULL, mock_barrier_task, &counter); + skip_unused_engines, + NULL, + mock_barrier_task, + &counter); if (err) { pr_err("Failed at line %d, err=%d\n", __LINE__, err); goto out; @@ -1685,7 +1693,7 @@ static int mock_context_barrier(void *arg) counter = 0; context_barrier_inject_fault = BIT(RCS0); err = context_barrier_task(ctx, ALL_ENGINES, - NULL, mock_barrier_task, &counter); + NULL, NULL, mock_barrier_task, &counter); context_barrier_inject_fault = 0; if (err == -ENXIO) err = 0; @@ -1700,7 +1708,10 @@ static int mock_context_barrier(void *arg) counter = 0; err = context_barrier_task(ctx, ALL_ENGINES, - NULL, mock_barrier_task, &counter); + skip_unused_engines, + NULL, + mock_barrier_task, + &counter); if (err) { pr_err("Failed at line %d, err=%d\n", __LINE__, err); goto out; -- cgit v1.2.3 From ac543d7145bf3ad13f67a087196e6879e6993aac Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 31 May 2019 12:32:45 +0100 Subject: drm/i915: Report an earlier wedged event when suspending the engines On i915_gem_load_power_context() we do care whether or not we succeed in completing the switch back to the kernel context (via idling the engines). Currently, we detect if an error occurs while we wait, but we do not report one if it occurred beforehand (and the status of the switch is undefined). Check the current terminally wedged status on entering the wait, and report it after flushing the requests, as if it had occurred during our own wait. 
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=110824 Signed-off-by: Chris Wilson Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190531113245.30042-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_pm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c index 89bb6d822f6e..f40f13c0b8b7 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c @@ -90,7 +90,7 @@ static int pm_notifier(struct notifier_block *nb, static bool switch_to_kernel_context_sync(struct drm_i915_private *i915) { - bool result = true; + bool result = !i915_terminally_wedged(i915); do { if (i915_gem_wait_for_idle(i915, -- cgit v1.2.3 From bc7b488b1d1c71dc4c5182206911127bc6c410d6 Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Wed, 5 Jun 2019 16:55:35 -0700 Subject: drm/i915/dmc: protect against reading random memory While loading the DMC firmware we were double checking the headers made sense, but in no place we checked that we were actually reading memory we were supposed to. This could be wrong in case the firmware file is truncated or malformed. Before this patch: # ls -l /lib/firmware/i915/icl_dmc_ver1_07.bin -rw-r--r-- 1 root root 25716 Feb 1 12:26 icl_dmc_ver1_07.bin # truncate -s 25700 /lib/firmware/i915/icl_dmc_ver1_07.bin # modprobe i915 # dmesg| grep -i dmc [drm:intel_csr_ucode_init [i915]] Loading i915/icl_dmc_ver1_07.bin [drm] Finished loading DMC firmware i915/icl_dmc_ver1_07.bin (v1.7) i.e. it loads random data. Now it fails like below: [drm:intel_csr_ucode_init [i915]] Loading i915/icl_dmc_ver1_07.bin [drm:csr_load_work_fn [i915]] *ERROR* Truncated DMC firmware, rejecting. i915 0000:00:02.0: Failed to load DMC firmware i915/icl_dmc_ver1_07.bin. Disabling runtime power management. i915 0000:00:02.0: DMC firmware homepage: https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git/tree/i915 Before reading any part of the firmware file, validate the input first. Fixes: eb805623d8b1 ("drm/i915/skl: Add support to load SKL CSR firmware.") Cc: stable@vger.kernel.org Signed-off-by: Lucas De Marchi Reviewed-by: Rodrigo Vivi Link: https://patchwork.freedesktop.org/patch/msgid/20190605235535.17791-1-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/intel_csr.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c index 4527b9662330..bf0eebd385b9 100644 --- a/drivers/gpu/drm/i915/intel_csr.c +++ b/drivers/gpu/drm/i915/intel_csr.c @@ -303,10 +303,17 @@ static u32 *parse_csr_fw(struct drm_i915_private *dev_priv, u32 dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes; u32 i; u32 *dmc_payload; + size_t fsize; if (!fw) return NULL; + fsize = sizeof(struct intel_css_header) + + sizeof(struct intel_package_header) + + sizeof(struct intel_dmc_header); + if (fsize > fw->size) + goto error_truncated; + /* Extract CSS Header information*/ css_header = (struct intel_css_header *)fw->data; if (sizeof(struct intel_css_header) != @@ -366,6 +373,9 @@ static u32 *parse_csr_fw(struct drm_i915_private *dev_priv, /* Convert dmc_offset into number of bytes. By default it is in dwords*/ dmc_offset *= 4; readcount += dmc_offset; + fsize += dmc_offset; + if (fsize > fw->size) + goto error_truncated; /* Extract dmc_header information. 
*/ dmc_header = (struct intel_dmc_header *)&fw->data[readcount]; @@ -397,6 +407,10 @@ static u32 *parse_csr_fw(struct drm_i915_private *dev_priv, /* fw_size is in dwords, so multiplied by 4 to convert into bytes. */ nbytes = dmc_header->fw_size * 4; + fsize += nbytes; + if (fsize > fw->size) + goto error_truncated; + if (nbytes > csr->max_fw_size) { DRM_ERROR("DMC FW too big (%u bytes)\n", nbytes); return NULL; @@ -410,6 +424,10 @@ static u32 *parse_csr_fw(struct drm_i915_private *dev_priv, } return memcpy(dmc_payload, &fw->data[readcount], nbytes); + +error_truncated: + DRM_ERROR("Truncated DMC firmware, rejecting.\n"); + return NULL; } static void intel_csr_runtime_pm_get(struct drm_i915_private *dev_priv) -- cgit v1.2.3 From b61ea001b2df056ca2088ee8c3bf5c9fd01593c4 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Fri, 7 Jun 2019 09:25:53 +0100 Subject: drm/i915: Reset only affected engines when handling error capture Pass down the engine mask to i915_clear_error_registers so only affected engines can be reset on the Gen6/7 path. Signed-off-by: Tvrtko Ursulin Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190607082557.31670-1-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/gt/intel_reset.c | 7 ++++--- drivers/gpu/drm/i915/gt/intel_reset.h | 3 ++- drivers/gpu/drm/i915/i915_gem_gtt.c | 2 +- 3 files changed, 7 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index 377bc546a68f..7bfb76eb0291 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -1160,7 +1160,8 @@ static void clear_register(struct intel_uncore *uncore, i915_reg_t reg) intel_uncore_rmw(uncore, reg, 0, 0); } -void i915_clear_error_registers(struct drm_i915_private *i915) +void i915_clear_error_registers(struct drm_i915_private *i915, + intel_engine_mask_t engine_mask) { struct intel_uncore *uncore = &i915->uncore; u32 eir; @@ -1193,7 +1194,7 @@ void i915_clear_error_registers(struct drm_i915_private *i915) struct intel_engine_cs *engine; enum intel_engine_id id; - for_each_engine(engine, i915, id) { + for_each_engine_masked(engine, i915, engine_mask, id) { rmw_clear(uncore, RING_FAULT_REG(engine), RING_FAULT_VALID); intel_uncore_posting_read(uncore, @@ -1250,7 +1251,7 @@ void i915_handle_error(struct drm_i915_private *i915, if (flags & I915_ERROR_CAPTURE) { i915_capture_error_state(i915, engine_mask, msg); - i915_clear_error_registers(i915); + i915_clear_error_registers(i915, engine_mask); } /* diff --git a/drivers/gpu/drm/i915/gt/intel_reset.h b/drivers/gpu/drm/i915/gt/intel_reset.h index b52efaab4941..4f3c1acac1a3 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.h +++ b/drivers/gpu/drm/i915/gt/intel_reset.h @@ -25,7 +25,8 @@ void i915_handle_error(struct drm_i915_private *i915, const char *fmt, ...); #define I915_ERROR_CAPTURE BIT(0) -void i915_clear_error_registers(struct drm_i915_private *i915); +void i915_clear_error_registers(struct drm_i915_private *i915, + intel_engine_mask_t engine_mask); void i915_reset(struct drm_i915_private *i915, intel_engine_mask_t stalled_mask, diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 6fcf702d7ec1..c5a94396024f 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -2359,7 +2359,7 @@ void i915_check_and_clear_faults(struct drm_i915_private *dev_priv) else return; - i915_clear_error_registers(dev_priv); + 
i915_clear_error_registers(dev_priv, ALL_ENGINES); } void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv) -- cgit v1.2.3 From 7c53c48d01a3401724ae1d58c6e707562f5437b1 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Fri, 7 Jun 2019 09:25:54 +0100 Subject: drm/i915: Tidy engine mask types in hangcheck We can use intel_engine_mask_t to align with the rest of the codebase. Signed-off-by: Tvrtko Ursulin Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190607082557.31670-2-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/gt/intel_hangcheck.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_hangcheck.c b/drivers/gpu/drm/i915/gt/intel_hangcheck.c index 3a4d09b80fa0..174bb0a60309 100644 --- a/drivers/gpu/drm/i915/gt/intel_hangcheck.c +++ b/drivers/gpu/drm/i915/gt/intel_hangcheck.c @@ -223,8 +223,8 @@ static void hangcheck_accumulate_sample(struct intel_engine_cs *engine, } static void hangcheck_declare_hang(struct drm_i915_private *i915, - unsigned int hung, - unsigned int stuck) + intel_engine_mask_t hung, + intel_engine_mask_t stuck) { struct intel_engine_cs *engine; intel_engine_mask_t tmp; @@ -259,9 +259,9 @@ static void i915_hangcheck_elapsed(struct work_struct *work) struct drm_i915_private *dev_priv = container_of(work, typeof(*dev_priv), gpu_error.hangcheck_work.work); + intel_engine_mask_t hung = 0, stuck = 0, wedged = 0; struct intel_engine_cs *engine; enum intel_engine_id id; - unsigned int hung = 0, stuck = 0, wedged = 0; intel_wakeref_t wakeref; if (!i915_modparams.enable_hangcheck) -- cgit v1.2.3 From 77a302e04312118067ae790b6fc7e51eff403d86 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Fri, 7 Jun 2019 11:15:35 +0100 Subject: drm/i915: Make Gen6/7 RING_FAULT_REG access engine centric Similar to earlier conversions, eliminate the implicit dev_priv by introducing some helpers which take the engine parameter (since the register itself is per engine). v2: * Always use parentheses in macro arguments. Signed-off-by: Tvrtko Ursulin Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190607101535.767-1-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/gt/intel_engine.h | 18 ++++++++++++++++++ drivers/gpu/drm/i915/gt/intel_reset.c | 6 ++---- drivers/gpu/drm/i915/i915_gem_gtt.c | 2 +- drivers/gpu/drm/i915/i915_gpu_error.c | 2 +- 4 files changed, 22 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h index 1c0db151f0b1..201bbd2a4faf 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine.h +++ b/drivers/gpu/drm/i915/gt/intel_engine.h @@ -68,6 +68,24 @@ struct drm_printer; #define ENGINE_WRITE(...) __ENGINE_WRITE_OP(write, __VA_ARGS__) #define ENGINE_WRITE_FW(...) 
__ENGINE_WRITE_OP(write_fw, __VA_ARGS__) +#define GEN6_RING_FAULT_REG_READ(engine__) \ + intel_uncore_read((engine__)->uncore, RING_FAULT_REG(engine__)) + +#define GEN6_RING_FAULT_REG_POSTING_READ(engine__) \ + intel_uncore_posting_read((engine__)->uncore, RING_FAULT_REG(engine__)) + +#define GEN6_RING_FAULT_REG_RMW(engine__, clear__, set__) \ +({ \ + u32 __val; \ +\ + __val = intel_uncore_read((engine__)->uncore, \ + RING_FAULT_REG(engine__)); \ + __val &= ~(clear__); \ + __val |= (set__); \ + intel_uncore_write((engine__)->uncore, RING_FAULT_REG(engine__), \ + __val); \ +}) + /* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to * do the writes, and that must have qw aligned offsets, simply pretend it's 8b. */ diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index 7bfb76eb0291..de53927c583f 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -1195,10 +1195,8 @@ void i915_clear_error_registers(struct drm_i915_private *i915, enum intel_engine_id id; for_each_engine_masked(engine, i915, engine_mask, id) { - rmw_clear(uncore, - RING_FAULT_REG(engine), RING_FAULT_VALID); - intel_uncore_posting_read(uncore, - RING_FAULT_REG(engine)); + GEN6_RING_FAULT_REG_RMW(engine, RING_FAULT_VALID, 0); + GEN6_RING_FAULT_REG_POSTING_READ(engine); } } } diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index c5a94396024f..c82d8e3ac9df 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -2306,7 +2306,7 @@ static void gen6_check_faults(struct drm_i915_private *dev_priv) u32 fault; for_each_engine(engine, dev_priv, id) { - fault = I915_READ(RING_FAULT_REG(engine)); + fault = GEN6_RING_FAULT_REG_READ(engine); if (fault & RING_FAULT_VALID) { DRM_DEBUG_DRIVER("Unexpected fault\n" "\tAddr: 0x%08lx\n" diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 707811256501..2f85de034d8f 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -1149,7 +1149,7 @@ static void error_record_engine_registers(struct i915_gpu_state *error, if (INTEL_GEN(dev_priv) >= 8) ee->fault_reg = I915_READ(GEN8_RING_FAULT_REG); else - ee->fault_reg = I915_READ(RING_FAULT_REG(engine)); + ee->fault_reg = GEN6_RING_FAULT_REG_READ(engine); } if (INTEL_GEN(dev_priv) >= 4) { -- cgit v1.2.3 From f736ae1b10319b087bc10c59b3a2eecec56db533 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Fri, 7 Jun 2019 09:25:56 +0100 Subject: drm/i915: Extract engine fault reset to a helper Just tidying the flow a bit. 
Signed-off-by: Tvrtko Ursulin Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190607082557.31670-4-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/gt/intel_reset.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index de53927c583f..a6ecfdc735c4 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -1160,6 +1160,12 @@ static void clear_register(struct intel_uncore *uncore, i915_reg_t reg) intel_uncore_rmw(uncore, reg, 0, 0); } +static void gen8_clear_engine_error_register(struct intel_engine_cs *engine) +{ + GEN6_RING_FAULT_REG_RMW(engine, RING_FAULT_VALID, 0); + GEN6_RING_FAULT_REG_POSTING_READ(engine); +} + void i915_clear_error_registers(struct drm_i915_private *i915, intel_engine_mask_t engine_mask) { @@ -1194,10 +1200,8 @@ void i915_clear_error_registers(struct drm_i915_private *i915, struct intel_engine_cs *engine; enum intel_engine_id id; - for_each_engine_masked(engine, i915, engine_mask, id) { - GEN6_RING_FAULT_REG_RMW(engine, RING_FAULT_VALID, 0); - GEN6_RING_FAULT_REG_POSTING_READ(engine); - } + for_each_engine_masked(engine, i915, engine_mask, id) + gen8_clear_engine_error_register(engine); } } -- cgit v1.2.3 From bcc726bea2cd9a3f222736a038e6b1a952bcb426 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Fri, 7 Jun 2019 09:25:57 +0100 Subject: drm/i915: Unexport i915_gem_init/fini_aliasing_ppgtt These two are only used from within i915_gem_gtt.c and can trivially be made static. Signed-off-by: Tvrtko Ursulin Reviewed-by: Rodrigo Vivi Link: https://patchwork.freedesktop.org/patch/msgid/20190607082557.31670-5-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/i915_gem_gtt.c | 8 ++++---- drivers/gpu/drm/i915/i915_gem_gtt.h | 3 --- 2 files changed, 4 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index c82d8e3ac9df..550cf4b63586 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -2814,7 +2814,7 @@ static void i915_gtt_color_adjust(const struct drm_mm_node *node, *end -= I915_GTT_PAGE_SIZE; } -int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915) +static int init_aliasing_ppgtt(struct drm_i915_private *i915) { struct i915_ggtt *ggtt = &i915->ggtt; struct i915_hw_ppgtt *ppgtt; @@ -2854,7 +2854,7 @@ err_ppgtt: return err; } -void i915_gem_fini_aliasing_ppgtt(struct drm_i915_private *i915) +static void fini_aliasing_ppgtt(struct drm_i915_private *i915) { struct i915_ggtt *ggtt = &i915->ggtt; struct i915_hw_ppgtt *ppgtt; @@ -2924,7 +2924,7 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv) ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE); if (INTEL_PPGTT(dev_priv) == INTEL_PPGTT_ALIASING) { - ret = i915_gem_init_aliasing_ppgtt(dev_priv); + ret = init_aliasing_ppgtt(dev_priv); if (ret) goto err_appgtt; } @@ -2951,7 +2951,7 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv) ggtt->vm.closed = true; mutex_lock(&dev_priv->drm.struct_mutex); - i915_gem_fini_aliasing_ppgtt(dev_priv); + fini_aliasing_ppgtt(dev_priv); list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) WARN_ON(i915_vma_unbind(vma)); diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index 152a03560c22..12856f9dd1d1 100644 --- 
a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -617,9 +617,6 @@ const struct intel_ppat_entry * intel_ppat_get(struct drm_i915_private *i915, u8 value); void intel_ppat_put(const struct intel_ppat_entry *entry); -int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915); -void i915_gem_fini_aliasing_ppgtt(struct drm_i915_private *i915); - int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv); int i915_ggtt_init_hw(struct drm_i915_private *dev_priv); int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv); -- cgit v1.2.3 From dbc651836372a7b3aaebe2f924db872c5d0804df Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Fri, 7 Jun 2019 09:45:20 +0100 Subject: drm/i915: Convert some more bits to use engine mmio accessors Remove a couple dev_priv locals as a consequence. Signed-off-by: Tvrtko Ursulin Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190607084521.16845-1-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/gt/intel_lrc.c | 27 +++++++++++++-------------- drivers/gpu/drm/i915/i915_gem_gtt.c | 5 +++-- drivers/gpu/drm/i915/i915_gpu_error.c | 2 +- drivers/gpu/drm/i915/i915_reg.h | 2 +- drivers/gpu/drm/i915/intel_guc_submission.c | 4 ++-- 5 files changed, 20 insertions(+), 20 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index fed704802c57..f27b6c002627 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -2021,31 +2021,30 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine) static void enable_execlists(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->i915; - intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */ - if (INTEL_GEN(dev_priv) >= 11) - I915_WRITE(RING_MODE_GEN7(engine), - _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE)); + if (INTEL_GEN(engine->i915) >= 11) + ENGINE_WRITE(engine, + RING_MODE_GEN7, + _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE)); else - I915_WRITE(RING_MODE_GEN7(engine), - _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)); + ENGINE_WRITE(engine, + RING_MODE_GEN7, + _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)); - I915_WRITE(RING_MI_MODE(engine->mmio_base), - _MASKED_BIT_DISABLE(STOP_RING)); + ENGINE_WRITE(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING)); - I915_WRITE(RING_HWS_PGA(engine->mmio_base), - i915_ggtt_offset(engine->status_page.vma)); - POSTING_READ(RING_HWS_PGA(engine->mmio_base)); + ENGINE_WRITE(engine, + RING_HWS_PGA, + i915_ggtt_offset(engine->status_page.vma)); + ENGINE_POSTING_READ(engine, RING_HWS_PGA); } static bool unexpected_starting_state(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->i915; bool unexpected = false; - if (I915_READ(RING_MI_MODE(engine->mmio_base)) & STOP_RING) { + if (ENGINE_READ(engine, RING_MI_MODE) & STOP_RING) { DRM_DEBUG_DRIVER("STOP_RING still set in RING_MI_MODE\n"); unexpected = true; } diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 550cf4b63586..87be9c1b6021 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -1713,8 +1713,9 @@ static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv) for_each_engine(engine, dev_priv, id) { /* GFX_MODE is per-ring on gen7+ */ - I915_WRITE(RING_MODE_GEN7(engine), - _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); + ENGINE_WRITE(engine, + RING_MODE_GEN7, + _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); } } diff --git 
a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 2f85de034d8f..193a93857d99 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -1219,7 +1219,7 @@ static void error_record_engine_registers(struct i915_gpu_state *error, if (HAS_PPGTT(dev_priv)) { int i; - ee->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine)); + ee->vm_info.gfx_mode = ENGINE_READ(engine, RING_MODE_GEN7); if (IS_GEN(dev_priv, 6)) { ee->vm_info.pp_dir_base = diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index b7c13d5deb15..8778f56a034c 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -2698,7 +2698,7 @@ enum i915_power_well_id { #define GFX_MODE _MMIO(0x2520) #define GFX_MODE_GEN7 _MMIO(0x229c) -#define RING_MODE_GEN7(engine) _MMIO((engine)->mmio_base + 0x29c) +#define RING_MODE_GEN7(base) _MMIO((base) + 0x29c) #define GFX_RUN_LIST_ENABLE (1 << 15) #define GFX_INTERRUPT_STEERING (1 << 14) #define GFX_TLB_INVALIDATE_EXPLICIT (1 << 13) diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c index a4f98ccef0fe..89592ef778b8 100644 --- a/drivers/gpu/drm/i915/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/intel_guc_submission.c @@ -1306,7 +1306,7 @@ static void guc_interrupts_capture(struct drm_i915_private *dev_priv) */ irqs = _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING); for_each_engine(engine, dev_priv, id) - I915_WRITE(RING_MODE_GEN7(engine), irqs); + ENGINE_WRITE(engine, RING_MODE_GEN7, irqs); /* route USER_INTERRUPT to Host, all others are sent to GuC. */ irqs = GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | @@ -1353,7 +1353,7 @@ static void guc_interrupts_release(struct drm_i915_private *dev_priv) irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER); irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING); for_each_engine(engine, dev_priv, id) - I915_WRITE(RING_MODE_GEN7(engine), irqs); + ENGINE_WRITE(engine, RING_MODE_GEN7, irqs); /* route all GT interrupts to the host */ I915_WRITE(GUC_BCS_RCS_IER, 0); -- cgit v1.2.3 From f6e903db89bce04426e9ec6bc9c3d5e0cce382b9 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Fri, 7 Jun 2019 09:45:21 +0100 Subject: drm/i915: Tidy intel_execlists_submission_init Get to uncore from the engine for better logic organization and use already available i915 everywhere. 
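Condensed, the tidy-up is just hoisting the already-available pointers to the top of the function and using them consistently; this is a summary of the hunk below, not a separate change:

	struct intel_engine_execlists * const execlists = &engine->execlists;
	struct drm_i915_private *i915 = engine->i915;
	struct intel_uncore *uncore = engine->uncore;
	/* ... */
	execlists->submit_reg = uncore->regs +
		i915_mmio_reg_offset(RING_ELSP(base));
	/* ... */
	if (INTEL_GEN(i915) < 11)
		execlists->csb_size = GEN8_CSB_ENTRIES;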
Signed-off-by: Tvrtko Ursulin Suggested-by: Rodrigo Vivi Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190607084521.16845-2-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/gt/intel_lrc.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index f27b6c002627..497ac036c4a9 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -2738,8 +2738,9 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine) int intel_execlists_submission_init(struct intel_engine_cs *engine) { - struct drm_i915_private *i915 = engine->i915; struct intel_engine_execlists * const execlists = &engine->execlists; + struct drm_i915_private *i915 = engine->i915; + struct intel_uncore *uncore = engine->uncore; u32 base = engine->mmio_base; int ret; @@ -2759,12 +2760,12 @@ int intel_execlists_submission_init(struct intel_engine_cs *engine) DRM_ERROR("WA batch buffer initialization failed\n"); if (HAS_LOGICAL_RING_ELSQ(i915)) { - execlists->submit_reg = i915->uncore.regs + + execlists->submit_reg = uncore->regs + i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(base)); - execlists->ctrl_reg = i915->uncore.regs + + execlists->ctrl_reg = uncore->regs + i915_mmio_reg_offset(RING_EXECLIST_CONTROL(base)); } else { - execlists->submit_reg = i915->uncore.regs + + execlists->submit_reg = uncore->regs + i915_mmio_reg_offset(RING_ELSP(base)); } @@ -2779,7 +2780,7 @@ int intel_execlists_submission_init(struct intel_engine_cs *engine) execlists->csb_write = &engine->status_page.addr[intel_hws_csb_write_index(i915)]; - if (INTEL_GEN(engine->i915) < 11) + if (INTEL_GEN(i915) < 11) execlists->csb_size = GEN8_CSB_ENTRIES; else execlists->csb_size = GEN11_CSB_ENTRIES; -- cgit v1.2.3 From 97068c1b905d6f3d901b0afc8360e98a5dbea2fb Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Tue, 4 Jun 2019 17:02:13 +0300 Subject: drm/i915: Move intel_dp->prepare_link_train assignment into ddi code MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It's a bit silly to go through intel_dp.c to assign the prepare_link_train vfunc for ddi platforms when we can just assign it directly from intel_ddi.c. 
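Put differently, the hook is now wired up where the DDI DP connector is created rather than in the generic DP init path, while the HAS_DDI() special case in intel_dp_init_connector() goes away. Roughly, mirroring the hunks below:

	/* intel_ddi.c: intel_ddi_init_dp_connector() */
	intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
	intel_dig_port->dp.prepare_link_retrain =
		intel_ddi_prepare_link_retrain;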
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190604140214.9947-1-ville.syrjala@linux.intel.com Reviewed-by: Manasi Navare --- drivers/gpu/drm/i915/intel_ddi.c | 5 ++++- drivers/gpu/drm/i915/intel_ddi.h | 1 - drivers/gpu/drm/i915/intel_dp.c | 4 ---- 3 files changed, 4 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 350eaf54f01f..609ccd240efb 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -3650,7 +3650,7 @@ intel_ddi_post_pll_disable(struct intel_encoder *encoder, intel_ddi_main_link_aux_domain(dig_port)); } -void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp) +static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_i915_private *dev_priv = @@ -3958,6 +3958,9 @@ intel_ddi_init_dp_connector(struct intel_digital_port *intel_dig_port) return NULL; intel_dig_port->dp.output_reg = DDI_BUF_CTL(port); + intel_dig_port->dp.prepare_link_retrain = + intel_ddi_prepare_link_retrain; + if (!intel_dp_init_connector(intel_dig_port, connector)) { kfree(connector); return NULL; diff --git a/drivers/gpu/drm/i915/intel_ddi.h b/drivers/gpu/drm/i915/intel_ddi.h index 9cf69175942e..a08365da2643 100644 --- a/drivers/gpu/drm/i915/intel_ddi.h +++ b/drivers/gpu/drm/i915/intel_ddi.h @@ -31,7 +31,6 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state); void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state); void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state); -void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp); bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector); void intel_ddi_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config); diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index b099a9dc28fd..98410fd2af85 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -7349,10 +7349,6 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, intel_dp->pps_pipe = INVALID_PIPE; intel_dp->active_pipe = INVALID_PIPE; - /* intel_dp vfuncs */ - if (HAS_DDI(dev_priv)) - intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain; - /* Preserve the current hw state. */ intel_dp->DP = I915_READ(intel_dp->output_reg); intel_dp->attached_connector = intel_connector; -- cgit v1.2.3 From 0502a1af95a1a5611493052da1a56403b3a5f6fe Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Tue, 4 Jun 2019 17:02:14 +0300 Subject: drm/i915: Drop pointless WARN_ON MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit intel_dp_link_down() is static and it's only called from the pre-ddi DP functions, so having a WARN_ON(HAS_DDI) in there is quite pointless. Remove it. 
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190604140214.9947-2-ville.syrjala@linux.intel.com Reviewed-by: Manasi Navare --- drivers/gpu/drm/i915/intel_dp.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 98410fd2af85..3aef12041672 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -3995,9 +3995,6 @@ intel_dp_link_down(struct intel_encoder *encoder, enum port port = encoder->port; u32 DP = intel_dp->DP; - if (WARN_ON(HAS_DDI(dev_priv))) - return; - if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0)) return; -- cgit v1.2.3 From 38f300410f3e15b6fec76c8d8baed7111b5ea4e4 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Mon, 3 Jun 2019 17:25:00 +0300 Subject: drm/i915: Fix per-pixel alpha with CCS MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We forgot to set .has_alpha=true for the A+CCS formats when the code started to consult .has_alpha. This manifests as A+CCS being treated as X+CCS which means no per-pixel alpha blending. Fix the format list appropriately. Cc: stable@vger.kernel.org Cc: Maarten Lankhorst Cc: Matt Roper Cc: Heinrich Fink Reported-by: Heinrich Fink Tested-by: Heinrich Fink Fixes: b20815255693 ("drm/i915: Add plane alpha blending support, v2.") Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190603142500.25680-1-ville.syrjala@linux.intel.com Reviewed-by: Maarten Lankhorst --- drivers/gpu/drm/i915/intel_display.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 1560030563b6..62fa573f90e8 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2463,10 +2463,14 @@ static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier) * main surface. */ static const struct drm_format_info ccs_formats[] = { - { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, }, - { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, }, - { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, }, - { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, }, + { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, + .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, }, + { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, + .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, }, + { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, + .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, }, + { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, + .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, }, }; static const struct drm_format_info * -- cgit v1.2.3 From cc54d5e8d816d374360b5e8911edacd955040c63 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Tue, 9 Apr 2019 17:40:48 +0300 Subject: drm/i915/sdvo: Fix AVI infoframe TX rate readout MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The AVI infoframe readout code currently issues a SDVO_CMD_GET_HBUF_TXRATE before SDVO_CMD_SET_HBUF_INDEX, which is not the correct order for these two operations. 
So far this wasn't a problem since we left the index pointing at the AVI infoframe buffer at the end of the modeset. However once we start to write to other buffers (namely ELD) that is no longer going to be true. Fix up the order so that we always read out the TX rate for the correct buffer. Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190409144054.24561-2-ville.syrjala@linux.intel.com Reviewed-by: Chris Wilson --- drivers/gpu/drm/i915/intel_sdvo.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 6c98b35790db..4cf48314889f 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -1005,6 +1005,11 @@ static ssize_t intel_sdvo_read_infoframe(struct intel_sdvo *intel_sdvo, if (av_split < if_index) return 0; + if (!intel_sdvo_set_value(intel_sdvo, + SDVO_CMD_SET_HBUF_INDEX, + set_buf_index, 2)) + return -ENXIO; + if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HBUF_TXRATE, &tx_rate, 1)) @@ -1013,11 +1018,6 @@ static ssize_t intel_sdvo_read_infoframe(struct intel_sdvo *intel_sdvo, if (tx_rate == SDVO_HBUF_TX_DISABLED) return 0; - if (!intel_sdvo_set_value(intel_sdvo, - SDVO_CMD_SET_HBUF_INDEX, - set_buf_index, 2)) - return -ENXIO; - if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HBUF_INFO, &hbuf_size, 1)) return -ENXIO; -- cgit v1.2.3 From dc49a56bd43bb04982e64b44436831da801d0237 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Tue, 9 Apr 2019 17:40:49 +0300 Subject: drm/i915/sdvo: Implement proper HDMI audio support for SDVO MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Our SDVO audio support is pretty bogus. We can't push audio over the SDVO bus, so trying to enable audio in the SDVO control register doesn't do anything. In fact it looks like the SDVO encoder will always mix in the audio coming over HDA, and there's no (at least documented) way to disable that from our side. So HDMI audio does work currently on gen4 but only by luck really. On gen3 it got broken by the referenced commit. And what has always been missing on every platform is the ELD. To pass the ELD to the audio driver we need to write it to magic buffer in the SDVO encoder hardware which then gets pulled out via HDA in the other end. Ie. pretty much the same thing we had for native HDMI before we started to just pass the ELD between the drivers. This sort of explains why we even have that silly hardware buffer with native HDMI. $ cat /proc/asound/card0/eld#1.0 -monitor_present 0 -eld_valid 0 +monitor_present 1 +eld_valid 1 +monitor_name LG TV +connection_type HDMI +... This also fixes our state readout since we can now query the SDVO encoder about the state of the "ELD valid" and "presence detect" bits. As mentioned those don't actually control whether audio gets sent over the HDMI cable, but it's the best we can do. And with the state checker appeased we can re-enable HDMI audio for gen3. 
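The enable path added below boils down to: clear the audio state, write the connector's ELD into the dedicated hardware buffer slot, then mark it valid so the audio driver can pull it out over HDA. Condensed from the hunk that follows (the actual patch also patches the AV sync delay into eld[6]):

	/* inside intel_sdvo_enable_audio(), eld == conn_state->connector->eld */
	intel_sdvo_set_audio_state(intel_sdvo, 0);
	intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_ELD,
				   SDVO_HBUF_TX_DISABLED,
				   eld, drm_eld_size(eld));
	intel_sdvo_set_audio_state(intel_sdvo, SDVO_AUDIO_ELD_VALID |
				   SDVO_AUDIO_PRESENCE_DETECT);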
Cc: stable@vger.kernel.org Cc: Daniel Vetter Cc: zardam@gmail.com Tested-by: zardam@gmail.com Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=108976 Fixes: de44e256b92c ("drm/i915/sdvo: Shut up state checker with hdmi cards on gen3") Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190409144054.24561-3-ville.syrjala@linux.intel.com Reviewed-by: Imre Deak --- drivers/gpu/drm/i915/intel_sdvo.c | 58 +++++++++++++++++++++++++++------- drivers/gpu/drm/i915/intel_sdvo_regs.h | 3 ++ 2 files changed, 50 insertions(+), 11 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 4cf48314889f..899f8278d29c 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -920,6 +920,13 @@ static bool intel_sdvo_set_colorimetry(struct intel_sdvo *intel_sdvo, return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_COLORIMETRY, &mode, 1); } +static bool intel_sdvo_set_audio_state(struct intel_sdvo *intel_sdvo, + u8 audio_state) +{ + return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_AUDIO_STAT, + &audio_state, 1); +} + #if 0 static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo) { @@ -1491,11 +1498,6 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder, else sdvox |= SDVO_PIPE_SEL(crtc->pipe); - if (crtc_state->has_audio) { - WARN_ON_ONCE(INTEL_GEN(dev_priv) < 4); - sdvox |= SDVO_AUDIO_ENABLE; - } - if (INTEL_GEN(dev_priv) >= 4) { /* done in crtc_mode_set as the dpll_md reg must be written early */ } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) || @@ -1639,8 +1641,13 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder, if (sdvox & HDMI_COLOR_RANGE_16_235) pipe_config->limited_color_range = true; - if (sdvox & SDVO_AUDIO_ENABLE) - pipe_config->has_audio = true; + if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_AUDIO_STAT, + &val, 1)) { + u8 mask = SDVO_AUDIO_ELD_VALID | SDVO_AUDIO_PRESENCE_DETECT; + + if ((val & mask) == mask) + pipe_config->has_audio = true; + } if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE, &val, 1)) { @@ -1651,6 +1658,32 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder, intel_sdvo_get_avi_infoframe(intel_sdvo, pipe_config); } +static void intel_sdvo_disable_audio(struct intel_sdvo *intel_sdvo) +{ + intel_sdvo_set_audio_state(intel_sdvo, 0); +} + +static void intel_sdvo_enable_audio(struct intel_sdvo *intel_sdvo, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + const struct drm_display_mode *adjusted_mode = + &crtc_state->base.adjusted_mode; + struct drm_connector *connector = conn_state->connector; + u8 *eld = connector->eld; + + eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2; + + intel_sdvo_set_audio_state(intel_sdvo, 0); + + intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_ELD, + SDVO_HBUF_TX_DISABLED, + eld, drm_eld_size(eld)); + + intel_sdvo_set_audio_state(intel_sdvo, SDVO_AUDIO_ELD_VALID | + SDVO_AUDIO_PRESENCE_DETECT); +} + static void intel_disable_sdvo(struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *conn_state) @@ -1660,6 +1693,9 @@ static void intel_disable_sdvo(struct intel_encoder *encoder, struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); u32 temp; + if (old_crtc_state->has_audio) + intel_sdvo_disable_audio(intel_sdvo); + intel_sdvo_set_active_outputs(intel_sdvo, 0); if (0) 
intel_sdvo_set_encoder_power_state(intel_sdvo, @@ -1745,6 +1781,9 @@ static void intel_enable_sdvo(struct intel_encoder *encoder, intel_sdvo_set_encoder_power_state(intel_sdvo, DRM_MODE_DPMS_ON); intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output); + + if (pipe_config->has_audio) + intel_sdvo_enable_audio(intel_sdvo, pipe_config, conn_state); } static enum drm_mode_status @@ -2607,7 +2646,6 @@ static bool intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) { struct drm_encoder *encoder = &intel_sdvo->base.base; - struct drm_i915_private *dev_priv = to_i915(encoder->dev); struct drm_connector *connector; struct intel_encoder *intel_encoder = to_intel_encoder(encoder); struct intel_connector *intel_connector; @@ -2644,9 +2682,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) encoder->encoder_type = DRM_MODE_ENCODER_TMDS; connector->connector_type = DRM_MODE_CONNECTOR_DVID; - /* gen3 doesn't do the hdmi bits in the SDVO register */ - if (INTEL_GEN(dev_priv) >= 4 && - intel_sdvo_is_hdmi_connector(intel_sdvo, device)) { + if (intel_sdvo_is_hdmi_connector(intel_sdvo, device)) { connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; intel_sdvo_connector->is_hdmi = true; } diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h index db0ed499268a..e9ba3b047f93 100644 --- a/drivers/gpu/drm/i915/intel_sdvo_regs.h +++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h @@ -707,6 +707,9 @@ struct intel_sdvo_enhancements_arg { #define SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER 0x90 #define SDVO_CMD_SET_AUDIO_STAT 0x91 #define SDVO_CMD_GET_AUDIO_STAT 0x92 + #define SDVO_AUDIO_ELD_VALID (1 << 0) + #define SDVO_AUDIO_PRESENCE_DETECT (1 << 1) + #define SDVO_AUDIO_CP_READY (1 << 2) #define SDVO_CMD_SET_HBUF_INDEX 0x93 #define SDVO_HBUF_INDEX_ELD 0 #define SDVO_HBUF_INDEX_AVI_IF 1 -- cgit v1.2.3 From dd6090f8780a213dfcb6ba5718c4e1057d69cb0c Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Tue, 9 Apr 2019 17:40:50 +0300 Subject: drm/i915: Rename SDVO_AUDIO_ENABLE to HDMI_AUDIO_ENABLE MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The "audio enable" bit on the SDVO/HDMI control register is only meant for HDMI. Audio is never delivered over the SDVO bus. Rename the define to reflect this fact. 
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190409144054.24561-4-ville.syrjala@linux.intel.com Reviewed-by: Chris Wilson --- drivers/gpu/drm/i915/i915_reg.h | 2 +- drivers/gpu/drm/i915/intel_hdmi.c | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 8778f56a034c..dcf3c4d8d827 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -4557,7 +4557,7 @@ enum { #define HDMI_MODE_SELECT_HDMI (1 << 9) /* HDMI only */ #define HDMI_MODE_SELECT_DVI (0 << 9) /* HDMI only */ #define HDMI_COLOR_RANGE_16_235 (1 << 8) /* HDMI only */ -#define SDVO_AUDIO_ENABLE (1 << 6) +#define HDMI_AUDIO_ENABLE (1 << 6) /* HDMI only */ /* VSYNC/HSYNC bits new with 965, default is to be set */ #define SDVO_VSYNC_ACTIVE_HIGH (1 << 4) #define SDVO_HSYNC_ACTIVE_HIGH (1 << 3) diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 097bfa504ece..187a2b828b97 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -1807,7 +1807,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder, if (pipe_config->infoframes.enable) pipe_config->has_infoframe = true; - if (tmp & SDVO_AUDIO_ENABLE) + if (tmp & HDMI_AUDIO_ENABLE) pipe_config->has_audio = true; if (!HAS_PCH_SPLIT(dev_priv) && @@ -1866,7 +1866,7 @@ static void g4x_enable_hdmi(struct intel_encoder *encoder, temp |= SDVO_ENABLE; if (pipe_config->has_audio) - temp |= SDVO_AUDIO_ENABLE; + temp |= HDMI_AUDIO_ENABLE; I915_WRITE(intel_hdmi->hdmi_reg, temp); POSTING_READ(intel_hdmi->hdmi_reg); @@ -1888,7 +1888,7 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder, temp |= SDVO_ENABLE; if (pipe_config->has_audio) - temp |= SDVO_AUDIO_ENABLE; + temp |= HDMI_AUDIO_ENABLE; /* * HW workaround, need to write this twice for issue @@ -1940,7 +1940,7 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder, temp |= SDVO_ENABLE; if (pipe_config->has_audio) - temp |= SDVO_AUDIO_ENABLE; + temp |= HDMI_AUDIO_ENABLE; /* * WaEnableHDMI8bpcBefore12bpc:snb,ivb @@ -2000,7 +2000,7 @@ static void intel_disable_hdmi(struct intel_encoder *encoder, temp = I915_READ(intel_hdmi->hdmi_reg); - temp &= ~(SDVO_ENABLE | SDVO_AUDIO_ENABLE); + temp &= ~(SDVO_ENABLE | HDMI_AUDIO_ENABLE); I915_WRITE(intel_hdmi->hdmi_reg, temp); POSTING_READ(intel_hdmi->hdmi_reg); -- cgit v1.2.3 From b5716a4efe5cbaabcea2027656367ac5ba5addf9 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 10 Apr 2019 20:08:35 +0300 Subject: drm/i915/sdvo: Check that we have space for the infoframe MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Before we go writing the infoframe let's make sure we have the space for it. Not that it really matters since the write loop would just terminate early in that case. 
v2: Check after the debug print and ++ (Chris) Signed-off-by: Ville Syrjälä Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190410170835.18867-1-ville.syrjala@linux.intel.com Reviewed-by: Radhakrishna Sripada --- drivers/gpu/drm/i915/intel_sdvo.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 899f8278d29c..bc1ef161df32 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -980,6 +980,9 @@ static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo, DRM_DEBUG_KMS("writing sdvo hbuf: %i, hbuf_size %i, hbuf_size: %i\n", if_index, length, hbuf_size); + if (hbuf_size < length) + return false; + for (i = 0; i < hbuf_size; i += 8) { memset(tmp, 0, 8); if (i < length) -- cgit v1.2.3 From 700bbf83eabd26f918a783f69ed2a85fcb28bf7f Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Tue, 9 Apr 2019 17:40:52 +0300 Subject: drm/i915/sdvo: Don't unpack stack garbage MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Pass the length returned by intel_sdvo_read_infoframe() to hdmi_infoframe_unpack() so that we don't try to unpack any leftover stack garbage. Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190409144054.24561-6-ville.syrjala@linux.intel.com Reviewed-by: Chris Wilson --- drivers/gpu/drm/i915/intel_sdvo.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index bc1ef161df32..1c8ad680813c 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -1132,7 +1132,7 @@ static void intel_sdvo_get_avi_infoframe(struct intel_sdvo *intel_sdvo, crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI); - ret = hdmi_infoframe_unpack(frame, sdvo_data, sizeof(sdvo_data)); + ret = hdmi_infoframe_unpack(frame, sdvo_data, len); if (ret) { DRM_DEBUG_KMS("Failed to unpack AVI infoframe\n"); return; -- cgit v1.2.3 From 7f668346e70fcf1b7b56a9c31fba5070d210995b Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Tue, 9 Apr 2019 17:40:53 +0300 Subject: drm/i915/sdvo: Don't write stack garbage into the hbuf MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Pass the length returned by hdmi_infoframe_pack_only() to intel_sdvo_write_infoframe() so that we don't end up writing stack garbage into the hbuf. 
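This and the previous patch fix the same class of bug: a fixed-size scratch buffer is only partially filled, so the consumer must be told how many bytes were actually produced rather than sizeof() the buffer. A minimal illustration of the pattern (generic sketch with placeholder names, not the driver code):

	u8 buf[48];
	ssize_t len;

	len = pack_infoframe(frame, buf, sizeof(buf));	/* fills only part of buf */
	if (len < 0)
		return len;

	write_hw_buffer(dev, buf, len);	/* pass len, not sizeof(buf): the tail is uninitialized */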
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190409144054.24561-7-ville.syrjala@linux.intel.com Reviewed-by: Chris Wilson --- drivers/gpu/drm/i915/intel_sdvo.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 1c8ad680813c..6d78fd165642 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -1106,7 +1106,7 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo, return intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_AVI_IF, SDVO_HBUF_TX_VSYNC, - sdvo_data, sizeof(sdvo_data)); + sdvo_data, len); } static void intel_sdvo_get_avi_infoframe(struct intel_sdvo *intel_sdvo, -- cgit v1.2.3 From bca29283dcc058739c41d2fd9110d8ebc3e0f5e8 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 10 Apr 2019 20:09:41 +0300 Subject: drm/i915/sdvo: Actually print the reason why the SDVO command failed MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It's much easier to figure out why the SDVO encoder refuses to cooperate if we can see what status we got back. v2: Zero initialize only the first character, not the whole buffer Signed-off-by: Ville Syrjälä Reviewed-by: Chris Wilson #v1 Link: https://patchwork.freedesktop.org/patch/msgid/20190410170941.28142-1-ville.syrjala@linux.intel.com --- drivers/gpu/drm/i915/intel_sdvo.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 6d78fd165642..0860ae36bb87 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -522,6 +522,7 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo, #define BUF_LEN 256 char buffer[BUF_LEN]; + buffer[0] = '\0'; /* * The documentation states that all commands will be @@ -585,7 +586,8 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo, return true; log_fail: - DRM_DEBUG_KMS("%s: R: ... failed\n", SDVO_NAME(intel_sdvo)); + DRM_DEBUG_KMS("%s: R: ... failed %s\n", + SDVO_NAME(intel_sdvo), buffer); return false; } -- cgit v1.2.3 From 0e29eb9d9160685a1c18534f05a28a260fbfadd5 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Wed, 5 Jun 2019 20:17:33 +0200 Subject: drm/i915/dsi: Move logging of DSI VBT parameters to a helper function MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is a preparation patch for moving the calling of *_dphy_param_init() out of intel_dsi_vbt_init. 
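With the helper in place, each D-PHY init path simply ends with a single call, as the hunks below add at the end of icl_dphy_param_init() and vlv_dphy_param_init():

	intel_dsi_log_params(intel_dsi);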
Reviewed-by: Ville Syrjälä Signed-off-by: Hans de Goede Link: https://patchwork.freedesktop.org/patch/msgid/20190605181735.7020-1-hdegoede@redhat.com --- drivers/gpu/drm/i915/intel_dsi_vbt.c | 77 ++++++++++++++++++++---------------- 1 file changed, 42 insertions(+), 35 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_dsi_vbt.c b/drivers/gpu/drm/i915/intel_dsi_vbt.c index 7cdde1d04f4b..3f74762aa229 100644 --- a/drivers/gpu/drm/i915/intel_dsi_vbt.c +++ b/drivers/gpu/drm/i915/intel_dsi_vbt.c @@ -537,6 +537,44 @@ void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec) msleep(msec); } +static void intel_dsi_log_params(struct intel_dsi *intel_dsi) +{ + DRM_DEBUG_KMS("Pclk %d\n", intel_dsi->pclk); + DRM_DEBUG_KMS("Pixel overlap %d\n", intel_dsi->pixel_overlap); + DRM_DEBUG_KMS("Lane count %d\n", intel_dsi->lane_count); + DRM_DEBUG_KMS("DPHY param reg 0x%x\n", intel_dsi->dphy_reg); + DRM_DEBUG_KMS("Video mode format %s\n", + intel_dsi->video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE ? + "non-burst with sync pulse" : + intel_dsi->video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS ? + "non-burst with sync events" : + intel_dsi->video_mode_format == VIDEO_MODE_BURST ? + "burst" : ""); + DRM_DEBUG_KMS("Burst mode ratio %d\n", intel_dsi->burst_mode_ratio); + DRM_DEBUG_KMS("Reset timer %d\n", intel_dsi->rst_timer_val); + DRM_DEBUG_KMS("Eot %s\n", enableddisabled(intel_dsi->eotp_pkt)); + DRM_DEBUG_KMS("Clockstop %s\n", enableddisabled(!intel_dsi->clock_stop)); + DRM_DEBUG_KMS("Mode %s\n", intel_dsi->operation_mode ? "command" : "video"); + if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) + DRM_DEBUG_KMS("Dual link: DSI_DUAL_LINK_FRONT_BACK\n"); + else if (intel_dsi->dual_link == DSI_DUAL_LINK_PIXEL_ALT) + DRM_DEBUG_KMS("Dual link: DSI_DUAL_LINK_PIXEL_ALT\n"); + else + DRM_DEBUG_KMS("Dual link: NONE\n"); + DRM_DEBUG_KMS("Pixel Format %d\n", intel_dsi->pixel_format); + DRM_DEBUG_KMS("TLPX %d\n", intel_dsi->escape_clk_div); + DRM_DEBUG_KMS("LP RX Timeout 0x%x\n", intel_dsi->lp_rx_timeout); + DRM_DEBUG_KMS("Turnaround Timeout 0x%x\n", intel_dsi->turn_arnd_val); + DRM_DEBUG_KMS("Init Count 0x%x\n", intel_dsi->init_count); + DRM_DEBUG_KMS("HS to LP Count 0x%x\n", intel_dsi->hs_to_lp_count); + DRM_DEBUG_KMS("LP Byte Clock %d\n", intel_dsi->lp_byte_clk); + DRM_DEBUG_KMS("DBI BW Timer 0x%x\n", intel_dsi->bw_timer); + DRM_DEBUG_KMS("LP to HS Clock Count 0x%x\n", intel_dsi->clk_lp_to_hs_count); + DRM_DEBUG_KMS("HS to LP Clock Count 0x%x\n", intel_dsi->clk_hs_to_lp_count); + DRM_DEBUG_KMS("BTA %s\n", + enableddisabled(!(intel_dsi->video_frmt_cfg_bits & DISABLE_VIDEO_BTA))); +} + #define ICL_PREPARE_CNT_MAX 0x7 #define ICL_CLK_ZERO_CNT_MAX 0xf #define ICL_TRAIL_CNT_MAX 0x7 @@ -640,6 +678,8 @@ static void icl_dphy_param_init(struct intel_dsi *intel_dsi) HS_TRAIL(trail_cnt) | HS_EXIT_OVERRIDE | HS_EXIT(exit_zero_cnt)); + + intel_dsi_log_params(intel_dsi); } static void vlv_dphy_param_init(struct intel_dsi *intel_dsi) @@ -799,6 +839,8 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi) DIV_ROUND_UP(2 * tlpx_ui + trail_cnt * 2 + 8, 8); intel_dsi->clk_hs_to_lp_count += extra_byte_count; + + intel_dsi_log_params(intel_dsi); } bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id) @@ -893,41 +935,6 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id) else vlv_dphy_param_init(intel_dsi); - DRM_DEBUG_KMS("Pclk %d\n", intel_dsi->pclk); - DRM_DEBUG_KMS("Pixel overlap %d\n", intel_dsi->pixel_overlap); - DRM_DEBUG_KMS("Lane 
count %d\n", intel_dsi->lane_count); - DRM_DEBUG_KMS("DPHY param reg 0x%x\n", intel_dsi->dphy_reg); - DRM_DEBUG_KMS("Video mode format %s\n", - intel_dsi->video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE ? - "non-burst with sync pulse" : - intel_dsi->video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS ? - "non-burst with sync events" : - intel_dsi->video_mode_format == VIDEO_MODE_BURST ? - "burst" : ""); - DRM_DEBUG_KMS("Burst mode ratio %d\n", intel_dsi->burst_mode_ratio); - DRM_DEBUG_KMS("Reset timer %d\n", intel_dsi->rst_timer_val); - DRM_DEBUG_KMS("Eot %s\n", enableddisabled(intel_dsi->eotp_pkt)); - DRM_DEBUG_KMS("Clockstop %s\n", enableddisabled(!intel_dsi->clock_stop)); - DRM_DEBUG_KMS("Mode %s\n", intel_dsi->operation_mode ? "command" : "video"); - if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) - DRM_DEBUG_KMS("Dual link: DSI_DUAL_LINK_FRONT_BACK\n"); - else if (intel_dsi->dual_link == DSI_DUAL_LINK_PIXEL_ALT) - DRM_DEBUG_KMS("Dual link: DSI_DUAL_LINK_PIXEL_ALT\n"); - else - DRM_DEBUG_KMS("Dual link: NONE\n"); - DRM_DEBUG_KMS("Pixel Format %d\n", intel_dsi->pixel_format); - DRM_DEBUG_KMS("TLPX %d\n", intel_dsi->escape_clk_div); - DRM_DEBUG_KMS("LP RX Timeout 0x%x\n", intel_dsi->lp_rx_timeout); - DRM_DEBUG_KMS("Turnaround Timeout 0x%x\n", intel_dsi->turn_arnd_val); - DRM_DEBUG_KMS("Init Count 0x%x\n", intel_dsi->init_count); - DRM_DEBUG_KMS("HS to LP Count 0x%x\n", intel_dsi->hs_to_lp_count); - DRM_DEBUG_KMS("LP Byte Clock %d\n", intel_dsi->lp_byte_clk); - DRM_DEBUG_KMS("DBI BW Timer 0x%x\n", intel_dsi->bw_timer); - DRM_DEBUG_KMS("LP to HS Clock Count 0x%x\n", intel_dsi->clk_lp_to_hs_count); - DRM_DEBUG_KMS("HS to LP Clock Count 0x%x\n", intel_dsi->clk_hs_to_lp_count); - DRM_DEBUG_KMS("BTA %s\n", - enableddisabled(!(intel_dsi->video_frmt_cfg_bits & DISABLE_VIDEO_BTA))); - /* delays in VBT are in unit of 100us, so need to convert * here in ms * Delay (100us) * 100 /1000 = Delay / 10 (ms) */ -- cgit v1.2.3 From 2def5ae7d7fb8578b53b0f8595d8ec90b8eb1aeb Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Wed, 5 Jun 2019 20:17:34 +0200 Subject: drm/i915/dsi: Move vlv/icl_dphy_param_init call out of intel_dsi_vbt_init (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The vlv/icl_dphy_param_init calls do various calculations to set dphy parameters based on the pclk. Move the calling of vlv/icl_dphy_param_init to vlv_dsi_init to give vlv_dsi_init a chance to tweak the pclk before these calculations are done. Changes in v2: -Also moves the icl and vlv specific dphy_param_init functions from the generic intel_dsi_vbt.c file into the icl_ and vlv_dsi.c specific files. Note icl_dphy_param_init() and vlv_dphy_param_init() are only moved, otherwise they are completely unchanged. 
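The payoff shows up in the follow-up patch in this series: vlv_dsi_init() can now adjust intel_dsi->pclk (for example from the clock the GOP left programmed) before the D-PHY timings are derived from it. Sketched from those later hunks, illustrative rather than a separate implementation:

	/* vlv_dsi_init(), after intel_dsi_vbt_init() has filled in pclk */
	current_mode = intel_encoder_current_mode(intel_encoder);
	if (current_mode) {
		if (intel_fuzzy_clock_check(intel_dsi->pclk, current_mode->clock))
			intel_dsi->pclk = current_mode->clock;	/* trust the GOP's pclk */
		kfree(current_mode);
	}

	vlv_dphy_param_init(intel_dsi);	/* timings now derived from the final pclk */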
Reviewed-by: Ville Syrjälä Signed-off-by: Hans de Goede Link: https://patchwork.freedesktop.org/patch/msgid/20190605181735.7020-2-hdegoede@redhat.com --- drivers/gpu/drm/i915/icl_dsi.c | 108 ++++++++++++++ drivers/gpu/drm/i915/intel_dsi.h | 1 + drivers/gpu/drm/i915/intel_dsi_vbt.c | 282 +---------------------------------- drivers/gpu/drm/i915/vlv_dsi.c | 170 +++++++++++++++++++++ 4 files changed, 280 insertions(+), 281 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/icl_dsi.c b/drivers/gpu/drm/i915/icl_dsi.c index 1e240ad665b5..74448e6bf749 100644 --- a/drivers/gpu/drm/i915/icl_dsi.c +++ b/drivers/gpu/drm/i915/icl_dsi.c @@ -1380,6 +1380,113 @@ static const struct mipi_dsi_host_ops gen11_dsi_host_ops = { .transfer = gen11_dsi_host_transfer, }; +#define ICL_PREPARE_CNT_MAX 0x7 +#define ICL_CLK_ZERO_CNT_MAX 0xf +#define ICL_TRAIL_CNT_MAX 0x7 +#define ICL_TCLK_PRE_CNT_MAX 0x3 +#define ICL_TCLK_POST_CNT_MAX 0x7 +#define ICL_HS_ZERO_CNT_MAX 0xf +#define ICL_EXIT_ZERO_CNT_MAX 0x7 + +static void icl_dphy_param_init(struct intel_dsi *intel_dsi) +{ + struct drm_device *dev = intel_dsi->base.base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); + struct mipi_config *mipi_config = dev_priv->vbt.dsi.config; + u32 tlpx_ns; + u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt; + u32 ths_prepare_ns, tclk_trail_ns; + u32 hs_zero_cnt; + u32 tclk_pre_cnt, tclk_post_cnt; + + tlpx_ns = intel_dsi_tlpx_ns(intel_dsi); + + tclk_trail_ns = max(mipi_config->tclk_trail, mipi_config->ths_trail); + ths_prepare_ns = max(mipi_config->ths_prepare, + mipi_config->tclk_prepare); + + /* + * prepare cnt in escape clocks + * this field represents a hexadecimal value with a precision + * of 1.2 – i.e. the most significant bit is the integer + * and the least significant 2 bits are fraction bits. 
+ * so, the field can represent a range of 0.25 to 1.75 + */ + prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * 4, tlpx_ns); + if (prepare_cnt > ICL_PREPARE_CNT_MAX) { + DRM_DEBUG_KMS("prepare_cnt out of range (%d)\n", prepare_cnt); + prepare_cnt = ICL_PREPARE_CNT_MAX; + } + + /* clk zero count in escape clocks */ + clk_zero_cnt = DIV_ROUND_UP(mipi_config->tclk_prepare_clkzero - + ths_prepare_ns, tlpx_ns); + if (clk_zero_cnt > ICL_CLK_ZERO_CNT_MAX) { + DRM_DEBUG_KMS("clk_zero_cnt out of range (%d)\n", clk_zero_cnt); + clk_zero_cnt = ICL_CLK_ZERO_CNT_MAX; + } + + /* trail cnt in escape clocks*/ + trail_cnt = DIV_ROUND_UP(tclk_trail_ns, tlpx_ns); + if (trail_cnt > ICL_TRAIL_CNT_MAX) { + DRM_DEBUG_KMS("trail_cnt out of range (%d)\n", trail_cnt); + trail_cnt = ICL_TRAIL_CNT_MAX; + } + + /* tclk pre count in escape clocks */ + tclk_pre_cnt = DIV_ROUND_UP(mipi_config->tclk_pre, tlpx_ns); + if (tclk_pre_cnt > ICL_TCLK_PRE_CNT_MAX) { + DRM_DEBUG_KMS("tclk_pre_cnt out of range (%d)\n", tclk_pre_cnt); + tclk_pre_cnt = ICL_TCLK_PRE_CNT_MAX; + } + + /* tclk post count in escape clocks */ + tclk_post_cnt = DIV_ROUND_UP(mipi_config->tclk_post, tlpx_ns); + if (tclk_post_cnt > ICL_TCLK_POST_CNT_MAX) { + DRM_DEBUG_KMS("tclk_post_cnt out of range (%d)\n", tclk_post_cnt); + tclk_post_cnt = ICL_TCLK_POST_CNT_MAX; + } + + /* hs zero cnt in escape clocks */ + hs_zero_cnt = DIV_ROUND_UP(mipi_config->ths_prepare_hszero - + ths_prepare_ns, tlpx_ns); + if (hs_zero_cnt > ICL_HS_ZERO_CNT_MAX) { + DRM_DEBUG_KMS("hs_zero_cnt out of range (%d)\n", hs_zero_cnt); + hs_zero_cnt = ICL_HS_ZERO_CNT_MAX; + } + + /* hs exit zero cnt in escape clocks */ + exit_zero_cnt = DIV_ROUND_UP(mipi_config->ths_exit, tlpx_ns); + if (exit_zero_cnt > ICL_EXIT_ZERO_CNT_MAX) { + DRM_DEBUG_KMS("exit_zero_cnt out of range (%d)\n", exit_zero_cnt); + exit_zero_cnt = ICL_EXIT_ZERO_CNT_MAX; + } + + /* clock lane dphy timings */ + intel_dsi->dphy_reg = (CLK_PREPARE_OVERRIDE | + CLK_PREPARE(prepare_cnt) | + CLK_ZERO_OVERRIDE | + CLK_ZERO(clk_zero_cnt) | + CLK_PRE_OVERRIDE | + CLK_PRE(tclk_pre_cnt) | + CLK_POST_OVERRIDE | + CLK_POST(tclk_post_cnt) | + CLK_TRAIL_OVERRIDE | + CLK_TRAIL(trail_cnt)); + + /* data lanes dphy timings */ + intel_dsi->dphy_data_lane_reg = (HS_PREPARE_OVERRIDE | + HS_PREPARE(prepare_cnt) | + HS_ZERO_OVERRIDE | + HS_ZERO(hs_zero_cnt) | + HS_TRAIL_OVERRIDE | + HS_TRAIL(trail_cnt) | + HS_EXIT_OVERRIDE | + HS_EXIT(exit_zero_cnt)); + + intel_dsi_log_params(intel_dsi); +} + void icl_dsi_init(struct drm_i915_private *dev_priv) { struct drm_device *dev = &dev_priv->drm; @@ -1472,6 +1579,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv) goto err; } + icl_dphy_param_init(intel_dsi); return; err: diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h index f9b90061d912..6d20434636cd 100644 --- a/drivers/gpu/drm/i915/intel_dsi.h +++ b/drivers/gpu/drm/i915/intel_dsi.h @@ -199,5 +199,6 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id); void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi, enum mipi_seq seq_id); void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec); +void intel_dsi_log_params(struct intel_dsi *intel_dsi); #endif /* _INTEL_DSI_H */ diff --git a/drivers/gpu/drm/i915/intel_dsi_vbt.c b/drivers/gpu/drm/i915/intel_dsi_vbt.c index 3f74762aa229..e5b178660408 100644 --- a/drivers/gpu/drm/i915/intel_dsi_vbt.c +++ b/drivers/gpu/drm/i915/intel_dsi_vbt.c @@ -46,13 +46,6 @@ #define MIPI_VIRTUAL_CHANNEL_SHIFT 1 #define MIPI_PORT_SHIFT 3 -#define PREPARE_CNT_MAX 0x3F -#define 
EXIT_ZERO_CNT_MAX 0x3F -#define CLK_ZERO_CNT_MAX 0xFF -#define TRAIL_CNT_MAX 0x1F - -#define NS_KHZ_RATIO 1000000 - /* base offsets for gpio pads */ #define VLV_GPIO_NC_0_HV_DDI0_HPD 0x4130 #define VLV_GPIO_NC_1_HV_DDI0_DDC_SDA 0x4120 @@ -537,7 +530,7 @@ void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec) msleep(msec); } -static void intel_dsi_log_params(struct intel_dsi *intel_dsi) +void intel_dsi_log_params(struct intel_dsi *intel_dsi) { DRM_DEBUG_KMS("Pclk %d\n", intel_dsi->pclk); DRM_DEBUG_KMS("Pixel overlap %d\n", intel_dsi->pixel_overlap); @@ -575,274 +568,6 @@ static void intel_dsi_log_params(struct intel_dsi *intel_dsi) enableddisabled(!(intel_dsi->video_frmt_cfg_bits & DISABLE_VIDEO_BTA))); } -#define ICL_PREPARE_CNT_MAX 0x7 -#define ICL_CLK_ZERO_CNT_MAX 0xf -#define ICL_TRAIL_CNT_MAX 0x7 -#define ICL_TCLK_PRE_CNT_MAX 0x3 -#define ICL_TCLK_POST_CNT_MAX 0x7 -#define ICL_HS_ZERO_CNT_MAX 0xf -#define ICL_EXIT_ZERO_CNT_MAX 0x7 - -static void icl_dphy_param_init(struct intel_dsi *intel_dsi) -{ - struct drm_device *dev = intel_dsi->base.base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - struct mipi_config *mipi_config = dev_priv->vbt.dsi.config; - u32 tlpx_ns; - u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt; - u32 ths_prepare_ns, tclk_trail_ns; - u32 hs_zero_cnt; - u32 tclk_pre_cnt, tclk_post_cnt; - - tlpx_ns = intel_dsi_tlpx_ns(intel_dsi); - - tclk_trail_ns = max(mipi_config->tclk_trail, mipi_config->ths_trail); - ths_prepare_ns = max(mipi_config->ths_prepare, - mipi_config->tclk_prepare); - - /* - * prepare cnt in escape clocks - * this field represents a hexadecimal value with a precision - * of 1.2 – i.e. the most significant bit is the integer - * and the least significant 2 bits are fraction bits. - * so, the field can represent a range of 0.25 to 1.75 - */ - prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * 4, tlpx_ns); - if (prepare_cnt > ICL_PREPARE_CNT_MAX) { - DRM_DEBUG_KMS("prepare_cnt out of range (%d)\n", prepare_cnt); - prepare_cnt = ICL_PREPARE_CNT_MAX; - } - - /* clk zero count in escape clocks */ - clk_zero_cnt = DIV_ROUND_UP(mipi_config->tclk_prepare_clkzero - - ths_prepare_ns, tlpx_ns); - if (clk_zero_cnt > ICL_CLK_ZERO_CNT_MAX) { - DRM_DEBUG_KMS("clk_zero_cnt out of range (%d)\n", clk_zero_cnt); - clk_zero_cnt = ICL_CLK_ZERO_CNT_MAX; - } - - /* trail cnt in escape clocks*/ - trail_cnt = DIV_ROUND_UP(tclk_trail_ns, tlpx_ns); - if (trail_cnt > ICL_TRAIL_CNT_MAX) { - DRM_DEBUG_KMS("trail_cnt out of range (%d)\n", trail_cnt); - trail_cnt = ICL_TRAIL_CNT_MAX; - } - - /* tclk pre count in escape clocks */ - tclk_pre_cnt = DIV_ROUND_UP(mipi_config->tclk_pre, tlpx_ns); - if (tclk_pre_cnt > ICL_TCLK_PRE_CNT_MAX) { - DRM_DEBUG_KMS("tclk_pre_cnt out of range (%d)\n", tclk_pre_cnt); - tclk_pre_cnt = ICL_TCLK_PRE_CNT_MAX; - } - - /* tclk post count in escape clocks */ - tclk_post_cnt = DIV_ROUND_UP(mipi_config->tclk_post, tlpx_ns); - if (tclk_post_cnt > ICL_TCLK_POST_CNT_MAX) { - DRM_DEBUG_KMS("tclk_post_cnt out of range (%d)\n", tclk_post_cnt); - tclk_post_cnt = ICL_TCLK_POST_CNT_MAX; - } - - /* hs zero cnt in escape clocks */ - hs_zero_cnt = DIV_ROUND_UP(mipi_config->ths_prepare_hszero - - ths_prepare_ns, tlpx_ns); - if (hs_zero_cnt > ICL_HS_ZERO_CNT_MAX) { - DRM_DEBUG_KMS("hs_zero_cnt out of range (%d)\n", hs_zero_cnt); - hs_zero_cnt = ICL_HS_ZERO_CNT_MAX; - } - - /* hs exit zero cnt in escape clocks */ - exit_zero_cnt = DIV_ROUND_UP(mipi_config->ths_exit, tlpx_ns); - if (exit_zero_cnt > ICL_EXIT_ZERO_CNT_MAX) { - DRM_DEBUG_KMS("exit_zero_cnt out of 
range (%d)\n", exit_zero_cnt); - exit_zero_cnt = ICL_EXIT_ZERO_CNT_MAX; - } - - /* clock lane dphy timings */ - intel_dsi->dphy_reg = (CLK_PREPARE_OVERRIDE | - CLK_PREPARE(prepare_cnt) | - CLK_ZERO_OVERRIDE | - CLK_ZERO(clk_zero_cnt) | - CLK_PRE_OVERRIDE | - CLK_PRE(tclk_pre_cnt) | - CLK_POST_OVERRIDE | - CLK_POST(tclk_post_cnt) | - CLK_TRAIL_OVERRIDE | - CLK_TRAIL(trail_cnt)); - - /* data lanes dphy timings */ - intel_dsi->dphy_data_lane_reg = (HS_PREPARE_OVERRIDE | - HS_PREPARE(prepare_cnt) | - HS_ZERO_OVERRIDE | - HS_ZERO(hs_zero_cnt) | - HS_TRAIL_OVERRIDE | - HS_TRAIL(trail_cnt) | - HS_EXIT_OVERRIDE | - HS_EXIT(exit_zero_cnt)); - - intel_dsi_log_params(intel_dsi); -} - -static void vlv_dphy_param_init(struct intel_dsi *intel_dsi) -{ - struct drm_device *dev = intel_dsi->base.base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - struct mipi_config *mipi_config = dev_priv->vbt.dsi.config; - u32 tlpx_ns, extra_byte_count, tlpx_ui; - u32 ui_num, ui_den; - u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt; - u32 ths_prepare_ns, tclk_trail_ns; - u32 tclk_prepare_clkzero, ths_prepare_hszero; - u32 lp_to_hs_switch, hs_to_lp_switch; - u32 mul; - - tlpx_ns = intel_dsi_tlpx_ns(intel_dsi); - - switch (intel_dsi->lane_count) { - case 1: - case 2: - extra_byte_count = 2; - break; - case 3: - extra_byte_count = 4; - break; - case 4: - default: - extra_byte_count = 3; - break; - } - - /* in Kbps */ - ui_num = NS_KHZ_RATIO; - ui_den = intel_dsi_bitrate(intel_dsi); - - tclk_prepare_clkzero = mipi_config->tclk_prepare_clkzero; - ths_prepare_hszero = mipi_config->ths_prepare_hszero; - - /* - * B060 - * LP byte clock = TLPX/ (8UI) - */ - intel_dsi->lp_byte_clk = DIV_ROUND_UP(tlpx_ns * ui_den, 8 * ui_num); - - /* DDR clock period = 2 * UI - * UI(sec) = 1/(bitrate * 10^3) (bitrate is in KHZ) - * UI(nsec) = 10^6 / bitrate - * DDR clock period (nsec) = 2 * UI = (2 * 10^6)/ bitrate - * DDR clock count = ns_value / DDR clock period - * - * For GEMINILAKE dphy_param_reg will be programmed in terms of - * HS byte clock count for other platform in HS ddr clock count - */ - mul = IS_GEMINILAKE(dev_priv) ? 
8 : 2; - ths_prepare_ns = max(mipi_config->ths_prepare, - mipi_config->tclk_prepare); - - /* prepare count */ - prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * ui_den, ui_num * mul); - - if (prepare_cnt > PREPARE_CNT_MAX) { - DRM_DEBUG_KMS("prepare count too high %u\n", prepare_cnt); - prepare_cnt = PREPARE_CNT_MAX; - } - - /* exit zero count */ - exit_zero_cnt = DIV_ROUND_UP( - (ths_prepare_hszero - ths_prepare_ns) * ui_den, - ui_num * mul - ); - - /* - * Exit zero is unified val ths_zero and ths_exit - * minimum value for ths_exit = 110ns - * min (exit_zero_cnt * 2) = 110/UI - * exit_zero_cnt = 55/UI - */ - if (exit_zero_cnt < (55 * ui_den / ui_num) && (55 * ui_den) % ui_num) - exit_zero_cnt += 1; - - if (exit_zero_cnt > EXIT_ZERO_CNT_MAX) { - DRM_DEBUG_KMS("exit zero count too high %u\n", exit_zero_cnt); - exit_zero_cnt = EXIT_ZERO_CNT_MAX; - } - - /* clk zero count */ - clk_zero_cnt = DIV_ROUND_UP( - (tclk_prepare_clkzero - ths_prepare_ns) - * ui_den, ui_num * mul); - - if (clk_zero_cnt > CLK_ZERO_CNT_MAX) { - DRM_DEBUG_KMS("clock zero count too high %u\n", clk_zero_cnt); - clk_zero_cnt = CLK_ZERO_CNT_MAX; - } - - /* trail count */ - tclk_trail_ns = max(mipi_config->tclk_trail, mipi_config->ths_trail); - trail_cnt = DIV_ROUND_UP(tclk_trail_ns * ui_den, ui_num * mul); - - if (trail_cnt > TRAIL_CNT_MAX) { - DRM_DEBUG_KMS("trail count too high %u\n", trail_cnt); - trail_cnt = TRAIL_CNT_MAX; - } - - /* B080 */ - intel_dsi->dphy_reg = exit_zero_cnt << 24 | trail_cnt << 16 | - clk_zero_cnt << 8 | prepare_cnt; - - /* - * LP to HS switch count = 4TLPX + PREP_COUNT * mul + EXIT_ZERO_COUNT * - * mul + 10UI + Extra Byte Count - * - * HS to LP switch count = THS-TRAIL + 2TLPX + Extra Byte Count - * Extra Byte Count is calculated according to number of lanes. - * High Low Switch Count is the Max of LP to HS and - * HS to LP switch count - * - */ - tlpx_ui = DIV_ROUND_UP(tlpx_ns * ui_den, ui_num); - - /* B044 */ - /* FIXME: - * The comment above does not match with the code */ - lp_to_hs_switch = DIV_ROUND_UP(4 * tlpx_ui + prepare_cnt * mul + - exit_zero_cnt * mul + 10, 8); - - hs_to_lp_switch = DIV_ROUND_UP(mipi_config->ths_trail + 2 * tlpx_ui, 8); - - intel_dsi->hs_to_lp_count = max(lp_to_hs_switch, hs_to_lp_switch); - intel_dsi->hs_to_lp_count += extra_byte_count; - - /* B088 */ - /* LP -> HS for clock lanes - * LP clk sync + LP11 + LP01 + tclk_prepare + tclk_zero + - * extra byte count - * 2TPLX + 1TLPX + 1 TPLX(in ns) + prepare_cnt * 2 + clk_zero_cnt * - * 2(in UI) + extra byte count - * In byteclks = (4TLPX + prepare_cnt * 2 + clk_zero_cnt *2 (in UI)) / - * 8 + extra byte count - */ - intel_dsi->clk_lp_to_hs_count = - DIV_ROUND_UP( - 4 * tlpx_ui + prepare_cnt * 2 + - clk_zero_cnt * 2, - 8); - - intel_dsi->clk_lp_to_hs_count += extra_byte_count; - - /* HS->LP for Clock Lanes - * Low Power clock synchronisations + 1Tx byteclk + tclk_trail + - * Extra byte count - * 2TLPX + 8UI + (trail_count*2)(in UI) + Extra byte count - * In byteclks = (2*TLpx(in UI) + trail_count*2 +8)(in UI)/8 + - * Extra byte count - */ - intel_dsi->clk_hs_to_lp_count = - DIV_ROUND_UP(2 * tlpx_ui + trail_cnt * 2 + 8, - 8); - intel_dsi->clk_hs_to_lp_count += extra_byte_count; - - intel_dsi_log_params(intel_dsi); -} - bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id) { struct drm_device *dev = intel_dsi->base.base.dev; @@ -930,11 +655,6 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id) intel_dsi->burst_mode_ratio = burst_mode_ratio; - if (INTEL_GEN(dev_priv) >= 11) - 
icl_dphy_param_init(intel_dsi); - else - vlv_dphy_param_init(intel_dsi); - /* delays in VBT are in unit of 100us, so need to convert * here in ms * Delay (100us) * 100 /1000 = Delay / 10 (ms) */ diff --git a/drivers/gpu/drm/i915/vlv_dsi.c b/drivers/gpu/drm/i915/vlv_dsi.c index bfe2891eac37..c94e58d0ce40 100644 --- a/drivers/gpu/drm/i915/vlv_dsi.c +++ b/drivers/gpu/drm/i915/vlv_dsi.c @@ -1669,6 +1669,174 @@ static void intel_dsi_add_properties(struct intel_connector *connector) } } +#define NS_KHZ_RATIO 1000000 + +#define PREPARE_CNT_MAX 0x3F +#define EXIT_ZERO_CNT_MAX 0x3F +#define CLK_ZERO_CNT_MAX 0xFF +#define TRAIL_CNT_MAX 0x1F + +static void vlv_dphy_param_init(struct intel_dsi *intel_dsi) +{ + struct drm_device *dev = intel_dsi->base.base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); + struct mipi_config *mipi_config = dev_priv->vbt.dsi.config; + u32 tlpx_ns, extra_byte_count, tlpx_ui; + u32 ui_num, ui_den; + u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt; + u32 ths_prepare_ns, tclk_trail_ns; + u32 tclk_prepare_clkzero, ths_prepare_hszero; + u32 lp_to_hs_switch, hs_to_lp_switch; + u32 mul; + + tlpx_ns = intel_dsi_tlpx_ns(intel_dsi); + + switch (intel_dsi->lane_count) { + case 1: + case 2: + extra_byte_count = 2; + break; + case 3: + extra_byte_count = 4; + break; + case 4: + default: + extra_byte_count = 3; + break; + } + + /* in Kbps */ + ui_num = NS_KHZ_RATIO; + ui_den = intel_dsi_bitrate(intel_dsi); + + tclk_prepare_clkzero = mipi_config->tclk_prepare_clkzero; + ths_prepare_hszero = mipi_config->ths_prepare_hszero; + + /* + * B060 + * LP byte clock = TLPX/ (8UI) + */ + intel_dsi->lp_byte_clk = DIV_ROUND_UP(tlpx_ns * ui_den, 8 * ui_num); + + /* DDR clock period = 2 * UI + * UI(sec) = 1/(bitrate * 10^3) (bitrate is in KHZ) + * UI(nsec) = 10^6 / bitrate + * DDR clock period (nsec) = 2 * UI = (2 * 10^6)/ bitrate + * DDR clock count = ns_value / DDR clock period + * + * For GEMINILAKE dphy_param_reg will be programmed in terms of + * HS byte clock count for other platform in HS ddr clock count + */ + mul = IS_GEMINILAKE(dev_priv) ? 
8 : 2; + ths_prepare_ns = max(mipi_config->ths_prepare, + mipi_config->tclk_prepare); + + /* prepare count */ + prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * ui_den, ui_num * mul); + + if (prepare_cnt > PREPARE_CNT_MAX) { + DRM_DEBUG_KMS("prepare count too high %u\n", prepare_cnt); + prepare_cnt = PREPARE_CNT_MAX; + } + + /* exit zero count */ + exit_zero_cnt = DIV_ROUND_UP( + (ths_prepare_hszero - ths_prepare_ns) * ui_den, + ui_num * mul + ); + + /* + * Exit zero is unified val ths_zero and ths_exit + * minimum value for ths_exit = 110ns + * min (exit_zero_cnt * 2) = 110/UI + * exit_zero_cnt = 55/UI + */ + if (exit_zero_cnt < (55 * ui_den / ui_num) && (55 * ui_den) % ui_num) + exit_zero_cnt += 1; + + if (exit_zero_cnt > EXIT_ZERO_CNT_MAX) { + DRM_DEBUG_KMS("exit zero count too high %u\n", exit_zero_cnt); + exit_zero_cnt = EXIT_ZERO_CNT_MAX; + } + + /* clk zero count */ + clk_zero_cnt = DIV_ROUND_UP( + (tclk_prepare_clkzero - ths_prepare_ns) + * ui_den, ui_num * mul); + + if (clk_zero_cnt > CLK_ZERO_CNT_MAX) { + DRM_DEBUG_KMS("clock zero count too high %u\n", clk_zero_cnt); + clk_zero_cnt = CLK_ZERO_CNT_MAX; + } + + /* trail count */ + tclk_trail_ns = max(mipi_config->tclk_trail, mipi_config->ths_trail); + trail_cnt = DIV_ROUND_UP(tclk_trail_ns * ui_den, ui_num * mul); + + if (trail_cnt > TRAIL_CNT_MAX) { + DRM_DEBUG_KMS("trail count too high %u\n", trail_cnt); + trail_cnt = TRAIL_CNT_MAX; + } + + /* B080 */ + intel_dsi->dphy_reg = exit_zero_cnt << 24 | trail_cnt << 16 | + clk_zero_cnt << 8 | prepare_cnt; + + /* + * LP to HS switch count = 4TLPX + PREP_COUNT * mul + EXIT_ZERO_COUNT * + * mul + 10UI + Extra Byte Count + * + * HS to LP switch count = THS-TRAIL + 2TLPX + Extra Byte Count + * Extra Byte Count is calculated according to number of lanes. + * High Low Switch Count is the Max of LP to HS and + * HS to LP switch count + * + */ + tlpx_ui = DIV_ROUND_UP(tlpx_ns * ui_den, ui_num); + + /* B044 */ + /* FIXME: + * The comment above does not match with the code */ + lp_to_hs_switch = DIV_ROUND_UP(4 * tlpx_ui + prepare_cnt * mul + + exit_zero_cnt * mul + 10, 8); + + hs_to_lp_switch = DIV_ROUND_UP(mipi_config->ths_trail + 2 * tlpx_ui, 8); + + intel_dsi->hs_to_lp_count = max(lp_to_hs_switch, hs_to_lp_switch); + intel_dsi->hs_to_lp_count += extra_byte_count; + + /* B088 */ + /* LP -> HS for clock lanes + * LP clk sync + LP11 + LP01 + tclk_prepare + tclk_zero + + * extra byte count + * 2TPLX + 1TLPX + 1 TPLX(in ns) + prepare_cnt * 2 + clk_zero_cnt * + * 2(in UI) + extra byte count + * In byteclks = (4TLPX + prepare_cnt * 2 + clk_zero_cnt *2 (in UI)) / + * 8 + extra byte count + */ + intel_dsi->clk_lp_to_hs_count = + DIV_ROUND_UP( + 4 * tlpx_ui + prepare_cnt * 2 + + clk_zero_cnt * 2, + 8); + + intel_dsi->clk_lp_to_hs_count += extra_byte_count; + + /* HS->LP for Clock Lanes + * Low Power clock synchronisations + 1Tx byteclk + tclk_trail + + * Extra byte count + * 2TLPX + 8UI + (trail_count*2)(in UI) + Extra byte count + * In byteclks = (2*TLpx(in UI) + trail_count*2 +8)(in UI)/8 + + * Extra byte count + */ + intel_dsi->clk_hs_to_lp_count = + DIV_ROUND_UP(2 * tlpx_ui + trail_cnt * 2 + 8, + 8); + intel_dsi->clk_hs_to_lp_count += extra_byte_count; + + intel_dsi_log_params(intel_dsi); +} + void vlv_dsi_init(struct drm_i915_private *dev_priv) { struct drm_device *dev = &dev_priv->drm; @@ -1758,6 +1926,8 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv) goto err; } + vlv_dphy_param_init(intel_dsi); + /* * In case of BYT with CRC PMIC, we need to use GPIO for * Panel control. 
-- cgit v1.2.3 From 6be306bee7dd80905329769292e21b5e2d166b8f Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Wed, 5 Jun 2019 20:17:35 +0200 Subject: drm/i915/dsi: Read back pclk set by GOP and use that as pclk (v3) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The GOP sometimes initializes the pclk at a (slightly) different frequency than the pclk which we've calculated. This commit makes the DSI code read back the pclk set by the GOP and if that is within a reasonable margin of the calculated pclk, uses that instead. This fixes the first modeset being a full modeset instead of a fast modeset on systems where the GOP pclk is different. Changes in v2: -Use intel_encoder_current_mode() to get the pclk setup by the GOP Changes in v3: -Back to the readback approach, skipping the dsi_pll.ctrl / .div checks in intel_pipe_config_compare() when adjust is set leads to: [drm:pipe_config_err [i915]] *ERROR* mismatch in dsi_pll.ctrl (...) [drm:pipe_config_err [i915]] *ERROR* mismatch in dsi_pll.div (...) -Do the readback and pclk overriding from vlv_dsi_init(), rather than from intel_dsi_vbt_init() as the vbt code should not be touching the hw Reviewed-by: Ville Syrjälä Signed-off-by: Hans de Goede Link: https://patchwork.freedesktop.org/patch/msgid/20190605181735.7020-3-hdegoede@redhat.com --- drivers/gpu/drm/i915/vlv_dsi.c | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/vlv_dsi.c b/drivers/gpu/drm/i915/vlv_dsi.c index c94e58d0ce40..e272d826210a 100644 --- a/drivers/gpu/drm/i915/vlv_dsi.c +++ b/drivers/gpu/drm/i915/vlv_dsi.c @@ -1845,7 +1845,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv) struct drm_encoder *encoder; struct intel_connector *intel_connector; struct drm_connector *connector; - struct drm_display_mode *fixed_mode; + struct drm_display_mode *current_mode, *fixed_mode; enum port port; DRM_DEBUG_KMS("\n"); @@ -1889,6 +1889,9 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv) intel_connector->get_hw_state = intel_connector_get_hw_state; intel_encoder->port = port; + intel_encoder->type = INTEL_OUTPUT_DSI; + intel_encoder->power_domain = POWER_DOMAIN_PORT_DSI; + intel_encoder->cloneable = 0; /* * On BYT/CHV, pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI @@ -1926,6 +1929,20 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv) goto err; } + /* Use clock read-back from current hw-state for fastboot */ + current_mode = intel_encoder_current_mode(intel_encoder); + if (current_mode) { + DRM_DEBUG_KMS("Calculated pclk %d GOP %d\n", + intel_dsi->pclk, current_mode->clock); + if (intel_fuzzy_clock_check(intel_dsi->pclk, + current_mode->clock)) { + DRM_DEBUG_KMS("Using GOP pclk\n"); + intel_dsi->pclk = current_mode->clock; + } + + kfree(current_mode); + } + vlv_dphy_param_init(intel_dsi); /* @@ -1943,9 +1960,6 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv) } } - intel_encoder->type = INTEL_OUTPUT_DSI; - intel_encoder->power_domain = POWER_DOMAIN_PORT_DSI; - intel_encoder->cloneable = 0; drm_connector_init(dev, connector, &intel_dsi_connector_funcs, DRM_MODE_CONNECTOR_DSI); -- cgit v1.2.3 From 33ec6c9eb35e6bcf60b5a134d8e8e1ed836a0bc1 Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Thu, 6 Jun 2019 15:42:24 -0700 Subject: drm/i915/guc: always use Command Transport Buffers Now that we've moved the Gen9 GuC blobs to version 32 we have CTB support on all gens, so no need to restrict the usage to Gen11+. 
Note that MMIO communication is still required for CTB initialization. v2: fix commit message nits (Michal) Signed-off-by: Daniele Ceraolo Spurio Cc: Michal Wajdeczko Reviewed-by: Michal Wajdeczko Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190606224225.14287-1-daniele.ceraolospurio@intel.com --- drivers/gpu/drm/i915/i915_drv.h | 1 - drivers/gpu/drm/i915/i915_pci.c | 1 - drivers/gpu/drm/i915/intel_device_info.h | 1 - drivers/gpu/drm/i915/intel_guc.c | 45 +++++--------------------------- drivers/gpu/drm/i915/intel_guc.h | 1 - drivers/gpu/drm/i915/intel_guc_ct.c | 14 ---------- drivers/gpu/drm/i915/intel_uc.c | 19 +++----------- 7 files changed, 10 insertions(+), 72 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index dfe4b11ee423..82e55c65289a 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2393,7 +2393,6 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, * properties, so we have separate macros to test them. */ #define HAS_GUC(dev_priv) (INTEL_INFO(dev_priv)->has_guc) -#define HAS_GUC_CT(dev_priv) (INTEL_INFO(dev_priv)->has_guc_ct) #define HAS_GUC_UCODE(dev_priv) (HAS_GUC(dev_priv)) #define HAS_GUC_SCHED(dev_priv) (HAS_GUC(dev_priv)) diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index e761ea86b481..482f1d0f1770 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -746,7 +746,6 @@ static const struct intel_device_info intel_cannonlake_info = { }, \ GEN(11), \ .ddb_size = 2048, \ - .has_guc_ct = 1, \ .has_logical_ring_elsq = 1, \ .color = { .degamma_lut_size = 33, .gamma_lut_size = 1024 } diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h index d67dedf0cbd8..1fb8b50df7df 100644 --- a/drivers/gpu/drm/i915/intel_device_info.h +++ b/drivers/gpu/drm/i915/intel_device_info.h @@ -112,7 +112,6 @@ enum intel_ppgtt_type { func(has_reset_engine); \ func(has_fpga_dbg); \ func(has_guc); \ - func(has_guc_ct); \ func(has_l3_dpf); \ func(has_llc); \ func(has_logical_ring_contexts); \ diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c index b88c349c4fa6..43232242d167 100644 --- a/drivers/gpu/drm/i915/intel_guc.c +++ b/drivers/gpu/drm/i915/intel_guc.c @@ -56,7 +56,7 @@ void intel_guc_init_send_regs(struct intel_guc *guc) enum forcewake_domains fw_domains = 0; unsigned int i; - if (HAS_GUC_CT(dev_priv) && INTEL_GEN(dev_priv) >= 11) { + if (INTEL_GEN(dev_priv) >= 11) { guc->send_regs.base = i915_mmio_reg_offset(GEN11_SOFT_SCRATCH(0)); guc->send_regs.count = GEN11_SOFT_SCRATCH_COUNT; @@ -232,11 +232,9 @@ int intel_guc_init(struct intel_guc *guc) goto err_log; GEM_BUG_ON(!guc->ads_vma); - if (HAS_GUC_CT(dev_priv)) { - ret = intel_guc_ct_init(&guc->ct); - if (ret) - goto err_ads; - } + ret = intel_guc_ct_init(&guc->ct); + if (ret) + goto err_ads; /* We need to notify the guc whenever we change the GGTT */ i915_ggtt_enable_guc(dev_priv); @@ -262,8 +260,7 @@ void intel_guc_fini(struct intel_guc *guc) i915_ggtt_disable_guc(dev_priv); - if (HAS_GUC_CT(dev_priv)) - intel_guc_ct_fini(&guc->ct); + intel_guc_ct_fini(&guc->ct); intel_guc_ads_destroy(guc); intel_guc_log_destroy(&guc->log); @@ -430,9 +427,8 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len, GEM_BUG_ON(*action & ~INTEL_GUC_MSG_CODE_MASK); /* If CT is available, we expect to use MMIO only during init/fini */ - 
GEM_BUG_ON(HAS_GUC_CT(dev_priv) && - *action != INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER && - *action != INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER); + GEM_BUG_ON(*action != INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER && + *action != INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER); mutex_lock(&guc->send_mutex); intel_uncore_forcewake_get(uncore, guc->send_regs.fw_domains); @@ -481,33 +477,6 @@ out: return ret; } -void intel_guc_to_host_event_handler_mmio(struct intel_guc *guc) -{ - struct drm_i915_private *dev_priv = guc_to_i915(guc); - u32 msg, val; - - /* - * Sample the log buffer flush related bits & clear them out now - * itself from the message identity register to minimize the - * probability of losing a flush interrupt, when there are back - * to back flush interrupts. - * There can be a new flush interrupt, for different log buffer - * type (like for ISR), whilst Host is handling one (for DPC). - * Since same bit is used in message register for ISR & DPC, it - * could happen that GuC sets the bit for 2nd interrupt but Host - * clears out the bit on handling the 1st interrupt. - */ - disable_rpm_wakeref_asserts(dev_priv); - spin_lock(&guc->irq_lock); - val = I915_READ(SOFT_SCRATCH(15)); - msg = val & guc->msg_enabled_mask; - I915_WRITE(SOFT_SCRATCH(15), val & ~msg); - spin_unlock(&guc->irq_lock); - enable_rpm_wakeref_asserts(dev_priv); - - intel_guc_to_host_process_recv_msg(guc, &msg, 1); -} - int intel_guc_to_host_process_recv_msg(struct intel_guc *guc, const u32 *payload, u32 len) { diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h index cbfed7a77c8b..e07e4c69cf08 100644 --- a/drivers/gpu/drm/i915/intel_guc.h +++ b/drivers/gpu/drm/i915/intel_guc.h @@ -165,7 +165,6 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len, u32 *response_buf, u32 response_buf_size); void intel_guc_to_host_event_handler(struct intel_guc *guc); void intel_guc_to_host_event_handler_nop(struct intel_guc *guc); -void intel_guc_to_host_event_handler_mmio(struct intel_guc *guc); int intel_guc_to_host_process_recv_msg(struct intel_guc *guc, const u32 *payload, u32 len); int intel_guc_sample_forcewake(struct intel_guc *guc); diff --git a/drivers/gpu/drm/i915/intel_guc_ct.c b/drivers/gpu/drm/i915/intel_guc_ct.c index 2d5dc2aa22a7..3921809f812b 100644 --- a/drivers/gpu/drm/i915/intel_guc_ct.c +++ b/drivers/gpu/drm/i915/intel_guc_ct.c @@ -848,8 +848,6 @@ static void intel_guc_to_host_event_handler_ct(struct intel_guc *guc) * Allocate memory required for communication via * the CT channel. * - * Shall only be called for platforms with HAS_GUC_CT. - * * Return: 0 on success, a negative errno code on failure. */ int intel_guc_ct_init(struct intel_guc_ct *ct) @@ -875,8 +873,6 @@ int intel_guc_ct_init(struct intel_guc_ct *ct) * * Deallocate memory required for communication via * the CT channel. - * - * Shall only be called for platforms with HAS_GUC_CT. */ void intel_guc_ct_fini(struct intel_guc_ct *ct) { @@ -890,19 +886,14 @@ void intel_guc_ct_fini(struct intel_guc_ct *ct) * intel_guc_ct_enable - Enable buffer based command transport. * @ct: pointer to CT struct * - * Shall only be called for platforms with HAS_GUC_CT. - * * Return: 0 on success, a negative errno code on failure. 
*/ int intel_guc_ct_enable(struct intel_guc_ct *ct) { struct intel_guc *guc = ct_to_guc(ct); - struct drm_i915_private *i915 = guc_to_i915(guc); struct intel_guc_ct_channel *ctch = &ct->host_channel; int err; - GEM_BUG_ON(!HAS_GUC_CT(i915)); - if (ctch->enabled) return 0; @@ -920,17 +911,12 @@ int intel_guc_ct_enable(struct intel_guc_ct *ct) /** * intel_guc_ct_disable - Disable buffer based command transport. * @ct: pointer to CT struct - * - * Shall only be called for platforms with HAS_GUC_CT. */ void intel_guc_ct_disable(struct intel_guc_ct *ct) { struct intel_guc *guc = ct_to_guc(ct); - struct drm_i915_private *i915 = guc_to_i915(guc); struct intel_guc_ct_channel *ctch = &ct->host_channel; - GEM_BUG_ON(!HAS_GUC_CT(i915)); - if (!ctch->enabled) return; diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c index a5ba0f007959..a8e7f0ba7c3b 100644 --- a/drivers/gpu/drm/i915/intel_uc.c +++ b/drivers/gpu/drm/i915/intel_uc.c @@ -235,24 +235,14 @@ static void guc_disable_interrupts(struct intel_guc *guc) static int guc_enable_communication(struct intel_guc *guc) { - struct drm_i915_private *i915 = guc_to_i915(guc); - guc_enable_interrupts(guc); - if (HAS_GUC_CT(i915)) - return intel_guc_ct_enable(&guc->ct); - - guc->send = intel_guc_send_mmio; - guc->handler = intel_guc_to_host_event_handler_mmio; - return 0; + return intel_guc_ct_enable(&guc->ct); } static void guc_stop_communication(struct intel_guc *guc) { - struct drm_i915_private *i915 = guc_to_i915(guc); - - if (HAS_GUC_CT(i915)) - intel_guc_ct_stop(&guc->ct); + intel_guc_ct_stop(&guc->ct); guc->send = intel_guc_send_nop; guc->handler = intel_guc_to_host_event_handler_nop; @@ -260,10 +250,7 @@ static void guc_stop_communication(struct intel_guc *guc) static void guc_disable_communication(struct intel_guc *guc) { - struct drm_i915_private *i915 = guc_to_i915(guc); - - if (HAS_GUC_CT(i915)) - intel_guc_ct_disable(&guc->ct); + intel_guc_ct_disable(&guc->ct); guc_disable_interrupts(guc); -- cgit v1.2.3 From 23529cbe91569a12374e6008f8c7d173f3243f08 Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Thu, 6 Jun 2019 15:42:25 -0700 Subject: drm/i915/wopcm: update default size for gen11+ The size has been increased to 2MB starting from Gen11. GuC and HuC FWs fit in 1MB so we were fine even with the legacy define, but let's still move to the correct one before the blobs grow to avoid being caught off guard in the future. v2: return early if the platform doesn't have GuC, fix nits (Michal) Bspec: 12690 Signed-off-by: Daniele Ceraolo Spurio Cc: Michal Wajdeczko Reviewed-by: Michal Wajdeczko Reviewed-by: Stuart Summers [ickle: use SZ consistently] Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190606224225.14287-2-daniele.ceraolospurio@intel.com --- drivers/gpu/drm/i915/intel_wopcm.c | 27 ++++++++++++++++++--------- 1 file changed, 18 insertions(+), 9 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_wopcm.c b/drivers/gpu/drm/i915/intel_wopcm.c index f82a415ea2ba..7b4ba84b9fb8 100644 --- a/drivers/gpu/drm/i915/intel_wopcm.c +++ b/drivers/gpu/drm/i915/intel_wopcm.c @@ -41,26 +41,27 @@ * context). */ -/* Default WOPCM size 1MB. */ -#define GEN9_WOPCM_SIZE (1024 * 1024) +/* Default WOPCM size is 2MB from Gen11, 1MB on previous platforms */ +#define GEN11_WOPCM_SIZE SZ_2M +#define GEN9_WOPCM_SIZE SZ_1M /* 16KB WOPCM (RSVD WOPCM) is reserved from HuC firmware top. 
*/ -#define WOPCM_RESERVED_SIZE (16 * 1024) +#define WOPCM_RESERVED_SIZE SZ_16K /* 16KB reserved at the beginning of GuC WOPCM. */ -#define GUC_WOPCM_RESERVED (16 * 1024) +#define GUC_WOPCM_RESERVED SZ_16K /* 8KB from GUC_WOPCM_RESERVED is reserved for GuC stack. */ -#define GUC_WOPCM_STACK_RESERVED (8 * 1024) +#define GUC_WOPCM_STACK_RESERVED SZ_8K /* GuC WOPCM Offset value needs to be aligned to 16KB. */ #define GUC_WOPCM_OFFSET_ALIGNMENT (1UL << GUC_WOPCM_OFFSET_SHIFT) /* 24KB at the end of WOPCM is reserved for RC6 CTX on BXT. */ -#define BXT_WOPCM_RC6_CTX_RESERVED (24 * 1024) +#define BXT_WOPCM_RC6_CTX_RESERVED (SZ_16K + SZ_8K) /* 36KB WOPCM reserved at the end of WOPCM on CNL. */ -#define CNL_WOPCM_HW_CTX_RESERVED (36 * 1024) +#define CNL_WOPCM_HW_CTX_RESERVED (SZ_32K + SZ_4K) /* 128KB from GUC_WOPCM_RESERVED is reserved for FW on Gen9. */ -#define GEN9_GUC_FW_RESERVED (128 * 1024) +#define GEN9_GUC_FW_RESERVED SZ_128K #define GEN9_GUC_WOPCM_OFFSET (GUC_WOPCM_RESERVED + GEN9_GUC_FW_RESERVED) /** @@ -71,7 +72,15 @@ */ void intel_wopcm_init_early(struct intel_wopcm *wopcm) { - wopcm->size = GEN9_WOPCM_SIZE; + struct drm_i915_private *i915 = wopcm_to_i915(wopcm); + + if (!HAS_GUC(i915)) + return; + + if (INTEL_GEN(i915) >= 11) + wopcm->size = GEN11_WOPCM_SIZE; + else + wopcm->size = GEN9_WOPCM_SIZE; DRM_DEBUG_DRIVER("WOPCM size: %uKiB\n", wopcm->size / 1024); } -- cgit v1.2.3 From d13616db8bac1fa1b2efa814c6a4b14e52d2144e Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Thu, 6 Jun 2019 15:22:00 +0300 Subject: drm/i915: move pm related declarations to intel_pm.h Move more missed declarations from i915_drv.h to intel_pm.h. Reviewed-by: Chris Wilson Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190606122203.13416-1-jani.nikula@intel.com --- drivers/gpu/drm/i915/i915_drv.h | 10 ---------- drivers/gpu/drm/i915/i915_irq.c | 1 + drivers/gpu/drm/i915/intel_pm.h | 9 +++++++++ 3 files changed, 10 insertions(+), 10 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 82e55c65289a..919208b31859 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2501,10 +2501,6 @@ extern void i915_driver_unload(struct drm_device *dev); extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine); extern void intel_hangcheck_init(struct drm_i915_private *dev_priv); -extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); -extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv); -extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv); -extern void i915_update_gfx_val(struct drm_i915_private *dev_priv); int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on); u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv); @@ -2832,13 +2828,7 @@ extern int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, extern void intel_display_resume(struct drm_device *dev); extern void i915_redisable_vga(struct drm_i915_private *dev_priv); extern void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv); -extern bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val); extern void intel_init_pch_refclk(struct drm_i915_private *dev_priv); -extern int intel_set_rps(struct drm_i915_private *dev_priv, u8 val); -extern void intel_rps_mark_interactive(struct drm_i915_private *i915, - bool interactive); -extern bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, - bool enable); 
int i915_reg_read_ioctl(struct drm_device *dev, void *data, struct drm_file *file); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index ca8f4226e598..11c451358fb8 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -44,6 +44,7 @@ #include "intel_fifo_underrun.h" #include "intel_hotplug.h" #include "intel_lpe_audio.h" +#include "intel_pm.h" #include "intel_psr.h" /** diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h index 17339c99440c..1b489fa399e1 100644 --- a/drivers/gpu/drm/i915/intel_pm.h +++ b/drivers/gpu/drm/i915/intel_pm.h @@ -77,5 +77,14 @@ u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv, i915_reg_t reg); u32 intel_get_cagf(struct drm_i915_private *dev_priv, u32 rpstat1); +unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); +unsigned long i915_mch_val(struct drm_i915_private *dev_priv); +unsigned long i915_gfx_val(struct drm_i915_private *dev_priv); +void i915_update_gfx_val(struct drm_i915_private *dev_priv); + +bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val); +int intel_set_rps(struct drm_i915_private *dev_priv, u8 val); +void intel_rps_mark_interactive(struct drm_i915_private *i915, bool interactive); +bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable); #endif /* __INTEL_PM_H__ */ -- cgit v1.2.3 From a6617183b7d2799a507a73095bb2f885f31906cb Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Thu, 6 Jun 2019 15:22:01 +0300 Subject: drm/i915: remove some unused declarations from intel_drv.h intel_mark_busy(), intel_mark_idle(), and skl_cdclk_get_vco() no longer exist. Reviewed-by: Chris Wilson Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190606122203.13416-2-jani.nikula@intel.com --- drivers/gpu/drm/i915/intel_drv.h | 3 --- 1 file changed, 3 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index d0aeb383024a..e3f2b9976ead 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -1468,8 +1468,6 @@ void intel_add_fb_offsets(int *x, int *y, unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info); unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info); bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv); -void intel_mark_busy(struct drm_i915_private *dev_priv); -void intel_mark_idle(struct drm_i915_private *dev_priv); int intel_display_suspend(struct drm_device *dev); void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv); void intel_encoder_destroy(struct drm_encoder *encoder); @@ -1578,7 +1576,6 @@ void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state); #define assert_pipe_disabled(d, p) assert_pipe(d, p, false) void intel_prepare_reset(struct drm_i915_private *dev_priv); void intel_finish_reset(struct drm_i915_private *dev_priv); -unsigned int skl_cdclk_get_vco(unsigned int freq); void intel_dp_get_m_n(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config); void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, -- cgit v1.2.3 From f59d6414a6d105bd3e8e1650f236af40d7633894 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Thu, 6 Jun 2019 15:22:02 +0300 Subject: drm/i915: move more atomic plane declarations to intel_atomic_plane.h Some function declarations in intel_drv.h were missed when intel_atomic_plane.h was created. 
Reviewed-by: Chris Wilson Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190606122203.13416-3-jani.nikula@intel.com --- drivers/gpu/drm/i915/intel_atomic_plane.h | 16 ++++++++++++++++ drivers/gpu/drm/i915/intel_drv.h | 12 ------------ 2 files changed, 16 insertions(+), 12 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.h b/drivers/gpu/drm/i915/intel_atomic_plane.h index 0a9651376d0e..24320041498d 100644 --- a/drivers/gpu/drm/i915/intel_atomic_plane.h +++ b/drivers/gpu/drm/i915/intel_atomic_plane.h @@ -6,7 +6,11 @@ #ifndef __INTEL_ATOMIC_PLANE_H__ #define __INTEL_ATOMIC_PLANE_H__ +#include <linux/types.h> + +struct drm_crtc_state; struct drm_plane; +struct drm_property; struct intel_atomic_state; struct intel_crtc; struct intel_crtc_state; @@ -38,5 +42,17 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_ struct intel_crtc_state *crtc_state, const struct intel_plane_state *old_plane_state, struct intel_plane_state *intel_state); +int intel_plane_atomic_get_property(struct drm_plane *plane, + const struct drm_plane_state *state, + struct drm_property *property, + u64 *val); +int intel_plane_atomic_set_property(struct drm_plane *plane, + struct drm_plane_state *state, + struct drm_property *property, + u64 val); +int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state, + struct drm_crtc_state *crtc_state, + const struct intel_plane_state *old_plane_state, + struct drm_plane_state *plane_state); #endif /* __INTEL_ATOMIC_PLANE_H__ */ diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index e3f2b9976ead..3e337317f77e 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -1535,18 +1535,6 @@ int intel_prepare_plane_fb(struct drm_plane *plane, struct drm_plane_state *new_state); void intel_cleanup_plane_fb(struct drm_plane *plane, struct drm_plane_state *old_state); -int intel_plane_atomic_get_property(struct drm_plane *plane, - const struct drm_plane_state *state, - struct drm_property *property, - u64 *val); -int intel_plane_atomic_set_property(struct drm_plane *plane, - struct drm_plane_state *state, - struct drm_property *property, - u64 val); -int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state, - struct drm_crtc_state *crtc_state, - const struct intel_plane_state *old_plane_state, - struct drm_plane_state *plane_state); void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv, enum pipe pipe); -- cgit v1.2.3 From c57984478ba97dd3b0ea00933a1bb08a35abda11 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Thu, 6 Jun 2019 15:22:03 +0300 Subject: drm/i915/frontbuffer: remove obsolete comment about mark busy/idle This no longer exists. Reviewed-by: Chris Wilson Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190606122203.13416-4-jani.nikula@intel.com --- drivers/gpu/drm/i915/intel_frontbuffer.c | 6 ------ 1 file changed, 6 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c index aa34e33b6087..d6036b9ad16a 100644 --- a/drivers/gpu/drm/i915/intel_frontbuffer.c +++ b/drivers/gpu/drm/i915/intel_frontbuffer.c @@ -53,14 +53,8 @@ * busyness. There is no direct way to detect idleness. Instead an idle timer * work delayed work should be started from the flush and flip functions and * cancelled as soon as busyness is detected. 
- * - * Note that there's also an older frontbuffer activity tracking scheme which - * just tracks general activity. This is done by the various mark_busy and - * mark_idle functions. For display power management features using these - * functions is deprecated and should be avoided. */ - #include "i915_drv.h" #include "intel_dp.h" #include "intel_drv.h" -- cgit v1.2.3 From cf20b411bb02dc2a55b38d75c5f493ffecd85c66 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Fri, 7 Jun 2019 13:31:18 +0200 Subject: drm/i915: Grammar s/the its/its/ Fix the grammar. Signed-off-by: Geert Uytterhoeven Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190607113118.14645-1-geert+renesas@glider.be --- drivers/gpu/drm/i915/intel_dpll_mgr.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.h b/drivers/gpu/drm/i915/intel_dpll_mgr.h index b5dd9c7ad772..d0570414f3d1 100644 --- a/drivers/gpu/drm/i915/intel_dpll_mgr.h +++ b/drivers/gpu/drm/i915/intel_dpll_mgr.h @@ -293,7 +293,7 @@ struct intel_shared_dpll { /** * @state: * - * Store the state for the pll, including the its hw state + * Store the state for the pll, including its hw state * and CRTCs using it. */ struct intel_shared_dpll_state state; -- cgit v1.2.3 From 6a8cc66ffe0fc87b0df10405ceb107f64795c28b Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Fri, 7 Jun 2019 12:59:32 +0100 Subject: drm/i915: Move i915_check_and_clear_faults to intel_reset.c The code is logically about reset so it makes sense. It also enables making i915_clear_error_registers static. Signed-off-by: Tvrtko Ursulin Suggested-by: Chris Wilson Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190607115932.20271-1-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/gt/intel_reset.c | 69 +++++++++++++++++++++++++++++++++-- drivers/gpu/drm/i915/gt/intel_reset.h | 3 +- drivers/gpu/drm/i915/i915_gem_gtt.c | 63 -------------------------------- drivers/gpu/drm/i915/i915_gem_gtt.h | 1 - 4 files changed, 67 insertions(+), 69 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index a6ecfdc735c4..60d24110af80 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -1166,8 +1166,8 @@ static void gen8_clear_engine_error_register(struct intel_engine_cs *engine) GEN6_RING_FAULT_REG_POSTING_READ(engine); } -void i915_clear_error_registers(struct drm_i915_private *i915, - intel_engine_mask_t engine_mask) +static void clear_error_registers(struct drm_i915_private *i915, + intel_engine_mask_t engine_mask) { struct intel_uncore *uncore = &i915->uncore; u32 eir; @@ -1205,6 +1205,69 @@ void i915_clear_error_registers(struct drm_i915_private *i915, } } +static void gen6_check_faults(struct drm_i915_private *dev_priv) +{ + struct intel_engine_cs *engine; + enum intel_engine_id id; + u32 fault; + + for_each_engine(engine, dev_priv, id) { + fault = GEN6_RING_FAULT_REG_READ(engine); + if (fault & RING_FAULT_VALID) { + DRM_DEBUG_DRIVER("Unexpected fault\n" + "\tAddr: 0x%08lx\n" + "\tAddress space: %s\n" + "\tSource ID: %d\n" + "\tType: %d\n", + fault & PAGE_MASK, + fault & RING_FAULT_GTTSEL_MASK ? 
"GGTT" : "PPGTT", + RING_FAULT_SRCID(fault), + RING_FAULT_FAULT_TYPE(fault)); + } + } +} + +static void gen8_check_faults(struct drm_i915_private *dev_priv) +{ + u32 fault = I915_READ(GEN8_RING_FAULT_REG); + + if (fault & RING_FAULT_VALID) { + u32 fault_data0, fault_data1; + u64 fault_addr; + + fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0); + fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1); + fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) | + ((u64)fault_data0 << 12); + + DRM_DEBUG_DRIVER("Unexpected fault\n" + "\tAddr: 0x%08x_%08x\n" + "\tAddress space: %s\n" + "\tEngine ID: %d\n" + "\tSource ID: %d\n" + "\tType: %d\n", + upper_32_bits(fault_addr), + lower_32_bits(fault_addr), + fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT", + GEN8_RING_FAULT_ENGINE_ID(fault), + RING_FAULT_SRCID(fault), + RING_FAULT_FAULT_TYPE(fault)); + } +} + +void i915_check_and_clear_faults(struct drm_i915_private *i915) +{ + /* From GEN8 onwards we only have one 'All Engine Fault Register' */ + if (INTEL_GEN(i915) >= 8) + gen8_check_faults(i915); + else if (INTEL_GEN(i915) >= 6) + gen6_check_faults(i915); + else + return; + + clear_error_registers(i915, ALL_ENGINES); +} + /** * i915_handle_error - handle a gpu error * @i915: i915 device private @@ -1253,7 +1316,7 @@ void i915_handle_error(struct drm_i915_private *i915, if (flags & I915_ERROR_CAPTURE) { i915_capture_error_state(i915, engine_mask, msg); - i915_clear_error_registers(i915, engine_mask); + clear_error_registers(i915, engine_mask); } /* diff --git a/drivers/gpu/drm/i915/gt/intel_reset.h b/drivers/gpu/drm/i915/gt/intel_reset.h index 4f3c1acac1a3..580ebdb59eca 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.h +++ b/drivers/gpu/drm/i915/gt/intel_reset.h @@ -25,8 +25,7 @@ void i915_handle_error(struct drm_i915_private *i915, const char *fmt, ...); #define I915_ERROR_CAPTURE BIT(0) -void i915_clear_error_registers(struct drm_i915_private *i915, - intel_engine_mask_t engine_mask); +void i915_check_and_clear_faults(struct drm_i915_private *i915); void i915_reset(struct drm_i915_private *i915, intel_engine_mask_t stalled_mask, diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 87be9c1b6021..2e15850bd987 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -2300,69 +2300,6 @@ static bool needs_idle_maps(struct drm_i915_private *dev_priv) return IS_GEN(dev_priv, 5) && IS_MOBILE(dev_priv) && intel_vtd_active(); } -static void gen6_check_faults(struct drm_i915_private *dev_priv) -{ - struct intel_engine_cs *engine; - enum intel_engine_id id; - u32 fault; - - for_each_engine(engine, dev_priv, id) { - fault = GEN6_RING_FAULT_REG_READ(engine); - if (fault & RING_FAULT_VALID) { - DRM_DEBUG_DRIVER("Unexpected fault\n" - "\tAddr: 0x%08lx\n" - "\tAddress space: %s\n" - "\tSource ID: %d\n" - "\tType: %d\n", - fault & PAGE_MASK, - fault & RING_FAULT_GTTSEL_MASK ? 
"GGTT" : "PPGTT", - RING_FAULT_SRCID(fault), - RING_FAULT_FAULT_TYPE(fault)); - } - } -} - -static void gen8_check_faults(struct drm_i915_private *dev_priv) -{ - u32 fault = I915_READ(GEN8_RING_FAULT_REG); - - if (fault & RING_FAULT_VALID) { - u32 fault_data0, fault_data1; - u64 fault_addr; - - fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0); - fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1); - fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) | - ((u64)fault_data0 << 12); - - DRM_DEBUG_DRIVER("Unexpected fault\n" - "\tAddr: 0x%08x_%08x\n" - "\tAddress space: %s\n" - "\tEngine ID: %d\n" - "\tSource ID: %d\n" - "\tType: %d\n", - upper_32_bits(fault_addr), - lower_32_bits(fault_addr), - fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT", - GEN8_RING_FAULT_ENGINE_ID(fault), - RING_FAULT_SRCID(fault), - RING_FAULT_FAULT_TYPE(fault)); - } -} - -void i915_check_and_clear_faults(struct drm_i915_private *dev_priv) -{ - /* From GEN8 onwards we only have one 'All Engine Fault Register' */ - if (INTEL_GEN(dev_priv) >= 8) - gen8_check_faults(dev_priv); - else if (INTEL_GEN(dev_priv) >= 6) - gen6_check_faults(dev_priv); - else - return; - - i915_clear_error_registers(dev_priv, ALL_ENGINES); -} - void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv) { struct i915_ggtt *ggtt = &dev_priv->ggtt; diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index 12856f9dd1d1..97700a37c12b 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -646,7 +646,6 @@ int gen6_ppgtt_pin(struct i915_hw_ppgtt *base); void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base); void gen6_ppgtt_unpin_all(struct i915_hw_ppgtt *base); -void i915_check_and_clear_faults(struct drm_i915_private *dev_priv); void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv); void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv); -- cgit v1.2.3 From bf210f6c9e6fd8dc0d154ad18f741f20e64a3fce Mon Sep 17 00:00:00 2001 From: Lionel Landwerlin Date: Sun, 2 Jun 2019 01:58:45 +0300 Subject: drm/i915/perf: fix whitelist on Gen10+ Gen10 added an additional NOA_WRITE register (high bits) and we forgot to whitelist it for userspace. 
Fixes: 95690a02fb5d96 ("drm/i915/perf: enable perf support on CNL") Signed-off-by: Lionel Landwerlin Reviewed-by: Kenneth Graunke Link: https://patchwork.freedesktop.org/patch/msgid/20190601225845.12600-1-lionel.g.landwerlin@intel.com --- drivers/gpu/drm/i915/i915_perf.c | 1 + drivers/gpu/drm/i915/i915_reg.h | 1 + 2 files changed, 2 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 2e33a9b4eae7..d92ddfada262 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -3027,6 +3027,7 @@ static bool gen8_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr) static bool gen10_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr) { return gen8_is_valid_mux_addr(dev_priv, addr) || + addr == i915_mmio_reg_offset(GEN10_NOA_WRITE_HIGH) || (addr >= i915_mmio_reg_offset(OA_PERFCNT3_LO) && addr <= i915_mmio_reg_offset(OA_PERFCNT4_HI)); } diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index dcf3c4d8d827..c87d288abb19 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1063,6 +1063,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define NOA_DATA _MMIO(0x986C) #define NOA_WRITE _MMIO(0x9888) +#define GEN10_NOA_WRITE_HIGH _MMIO(0x9884) #define _GEN7_PIPEA_DE_LOAD_SL 0x70068 #define _GEN7_PIPEB_DE_LOAD_SL 0x71068 -- cgit v1.2.3 From 4d94961d7f283880e2ae453209cdf067010ddd2e Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Mon, 10 Jun 2019 13:06:03 +0100 Subject: drm/i915: Eliminate unused mmio accessors On the path of removing mmio accessors with implicit dev_priv, easy first step is to remove all such unused macros. Signed-off-by: Tvrtko Ursulin Suggested-by: Jani Nikula Reviewed-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190610120608.15477-1-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/i915_drv.h | 4 ---- 1 file changed, 4 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 919208b31859..20eb37b760c4 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2842,12 +2842,9 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e, intel_uncore_##op__(&(dev_priv__)->uncore, __VA_ARGS__) #define I915_READ8(reg__) __I915_REG_OP(read8, dev_priv, (reg__)) -#define I915_WRITE8(reg__, val__) __I915_REG_OP(write8, dev_priv, (reg__), (val__)) #define I915_READ16(reg__) __I915_REG_OP(read16, dev_priv, (reg__)) #define I915_WRITE16(reg__, val__) __I915_REG_OP(write16, dev_priv, (reg__), (val__)) -#define I915_READ16_NOTRACE(reg__) __I915_REG_OP(read16_notrace, dev_priv, (reg__)) -#define I915_WRITE16_NOTRACE(reg__, val__) __I915_REG_OP(write16_notrace, dev_priv, (reg__), (val__)) #define I915_READ(reg__) __I915_REG_OP(read, dev_priv, (reg__)) #define I915_WRITE(reg__, val__) __I915_REG_OP(write, dev_priv, (reg__), (val__)) @@ -2903,7 +2900,6 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e, */ #define I915_READ_FW(reg__) __I915_REG_OP(read_fw, dev_priv, (reg__)) #define I915_WRITE_FW(reg__, val__) __I915_REG_OP(write_fw, dev_priv, (reg__), (val__)) -#define I915_WRITE64_FW(reg__, val__) __I915_REG_OP(write64_fw, dev_priv, (reg__), (val__)) #define POSTING_READ_FW(reg__) __I915_REG_OP(posting_read_fw, dev_priv, (reg__)) /* "Broadcast RGB" property */ -- cgit v1.2.3 From 8ed3a623048246918f5bd47e0e08a71cb773b154 
Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Mon, 10 Jun 2019 13:06:04 +0100 Subject: drm/i915: Convert i915_reg_read_ioctl to use explicit mmio accessors No excuse for code located in intel_uncore.c to not use intel_uncore_ helpers. Signed-off-by: Tvrtko Ursulin Suggested-by: Jani Nikula Reviewed-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190610120608.15477-2-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/intel_uncore.c | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index f78668123f02..85171a8b866a 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -1672,7 +1672,8 @@ static const struct reg_whitelist { int i915_reg_read_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *i915 = to_i915(dev); + struct intel_uncore *uncore = &i915->uncore; struct drm_i915_reg_read *reg = data; struct reg_whitelist const *entry; intel_wakeref_t wakeref; @@ -1689,7 +1690,7 @@ int i915_reg_read_ioctl(struct drm_device *dev, GEM_BUG_ON(entry->size > 8); GEM_BUG_ON(entry_offset & (entry->size - 1)); - if (INTEL_INFO(dev_priv)->gen_mask & entry->gen_mask && + if (INTEL_INFO(i915)->gen_mask & entry->gen_mask && entry_offset == (reg->offset & -entry->size)) break; entry++; @@ -1701,18 +1702,22 @@ int i915_reg_read_ioctl(struct drm_device *dev, flags = reg->offset & (entry->size - 1); - with_intel_runtime_pm(dev_priv, wakeref) { + with_intel_runtime_pm(i915, wakeref) { if (entry->size == 8 && flags == I915_REG_READ_8B_WA) - reg->val = I915_READ64_2x32(entry->offset_ldw, - entry->offset_udw); + reg->val = intel_uncore_read64_2x32(uncore, + entry->offset_ldw, + entry->offset_udw); else if (entry->size == 8 && flags == 0) - reg->val = I915_READ64(entry->offset_ldw); + reg->val = intel_uncore_read64(uncore, + entry->offset_ldw); else if (entry->size == 4 && flags == 0) - reg->val = I915_READ(entry->offset_ldw); + reg->val = intel_uncore_read(uncore, entry->offset_ldw); else if (entry->size == 2 && flags == 0) - reg->val = I915_READ16(entry->offset_ldw); + reg->val = intel_uncore_read16(uncore, + entry->offset_ldw); else if (entry->size == 1 && flags == 0) - reg->val = I915_READ8(entry->offset_ldw); + reg->val = intel_uncore_read8(uncore, + entry->offset_ldw); else ret = -EINVAL; } -- cgit v1.2.3 From 8500f14b6398d35779acc6ad8b76dbbec2487a8b Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Mon, 10 Jun 2019 13:06:05 +0100 Subject: drm/i915: Convert icl_get_stolen_reserved to uncore mmio accessors More removal of implicit dev_priv. 
Signed-off-by: Tvrtko Ursulin Reviewed-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190610120608.15477-3-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c index 84d4f549eb21..432037e5d0a5 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c @@ -325,11 +325,11 @@ static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv, *size = stolen_top - *base; } -static void icl_get_stolen_reserved(struct drm_i915_private *dev_priv, +static void icl_get_stolen_reserved(struct drm_i915_private *i915, resource_size_t *base, resource_size_t *size) { - u64 reg_val = I915_READ64(GEN6_STOLEN_RESERVED); + u64 reg_val = intel_uncore_read64(&i915->uncore, GEN6_STOLEN_RESERVED); DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val); -- cgit v1.2.3 From 7f1502d9961477c40ea4c93c9f59f342867ebe2e Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Mon, 10 Jun 2019 13:06:06 +0100 Subject: drm/i915: Convert gem_record_fences to uncore mmio accessors More implicit dev_priv removal. Signed-off-by: Tvrtko Ursulin Reviewed-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190610120608.15477-4-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/i915_gpu_error.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 193a93857d99..a523bf050a25 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -1123,17 +1123,23 @@ static u32 i915_error_generate_code(struct i915_gpu_state *error, static void gem_record_fences(struct i915_gpu_state *error) { struct drm_i915_private *dev_priv = error->i915; + struct intel_uncore *uncore = &dev_priv->uncore; int i; if (INTEL_GEN(dev_priv) >= 6) { for (i = 0; i < dev_priv->num_fence_regs; i++) - error->fence[i] = I915_READ64(FENCE_REG_GEN6_LO(i)); + error->fence[i] = + intel_uncore_read64(uncore, + FENCE_REG_GEN6_LO(i)); } else if (INTEL_GEN(dev_priv) >= 4) { for (i = 0; i < dev_priv->num_fence_regs; i++) - error->fence[i] = I915_READ64(FENCE_REG_965_LO(i)); + error->fence[i] = + intel_uncore_read64(uncore, + FENCE_REG_965_LO(i)); } else { for (i = 0; i < dev_priv->num_fence_regs; i++) - error->fence[i] = I915_READ(FENCE_REG(i)); + error->fence[i] = + intel_uncore_read(uncore, FENCE_REG(i)); } error->nfence = i; } -- cgit v1.2.3 From 1cea02dbca840491800abf68f1e2c34d3e0c473a Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Mon, 10 Jun 2019 13:06:07 +0100 Subject: drm/i915: Convert intel_read_wm_latency to uncore mmio accessors More implicit dev_priv removal. 
Signed-off-by: Tvrtko Ursulin Reviewed-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190610120608.15477-5-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/intel_pm.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 8f82cb72d3a6..d7272d4ff258 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -2813,6 +2813,8 @@ hsw_compute_linetime_wm(const struct intel_crtc_state *cstate) static void intel_read_wm_latency(struct drm_i915_private *dev_priv, u16 wm[8]) { + struct intel_uncore *uncore = &dev_priv->uncore; + if (INTEL_GEN(dev_priv) >= 9) { u32 val; int ret, i; @@ -2894,7 +2896,7 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv, wm[0] += 1; } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { - u64 sskpd = I915_READ64(MCH_SSKPD); + u64 sskpd = intel_uncore_read64(uncore, MCH_SSKPD); wm[0] = (sskpd >> 56) & 0xFF; if (wm[0] == 0) @@ -2904,14 +2906,14 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv, wm[3] = (sskpd >> 20) & 0x1FF; wm[4] = (sskpd >> 32) & 0x1FF; } else if (INTEL_GEN(dev_priv) >= 6) { - u32 sskpd = I915_READ(MCH_SSKPD); + u32 sskpd = intel_uncore_read(uncore, MCH_SSKPD); wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK; wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK; wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK; wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK; } else if (INTEL_GEN(dev_priv) >= 5) { - u32 mltr = I915_READ(MLTR_ILK); + u32 mltr = intel_uncore_read(uncore, MLTR_ILK); /* ILK primary LP0 latency is 700 ns */ wm[0] = 7; -- cgit v1.2.3 From 6caed5c938f6f316b837e55fd78bb9041362327e Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Mon, 10 Jun 2019 13:06:08 +0100 Subject: drm/i915: Remove I915_READ64 and I915_READ64_32x2 Now that all their users are gone we can remove the macros and accompanying duplicated comment. Signed-off-by: Tvrtko Ursulin Suggested-by: Jani Nikula Reviewed-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190610120608.15477-6-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/i915_drv.h | 18 ------------------ 1 file changed, 18 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 20eb37b760c4..d3c02e009a98 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2851,24 +2851,6 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e, #define I915_READ_NOTRACE(reg__) __I915_REG_OP(read_notrace, dev_priv, (reg__)) #define I915_WRITE_NOTRACE(reg__, val__) __I915_REG_OP(write_notrace, dev_priv, (reg__), (val__)) -/* Be very careful with read/write 64-bit values. On 32-bit machines, they - * will be implemented using 2 32-bit writes in an arbitrary order with - * an arbitrary delay between them. This can cause the hardware to - * act upon the intermediate value, possibly leading to corruption and - * machine death. For this reason we do not support I915_WRITE64, or - * dev_priv->uncore.funcs.mmio_writeq. - * - * When reading a 64-bit value as two 32-bit values, the delay may cause - * the two reads to mismatch, e.g. a timestamp overflowing. Also note that - * occasionally a 64-bit register does not actualy support a full readq - * and must be read using two 32-bit reads. - * - * You have been warned. 
- */ -#define I915_READ64(reg__) __I915_REG_OP(read64, dev_priv, (reg__)) -#define I915_READ64_2x32(lower_reg__, upper_reg__) \ - __I915_REG_OP(read64_2x32, dev_priv, (lower_reg__), (upper_reg__)) - #define POSTING_READ(reg__) __I915_REG_OP(posting_read, dev_priv, (reg__)) #define POSTING_READ16(reg__) __I915_REG_OP(posting_read16, dev_priv, (reg__)) -- cgit v1.2.3 From f398bbde9e97769630c101ddc032a9b253d97df0 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Mon, 10 Jun 2019 13:57:06 +0100 Subject: drm/i915: Make read_subslice_reg take engine The function operates on the render engine so make the input reflect it. v2: * Pass engine to read_subslice_reg. (Chris) * Drop inline from read_subslice_reg. Signed-off-by: Tvrtko Ursulin Suggested-by: Rodrigo Vivi Cc: Chris Wilson Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190610125706.26110-1-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/gt/intel_engine_cs.c | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index 6b838948ba24..c0d986db5a75 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -972,11 +972,12 @@ u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv) return mcr_s_ss_select; } -static inline u32 -read_subslice_reg(struct drm_i915_private *dev_priv, int slice, - int subslice, i915_reg_t reg) +static u32 +read_subslice_reg(struct intel_engine_cs *engine, int slice, int subslice, + i915_reg_t reg) { - struct intel_uncore *uncore = &dev_priv->uncore; + struct drm_i915_private *i915 = engine->i915; + struct intel_uncore *uncore = engine->uncore; u32 mcr_slice_subslice_mask; u32 mcr_slice_subslice_select; u32 default_mcr_s_ss_select; @@ -984,7 +985,7 @@ read_subslice_reg(struct drm_i915_private *dev_priv, int slice, u32 ret; enum forcewake_domains fw_domains; - if (INTEL_GEN(dev_priv) >= 11) { + if (INTEL_GEN(i915) >= 11) { mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK; mcr_slice_subslice_select = GEN11_MCR_SLICE(slice) | @@ -996,7 +997,7 @@ read_subslice_reg(struct drm_i915_private *dev_priv, int slice, GEN8_MCR_SUBSLICE(subslice); } - default_mcr_s_ss_select = intel_calculate_mcr_s_ss_select(dev_priv); + default_mcr_s_ss_select = intel_calculate_mcr_s_ss_select(i915); fw_domains = intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ); @@ -1033,7 +1034,7 @@ read_subslice_reg(struct drm_i915_private *dev_priv, int slice, void intel_engine_get_instdone(struct intel_engine_cs *engine, struct intel_instdone *instdone) { - struct drm_i915_private *dev_priv = engine->i915; + struct drm_i915_private *i915 = engine->i915; struct intel_uncore *uncore = engine->uncore; u32 mmio_base = engine->mmio_base; int slice; @@ -1041,7 +1042,7 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine, memset(instdone, 0, sizeof(*instdone)); - switch (INTEL_GEN(dev_priv)) { + switch (INTEL_GEN(i915)) { default: instdone->instdone = intel_uncore_read(uncore, RING_INSTDONE(mmio_base)); @@ -1051,12 +1052,12 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine, instdone->slice_common = intel_uncore_read(uncore, GEN7_SC_INSTDONE); - for_each_instdone_slice_subslice(dev_priv, slice, subslice) { + for_each_instdone_slice_subslice(i915, slice, subslice) { instdone->sampler[slice][subslice] = - read_subslice_reg(dev_priv, slice, subslice, + 
read_subslice_reg(engine, slice, subslice, GEN7_SAMPLER_INSTDONE); instdone->row[slice][subslice] = - read_subslice_reg(dev_priv, slice, subslice, + read_subslice_reg(engine, slice, subslice, GEN7_ROW_INSTDONE); } break; -- cgit v1.2.3 From f4d57d838c48ebb123f9032cca0e5697c457868f Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 10 Jun 2019 11:36:10 +0100 Subject: drm/i915: Allow interrupts when taking the timeline->mutex Before we commit ourselves to writing commands into the ringbuffer and submitting the request, allow signals to interrupt acquisition of the timeline mutex. We allow ourselves to be interrupted at any time later if we need to block for space in the ring, anyway. Signed-off-by: Chris Wilson Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190610103610.19883-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_context.h | 5 +++-- drivers/gpu/drm/i915/i915_request.c | 5 ++++- 2 files changed, 7 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h index 63392c88cd98..6d5453ba2c1e 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.h +++ b/drivers/gpu/drm/i915/gt/intel_context.h @@ -113,10 +113,11 @@ static inline void intel_context_put(struct intel_context *ce) kref_put(&ce->ref, ce->ops->destroy); } -static inline void intel_context_timeline_lock(struct intel_context *ce) +static inline int __must_check +intel_context_timeline_lock(struct intel_context *ce) __acquires(&ce->ring->timeline->mutex) { - mutex_lock(&ce->ring->timeline->mutex); + return mutex_lock_interruptible(&ce->ring->timeline->mutex); } static inline void intel_context_timeline_unlock(struct intel_context *ce) diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index da1e6984a8cc..e9b59eea4f10 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -784,8 +784,11 @@ struct i915_request * i915_request_create(struct intel_context *ce) { struct i915_request *rq; + int err; - intel_context_timeline_lock(ce); + err = intel_context_timeline_lock(ce); + if (err) + return ERR_PTR(err); /* Move our oldest request to the slab-cache (if not in use!) */ rq = list_first_entry(&ce->ring->request_list, typeof(*rq), ring_link); -- cgit v1.2.3 From a8cff4c8283af35546339c9ada5a90a70fe4a075 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 10 Jun 2019 15:54:30 +0100 Subject: drm/i915: Promote i915->mm.obj_lock to be irqsafe The intent is to be able to update the mm.lists from inside an irqsoff section (e.g. from a softirq rcu workqueue), ergo we need to make the i915->mm.obj_lock irqsafe. 
v2: can_discard_pages() ensures we are shrinkable v3: Beware shadowing of 'flags' Fixes: 3b4fa9640ccd ("drm/i915: Track the purgeable objects on a separate eviction list") Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=110869 Signed-off-by: Chris Wilson Cc: Joonas Lahtinen Cc: Matthew Auld Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190610145430.17717-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_domain.c | 23 ++++++++++------- drivers/gpu/drm/i915/gem/i915_gem_object.c | 12 ++++++--- drivers/gpu/drm/i915/gem/i915_gem_pages.c | 16 +++++++++--- drivers/gpu/drm/i915/gem/i915_gem_shrinker.c | 38 +++++++++++++++------------- drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 5 ++-- drivers/gpu/drm/i915/i915_debugfs.c | 10 +++++--- drivers/gpu/drm/i915/i915_gem.c | 8 ++++-- drivers/gpu/drm/i915/i915_vma.c | 17 ++++++++----- 8 files changed, 80 insertions(+), 49 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c index e5deae62681f..31929220b90f 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c @@ -475,15 +475,20 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj) } mutex_unlock(&i915->ggtt.vm.mutex); - if (i915_gem_object_is_shrinkable(obj) && - obj->mm.madv == I915_MADV_WILLNEED) { - struct list_head *list; - - spin_lock(&i915->mm.obj_lock); - list = obj->bind_count ? - &i915->mm.bound_list : &i915->mm.unbound_list; - list_move_tail(&obj->mm.link, list); - spin_unlock(&i915->mm.obj_lock); + if (i915_gem_object_is_shrinkable(obj)) { + unsigned long flags; + + spin_lock_irqsave(&i915->mm.obj_lock, flags); + + if (obj->mm.madv == I915_MADV_WILLNEED) { + struct list_head *list; + + list = obj->bind_count ? 
+ &i915->mm.bound_list : &i915->mm.unbound_list; + list_move_tail(&obj->mm.link, list); + } + + spin_unlock_irqrestore(&i915->mm.obj_lock, flags); } } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c index a0bc8f7ab780..d02a1aff2058 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c @@ -207,9 +207,11 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915, */ if (i915_gem_object_has_pages(obj) && i915_gem_object_is_shrinkable(obj)) { - spin_lock(&i915->mm.obj_lock); + unsigned long flags; + + spin_lock_irqsave(&i915->mm.obj_lock, flags); list_del_init(&obj->mm.link); - spin_unlock(&i915->mm.obj_lock); + spin_unlock_irqrestore(&i915->mm.obj_lock, flags); } mutex_unlock(&i915->drm.struct_mutex); @@ -330,9 +332,11 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj) obj->mm.madv = I915_MADV_DONTNEED; if (i915_gem_object_has_pages(obj)) { - spin_lock(&i915->mm.obj_lock); + unsigned long flags; + + spin_lock_irqsave(&i915->mm.obj_lock, flags); list_move_tail(&obj->mm.link, &i915->mm.purge_list); - spin_unlock(&i915->mm.obj_lock); + spin_unlock_irqrestore(&i915->mm.obj_lock, flags); } } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c index 7e64fd6bc19b..7ff907d6d0c6 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c @@ -57,11 +57,15 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg)); if (i915_gem_object_is_shrinkable(obj)) { - spin_lock(&i915->mm.obj_lock); + unsigned long flags; + + spin_lock_irqsave(&i915->mm.obj_lock, flags); + i915->mm.shrink_count++; i915->mm.shrink_memory += obj->base.size; list_add(&obj->mm.link, &i915->mm.unbound_list); - spin_unlock(&i915->mm.obj_lock); + + spin_unlock_irqrestore(&i915->mm.obj_lock, flags); } } @@ -151,11 +155,15 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj) return pages; if (i915_gem_object_is_shrinkable(obj)) { - spin_lock(&i915->mm.obj_lock); + unsigned long flags; + + spin_lock_irqsave(&i915->mm.obj_lock, flags); + list_del(&obj->mm.link); i915->mm.shrink_count--; i915->mm.shrink_memory -= obj->base.size; - spin_unlock(&i915->mm.obj_lock); + + spin_unlock_irqrestore(&i915->mm.obj_lock, flags); } if (obj->mm.mapping) { diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c index d71e630c6fb8..70a4c9d3c098 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c @@ -138,7 +138,7 @@ unsigned long i915_gem_shrink(struct drm_i915_private *i915, unsigned long target, unsigned long *nr_scanned, - unsigned flags) + unsigned int shrink) { const struct { struct list_head *list; @@ -154,7 +154,7 @@ i915_gem_shrink(struct drm_i915_private *i915, unsigned long scanned = 0; bool unlock; - if (!shrinker_lock(i915, flags, &unlock)) + if (!shrinker_lock(i915, shrink, &unlock)) return 0; /* @@ -166,12 +166,12 @@ i915_gem_shrink(struct drm_i915_private *i915, * We don't care about errors here; if we cannot wait upon the GPU, * we will free as much as we can and hope to get a second chance. 
*/ - if (flags & I915_SHRINK_ACTIVE) + if (shrink & I915_SHRINK_ACTIVE) i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED, MAX_SCHEDULE_TIMEOUT); - trace_i915_gem_shrink(i915, target, flags); + trace_i915_gem_shrink(i915, target, shrink); i915_retire_requests(i915); /* @@ -179,10 +179,10 @@ i915_gem_shrink(struct drm_i915_private *i915, * device just to recover a little memory. If absolutely necessary, * we will force the wake during oom-notifier. */ - if (flags & I915_SHRINK_BOUND) { + if (shrink & I915_SHRINK_BOUND) { wakeref = intel_runtime_pm_get_if_in_use(i915); if (!wakeref) - flags &= ~I915_SHRINK_BOUND; + shrink &= ~I915_SHRINK_BOUND; } /* @@ -207,8 +207,9 @@ i915_gem_shrink(struct drm_i915_private *i915, for (phase = phases; phase->list; phase++) { struct list_head still_in_list; struct drm_i915_gem_object *obj; + unsigned long flags; - if ((flags & phase->bit) == 0) + if ((shrink & phase->bit) == 0) continue; INIT_LIST_HEAD(&still_in_list); @@ -220,50 +221,50 @@ i915_gem_shrink(struct drm_i915_private *i915, * to be able to shrink their pages, so they remain on * the unbound/bound list until actually freed. */ - spin_lock(&i915->mm.obj_lock); + spin_lock_irqsave(&i915->mm.obj_lock, flags); while (count < target && (obj = list_first_entry_or_null(phase->list, typeof(*obj), mm.link))) { list_move_tail(&obj->mm.link, &still_in_list); - if (flags & I915_SHRINK_VMAPS && + if (shrink & I915_SHRINK_VMAPS && !is_vmalloc_addr(obj->mm.mapping)) continue; - if (!(flags & I915_SHRINK_ACTIVE) && + if (!(shrink & I915_SHRINK_ACTIVE) && (i915_gem_object_is_active(obj) || i915_gem_object_is_framebuffer(obj))) continue; - if (!(flags & I915_SHRINK_BOUND) && + if (!(shrink & I915_SHRINK_BOUND) && READ_ONCE(obj->bind_count)) continue; if (!can_release_pages(obj)) continue; - spin_unlock(&i915->mm.obj_lock); + spin_unlock_irqrestore(&i915->mm.obj_lock, flags); if (unsafe_drop_pages(obj)) { /* May arrive from get_pages on another bo */ mutex_lock_nested(&obj->mm.lock, I915_MM_SHRINKER); if (!i915_gem_object_has_pages(obj)) { - try_to_writeback(obj, flags); + try_to_writeback(obj, shrink); count += obj->base.size >> PAGE_SHIFT; } mutex_unlock(&obj->mm.lock); } scanned += obj->base.size >> PAGE_SHIFT; - spin_lock(&i915->mm.obj_lock); + spin_lock_irqsave(&i915->mm.obj_lock, flags); } list_splice_tail(&still_in_list, phase->list); - spin_unlock(&i915->mm.obj_lock); + spin_unlock_irqrestore(&i915->mm.obj_lock, flags); } - if (flags & I915_SHRINK_BOUND) + if (shrink & I915_SHRINK_BOUND) intel_runtime_pm_put(i915, wakeref); i915_retire_requests(i915); @@ -379,6 +380,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr) struct drm_i915_gem_object *obj; unsigned long unevictable, bound, unbound, freed_pages; intel_wakeref_t wakeref; + unsigned long flags; freed_pages = 0; with_intel_runtime_pm(i915, wakeref) @@ -392,7 +394,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr) * being pointed to by hardware. 
*/ unbound = bound = unevictable = 0; - spin_lock(&i915->mm.obj_lock); + spin_lock_irqsave(&i915->mm.obj_lock, flags); list_for_each_entry(obj, &i915->mm.unbound_list, mm.link) { if (!can_release_pages(obj)) unevictable += obj->base.size >> PAGE_SHIFT; @@ -405,7 +407,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr) else bound += obj->base.size >> PAGE_SHIFT; } - spin_unlock(&i915->mm.obj_lock); + spin_unlock_irqrestore(&i915->mm.obj_lock, flags); if (freed_pages || unbound || bound) pr_info("Purging GPU memory, %lu pages freed, " diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c index 432037e5d0a5..8f619ad2fab8 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c @@ -613,6 +613,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv struct drm_i915_gem_object *obj; struct drm_mm_node *stolen; struct i915_vma *vma; + unsigned long flags; int ret; if (!drm_mm_initialized(&dev_priv->mm.stolen)) @@ -689,10 +690,10 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv list_move_tail(&vma->vm_link, &ggtt->vm.bound_list); mutex_unlock(&ggtt->vm.mutex); - spin_lock(&dev_priv->mm.obj_lock); + spin_lock_irqsave(&dev_priv->mm.obj_lock, flags); GEM_BUG_ON(i915_gem_object_is_shrinkable(obj)); obj->bind_count++; - spin_unlock(&dev_priv->mm.obj_lock); + spin_unlock_irqrestore(&dev_priv->mm.obj_lock, flags); return obj; diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index f212241a2758..2e79f0e4c5af 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -269,6 +269,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data) struct drm_i915_gem_object *obj; u64 total_obj_size, total_gtt_size; unsigned long total, count, n; + unsigned long flags; int ret; total = READ_ONCE(dev_priv->mm.shrink_count); @@ -282,7 +283,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data) total_obj_size = total_gtt_size = count = 0; - spin_lock(&dev_priv->mm.obj_lock); + spin_lock_irqsave(&dev_priv->mm.obj_lock, flags); list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) { if (count == total) break; @@ -305,7 +306,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data) objects[count++] = obj; total_obj_size += obj->base.size; } - spin_unlock(&dev_priv->mm.obj_lock); + spin_unlock_irqrestore(&dev_priv->mm.obj_lock, flags); sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL); @@ -457,6 +458,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data) u64 size, mapped_size, purgeable_size, dpy_size, huge_size; struct drm_i915_gem_object *obj; unsigned int page_sizes = 0; + unsigned long flags; char buf[80]; int ret; @@ -469,7 +471,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data) purgeable_size = purgeable_count = 0; huge_size = huge_count = 0; - spin_lock(&dev_priv->mm.obj_lock); + spin_lock_irqsave(&dev_priv->mm.obj_lock, flags); list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) { size += obj->base.size; ++count; @@ -518,7 +520,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data) page_sizes |= obj->mm.page_sizes.sg; } } - spin_unlock(&dev_priv->mm.obj_lock); + spin_unlock_irqrestore(&dev_priv->mm.obj_lock, flags); seq_printf(m, "%u bound objects, %llu bytes\n", count, size); diff --git a/drivers/gpu/drm/i915/i915_gem.c 
b/drivers/gpu/drm/i915/i915_gem.c index 9f2e213c6046..e980c1ee3dcf 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1138,7 +1138,10 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, struct list_head *list; if (i915_gem_object_is_shrinkable(obj)) { - spin_lock(&i915->mm.obj_lock); + unsigned long flags; + + spin_lock_irqsave(&i915->mm.obj_lock, flags); + if (obj->mm.madv != I915_MADV_WILLNEED) list = &i915->mm.purge_list; else if (obj->bind_count) @@ -1146,7 +1149,8 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, else list = &i915->mm.unbound_list; list_move_tail(&obj->mm.link, list); - spin_unlock(&i915->mm.obj_lock); + + spin_unlock_irqrestore(&i915->mm.obj_lock, flags); } } diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index f6ac8394da77..80050f6a0893 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -80,11 +80,14 @@ static void vma_print_allocator(struct i915_vma *vma, const char *reason) static void obj_bump_mru(struct drm_i915_gem_object *obj) { struct drm_i915_private *i915 = to_i915(obj->base.dev); + unsigned long flags; + + spin_lock_irqsave(&i915->mm.obj_lock, flags); - spin_lock(&i915->mm.obj_lock); if (obj->bind_count) list_move_tail(&obj->mm.link, &i915->mm.bound_list); - spin_unlock(&i915->mm.obj_lock); + + spin_unlock_irqrestore(&i915->mm.obj_lock, flags); obj->mm.dirty = true; /* be paranoid */ } @@ -678,8 +681,9 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) if (vma->obj) { struct drm_i915_gem_object *obj = vma->obj; + unsigned long flags; - spin_lock(&dev_priv->mm.obj_lock); + spin_lock_irqsave(&dev_priv->mm.obj_lock, flags); if (i915_gem_object_is_shrinkable(obj)) list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list); @@ -687,7 +691,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) obj->bind_count++; assert_bind_count(obj); - spin_unlock(&dev_priv->mm.obj_lock); + spin_unlock_irqrestore(&dev_priv->mm.obj_lock, flags); } return 0; @@ -721,8 +725,9 @@ i915_vma_remove(struct i915_vma *vma) */ if (vma->obj) { struct drm_i915_gem_object *obj = vma->obj; + unsigned long flags; - spin_lock(&i915->mm.obj_lock); + spin_lock_irqsave(&i915->mm.obj_lock, flags); GEM_BUG_ON(obj->bind_count == 0); if (--obj->bind_count == 0 && @@ -730,7 +735,7 @@ i915_vma_remove(struct i915_vma *vma) obj->mm.madv == I915_MADV_WILLNEED) list_move_tail(&obj->mm.link, &i915->mm.unbound_list); - spin_unlock(&i915->mm.obj_lock); + spin_unlock_irqrestore(&i915->mm.obj_lock, flags); /* * And finally now the object is completely decoupled from this -- cgit v1.2.3 From e568ac3874be7dcef3da0cc3bd6b91ca9dd14aa0 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 11 Jun 2019 10:12:37 +0100 Subject: drm/i915: Pull kref into i915_address_space Make the kref common to both derived structs (i915_ggtt and i915_ppgtt) so that we can safely reference count an abstract ctx->vm address space. 
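Structurally the patch moves the refcount out of the derived ppGTT struct into the shared base, so one get/put pair works for any address space and callers downcast with container_of() only when they genuinely need ppGTT-specific state. A simplified sketch of that shape, under made-up demo_* names rather than the real i915 types:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct demo_vm {
	struct kref ref;	/* lives in the common base, shared by all derived types */
};

struct demo_ppgtt {
	struct demo_vm vm;	/* base must remain the first member */
	/* ppGTT-only state would follow here */
};

static void demo_vm_release(struct kref *ref)
{
	struct demo_vm *vm = container_of(ref, struct demo_vm, ref);

	/* Freeing through the base pointer is valid because vm sits at offset 0. */
	kfree(vm);
}

static inline struct demo_vm *demo_vm_get(struct demo_vm *vm)
{
	kref_get(&vm->ref);
	return vm;
}

static inline void demo_vm_put(struct demo_vm *vm)
{
	kref_put(&vm->ref, demo_vm_release);
}

static inline struct demo_ppgtt *demo_vm_to_ppgtt(struct demo_vm *vm)
{
	BUILD_BUG_ON(offsetof(struct demo_ppgtt, vm));
	return container_of(vm, struct demo_ppgtt, vm);
}

static struct demo_ppgtt *demo_ppgtt_create(void)
{
	struct demo_ppgtt *ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);

	if (!ppgtt)
		return NULL;

	kref_init(&ppgtt->vm.ref);	/* refcount initialised with the base */
	return ppgtt;
}

Because the base is the first member, kref_put() can free the whole derived object through the base pointer; that is what the BUILD_BUG_ON(offsetof(...)) checks added to i915_gem_gtt.h in the diff below guard against.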
Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190611091238.15808-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_client_blt.c | 4 +- drivers/gpu/drm/i915/gem/i915_gem_context.c | 132 +++++++++++---------- drivers/gpu/drm/i915/gem/i915_gem_context_types.h | 6 +- drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 4 +- drivers/gpu/drm/i915/gem/i915_gem_object_blt.c | 4 +- drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 6 +- drivers/gpu/drm/i915/gem/selftests/huge_pages.c | 26 ++-- .../gpu/drm/i915/gem/selftests/i915_gem_context.c | 36 +++--- drivers/gpu/drm/i915/gem/selftests/mock_context.c | 3 +- drivers/gpu/drm/i915/gt/intel_lrc.c | 9 +- drivers/gpu/drm/i915/gt/intel_ringbuffer.c | 26 ++-- drivers/gpu/drm/i915/gt/selftest_hangcheck.c | 7 +- drivers/gpu/drm/i915/gt/selftest_lrc.c | 2 +- drivers/gpu/drm/i915/gt/selftest_workarounds.c | 10 +- drivers/gpu/drm/i915/gvt/scheduler.c | 8 +- drivers/gpu/drm/i915/i915_debugfs.c | 2 +- drivers/gpu/drm/i915/i915_drv.h | 6 - drivers/gpu/drm/i915/i915_gem_gtt.c | 35 +++--- drivers/gpu/drm/i915/i915_gem_gtt.h | 27 +++-- drivers/gpu/drm/i915/i915_gpu_error.c | 2 +- drivers/gpu/drm/i915/i915_trace.h | 2 +- drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 10 +- drivers/gpu/drm/i915/selftests/i915_request.c | 3 +- drivers/gpu/drm/i915/selftests/i915_vma.c | 4 +- drivers/gpu/drm/i915/selftests/igt_spinner.c | 5 +- drivers/gpu/drm/i915/selftests/mock_gtt.c | 1 - 26 files changed, 188 insertions(+), 192 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c index 4899ca1dd76c..f253ec5765ad 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c @@ -250,13 +250,11 @@ int i915_gem_schedule_fill_pages_blt(struct drm_i915_gem_object *obj, { struct drm_i915_private *i915 = to_i915(obj->base.dev); struct i915_gem_context *ctx = ce->gem_context; - struct i915_address_space *vm; + struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm; struct clear_pages_work *work; struct i915_sleeve *sleeve; int err; - vm = ctx->ppgtt ? 
&ctx->ppgtt->vm : &i915->ggtt.vm; - sleeve = create_sleeve(vm, obj, pages, page_sizes); if (IS_ERR(sleeve)) return PTR_ERR(sleeve); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index dd9aa77e38ae..75b0d1dbb215 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -309,7 +309,8 @@ static void i915_gem_context_free(struct i915_gem_context *ctx) GEM_BUG_ON(!i915_gem_context_is_closed(ctx)); release_hw_id(ctx); - i915_ppgtt_put(ctx->ppgtt); + if (ctx->vm) + i915_vm_put(ctx->vm); free_engines(rcu_access_pointer(ctx->engines)); mutex_destroy(&ctx->engines_mutex); @@ -397,7 +398,7 @@ static void context_close(struct i915_gem_context *ctx) } static u32 default_desc_template(const struct drm_i915_private *i915, - const struct i915_hw_ppgtt *ppgtt) + const struct i915_address_space *vm) { u32 address_mode; u32 desc; @@ -405,7 +406,7 @@ static u32 default_desc_template(const struct drm_i915_private *i915, desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE; address_mode = INTEL_LEGACY_32B_CONTEXT; - if (ppgtt && i915_vm_is_4lvl(&ppgtt->vm)) + if (vm && i915_vm_is_4lvl(vm)) address_mode = INTEL_LEGACY_64B_CONTEXT; desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT; @@ -421,7 +422,7 @@ static u32 default_desc_template(const struct drm_i915_private *i915, } static struct i915_gem_context * -__create_context(struct drm_i915_private *dev_priv) +__create_context(struct drm_i915_private *i915) { struct i915_gem_context *ctx; struct i915_gem_engines *e; @@ -433,8 +434,8 @@ __create_context(struct drm_i915_private *dev_priv) return ERR_PTR(-ENOMEM); kref_init(&ctx->ref); - list_add_tail(&ctx->link, &dev_priv->contexts.list); - ctx->i915 = dev_priv; + list_add_tail(&ctx->link, &i915->contexts.list); + ctx->i915 = i915; ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL); mutex_init(&ctx->mutex); @@ -452,14 +453,14 @@ __create_context(struct drm_i915_private *dev_priv) /* NB: Mark all slices as needing a remap so that when the context first * loads it will restore whatever remap state already exists. If there * is no remap info, it will be a NOP. 
*/ - ctx->remap_slice = ALL_L3_SLICES(dev_priv); + ctx->remap_slice = ALL_L3_SLICES(i915); i915_gem_context_set_bannable(ctx); i915_gem_context_set_recoverable(ctx); ctx->ring_size = 4 * PAGE_SIZE; ctx->desc_template = - default_desc_template(dev_priv, dev_priv->mm.aliasing_ppgtt); + default_desc_template(i915, &i915->mm.aliasing_ppgtt->vm); for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++) ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES; @@ -471,26 +472,26 @@ err_free: return ERR_PTR(err); } -static struct i915_hw_ppgtt * -__set_ppgtt(struct i915_gem_context *ctx, struct i915_hw_ppgtt *ppgtt) +static struct i915_address_space * +__set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm) { - struct i915_hw_ppgtt *old = ctx->ppgtt; + struct i915_address_space *old = ctx->vm; - ctx->ppgtt = i915_ppgtt_get(ppgtt); - ctx->desc_template = default_desc_template(ctx->i915, ppgtt); + ctx->vm = i915_vm_get(vm); + ctx->desc_template = default_desc_template(ctx->i915, vm); return old; } static void __assign_ppgtt(struct i915_gem_context *ctx, - struct i915_hw_ppgtt *ppgtt) + struct i915_address_space *vm) { - if (ppgtt == ctx->ppgtt) + if (vm == ctx->vm) return; - ppgtt = __set_ppgtt(ctx, ppgtt); - if (ppgtt) - i915_ppgtt_put(ppgtt); + vm = __set_ppgtt(ctx, vm); + if (vm) + i915_vm_put(vm); } static struct i915_gem_context * @@ -522,8 +523,8 @@ i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags) return ERR_CAST(ppgtt); } - __assign_ppgtt(ctx, ppgtt); - i915_ppgtt_put(ppgtt); + __assign_ppgtt(ctx, &ppgtt->vm); + i915_vm_put(&ppgtt->vm); } if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) { @@ -723,7 +724,7 @@ static int context_idr_cleanup(int id, void *p, void *data) static int vm_idr_cleanup(int id, void *p, void *data) { - i915_ppgtt_put(p); + i915_vm_put(p); return 0; } @@ -733,8 +734,8 @@ static int gem_context_register(struct i915_gem_context *ctx, int ret; ctx->file_priv = fpriv; - if (ctx->ppgtt) - ctx->ppgtt->vm.file = fpriv; + if (ctx->vm) + ctx->vm->file = fpriv; ctx->pid = get_task_pid(current, PIDTYPE_PID); ctx->name = kasprintf(GFP_KERNEL, "%s[%d]", @@ -844,7 +845,7 @@ int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data, if (err) goto err_put; - err = idr_alloc(&file_priv->vm_idr, ppgtt, 0, 0, GFP_KERNEL); + err = idr_alloc(&file_priv->vm_idr, &ppgtt->vm, 0, 0, GFP_KERNEL); if (err < 0) goto err_unlock; @@ -858,7 +859,7 @@ int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data, err_unlock: mutex_unlock(&file_priv->vm_idr_lock); err_put: - i915_ppgtt_put(ppgtt); + i915_vm_put(&ppgtt->vm); return err; } @@ -867,7 +868,7 @@ int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data, { struct drm_i915_file_private *file_priv = file->driver_priv; struct drm_i915_gem_vm_control *args = data; - struct i915_hw_ppgtt *ppgtt; + struct i915_address_space *vm; int err; u32 id; @@ -885,13 +886,13 @@ int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data, if (err) return err; - ppgtt = idr_remove(&file_priv->vm_idr, id); + vm = idr_remove(&file_priv->vm_idr, id); mutex_unlock(&file_priv->vm_idr_lock); - if (!ppgtt) + if (!vm) return -ENOENT; - i915_ppgtt_put(ppgtt); + i915_vm_put(vm); return 0; } @@ -981,10 +982,10 @@ static int get_ppgtt(struct drm_i915_file_private *file_priv, struct i915_gem_context *ctx, struct drm_i915_gem_context_param *args) { - struct i915_hw_ppgtt *ppgtt; + struct i915_address_space *vm; int ret; - if (!ctx->ppgtt) + if (!ctx->vm) return -ENODEV; /* XXX rcu acquire? 
*/ @@ -992,19 +993,19 @@ static int get_ppgtt(struct drm_i915_file_private *file_priv, if (ret) return ret; - ppgtt = i915_ppgtt_get(ctx->ppgtt); + vm = i915_vm_get(ctx->vm); mutex_unlock(&ctx->i915->drm.struct_mutex); ret = mutex_lock_interruptible(&file_priv->vm_idr_lock); if (ret) goto err_put; - ret = idr_alloc(&file_priv->vm_idr, ppgtt, 0, 0, GFP_KERNEL); + ret = idr_alloc(&file_priv->vm_idr, vm, 0, 0, GFP_KERNEL); GEM_BUG_ON(!ret); if (ret < 0) goto err_unlock; - i915_ppgtt_get(ppgtt); + i915_vm_get(vm); args->size = 0; args->value = ret; @@ -1013,29 +1014,30 @@ static int get_ppgtt(struct drm_i915_file_private *file_priv, err_unlock: mutex_unlock(&file_priv->vm_idr_lock); err_put: - i915_ppgtt_put(ppgtt); + i915_vm_put(vm); return ret; } static void set_ppgtt_barrier(void *data) { - struct i915_hw_ppgtt *old = data; + struct i915_address_space *old = data; - if (INTEL_GEN(old->vm.i915) < 8) - gen6_ppgtt_unpin_all(old); + if (INTEL_GEN(old->i915) < 8) + gen6_ppgtt_unpin_all(i915_vm_to_ppgtt(old)); - i915_ppgtt_put(old); + i915_vm_put(old); } static int emit_ppgtt_update(struct i915_request *rq, void *data) { - struct i915_hw_ppgtt *ppgtt = rq->gem_context->ppgtt; + struct i915_address_space *vm = rq->gem_context->vm; struct intel_engine_cs *engine = rq->engine; u32 base = engine->mmio_base; u32 *cs; int i; - if (i915_vm_is_4lvl(&ppgtt->vm)) { + if (i915_vm_is_4lvl(vm)) { + struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); const dma_addr_t pd_daddr = px_dma(&ppgtt->pml4); cs = intel_ring_begin(rq, 6); @@ -1052,6 +1054,8 @@ static int emit_ppgtt_update(struct i915_request *rq, void *data) *cs++ = MI_NOOP; intel_ring_advance(rq, cs); } else if (HAS_LOGICAL_RING_CONTEXTS(engine->i915)) { + struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); + cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2); if (IS_ERR(cs)) return PTR_ERR(cs); @@ -1069,7 +1073,7 @@ static int emit_ppgtt_update(struct i915_request *rq, void *data) intel_ring_advance(rq, cs); } else { /* ppGTT is not part of the legacy context image */ - gen6_ppgtt_pin(ppgtt); + gen6_ppgtt_pin(i915_vm_to_ppgtt(vm)); } return 0; @@ -1087,13 +1091,13 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv, struct i915_gem_context *ctx, struct drm_i915_gem_context_param *args) { - struct i915_hw_ppgtt *ppgtt, *old; + struct i915_address_space *vm, *old; int err; if (args->size) return -EINVAL; - if (!ctx->ppgtt) + if (!ctx->vm) return -ENODEV; if (upper_32_bits(args->value)) @@ -1103,18 +1107,18 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv, if (err) return err; - ppgtt = idr_find(&file_priv->vm_idr, args->value); - if (ppgtt) - i915_ppgtt_get(ppgtt); + vm = idr_find(&file_priv->vm_idr, args->value); + if (vm) + i915_vm_get(vm); mutex_unlock(&file_priv->vm_idr_lock); - if (!ppgtt) + if (!vm) return -ENOENT; err = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex); if (err) goto out; - if (ppgtt == ctx->ppgtt) + if (vm == ctx->vm) goto unlock; /* Teardown the existing obj:vma cache, it will have to be rebuilt. 
*/ @@ -1122,7 +1126,7 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv, lut_close(ctx); mutex_unlock(&ctx->mutex); - old = __set_ppgtt(ctx, ppgtt); + old = __set_ppgtt(ctx, vm); /* * We need to flush any requests using the current ppgtt before @@ -1135,16 +1139,16 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv, set_ppgtt_barrier, old); if (err) { - ctx->ppgtt = old; + ctx->vm = old; ctx->desc_template = default_desc_template(ctx->i915, old); - i915_ppgtt_put(ppgtt); + i915_vm_put(vm); } unlock: mutex_unlock(&ctx->i915->drm.struct_mutex); out: - i915_ppgtt_put(ppgtt); + i915_vm_put(vm); return err; } @@ -2033,15 +2037,15 @@ static int clone_timeline(struct i915_gem_context *dst, static int clone_vm(struct i915_gem_context *dst, struct i915_gem_context *src) { - struct i915_hw_ppgtt *ppgtt; + struct i915_address_space *vm; rcu_read_lock(); do { - ppgtt = READ_ONCE(src->ppgtt); - if (!ppgtt) + vm = READ_ONCE(src->vm); + if (!vm) break; - if (!kref_get_unless_zero(&ppgtt->ref)) + if (!kref_get_unless_zero(&vm->ref)) continue; /* @@ -2059,16 +2063,16 @@ static int clone_vm(struct i915_gem_context *dst, * it cannot be reallocated elsewhere. */ - if (ppgtt == READ_ONCE(src->ppgtt)) + if (vm == READ_ONCE(src->vm)) break; - i915_ppgtt_put(ppgtt); + i915_vm_put(vm); } while (1); rcu_read_unlock(); - if (ppgtt) { - __assign_ppgtt(dst, ppgtt); - i915_ppgtt_put(ppgtt); + if (vm) { + __assign_ppgtt(dst, vm); + i915_vm_put(vm); } return 0; @@ -2293,8 +2297,8 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, case I915_CONTEXT_PARAM_GTT_SIZE: args->size = 0; - if (ctx->ppgtt) - args->value = ctx->ppgtt->vm.total; + if (ctx->vm) + args->value = ctx->vm->total; else if (to_i915(dev)->mm.aliasing_ppgtt) args->value = to_i915(dev)->mm.aliasing_ppgtt->vm.total; else diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h index 3db7448b9732..cc513410eeef 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h @@ -25,7 +25,7 @@ struct pid; struct drm_i915_private; struct drm_i915_file_private; -struct i915_hw_ppgtt; +struct i915_address_space; struct i915_timeline; struct intel_ring; @@ -80,7 +80,7 @@ struct i915_gem_context { struct i915_timeline *timeline; /** - * @ppgtt: unique address space (GTT) + * @vm: unique address space (GTT) * * In full-ppgtt mode, each context has its own address space ensuring * complete seperation of one client from all others. @@ -88,7 +88,7 @@ struct i915_gem_context { * In other modes, this is a NULL pointer with the expectation that * the caller uses the shared global GTT. 
*/ - struct i915_hw_ppgtt *ppgtt; + struct i915_address_space *vm; /** * @pid: process id of creator diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index 2c4f3229361d..528eea44dccf 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -723,8 +723,8 @@ static int eb_select_context(struct i915_execbuffer *eb) return -ENOENT; eb->gem_context = ctx; - if (ctx->ppgtt) { - eb->vm = &ctx->ppgtt->vm; + if (ctx->vm) { + eb->vm = ctx->vm; eb->invalid_flags |= EXEC_OBJECT_NEEDS_GTT; } else { eb->vm = &eb->i915->ggtt.vm; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c index fc8ee7ef3d69..cb42e3a312e2 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c @@ -49,14 +49,12 @@ int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj, { struct drm_i915_private *i915 = to_i915(obj->base.dev); struct i915_gem_context *ctx = ce->gem_context; - struct i915_address_space *vm; + struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm; struct i915_request *rq; struct i915_vma *vma; int err; /* XXX: ce->vm please */ - vm = ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm; - vma = i915_vma_instance(obj, vm, NULL); if (IS_ERR(vma)) return PTR_ERR(vma); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c index cfa990edb351..528b61678334 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c @@ -768,14 +768,14 @@ i915_gem_userptr_ioctl(struct drm_device *dev, return -EFAULT; if (args->flags & I915_USERPTR_READ_ONLY) { - struct i915_hw_ppgtt *ppgtt; + struct i915_address_space *vm; /* * On almost all of the older hw, we cannot tell the GPU that * a page is readonly. */ - ppgtt = dev_priv->kernel_context->ppgtt; - if (!ppgtt || !ppgtt->vm.has_read_only) + vm = dev_priv->kernel_context->vm; + if (!vm || !vm->has_read_only) return -ENODEV; } diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c index ec2985c0a92e..232d5cf4396c 100644 --- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c @@ -1038,8 +1038,7 @@ static int __igt_write_huge(struct i915_gem_context *ctx, u32 dword, u32 val) { struct drm_i915_private *i915 = to_i915(obj->base.dev); - struct i915_address_space *vm = - ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm; + struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm; unsigned int flags = PIN_USER | PIN_OFFSET_FIXED; struct i915_vma *vma; int err; @@ -1092,8 +1091,7 @@ static int igt_write_huge(struct i915_gem_context *ctx, struct drm_i915_gem_object *obj) { struct drm_i915_private *i915 = to_i915(obj->base.dev); - struct i915_address_space *vm = - ctx->ppgtt ? 
&ctx->ppgtt->vm : &i915->ggtt.vm; + struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm; static struct intel_engine_cs *engines[I915_NUM_ENGINES]; struct intel_engine_cs *engine; I915_RND_STATE(prng); @@ -1419,7 +1417,7 @@ static int igt_ppgtt_pin_update(void *arg) struct i915_gem_context *ctx = arg; struct drm_i915_private *dev_priv = ctx->i915; unsigned long supported = INTEL_INFO(dev_priv)->page_sizes; - struct i915_hw_ppgtt *ppgtt = ctx->ppgtt; + struct i915_address_space *vm = ctx->vm; struct drm_i915_gem_object *obj; struct i915_vma *vma; unsigned int flags = PIN_USER | PIN_OFFSET_FIXED; @@ -1434,7 +1432,7 @@ static int igt_ppgtt_pin_update(void *arg) * huge-gtt-pages. */ - if (!ppgtt || !i915_vm_is_4lvl(&ppgtt->vm)) { + if (!vm || !i915_vm_is_4lvl(vm)) { pr_info("48b PPGTT not supported, skipping\n"); return 0; } @@ -1449,7 +1447,7 @@ static int igt_ppgtt_pin_update(void *arg) if (IS_ERR(obj)) return PTR_ERR(obj); - vma = i915_vma_instance(obj, &ppgtt->vm, NULL); + vma = i915_vma_instance(obj, vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto out_put; @@ -1503,7 +1501,7 @@ static int igt_ppgtt_pin_update(void *arg) if (IS_ERR(obj)) return PTR_ERR(obj); - vma = i915_vma_instance(obj, &ppgtt->vm, NULL); + vma = i915_vma_instance(obj, vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto out_put; @@ -1541,8 +1539,7 @@ static int igt_tmpfs_fallback(void *arg) struct i915_gem_context *ctx = arg; struct drm_i915_private *i915 = ctx->i915; struct vfsmount *gemfs = i915->mm.gemfs; - struct i915_address_space *vm = - ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm; + struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm; struct drm_i915_gem_object *obj; struct i915_vma *vma; u32 *vaddr; @@ -1599,8 +1596,7 @@ static int igt_shrink_thp(void *arg) { struct i915_gem_context *ctx = arg; struct drm_i915_private *i915 = ctx->i915; - struct i915_address_space *vm = - ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm; + struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm; struct drm_i915_gem_object *obj; struct i915_vma *vma; unsigned int flags = PIN_USER; @@ -1721,7 +1717,7 @@ int i915_gem_huge_page_mock_selftests(void) err = i915_subtests(tests, ppgtt); out_close: - i915_ppgtt_put(ppgtt); + i915_vm_put(&ppgtt->vm); out_unlock: mutex_unlock(&dev_priv->drm.struct_mutex); @@ -1766,8 +1762,8 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv) goto out_unlock; } - if (ctx->ppgtt) - ctx->ppgtt->vm.scrub_64K = true; + if (ctx->vm) + ctx->vm->scrub_64K = true; err = i915_subtests(tests, ctx); diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c index 41105f6ed206..74b0e5871c4b 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c @@ -248,8 +248,7 @@ static int gpu_fill(struct drm_i915_gem_object *obj, unsigned int dw) { struct drm_i915_private *i915 = to_i915(obj->base.dev); - struct i915_address_space *vm = - ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm; + struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm; struct i915_request *rq; struct i915_vma *vma; struct i915_vma *batch; @@ -438,8 +437,7 @@ create_test_object(struct i915_gem_context *ctx, struct list_head *objects) { struct drm_i915_gem_object *obj; - struct i915_address_space *vm = - ctx->ppgtt ? 
&ctx->ppgtt->vm : &ctx->i915->ggtt.vm; + struct i915_address_space *vm = ctx->vm ?: &ctx->i915->ggtt.vm; u64 size; int err; @@ -541,7 +539,7 @@ static int igt_ctx_exec(void *arg) pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n", ndwords, dw, max_dwords(obj), engine->name, ctx->hw_id, - yesno(!!ctx->ppgtt), err); + yesno(!!ctx->vm), err); goto out_unlock; } @@ -612,7 +610,7 @@ static int igt_shared_ctx_exec(void *arg) goto out_unlock; } - if (!parent->ppgtt) { /* not full-ppgtt; nothing to share */ + if (!parent->vm) { /* not full-ppgtt; nothing to share */ err = 0; goto out_unlock; } @@ -643,7 +641,7 @@ static int igt_shared_ctx_exec(void *arg) goto out_test; } - __assign_ppgtt(ctx, parent->ppgtt); + __assign_ppgtt(ctx, parent->vm); if (!obj) { obj = create_test_object(parent, file, &objects); @@ -661,7 +659,7 @@ static int igt_shared_ctx_exec(void *arg) pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n", ndwords, dw, max_dwords(obj), engine->name, ctx->hw_id, - yesno(!!ctx->ppgtt), err); + yesno(!!ctx->vm), err); kernel_context_close(ctx); goto out_test; } @@ -758,7 +756,7 @@ emit_rpcs_query(struct drm_i915_gem_object *obj, GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine)); - vma = i915_vma_instance(obj, &ce->gem_context->ppgtt->vm, NULL); + vma = i915_vma_instance(obj, ce->gem_context->vm, NULL); if (IS_ERR(vma)) return PTR_ERR(vma); @@ -1176,8 +1174,8 @@ static int igt_ctx_readonly(void *arg) { struct drm_i915_private *i915 = arg; struct drm_i915_gem_object *obj = NULL; + struct i915_address_space *vm; struct i915_gem_context *ctx; - struct i915_hw_ppgtt *ppgtt; unsigned long idx, ndwords, dw; struct igt_live_test t; struct drm_file *file; @@ -1208,8 +1206,8 @@ static int igt_ctx_readonly(void *arg) goto out_unlock; } - ppgtt = ctx->ppgtt ?: i915->mm.aliasing_ppgtt; - if (!ppgtt || !ppgtt->vm.has_read_only) { + vm = ctx->vm ?: &i915->mm.aliasing_ppgtt->vm; + if (!vm || !vm->has_read_only) { err = 0; goto out_unlock; } @@ -1244,7 +1242,7 @@ static int igt_ctx_readonly(void *arg) pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? 
%s], err=%d\n", ndwords, dw, max_dwords(obj), engine->name, ctx->hw_id, - yesno(!!ctx->ppgtt), err); + yesno(!!ctx->vm), err); goto out_unlock; } @@ -1288,7 +1286,7 @@ out_unlock: static int check_scratch(struct i915_gem_context *ctx, u64 offset) { struct drm_mm_node *node = - __drm_mm_interval_first(&ctx->ppgtt->vm.mm, + __drm_mm_interval_first(&ctx->vm->mm, offset, offset + sizeof(u32) - 1); if (!node || node->start > offset) return 0; @@ -1336,7 +1334,7 @@ static int write_to_scratch(struct i915_gem_context *ctx, __i915_gem_object_flush_map(obj, 0, 64); i915_gem_object_unpin_map(obj); - vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL); + vma = i915_vma_instance(obj, ctx->vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto err; @@ -1433,7 +1431,7 @@ static int read_from_scratch(struct i915_gem_context *ctx, i915_gem_object_flush_map(obj); i915_gem_object_unpin_map(obj); - vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL); + vma = i915_vma_instance(obj, ctx->vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto err; @@ -1542,11 +1540,11 @@ static int igt_vm_isolation(void *arg) } /* We can only test vm isolation, if the vm are distinct */ - if (ctx_a->ppgtt == ctx_b->ppgtt) + if (ctx_a->vm == ctx_b->vm) goto out_unlock; - vm_total = ctx_a->ppgtt->vm.total; - GEM_BUG_ON(ctx_b->ppgtt->vm.total != vm_total); + vm_total = ctx_a->vm->total; + GEM_BUG_ON(ctx_b->vm->total != vm_total); vm_total -= I915_GTT_PAGE_SIZE; wakeref = intel_runtime_pm_get(i915); diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.c b/drivers/gpu/drm/i915/gem/selftests/mock_context.c index 6578f2f6c3f8..82371c60d4aa 100644 --- a/drivers/gpu/drm/i915/gem/selftests/mock_context.c +++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.c @@ -48,7 +48,8 @@ mock_context(struct drm_i915_private *i915, if (!ppgtt) goto err_put; - __set_ppgtt(ctx, ppgtt); + __set_ppgtt(ctx, &ppgtt->vm); + i915_vm_put(&ppgtt->vm); } return ctx; diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 497ac036c4a9..579d605fbfc3 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -1505,7 +1505,7 @@ __execlists_context_pin(struct intel_context *ce, void *vaddr; int ret; - GEM_BUG_ON(!ce->gem_context->ppgtt); + GEM_BUG_ON(!ce->gem_context->vm); ret = execlists_context_deferred_alloc(ce, engine); if (ret) @@ -1621,7 +1621,8 @@ static int gen8_emit_init_breadcrumb(struct i915_request *rq) static int emit_pdps(struct i915_request *rq) { const struct intel_engine_cs * const engine = rq->engine; - struct i915_hw_ppgtt * const ppgtt = rq->gem_context->ppgtt; + struct i915_hw_ppgtt * const ppgtt = + i915_vm_to_ppgtt(rq->gem_context->vm); int err, i; u32 *cs; @@ -1694,7 +1695,7 @@ static int execlists_request_alloc(struct i915_request *request) */ /* Unconditionally invalidate GPU caches and TLBs. 
*/ - if (i915_vm_is_4lvl(&request->gem_context->ppgtt->vm)) + if (i915_vm_is_4lvl(request->gem_context->vm)) ret = request->engine->emit_flush(request, EMIT_INVALIDATE); else ret = emit_pdps(request); @@ -2824,7 +2825,7 @@ static void execlists_init_reg_state(u32 *regs, struct intel_engine_cs *engine, struct intel_ring *ring) { - struct i915_hw_ppgtt *ppgtt = ce->gem_context->ppgtt; + struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(ce->gem_context->vm); bool rcs = engine->class == RENDER_CLASS; u32 base = engine->mmio_base; diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c index ff58d658e3e2..6369cc5e22b4 100644 --- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c @@ -1330,23 +1330,23 @@ static void ring_context_destroy(struct kref *ref) static int __context_pin_ppgtt(struct i915_gem_context *ctx) { - struct i915_hw_ppgtt *ppgtt; + struct i915_address_space *vm; int err = 0; - ppgtt = ctx->ppgtt ?: ctx->i915->mm.aliasing_ppgtt; - if (ppgtt) - err = gen6_ppgtt_pin(ppgtt); + vm = ctx->vm ?: &ctx->i915->mm.aliasing_ppgtt->vm; + if (vm) + err = gen6_ppgtt_pin(i915_vm_to_ppgtt((vm))); return err; } static void __context_unpin_ppgtt(struct i915_gem_context *ctx) { - struct i915_hw_ppgtt *ppgtt; + struct i915_address_space *vm; - ppgtt = ctx->ppgtt ?: ctx->i915->mm.aliasing_ppgtt; - if (ppgtt) - gen6_ppgtt_unpin(ppgtt); + vm = ctx->vm ?: &ctx->i915->mm.aliasing_ppgtt->vm; + if (vm) + gen6_ppgtt_unpin(i915_vm_to_ppgtt(vm)); } static int __context_pin(struct intel_context *ce) @@ -1704,14 +1704,16 @@ static int switch_context(struct i915_request *rq) { struct intel_engine_cs *engine = rq->engine; struct i915_gem_context *ctx = rq->gem_context; - struct i915_hw_ppgtt *ppgtt = ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt; + struct i915_address_space *vm = + ctx->vm ?: &rq->i915->mm.aliasing_ppgtt->vm; unsigned int unwind_mm = 0; u32 hw_flags = 0; int ret, i; GEM_BUG_ON(HAS_EXECLISTS(rq->i915)); - if (ppgtt) { + if (vm) { + struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); int loops; /* @@ -1758,7 +1760,7 @@ static int switch_context(struct i915_request *rq) goto err_mm; } - if (ppgtt) { + if (vm) { ret = engine->emit_flush(rq, EMIT_INVALIDATE); if (ret) goto err_mm; @@ -1801,7 +1803,7 @@ static int switch_context(struct i915_request *rq) err_mm: if (unwind_mm) - ppgtt->pd_dirty_engines |= unwind_mm; + i915_vm_to_ppgtt(vm)->pd_dirty_engines |= unwind_mm; err: return ret; } diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c index 3be67e561c26..45379a63e013 100644 --- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c +++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c @@ -128,8 +128,7 @@ static struct i915_request * hang_create_request(struct hang *h, struct intel_engine_cs *engine) { struct drm_i915_private *i915 = h->i915; - struct i915_address_space *vm = - h->ctx->ppgtt ? 
&h->ctx->ppgtt->vm : &i915->ggtt.vm; + struct i915_address_space *vm = h->ctx->vm ?: &i915->ggtt.vm; struct i915_request *rq = NULL; struct i915_vma *hws, *vma; unsigned int flags; @@ -1354,8 +1353,8 @@ static int igt_reset_evict_ppgtt(void *arg) } err = 0; - if (ctx->ppgtt) /* aliasing == global gtt locking, covered above */ - err = __igt_reset_evict_vma(i915, &ctx->ppgtt->vm, + if (ctx->vm) /* aliasing == global gtt locking, covered above */ + err = __igt_reset_evict_vma(i915, ctx->vm, evict_vma, EXEC_OBJECT_WRITE); out: diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c index 06593254b7d6..f0ca2a09dabd 100644 --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c @@ -1090,7 +1090,7 @@ static int smoke_submit(struct preempt_smoke *smoke, int err = 0; if (batch) { - vma = i915_vma_instance(batch, &ctx->ppgtt->vm, NULL); + vma = i915_vma_instance(batch, ctx->vm, NULL); if (IS_ERR(vma)) return PTR_ERR(vma); diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c index 2cb1519fde42..c8d335d63f9c 100644 --- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c +++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c @@ -358,7 +358,7 @@ static struct i915_vma *create_batch(struct i915_gem_context *ctx) if (IS_ERR(obj)) return ERR_CAST(obj); - vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL); + vma = i915_vma_instance(obj, ctx->vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto err_obj; @@ -442,7 +442,7 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx, int err = 0, i, v; u32 *cs, *results; - scratch = create_scratch(&ctx->ppgtt->vm, 2 * ARRAY_SIZE(values) + 1); + scratch = create_scratch(ctx->vm, 2 * ARRAY_SIZE(values) + 1); if (IS_ERR(scratch)) return PTR_ERR(scratch); @@ -925,7 +925,7 @@ static int live_isolated_whitelist(void *arg) if (!intel_engines_has_context_isolation(i915)) return 0; - if (!i915->kernel_context->ppgtt) + if (!i915->kernel_context->vm) return 0; for (i = 0; i < ARRAY_SIZE(client); i++) { @@ -937,14 +937,14 @@ static int live_isolated_whitelist(void *arg) goto err; } - client[i].scratch[0] = create_scratch(&c->ppgtt->vm, 1024); + client[i].scratch[0] = create_scratch(c->vm, 1024); if (IS_ERR(client[i].scratch[0])) { err = PTR_ERR(client[i].scratch[0]); kernel_context_close(c); goto err; } - client[i].scratch[1] = create_scratch(&c->ppgtt->vm, 1024); + client[i].scratch[1] = create_scratch(c->vm, 1024); if (IS_ERR(client[i].scratch[1])) { err = PTR_ERR(client[i].scratch[1]); i915_vma_unpin_and_release(&client[i].scratch[0], 0); diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index a09ad98a3f43..ce6d2f06e8f7 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -368,7 +368,7 @@ static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload, struct i915_gem_context *ctx) { struct intel_vgpu_mm *mm = workload->shadow_mm; - struct i915_hw_ppgtt *ppgtt = ctx->ppgtt; + struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(ctx->vm); int i = 0; if (mm->type != INTEL_GVT_MM_PPGTT || !mm->ppgtt_mm.shadowed) @@ -1130,7 +1130,7 @@ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu) intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0); - i915_context_ppgtt_root_restore(s, s->shadow[0]->gem_context->ppgtt); + i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(s->shadow[0]->gem_context->vm)); for_each_engine(engine, vgpu->gvt->dev_priv, id) 
intel_context_unpin(s->shadow[id]); @@ -1195,7 +1195,7 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu) if (IS_ERR(ctx)) return PTR_ERR(ctx); - i915_context_ppgtt_root_save(s, ctx->ppgtt); + i915_context_ppgtt_root_save(s, i915_vm_to_ppgtt(ctx->vm)); for_each_engine(engine, vgpu->gvt->dev_priv, i) { struct intel_context *ce; @@ -1238,7 +1238,7 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu) return 0; out_shadow_ctx: - i915_context_ppgtt_root_restore(s, ctx->ppgtt); + i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(ctx->vm)); for_each_engine(engine, vgpu->gvt->dev_priv, i) { if (IS_ERR(s->shadow[i])) break; diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 2e79f0e4c5af..b070e6f3780c 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -427,7 +427,7 @@ static void print_context_stats(struct seq_file *m, i915_gem_context_unlock_engines(ctx); if (!IS_ERR_OR_NULL(ctx->file_priv)) { - struct file_stats stats = { .vm = &ctx->ppgtt->vm, }; + struct file_stats stats = { .vm = ctx->vm, }; struct drm_file *file = ctx->file_priv->file; struct task_struct *task; char name[80]; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index d3c02e009a98..6e7e72ab5360 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2658,12 +2658,6 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev, struct dma_buf *i915_gem_prime_export(struct drm_device *dev, struct drm_gem_object *gem_obj, int flags); -static inline struct i915_hw_ppgtt * -i915_vm_to_ppgtt(struct i915_address_space *vm) -{ - return container_of(vm, struct i915_hw_ppgtt, vm); -} - /* i915_gem_fence_reg.c */ struct drm_i915_fence_reg * i915_reserve_fence(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 2e15850bd987..69eb0bf7670b 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -483,6 +483,8 @@ static void vm_free_page(struct i915_address_space *vm, struct page *page) static void i915_address_space_init(struct i915_address_space *vm, int subclass) { + kref_init(&vm->ref); + /* * The vm->mutex must be reclaim safe (for use in the shrinker). 
* Do a dummy acquire now under fs_reclaim so that any allocation @@ -1230,9 +1232,8 @@ static int gen8_init_scratch(struct i915_address_space *vm) */ if (vm->has_read_only && vm->i915->kernel_context && - vm->i915->kernel_context->ppgtt) { - struct i915_address_space *clone = - &vm->i915->kernel_context->ppgtt->vm; + vm->i915->kernel_context->vm) { + struct i915_address_space *clone = vm->i915->kernel_context->vm; GEM_BUG_ON(!clone->has_read_only); @@ -1591,8 +1592,6 @@ unwind: static void ppgtt_init(struct drm_i915_private *i915, struct i915_hw_ppgtt *ppgtt) { - kref_init(&ppgtt->ref); - ppgtt->vm.i915 = i915; ppgtt->vm.dma = &i915->drm.pdev->dev; ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size); @@ -2272,21 +2271,23 @@ static void ppgtt_destroy_vma(struct i915_address_space *vm) } } -void i915_ppgtt_release(struct kref *kref) +void i915_vm_release(struct kref *kref) { - struct i915_hw_ppgtt *ppgtt = - container_of(kref, struct i915_hw_ppgtt, ref); + struct i915_address_space *vm = + container_of(kref, struct i915_address_space, ref); - trace_i915_ppgtt_release(&ppgtt->vm); + GEM_BUG_ON(i915_is_ggtt(vm)); + trace_i915_ppgtt_release(vm); - ppgtt_destroy_vma(&ppgtt->vm); + ppgtt_destroy_vma(vm); - GEM_BUG_ON(!list_empty(&ppgtt->vm.bound_list)); - GEM_BUG_ON(!list_empty(&ppgtt->vm.unbound_list)); + GEM_BUG_ON(!list_empty(&vm->bound_list)); + GEM_BUG_ON(!list_empty(&vm->unbound_list)); - ppgtt->vm.cleanup(&ppgtt->vm); - i915_address_space_fini(&ppgtt->vm); - kfree(ppgtt); + vm->cleanup(vm); + i915_address_space_fini(vm); + + kfree(vm); } /* Certain Gen5 chipsets require require idling the GPU before @@ -2788,7 +2789,7 @@ static int init_aliasing_ppgtt(struct drm_i915_private *i915) return 0; err_ppgtt: - i915_ppgtt_put(ppgtt); + i915_vm_put(&ppgtt->vm); return err; } @@ -2801,7 +2802,7 @@ static void fini_aliasing_ppgtt(struct drm_i915_private *i915) if (!ppgtt) return; - i915_ppgtt_put(ppgtt); + i915_vm_put(&ppgtt->vm); ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index 97700a37c12b..3b6336ef64f2 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -293,6 +293,8 @@ struct pagestash { }; struct i915_address_space { + struct kref ref; + struct drm_mm mm; struct drm_i915_private *i915; struct device *dma; @@ -412,7 +414,6 @@ struct i915_ggtt { struct i915_hw_ppgtt { struct i915_address_space vm; - struct kref ref; intel_engine_mask_t pd_dirty_engines; union { @@ -582,10 +583,19 @@ i915_page_dir_dma_addr(const struct i915_hw_ppgtt *ppgtt, const unsigned n) static inline struct i915_ggtt * i915_vm_to_ggtt(struct i915_address_space *vm) { + BUILD_BUG_ON(offsetof(struct i915_ggtt, vm)); GEM_BUG_ON(!i915_is_ggtt(vm)); return container_of(vm, struct i915_ggtt, vm); } +static inline struct i915_hw_ppgtt * +i915_vm_to_ppgtt(struct i915_address_space *vm) +{ + BUILD_BUG_ON(offsetof(struct i915_hw_ppgtt, vm)); + GEM_BUG_ON(i915_is_ggtt(vm)); + return container_of(vm, struct i915_hw_ppgtt, vm); +} + #define INTEL_MAX_PPAT_ENTRIES 8 #define INTEL_PPAT_PERFECT_MATCH (~0U) @@ -628,18 +638,19 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv); int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv); struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv); -void i915_ppgtt_release(struct kref *kref); -static inline struct i915_hw_ppgtt *i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt) +static inline 
struct i915_address_space * +i915_vm_get(struct i915_address_space *vm) { - kref_get(&ppgtt->ref); - return ppgtt; + kref_get(&vm->ref); + return vm; } -static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt) +void i915_vm_release(struct kref *kref); + +static inline void i915_vm_put(struct i915_address_space *vm) { - if (ppgtt) - kref_put(&ppgtt->ref, i915_ppgtt_release); + kref_put(&vm->ref, i915_vm_release); } int gen6_ppgtt_pin(struct i915_hw_ppgtt *base); diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index a523bf050a25..dc026d5cd7a0 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -1431,7 +1431,7 @@ static void gem_record_rings(struct i915_gpu_state *error) struct i915_gem_context *ctx = request->gem_context; struct intel_ring *ring; - ee->vm = ctx->ppgtt ? &ctx->ppgtt->vm : &ggtt->vm; + ee->vm = ctx->vm ?: &ggtt->vm; record_context(&ee->context, ctx); diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index 83b389e34b50..5c8cfaa70d72 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -977,7 +977,7 @@ DECLARE_EVENT_CLASS(i915_context, __entry->dev = ctx->i915->drm.primary->index; __entry->ctx = ctx; __entry->hw_id = ctx->hw_id; - __entry->vm = ctx->ppgtt ? &ctx->ppgtt->vm : NULL; + __entry->vm = ctx->vm; ), TP_printk("dev=%u, ctx=%p, ctx_vm=%p, hw_id=%u", diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c index f1e95eaf6923..35db67757432 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c @@ -209,7 +209,7 @@ static int igt_ppgtt_alloc(void *arg) err_ppgtt_cleanup: mutex_lock(&dev_priv->drm.struct_mutex); - i915_ppgtt_put(ppgtt); + i915_vm_put(&ppgtt->vm); mutex_unlock(&dev_priv->drm.struct_mutex); return err; } @@ -1021,7 +1021,7 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv, err = func(dev_priv, &ppgtt->vm, 0, ppgtt->vm.total, end_time); - i915_ppgtt_put(ppgtt); + i915_vm_put(&ppgtt->vm); out_unlock: mutex_unlock(&dev_priv->drm.struct_mutex); @@ -1251,7 +1251,6 @@ static int exercise_mock(struct drm_i915_private *i915, { const u64 limit = totalram_pages() << PAGE_SHIFT; struct i915_gem_context *ctx; - struct i915_hw_ppgtt *ppgtt; IGT_TIMEOUT(end_time); int err; @@ -1259,10 +1258,7 @@ static int exercise_mock(struct drm_i915_private *i915, if (!ctx) return -ENOMEM; - ppgtt = ctx->ppgtt; - GEM_BUG_ON(!ppgtt); - - err = func(i915, &ppgtt->vm, 0, min(ppgtt->vm.total, limit), end_time); + err = func(i915, ctx->vm, 0, min(ctx->vm->total, limit), end_time); mock_context_close(ctx); return err; diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index dfaa5bc52ecc..11278bac3a24 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -754,8 +754,7 @@ out_unlock: static struct i915_vma *recursive_batch(struct drm_i915_private *i915) { struct i915_gem_context *ctx = i915->kernel_context; - struct i915_address_space *vm = - ctx->ppgtt ? 
&ctx->ppgtt->vm : &i915->ggtt.vm; + struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm; struct drm_i915_gem_object *obj; const int gen = INTEL_GEN(i915); struct i915_vma *vma; diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c index 6f3e41c0cb3f..a166d9405a94 100644 --- a/drivers/gpu/drm/i915/selftests/i915_vma.c +++ b/drivers/gpu/drm/i915/selftests/i915_vma.c @@ -38,7 +38,7 @@ static bool assert_vma(struct i915_vma *vma, { bool ok = true; - if (vma->vm != &ctx->ppgtt->vm) { + if (vma->vm != ctx->vm) { pr_err("VMA created with wrong VM\n"); ok = false; } @@ -113,7 +113,7 @@ static int create_vmas(struct drm_i915_private *i915, list_for_each_entry(obj, objects, st_link) { for (pinned = 0; pinned <= 1; pinned++) { list_for_each_entry(ctx, contexts, link) { - struct i915_address_space *vm = &ctx->ppgtt->vm; + struct i915_address_space *vm = ctx->vm; struct i915_vma *vma; int err; diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c index 3ea77c0ca678..1e59b543cf27 100644 --- a/drivers/gpu/drm/i915/selftests/igt_spinner.c +++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c @@ -89,17 +89,16 @@ igt_spinner_create_request(struct igt_spinner *spin, struct intel_engine_cs *engine, u32 arbitration_command) { - struct i915_address_space *vm = &ctx->ppgtt->vm; struct i915_request *rq = NULL; struct i915_vma *hws, *vma; u32 *batch; int err; - vma = i915_vma_instance(spin->obj, vm, NULL); + vma = i915_vma_instance(spin->obj, ctx->vm, NULL); if (IS_ERR(vma)) return ERR_CAST(vma); - hws = i915_vma_instance(spin->hws, vm, NULL); + hws = i915_vma_instance(spin->hws, ctx->vm, NULL); if (IS_ERR(hws)) return ERR_CAST(hws); diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c index cd83929fde8e..9e61c2f06cc9 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gtt.c +++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c @@ -65,7 +65,6 @@ mock_ppgtt(struct drm_i915_private *i915, if (!ppgtt) return NULL; - kref_init(&ppgtt->ref); ppgtt->vm.i915 = i915; ppgtt->vm.total = round_down(U64_MAX, PAGE_SIZE); ppgtt->vm.file = ERR_PTR(-ENODEV); -- cgit v1.2.3 From ab53497b57573e0a1b2b5349651108fd69c28a2e Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 11 Jun 2019 10:12:38 +0100 Subject: drm/i915: Rename i915_hw_ppgtt to i915_ppgtt Keeping the _hw_ in there does not help to distinguish it from its only brethren i915_ggtt, so drop it. 
Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190611091238.15808-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_context.c | 8 +-- drivers/gpu/drm/i915/gem/selftests/huge_pages.c | 12 ++-- drivers/gpu/drm/i915/gem/selftests/mock_context.c | 2 +- drivers/gpu/drm/i915/gt/intel_lrc.c | 4 +- drivers/gpu/drm/i915/gt/intel_ringbuffer.c | 5 +- drivers/gpu/drm/i915/gvt/scheduler.c | 6 +- drivers/gpu/drm/i915/i915_drv.h | 2 +- drivers/gpu/drm/i915/i915_gem_gtt.c | 78 +++++++++++------------ drivers/gpu/drm/i915/i915_gem_gtt.h | 28 ++++---- drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 6 +- drivers/gpu/drm/i915/selftests/mock_gtt.c | 6 +- drivers/gpu/drm/i915/selftests/mock_gtt.h | 4 +- 12 files changed, 78 insertions(+), 83 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index 75b0d1dbb215..c86ca9f21532 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -513,7 +513,7 @@ i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags) return ctx; if (HAS_FULL_PPGTT(dev_priv)) { - struct i915_hw_ppgtt *ppgtt; + struct i915_ppgtt *ppgtt; ppgtt = i915_ppgtt_create(dev_priv); if (IS_ERR(ppgtt)) { @@ -818,7 +818,7 @@ int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data, struct drm_i915_private *i915 = to_i915(dev); struct drm_i915_gem_vm_control *args = data; struct drm_i915_file_private *file_priv = file->driver_priv; - struct i915_hw_ppgtt *ppgtt; + struct i915_ppgtt *ppgtt; int err; if (!HAS_FULL_PPGTT(i915)) @@ -1037,7 +1037,7 @@ static int emit_ppgtt_update(struct i915_request *rq, void *data) int i; if (i915_vm_is_4lvl(vm)) { - struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); + struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); const dma_addr_t pd_daddr = px_dma(&ppgtt->pml4); cs = intel_ring_begin(rq, 6); @@ -1054,7 +1054,7 @@ static int emit_ppgtt_update(struct i915_request *rq, void *data) *cs++ = MI_NOOP; intel_ring_advance(rq, cs); } else if (HAS_LOGICAL_RING_CONTEXTS(engine->i915)) { - struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); + struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2); if (IS_ERR(cs)) diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c index 232d5cf4396c..73e667b31cc4 100644 --- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c @@ -368,7 +368,7 @@ static int igt_check_page_sizes(struct i915_vma *vma) static int igt_mock_exhaust_device_supported_pages(void *arg) { - struct i915_hw_ppgtt *ppgtt = arg; + struct i915_ppgtt *ppgtt = arg; struct drm_i915_private *i915 = ppgtt->vm.i915; unsigned int saved_mask = INTEL_INFO(i915)->page_sizes; struct drm_i915_gem_object *obj; @@ -447,7 +447,7 @@ out_device: static int igt_mock_ppgtt_misaligned_dma(void *arg) { - struct i915_hw_ppgtt *ppgtt = arg; + struct i915_ppgtt *ppgtt = arg; struct drm_i915_private *i915 = ppgtt->vm.i915; unsigned long supported = INTEL_INFO(i915)->page_sizes; struct drm_i915_gem_object *obj; @@ -575,7 +575,7 @@ out_put: } static void close_object_list(struct list_head *objects, - struct i915_hw_ppgtt *ppgtt) + struct i915_ppgtt *ppgtt) { struct drm_i915_gem_object *obj, *on; @@ -595,7 +595,7 @@ static void close_object_list(struct list_head *objects, static int 
igt_mock_ppgtt_huge_fill(void *arg) { - struct i915_hw_ppgtt *ppgtt = arg; + struct i915_ppgtt *ppgtt = arg; struct drm_i915_private *i915 = ppgtt->vm.i915; unsigned long max_pages = ppgtt->vm.total >> PAGE_SHIFT; unsigned long page_num; @@ -716,7 +716,7 @@ static int igt_mock_ppgtt_huge_fill(void *arg) static int igt_mock_ppgtt_64K(void *arg) { - struct i915_hw_ppgtt *ppgtt = arg; + struct i915_ppgtt *ppgtt = arg; struct drm_i915_private *i915 = ppgtt->vm.i915; struct drm_i915_gem_object *obj; const struct object_info { @@ -1683,7 +1683,7 @@ int i915_gem_huge_page_mock_selftests(void) SUBTEST(igt_mock_ppgtt_64K), }; struct drm_i915_private *dev_priv; - struct i915_hw_ppgtt *ppgtt; + struct i915_ppgtt *ppgtt; int err; dev_priv = mock_gem_device(); diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.c b/drivers/gpu/drm/i915/gem/selftests/mock_context.c index 82371c60d4aa..be8974ccff24 100644 --- a/drivers/gpu/drm/i915/gem/selftests/mock_context.c +++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.c @@ -38,7 +38,7 @@ mock_context(struct drm_i915_private *i915, goto err_engines; if (name) { - struct i915_hw_ppgtt *ppgtt; + struct i915_ppgtt *ppgtt; ctx->name = kstrdup(name, GFP_KERNEL); if (!ctx->name) diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 579d605fbfc3..b8f5592da18f 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -1621,7 +1621,7 @@ static int gen8_emit_init_breadcrumb(struct i915_request *rq) static int emit_pdps(struct i915_request *rq) { const struct intel_engine_cs * const engine = rq->engine; - struct i915_hw_ppgtt * const ppgtt = + struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(rq->gem_context->vm); int err, i; u32 *cs; @@ -2825,7 +2825,7 @@ static void execlists_init_reg_state(u32 *regs, struct intel_engine_cs *engine, struct intel_ring *ring) { - struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(ce->gem_context->vm); + struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ce->gem_context->vm); bool rcs = engine->class == RENDER_CLASS; u32 base = engine->mmio_base; diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c index 6369cc5e22b4..c834d016c965 100644 --- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c @@ -1508,8 +1508,7 @@ static const struct intel_context_ops ring_context_ops = { .destroy = ring_context_destroy, }; -static int load_pd_dir(struct i915_request *rq, - const struct i915_hw_ppgtt *ppgtt) +static int load_pd_dir(struct i915_request *rq, const struct i915_ppgtt *ppgtt) { const struct intel_engine_cs * const engine = rq->engine; u32 *cs; @@ -1713,7 +1712,7 @@ static int switch_context(struct i915_request *rq) GEM_BUG_ON(HAS_EXECLISTS(rq->i915)); if (vm) { - struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); + struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); int loops; /* diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index ce6d2f06e8f7..080cb740e028 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -368,7 +368,7 @@ static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload, struct i915_gem_context *ctx) { struct intel_vgpu_mm *mm = workload->shadow_mm; - struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(ctx->vm); + struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ctx->vm); int i = 0; if (mm->type != INTEL_GVT_MM_PPGTT || !mm->ppgtt_mm.shadowed) @@ -1102,7 +1102,7 @@ err: static 
void i915_context_ppgtt_root_restore(struct intel_vgpu_submission *s, - struct i915_hw_ppgtt *ppgtt) + struct i915_ppgtt *ppgtt) { int i; @@ -1160,7 +1160,7 @@ void intel_vgpu_reset_submission(struct intel_vgpu *vgpu, static void i915_context_ppgtt_root_save(struct intel_vgpu_submission *s, - struct i915_hw_ppgtt *ppgtt) + struct i915_ppgtt *ppgtt) { int i; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 6e7e72ab5360..0ea7f78ae227 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -792,7 +792,7 @@ struct i915_gem_mm { struct vfsmount *gemfs; /** PPGTT used for aliasing the PPGTT with the GTT */ - struct i915_hw_ppgtt *aliasing_ppgtt; + struct i915_ppgtt *aliasing_ppgtt; struct notifier_block oom_notifier; struct notifier_block vmap_notifier; diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 69eb0bf7670b..5f7268c33ac0 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -803,7 +803,7 @@ static void gen8_initialize_pml4(struct i915_address_space *vm, * context switching/execlist queuing code takes extra steps * to ensure that tlbs are flushed. */ -static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt) +static void mark_tlbs_dirty(struct i915_ppgtt *ppgtt) { ppgtt->pd_dirty_engines = ALL_ENGINES; } @@ -944,7 +944,7 @@ static void gen8_ppgtt_set_pml4e(struct i915_pml4 *pml4, static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm, u64 start, u64 length) { - struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); + struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); struct i915_pml4 *pml4 = &ppgtt->pml4; struct i915_page_directory_pointer *pdp; unsigned int pml4e; @@ -997,7 +997,7 @@ static __always_inline struct gen8_insert_pte gen8_insert_pte(u64 start) } static __always_inline bool -gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt, +gen8_ppgtt_insert_pte_entries(struct i915_ppgtt *ppgtt, struct i915_page_directory_pointer *pdp, struct sgt_dma *iter, struct gen8_insert_pte *idx, @@ -1058,7 +1058,7 @@ static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm, enum i915_cache_level cache_level, u32 flags) { - struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); + struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); struct sgt_dma iter = sgt_dma(vma); struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start); @@ -1192,7 +1192,7 @@ static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm, enum i915_cache_level cache_level, u32 flags) { - struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); + struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); struct sgt_dma iter = sgt_dma(vma); struct i915_page_directory_pointer **pdps = ppgtt->pml4.pdps; @@ -1291,7 +1291,7 @@ free_scratch_page: return ret; } -static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create) +static int gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create) { struct i915_address_space *vm = &ppgtt->vm; struct drm_i915_private *dev_priv = vm->i915; @@ -1352,7 +1352,7 @@ static void gen8_ppgtt_cleanup_3lvl(struct i915_address_space *vm, free_pdp(vm, pdp); } -static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt) +static void gen8_ppgtt_cleanup_4lvl(struct i915_ppgtt *ppgtt) { int i; @@ -1369,7 +1369,7 @@ static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt) static void gen8_ppgtt_cleanup(struct i915_address_space *vm) { struct drm_i915_private *i915 = vm->i915; - struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); + 
struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); if (intel_vgpu_active(i915)) gen8_ppgtt_notify_vgt(ppgtt, false); @@ -1501,7 +1501,7 @@ static int gen8_ppgtt_alloc_3lvl(struct i915_address_space *vm, static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm, u64 start, u64 length) { - struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); + struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); struct i915_pml4 *pml4 = &ppgtt->pml4; struct i915_page_directory_pointer *pdp; u64 from = start; @@ -1557,7 +1557,7 @@ unwind: return -ENOMEM; } -static int gen8_preallocate_top_level_pdp(struct i915_hw_ppgtt *ppgtt) +static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt) { struct i915_address_space *vm = &ppgtt->vm; struct i915_page_directory_pointer *pdp = &ppgtt->pdp; @@ -1590,7 +1590,7 @@ unwind: } static void ppgtt_init(struct drm_i915_private *i915, - struct i915_hw_ppgtt *ppgtt) + struct i915_ppgtt *ppgtt) { ppgtt->vm.i915 = i915; ppgtt->vm.dma = &i915->drm.pdev->dev; @@ -1611,9 +1611,9 @@ static void ppgtt_init(struct drm_i915_private *i915, * space. * */ -static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915) +static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915) { - struct i915_hw_ppgtt *ppgtt; + struct i915_ppgtt *ppgtt; int err; ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL); @@ -1683,7 +1683,7 @@ err_free: } /* Write pde (index) from the page directory @pd to the page table @pt */ -static inline void gen6_write_pde(const struct gen6_hw_ppgtt *ppgtt, +static inline void gen6_write_pde(const struct gen6_ppgtt *ppgtt, const unsigned int pde, const struct i915_page_table *pt) { @@ -1740,7 +1740,7 @@ static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv) static void gen6_ppgtt_clear_range(struct i915_address_space *vm, u64 start, u64 length) { - struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm)); + struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm)); unsigned int first_entry = start / I915_GTT_PAGE_SIZE; unsigned int pde = first_entry / GEN6_PTES; unsigned int pte = first_entry % GEN6_PTES; @@ -1780,7 +1780,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, enum i915_cache_level cache_level, u32 flags) { - struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); + struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); unsigned first_entry = vma->node.start / I915_GTT_PAGE_SIZE; unsigned act_pt = first_entry / GEN6_PTES; unsigned act_pte = first_entry % GEN6_PTES; @@ -1818,7 +1818,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, static int gen6_alloc_va_range(struct i915_address_space *vm, u64 start, u64 length) { - struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm)); + struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm)); struct i915_page_table *pt; intel_wakeref_t wakeref; u64 from = start; @@ -1878,7 +1878,7 @@ unwind_out: return -ENOMEM; } -static int gen6_ppgtt_init_scratch(struct gen6_hw_ppgtt *ppgtt) +static int gen6_ppgtt_init_scratch(struct gen6_ppgtt *ppgtt) { struct i915_address_space * const vm = &ppgtt->base.vm; struct i915_page_table *unused; @@ -1913,7 +1913,7 @@ static void gen6_ppgtt_free_scratch(struct i915_address_space *vm) cleanup_scratch_page(vm); } -static void gen6_ppgtt_free_pd(struct gen6_hw_ppgtt *ppgtt) +static void gen6_ppgtt_free_pd(struct gen6_ppgtt *ppgtt) { struct i915_page_table *pt; u32 pde; @@ -1971,7 +1971,7 @@ static const struct i915_vma_ops nop_vma_ops = { static void gen6_ppgtt_cleanup(struct 
i915_address_space *vm) { - struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm)); + struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm)); struct gen6_ppgtt_cleanup_work *work = ppgtt->work; /* FIXME remove the struct_mutex to bring the locking under control */ @@ -2002,7 +2002,7 @@ static int pd_vma_bind(struct i915_vma *vma, u32 unused) { struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm); - struct gen6_hw_ppgtt *ppgtt = vma->private; + struct gen6_ppgtt *ppgtt = vma->private; u32 ggtt_offset = i915_ggtt_offset(vma) / I915_GTT_PAGE_SIZE; struct i915_page_table *pt; unsigned int pde; @@ -2021,7 +2021,7 @@ static int pd_vma_bind(struct i915_vma *vma, static void pd_vma_unbind(struct i915_vma *vma) { - struct gen6_hw_ppgtt *ppgtt = vma->private; + struct gen6_ppgtt *ppgtt = vma->private; struct i915_page_table * const scratch_pt = ppgtt->base.vm.scratch_pt; struct i915_page_table *pt; unsigned int pde; @@ -2048,7 +2048,7 @@ static const struct i915_vma_ops pd_vma_ops = { .unbind_vma = pd_vma_unbind, }; -static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size) +static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size) { struct drm_i915_private *i915 = ppgtt->base.vm.i915; struct i915_ggtt *ggtt = &i915->ggtt; @@ -2083,9 +2083,9 @@ static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size) return vma; } -int gen6_ppgtt_pin(struct i915_hw_ppgtt *base) +int gen6_ppgtt_pin(struct i915_ppgtt *base) { - struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base); + struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base); int err; GEM_BUG_ON(ppgtt->base.vm.closed); @@ -2117,9 +2117,9 @@ unpin: return err; } -void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base) +void gen6_ppgtt_unpin(struct i915_ppgtt *base) { - struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base); + struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base); GEM_BUG_ON(!ppgtt->pin_count); if (--ppgtt->pin_count) @@ -2128,9 +2128,9 @@ void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base) i915_vma_unpin(ppgtt->vma); } -void gen6_ppgtt_unpin_all(struct i915_hw_ppgtt *base) +void gen6_ppgtt_unpin_all(struct i915_ppgtt *base) { - struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base); + struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base); if (!ppgtt->pin_count) return; @@ -2139,10 +2139,10 @@ void gen6_ppgtt_unpin_all(struct i915_hw_ppgtt *base) i915_vma_unpin(ppgtt->vma); } -static struct i915_hw_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915) +static struct i915_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915) { struct i915_ggtt * const ggtt = &i915->ggtt; - struct gen6_hw_ppgtt *ppgtt; + struct gen6_ppgtt *ppgtt; int err; ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL); @@ -2231,8 +2231,8 @@ int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv) return 0; } -static struct i915_hw_ppgtt * -__hw_ppgtt_create(struct drm_i915_private *i915) +static struct i915_ppgtt * +__ppgtt_create(struct drm_i915_private *i915) { if (INTEL_GEN(i915) < 8) return gen6_ppgtt_create(i915); @@ -2240,12 +2240,12 @@ __hw_ppgtt_create(struct drm_i915_private *i915) return gen8_ppgtt_create(i915); } -struct i915_hw_ppgtt * +struct i915_ppgtt * i915_ppgtt_create(struct drm_i915_private *i915) { - struct i915_hw_ppgtt *ppgtt; + struct i915_ppgtt *ppgtt; - ppgtt = __hw_ppgtt_create(i915); + ppgtt = __ppgtt_create(i915); if (IS_ERR(ppgtt)) return ppgtt; @@ -2657,7 +2657,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma, pte_flags |= PTE_READ_ONLY; if (flags & I915_VMA_LOCAL_BIND) { - struct i915_hw_ppgtt *appgtt = 
i915->mm.aliasing_ppgtt; + struct i915_ppgtt *appgtt = i915->mm.aliasing_ppgtt; if (!(vma->flags & I915_VMA_LOCAL_BIND)) { ret = appgtt->vm.allocate_va_range(&appgtt->vm, @@ -2756,7 +2756,7 @@ static void i915_gtt_color_adjust(const struct drm_mm_node *node, static int init_aliasing_ppgtt(struct drm_i915_private *i915) { struct i915_ggtt *ggtt = &i915->ggtt; - struct i915_hw_ppgtt *ppgtt; + struct i915_ppgtt *ppgtt; int err; ppgtt = i915_ppgtt_create(i915); @@ -2796,7 +2796,7 @@ err_ppgtt: static void fini_aliasing_ppgtt(struct drm_i915_private *i915) { struct i915_ggtt *ggtt = &i915->ggtt; - struct i915_hw_ppgtt *ppgtt; + struct i915_ppgtt *ppgtt; ppgtt = fetch_and_zero(&i915->mm.aliasing_ppgtt); if (!ppgtt) diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index 3b6336ef64f2..89437d0a721c 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -412,7 +412,7 @@ struct i915_ggtt { struct drm_mm_node uc_fw; }; -struct i915_hw_ppgtt { +struct i915_ppgtt { struct i915_address_space vm; intel_engine_mask_t pd_dirty_engines; @@ -423,8 +423,8 @@ struct i915_hw_ppgtt { }; }; -struct gen6_hw_ppgtt { - struct i915_hw_ppgtt base; +struct gen6_ppgtt { + struct i915_ppgtt base; struct i915_vma *vma; gen6_pte_t __iomem *pd_addr; @@ -435,11 +435,11 @@ struct gen6_hw_ppgtt { struct gen6_ppgtt_cleanup_work *work; }; -#define __to_gen6_ppgtt(base) container_of(base, struct gen6_hw_ppgtt, base) +#define __to_gen6_ppgtt(base) container_of(base, struct gen6_ppgtt, base) -static inline struct gen6_hw_ppgtt *to_gen6_ppgtt(struct i915_hw_ppgtt *base) +static inline struct gen6_ppgtt *to_gen6_ppgtt(struct i915_ppgtt *base) { - BUILD_BUG_ON(offsetof(struct gen6_hw_ppgtt, base)); + BUILD_BUG_ON(offsetof(struct gen6_ppgtt, base)); return __to_gen6_ppgtt(base); } @@ -575,7 +575,7 @@ static inline u64 gen8_pte_count(u64 address, u64 length) } static inline dma_addr_t -i915_page_dir_dma_addr(const struct i915_hw_ppgtt *ppgtt, const unsigned n) +i915_page_dir_dma_addr(const struct i915_ppgtt *ppgtt, const unsigned int n) { return px_dma(ppgtt->pdp.page_directory[n]); } @@ -588,12 +588,12 @@ i915_vm_to_ggtt(struct i915_address_space *vm) return container_of(vm, struct i915_ggtt, vm); } -static inline struct i915_hw_ppgtt * +static inline struct i915_ppgtt * i915_vm_to_ppgtt(struct i915_address_space *vm) { - BUILD_BUG_ON(offsetof(struct i915_hw_ppgtt, vm)); + BUILD_BUG_ON(offsetof(struct i915_ppgtt, vm)); GEM_BUG_ON(i915_is_ggtt(vm)); - return container_of(vm, struct i915_hw_ppgtt, vm); + return container_of(vm, struct i915_ppgtt, vm); } #define INTEL_MAX_PPAT_ENTRIES 8 @@ -637,7 +637,7 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv); int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv); -struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv); +struct i915_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv); static inline struct i915_address_space * i915_vm_get(struct i915_address_space *vm) @@ -653,9 +653,9 @@ static inline void i915_vm_put(struct i915_address_space *vm) kref_put(&vm->ref, i915_vm_release); } -int gen6_ppgtt_pin(struct i915_hw_ppgtt *base); -void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base); -void gen6_ppgtt_unpin_all(struct i915_hw_ppgtt *base); +int gen6_ppgtt_pin(struct i915_ppgtt *base); +void gen6_ppgtt_unpin(struct i915_ppgtt *base); +void gen6_ppgtt_unpin_all(struct i915_ppgtt *base); void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv); void 
i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c index 35db67757432..a557e77d9c54 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c @@ -148,7 +148,7 @@ err: static int igt_ppgtt_alloc(void *arg) { struct drm_i915_private *dev_priv = arg; - struct i915_hw_ppgtt *ppgtt; + struct i915_ppgtt *ppgtt; u64 size, last, limit; int err = 0; @@ -157,7 +157,7 @@ static int igt_ppgtt_alloc(void *arg) if (!HAS_PPGTT(dev_priv)) return 0; - ppgtt = __hw_ppgtt_create(dev_priv); + ppgtt = __ppgtt_create(dev_priv); if (IS_ERR(ppgtt)) return PTR_ERR(ppgtt); @@ -999,7 +999,7 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv, unsigned long end_time)) { struct drm_file *file; - struct i915_hw_ppgtt *ppgtt; + struct i915_ppgtt *ppgtt; IGT_TIMEOUT(end_time); int err; diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c index 9e61c2f06cc9..f625c307a406 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gtt.c +++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c @@ -55,11 +55,9 @@ static void mock_cleanup(struct i915_address_space *vm) { } -struct i915_hw_ppgtt * -mock_ppgtt(struct drm_i915_private *i915, - const char *name) +struct i915_ppgtt *mock_ppgtt(struct drm_i915_private *i915, const char *name) { - struct i915_hw_ppgtt *ppgtt; + struct i915_ppgtt *ppgtt; ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL); if (!ppgtt) diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.h b/drivers/gpu/drm/i915/selftests/mock_gtt.h index 40d544bde1d5..3387393286de 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gtt.h +++ b/drivers/gpu/drm/i915/selftests/mock_gtt.h @@ -28,8 +28,6 @@ void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt); void mock_fini_ggtt(struct i915_ggtt *ggtt); -struct i915_hw_ppgtt * -mock_ppgtt(struct drm_i915_private *i915, - const char *name); +struct i915_ppgtt *mock_ppgtt(struct drm_i915_private *i915, const char *name); #endif /* !__MOCK_GTT_H */ -- cgit v1.2.3 From 9937e16b2820d141df2ba9845c19d3b7bc31fd7b Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Tue, 11 Jun 2019 12:00:43 +0100 Subject: drm/i915/guc: Move intel_guc_reserved_gtt_size to intel_wopcm_guc_size Reduces pointer chasing and gets more to the point. v2: * Tidy whitespace. * Tidy comment. (Michal) Signed-off-by: Tvrtko Ursulin Suggested-by: Michal Wajdeczko Cc: Michal Wajdeczko Reviewed-by: Michal Wajdeczko Link: https://patchwork.freedesktop.org/patch/msgid/20190611110044.7742-1-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/i915_gem_gtt.c | 2 +- drivers/gpu/drm/i915/intel_guc.c | 17 ----------------- drivers/gpu/drm/i915/intel_guc.h | 1 - drivers/gpu/drm/i915/intel_wopcm.h | 15 +++++++++++++++ 4 files changed, 16 insertions(+), 19 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 5f7268c33ac0..5ca3f83ea9a6 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -2831,7 +2831,7 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv) * why. 
*/ ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE, - intel_guc_reserved_gtt_size(&dev_priv->guc)); + intel_wopcm_guc_size(&dev_priv->wopcm)); ret = intel_vgt_balloon(dev_priv); if (ret) diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c index 43232242d167..d45d97624402 100644 --- a/drivers/gpu/drm/i915/intel_guc.c +++ b/drivers/gpu/drm/i915/intel_guc.c @@ -686,23 +686,6 @@ err: return vma; } -/** - * intel_guc_reserved_gtt_size() - * @guc: intel_guc structure - * - * The GuC WOPCM mapping shadows the lower part of the GGTT, so if we are using - * GuC we can't have any objects pinned in that region. This function returns - * the size of the shadowed region. - * - * Returns: - * 0 if GuC is not present or not in use. - * Otherwise, the GuC WOPCM size. - */ -u32 intel_guc_reserved_gtt_size(struct intel_guc *guc) -{ - return guc_to_i915(guc)->wopcm.guc.size; -} - int intel_guc_reserve_ggtt_top(struct intel_guc *guc) { struct drm_i915_private *i915 = guc_to_i915(guc); diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h index e07e4c69cf08..85c3b02a0c08 100644 --- a/drivers/gpu/drm/i915/intel_guc.h +++ b/drivers/gpu/drm/i915/intel_guc.h @@ -172,7 +172,6 @@ int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset); int intel_guc_suspend(struct intel_guc *guc); int intel_guc_resume(struct intel_guc *guc); struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size); -u32 intel_guc_reserved_gtt_size(struct intel_guc *guc); int intel_guc_reserve_ggtt_top(struct intel_guc *guc); void intel_guc_release_ggtt_top(struct intel_guc *guc); diff --git a/drivers/gpu/drm/i915/intel_wopcm.h b/drivers/gpu/drm/i915/intel_wopcm.h index 6298910a384c..114401971520 100644 --- a/drivers/gpu/drm/i915/intel_wopcm.h +++ b/drivers/gpu/drm/i915/intel_wopcm.h @@ -24,6 +24,21 @@ struct intel_wopcm { } guc; }; +/** + * intel_wopcm_guc_size() + * @wopcm: intel_wopcm structure + * + * Returns size of the WOPCM shadowed region. + * + * Returns: + * 0 if GuC is not present or not in use. + * Otherwise, the GuC WOPCM size. + */ +static inline u32 intel_wopcm_guc_size(struct intel_wopcm *wopcm) +{ + return wopcm->guc.size; +} + void intel_wopcm_init_early(struct intel_wopcm *wopcm); int intel_wopcm_init(struct intel_wopcm *wopcm); int intel_wopcm_init_hw(struct intel_wopcm *wopcm); -- cgit v1.2.3 From 09a32cb7b45f7431e824b4778c89e4614dd841d0 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Tue, 11 Jun 2019 13:23:50 +0100 Subject: drm/i915: Make GuC GGTT reservation work on ggtt These functions operate on ggtt so make them take that directly as parameter. At the same time move the USES_GUC conditional down to intel_guc_reserve_ggtt_top for symmetry with intel_guc_reserved_gtt_size. v2: * Rename and move functions to be static in i915_gem_gtt.c (Michal) v3: * Add comment explaining reason for reservation, add assert and fix error message. (Michal) v4: * Fix checkpatch error. 
Signed-off-by: Tvrtko Ursulin Cc: Michal Wajdeczko Reviewed-by: Michal Wajdeczko Link: https://patchwork.freedesktop.org/patch/msgid/20190611122350.15060-1-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/i915_gem_gtt.c | 43 +++++++++++++++++++++++++++++++------ drivers/gpu/drm/i915/intel_guc.c | 27 ----------------------- drivers/gpu/drm/i915/intel_guc.h | 2 -- 3 files changed, 36 insertions(+), 36 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 5ca3f83ea9a6..e70675bfb51d 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -2808,6 +2808,32 @@ static void fini_aliasing_ppgtt(struct drm_i915_private *i915) ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; } +static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt) +{ + u64 size; + int ret; + + if (!USES_GUC(ggtt->vm.i915)) + return 0; + + GEM_BUG_ON(ggtt->vm.total <= GUC_GGTT_TOP); + size = ggtt->vm.total - GUC_GGTT_TOP; + + ret = i915_gem_gtt_reserve(&ggtt->vm, &ggtt->uc_fw, size, + GUC_GGTT_TOP, I915_COLOR_UNEVICTABLE, + PIN_NOEVICT); + if (ret) + DRM_DEBUG_DRIVER("Failed to reserve top of GGTT for GuC\n"); + + return ret; +} + +static void ggtt_release_guc_top(struct i915_ggtt *ggtt) +{ + if (drm_mm_node_allocated(&ggtt->uc_fw)) + drm_mm_remove_node(&ggtt->uc_fw); +} + int i915_gem_init_ggtt(struct drm_i915_private *dev_priv) { /* Let GEM Manage all of the aperture. @@ -2845,11 +2871,14 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv) if (ret) return ret; - if (USES_GUC(dev_priv)) { - ret = intel_guc_reserve_ggtt_top(&dev_priv->guc); - if (ret) - goto err_reserve; - } + /* + * The upper portion of the GuC address space has a sizeable hole + * (several MB) that is inaccessible by GuC. Reserve this range within + * GGTT as it can comfortably hold GuC/HuC firmware images. 
+ */ + ret = ggtt_reserve_guc_top(ggtt); + if (ret) + goto err_reserve; /* Clear any non-preallocated blocks */ drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) { @@ -2871,7 +2900,7 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv) return 0; err_appgtt: - intel_guc_release_ggtt_top(&dev_priv->guc); + ggtt_release_guc_top(ggtt); err_reserve: drm_mm_remove_node(&ggtt->error_capture); return ret; @@ -2898,7 +2927,7 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv) if (drm_mm_node_allocated(&ggtt->error_capture)) drm_mm_remove_node(&ggtt->error_capture); - intel_guc_release_ggtt_top(&dev_priv->guc); + ggtt_release_guc_top(ggtt); if (drm_mm_initialized(&ggtt->vm.mm)) { intel_vgt_deballoon(dev_priv); diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c index d45d97624402..c40a6efdd33a 100644 --- a/drivers/gpu/drm/i915/intel_guc.c +++ b/drivers/gpu/drm/i915/intel_guc.c @@ -685,30 +685,3 @@ err: i915_gem_object_put(obj); return vma; } - -int intel_guc_reserve_ggtt_top(struct intel_guc *guc) -{ - struct drm_i915_private *i915 = guc_to_i915(guc); - struct i915_ggtt *ggtt = &i915->ggtt; - u64 size; - int ret; - - size = ggtt->vm.total - GUC_GGTT_TOP; - - ret = i915_gem_gtt_reserve(&ggtt->vm, &ggtt->uc_fw, size, - GUC_GGTT_TOP, I915_COLOR_UNEVICTABLE, - PIN_NOEVICT); - if (ret) - DRM_DEBUG_DRIVER("GuC: failed to reserve top of ggtt\n"); - - return ret; -} - -void intel_guc_release_ggtt_top(struct intel_guc *guc) -{ - struct drm_i915_private *i915 = guc_to_i915(guc); - struct i915_ggtt *ggtt = &i915->ggtt; - - if (drm_mm_node_allocated(&ggtt->uc_fw)) - drm_mm_remove_node(&ggtt->uc_fw); -} diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h index 85c3b02a0c08..08c906abdfa2 100644 --- a/drivers/gpu/drm/i915/intel_guc.h +++ b/drivers/gpu/drm/i915/intel_guc.h @@ -172,8 +172,6 @@ int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset); int intel_guc_suspend(struct intel_guc *guc); int intel_guc_resume(struct intel_guc *guc); struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size); -int intel_guc_reserve_ggtt_top(struct intel_guc *guc); -void intel_guc_release_ggtt_top(struct intel_guc *guc); static inline bool intel_guc_is_loaded(struct intel_guc *guc) { -- cgit v1.2.3 From 4f338ac0b2fa59ef0c10cb828a82922e1c39aa64 Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Mon, 10 Jun 2019 14:48:19 -0700 Subject: drm/i915/icl: use ranges for voltage level lookup MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Spec shows voltage level 0 as 307.2, 312, or lower and suggests to use range checks. Prepare for having other frequencies in these ranges by not comparing the exact frequency. 
v2: invert checks by comparing biggest cdclk first (suggested by Ville) Signed-off-by: Lucas De Marchi Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190610214819.9703-1-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/intel_cdclk.c | 19 +++++-------------- 1 file changed, 5 insertions(+), 14 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c index 6988c6cbc362..465a72d185ad 100644 --- a/drivers/gpu/drm/i915/intel_cdclk.c +++ b/drivers/gpu/drm/i915/intel_cdclk.c @@ -1865,21 +1865,12 @@ static void icl_set_cdclk(struct drm_i915_private *dev_priv, static u8 icl_calc_voltage_level(int cdclk) { - switch (cdclk) { - case 50000: - case 307200: - case 312000: - return 0; - case 556800: - case 552000: - return 1; - default: - MISSING_CASE(cdclk); - /* fall through */ - case 652800: - case 648000: + if (cdclk > 556800) return 2; - } + else if (cdclk > 312000) + return 1; + else + return 0; } static void icl_get_cdclk(struct drm_i915_private *dev_priv, -- cgit v1.2.3 From 63b1700b40d63a1369c0bc756d579bc242c658d7 Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Mon, 10 Jun 2019 14:48:34 -0700 Subject: drm/i915/cnl: use ranges for voltage level lookup MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Like was done for ICL, let's convert the voltage level lookup to use frequency ranges rather than individual frequencies. For deciding the voltage, the individual value doesn't really matter. Signed-off-by: Lucas De Marchi Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190610214834.9789-1-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/intel_cdclk.c | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c index 465a72d185ad..80e22507cd34 100644 --- a/drivers/gpu/drm/i915/intel_cdclk.c +++ b/drivers/gpu/drm/i915/intel_cdclk.c @@ -1531,15 +1531,12 @@ static int cnl_calc_cdclk(int min_cdclk) static u8 cnl_calc_voltage_level(int cdclk) { - switch (cdclk) { - default: - case 168000: - return 0; - case 336000: - return 1; - case 528000: + if (cdclk > 336000) return 2; - } + else if (cdclk > 168000) + return 1; + else + return 0; } static void cnl_cdclk_pll_update(struct drm_i915_private *dev_priv, -- cgit v1.2.3 From 522d47cff1128fa0a1edbdf47eb211633a171ab8 Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Mon, 10 Jun 2019 14:48:47 -0700 Subject: drm/i915/skl: use ranges for voltage level lookup MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Like was done for ICL, let's convert the voltage level lookup to use frequency ranges rather than individual frequencies. For deciding the voltage, the individual value doesn't really matter. 
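For anyone who wants to sanity-check the conversion outside the driver, here is a minimal, self-contained sketch of the range-check pattern shared by the ICL/CNL/SKL helpers in this series. The thresholds are copied from the SKL hunk below; the example_ prefix, the u8 typedef and the test harness are illustrative additions, not part of the patch.

#include <stdio.h>

typedef unsigned char u8;

static u8 example_skl_calc_voltage_level(int cdclk)
{
	/* Compare against the largest threshold first, as the driver does. */
	if (cdclk > 540000)
		return 3;
	else if (cdclk > 450000)
		return 2;
	else if (cdclk > 337500)
		return 1;
	else
		return 0;
}

int main(void)
{
	/* 675000 -> 3, 450000 -> 1, 308571 -> 0, matching the old switch cases. */
	printf("%u %u %u\n",
	       (unsigned int)example_skl_calc_voltage_level(675000),
	       (unsigned int)example_skl_calc_voltage_level(450000),
	       (unsigned int)example_skl_calc_voltage_level(308571));
	return 0;
}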
Signed-off-by: Lucas De Marchi Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190610214847.9865-1-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/intel_cdclk.c | 20 +++++++------------- 1 file changed, 7 insertions(+), 13 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c index 80e22507cd34..8993ab283562 100644 --- a/drivers/gpu/drm/i915/intel_cdclk.c +++ b/drivers/gpu/drm/i915/intel_cdclk.c @@ -809,20 +809,14 @@ static int skl_calc_cdclk(int min_cdclk, int vco) static u8 skl_calc_voltage_level(int cdclk) { - switch (cdclk) { - default: - case 308571: - case 337500: - return 0; - case 450000: - case 432000: - return 1; - case 540000: - return 2; - case 617143: - case 675000: + if (cdclk > 540000) return 3; - } + else if (cdclk > 450000) + return 2; + else if (cdclk > 337500) + return 1; + else + return 0; } static void skl_dpll0_update(struct drm_i915_private *dev_priv, -- cgit v1.2.3 From ea60f4bdc4330fc3be90e6a8634b9b912046b09d Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 12 Jun 2019 10:31:11 +0100 Subject: drm/i915: Add a label for config DRM_I915_SPIN_REQUEST If we don't give it a label, it does not appear as a configuration option. Signed-off-by: Chris Wilson Cc: Mika Kuoppala Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190612093111.11684-9-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/Kconfig.profile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/Kconfig.profile b/drivers/gpu/drm/i915/Kconfig.profile index 4fd1ea639d0f..48df8889a88a 100644 --- a/drivers/gpu/drm/i915/Kconfig.profile +++ b/drivers/gpu/drm/i915/Kconfig.profile @@ -13,7 +13,7 @@ config DRM_I915_USERFAULT_AUTOSUSPEND runtime pm autosuspend delay tunable. config DRM_I915_SPIN_REQUEST - int + int "Busywait for request completion (us)" default 5 # microseconds help Before sleeping waiting for a request (GPU operation) to complete, -- cgit v1.2.3 From 33df8a7697a08ec96811a1e9bbce45c7cdbbc316 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 12 Jun 2019 09:52:46 +0100 Subject: drm/i915: Prevent lock-cycles between GPU waits and GPU resets We cannot allow ourselves to wait on the GPU while holding any lock as we may need to reset the GPU. While there is not an explicit lock between the two operations, lockdep cannot detect the dependency. So let's tell lockdep about the wait/reset dependency with an explicit lockmap. Signed-off-by: Chris Wilson Cc: Mika Kuoppala Cc: Tvrtko Ursulin Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190612085246.16374-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_reset.c | 5 ++++- drivers/gpu/drm/i915/i915_drv.h | 8 ++++++++ drivers/gpu/drm/i915/i915_gem.c | 3 +++ drivers/gpu/drm/i915/i915_request.c | 2 ++ drivers/gpu/drm/i915/selftests/mock_gem_device.c | 2 ++ 5 files changed, 19 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index 60d24110af80..6368b37f26d1 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -978,10 +978,11 @@ void i915_reset(struct drm_i915_private *i915, might_sleep(); GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags)); + lock_map_acquire(&i915->gt.reset_lockmap); /* Clear any previous failed attempts at recovery. Time to try again. 
*/ if (!__i915_gem_unset_wedged(i915)) - return; + goto unlock; if (reason) dev_notice(i915->drm.dev, "Resetting chip for %s\n", reason); @@ -1029,6 +1030,8 @@ void i915_reset(struct drm_i915_private *i915, finish: reset_finish(i915); +unlock: + lock_map_release(&i915->gt.reset_lockmap); return; taint: diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 0ea7f78ae227..2e2cfbfa0ced 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1919,6 +1919,14 @@ struct drm_i915_private { ktime_t last_init_time; struct i915_vma *scratch; + + /* + * We must never wait on the GPU while holding a lock as we + * may need to perform a GPU reset. So while we don't need to + * serialise wait/reset with an explicit lock, we do want + * lockdep to detect potential dependency cycles. + */ + struct lockdep_map reset_lockmap; } gt; struct { diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index e980c1ee3dcf..24f0f3db1bfb 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1782,6 +1782,7 @@ static void i915_gem_init__mm(struct drm_i915_private *i915) int i915_gem_init_early(struct drm_i915_private *dev_priv) { + static struct lock_class_key reset_key; int err; intel_gt_pm_init(dev_priv); @@ -1789,6 +1790,8 @@ int i915_gem_init_early(struct drm_i915_private *dev_priv) INIT_LIST_HEAD(&dev_priv->gt.active_rings); INIT_LIST_HEAD(&dev_priv->gt.closed_vma); spin_lock_init(&dev_priv->gt.closed_lock); + lockdep_init_map(&dev_priv->gt.reset_lockmap, + "i915.reset", &reset_key, 0); i915_gem_init__mm(dev_priv); i915_gem_init__pm(dev_priv); diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index e9b59eea4f10..1cbc3ef4fc27 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -1444,6 +1444,7 @@ long i915_request_wait(struct i915_request *rq, return -ETIME; trace_i915_request_wait_begin(rq, flags); + lock_map_acquire(&rq->i915->gt.reset_lockmap); /* * Optimistic spin before touching IRQs. @@ -1517,6 +1518,7 @@ long i915_request_wait(struct i915_request *rq, dma_fence_remove_callback(&rq->fence, &wait.cb); out: + lock_map_release(&rq->i915->gt.reset_lockmap); trace_i915_request_wait_end(rq); return timeout; } diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index b7f3fbb4ae89..1e9ffced78c1 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -130,6 +130,7 @@ static struct dev_pm_domain pm_domain = { struct drm_i915_private *mock_gem_device(void) { + static struct lock_class_key reset_key; struct drm_i915_private *i915; struct pci_dev *pdev; int err; @@ -204,6 +205,7 @@ struct drm_i915_private *mock_gem_device(void) INIT_LIST_HEAD(&i915->gt.active_rings); INIT_LIST_HEAD(&i915->gt.closed_vma); spin_lock_init(&i915->gt.closed_lock); + lockdep_init_map(&i915->gt.reset_lockmap, "i915.reset", &reset_key, 0); mutex_lock(&i915->drm.struct_mutex); -- cgit v1.2.3 From b16c7ed95caf270075c52faad0af8f4cb57ae979 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Tue, 4 Jun 2019 23:09:29 +0300 Subject: drm/i915: Do not touch the PCH SSC reference if a PLL is using it MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Our PCH refclk init code currently assumes that the PCH SSC reference can only be used for FDI. 
That is not true and it can be used by SPLL/WRPLL for eDP SSC or clock bending as well. Before we go reconfiguring it let's make sure no PLL is currently using the PCH SSC reference. For some reason the hw is not particularly upset about losing the clock if we immediately follow up with a modeset. Can't really explain why nothing times out during the crtc disable at least, but that's what the logs say. With fastboot the story is quite different and we lose the entire display if we turn off the PCH SSC reference when it's still being used. Since we totally skip configuring the PCH SSC reference it may not be in the proper state for FDI. Hopefully that won't be a problem in practice. We really should move this code to be part of the modeset seqeuence and properly deal with the potentially conflicting requirements imposed on PLL reference clocks. But that requires actual work. Let's toss in a TODO for that. v2: Pimp the commit message with the fastboot vs. not details Cc: Julius B. Cc: Johannes Krampf Tested-by: Johannes Krampf Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=108773 Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190604200933.29417-1-ville.syrjala@linux.intel.com Reviewed-by: Maarten Lankhorst --- drivers/gpu/drm/i915/i915_reg.h | 1 + drivers/gpu/drm/i915/intel_display.c | 79 ++++++++++++++++++++++++++++++++++-- 2 files changed, 77 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index c87d288abb19..4f8c429f31b3 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -7512,6 +7512,7 @@ enum { #define ILK_eDP_A_DISABLE (1 << 24) #define HSW_CDCLK_LIMIT (1 << 24) #define ILK_DESKTOP (1 << 23) +#define HSW_CPU_SSC_ENABLE (1 << 21) #define ILK_DSPCLK_GATE_D _MMIO(0x42020) #define ILK_VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 62fa573f90e8..352c42826cb1 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -9126,22 +9126,95 @@ static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps) #undef BEND_IDX +static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv) +{ + u32 fuse_strap = I915_READ(FUSE_STRAP); + u32 ctl = I915_READ(SPLL_CTL); + + if ((ctl & SPLL_PLL_ENABLE) == 0) + return false; + + if ((ctl & SPLL_PLL_REF_MASK) == SPLL_PLL_SSC && + (fuse_strap & HSW_CPU_SSC_ENABLE) == 0) + return true; + + if (IS_BROADWELL(dev_priv) && + (ctl & SPLL_PLL_REF_MASK) == SPLL_PLL_NON_SSC) + return true; + + return false; +} + +static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv, + enum intel_dpll_id id) +{ + u32 fuse_strap = I915_READ(FUSE_STRAP); + u32 ctl = I915_READ(WRPLL_CTL(id)); + + if ((ctl & WRPLL_PLL_ENABLE) == 0) + return false; + + if ((ctl & WRPLL_PLL_REF_MASK) == WRPLL_PLL_SSC) + return true; + + if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) && + (ctl & WRPLL_PLL_REF_MASK) == WRPLL_PLL_NON_SSC && + (fuse_strap & HSW_CPU_SSC_ENABLE) == 0) + return true; + + return false; +} + static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv) { struct intel_encoder *encoder; - bool has_vga = false; + bool pch_ssc_in_use = false; + bool has_fdi = false; for_each_intel_encoder(&dev_priv->drm, encoder) { switch (encoder->type) { case INTEL_OUTPUT_ANALOG: - has_vga = true; + has_fdi = true; break; default: break; } } - if (has_vga) { + /* + * The BIOS 
may have decided to use the PCH SSC + * reference so we must not disable it until the + * relevant PLLs have stopped relying on it. We'll + * just leave the PCH SSC reference enabled in case + * any active PLL is using it. It will get disabled + * after runtime suspend if we don't have FDI. + * + * TODO: Move the whole reference clock handling + * to the modeset sequence proper so that we can + * actually enable/disable/reconfigure these things + * safely. To do that we need to introduce a real + * clock hierarchy. That would also allow us to do + * clock bending finally. + */ + if (spll_uses_pch_ssc(dev_priv)) { + DRM_DEBUG_KMS("SPLL using PCH SSC\n"); + pch_ssc_in_use = true; + } + + if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) { + DRM_DEBUG_KMS("WRPLL1 using PCH SSC\n"); + pch_ssc_in_use = true; + } + + if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) { + DRM_DEBUG_KMS("WRPLL2 using PCH SSC\n"); + pch_ssc_in_use = true; + } + + if (pch_ssc_in_use) + return; + + if (has_fdi) { lpt_bend_clkout_dp(dev_priv, 0); lpt_enable_clkout_dp(dev_priv, true, true); } else { -- cgit v1.2.3 From 4a95e36f0357ab4de6aeadfbe78ffe6af75b2fee Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Mon, 10 Jun 2019 16:36:09 +0300 Subject: drm/i915: Rename HSW/BDW PLL bits MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Give the PLL control register bits better names on HSW/BDW. v2: Fix the copy paste fails in SPLL_REF defines (Maarten) Cc: Maarten Lankhorst Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190610133609.27288-1-ville.syrjala@linux.intel.com Acked-by: Maarten Lankhorst #irc --- drivers/gpu/drm/i915/i915_reg.h | 32 ++++++++++++++++++++------------ drivers/gpu/drm/i915/intel_ddi.c | 16 ++++++++-------- drivers/gpu/drm/i915/intel_display.c | 8 ++++---- drivers/gpu/drm/i915/intel_dpll_mgr.c | 4 ++-- 4 files changed, 34 insertions(+), 26 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 4f8c429f31b3..665dfc177528 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -9466,24 +9466,28 @@ enum skl_power_gate { /* SPLL */ #define SPLL_CTL _MMIO(0x46020) #define SPLL_PLL_ENABLE (1 << 31) -#define SPLL_PLL_SSC (1 << 28) -#define SPLL_PLL_NON_SSC (2 << 28) -#define SPLL_PLL_LCPLL (3 << 28) -#define SPLL_PLL_REF_MASK (3 << 28) -#define SPLL_PLL_FREQ_810MHz (0 << 26) -#define SPLL_PLL_FREQ_1350MHz (1 << 26) -#define SPLL_PLL_FREQ_2700MHz (2 << 26) -#define SPLL_PLL_FREQ_MASK (3 << 26) +#define SPLL_REF_BCLK (0 << 28) +#define SPLL_REF_MUXED_SSC (1 << 28) /* CPU SSC if fused enabled, PCH SSC otherwise */ +#define SPLL_REF_NON_SSC_HSW (2 << 28) +#define SPLL_REF_PCH_SSC_BDW (2 << 28) +#define SPLL_REF_LCPLL (3 << 28) +#define SPLL_REF_MASK (3 << 28) +#define SPLL_FREQ_810MHz (0 << 26) +#define SPLL_FREQ_1350MHz (1 << 26) +#define SPLL_FREQ_2700MHz (2 << 26) +#define SPLL_FREQ_MASK (3 << 26) /* WRPLL */ #define _WRPLL_CTL1 0x46040 #define _WRPLL_CTL2 0x46060 #define WRPLL_CTL(pll) _MMIO_PIPE(pll, _WRPLL_CTL1, _WRPLL_CTL2) #define WRPLL_PLL_ENABLE (1 << 31) -#define WRPLL_PLL_SSC (1 << 28) -#define WRPLL_PLL_NON_SSC (2 << 28) -#define WRPLL_PLL_LCPLL (3 << 28) -#define WRPLL_PLL_REF_MASK (3 << 28) +#define WRPLL_REF_BCLK (0 << 28) +#define WRPLL_REF_PCH_SSC (1 << 28) +#define WRPLL_REF_MUXED_SSC_BDW (2 << 28) /* CPU SSC if fused enabled, PCH SSC otherwise */ +#define WRPLL_REF_SPECIAL_HSW (2 << 28) /* muxed SSC (ULT), non-SSC 
(non-ULT) */ +#define WRPLL_REF_LCPLL (3 << 28) +#define WRPLL_REF_MASK (3 << 28) /* WRPLL divider programming */ #define WRPLL_DIVIDER_REFERENCE(x) ((x) << 0) #define WRPLL_DIVIDER_REF_MASK (0xff) @@ -9549,6 +9553,10 @@ enum skl_power_gate { #define LCPLL_CTL _MMIO(0x130040) #define LCPLL_PLL_DISABLE (1 << 31) #define LCPLL_PLL_LOCK (1 << 30) +#define LCPLL_REF_NON_SSC (0 << 28) +#define LCPLL_REF_BCLK (2 << 28) +#define LCPLL_REF_PCH_SSC (3 << 28) +#define LCPLL_REF_MASK (3 << 28) #define LCPLL_CLK_FREQ_MASK (3 << 26) #define LCPLL_CLK_FREQ_450 (0 << 26) #define LCPLL_CLK_FREQ_54O_BDW (1 << 26) diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 609ccd240efb..1a9978f84c9a 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -1231,9 +1231,9 @@ static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv, u32 wrpll; wrpll = I915_READ(reg); - switch (wrpll & WRPLL_PLL_REF_MASK) { - case WRPLL_PLL_SSC: - case WRPLL_PLL_NON_SSC: + switch (wrpll & WRPLL_REF_MASK) { + case WRPLL_REF_SPECIAL_HSW: + case WRPLL_REF_PCH_SSC: /* * We could calculate spread here, but our checking * code only cares about 5% accuracy, and spread is a max of @@ -1241,7 +1241,7 @@ static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv, */ refclk = 135; break; - case WRPLL_PLL_LCPLL: + case WRPLL_REF_LCPLL: refclk = LC_FREQ; break; default: @@ -1613,12 +1613,12 @@ static void hsw_ddi_clock_get(struct intel_encoder *encoder, link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL(1)); break; case PORT_CLK_SEL_SPLL: - pll = I915_READ(SPLL_CTL) & SPLL_PLL_FREQ_MASK; - if (pll == SPLL_PLL_FREQ_810MHz) + pll = I915_READ(SPLL_CTL) & SPLL_FREQ_MASK; + if (pll == SPLL_FREQ_810MHz) link_clock = 81000; - else if (pll == SPLL_PLL_FREQ_1350MHz) + else if (pll == SPLL_FREQ_1350MHz) link_clock = 135000; - else if (pll == SPLL_PLL_FREQ_2700MHz) + else if (pll == SPLL_FREQ_2700MHz) link_clock = 270000; else { WARN(1, "bad spll freq\n"); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 352c42826cb1..1b1ddb48ca7a 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -9134,12 +9134,12 @@ static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv) if ((ctl & SPLL_PLL_ENABLE) == 0) return false; - if ((ctl & SPLL_PLL_REF_MASK) == SPLL_PLL_SSC && + if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC && (fuse_strap & HSW_CPU_SSC_ENABLE) == 0) return true; if (IS_BROADWELL(dev_priv) && - (ctl & SPLL_PLL_REF_MASK) == SPLL_PLL_NON_SSC) + (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW) return true; return false; @@ -9154,11 +9154,11 @@ static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv, if ((ctl & WRPLL_PLL_ENABLE) == 0) return false; - if ((ctl & WRPLL_PLL_REF_MASK) == WRPLL_PLL_SSC) + if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC) return true; if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) && - (ctl & WRPLL_PLL_REF_MASK) == WRPLL_PLL_NON_SSC && + (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW && (fuse_strap & HSW_CPU_SSC_ENABLE) == 0) return true; diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c index 69787f259677..2d4e7b9a7b9d 100644 --- a/drivers/gpu/drm/i915/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c @@ -775,7 +775,7 @@ static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(struct intel_crtc_state * hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p); - val = 
WRPLL_PLL_ENABLE | WRPLL_PLL_LCPLL | + val = WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL | WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) | WRPLL_DIVIDER_POST(p); @@ -839,7 +839,7 @@ hsw_get_dpll(struct intel_crtc_state *crtc_state, return NULL; crtc_state->dpll_hw_state.spll = - SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC; + SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC; pll = intel_find_shared_dpll(crtc_state, DPLL_ID_SPLL, DPLL_ID_SPLL); -- cgit v1.2.3 From 0f52c097a25fb09378968db7afed54d306e6bcff Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Tue, 4 Jun 2019 23:09:31 +0300 Subject: drm/i915: Nuke LC_FREQ MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Get rid of the pointless LC_FREQ define. Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190604200933.29417-3-ville.syrjala@linux.intel.com Reviewed-by: Maarten Lankhorst --- drivers/gpu/drm/i915/intel_ddi.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 1a9978f84c9a..aa06f1bcea61 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -1221,12 +1221,10 @@ intel_ddi_get_crtc_encoder(struct intel_crtc *crtc) return ret; } -#define LC_FREQ 2700 - static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv, i915_reg_t reg) { - int refclk = LC_FREQ; + int refclk; int n, p, r; u32 wrpll; @@ -1242,7 +1240,7 @@ static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv, refclk = 135; break; case WRPLL_REF_LCPLL: - refclk = LC_FREQ; + refclk = 2700; break; default: WARN(1, "bad wrpll refclk\n"); -- cgit v1.2.3 From d1707a96b11787c7b9ca11fd93d235d6eb6ccbf1 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Tue, 4 Jun 2019 23:09:32 +0300 Subject: drm/i915: Assert that HSW/BDW LCPLL is using the non-SSC reference MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Only the non-SSC reference is truly supported for the LCPLL. Assert that it is indeed selected. Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190604200933.29417-4-ville.syrjala@linux.intel.com Reviewed-by: Maarten Lankhorst --- drivers/gpu/drm/i915/intel_display_power.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_display_power.c b/drivers/gpu/drm/i915/intel_display_power.c index 278a7edc94f5..bb9ef1cea5db 100644 --- a/drivers/gpu/drm/i915/intel_display_power.c +++ b/drivers/gpu/drm/i915/intel_display_power.c @@ -3665,6 +3665,9 @@ static void hsw_assert_cdclk(struct drm_i915_private *dev_priv) if (val & LCPLL_PLL_DISABLE) DRM_ERROR("LCPLL is disabled\n"); + + if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC) + DRM_ERROR("LCPLL not using non-SSC reference\n"); } static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) -- cgit v1.2.3 From 86761789b38a90ee56d455b81d534ac65e8a2b8b Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Tue, 4 Jun 2019 23:09:33 +0300 Subject: drm/i915: Improve WRPLL reference clock readout on HSW/BDW MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit On non-ULT HSW the "special" WRPLL reference clock select actually means non-SSC. Take that into account when reading out the WRPLL state. Also the non-SSC reference may be either 24MHz or 135MHz, which we can read out from FUSE_STRAP3. 
The BDW docs actually say: "also indicates whether the CPU and PCH are in a single package or separate packages", so it may be that this is not actually required and we could just assume 135 MHz (just like the code already did). But it doesn't really hurt to read this out as the HSW docs aren't quite so clear. Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190604200933.29417-5-ville.syrjala@linux.intel.com Reviewed-by: Maarten Lankhorst --- drivers/gpu/drm/i915/i915_reg.h | 3 +++ drivers/gpu/drm/i915/intel_ddi.c | 15 ++++++++++++++- 2 files changed, 17 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 665dfc177528..d0c262367f09 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -7514,6 +7514,9 @@ enum { #define ILK_DESKTOP (1 << 23) #define HSW_CPU_SSC_ENABLE (1 << 21) +#define FUSE_STRAP3 _MMIO(0x42020) +#define HSW_REF_CLK_SELECT (1 << 1) + #define ILK_DSPCLK_GATE_D _MMIO(0x42020) #define ILK_VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) #define ILK_DPFCUNIT_CLOCK_GATE_DISABLE (1 << 9) diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index aa06f1bcea61..2fad62099ca5 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -1231,6 +1231,19 @@ static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv, wrpll = I915_READ(reg); switch (wrpll & WRPLL_REF_MASK) { case WRPLL_REF_SPECIAL_HSW: + /* + * muxed-SSC for BDW. + * non-SSC for non-ULT HSW. Check FUSE_STRAP3 + * for the non-SSC reference frequency. + */ + if (IS_HASWELL(dev_priv) && !IS_HSW_ULT(dev_priv)) { + if (I915_READ(FUSE_STRAP3) & HSW_REF_CLK_SELECT) + refclk = 24; + else + refclk = 135; + break; + } + /* fall through */ case WRPLL_REF_PCH_SSC: /* * We could calculate spread here, but our checking @@ -1243,7 +1256,7 @@ static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv, refclk = 2700; break; default: - WARN(1, "bad wrpll refclk\n"); + MISSING_CASE(wrpll); return 0; } -- cgit v1.2.3 From c3ad8d29db5e57ba0494105b46f8ba76c2ee07cf Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 5 Jun 2019 19:29:45 +0300 Subject: drm/i915: Add missing commas to the end of the subplatform ID arrays MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a comma after the final entry to make diffs less obnoxious if we have to add further entries past the last one. 
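To illustrate the rationale, compare the diffs needed to append one more entry; INTEL_XYZ_PORT_F_IDS() is a made-up placeholder macro used purely for this example. Without a trailing comma the addition has to touch two lines:

 static const u16 subplatform_portf_ids[] = {
 	INTEL_CNL_PORT_F_IDS(0),
-	INTEL_ICL_PORT_F_IDS(0)
+	INTEL_ICL_PORT_F_IDS(0),
+	INTEL_XYZ_PORT_F_IDS(0) /* hypothetical new entry */
 };

With the trailing comma already in place, the same addition is a one-line diff:

 static const u16 subplatform_portf_ids[] = {
 	INTEL_CNL_PORT_F_IDS(0),
 	INTEL_ICL_PORT_F_IDS(0),
+	INTEL_XYZ_PORT_F_IDS(0), /* hypothetical new entry */
 };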
Cc: Tvrtko Ursulin Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190605162946.19223-1-ville.syrjala@linux.intel.com Reviewed-by: Chris Wilson #irc --- drivers/gpu/drm/i915/intel_device_info.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index 97f742530fa1..19437e8ec6fa 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -773,7 +773,7 @@ static const u16 subplatform_ult_ids[] = { INTEL_CFL_U_GT3_IDS(0), INTEL_WHL_U_GT1_IDS(0), INTEL_WHL_U_GT2_IDS(0), - INTEL_WHL_U_GT3_IDS(0) + INTEL_WHL_U_GT3_IDS(0), }; static const u16 subplatform_ulx_ids[] = { @@ -786,17 +786,17 @@ static const u16 subplatform_ulx_ids[] = { INTEL_SKL_ULX_GT1_IDS(0), INTEL_SKL_ULX_GT2_IDS(0), INTEL_KBL_ULX_GT1_IDS(0), - INTEL_KBL_ULX_GT2_IDS(0) + INTEL_KBL_ULX_GT2_IDS(0), }; static const u16 subplatform_aml_ids[] = { INTEL_AML_KBL_GT2_IDS(0), - INTEL_AML_CFL_GT2_IDS(0) + INTEL_AML_CFL_GT2_IDS(0), }; static const u16 subplatform_portf_ids[] = { INTEL_CNL_PORT_F_IDS(0), - INTEL_ICL_PORT_F_IDS(0) + INTEL_ICL_PORT_F_IDS(0), }; static bool find_devid(u16 id, const u16 *p, unsigned int num) -- cgit v1.2.3 From 6ce1c33d6c36fb3858e8e956d72586f7a024ed3a Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 5 Jun 2019 19:29:46 +0300 Subject: drm/i915: Kill INTEL_SUBPLATFORM_AML MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit All AML parts are either KBL ULX or CFL ULX so there is no point in keeping INTEL_SUBPLATFORM_AML around. As these are the only CFL ULX parts (normal CFL didn't have Y SKUs) so we'll just replace IS_AML_ULX with IS_CFL_ULX (it was already paired with IS_KBL_ULX which accounts for the other half of the AML parts). 
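As a rough, standalone illustration of how these device ID tables are consumed (the IDs, the example_ names and the bit position below are invented for this sketch; find_devid() and intel_device_info_subplatform_init() in the surrounding diffs are the real counterparts):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_SUBPLATFORM_ULX 1 /* invented bit position */

/* Invented device IDs standing in for the real ULX PCI ID macros. */
static const uint16_t example_ulx_ids[] = { 0x1234, 0x5678 };

static bool example_find_devid(uint16_t id, const uint16_t *p, unsigned int num)
{
	while (num--)
		if (*p++ == id)
			return true;
	return false;
}

int main(void)
{
	uint16_t devid = 0x1234;
	uint32_t mask = 0;

	if (example_find_devid(devid, example_ulx_ids,
			       sizeof(example_ulx_ids) / sizeof(example_ulx_ids[0])))
		mask |= 1u << EXAMPLE_SUBPLATFORM_ULX;

	printf("ULX subplatform: %s\n",
	       (mask & (1u << EXAMPLE_SUBPLATFORM_ULX)) ? "yes" : "no");
	return 0;
}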
Cc: Tvrtko Ursulin Cc: José Roberto de Souza Cc: Rodrigo Vivi Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190605162946.19223-2-ville.syrjala@linux.intel.com Reviewed-by: Rodrigo Vivi --- drivers/gpu/drm/i915/i915_drv.h | 5 ++--- drivers/gpu/drm/i915/intel_ddi.c | 8 +++++--- drivers/gpu/drm/i915/intel_device_info.c | 6 ------ drivers/gpu/drm/i915/intel_device_info.h | 1 - 4 files changed, 7 insertions(+), 13 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 2e2cfbfa0ced..caf999180d57 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2223,9 +2223,6 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULT) #define IS_KBL_ULX(dev_priv) \ IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULX) -#define IS_AML_ULX(dev_priv) \ - (IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_AML) || \ - IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_AML)) #define IS_SKL_GT2(dev_priv) (IS_SKYLAKE(dev_priv) && \ INTEL_INFO(dev_priv)->gt == 2) #define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \ @@ -2238,6 +2235,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, INTEL_INFO(dev_priv)->gt == 3) #define IS_CFL_ULT(dev_priv) \ IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULT) +#define IS_CFL_ULX(dev_priv) \ + IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULX) #define IS_CFL_GT2(dev_priv) (IS_COFFEELAKE(dev_priv) && \ INTEL_INFO(dev_priv)->gt == 2) #define IS_CFL_GT3(dev_priv) (IS_COFFEELAKE(dev_priv) && \ diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 2fad62099ca5..7925a176f900 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -615,7 +615,7 @@ skl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries) static const struct ddi_buf_trans * kbl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries) { - if (IS_KBL_ULX(dev_priv) || IS_AML_ULX(dev_priv)) { + if (IS_KBL_ULX(dev_priv) || IS_CFL_ULX(dev_priv)) { *n_entries = ARRAY_SIZE(kbl_y_ddi_translations_dp); return kbl_y_ddi_translations_dp; } else if (IS_KBL_ULT(dev_priv) || IS_CFL_ULT(dev_priv)) { @@ -631,7 +631,8 @@ static const struct ddi_buf_trans * skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries) { if (dev_priv->vbt.edp.low_vswing) { - if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv) || IS_AML_ULX(dev_priv)) { + if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv) || + IS_CFL_ULX(dev_priv)) { *n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp); return skl_y_ddi_translations_edp; } else if (IS_SKL_ULT(dev_priv) || IS_KBL_ULT(dev_priv) || @@ -653,7 +654,8 @@ skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries) static const struct ddi_buf_trans * skl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries) { - if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv) || IS_AML_ULX(dev_priv)) { + if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv) || + IS_CFL_ULX(dev_priv)) { *n_entries = ARRAY_SIZE(skl_y_ddi_translations_hdmi); return skl_y_ddi_translations_hdmi; } else { diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index 19437e8ec6fa..7135d8dc32a7 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -787,9 +787,6 @@ static const u16 
subplatform_ulx_ids[] = { INTEL_SKL_ULX_GT2_IDS(0), INTEL_KBL_ULX_GT1_IDS(0), INTEL_KBL_ULX_GT2_IDS(0), -}; - -static const u16 subplatform_aml_ids[] = { INTEL_AML_KBL_GT2_IDS(0), INTEL_AML_CFL_GT2_IDS(0), }; @@ -832,9 +829,6 @@ void intel_device_info_subplatform_init(struct drm_i915_private *i915) /* ULX machines are also considered ULT. */ mask |= BIT(INTEL_SUBPLATFORM_ULT); } - } else if (find_devid(devid, subplatform_aml_ids, - ARRAY_SIZE(subplatform_aml_ids))) { - mask = BIT(INTEL_SUBPLATFORM_AML); } else if (find_devid(devid, subplatform_portf_ids, ARRAY_SIZE(subplatform_portf_ids))) { mask = BIT(INTEL_SUBPLATFORM_PORTF); diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h index 1fb8b50df7df..3ea953a230b3 100644 --- a/drivers/gpu/drm/i915/intel_device_info.h +++ b/drivers/gpu/drm/i915/intel_device_info.h @@ -91,7 +91,6 @@ enum intel_platform { /* HSW/BDW/SKL/KBL/CFL */ #define INTEL_SUBPLATFORM_ULT (0) #define INTEL_SUBPLATFORM_ULX (1) -#define INTEL_SUBPLATFORM_AML (2) /* CNL/ICL */ #define INTEL_SUBPLATFORM_PORTF (0) -- cgit v1.2.3 From ecab9be174d98ffbc69d614978f2372ca2ef54c9 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 12 Jun 2019 11:57:20 +0100 Subject: drm/i915: Combine unbound/bound list tracking for objects With async binding, we don't want to manage a bound/unbound list as we may end up running before we even acquire the pages. All that is required is keeping track of shrinkable objects, so reduce it to the minimum list. Fixes: 6951e5893b48 ("drm/i915: Move GEM object domain management from struct_mutex to local") Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190612105720.30310-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_domain.c | 11 +- drivers/gpu/drm/i915/gem/i915_gem_object.c | 2 +- drivers/gpu/drm/i915/gem/i915_gem_object_types.h | 2 +- drivers/gpu/drm/i915/gem/i915_gem_pages.c | 10 +- drivers/gpu/drm/i915/gem/i915_gem_pm.c | 30 +++- drivers/gpu/drm/i915/gem/i915_gem_shrinker.c | 30 ++-- drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 5 +- drivers/gpu/drm/i915/i915_debugfs.c | 191 +---------------------- drivers/gpu/drm/i915/i915_drv.h | 14 +- drivers/gpu/drm/i915/i915_gem.c | 30 ++-- drivers/gpu/drm/i915/i915_vma.c | 34 +--- drivers/gpu/drm/i915/selftests/i915_gem_evict.c | 16 +- drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 2 +- 13 files changed, 95 insertions(+), 282 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c index 31929220b90f..bd180ef46aeb 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c @@ -219,7 +219,7 @@ restart: * rewrite the PTE in the belief that doing so tramples upon less * state and so involves less work. */ - if (obj->bind_count) { + if (atomic_read(&obj->bind_count)) { /* Before we change the PTE, the GPU must not be accessing it. * If we wait upon the object, we know that all the bound * VMA are no longer active. @@ -480,13 +480,8 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj) spin_lock_irqsave(&i915->mm.obj_lock, flags); - if (obj->mm.madv == I915_MADV_WILLNEED) { - struct list_head *list; - - list = obj->bind_count ? 
- &i915->mm.bound_list : &i915->mm.unbound_list; - list_move_tail(&obj->mm.link, list); - } + if (obj->mm.madv == I915_MADV_WILLNEED) + list_move_tail(&obj->mm.link, &i915->mm.shrink_list); spin_unlock_irqrestore(&i915->mm.obj_lock, flags); } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c index d02a1aff2058..36b76c6a0a9d 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c @@ -216,7 +216,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915, mutex_unlock(&i915->drm.struct_mutex); - GEM_BUG_ON(obj->bind_count); + GEM_BUG_ON(atomic_read(&obj->bind_count)); GEM_BUG_ON(obj->userfault_count); GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits)); GEM_BUG_ON(!list_empty(&obj->lut_list)); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h index 9c161ba73558..5b05698619ce 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h @@ -155,7 +155,7 @@ struct drm_i915_gem_object { #define STRIDE_MASK (~TILING_MASK) /** Count of VMA actually bound by this object */ - unsigned int bind_count; + atomic_t bind_count; unsigned int active_count; /** Count of how many global VMA are currently pinned for use by HW */ unsigned int pin_global; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c index 7ff907d6d0c6..b36ad269f4ea 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c @@ -57,13 +57,19 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg)); if (i915_gem_object_is_shrinkable(obj)) { + struct list_head *list; unsigned long flags; spin_lock_irqsave(&i915->mm.obj_lock, flags); i915->mm.shrink_count++; i915->mm.shrink_memory += obj->base.size; - list_add(&obj->mm.link, &i915->mm.unbound_list); + + if (obj->mm.madv != I915_MADV_WILLNEED) + list = &i915->mm.purge_list; + else + list = &i915->mm.shrink_list; + list_add_tail(&obj->mm.link, list); spin_unlock_irqrestore(&i915->mm.obj_lock, flags); } @@ -193,7 +199,7 @@ int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj, if (i915_gem_object_has_pinned_pages(obj)) return -EBUSY; - GEM_BUG_ON(obj->bind_count); + GEM_BUG_ON(atomic_read(&obj->bind_count)); /* May be called by shrinker from within get_pages() (on another bo) */ mutex_lock_nested(&obj->mm.lock, subclass); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c index f40f13c0b8b7..f68c0ad1aa47 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c @@ -158,15 +158,22 @@ void i915_gem_suspend(struct drm_i915_private *i915) intel_uc_suspend(i915); } +static struct drm_i915_gem_object *first_mm_object(struct list_head *list) +{ + return list_first_entry_or_null(list, + struct drm_i915_gem_object, + mm.link); +} + void i915_gem_suspend_late(struct drm_i915_private *i915) { struct drm_i915_gem_object *obj; struct list_head *phases[] = { - &i915->mm.unbound_list, - &i915->mm.bound_list, + &i915->mm.shrink_list, &i915->mm.purge_list, NULL }, **phase; + unsigned long flags; /* * Neither the BIOS, ourselves or any other kernel @@ -188,13 +195,30 @@ void i915_gem_suspend_late(struct drm_i915_private *i915) * machine in an unusable condition. 
*/ + spin_lock_irqsave(&i915->mm.obj_lock, flags); for (phase = phases; *phase; phase++) { - list_for_each_entry(obj, *phase, mm.link) { + LIST_HEAD(keep); + + while ((obj = first_mm_object(*phase))) { + list_move_tail(&obj->mm.link, &keep); + + /* Beware the background _i915_gem_free_objects */ + if (!kref_get_unless_zero(&obj->base.refcount)) + continue; + + spin_unlock_irqrestore(&i915->mm.obj_lock, flags); + i915_gem_object_lock(obj); WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false)); i915_gem_object_unlock(obj); + i915_gem_object_put(obj); + + spin_lock_irqsave(&i915->mm.obj_lock, flags); } + + list_splice_tail(&keep, *phase); } + spin_unlock_irqrestore(&i915->mm.obj_lock, flags); intel_uc_sanitize(i915); i915_gem_sanitize(i915); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c index 70a4c9d3c098..1b93bc334630 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c @@ -69,7 +69,7 @@ static bool can_release_pages(struct drm_i915_gem_object *obj) * to the GPU, simply unbinding from the GPU is not going to succeed * in releasing our pin count on the pages themselves. */ - if (atomic_read(&obj->mm.pages_pin_count) > obj->bind_count) + if (atomic_read(&obj->mm.pages_pin_count) > atomic_read(&obj->bind_count)) return false; /* If any vma are "permanently" pinned, it will prevent us from @@ -145,8 +145,10 @@ i915_gem_shrink(struct drm_i915_private *i915, unsigned int bit; } phases[] = { { &i915->mm.purge_list, ~0u }, - { &i915->mm.unbound_list, I915_SHRINK_UNBOUND }, - { &i915->mm.bound_list, I915_SHRINK_BOUND }, + { + &i915->mm.shrink_list, + I915_SHRINK_BOUND | I915_SHRINK_UNBOUND + }, { NULL, 0 }, }, *phase; intel_wakeref_t wakeref = 0; @@ -238,7 +240,7 @@ i915_gem_shrink(struct drm_i915_private *i915, continue; if (!(shrink & I915_SHRINK_BOUND) && - READ_ONCE(obj->bind_count)) + atomic_read(&obj->bind_count)) continue; if (!can_release_pages(obj)) @@ -378,7 +380,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr) struct drm_i915_private *i915 = container_of(nb, struct drm_i915_private, mm.oom_notifier); struct drm_i915_gem_object *obj; - unsigned long unevictable, bound, unbound, freed_pages; + unsigned long unevictable, available, freed_pages; intel_wakeref_t wakeref; unsigned long flags; @@ -393,26 +395,20 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr) * assert that there are no objects with pinned pages that are not * being pointed to by hardware. 
*/ - unbound = bound = unevictable = 0; + available = unevictable = 0; spin_lock_irqsave(&i915->mm.obj_lock, flags); - list_for_each_entry(obj, &i915->mm.unbound_list, mm.link) { + list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) { if (!can_release_pages(obj)) unevictable += obj->base.size >> PAGE_SHIFT; else - unbound += obj->base.size >> PAGE_SHIFT; - } - list_for_each_entry(obj, &i915->mm.bound_list, mm.link) { - if (!can_release_pages(obj)) - unevictable += obj->base.size >> PAGE_SHIFT; - else - bound += obj->base.size >> PAGE_SHIFT; + available += obj->base.size >> PAGE_SHIFT; } spin_unlock_irqrestore(&i915->mm.obj_lock, flags); - if (freed_pages || unbound || bound) + if (freed_pages || available) pr_info("Purging GPU memory, %lu pages freed, " - "%lu pages still pinned.\n", - freed_pages, unevictable); + "%lu pages still pinned, %lu pages left available.\n", + freed_pages, unevictable, available); *(unsigned long *)ptr += freed_pages; return NOTIFY_DONE; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c index 8f619ad2fab8..de1fab2058ec 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c @@ -613,7 +613,6 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv struct drm_i915_gem_object *obj; struct drm_mm_node *stolen; struct i915_vma *vma; - unsigned long flags; int ret; if (!drm_mm_initialized(&dev_priv->mm.stolen)) @@ -690,10 +689,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv list_move_tail(&vma->vm_link, &ggtt->vm.bound_list); mutex_unlock(&ggtt->vm.mutex); - spin_lock_irqsave(&dev_priv->mm.obj_lock, flags); GEM_BUG_ON(i915_gem_object_is_shrinkable(obj)); - obj->bind_count++; - spin_unlock_irqrestore(&dev_priv->mm.obj_lock, flags); + atomic_inc(&obj->bind_count); return obj; diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index b070e6f3780c..3d3d27ff5dab 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -104,19 +104,6 @@ static char get_pin_mapped_flag(struct drm_i915_gem_object *obj) return obj->mm.mapping ? 
'M' : ' '; } -static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj) -{ - u64 size = 0; - struct i915_vma *vma; - - for_each_ggtt_vma(vma, obj) { - if (drm_mm_node_allocated(&vma->node)) - size += vma->node.size; - } - - return size; -} - static const char * stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len) { @@ -247,84 +234,6 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits); } -static int obj_rank_by_stolen(const void *A, const void *B) -{ - const struct drm_i915_gem_object *a = - *(const struct drm_i915_gem_object **)A; - const struct drm_i915_gem_object *b = - *(const struct drm_i915_gem_object **)B; - - if (a->stolen->start < b->stolen->start) - return -1; - if (a->stolen->start > b->stolen->start) - return 1; - return 0; -} - -static int i915_gem_stolen_list_info(struct seq_file *m, void *data) -{ - struct drm_i915_private *dev_priv = node_to_i915(m->private); - struct drm_device *dev = &dev_priv->drm; - struct drm_i915_gem_object **objects; - struct drm_i915_gem_object *obj; - u64 total_obj_size, total_gtt_size; - unsigned long total, count, n; - unsigned long flags; - int ret; - - total = READ_ONCE(dev_priv->mm.shrink_count); - objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL); - if (!objects) - return -ENOMEM; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - goto out; - - total_obj_size = total_gtt_size = count = 0; - - spin_lock_irqsave(&dev_priv->mm.obj_lock, flags); - list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) { - if (count == total) - break; - - if (obj->stolen == NULL) - continue; - - objects[count++] = obj; - total_obj_size += obj->base.size; - total_gtt_size += i915_gem_obj_total_ggtt_size(obj); - - } - list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) { - if (count == total) - break; - - if (obj->stolen == NULL) - continue; - - objects[count++] = obj; - total_obj_size += obj->base.size; - } - spin_unlock_irqrestore(&dev_priv->mm.obj_lock, flags); - - sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL); - - seq_puts(m, "Stolen:\n"); - for (n = 0; n < count; n++) { - seq_puts(m, " "); - describe_obj(m, objects[n]); - seq_putc(m, '\n'); - } - seq_printf(m, "Total %lu objects, %llu bytes, %llu GTT size\n", - count, total_obj_size, total_gtt_size); - - mutex_unlock(&dev->struct_mutex); -out: - kvfree(objects); - return ret; -} - struct file_stats { struct i915_address_space *vm; unsigned long count; @@ -344,7 +253,7 @@ static int per_file_stats(int id, void *ptr, void *data) stats->count++; stats->total += obj->base.size; - if (!obj->bind_count) + if (!atomic_read(&obj->bind_count)) stats->unbound += obj->base.size; if (obj->base.name || obj->base.dma_buf) stats->shared += obj->base.size; @@ -451,105 +360,22 @@ static void print_context_stats(struct seq_file *m, static int i915_gem_object_info(struct seq_file *m, void *data) { - struct drm_i915_private *dev_priv = node_to_i915(m->private); - struct drm_device *dev = &dev_priv->drm; - struct i915_ggtt *ggtt = &dev_priv->ggtt; - u32 count, mapped_count, purgeable_count, dpy_count, huge_count; - u64 size, mapped_size, purgeable_size, dpy_size, huge_size; - struct drm_i915_gem_object *obj; - unsigned int page_sizes = 0; - unsigned long flags; - char buf[80]; + struct drm_i915_private *i915 = node_to_i915(m->private); int ret; seq_printf(m, "%u shrinkable objects, %llu bytes\n", - dev_priv->mm.shrink_count, - dev_priv->mm.shrink_memory); - - size = 
count = 0; - mapped_size = mapped_count = 0; - purgeable_size = purgeable_count = 0; - huge_size = huge_count = 0; - - spin_lock_irqsave(&dev_priv->mm.obj_lock, flags); - list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) { - size += obj->base.size; - ++count; - - if (obj->mm.madv == I915_MADV_DONTNEED) { - purgeable_size += obj->base.size; - ++purgeable_count; - } - - if (obj->mm.mapping) { - mapped_count++; - mapped_size += obj->base.size; - } - - if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) { - huge_count++; - huge_size += obj->base.size; - page_sizes |= obj->mm.page_sizes.sg; - } - } - seq_printf(m, "%u unbound objects, %llu bytes\n", count, size); - - size = count = dpy_size = dpy_count = 0; - list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) { - size += obj->base.size; - ++count; - - if (obj->pin_global) { - dpy_size += obj->base.size; - ++dpy_count; - } - - if (obj->mm.madv == I915_MADV_DONTNEED) { - purgeable_size += obj->base.size; - ++purgeable_count; - } - - if (obj->mm.mapping) { - mapped_count++; - mapped_size += obj->base.size; - } - - if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) { - huge_count++; - huge_size += obj->base.size; - page_sizes |= obj->mm.page_sizes.sg; - } - } - spin_unlock_irqrestore(&dev_priv->mm.obj_lock, flags); - - seq_printf(m, "%u bound objects, %llu bytes\n", - count, size); - seq_printf(m, "%u purgeable objects, %llu bytes\n", - purgeable_count, purgeable_size); - seq_printf(m, "%u mapped objects, %llu bytes\n", - mapped_count, mapped_size); - seq_printf(m, "%u huge-paged objects (%s) %llu bytes\n", - huge_count, - stringify_page_sizes(page_sizes, buf, sizeof(buf)), - huge_size); - seq_printf(m, "%u display objects (globally pinned), %llu bytes\n", - dpy_count, dpy_size); - - seq_printf(m, "%llu [%pa] gtt total\n", - ggtt->vm.total, &ggtt->mappable_end); - seq_printf(m, "Supported page sizes: %s\n", - stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes, - buf, sizeof(buf))); + i915->mm.shrink_count, + i915->mm.shrink_memory); seq_putc(m, '\n'); - ret = mutex_lock_interruptible(&dev->struct_mutex); + ret = mutex_lock_interruptible(&i915->drm.struct_mutex); if (ret) return ret; - print_batch_pool_stats(m, dev_priv); - print_context_stats(m, dev_priv); - mutex_unlock(&dev->struct_mutex); + print_batch_pool_stats(m, i915); + print_context_stats(m, i915); + mutex_unlock(&i915->drm.struct_mutex); return 0; } @@ -4535,7 +4361,6 @@ static const struct file_operations i915_fifo_underrun_reset_ops = { static const struct drm_info_list i915_debugfs_list[] = { {"i915_capabilities", i915_capabilities, 0}, {"i915_gem_objects", i915_gem_object_info, 0}, - {"i915_gem_stolen", i915_gem_stolen_list_info }, {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0}, {"i915_gem_interrupt", i915_interrupt_info, 0}, {"i915_gem_batch_pool", i915_gem_batch_pool_info, 0}, diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index caf999180d57..427c530a6c12 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -747,19 +747,15 @@ struct i915_gem_mm { /* Protects bound_list/unbound_list and #drm_i915_gem_object.mm.link */ spinlock_t obj_lock; - /** List of all objects in gtt_space. Used to restore gtt - * mappings on resume */ - struct list_head bound_list; /** - * List of objects which are not bound to the GTT (thus - * are idle and not used by the GPU). These objects may or may - * not actually have any pages attached. + * List of objects which are purgeable. 
*/ - struct list_head unbound_list; + struct list_head purge_list; + /** - * List of objects which are purgeable. May be active. + * List of objects which have allocated pages and are shrinkable. */ - struct list_head purge_list; + struct list_head shrink_list; /** List of all objects in gtt_space, currently mmaped by userspace. * All objects within this list must also be on bound_list. diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 24f0f3db1bfb..17e8809c5312 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1144,10 +1144,8 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, if (obj->mm.madv != I915_MADV_WILLNEED) list = &i915->mm.purge_list; - else if (obj->bind_count) - list = &i915->mm.bound_list; else - list = &i915->mm.unbound_list; + list = &i915->mm.shrink_list; list_move_tail(&obj->mm.link, list); spin_unlock_irqrestore(&i915->mm.obj_lock, flags); @@ -1770,8 +1768,7 @@ static void i915_gem_init__mm(struct drm_i915_private *i915) init_llist_head(&i915->mm.free_list); INIT_LIST_HEAD(&i915->mm.purge_list); - INIT_LIST_HEAD(&i915->mm.unbound_list); - INIT_LIST_HEAD(&i915->mm.bound_list); + INIT_LIST_HEAD(&i915->mm.shrink_list); INIT_LIST_HEAD(&i915->mm.fence_list); INIT_LIST_HEAD(&i915->mm.userfault_list); @@ -1837,11 +1834,7 @@ int i915_gem_freeze(struct drm_i915_private *dev_priv) int i915_gem_freeze_late(struct drm_i915_private *i915) { struct drm_i915_gem_object *obj; - struct list_head *phases[] = { - &i915->mm.unbound_list, - &i915->mm.bound_list, - NULL - }, **phase; + intel_wakeref_t wakeref; /* * Called just before we write the hibernation image. @@ -1858,17 +1851,18 @@ int i915_gem_freeze_late(struct drm_i915_private *i915) * the objects as well, see i915_gem_freeze() */ - i915_gem_shrink(i915, -1UL, NULL, I915_SHRINK_UNBOUND); + wakeref = intel_runtime_pm_get(i915); + + i915_gem_shrink(i915, -1UL, NULL, ~0); i915_gem_drain_freed_objects(i915); - for (phase = phases; *phase; phase++) { - list_for_each_entry(obj, *phase, mm.link) { - i915_gem_object_lock(obj); - WARN_ON(i915_gem_object_set_to_cpu_domain(obj, true)); - i915_gem_object_unlock(obj); - } + list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) { + i915_gem_object_lock(obj); + WARN_ON(i915_gem_object_set_to_cpu_domain(obj, true)); + i915_gem_object_unlock(obj); } - GEM_BUG_ON(!list_empty(&i915->mm.purge_list)); + + intel_runtime_pm_put(i915, wakeref); return 0; } diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 80050f6a0893..cb341e4acf99 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -83,10 +83,7 @@ static void obj_bump_mru(struct drm_i915_gem_object *obj) unsigned long flags; spin_lock_irqsave(&i915->mm.obj_lock, flags); - - if (obj->bind_count) - list_move_tail(&obj->mm.link, &i915->mm.bound_list); - + list_move_tail(&obj->mm.link, &i915->mm.shrink_list); spin_unlock_irqrestore(&i915->mm.obj_lock, flags); obj->mm.dirty = true; /* be paranoid */ @@ -538,7 +535,7 @@ static void assert_bind_count(const struct drm_i915_gem_object *obj) * assume that no else is pinning the pages, but as a rough assertion * that we will not run into problems later, this will do!) 
*/ - GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count); + GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < atomic_read(&obj->bind_count)); } /** @@ -680,18 +677,8 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) mutex_unlock(&vma->vm->mutex); if (vma->obj) { - struct drm_i915_gem_object *obj = vma->obj; - unsigned long flags; - - spin_lock_irqsave(&dev_priv->mm.obj_lock, flags); - - if (i915_gem_object_is_shrinkable(obj)) - list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list); - - obj->bind_count++; - assert_bind_count(obj); - - spin_unlock_irqrestore(&dev_priv->mm.obj_lock, flags); + atomic_inc(&vma->obj->bind_count); + assert_bind_count(vma->obj); } return 0; @@ -707,8 +694,6 @@ err_unpin: static void i915_vma_remove(struct i915_vma *vma) { - struct drm_i915_private *i915 = vma->vm->i915; - GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)); @@ -725,17 +710,8 @@ i915_vma_remove(struct i915_vma *vma) */ if (vma->obj) { struct drm_i915_gem_object *obj = vma->obj; - unsigned long flags; - - spin_lock_irqsave(&i915->mm.obj_lock, flags); - - GEM_BUG_ON(obj->bind_count == 0); - if (--obj->bind_count == 0 && - i915_gem_object_is_shrinkable(obj) && - obj->mm.madv == I915_MADV_WILLNEED) - list_move_tail(&obj->mm.link, &i915->mm.unbound_list); - spin_unlock_irqrestore(&i915->mm.obj_lock, flags); + atomic_dec(&obj->bind_count); /* * And finally now the object is completely decoupled from this diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c index 1d8235303edf..71c1363ad536 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c @@ -67,20 +67,24 @@ static int populate_ggtt(struct drm_i915_private *i915, count++; } + bound = 0; unbound = 0; - list_for_each_entry(obj, &i915->mm.unbound_list, mm.link) - if (obj->mm.quirked) + list_for_each_entry(obj, objects, st_link) { + GEM_BUG_ON(!obj->mm.quirked); + + if (atomic_read(&obj->bind_count)) + bound++; + else unbound++; + } + GEM_BUG_ON(bound + unbound != count); + if (unbound) { pr_err("%s: Found %lu objects unbound, expected %u!\n", __func__, unbound, 0); return -EINVAL; } - bound = 0; - list_for_each_entry(obj, &i915->mm.bound_list, mm.link) - if (obj->mm.quirked) - bound++; if (bound != count) { pr_err("%s: Found %lu objects bound, expected %lu!\n", __func__, bound, count); diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c index a557e77d9c54..2093d08a7569 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c @@ -1233,7 +1233,7 @@ static void track_vma_bind(struct i915_vma *vma) { struct drm_i915_gem_object *obj = vma->obj; - obj->bind_count++; /* track for eviction later */ + atomic_inc(&obj->bind_count); /* track for eviction later */ __i915_gem_object_pin_pages(obj); vma->pages = obj->mm.pages; -- cgit v1.2.3 From c54f0bac70466fa8a46d26529bb80099906c4e36 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Tue, 11 Jun 2019 11:45:43 +0100 Subject: drm/i915: Remove I915_READ8 Only a few call sites remain which have been converted to uncore mmio accessors and so the macro can be removed. 
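The conversion follows one shape throughout: grab the uncore pointer once and pass it explicitly instead of relying on an implicit dev_priv in scope. Roughly, mirroring the _VGA_MSR_WRITE read in the intel_crt.c hunk below (sketch):

    struct intel_uncore *uncore = &dev_priv->uncore;
    u8 st00;

    /* previously: st00 = I915_READ8(_VGA_MSR_WRITE); */
    st00 = intel_uncore_read8(uncore, _VGA_MSR_WRITE);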
Signed-off-by: Tvrtko Ursulin Reviewed-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190611104548.30545-1-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/i915_drv.h | 2 -- drivers/gpu/drm/i915/intel_crt.c | 41 ++++++++++++++++++++++------------------ drivers/gpu/drm/i915/intel_pm.c | 6 +++--- 3 files changed, 26 insertions(+), 23 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 427c530a6c12..f90bab84ac52 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2838,8 +2838,6 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e, #define __I915_REG_OP(op__, dev_priv__, ...) \ intel_uncore_##op__(&(dev_priv__)->uncore, __VA_ARGS__) -#define I915_READ8(reg__) __I915_REG_OP(read8, dev_priv, (reg__)) - #define I915_READ16(reg__) __I915_REG_OP(read16, dev_priv, (reg__)) #define I915_WRITE16(reg__, val__) __I915_REG_OP(write16, dev_priv, (reg__), (val__)) diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index bb56518576a1..3fcf2f84bcce 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -643,6 +643,7 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe) { struct drm_device *dev = crt->base.base.dev; struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_uncore *uncore = &dev_priv->uncore; u32 save_bclrpat; u32 save_vtotal; u32 vtotal, vactive; @@ -663,9 +664,9 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe) pipeconf_reg = PIPECONF(pipe); pipe_dsl_reg = PIPEDSL(pipe); - save_bclrpat = I915_READ(bclrpat_reg); - save_vtotal = I915_READ(vtotal_reg); - vblank = I915_READ(vblank_reg); + save_bclrpat = intel_uncore_read(uncore, bclrpat_reg); + save_vtotal = intel_uncore_read(uncore, vtotal_reg); + vblank = intel_uncore_read(uncore, vblank_reg); vtotal = ((save_vtotal >> 16) & 0xfff) + 1; vactive = (save_vtotal & 0x7ff) + 1; @@ -674,21 +675,23 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe) vblank_end = ((vblank >> 16) & 0xfff) + 1; /* Set the border color to purple. */ - I915_WRITE(bclrpat_reg, 0x500050); + intel_uncore_write(uncore, bclrpat_reg, 0x500050); if (!IS_GEN(dev_priv, 2)) { - u32 pipeconf = I915_READ(pipeconf_reg); - I915_WRITE(pipeconf_reg, pipeconf | PIPECONF_FORCE_BORDER); - POSTING_READ(pipeconf_reg); + u32 pipeconf = intel_uncore_read(uncore, pipeconf_reg); + intel_uncore_write(uncore, + pipeconf_reg, + pipeconf | PIPECONF_FORCE_BORDER); + intel_uncore_posting_read(uncore, pipeconf_reg); /* Wait for next Vblank to substitue * border color for Color info */ intel_wait_for_vblank(dev_priv, pipe); - st00 = I915_READ8(_VGA_MSR_WRITE); + st00 = intel_uncore_read8(uncore, _VGA_MSR_WRITE); status = ((st00 & (1 << 4)) != 0) ? 
connector_status_connected : connector_status_disconnected; - I915_WRITE(pipeconf_reg, pipeconf); + intel_uncore_write(uncore, pipeconf_reg, pipeconf); } else { bool restore_vblank = false; int count, detect; @@ -702,9 +705,10 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe) u32 vsync_start = (vsync & 0xffff) + 1; vblank_start = vsync_start; - I915_WRITE(vblank_reg, - (vblank_start - 1) | - ((vblank_end - 1) << 16)); + intel_uncore_write(uncore, + vblank_reg, + (vblank_start - 1) | + ((vblank_end - 1) << 16)); restore_vblank = true; } /* sample in the vertical border, selecting the larger one */ @@ -716,9 +720,10 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe) /* * Wait for the border to be displayed */ - while (I915_READ(pipe_dsl_reg) >= vactive) + while (intel_uncore_read(uncore, pipe_dsl_reg) >= vactive) ; - while ((dsl = I915_READ(pipe_dsl_reg)) <= vsample) + while ((dsl = intel_uncore_read(uncore, pipe_dsl_reg)) <= + vsample) ; /* * Watch ST00 for an entire scanline @@ -728,14 +733,14 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe) do { count++; /* Read the ST00 VGA status register */ - st00 = I915_READ8(_VGA_MSR_WRITE); + st00 = intel_uncore_read8(uncore, _VGA_MSR_WRITE); if (st00 & (1 << 4)) detect++; - } while ((I915_READ(pipe_dsl_reg) == dsl)); + } while ((intel_uncore_read(uncore, pipe_dsl_reg) == dsl)); /* restore vblank if necessary */ if (restore_vblank) - I915_WRITE(vblank_reg, vblank); + intel_uncore_write(uncore, vblank_reg, vblank); /* * If more than 3/4 of the scanline detected a monitor, * then it is assumed to be present. This works even on i830, @@ -748,7 +753,7 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe) } /* Restore previous settings */ - I915_WRITE(bclrpat_reg, save_bclrpat); + intel_uncore_write(uncore, bclrpat_reg, save_bclrpat); return status; } diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index d7272d4ff258..93e411e6ad19 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -8160,15 +8160,15 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv) return val; } -unsigned long i915_mch_val(struct drm_i915_private *dev_priv) +unsigned long i915_mch_val(struct drm_i915_private *i915) { unsigned long m, x, b; u32 tsfs; - tsfs = I915_READ(TSFS); + tsfs = intel_uncore_read(&i915->uncore, TSFS); m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT); - x = I915_READ8(TR1); + x = intel_uncore_read8(&i915->uncore, TR1); b = tsfs & TSFS_INTR_MASK; -- cgit v1.2.3 From e33a4be83a64cd4f127f002abbe7f62b833fa3ac Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Tue, 11 Jun 2019 11:45:44 +0100 Subject: drm/i915: Remove I915_POSTING_READ_FW Only a few call sites remain which have been converted to uncore mmio accessors and so the macro can be removed. 
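The _fw accessors skip the automatic forcewake handling, so they stay confined to critical sections where forcewake and the uncore lock are managed explicitly; the converted i915_gem_flush_ggtt_writes() below keeps exactly that shape (sketch):

    struct intel_uncore *uncore = &dev_priv->uncore;

    spin_lock_irq(&uncore->lock);
    intel_uncore_posting_read_fw(uncore, RING_HEAD(RENDER_RING_BASE));
    spin_unlock_irq(&uncore->lock);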
Signed-off-by: Tvrtko Ursulin Reviewed-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190611104548.30545-2-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/i915_drv.h | 1 - drivers/gpu/drm/i915/i915_gem.c | 9 +++++---- drivers/gpu/drm/i915/i915_irq.c | 2 +- drivers/gpu/drm/i915/intel_guc_submission.c | 4 ++-- drivers/gpu/drm/i915/intel_pm.c | 31 +++++++++++++++-------------- 5 files changed, 24 insertions(+), 23 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index f90bab84ac52..2cbd60c4a5dc 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2877,7 +2877,6 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e, */ #define I915_READ_FW(reg__) __I915_REG_OP(read_fw, dev_priv, (reg__)) #define I915_WRITE_FW(reg__, val__) __I915_REG_OP(write_fw, dev_priv, (reg__), (val__)) -#define POSTING_READ_FW(reg__) __I915_REG_OP(posting_read_fw, dev_priv, (reg__)) /* "Broadcast RGB" property */ #define INTEL_BROADCAST_RGB_AUTO 0 diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 17e8809c5312..4017ecf561f6 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -263,11 +263,12 @@ void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv) i915_gem_chipset_flush(dev_priv); with_intel_runtime_pm(dev_priv, wakeref) { - spin_lock_irq(&dev_priv->uncore.lock); + struct intel_uncore *uncore = &dev_priv->uncore; - POSTING_READ_FW(RING_HEAD(RENDER_RING_BASE)); - - spin_unlock_irq(&dev_priv->uncore.lock); + spin_lock_irq(&uncore->lock); + intel_uncore_posting_read_fw(uncore, + RING_HEAD(RENDER_RING_BASE)); + spin_unlock_irq(&uncore->lock); } } diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 11c451358fb8..9db9fbd0e70c 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -387,7 +387,7 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv, void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, u32 mask) { ilk_update_gt_irq(dev_priv, mask, mask); - POSTING_READ_FW(GTIMR); + intel_uncore_posting_read_fw(&dev_priv->uncore, GTIMR); } void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, u32 mask) diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c index 89592ef778b8..97f6970d8da8 100644 --- a/drivers/gpu/drm/i915/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/intel_guc_submission.c @@ -557,10 +557,10 @@ static void guc_add_request(struct intel_guc *guc, struct i915_request *rq) */ static void flush_ggtt_writes(struct i915_vma *vma) { - struct drm_i915_private *dev_priv = vma->vm->i915; + struct drm_i915_private *i915 = vma->vm->i915; if (i915_vma_is_map_and_fenceable(vma)) - POSTING_READ_FW(GUC_STATUS); + intel_uncore_posting_read_fw(&i915->uncore, GUC_STATUS); } static void inject_preempt_context(struct work_struct *work) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 93e411e6ad19..84588ff8732f 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -1949,6 +1949,7 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state, { struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_uncore *uncore = &dev_priv->uncore; const struct vlv_fifo_state *fifo_state = 
&crtc_state->wm.vlv.fifo_state; int sprite0_start, sprite1_start, fifo_size; @@ -1974,13 +1975,13 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state, * intel_pipe_update_start() has already disabled interrupts * for us, so a plain spin_lock() is sufficient here. */ - spin_lock(&dev_priv->uncore.lock); + spin_lock(&uncore->lock); switch (crtc->pipe) { u32 dsparb, dsparb2, dsparb3; case PIPE_A: - dsparb = I915_READ_FW(DSPARB); - dsparb2 = I915_READ_FW(DSPARB2); + dsparb = intel_uncore_read_fw(uncore, DSPARB); + dsparb2 = intel_uncore_read_fw(uncore, DSPARB2); dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) | VLV_FIFO(SPRITEB, 0xff)); @@ -1992,12 +1993,12 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state, dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) | VLV_FIFO(SPRITEB_HI, sprite1_start >> 8)); - I915_WRITE_FW(DSPARB, dsparb); - I915_WRITE_FW(DSPARB2, dsparb2); + intel_uncore_write_fw(uncore, DSPARB, dsparb); + intel_uncore_write_fw(uncore, DSPARB2, dsparb2); break; case PIPE_B: - dsparb = I915_READ_FW(DSPARB); - dsparb2 = I915_READ_FW(DSPARB2); + dsparb = intel_uncore_read_fw(uncore, DSPARB); + dsparb2 = intel_uncore_read_fw(uncore, DSPARB2); dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) | VLV_FIFO(SPRITED, 0xff)); @@ -2009,12 +2010,12 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state, dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) | VLV_FIFO(SPRITED_HI, sprite1_start >> 8)); - I915_WRITE_FW(DSPARB, dsparb); - I915_WRITE_FW(DSPARB2, dsparb2); + intel_uncore_write_fw(uncore, DSPARB, dsparb); + intel_uncore_write_fw(uncore, DSPARB2, dsparb2); break; case PIPE_C: - dsparb3 = I915_READ_FW(DSPARB3); - dsparb2 = I915_READ_FW(DSPARB2); + dsparb3 = intel_uncore_read_fw(uncore, DSPARB3); + dsparb2 = intel_uncore_read_fw(uncore, DSPARB2); dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) | VLV_FIFO(SPRITEF, 0xff)); @@ -2026,16 +2027,16 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state, dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) | VLV_FIFO(SPRITEF_HI, sprite1_start >> 8)); - I915_WRITE_FW(DSPARB3, dsparb3); - I915_WRITE_FW(DSPARB2, dsparb2); + intel_uncore_write_fw(uncore, DSPARB3, dsparb3); + intel_uncore_write_fw(uncore, DSPARB2, dsparb2); break; default: break; } - POSTING_READ_FW(DSPARB); + intel_uncore_posting_read_fw(uncore, DSPARB); - spin_unlock(&dev_priv->uncore.lock); + spin_unlock(&uncore->lock); } #undef VLV_FIFO -- cgit v1.2.3 From e44d62d1dd29e1d5855b053dd7fa7b2120f50bd7 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Tue, 11 Jun 2019 11:45:45 +0100 Subject: drm/i915: Remove POSTING_READ16 Only a few call sites remain which have been converted to uncore mmio accessors and so the macro can be removed. ENGINE_POSTING_READ16 is added to replace one engine->mmio_base relative call site. Signed-off-by: Tvrtko Ursulin Reviewed-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190611104548.30545-3-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/gt/intel_engine.h | 1 + drivers/gpu/drm/i915/gt/intel_ringbuffer.c | 8 ++++---- drivers/gpu/drm/i915/i915_drv.h | 1 - drivers/gpu/drm/i915/intel_pm.c | 11 ++++++----- 4 files changed, 11 insertions(+), 10 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h index 201bbd2a4faf..1439fa4093ac 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine.h +++ b/drivers/gpu/drm/i915/gt/intel_engine.h @@ -52,6 +52,7 @@ struct drm_printer; #define ENGINE_READ(...) 
__ENGINE_READ_OP(read, __VA_ARGS__) #define ENGINE_READ_FW(...) __ENGINE_READ_OP(read_fw, __VA_ARGS__) #define ENGINE_POSTING_READ(...) __ENGINE_READ_OP(posting_read, __VA_ARGS__) +#define ENGINE_POSTING_READ16(...) __ENGINE_READ_OP(posting_read16, __VA_ARGS__) #define ENGINE_READ64(engine__, lower_reg__, upper_reg__) \ __ENGINE_REG_OP(read64_2x32, (engine__), \ diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c index c834d016c965..b0a71dac7b24 100644 --- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c @@ -976,11 +976,11 @@ i9xx_irq_disable(struct intel_engine_cs *engine) static void i8xx_irq_enable(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->i915; + struct drm_i915_private *i915 = engine->i915; - dev_priv->irq_mask &= ~engine->irq_enable_mask; - I915_WRITE16(GEN2_IMR, dev_priv->irq_mask); - POSTING_READ16(RING_IMR(engine->mmio_base)); + i915->irq_mask &= ~engine->irq_enable_mask; + intel_uncore_write16(&i915->uncore, GEN2_IMR, i915->irq_mask); + ENGINE_POSTING_READ16(engine, RING_IMR); } static void diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 2cbd60c4a5dc..caea128d1052 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2847,7 +2847,6 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e, #define I915_WRITE_NOTRACE(reg__, val__) __I915_REG_OP(write_notrace, dev_priv, (reg__), (val__)) #define POSTING_READ(reg__) __I915_REG_OP(posting_read, dev_priv, (reg__)) -#define POSTING_READ16(reg__) __I915_REG_OP(posting_read16, dev_priv, (reg__)) /* These are untraced mmio-accessors that are only valid to be used inside * critical sections, such as inside IRQ handlers, where forcewake is explicitly diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 84588ff8732f..5a6679e2b6ee 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -6406,13 +6406,14 @@ void intel_init_ipc(struct drm_i915_private *dev_priv) */ DEFINE_SPINLOCK(mchdev_lock); -bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val) +bool ironlake_set_drps(struct drm_i915_private *i915, u8 val) { + struct intel_uncore *uncore = &i915->uncore; u16 rgvswctl; lockdep_assert_held(&mchdev_lock); - rgvswctl = I915_READ16(MEMSWCTL); + rgvswctl = intel_uncore_read16(uncore, MEMSWCTL); if (rgvswctl & MEMCTL_CMD_STS) { DRM_DEBUG("gpu busy, RCS change rejected\n"); return false; /* still busy with another command */ @@ -6420,11 +6421,11 @@ bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val) rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) | (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM; - I915_WRITE16(MEMSWCTL, rgvswctl); - POSTING_READ16(MEMSWCTL); + intel_uncore_write16(uncore, MEMSWCTL, rgvswctl); + intel_uncore_posting_read16(uncore, MEMSWCTL); rgvswctl |= MEMCTL_CMD_STS; - I915_WRITE16(MEMSWCTL, rgvswctl); + intel_uncore_write16(uncore, MEMSWCTL, rgvswctl); return true; } -- cgit v1.2.3 From 54ac6479737be1d00adbacb659c79d3393028c08 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Tue, 11 Jun 2019 11:45:46 +0100 Subject: drm/i915: Remove I915_WRITE_NOTRACE Only a few call sites remain which have been converted to uncore mmio accessors and so the macro can be removed. 
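The _notrace variants exist for hot paths where an i915_reg_rw trace event per access would be mostly noise, such as the bit-banged GMBUS GPIO lines converted below. After conversion the pattern looks roughly like this (sketch, matching the intel_gmbus.c hunks):

    struct intel_uncore *uncore = &bus->dev_priv->uncore;

    /* previously: I915_WRITE_NOTRACE(bus->gpio_reg, reserved | clock_bits); */
    intel_uncore_write_notrace(uncore, bus->gpio_reg, reserved | clock_bits);
    intel_uncore_posting_read(uncore, bus->gpio_reg);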
Signed-off-by: Tvrtko Ursulin Reviewed-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190611104548.30545-4-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/i915_drv.h | 1 - drivers/gpu/drm/i915/intel_gmbus.c | 42 ++++++++++++++++++++++++-------------- 2 files changed, 27 insertions(+), 16 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index caea128d1052..f475b10eb635 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2844,7 +2844,6 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e, #define I915_READ(reg__) __I915_REG_OP(read, dev_priv, (reg__)) #define I915_WRITE(reg__, val__) __I915_REG_OP(write, dev_priv, (reg__), (val__)) #define I915_READ_NOTRACE(reg__) __I915_REG_OP(read_notrace, dev_priv, (reg__)) -#define I915_WRITE_NOTRACE(reg__, val__) __I915_REG_OP(write_notrace, dev_priv, (reg__), (val__)) #define POSTING_READ(reg__) __I915_REG_OP(posting_read, dev_priv, (reg__)) diff --git a/drivers/gpu/drm/i915/intel_gmbus.c b/drivers/gpu/drm/i915/intel_gmbus.c index 969ce8b71e32..5e4c543e82e8 100644 --- a/drivers/gpu/drm/i915/intel_gmbus.c +++ b/drivers/gpu/drm/i915/intel_gmbus.c @@ -201,27 +201,37 @@ static u32 get_reserved(struct intel_gmbus *bus) static int get_clock(void *data) { struct intel_gmbus *bus = data; - struct drm_i915_private *dev_priv = bus->dev_priv; + struct intel_uncore *uncore = &bus->dev_priv->uncore; u32 reserved = get_reserved(bus); - I915_WRITE_NOTRACE(bus->gpio_reg, reserved | GPIO_CLOCK_DIR_MASK); - I915_WRITE_NOTRACE(bus->gpio_reg, reserved); - return (I915_READ_NOTRACE(bus->gpio_reg) & GPIO_CLOCK_VAL_IN) != 0; + + intel_uncore_write_notrace(uncore, + bus->gpio_reg, + reserved | GPIO_CLOCK_DIR_MASK); + intel_uncore_write_notrace(uncore, bus->gpio_reg, reserved); + + return (intel_uncore_read_notrace(uncore, bus->gpio_reg) & + GPIO_CLOCK_VAL_IN) != 0; } static int get_data(void *data) { struct intel_gmbus *bus = data; - struct drm_i915_private *dev_priv = bus->dev_priv; + struct intel_uncore *uncore = &bus->dev_priv->uncore; u32 reserved = get_reserved(bus); - I915_WRITE_NOTRACE(bus->gpio_reg, reserved | GPIO_DATA_DIR_MASK); - I915_WRITE_NOTRACE(bus->gpio_reg, reserved); - return (I915_READ_NOTRACE(bus->gpio_reg) & GPIO_DATA_VAL_IN) != 0; + + intel_uncore_write_notrace(uncore, + bus->gpio_reg, + reserved | GPIO_DATA_DIR_MASK); + intel_uncore_write_notrace(uncore, bus->gpio_reg, reserved); + + return (intel_uncore_read_notrace(uncore, bus->gpio_reg) & + GPIO_DATA_VAL_IN) != 0; } static void set_clock(void *data, int state_high) { struct intel_gmbus *bus = data; - struct drm_i915_private *dev_priv = bus->dev_priv; + struct intel_uncore *uncore = &bus->dev_priv->uncore; u32 reserved = get_reserved(bus); u32 clock_bits; @@ -229,16 +239,18 @@ static void set_clock(void *data, int state_high) clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK; else clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK | - GPIO_CLOCK_VAL_MASK; + GPIO_CLOCK_VAL_MASK; - I915_WRITE_NOTRACE(bus->gpio_reg, reserved | clock_bits); - POSTING_READ(bus->gpio_reg); + intel_uncore_write_notrace(uncore, + bus->gpio_reg, + reserved | clock_bits); + intel_uncore_posting_read(uncore, bus->gpio_reg); } static void set_data(void *data, int state_high) { struct intel_gmbus *bus = data; - struct drm_i915_private *dev_priv = bus->dev_priv; + struct intel_uncore *uncore = &bus->dev_priv->uncore; u32 reserved = get_reserved(bus); u32 
data_bits; @@ -248,8 +260,8 @@ static void set_data(void *data, int state_high) data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK | GPIO_DATA_VAL_MASK; - I915_WRITE_NOTRACE(bus->gpio_reg, reserved | data_bits); - POSTING_READ(bus->gpio_reg); + intel_uncore_write_notrace(uncore, bus->gpio_reg, reserved | data_bits); + intel_uncore_posting_read(uncore, bus->gpio_reg); } static int -- cgit v1.2.3 From 5a31d30b22c04615329132f0c7fa20b4b6079b43 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Tue, 11 Jun 2019 11:45:47 +0100 Subject: drm/i915: Remove I915_READ_NOTRACE Only a few call sites remain which have been converted to uncore mmio accessors and so the macro can be removed. Signed-off-by: Tvrtko Ursulin Reviewed-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190611104548.30545-5-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/gvt/debugfs.c | 4 ++-- drivers/gpu/drm/i915/gvt/firmware.c | 5 +++-- drivers/gpu/drm/i915/i915_drv.c | 6 ++++-- drivers/gpu/drm/i915/i915_drv.h | 1 - drivers/gpu/drm/i915/i915_pmu.c | 8 ++++--- drivers/gpu/drm/i915/intel_dp.c | 43 ++++++++++++++++++++----------------- drivers/gpu/drm/i915/intel_gmbus.c | 11 +++++----- 7 files changed, 43 insertions(+), 35 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gvt/debugfs.c b/drivers/gpu/drm/i915/gvt/debugfs.c index 8a9606f91e68..2fb7b73b260d 100644 --- a/drivers/gpu/drm/i915/gvt/debugfs.c +++ b/drivers/gpu/drm/i915/gvt/debugfs.c @@ -58,12 +58,12 @@ static int mmio_offset_compare(void *priv, static inline int mmio_diff_handler(struct intel_gvt *gvt, u32 offset, void *data) { - struct drm_i915_private *dev_priv = gvt->dev_priv; + struct drm_i915_private *i915 = gvt->dev_priv; struct mmio_diff_param *param = data; struct diff_mmio *node; u32 preg, vreg; - preg = I915_READ_NOTRACE(_MMIO(offset)); + preg = intel_uncore_read_notrace(&i915->uncore, _MMIO(offset)); vreg = vgpu_vreg(param->vgpu, offset); if (preg != vreg) { diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c index 4ac18b447247..049775e8e350 100644 --- a/drivers/gpu/drm/i915/gvt/firmware.c +++ b/drivers/gpu/drm/i915/gvt/firmware.c @@ -68,9 +68,10 @@ static struct bin_attribute firmware_attr = { static int mmio_snapshot_handler(struct intel_gvt *gvt, u32 offset, void *data) { - struct drm_i915_private *dev_priv = gvt->dev_priv; + struct drm_i915_private *i915 = gvt->dev_priv; - *(u32 *)(data + offset) = I915_READ_NOTRACE(_MMIO(offset)); + *(u32 *)(data + offset) = intel_uncore_read_notrace(&i915->uncore, + _MMIO(offset)); return 0; } diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 1af6751e1b36..81ff2c78fd55 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -2708,7 +2708,7 @@ static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv) I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2); } -static int vlv_wait_for_pw_status(struct drm_i915_private *dev_priv, +static int vlv_wait_for_pw_status(struct drm_i915_private *i915, u32 mask, u32 val) { i915_reg_t reg = VLV_GTLC_PW_STATUS; @@ -2722,7 +2722,9 @@ static int vlv_wait_for_pw_status(struct drm_i915_private *dev_priv, * Transitioning between RC6 states should be at most 2ms (see * valleyview_enable_rps) so use a 3ms timeout. 
*/ - ret = wait_for(((reg_value = I915_READ_NOTRACE(reg)) & mask) == val, 3); + ret = wait_for(((reg_value = + intel_uncore_read_notrace(&i915->uncore, reg)) & mask) + == val, 3); /* just trace the final value */ trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index f475b10eb635..7ac63699f055 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2843,7 +2843,6 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e, #define I915_READ(reg__) __I915_REG_OP(read, dev_priv, (reg__)) #define I915_WRITE(reg__, val__) __I915_REG_OP(write, dev_priv, (reg__), (val__)) -#define I915_READ_NOTRACE(reg__) __I915_REG_OP(read_notrace, dev_priv, (reg__)) #define POSTING_READ(reg__) __I915_REG_OP(posting_read, dev_priv, (reg__)) diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index 1ccda0ee4ff5..eb9c0e0e545c 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -227,9 +227,11 @@ frequency_sample(struct drm_i915_private *dev_priv, unsigned int period_ns) if (dev_priv->gt.awake) { intel_wakeref_t wakeref; - with_intel_runtime_pm_if_in_use(dev_priv, wakeref) - val = intel_get_cagf(dev_priv, - I915_READ_NOTRACE(GEN6_RPSTAT1)); + with_intel_runtime_pm_if_in_use(dev_priv, wakeref) { + val = intel_uncore_read_notrace(&dev_priv->uncore, + GEN6_RPSTAT1); + val = intel_get_cagf(dev_priv, val); + } } add_sample_mult(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_ACT], diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 3aef12041672..4336df46fe78 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -1082,13 +1082,13 @@ intel_dp_check_edp(struct intel_dp *intel_dp) static u32 intel_dp_aux_wait_done(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct drm_i915_private *i915 = dp_to_i915(intel_dp); i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp); u32 status; bool done; -#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0) - done = wait_event_timeout(dev_priv->gmbus_wait_queue, C, +#define C (((status = intel_uncore_read_notrace(&i915->uncore, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0) + done = wait_event_timeout(i915->gmbus_wait_queue, C, msecs_to_jiffies_timeout(10)); /* just trace the final value */ @@ -1221,8 +1221,9 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp, u32 aux_send_ctl_flags) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - struct drm_i915_private *dev_priv = + struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); + struct intel_uncore *uncore = &i915->uncore; i915_reg_t ch_ctl, ch_data[5]; u32 aux_clock_divider; enum intel_display_power_domain aux_domain = @@ -1238,7 +1239,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp, for (i = 0; i < ARRAY_SIZE(ch_data); i++) ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i); - aux_wakeref = intel_display_power_get(dev_priv, aux_domain); + aux_wakeref = intel_display_power_get(i915, aux_domain); pps_wakeref = pps_lock(intel_dp); /* @@ -1253,13 +1254,13 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp, * lowest possible wakeup latency and so prevent the cpu from going into * deep sleep states. 
*/ - pm_qos_update_request(&dev_priv->pm_qos, 0); + pm_qos_update_request(&i915->pm_qos, 0); intel_dp_check_edp(intel_dp); /* Try to wait for any previous AUX channel activity */ for (try = 0; try < 3; try++) { - status = I915_READ_NOTRACE(ch_ctl); + status = intel_uncore_read_notrace(uncore, ch_ctl); if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0) break; msleep(1); @@ -1269,7 +1270,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp, if (try == 3) { static u32 last_status = -1; - const u32 status = I915_READ(ch_ctl); + const u32 status = intel_uncore_read(uncore, ch_ctl); if (status != last_status) { WARN(1, "dp_aux_ch not started status 0x%08x\n", @@ -1298,21 +1299,23 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp, for (try = 0; try < 5; try++) { /* Load the send data into the aux channel data registers */ for (i = 0; i < send_bytes; i += 4) - I915_WRITE(ch_data[i >> 2], - intel_dp_pack_aux(send + i, - send_bytes - i)); + intel_uncore_write(uncore, + ch_data[i >> 2], + intel_dp_pack_aux(send + i, + send_bytes - i)); /* Send the command and wait for it to complete */ - I915_WRITE(ch_ctl, send_ctl); + intel_uncore_write(uncore, ch_ctl, send_ctl); status = intel_dp_aux_wait_done(intel_dp); /* Clear done status and any errors */ - I915_WRITE(ch_ctl, - status | - DP_AUX_CH_CTL_DONE | - DP_AUX_CH_CTL_TIME_OUT_ERROR | - DP_AUX_CH_CTL_RECEIVE_ERROR); + intel_uncore_write(uncore, + ch_ctl, + status | + DP_AUX_CH_CTL_DONE | + DP_AUX_CH_CTL_TIME_OUT_ERROR | + DP_AUX_CH_CTL_RECEIVE_ERROR); /* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2 * 400us delay required for errors and timeouts @@ -1375,18 +1378,18 @@ done: recv_bytes = recv_size; for (i = 0; i < recv_bytes; i += 4) - intel_dp_unpack_aux(I915_READ(ch_data[i >> 2]), + intel_dp_unpack_aux(intel_uncore_read(uncore, ch_data[i >> 2]), recv + i, recv_bytes - i); ret = recv_bytes; out: - pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE); + pm_qos_update_request(&i915->pm_qos, PM_QOS_DEFAULT_VALUE); if (vdd) edp_panel_vdd_off(intel_dp, false); pps_unlock(intel_dp, pps_wakeref); - intel_display_power_put_async(dev_priv, aux_domain, aux_wakeref); + intel_display_power_put_async(i915, aux_domain, aux_wakeref); return ret; } diff --git a/drivers/gpu/drm/i915/intel_gmbus.c b/drivers/gpu/drm/i915/intel_gmbus.c index 5e4c543e82e8..aa88e6e7cc65 100644 --- a/drivers/gpu/drm/i915/intel_gmbus.c +++ b/drivers/gpu/drm/i915/intel_gmbus.c @@ -186,14 +186,15 @@ static void bxt_gmbus_clock_gating(struct drm_i915_private *dev_priv, static u32 get_reserved(struct intel_gmbus *bus) { - struct drm_i915_private *dev_priv = bus->dev_priv; + struct drm_i915_private *i915 = bus->dev_priv; + struct intel_uncore *uncore = &i915->uncore; u32 reserved = 0; /* On most chips, these bits must be preserved in software. */ - if (!IS_I830(dev_priv) && !IS_I845G(dev_priv)) - reserved = I915_READ_NOTRACE(bus->gpio_reg) & - (GPIO_DATA_PULLUP_DISABLE | - GPIO_CLOCK_PULLUP_DISABLE); + if (!IS_I830(i915) && !IS_I845G(i915)) + reserved = intel_uncore_read_notrace(uncore, bus->gpio_reg) & + (GPIO_DATA_PULLUP_DISABLE | + GPIO_CLOCK_PULLUP_DISABLE); return reserved; } -- cgit v1.2.3 From 4f5fd91fb327420d799354b9d068ebcf58d8ff45 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Tue, 11 Jun 2019 11:45:48 +0100 Subject: drm/i915: Remove I915_READ16 and I915_WRITE16 Remove call sites in favour of uncore mmio accessors and remove the old macros. 
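The 16-bit accessors convert the same way, e.g. the MEMSWCTL handling (read16 in i915_frequency_info() below, write16/posting_read16 in ironlake_set_drps() earlier in the series); roughly (sketch):

    struct intel_uncore *uncore = &dev_priv->uncore;
    u16 rgvswctl;

    /* previously: rgvswctl = I915_READ16(MEMSWCTL); I915_WRITE16(MEMSWCTL, rgvswctl); */
    rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
    intel_uncore_write16(uncore, MEMSWCTL, rgvswctl);
    intel_uncore_posting_read16(uncore, MEMSWCTL);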
Signed-off-by: Tvrtko Ursulin Reviewed-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190611104548.30545-6-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/gt/intel_ringbuffer.c | 6 +- drivers/gpu/drm/i915/i915_debugfs.c | 37 +++++----- drivers/gpu/drm/i915/i915_drv.h | 3 - drivers/gpu/drm/i915/i915_gpu_error.c | 104 ++++++++++++++++------------- drivers/gpu/drm/i915/i915_irq.c | 40 ++++++----- drivers/gpu/drm/i915/intel_pm.c | 93 ++++++++++++++++---------- 6 files changed, 161 insertions(+), 122 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c index b0a71dac7b24..b3bf47e8162f 100644 --- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c @@ -986,10 +986,10 @@ i8xx_irq_enable(struct intel_engine_cs *engine) static void i8xx_irq_disable(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->i915; + struct drm_i915_private *i915 = engine->i915; - dev_priv->irq_mask |= engine->irq_enable_mask; - I915_WRITE16(GEN2_IMR, dev_priv->irq_mask); + i915->irq_mask |= engine->irq_enable_mask; + intel_uncore_write16(&i915->uncore, GEN2_IMR, i915->irq_mask); } static int diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 3d3d27ff5dab..026fb46020f6 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -826,6 +826,7 @@ static const struct file_operations i915_error_state_fops = { static int i915_frequency_info(struct seq_file *m, void *unused) { struct drm_i915_private *dev_priv = node_to_i915(m->private); + struct intel_uncore *uncore = &dev_priv->uncore; struct intel_rps *rps = &dev_priv->gt_pm.rps; intel_wakeref_t wakeref; int ret = 0; @@ -833,8 +834,8 @@ static int i915_frequency_info(struct seq_file *m, void *unused) wakeref = intel_runtime_pm_get(dev_priv); if (IS_GEN(dev_priv, 5)) { - u16 rgvswctl = I915_READ16(MEMSWCTL); - u16 rgvstat = I915_READ16(MEMSTAT_ILK); + u16 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL); + u16 rgvstat = intel_uncore_read16(uncore, MEMSTAT_ILK); seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf); seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f); @@ -1156,13 +1157,14 @@ static int i915_reset_info(struct seq_file *m, void *unused) static int ironlake_drpc_info(struct seq_file *m) { - struct drm_i915_private *dev_priv = node_to_i915(m->private); + struct drm_i915_private *i915 = node_to_i915(m->private); + struct intel_uncore *uncore = &i915->uncore; u32 rgvmodectl, rstdbyctl; u16 crstandvid; - rgvmodectl = I915_READ(MEMMODECTL); - rstdbyctl = I915_READ(RSTDBYCTL); - crstandvid = I915_READ16(CRSTANDVID); + rgvmodectl = intel_uncore_read(uncore, MEMMODECTL); + rstdbyctl = intel_uncore_read(uncore, RSTDBYCTL); + crstandvid = intel_uncore_read16(uncore, CRSTANDVID); seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN)); seq_printf(m, "Boost freq: %d\n", @@ -1745,6 +1747,7 @@ static const char *swizzle_string(unsigned swizzle) static int i915_swizzle_info(struct seq_file *m, void *data) { struct drm_i915_private *dev_priv = node_to_i915(m->private); + struct intel_uncore *uncore = &dev_priv->uncore; intel_wakeref_t wakeref; wakeref = intel_runtime_pm_get(dev_priv); @@ -1756,30 +1759,30 @@ static int i915_swizzle_info(struct seq_file *m, void *data) if (IS_GEN_RANGE(dev_priv, 3, 4)) { seq_printf(m, "DDC = 0x%08x\n", - I915_READ(DCC)); + intel_uncore_read(uncore, DCC)); 
seq_printf(m, "DDC2 = 0x%08x\n", - I915_READ(DCC2)); + intel_uncore_read(uncore, DCC2)); seq_printf(m, "C0DRB3 = 0x%04x\n", - I915_READ16(C0DRB3)); + intel_uncore_read16(uncore, C0DRB3)); seq_printf(m, "C1DRB3 = 0x%04x\n", - I915_READ16(C1DRB3)); + intel_uncore_read16(uncore, C1DRB3)); } else if (INTEL_GEN(dev_priv) >= 6) { seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n", - I915_READ(MAD_DIMM_C0)); + intel_uncore_read(uncore, MAD_DIMM_C0)); seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n", - I915_READ(MAD_DIMM_C1)); + intel_uncore_read(uncore, MAD_DIMM_C1)); seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n", - I915_READ(MAD_DIMM_C2)); + intel_uncore_read(uncore, MAD_DIMM_C2)); seq_printf(m, "TILECTL = 0x%08x\n", - I915_READ(TILECTL)); + intel_uncore_read(uncore, TILECTL)); if (INTEL_GEN(dev_priv) >= 8) seq_printf(m, "GAMTARBMODE = 0x%08x\n", - I915_READ(GAMTARBMODE)); + intel_uncore_read(uncore, GAMTARBMODE)); else seq_printf(m, "ARB_MODE = 0x%08x\n", - I915_READ(ARB_MODE)); + intel_uncore_read(uncore, ARB_MODE)); seq_printf(m, "DISP_ARB_CTL = 0x%08x\n", - I915_READ(DISP_ARB_CTL)); + intel_uncore_read(uncore, DISP_ARB_CTL)); } if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 7ac63699f055..eb98d285d290 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2838,9 +2838,6 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e, #define __I915_REG_OP(op__, dev_priv__, ...) \ intel_uncore_##op__(&(dev_priv__)->uncore, __VA_ARGS__) -#define I915_READ16(reg__) __I915_REG_OP(read16, dev_priv, (reg__)) -#define I915_WRITE16(reg__, val__) __I915_REG_OP(write16, dev_priv, (reg__), (val__)) - #define I915_READ(reg__) __I915_REG_OP(read, dev_priv, (reg__)) #define I915_WRITE(reg__, val__) __I915_REG_OP(write, dev_priv, (reg__), (val__)) diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index dc026d5cd7a0..d7f164c9f2eb 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -1576,7 +1576,8 @@ static void capture_uc_state(struct i915_gpu_state *error) /* Capture all registers which don't fit into another category. 
*/ static void capture_reg_state(struct i915_gpu_state *error) { - struct drm_i915_private *dev_priv = error->i915; + struct drm_i915_private *i915 = error->i915; + struct intel_uncore *uncore = &i915->uncore; int i; /* General organization @@ -1588,71 +1589,84 @@ static void capture_reg_state(struct i915_gpu_state *error) */ /* 1: Registers specific to a single generation */ - if (IS_VALLEYVIEW(dev_priv)) { - error->gtier[0] = I915_READ(GTIER); - error->ier = I915_READ(VLV_IER); - error->forcewake = I915_READ_FW(FORCEWAKE_VLV); + if (IS_VALLEYVIEW(i915)) { + error->gtier[0] = intel_uncore_read(uncore, GTIER); + error->ier = intel_uncore_read(uncore, VLV_IER); + error->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_VLV); } - if (IS_GEN(dev_priv, 7)) - error->err_int = I915_READ(GEN7_ERR_INT); + if (IS_GEN(i915, 7)) + error->err_int = intel_uncore_read(uncore, GEN7_ERR_INT); - if (INTEL_GEN(dev_priv) >= 8) { - error->fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0); - error->fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1); + if (INTEL_GEN(i915) >= 8) { + error->fault_data0 = intel_uncore_read(uncore, + GEN8_FAULT_TLB_DATA0); + error->fault_data1 = intel_uncore_read(uncore, + GEN8_FAULT_TLB_DATA1); } - if (IS_GEN(dev_priv, 6)) { - error->forcewake = I915_READ_FW(FORCEWAKE); - error->gab_ctl = I915_READ(GAB_CTL); - error->gfx_mode = I915_READ(GFX_MODE); + if (IS_GEN(i915, 6)) { + error->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE); + error->gab_ctl = intel_uncore_read(uncore, GAB_CTL); + error->gfx_mode = intel_uncore_read(uncore, GFX_MODE); } /* 2: Registers which belong to multiple generations */ - if (INTEL_GEN(dev_priv) >= 7) - error->forcewake = I915_READ_FW(FORCEWAKE_MT); + if (INTEL_GEN(i915) >= 7) + error->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_MT); - if (INTEL_GEN(dev_priv) >= 6) { - error->derrmr = I915_READ(DERRMR); - error->error = I915_READ(ERROR_GEN6); - error->done_reg = I915_READ(DONE_REG); + if (INTEL_GEN(i915) >= 6) { + error->derrmr = intel_uncore_read(uncore, DERRMR); + error->error = intel_uncore_read(uncore, ERROR_GEN6); + error->done_reg = intel_uncore_read(uncore, DONE_REG); } - if (INTEL_GEN(dev_priv) >= 5) - error->ccid = I915_READ(CCID(RENDER_RING_BASE)); + if (INTEL_GEN(i915) >= 5) + error->ccid = intel_uncore_read(uncore, CCID(RENDER_RING_BASE)); /* 3: Feature specific registers */ - if (IS_GEN_RANGE(dev_priv, 6, 7)) { - error->gam_ecochk = I915_READ(GAM_ECOCHK); - error->gac_eco = I915_READ(GAC_ECO_BITS); + if (IS_GEN_RANGE(i915, 6, 7)) { + error->gam_ecochk = intel_uncore_read(uncore, GAM_ECOCHK); + error->gac_eco = intel_uncore_read(uncore, GAC_ECO_BITS); } /* 4: Everything else */ - if (INTEL_GEN(dev_priv) >= 11) { - error->ier = I915_READ(GEN8_DE_MISC_IER); - error->gtier[0] = I915_READ(GEN11_RENDER_COPY_INTR_ENABLE); - error->gtier[1] = I915_READ(GEN11_VCS_VECS_INTR_ENABLE); - error->gtier[2] = I915_READ(GEN11_GUC_SG_INTR_ENABLE); - error->gtier[3] = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE); - error->gtier[4] = I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE); - error->gtier[5] = I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE); + if (INTEL_GEN(i915) >= 11) { + error->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER); + error->gtier[0] = + intel_uncore_read(uncore, + GEN11_RENDER_COPY_INTR_ENABLE); + error->gtier[1] = + intel_uncore_read(uncore, GEN11_VCS_VECS_INTR_ENABLE); + error->gtier[2] = + intel_uncore_read(uncore, GEN11_GUC_SG_INTR_ENABLE); + error->gtier[3] = + intel_uncore_read(uncore, + GEN11_GPM_WGBOXPERF_INTR_ENABLE); + error->gtier[4] = + 
intel_uncore_read(uncore, + GEN11_CRYPTO_RSVD_INTR_ENABLE); + error->gtier[5] = + intel_uncore_read(uncore, + GEN11_GUNIT_CSME_INTR_ENABLE); error->ngtier = 6; - } else if (INTEL_GEN(dev_priv) >= 8) { - error->ier = I915_READ(GEN8_DE_MISC_IER); + } else if (INTEL_GEN(i915) >= 8) { + error->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER); for (i = 0; i < 4; i++) - error->gtier[i] = I915_READ(GEN8_GT_IER(i)); + error->gtier[i] = intel_uncore_read(uncore, + GEN8_GT_IER(i)); error->ngtier = 4; - } else if (HAS_PCH_SPLIT(dev_priv)) { - error->ier = I915_READ(DEIER); - error->gtier[0] = I915_READ(GTIER); + } else if (HAS_PCH_SPLIT(i915)) { + error->ier = intel_uncore_read(uncore, DEIER); + error->gtier[0] = intel_uncore_read(uncore, GTIER); error->ngtier = 1; - } else if (IS_GEN(dev_priv, 2)) { - error->ier = I915_READ16(GEN2_IER); - } else if (!IS_VALLEYVIEW(dev_priv)) { - error->ier = I915_READ(GEN2_IER); + } else if (IS_GEN(i915, 2)) { + error->ier = intel_uncore_read16(uncore, GEN2_IER); + } else if (!IS_VALLEYVIEW(i915)) { + error->ier = intel_uncore_read(uncore, GEN2_IER); } - error->eir = I915_READ(EIR); - error->pgtbl_er = I915_READ(PGTBL_ER); + error->eir = intel_uncore_read(uncore, EIR); + error->pgtbl_er = intel_uncore_read(uncore, PGTBL_ER); } static const char * diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 9db9fbd0e70c..cd9edddd6718 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1232,20 +1232,23 @@ int intel_get_crtc_scanline(struct intel_crtc *crtc) static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv) { + struct intel_uncore *uncore = &dev_priv->uncore; u32 busy_up, busy_down, max_avg, min_avg; u8 new_delay; spin_lock(&mchdev_lock); - I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); + intel_uncore_write16(uncore, + MEMINTRSTS, + intel_uncore_read(uncore, MEMINTRSTS)); new_delay = dev_priv->ips.cur_delay; - I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG); - busy_up = I915_READ(RCPREVBSYTUPAVG); - busy_down = I915_READ(RCPREVBSYTDNAVG); - max_avg = I915_READ(RCBMAXAVG); - min_avg = I915_READ(RCBMINAVG); + intel_uncore_write16(uncore, MEMINTRSTS, MEMINT_EVAL_CHG); + busy_up = intel_uncore_read(uncore, RCPREVBSYTUPAVG); + busy_down = intel_uncore_read(uncore, RCPREVBSYTDNAVG); + max_avg = intel_uncore_read(uncore, RCBMAXAVG); + min_avg = intel_uncore_read(uncore, RCBMINAVG); /* Handle RCS change request from hw */ if (busy_up > max_avg) { @@ -4324,8 +4327,10 @@ static int i8xx_irq_postinstall(struct drm_device *dev) struct intel_uncore *uncore = &dev_priv->uncore; u16 enable_mask; - I915_WRITE16(EMR, ~(I915_ERROR_PAGE_TABLE | - I915_ERROR_MEMORY_REFRESH)); + intel_uncore_write16(uncore, + EMR, + ~(I915_ERROR_PAGE_TABLE | + I915_ERROR_MEMORY_REFRESH)); /* Unmask the interrupts that we always want on. 
*/ dev_priv->irq_mask = @@ -4351,17 +4356,18 @@ static int i8xx_irq_postinstall(struct drm_device *dev) return 0; } -static void i8xx_error_irq_ack(struct drm_i915_private *dev_priv, +static void i8xx_error_irq_ack(struct drm_i915_private *i915, u16 *eir, u16 *eir_stuck) { + struct intel_uncore *uncore = &i915->uncore; u16 emr; - *eir = I915_READ16(EIR); + *eir = intel_uncore_read16(uncore, EIR); if (*eir) - I915_WRITE16(EIR, *eir); + intel_uncore_write16(uncore, EIR, *eir); - *eir_stuck = I915_READ16(EIR); + *eir_stuck = intel_uncore_read16(uncore, EIR); if (*eir_stuck == 0) return; @@ -4375,9 +4381,9 @@ static void i8xx_error_irq_ack(struct drm_i915_private *dev_priv, * (or by a GPU reset) so we mask any bit that * remains set. */ - emr = I915_READ16(EMR); - I915_WRITE16(EMR, 0xffff); - I915_WRITE16(EMR, emr | *eir_stuck); + emr = intel_uncore_read16(uncore, EMR); + intel_uncore_write16(uncore, EMR, 0xffff); + intel_uncore_write16(uncore, EMR, emr | *eir_stuck); } static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv, @@ -4443,7 +4449,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg) u16 eir = 0, eir_stuck = 0; u16 iir; - iir = I915_READ16(GEN2_IIR); + iir = intel_uncore_read16(&dev_priv->uncore, GEN2_IIR); if (iir == 0) break; @@ -4456,7 +4462,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg) if (iir & I915_MASTER_ERROR_INTERRUPT) i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck); - I915_WRITE16(GEN2_IIR, iir); + intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir); if (iir & I915_USER_INTERRUPT) intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]); diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 5a6679e2b6ee..2c7f3ebc0117 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -191,8 +191,8 @@ static void i915_ironlake_get_mem_freq(struct drm_i915_private *dev_priv) { u16 ddrpll, csipll; - ddrpll = I915_READ16(DDRMPLL1); - csipll = I915_READ16(CSIPLL0); + ddrpll = intel_uncore_read16(&dev_priv->uncore, DDRMPLL1); + csipll = intel_uncore_read16(&dev_priv->uncore, CSIPLL0); switch (ddrpll & 0xff) { case 0xc: @@ -6432,26 +6432,27 @@ bool ironlake_set_drps(struct drm_i915_private *i915, u8 val) static void ironlake_enable_drps(struct drm_i915_private *dev_priv) { + struct intel_uncore *uncore = &dev_priv->uncore; u32 rgvmodectl; u8 fmax, fmin, fstart, vstart; spin_lock_irq(&mchdev_lock); - rgvmodectl = I915_READ(MEMMODECTL); + rgvmodectl = intel_uncore_read(uncore, MEMMODECTL); /* Enable temp reporting */ - I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN); - I915_WRITE16(TSC1, I915_READ(TSC1) | TSE); + intel_uncore_write16(uncore, PMMISC, I915_READ(PMMISC) | MCPPCE_EN); + intel_uncore_write16(uncore, TSC1, I915_READ(TSC1) | TSE); /* 100ms RC evaluation intervals */ - I915_WRITE(RCUPEI, 100000); - I915_WRITE(RCDNEI, 100000); + intel_uncore_write(uncore, RCUPEI, 100000); + intel_uncore_write(uncore, RCDNEI, 100000); /* Set max/min thresholds to 90ms and 80ms respectively */ - I915_WRITE(RCBMAXAVG, 90000); - I915_WRITE(RCBMINAVG, 80000); + intel_uncore_write(uncore, RCBMAXAVG, 90000); + intel_uncore_write(uncore, RCBMINAVG, 80000); - I915_WRITE(MEMIHYST, 1); + intel_uncore_write(uncore, MEMIHYST, 1); /* Set up min, max, and cur for interrupt handling */ fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT; @@ -6459,8 +6460,8 @@ static void ironlake_enable_drps(struct drm_i915_private *dev_priv) fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT; - vstart = 
(I915_READ(PXVFREQ(fstart)) & PXVFREQ_PX_MASK) >> - PXVFREQ_PX_SHIFT; + vstart = (intel_uncore_read(uncore, PXVFREQ(fstart)) & + PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT; dev_priv->ips.fmax = fmax; /* IPS callback will increase this */ dev_priv->ips.fstart = fstart; @@ -6472,53 +6473,66 @@ static void ironlake_enable_drps(struct drm_i915_private *dev_priv) DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", fmax, fmin, fstart); - I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN); + intel_uncore_write(uncore, + MEMINTREN, + MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN); /* * Interrupts will be enabled in ironlake_irq_postinstall */ - I915_WRITE(VIDSTART, vstart); - POSTING_READ(VIDSTART); + intel_uncore_write(uncore, VIDSTART, vstart); + intel_uncore_posting_read(uncore, VIDSTART); rgvmodectl |= MEMMODE_SWMODE_EN; - I915_WRITE(MEMMODECTL, rgvmodectl); + intel_uncore_write(uncore, MEMMODECTL, rgvmodectl); - if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10)) + if (wait_for_atomic((intel_uncore_read(uncore, MEMSWCTL) & + MEMCTL_CMD_STS) == 0, 10)) DRM_ERROR("stuck trying to change perf mode\n"); mdelay(1); ironlake_set_drps(dev_priv, fstart); - dev_priv->ips.last_count1 = I915_READ(DMIEC) + - I915_READ(DDREC) + I915_READ(CSIEC); + dev_priv->ips.last_count1 = + intel_uncore_read(uncore, DMIEC) + + intel_uncore_read(uncore, DDREC) + + intel_uncore_read(uncore, CSIEC); dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies); - dev_priv->ips.last_count2 = I915_READ(GFXEC); + dev_priv->ips.last_count2 = intel_uncore_read(uncore, GFXEC); dev_priv->ips.last_time2 = ktime_get_raw_ns(); spin_unlock_irq(&mchdev_lock); } -static void ironlake_disable_drps(struct drm_i915_private *dev_priv) +static void ironlake_disable_drps(struct drm_i915_private *i915) { + struct intel_uncore *uncore = &i915->uncore; u16 rgvswctl; spin_lock_irq(&mchdev_lock); - rgvswctl = I915_READ16(MEMSWCTL); + rgvswctl = intel_uncore_read16(uncore, MEMSWCTL); /* Ack interrupts, disable EFC interrupt */ - I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN); - I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG); - I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT); - I915_WRITE(DEIIR, DE_PCU_EVENT); - I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT); + intel_uncore_write(uncore, + MEMINTREN, + intel_uncore_read(uncore, MEMINTREN) & + ~MEMINT_EVAL_CHG_EN); + intel_uncore_write(uncore, MEMINTRSTS, MEMINT_EVAL_CHG); + intel_uncore_write(uncore, + DEIER, + intel_uncore_read(uncore, DEIER) & ~DE_PCU_EVENT); + intel_uncore_write(uncore, DEIIR, DE_PCU_EVENT); + intel_uncore_write(uncore, + DEIMR, + intel_uncore_read(uncore, DEIMR) | DE_PCU_EVENT); /* Go back to the starting frequency */ - ironlake_set_drps(dev_priv, dev_priv->ips.fstart); + ironlake_set_drps(i915, i915->ips.fstart); mdelay(1); rgvswctl |= MEMCTL_CMD_STS; - I915_WRITE(MEMSWCTL, rgvswctl); + intel_uncore_write(uncore, MEMSWCTL, rgvswctl); mdelay(1); spin_unlock_irq(&mchdev_lock); @@ -9504,16 +9518,21 @@ static void g4x_init_clock_gating(struct drm_i915_private *dev_priv) static void i965gm_init_clock_gating(struct drm_i915_private *dev_priv) { - I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE); - I915_WRITE(RENCLK_GATE_D2, 0); - I915_WRITE(DSPCLK_GATE_D, 0); - I915_WRITE(RAMCLK_GATE_D, 0); - I915_WRITE16(DEUC, 0); - I915_WRITE(MI_ARB_STATE, - _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE)); + struct intel_uncore *uncore = &dev_priv->uncore; + + intel_uncore_write(uncore, RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE); + 
intel_uncore_write(uncore, RENCLK_GATE_D2, 0); + intel_uncore_write(uncore, DSPCLK_GATE_D, 0); + intel_uncore_write(uncore, RAMCLK_GATE_D, 0); + intel_uncore_write16(uncore, DEUC, 0); + intel_uncore_write(uncore, + MI_ARB_STATE, + _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE)); /* WaDisable_RenderCache_OperationalFlush:gen4 */ - I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); + intel_uncore_write(uncore, + CACHE_MODE_0, + _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); } static void i965g_init_clock_gating(struct drm_i915_private *dev_priv) -- cgit v1.2.3 From 70972f51819a22e7708094af930346bb7275f06a Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 12 Jun 2019 16:13:11 +0100 Subject: drm/i915: kerneldoc warnings squelched MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit drivers/gpu/drm/i915//gem/i915_gem_shrinker.c:142: warning: Function parameter or member 'shrink' not described in 'i915_gem_shrink' drivers/gpu/drm/i915//gem/i915_gem_shrinker.c:142: warning: Excess function parameter 'flags' description in 'i915_gem_shrink' drivers/gpu/drm/i915//intel_display.c:13443: warning: Function parameter or member '_state' not described in 'intel_atomic_check' drivers/gpu/drm/i915//intel_display.c:13443: warning: Excess function parameter 'state' description in 'intel_atomic_check' Signed-off-by: Chris Wilson Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190612151311.30295-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_shrinker.c | 2 +- drivers/gpu/drm/i915/intel_display.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c index 1b93bc334630..e15f37bef36a 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c @@ -114,7 +114,7 @@ static void try_to_writeback(struct drm_i915_gem_object *obj, * @i915: i915 device * @target: amount of memory to make available, in pages * @nr_scanned: optional output for number of pages scanned (incremental) - * @flags: control flags for selecting cache types + * @shrink: control flags for selecting cache types * * This function is the main interface to the shrinker. It will try to release * up to @target pages of main memory backing storage from buffer objects. diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 1b1ddb48ca7a..e681ed99cdf2 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -13436,7 +13436,7 @@ static int calc_watermark_data(struct intel_atomic_state *state) /** * intel_atomic_check - validate state object * @dev: drm device - * @state: state to validate + * @_state: state to validate */ static int intel_atomic_check(struct drm_device *dev, struct drm_atomic_state *_state) -- cgit v1.2.3 From 0cf289bd5de3f26d28781d81650e5bf022702a7e Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 13 Jun 2019 08:32:54 +0100 Subject: drm/i915: Move fence register tracking from i915->mm to ggtt As the fence registers only apply to regions inside the GGTT is makes more sense that we track these as part of the i915_ggtt and not the general mm. In the next patch, we will then pull the register locking underneath the i915_ggtt.mutex. 
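As a rough illustration of the new layout (sketch only; the actual conversions are in the diff below), callers now reach the fence state, and the renamed struct i915_fence_reg, through the GGTT rather than the device-wide mm:

    /* before: device-wide tracking */
    for (i = 0; i < i915->num_fence_regs; i++)
        reg = &i915->fence_regs[i];

    /* after: fence state lives in the GGTT */
    for (i = 0; i < i915->ggtt.num_fences; i++)
        reg = &i915->ggtt.fence_regs[i];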
Signed-off-by: Chris Wilson Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190613073254.24048-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_mman.c | 4 +- drivers/gpu/drm/i915/gem/i915_gem_pm.c | 2 +- drivers/gpu/drm/i915/gt/intel_reset.c | 6 +-- drivers/gpu/drm/i915/gvt/aperture_gm.c | 7 +-- drivers/gpu/drm/i915/gvt/gvt.h | 4 +- drivers/gpu/drm/i915/i915_debugfs.c | 42 ++++++++------- drivers/gpu/drm/i915/i915_drv.c | 3 +- drivers/gpu/drm/i915/i915_drv.h | 28 ---------- drivers/gpu/drm/i915/i915_gem.c | 52 ++++-------------- drivers/gpu/drm/i915/i915_gem_fence_reg.c | 90 +++++++++++++++++++++---------- drivers/gpu/drm/i915/i915_gem_fence_reg.h | 19 ++++++- drivers/gpu/drm/i915/i915_gem_gtt.c | 2 + drivers/gpu/drm/i915/i915_gem_gtt.h | 14 ++++- drivers/gpu/drm/i915/i915_gpu_error.c | 6 +-- drivers/gpu/drm/i915/i915_vma.h | 2 +- 15 files changed, 144 insertions(+), 137 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c index c7b9b34de01b..a8b8b9c281f1 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c @@ -310,9 +310,9 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf) /* Mark as being mmapped into userspace for later revocation */ assert_rpm_wakelock_held(i915); if (!i915_vma_set_userfault(vma) && !obj->userfault_count++) - list_add(&obj->userfault_link, &i915->mm.userfault_list); + list_add(&obj->userfault_link, &i915->ggtt.userfault_list); if (CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND) - intel_wakeref_auto(&i915->mm.userfault_wakeref, + intel_wakeref_auto(&i915->ggtt.userfault_wakeref, msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)); GEM_BUG_ON(!obj->userfault_count); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c index f68c0ad1aa47..6e75702c5671 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c @@ -126,7 +126,7 @@ void i915_gem_suspend(struct drm_i915_private *i915) { GEM_TRACE("\n"); - intel_wakeref_auto(&i915->mm.userfault_wakeref, 0); + intel_wakeref_auto(&i915->ggtt.userfault_wakeref, 0); flush_workqueue(i915->wq); mutex_lock(&i915->drm.struct_mutex); diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index 6368b37f26d1..8ba7af8b7ced 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -695,19 +695,19 @@ static void revoke_mmaps(struct drm_i915_private *i915) { int i; - for (i = 0; i < i915->num_fence_regs; i++) { + for (i = 0; i < i915->ggtt.num_fences; i++) { struct drm_vma_offset_node *node; struct i915_vma *vma; u64 vma_offset; - vma = READ_ONCE(i915->fence_regs[i].vma); + vma = READ_ONCE(i915->ggtt.fence_regs[i].vma); if (!vma) continue; if (!i915_vma_has_userfault(vma)) continue; - GEM_BUG_ON(vma->fence != &i915->fence_regs[i]); + GEM_BUG_ON(vma->fence != &i915->ggtt.fence_regs[i]); node = &vma->obj->base.vma_node; vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT; unmap_mapping_range(i915->drm.anon_inode->i_mapping, diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c index 1fa2f65c3cd1..4098902bfaeb 100644 --- a/drivers/gpu/drm/i915/gvt/aperture_gm.c +++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c @@ -35,6 +35,7 @@ */ #include "i915_drv.h" +#include "i915_gem_fence_reg.h" #include "gvt.h" static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm) @@ -128,7 
+129,7 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu, { struct intel_gvt *gvt = vgpu->gvt; struct drm_i915_private *dev_priv = gvt->dev_priv; - struct drm_i915_fence_reg *reg; + struct i915_fence_reg *reg; i915_reg_t fence_reg_lo, fence_reg_hi; assert_rpm_wakelock_held(dev_priv); @@ -163,7 +164,7 @@ static void free_vgpu_fence(struct intel_vgpu *vgpu) { struct intel_gvt *gvt = vgpu->gvt; struct drm_i915_private *dev_priv = gvt->dev_priv; - struct drm_i915_fence_reg *reg; + struct i915_fence_reg *reg; u32 i; if (WARN_ON(!vgpu_fence_sz(vgpu))) @@ -187,7 +188,7 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu) { struct intel_gvt *gvt = vgpu->gvt; struct drm_i915_private *dev_priv = gvt->dev_priv; - struct drm_i915_fence_reg *reg; + struct i915_fence_reg *reg; int i; intel_runtime_pm_get(dev_priv); diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index b54f2bdc13a4..dfd10cf82b65 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -87,7 +87,7 @@ struct intel_vgpu_gm { /* Fences owned by a vGPU */ struct intel_vgpu_fence { - struct drm_i915_fence_reg *regs[INTEL_GVT_MAX_NUM_FENCES]; + struct i915_fence_reg *regs[INTEL_GVT_MAX_NUM_FENCES]; u32 base; u32 size; }; @@ -390,7 +390,7 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt); #define gvt_hidden_gmadr_end(gvt) (gvt_hidden_gmadr_base(gvt) \ + gvt_hidden_sz(gvt) - 1) -#define gvt_fence_sz(gvt) (gvt->dev_priv->num_fence_regs) +#define gvt_fence_sz(gvt) ((gvt)->dev_priv->ggtt.num_fences) /* Aperture/GM space definitions for vGPU */ #define vgpu_aperture_offset(vgpu) ((vgpu)->gm.low_gm_node.start) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 026fb46020f6..323863504111 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -143,8 +143,6 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) unsigned int frontbuffer_bits; int pin_count = 0; - lockdep_assert_held(&obj->base.dev->struct_mutex); - seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s", &obj->base, get_active_flag(obj), @@ -160,17 +158,17 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : ""); if (obj->base.name) seq_printf(m, " (name: %d)", obj->base.name); - list_for_each_entry(vma, &obj->vma.list, obj_link) { - if (i915_vma_is_pinned(vma)) - pin_count++; - } - seq_printf(m, " (pinned x %d)", pin_count); - if (obj->pin_global) - seq_printf(m, " (global)"); + + spin_lock(&obj->vma.lock); list_for_each_entry(vma, &obj->vma.list, obj_link) { if (!drm_mm_node_allocated(&vma->node)) continue; + spin_unlock(&obj->vma.lock); + + if (i915_vma_is_pinned(vma)) + pin_count++; + seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s", i915_vma_is_ggtt(vma) ? "g" : "pp", vma->node.start, vma->node.size, @@ -221,9 +219,16 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) vma->fence->id, i915_active_request_isset(&vma->last_fence) ? 
"*" : ""); seq_puts(m, ")"); + + spin_lock(&obj->vma.lock); } + spin_unlock(&obj->vma.lock); + + seq_printf(m, " (pinned x %d)", pin_count); if (obj->stolen) seq_printf(m, " (stolen: %08llx)", obj->stolen->start); + if (obj->pin_global) + seq_printf(m, " (global)"); engine = i915_gem_object_last_write_engine(obj); if (engine) @@ -698,28 +703,25 @@ static int i915_interrupt_info(struct seq_file *m, void *data) static int i915_gem_fence_regs_info(struct seq_file *m, void *data) { - struct drm_i915_private *dev_priv = node_to_i915(m->private); - struct drm_device *dev = &dev_priv->drm; - int i, ret; + struct drm_i915_private *i915 = node_to_i915(m->private); + unsigned int i; - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; + seq_printf(m, "Total fences = %d\n", i915->ggtt.num_fences); - seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs); - for (i = 0; i < dev_priv->num_fence_regs; i++) { - struct i915_vma *vma = dev_priv->fence_regs[i].vma; + rcu_read_lock(); + for (i = 0; i < i915->ggtt.num_fences; i++) { + struct i915_vma *vma = i915->ggtt.fence_regs[i].vma; seq_printf(m, "Fence %d, pin count = %d, object = ", - i, dev_priv->fence_regs[i].pin_count); + i, i915->ggtt.fence_regs[i].pin_count); if (!vma) seq_puts(m, "unused"); else describe_obj(m, vma->obj); seq_putc(m, '\n'); } + rcu_read_unlock(); - mutex_unlock(&dev->struct_mutex); return 0; } diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 81ff2c78fd55..254f7b7df306 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -350,7 +350,7 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data, value = pdev->revision; break; case I915_PARAM_NUM_FENCES_AVAIL: - value = dev_priv->num_fence_regs; + value = dev_priv->ggtt.num_fences; break; case I915_PARAM_HAS_OVERLAY: value = dev_priv->overlay ? 1 : 0; @@ -1625,7 +1625,6 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) intel_uncore_sanitize(dev_priv); intel_gt_init_workarounds(dev_priv); - i915_gem_load_init_fences(dev_priv); /* On the 945G/GM, the chipset reports the MSI capability on the * integrated graphics even though the support isn't actually there diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index eb98d285d290..90d94d904e65 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -757,14 +757,6 @@ struct i915_gem_mm { */ struct list_head shrink_list; - /** List of all objects in gtt_space, currently mmaped by userspace. - * All objects within this list must also be on bound_list. - */ - struct list_head userfault_list; - - /* Manual runtime pm autosuspend delay for user GGTT mmaps */ - struct intel_wakeref_auto userfault_wakeref; - /** * List of objects which are pending destruction. */ @@ -794,9 +786,6 @@ struct i915_gem_mm { struct notifier_block vmap_notifier; struct shrinker shrinker; - /** LRU list of objects with fence regs on them. 
*/ - struct list_head fence_list; - /** * Workqueue to fault in userptr pages, flushed by the execbuf * when required but otherwise left to userspace to try again @@ -1485,9 +1474,6 @@ struct drm_i915_private { /* protects panel power sequencer state */ struct mutex pps_mutex; - struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */ - int num_fence_regs; /* 8 on pre-965, 16 otherwise */ - unsigned int fsb_freq, mem_freq, is_ddr3; unsigned int skl_preferred_vco_freq; unsigned int max_cdclk_freq; @@ -2541,7 +2527,6 @@ void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv); void i915_gem_sanitize(struct drm_i915_private *i915); int i915_gem_init_early(struct drm_i915_private *dev_priv); void i915_gem_cleanup_early(struct drm_i915_private *dev_priv); -void i915_gem_load_init_fences(struct drm_i915_private *dev_priv); int i915_gem_freeze(struct drm_i915_private *dev_priv); int i915_gem_freeze_late(struct drm_i915_private *dev_priv); @@ -2661,19 +2646,6 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev, struct dma_buf *i915_gem_prime_export(struct drm_device *dev, struct drm_gem_object *gem_obj, int flags); -/* i915_gem_fence_reg.c */ -struct drm_i915_fence_reg * -i915_reserve_fence(struct drm_i915_private *dev_priv); -void i915_unreserve_fence(struct drm_i915_fence_reg *fence); - -void i915_gem_restore_fences(struct drm_i915_private *dev_priv); - -void i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv); -void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj, - struct sg_table *pages); -void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj, - struct sg_table *pages); - static inline struct i915_gem_context * __i915_gem_context_lookup_rcu(struct drm_i915_file_private *file_priv, u32 id) { diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 4017ecf561f6..4bbded4aa936 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -884,7 +884,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, return 0; } -void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv) +void i915_gem_runtime_suspend(struct drm_i915_private *i915) { struct drm_i915_gem_object *obj, *on; int i; @@ -897,17 +897,19 @@ void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv) */ list_for_each_entry_safe(obj, on, - &dev_priv->mm.userfault_list, userfault_link) + &i915->ggtt.userfault_list, userfault_link) __i915_gem_object_release_mmap(obj); - /* The fence will be lost when the device powers down. If any were + /* + * The fence will be lost when the device powers down. If any were * in use by hardware (i.e. they are pinned), we should not be powering * down! All other fences will be reacquired by the user upon waking. */ - for (i = 0; i < dev_priv->num_fence_regs; i++) { - struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; + for (i = 0; i < i915->ggtt.num_fences; i++) { + struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i]; - /* Ideally we want to assert that the fence register is not + /* + * Ideally we want to assert that the fence register is not * live at this point (i.e. 
that no piece of code will be * trying to write through fence + GTT, as that both violates * our tracking of activity and associated locking/barriers, @@ -1687,7 +1689,7 @@ void i915_gem_fini_hw(struct drm_i915_private *dev_priv) { GEM_BUG_ON(dev_priv->gt.awake); - intel_wakeref_auto_fini(&dev_priv->mm.userfault_wakeref); + intel_wakeref_auto_fini(&dev_priv->ggtt.userfault_wakeref); i915_gem_suspend_late(dev_priv); intel_disable_gt_powersave(dev_priv); @@ -1729,38 +1731,6 @@ void i915_gem_init_mmio(struct drm_i915_private *i915) i915_gem_sanitize(i915); } -void -i915_gem_load_init_fences(struct drm_i915_private *dev_priv) -{ - int i; - - if (INTEL_GEN(dev_priv) >= 7 && !IS_VALLEYVIEW(dev_priv) && - !IS_CHERRYVIEW(dev_priv)) - dev_priv->num_fence_regs = 32; - else if (INTEL_GEN(dev_priv) >= 4 || - IS_I945G(dev_priv) || IS_I945GM(dev_priv) || - IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) - dev_priv->num_fence_regs = 16; - else - dev_priv->num_fence_regs = 8; - - if (intel_vgpu_active(dev_priv)) - dev_priv->num_fence_regs = - I915_READ(vgtif_reg(avail_rs.fence_num)); - - /* Initialize fence registers to zero */ - for (i = 0; i < dev_priv->num_fence_regs; i++) { - struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i]; - - fence->i915 = dev_priv; - fence->id = i; - list_add_tail(&fence->link, &dev_priv->mm.fence_list); - } - i915_gem_restore_fences(dev_priv); - - i915_gem_detect_bit_6_swizzle(dev_priv); -} - static void i915_gem_init__mm(struct drm_i915_private *i915) { spin_lock_init(&i915->mm.obj_lock); @@ -1770,10 +1740,6 @@ static void i915_gem_init__mm(struct drm_i915_private *i915) INIT_LIST_HEAD(&i915->mm.purge_list); INIT_LIST_HEAD(&i915->mm.shrink_list); - INIT_LIST_HEAD(&i915->mm.fence_list); - - INIT_LIST_HEAD(&i915->mm.userfault_list); - intel_wakeref_auto_init(&i915->mm.userfault_wakeref, i915); i915_gem_init__objects(i915); } diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c index 10aa6e350bfa..1c9466676caf 100644 --- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c +++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c @@ -25,6 +25,7 @@ #include "i915_drv.h" #include "i915_scatterlist.h" +#include "i915_vgpu.h" /** * DOC: fence register handling @@ -58,7 +59,7 @@ #define pipelined 0 -static void i965_write_fence_reg(struct drm_i915_fence_reg *fence, +static void i965_write_fence_reg(struct i915_fence_reg *fence, struct i915_vma *vma) { i915_reg_t fence_reg_lo, fence_reg_hi; @@ -115,7 +116,7 @@ static void i965_write_fence_reg(struct drm_i915_fence_reg *fence, } } -static void i915_write_fence_reg(struct drm_i915_fence_reg *fence, +static void i915_write_fence_reg(struct i915_fence_reg *fence, struct i915_vma *vma) { u32 val; @@ -155,7 +156,7 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *fence, } } -static void i830_write_fence_reg(struct drm_i915_fence_reg *fence, +static void i830_write_fence_reg(struct i915_fence_reg *fence, struct i915_vma *vma) { u32 val; @@ -187,7 +188,7 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *fence, } } -static void fence_write(struct drm_i915_fence_reg *fence, +static void fence_write(struct i915_fence_reg *fence, struct i915_vma *vma) { /* @@ -211,7 +212,7 @@ static void fence_write(struct drm_i915_fence_reg *fence, fence->dirty = false; } -static int fence_update(struct drm_i915_fence_reg *fence, +static int fence_update(struct i915_fence_reg *fence, struct i915_vma *vma) { intel_wakeref_t wakeref; @@ -256,7 +257,7 @@ static int fence_update(struct drm_i915_fence_reg 
*fence, old->fence = NULL; } - list_move(&fence->link, &fence->i915->mm.fence_list); + list_move(&fence->link, &fence->i915->ggtt.fence_list); } /* @@ -280,7 +281,7 @@ static int fence_update(struct drm_i915_fence_reg *fence, if (vma) { vma->fence = fence; - list_move_tail(&fence->link, &fence->i915->mm.fence_list); + list_move_tail(&fence->link, &fence->i915->ggtt.fence_list); } intel_runtime_pm_put(fence->i915, wakeref); @@ -300,7 +301,7 @@ static int fence_update(struct drm_i915_fence_reg *fence, */ int i915_vma_put_fence(struct i915_vma *vma) { - struct drm_i915_fence_reg *fence = vma->fence; + struct i915_fence_reg *fence = vma->fence; if (!fence) return 0; @@ -311,11 +312,11 @@ int i915_vma_put_fence(struct i915_vma *vma) return fence_update(fence, NULL); } -static struct drm_i915_fence_reg *fence_find(struct drm_i915_private *i915) +static struct i915_fence_reg *fence_find(struct drm_i915_private *i915) { - struct drm_i915_fence_reg *fence; + struct i915_fence_reg *fence; - list_for_each_entry(fence, &i915->mm.fence_list, link) { + list_for_each_entry(fence, &i915->ggtt.fence_list, link) { GEM_BUG_ON(fence->vma && fence->vma->fence != fence); if (fence->pin_count) @@ -349,10 +350,9 @@ static struct drm_i915_fence_reg *fence_find(struct drm_i915_private *i915) * * 0 on success, negative error code on failure. */ -int -i915_vma_pin_fence(struct i915_vma *vma) +int i915_vma_pin_fence(struct i915_vma *vma) { - struct drm_i915_fence_reg *fence; + struct i915_fence_reg *fence; struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL; int err; @@ -369,7 +369,7 @@ i915_vma_pin_fence(struct i915_vma *vma) fence->pin_count++; if (!fence->dirty) { list_move_tail(&fence->link, - &fence->i915->mm.fence_list); + &fence->i915->ggtt.fence_list); return 0; } } else if (set) { @@ -404,10 +404,9 @@ out_unpin: * This function walks the fence regs looking for a free one and remove * it from the fence_list. It is used to reserve fence for vGPU to use. */ -struct drm_i915_fence_reg * -i915_reserve_fence(struct drm_i915_private *i915) +struct i915_fence_reg *i915_reserve_fence(struct drm_i915_private *i915) { - struct drm_i915_fence_reg *fence; + struct i915_fence_reg *fence; int count; int ret; @@ -415,7 +414,7 @@ i915_reserve_fence(struct drm_i915_private *i915) /* Keep at least one fence available for the display engine. */ count = 0; - list_for_each_entry(fence, &i915->mm.fence_list, link) + list_for_each_entry(fence, &i915->ggtt.fence_list, link) count += !fence->pin_count; if (count <= 1) return ERR_PTR(-ENOSPC); @@ -441,11 +440,11 @@ i915_reserve_fence(struct drm_i915_private *i915) * * This function add a reserved fence register from vGPU to the fence_list. 
*/ -void i915_unreserve_fence(struct drm_i915_fence_reg *fence) +void i915_unreserve_fence(struct i915_fence_reg *fence) { lockdep_assert_held(&fence->i915->drm.struct_mutex); - list_add(&fence->link, &fence->i915->mm.fence_list); + list_add(&fence->link, &fence->i915->ggtt.fence_list); } /** @@ -461,8 +460,8 @@ void i915_gem_restore_fences(struct drm_i915_private *i915) int i; rcu_read_lock(); /* keep obj alive as we dereference */ - for (i = 0; i < i915->num_fence_regs; i++) { - struct drm_i915_fence_reg *reg = &i915->fence_regs[i]; + for (i = 0; i < i915->ggtt.num_fences; i++) { + struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i]; struct i915_vma *vma = READ_ONCE(reg->vma); GEM_BUG_ON(vma && vma->fence != reg); @@ -534,8 +533,7 @@ void i915_gem_restore_fences(struct drm_i915_private *i915) * Detects bit 6 swizzling of address lookup between IGD access and CPU * access through main memory. */ -void -i915_gem_detect_bit_6_swizzle(struct drm_i915_private *i915) +static void detect_bit_6_swizzle(struct drm_i915_private *i915) { struct intel_uncore *uncore = &i915->uncore; u32 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; @@ -708,8 +706,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *i915) * bit 17 of its physical address and therefore being interpreted differently * by the GPU. */ -static void -i915_gem_swizzle_page(struct page *page) +static void i915_gem_swizzle_page(struct page *page) { char temp[64]; char *vaddr; @@ -798,3 +795,42 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj, i++; } } + +void i915_ggtt_init_fences(struct i915_ggtt *ggtt) +{ + struct drm_i915_private *i915 = ggtt->vm.i915; + int num_fences; + int i; + + INIT_LIST_HEAD(&ggtt->fence_list); + INIT_LIST_HEAD(&ggtt->userfault_list); + intel_wakeref_auto_init(&ggtt->userfault_wakeref, i915); + + detect_bit_6_swizzle(i915); + + if (INTEL_GEN(i915) >= 7 && + !(IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))) + num_fences = 32; + else if (INTEL_GEN(i915) >= 4 || + IS_I945G(i915) || IS_I945GM(i915) || + IS_G33(i915) || IS_PINEVIEW(i915)) + num_fences = 16; + else + num_fences = 8; + + if (intel_vgpu_active(i915)) + num_fences = intel_uncore_read(&i915->uncore, + vgtif_reg(avail_rs.fence_num)); + + /* Initialize fence registers to zero */ + for (i = 0; i < num_fences; i++) { + struct i915_fence_reg *fence = &ggtt->fence_regs[i]; + + fence->i915 = i915; + fence->id = i; + list_add_tail(&fence->link, &ggtt->fence_list); + } + ggtt->num_fences = num_fences; + + i915_gem_restore_fences(i915); +} diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.h b/drivers/gpu/drm/i915/i915_gem_fence_reg.h index 09dcaf14121b..d2da98828179 100644 --- a/drivers/gpu/drm/i915/i915_gem_fence_reg.h +++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.h @@ -26,13 +26,17 @@ #define __I915_FENCE_REG_H__ #include +#include +struct drm_i915_gem_object; struct drm_i915_private; +struct i915_ggtt; struct i915_vma; +struct sg_table; #define I965_FENCE_PAGE 4096UL -struct drm_i915_fence_reg { +struct i915_fence_reg { struct list_head link; struct drm_i915_private *i915; struct i915_vma *vma; @@ -49,4 +53,17 @@ struct drm_i915_fence_reg { bool dirty; }; +/* i915_gem_fence_reg.c */ +struct i915_fence_reg *i915_reserve_fence(struct drm_i915_private *i915); +void i915_unreserve_fence(struct i915_fence_reg *fence); + +void i915_gem_restore_fences(struct drm_i915_private *i915); + +void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj, + struct sg_table *pages); +void i915_gem_object_save_bit_17_swizzle(struct 
drm_i915_gem_object *obj, + struct sg_table *pages); + +void i915_ggtt_init_fences(struct i915_ggtt *ggtt); + #endif diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index e70675bfb51d..7be72388b052 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -3567,6 +3567,8 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv) ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start, ggtt->mappable_end); + i915_ggtt_init_fences(ggtt); + /* * Initialise stolen early so that we may reserve preallocated * objects for the BIOS to KMS transition. diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index 89437d0a721c..63fa357c69de 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -39,6 +39,7 @@ #include #include "gt/intel_reset.h" +#include "i915_gem_fence_reg.h" #include "i915_request.h" #include "i915_scatterlist.h" #include "i915_selftest.h" @@ -61,7 +62,6 @@ #define I915_MAX_NUM_FENCE_BITS 6 struct drm_i915_file_private; -struct drm_i915_fence_reg; struct drm_i915_gem_object; struct i915_vma; @@ -408,6 +408,18 @@ struct i915_ggtt { u32 pin_bias; + unsigned int num_fences; + struct i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; + struct list_head fence_list; + + /** List of all objects in gtt_space, currently mmaped by userspace. + * All objects within this list must also be on bound_list. + */ + struct list_head userfault_list; + + /* Manual runtime pm autosuspend delay for user GGTT mmaps */ + struct intel_wakeref_auto userfault_wakeref; + struct drm_mm_node error_capture; struct drm_mm_node uc_fw; }; diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index d7f164c9f2eb..26c9c0595bdf 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -1127,17 +1127,17 @@ static void gem_record_fences(struct i915_gpu_state *error) int i; if (INTEL_GEN(dev_priv) >= 6) { - for (i = 0; i < dev_priv->num_fence_regs; i++) + for (i = 0; i < dev_priv->ggtt.num_fences; i++) error->fence[i] = intel_uncore_read64(uncore, FENCE_REG_GEN6_LO(i)); } else if (INTEL_GEN(dev_priv) >= 4) { - for (i = 0; i < dev_priv->num_fence_regs; i++) + for (i = 0; i < dev_priv->ggtt.num_fences; i++) error->fence[i] = intel_uncore_read64(uncore, FENCE_REG_965_LO(i)); } else { - for (i = 0; i < dev_priv->num_fence_regs; i++) + for (i = 0; i < dev_priv->ggtt.num_fences; i++) error->fence[i] = intel_uncore_read(uncore, FENCE_REG(i)); } diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h index 0c57ab4fed5d..4b769db649bf 100644 --- a/drivers/gpu/drm/i915/i915_vma.h +++ b/drivers/gpu/drm/i915/i915_vma.h @@ -54,7 +54,7 @@ struct i915_vma { struct drm_i915_gem_object *obj; struct i915_address_space *vm; const struct i915_vma_ops *ops; - struct drm_i915_fence_reg *fence; + struct i915_fence_reg *fence; struct reservation_object *resv; /** Alias of obj->resv */ struct sg_table *pages; void __iomem *iomap; -- cgit v1.2.3 From 5e3fb2a5b528bbb6090bea3f345eb72d8c8b808d Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 13 Jun 2019 13:28:42 +0100 Subject: drm/i915: Enable refcount debugging for default debug levels refcount_t is our first line of defence against use-after-free, so let's enable it for debugging. 
Signed-off-by: Chris Wilson Acked-by: Rodrigo Vivi Link: https://patchwork.freedesktop.org/patch/msgid/20190613122842.4840-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/Kconfig.debug | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug index 09aa0f4c8bf1..8d922bb4d953 100644 --- a/drivers/gpu/drm/i915/Kconfig.debug +++ b/drivers/gpu/drm/i915/Kconfig.debug @@ -21,6 +21,7 @@ config DRM_I915_DEBUG depends on DRM_I915 select DEBUG_FS select PREEMPT_COUNT + select REFCOUNT_FULL select I2C_CHARDEV select STACKDEPOT select DRM_DP_AUX_CHARDEV -- cgit v1.2.3 From a09507682e45d62e64c93cdb810de6a280424f17 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 14 Jun 2019 12:10:52 +0100 Subject: drm/i915: Discard some redundant cache domain flushes Since commit a679f58d0510 ("drm/i915: Flush pages on acquisition"), we flush objects on acquire their pages and as such when we create an object for the purpose of writing into it, we do not need to manually flush. Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190614111053.25615-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c | 6 ------ drivers/gpu/drm/i915/gt/selftest_workarounds.c | 6 ------ drivers/gpu/drm/i915/intel_guc_log.c | 11 +---------- drivers/gpu/drm/i915/intel_overlay.c | 8 -------- 4 files changed, 1 insertion(+), 30 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c index 74b0e5871c4b..9e2878a59023 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c @@ -209,12 +209,6 @@ gpu_fill_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 value) i915_gem_object_flush_map(obj); i915_gem_object_unpin_map(obj); - i915_gem_object_lock(obj); - err = i915_gem_object_set_to_gtt_domain(obj, false); - i915_gem_object_unlock(obj); - if (err) - goto err; - vma = i915_vma_instance(obj, vma->vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c index c8d335d63f9c..93e9579b8a4f 100644 --- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c +++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c @@ -368,12 +368,6 @@ static struct i915_vma *create_batch(struct i915_gem_context *ctx) if (err) goto err_obj; - i915_gem_object_lock(obj); - err = i915_gem_object_set_to_wc_domain(obj, true); - i915_gem_object_unlock(obj); - if (err) - goto err_obj; - return vma; err_obj: diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c index 67eadc82c396..4f9c53681f92 100644 --- a/drivers/gpu/drm/i915/intel_guc_log.c +++ b/drivers/gpu/drm/i915/intel_guc_log.c @@ -344,29 +344,20 @@ static void capture_logs_work(struct work_struct *work) static int guc_log_map(struct intel_guc_log *log) { void *vaddr; - int ret; lockdep_assert_held(&log->relay.lock); if (!log->vma) return -ENODEV; - i915_gem_object_lock(log->vma->obj); - ret = i915_gem_object_set_to_wc_domain(log->vma->obj, true); - i915_gem_object_unlock(log->vma->obj); - if (ret) - return ret; - /* * Create a WC (Uncached for read) vmalloc mapping of log * buffer pages, so that we can directly get the data * (up-to-date) from memory. 
*/ vaddr = i915_gem_object_pin_map(log->vma->obj, I915_MAP_WC); - if (IS_ERR(vaddr)) { - DRM_ERROR("Couldn't map log buffer pages %d\n", ret); + if (IS_ERR(vaddr)) return PTR_ERR(vaddr); - } log->relay.buf_addr = vaddr; diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index a2ac06a08715..21339b7f6a3e 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -1377,12 +1377,6 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv) if (ret) goto out_free; - i915_gem_object_lock(overlay->reg_bo); - ret = i915_gem_object_set_to_gtt_domain(overlay->reg_bo, true); - i915_gem_object_unlock(overlay->reg_bo); - if (ret) - goto out_reg_bo; - memset_io(overlay->regs, 0, sizeof(struct overlay_registers)); update_polyphase_filter(overlay->regs); update_reg_attrs(overlay, overlay->regs); @@ -1391,8 +1385,6 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv) DRM_INFO("Initialized overlay support.\n"); return; -out_reg_bo: - i915_gem_object_put(overlay->reg_bo); out_free: kfree(overlay); } -- cgit v1.2.3 From 6e4e9708614aaf743fd9eb29cc2738ab7b501c3f Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 14 Jun 2019 12:10:53 +0100 Subject: drm/i915: Execute signal callbacks from no-op i915_request_wait If we enter i915_request_wait() with an already completed request, but unsignaled dma-fence, signal the fence before returning. This allows us to execute any of the signal callbacks at the earliest opportunity. v2: Also signal after busyspin success Signed-off-by: Chris Wilson Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190614111053.25615-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_request.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 1cbc3ef4fc27..5ee1ef92a9d9 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -1437,7 +1437,7 @@ long i915_request_wait(struct i915_request *rq, might_sleep(); GEM_BUG_ON(timeout < 0); - if (i915_request_completed(rq)) + if (dma_fence_is_signaled(&rq->fence)) return timeout; if (!timeout) @@ -1470,8 +1470,10 @@ long i915_request_wait(struct i915_request *rq, * duration, which we currently lack. */ if (CONFIG_DRM_I915_SPIN_REQUEST && - __i915_spin_request(rq, state, CONFIG_DRM_I915_SPIN_REQUEST)) + __i915_spin_request(rq, state, CONFIG_DRM_I915_SPIN_REQUEST)) { + dma_fence_signal(&rq->fence); goto out; + } /* * This client is about to stall waiting for the GPU. In many cases -- cgit v1.2.3 From 84383d2e8d7c50dc344a1acf7b5b4d32cd1569fc Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 14 Jun 2019 08:09:46 +0100 Subject: drm/i915: Refine i915_reset.lock_map We already use a mutex to serialise i915_reset() and wedging, so all we need it to link that into i915_request_wait() and we have our lock cycle detection. 
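Roughly, the waiter side only takes the lockdep annotation of that mutex around the wait (an illustrative sketch; the real change is in the i915_request.c hunk below):

    /* tell lockdep that a request wait depends on the reset/wedge mutex */
    mutex_acquire(&rq->i915->gpu_error.wedge_mutex.dep_map, 0, 0, _THIS_IP_);
    /* ... wait for the request to complete ... */
    mutex_release(&rq->i915->gpu_error.wedge_mutex.dep_map, 0, _THIS_IP_);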
v2.5: Take error mutex for selftests Signed-off-by: Chris Wilson Cc: Mika Kuoppala Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190614071023.17929-3-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_reset.c | 6 ++---- drivers/gpu/drm/i915/i915_drv.h | 8 -------- drivers/gpu/drm/i915/i915_gem.c | 3 --- drivers/gpu/drm/i915/i915_request.c | 12 ++++++++++-- drivers/gpu/drm/i915/selftests/mock_gem_device.c | 2 -- 5 files changed, 12 insertions(+), 19 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index 8ba7af8b7ced..41a294f5cc19 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -978,7 +978,7 @@ void i915_reset(struct drm_i915_private *i915, might_sleep(); GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags)); - lock_map_acquire(&i915->gt.reset_lockmap); + mutex_lock(&error->wedge_mutex); /* Clear any previous failed attempts at recovery. Time to try again. */ if (!__i915_gem_unset_wedged(i915)) @@ -1031,7 +1031,7 @@ void i915_reset(struct drm_i915_private *i915, finish: reset_finish(i915); unlock: - lock_map_release(&i915->gt.reset_lockmap); + mutex_unlock(&error->wedge_mutex); return; taint: @@ -1147,9 +1147,7 @@ static void i915_reset_device(struct drm_i915_private *i915, /* Flush everyone using a resource about to be clobbered */ synchronize_srcu_expedited(&error->reset_backoff_srcu); - mutex_lock(&error->wedge_mutex); i915_reset(i915, engine_mask, reason); - mutex_unlock(&error->wedge_mutex); intel_finish_reset(i915); } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 90d94d904e65..3683ef6d4c28 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1901,14 +1901,6 @@ struct drm_i915_private { ktime_t last_init_time; struct i915_vma *scratch; - - /* - * We must never wait on the GPU while holding a lock as we - * may need to perform a GPU reset. So while we don't need to - * serialise wait/reset with an explicit lock, we do want - * lockdep to detect potential dependency cycles. - */ - struct lockdep_map reset_lockmap; } gt; struct { diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 4bbded4aa936..7232361973fd 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1746,7 +1746,6 @@ static void i915_gem_init__mm(struct drm_i915_private *i915) int i915_gem_init_early(struct drm_i915_private *dev_priv) { - static struct lock_class_key reset_key; int err; intel_gt_pm_init(dev_priv); @@ -1754,8 +1753,6 @@ int i915_gem_init_early(struct drm_i915_private *dev_priv) INIT_LIST_HEAD(&dev_priv->gt.active_rings); INIT_LIST_HEAD(&dev_priv->gt.closed_vma); spin_lock_init(&dev_priv->gt.closed_lock); - lockdep_init_map(&dev_priv->gt.reset_lockmap, - "i915.reset", &reset_key, 0); i915_gem_init__mm(dev_priv); i915_gem_init__pm(dev_priv); diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 5ee1ef92a9d9..da76e4d1c7f1 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -1444,7 +1444,15 @@ long i915_request_wait(struct i915_request *rq, return -ETIME; trace_i915_request_wait_begin(rq, flags); - lock_map_acquire(&rq->i915->gt.reset_lockmap); + + /* + * We must never wait on the GPU while holding a lock as we + * may need to perform a GPU reset. 
So while we don't need to + * serialise wait/reset with an explicit lock, we do want + * lockdep to detect potential dependency cycles. + */ + mutex_acquire(&rq->i915->gpu_error.wedge_mutex.dep_map, + 0, 0, _THIS_IP_); /* * Optimistic spin before touching IRQs. @@ -1520,7 +1528,7 @@ long i915_request_wait(struct i915_request *rq, dma_fence_remove_callback(&rq->fence, &wait.cb); out: - lock_map_release(&rq->i915->gt.reset_lockmap); + mutex_release(&rq->i915->gpu_error.wedge_mutex.dep_map, 0, _THIS_IP_); trace_i915_request_wait_end(rq); return timeout; } diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index 1e9ffced78c1..b7f3fbb4ae89 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -130,7 +130,6 @@ static struct dev_pm_domain pm_domain = { struct drm_i915_private *mock_gem_device(void) { - static struct lock_class_key reset_key; struct drm_i915_private *i915; struct pci_dev *pdev; int err; @@ -205,7 +204,6 @@ struct drm_i915_private *mock_gem_device(void) INIT_LIST_HEAD(&i915->gt.active_rings); INIT_LIST_HEAD(&i915->gt.closed_vma); spin_lock_init(&i915->gt.closed_lock); - lockdep_init_map(&i915->gt.reset_lockmap, "i915.reset", &reset_key, 0); mutex_lock(&i915->drm.struct_mutex); -- cgit v1.2.3 From cc49abc2460fadcc5fc8ec703538100405a405d4 Mon Sep 17 00:00:00 2001 From: Matt Roper Date: Wed, 12 Jun 2019 11:36:31 -0700 Subject: drm/i915: Add Wa_1409120013:icl,ehl This chicken bit should be set before enabling FBC to avoid screen corruption when the plane size has odd vertical and horizontal dimensions. It is safe to leave the bit set even when FBC is disabled. v2: - The bspec's name for this bit on these platforms ("Spare 14") is pretty meaningless. Let's rename the bit definition to something that more accurately reflects what the bit really does. (Clint) v3: - The chicken register was already defined (along with a few other gen9-specific bits) farther down. Just add the new bit definition there. 
(Clint) Cc: Clinton Taylor Signed-off-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/20190612183631.30540-1-matthew.d.roper@intel.com Reviewed-by: Clint Taylor --- drivers/gpu/drm/i915/i915_reg.h | 1 + drivers/gpu/drm/i915/intel_fbc.c | 4 ++++ 2 files changed, 5 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index d0c262367f09..daee496aade6 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -3163,6 +3163,7 @@ enum i915_power_well_id { #define ILK_DPFC_FENCE_YOFF _MMIO(0x43218) #define ILK_DPFC_CHICKEN _MMIO(0x43224) #define ILK_DPFC_DISABLE_DUMMY0 (1 << 8) +#define ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL (1 << 14) #define ILK_DPFC_NUKE_ON_ANY_MODIFICATION (1 << 23) #define ILK_FBC_RT_BASE _MMIO(0x2128) #define ILK_FBC_RT_VALID (1 << 0) diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c index 5679f2fffb7c..d36cada2cc7d 100644 --- a/drivers/gpu/drm/i915/intel_fbc.c +++ b/drivers/gpu/drm/i915/intel_fbc.c @@ -344,6 +344,10 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv) HSW_FBCQ_DIS); } + if (IS_GEN(dev_priv, 11)) + /* Wa_1409120013:icl,ehl */ + I915_WRITE(ILK_DPFC_CHICKEN, ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL); + I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); intel_fbc_recompress(dev_priv); -- cgit v1.2.3 From d5b6c275d04aacffc06b5bad837a6805091fb7c6 Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Thu, 13 Jun 2019 16:21:49 -0700 Subject: drm/i915: prefer i915_runtime_pm in intel_runtime function As a first step towards updating the code to work on the runtime_pm structure instead of i915, rework all the internals to use and pass around that. v2: add comment for kdev (Jani), move rpm init after pdev init for mock_device Signed-off-by: Daniele Ceraolo Spurio Cc: Jani Nikula Cc: Imre Deak Reviewed-by: Chris Wilson Acked-by: Imre Deak Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190613232156.34940-2-daniele.ceraolospurio@intel.com --- drivers/gpu/drm/i915/i915_drv.h | 2 + drivers/gpu/drm/i915/intel_drv.h | 10 +- drivers/gpu/drm/i915/intel_runtime_pm.c | 122 +++++++++++------------ drivers/gpu/drm/i915/selftests/mock_gem_device.c | 4 +- 4 files changed, 68 insertions(+), 70 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 3683ef6d4c28..0a3cd4bb7c89 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1058,6 +1058,8 @@ struct skl_wm_params { */ struct i915_runtime_pm { atomic_t wakeref_count; + struct device *kdev; /* points to i915->drm.pdev->dev */ + bool available; bool suspended; bool irqs_enabled; diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 3e337317f77e..aec40adf4876 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -1656,13 +1656,17 @@ ____assert_rpm_wakelock_held(struct i915_runtime_pm *rpm, int wakeref_count) } static inline void -assert_rpm_raw_wakeref_held(struct drm_i915_private *i915) +__assert_rpm_raw_wakeref_held(struct i915_runtime_pm *rpm) { - struct i915_runtime_pm *rpm = &i915->runtime_pm; - ____assert_rpm_raw_wakeref_held(rpm, atomic_read(&rpm->wakeref_count)); } +static inline void +assert_rpm_raw_wakeref_held(struct drm_i915_private *i915) +{ + __assert_rpm_raw_wakeref_held(&i915->runtime_pm); +} + static inline void __assert_rpm_wakelock_held(struct 
i915_runtime_pm *rpm) { diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index af3c1ada1b2e..7c602f5c748d 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -75,21 +75,18 @@ static void __print_depot_stack(depot_stack_handle_t stack, stack_trace_snprint(buf, sz, entries, nr_entries, indent); } -static void init_intel_runtime_pm_wakeref(struct drm_i915_private *i915) +static void init_intel_runtime_pm_wakeref(struct i915_runtime_pm *rpm) { - struct i915_runtime_pm *rpm = &i915->runtime_pm; - spin_lock_init(&rpm->debug.lock); } static noinline depot_stack_handle_t -track_intel_runtime_pm_wakeref(struct drm_i915_private *i915) +track_intel_runtime_pm_wakeref(struct i915_runtime_pm *rpm) { - struct i915_runtime_pm *rpm = &i915->runtime_pm; depot_stack_handle_t stack, *stacks; unsigned long flags; - if (!HAS_RUNTIME_PM(i915)) + if (!rpm->available) return -1; stack = __save_depot_stack(); @@ -116,10 +113,9 @@ track_intel_runtime_pm_wakeref(struct drm_i915_private *i915) return stack; } -static void untrack_intel_runtime_pm_wakeref(struct drm_i915_private *i915, +static void untrack_intel_runtime_pm_wakeref(struct i915_runtime_pm *rpm, depot_stack_handle_t stack) { - struct i915_runtime_pm *rpm = &i915->runtime_pm; unsigned long flags, n; bool found = false; @@ -237,9 +233,8 @@ dump_and_free_wakeref_tracking(struct intel_runtime_pm_debug *debug) } static noinline void -__intel_wakeref_dec_and_check_tracking(struct drm_i915_private *i915) +__intel_wakeref_dec_and_check_tracking(struct i915_runtime_pm *rpm) { - struct i915_runtime_pm *rpm = &i915->runtime_pm; struct intel_runtime_pm_debug dbg = {}; unsigned long flags; @@ -255,9 +250,8 @@ __intel_wakeref_dec_and_check_tracking(struct drm_i915_private *i915) } static noinline void -untrack_all_intel_runtime_pm_wakerefs(struct drm_i915_private *i915) +untrack_all_intel_runtime_pm_wakerefs(struct i915_runtime_pm *rpm) { - struct i915_runtime_pm *rpm = &i915->runtime_pm; struct intel_runtime_pm_debug dbg = {}; unsigned long flags; @@ -308,76 +302,70 @@ out: #else -static void init_intel_runtime_pm_wakeref(struct drm_i915_private *i915) +static void init_intel_runtime_pm_wakeref(struct i915_runtime_pm *rpm) { } static depot_stack_handle_t -track_intel_runtime_pm_wakeref(struct drm_i915_private *i915) +track_intel_runtime_pm_wakeref(struct i915_runtime_pm *rpm) { return -1; } -static void untrack_intel_runtime_pm_wakeref(struct drm_i915_private *i915, +static void untrack_intel_runtime_pm_wakeref(struct i915_runtime_pm *rpm, intel_wakeref_t wref) { } static void -__intel_wakeref_dec_and_check_tracking(struct drm_i915_private *i915) +__intel_wakeref_dec_and_check_tracking(struct i915_runtime_pm *rpm) { - atomic_dec(&i915->runtime_pm.wakeref_count); + atomic_dec(&rpm->wakeref_count); } static void -untrack_all_intel_runtime_pm_wakerefs(struct drm_i915_private *i915) +untrack_all_intel_runtime_pm_wakerefs(struct i915_runtime_pm *rpm) { } #endif static void -intel_runtime_pm_acquire(struct drm_i915_private *i915, bool wakelock) +intel_runtime_pm_acquire(struct i915_runtime_pm *rpm, bool wakelock) { - struct i915_runtime_pm *rpm = &i915->runtime_pm; - if (wakelock) { atomic_add(1 + INTEL_RPM_WAKELOCK_BIAS, &rpm->wakeref_count); - assert_rpm_wakelock_held(i915); + __assert_rpm_wakelock_held(rpm); } else { atomic_inc(&rpm->wakeref_count); - assert_rpm_raw_wakeref_held(i915); + __assert_rpm_raw_wakeref_held(rpm); } } static void -intel_runtime_pm_release(struct 
drm_i915_private *i915, int wakelock) +intel_runtime_pm_release(struct i915_runtime_pm *rpm, int wakelock) { - struct i915_runtime_pm *rpm = &i915->runtime_pm; - if (wakelock) { - assert_rpm_wakelock_held(i915); + __assert_rpm_wakelock_held(rpm); atomic_sub(INTEL_RPM_WAKELOCK_BIAS, &rpm->wakeref_count); } else { - assert_rpm_raw_wakeref_held(i915); + __assert_rpm_raw_wakeref_held(rpm); } - __intel_wakeref_dec_and_check_tracking(i915); + __intel_wakeref_dec_and_check_tracking(rpm); } -static intel_wakeref_t __intel_runtime_pm_get(struct drm_i915_private *i915, +static intel_wakeref_t __intel_runtime_pm_get(struct i915_runtime_pm *rpm, bool wakelock) { - struct pci_dev *pdev = i915->drm.pdev; - struct device *kdev = &pdev->dev; int ret; - ret = pm_runtime_get_sync(kdev); + ret = pm_runtime_get_sync(rpm->kdev); WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret); - intel_runtime_pm_acquire(i915, wakelock); + intel_runtime_pm_acquire(rpm, wakelock); - return track_intel_runtime_pm_wakeref(i915); + return track_intel_runtime_pm_wakeref(rpm); } /** @@ -400,7 +388,7 @@ static intel_wakeref_t __intel_runtime_pm_get(struct drm_i915_private *i915, intel_wakeref_t intel_runtime_pm_get_raw(struct drm_i915_private *i915) { - return __intel_runtime_pm_get(i915, false); + return __intel_runtime_pm_get(&i915->runtime_pm, false); } /** @@ -417,7 +405,7 @@ intel_wakeref_t intel_runtime_pm_get_raw(struct drm_i915_private *i915) */ intel_wakeref_t intel_runtime_pm_get(struct drm_i915_private *i915) { - return __intel_runtime_pm_get(i915, true); + return __intel_runtime_pm_get(&i915->runtime_pm, true); } /** @@ -436,23 +424,22 @@ intel_wakeref_t intel_runtime_pm_get(struct drm_i915_private *i915) */ intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915) { - if (IS_ENABLED(CONFIG_PM)) { - struct pci_dev *pdev = i915->drm.pdev; - struct device *kdev = &pdev->dev; + struct i915_runtime_pm *rpm = &i915->runtime_pm; + if (IS_ENABLED(CONFIG_PM)) { /* * In cases runtime PM is disabled by the RPM core and we get * an -EINVAL return value we are not supposed to call this * function, since the power state is undefined. This applies * atm to the late/early system suspend/resume handlers. 
*/ - if (pm_runtime_get_if_in_use(kdev) <= 0) + if (pm_runtime_get_if_in_use(rpm->kdev) <= 0) return 0; } - intel_runtime_pm_acquire(i915, true); + intel_runtime_pm_acquire(rpm, true); - return track_intel_runtime_pm_wakeref(i915); + return track_intel_runtime_pm_wakeref(rpm); } /** @@ -476,27 +463,25 @@ intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915) */ intel_wakeref_t intel_runtime_pm_get_noresume(struct drm_i915_private *i915) { - struct pci_dev *pdev = i915->drm.pdev; - struct device *kdev = &pdev->dev; + struct i915_runtime_pm *rpm = &i915->runtime_pm; - assert_rpm_wakelock_held(i915); - pm_runtime_get_noresume(kdev); + __assert_rpm_wakelock_held(rpm); + pm_runtime_get_noresume(rpm->kdev); - intel_runtime_pm_acquire(i915, true); + intel_runtime_pm_acquire(rpm, true); - return track_intel_runtime_pm_wakeref(i915); + return track_intel_runtime_pm_wakeref(rpm); } -static void __intel_runtime_pm_put(struct drm_i915_private *i915, +static void __intel_runtime_pm_put(struct i915_runtime_pm *rpm, intel_wakeref_t wref, bool wakelock) { - struct pci_dev *pdev = i915->drm.pdev; - struct device *kdev = &pdev->dev; + struct device *kdev = rpm->kdev; - untrack_intel_runtime_pm_wakeref(i915, wref); + untrack_intel_runtime_pm_wakeref(rpm, wref); - intel_runtime_pm_release(i915, wakelock); + intel_runtime_pm_release(rpm, wakelock); pm_runtime_mark_last_busy(kdev); pm_runtime_put_autosuspend(kdev); @@ -514,7 +499,7 @@ static void __intel_runtime_pm_put(struct drm_i915_private *i915, void intel_runtime_pm_put_raw(struct drm_i915_private *i915, intel_wakeref_t wref) { - __intel_runtime_pm_put(i915, wref, false); + __intel_runtime_pm_put(&i915->runtime_pm, wref, false); } /** @@ -531,7 +516,7 @@ intel_runtime_pm_put_raw(struct drm_i915_private *i915, intel_wakeref_t wref) */ void intel_runtime_pm_put_unchecked(struct drm_i915_private *i915) { - __intel_runtime_pm_put(i915, -1, true); + __intel_runtime_pm_put(&i915->runtime_pm, -1, true); } #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) @@ -546,7 +531,7 @@ void intel_runtime_pm_put_unchecked(struct drm_i915_private *i915) */ void intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref) { - __intel_runtime_pm_put(i915, wref, true); + __intel_runtime_pm_put(&i915->runtime_pm, wref, true); } #endif @@ -562,8 +547,8 @@ void intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref) */ void intel_runtime_pm_enable(struct drm_i915_private *i915) { - struct pci_dev *pdev = i915->drm.pdev; - struct device *kdev = &pdev->dev; + struct i915_runtime_pm *rpm = &i915->runtime_pm; + struct device *kdev = rpm->kdev; /* * Disable the system suspend direct complete optimization, which can @@ -584,7 +569,7 @@ void intel_runtime_pm_enable(struct drm_i915_private *i915) * so the driver's own RPM reference tracking asserts also work on * platforms without RPM support. 
*/ - if (!HAS_RUNTIME_PM(i915)) { + if (!rpm->available) { int ret; pm_runtime_dont_use_autosuspend(kdev); @@ -604,8 +589,8 @@ void intel_runtime_pm_enable(struct drm_i915_private *i915) void intel_runtime_pm_disable(struct drm_i915_private *i915) { - struct pci_dev *pdev = i915->drm.pdev; - struct device *kdev = &pdev->dev; + struct i915_runtime_pm *rpm = &i915->runtime_pm; + struct device *kdev = rpm->kdev; /* Transfer rpm ownership back to core */ WARN(pm_runtime_get_sync(kdev) < 0, @@ -613,7 +598,7 @@ void intel_runtime_pm_disable(struct drm_i915_private *i915) pm_runtime_dont_use_autosuspend(kdev); - if (!HAS_RUNTIME_PM(i915)) + if (!rpm->available) pm_runtime_put(kdev); } @@ -627,10 +612,17 @@ void intel_runtime_pm_cleanup(struct drm_i915_private *i915) intel_rpm_raw_wakeref_count(count), intel_rpm_wakelock_count(count)); - untrack_all_intel_runtime_pm_wakerefs(i915); + untrack_all_intel_runtime_pm_wakerefs(rpm); } void intel_runtime_pm_init_early(struct drm_i915_private *i915) { - init_intel_runtime_pm_wakeref(i915); + struct i915_runtime_pm *rpm = &i915->runtime_pm; + struct pci_dev *pdev = i915->drm.pdev; + struct device *kdev = &pdev->dev; + + rpm->kdev = kdev; + rpm->available = HAS_RUNTIME_PM(i915); + + init_intel_runtime_pm_wakeref(rpm); } diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index b7f3fbb4ae89..68bae6130729 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -152,8 +152,6 @@ struct drm_i915_private *mock_gem_device(void) i915 = (struct drm_i915_private *)(pdev + 1); pci_set_drvdata(pdev, i915); - intel_runtime_pm_init_early(i915); - dev_pm_domain_set(&pdev->dev, &pm_domain); pm_runtime_enable(&pdev->dev); pm_runtime_dont_use_autosuspend(&pdev->dev); @@ -168,6 +166,8 @@ struct drm_i915_private *mock_gem_device(void) i915->drm.pdev = pdev; i915->drm.dev_private = i915; + intel_runtime_pm_init_early(i915); + /* Using the global GTT may ask questions about KMS users, so prepare */ drm_mode_config_init(&i915->drm); -- cgit v1.2.3 From 87b391b9518497ecdda7958c723ccd868afb9630 Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Thu, 13 Jun 2019 16:21:50 -0700 Subject: drm/i915: Remove rpm asserts that use i915 Quite a few of the call points have already switched to the version working directly on the runtime_pm structure, so let's switch over the rest and kill the i915-based asserts. 
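At each remaining call site the conversion is mechanical, roughly (with dev_priv standing for whatever struct drm_i915_private pointer is in scope):

	/* before: the assert takes the whole device structure */
	assert_rpm_wakelock_held(dev_priv);

	/* after: it operates directly on the runtime PM state */
	assert_rpm_wakelock_held(&dev_priv->runtime_pm);

The same substitution applies to assert_rpm_raw_wakeref_held().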
v2: rebase Signed-off-by: Daniele Ceraolo Spurio Cc: Imre Deak Reviewed-by: Chris Wilson Acked-by: Imre Deak Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190613232156.34940-3-daniele.ceraolospurio@intel.com --- drivers/gpu/drm/i915/gem/i915_gem_mman.c | 2 +- drivers/gpu/drm/i915/gvt/aperture_gm.c | 2 +- drivers/gpu/drm/i915/i915_gem_fence_reg.c | 2 +- drivers/gpu/drm/i915/i915_irq.c | 6 +++--- drivers/gpu/drm/i915/i915_vma.c | 2 +- drivers/gpu/drm/i915/intel_csr.c | 2 +- drivers/gpu/drm/i915/intel_display_power.c | 4 ++-- drivers/gpu/drm/i915/intel_drv.h | 26 +++++++------------------- drivers/gpu/drm/i915/intel_runtime_pm.c | 10 +++++----- drivers/gpu/drm/i915/intel_uncore.c | 12 ++++++------ drivers/gpu/drm/i915/intel_wakeref.c | 2 +- 11 files changed, 29 insertions(+), 41 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c index a8b8b9c281f1..7b5841b73588 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c @@ -308,7 +308,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf) goto err_fence; /* Mark as being mmapped into userspace for later revocation */ - assert_rpm_wakelock_held(i915); + assert_rpm_wakelock_held(&i915->runtime_pm); if (!i915_vma_set_userfault(vma) && !obj->userfault_count++) list_add(&obj->userfault_link, &i915->ggtt.userfault_list); if (CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND) diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c index 4098902bfaeb..716622266fa6 100644 --- a/drivers/gpu/drm/i915/gvt/aperture_gm.c +++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c @@ -132,7 +132,7 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu, struct i915_fence_reg *reg; i915_reg_t fence_reg_lo, fence_reg_hi; - assert_rpm_wakelock_held(dev_priv); + assert_rpm_wakelock_held(&dev_priv->runtime_pm); if (WARN_ON(fence >= vgpu_fence_sz(vgpu))) return; diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c index 1c9466676caf..fc4cf908afc3 100644 --- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c +++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c @@ -360,7 +360,7 @@ int i915_vma_pin_fence(struct i915_vma *vma) * Note that we revoke fences on runtime suspend. Therefore the user * must keep the device awake whilst using the fence. */ - assert_rpm_wakelock_held(vma->vm->i915); + assert_rpm_wakelock_held(&vma->vm->i915->runtime_pm); /* Just update our place in the LRU if our fence is getting reused. 
*/ if (vma->fence) { diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index cd9edddd6718..3f08302a14e5 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -589,7 +589,7 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv) void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv) { - assert_rpm_wakelock_held(dev_priv); + assert_rpm_wakelock_held(&dev_priv->runtime_pm); spin_lock_irq(&dev_priv->irq_lock); gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events); @@ -598,7 +598,7 @@ void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv) void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv) { - assert_rpm_wakelock_held(dev_priv); + assert_rpm_wakelock_held(&dev_priv->runtime_pm); spin_lock_irq(&dev_priv->irq_lock); if (!dev_priv->guc.interrupts.enabled) { @@ -612,7 +612,7 @@ void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv) void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv) { - assert_rpm_wakelock_held(dev_priv); + assert_rpm_wakelock_held(&dev_priv->runtime_pm); spin_lock_irq(&dev_priv->irq_lock); dev_priv->guc.interrupts.enabled = false; diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index cb341e4acf99..b295c53085ee 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -364,7 +364,7 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma) int err; /* Access through the GTT requires the device to be awake. */ - assert_rpm_wakelock_held(vma->vm->i915); + assert_rpm_wakelock_held(&vma->vm->i915->runtime_pm); lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); if (WARN_ON(!i915_vma_is_map_and_fenceable(vma))) { diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c index bf0eebd385b9..097153c7f3d4 100644 --- a/drivers/gpu/drm/i915/intel_csr.c +++ b/drivers/gpu/drm/i915/intel_csr.c @@ -273,7 +273,7 @@ void intel_csr_load_program(struct drm_i915_private *dev_priv) } fw_size = dev_priv->csr.dmc_fw_size; - assert_rpm_wakelock_held(dev_priv); + assert_rpm_wakelock_held(&dev_priv->runtime_pm); preempt_disable(); diff --git a/drivers/gpu/drm/i915/intel_display_power.c b/drivers/gpu/drm/i915/intel_display_power.c index bb9ef1cea5db..3e52453189ef 100644 --- a/drivers/gpu/drm/i915/intel_display_power.c +++ b/drivers/gpu/drm/i915/intel_display_power.c @@ -696,7 +696,7 @@ static void assert_can_enable_dc5(struct drm_i915_private *dev_priv) WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5), "DC5 already programmed to be enabled.\n"); - assert_rpm_wakelock_held(dev_priv); + assert_rpm_wakelock_held(&dev_priv->runtime_pm); assert_csr_loaded(dev_priv); } @@ -1814,7 +1814,7 @@ release_async_put_domains(struct i915_power_domains *power_domains, u64 mask) * wakeref to make the state checker happy about the HW access during * power well disabling. 
*/ - assert_rpm_raw_wakeref_held(dev_priv); + assert_rpm_raw_wakeref_held(&dev_priv->runtime_pm); wakeref = intel_runtime_pm_get(dev_priv); for_each_power_domain(domain, mask) { diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index aec40adf4876..ac0bd6067864 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -1640,7 +1640,7 @@ assert_rpm_device_not_suspended(struct i915_runtime_pm *rpm) } static inline void -____assert_rpm_raw_wakeref_held(struct i915_runtime_pm *rpm, int wakeref_count) +__assert_rpm_raw_wakeref_held(struct i915_runtime_pm *rpm, int wakeref_count) { assert_rpm_device_not_suspended(rpm); WARN_ONCE(!intel_rpm_raw_wakeref_count(wakeref_count), @@ -1648,35 +1648,23 @@ ____assert_rpm_raw_wakeref_held(struct i915_runtime_pm *rpm, int wakeref_count) } static inline void -____assert_rpm_wakelock_held(struct i915_runtime_pm *rpm, int wakeref_count) +__assert_rpm_wakelock_held(struct i915_runtime_pm *rpm, int wakeref_count) { - ____assert_rpm_raw_wakeref_held(rpm, wakeref_count); + __assert_rpm_raw_wakeref_held(rpm, wakeref_count); WARN_ONCE(!intel_rpm_wakelock_count(wakeref_count), "RPM wakelock ref not held during HW access\n"); } static inline void -__assert_rpm_raw_wakeref_held(struct i915_runtime_pm *rpm) +assert_rpm_raw_wakeref_held(struct i915_runtime_pm *rpm) { - ____assert_rpm_raw_wakeref_held(rpm, atomic_read(&rpm->wakeref_count)); + __assert_rpm_raw_wakeref_held(rpm, atomic_read(&rpm->wakeref_count)); } static inline void -assert_rpm_raw_wakeref_held(struct drm_i915_private *i915) +assert_rpm_wakelock_held(struct i915_runtime_pm *rpm) { - __assert_rpm_raw_wakeref_held(&i915->runtime_pm); -} - -static inline void -__assert_rpm_wakelock_held(struct i915_runtime_pm *rpm) -{ - ____assert_rpm_wakelock_held(rpm, atomic_read(&rpm->wakeref_count)); -} - -static inline void -assert_rpm_wakelock_held(struct drm_i915_private *i915) -{ - __assert_rpm_wakelock_held(&i915->runtime_pm); + __assert_rpm_wakelock_held(rpm, atomic_read(&rpm->wakeref_count)); } /** diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 7c602f5c748d..ae60ae1c970e 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -335,10 +335,10 @@ intel_runtime_pm_acquire(struct i915_runtime_pm *rpm, bool wakelock) { if (wakelock) { atomic_add(1 + INTEL_RPM_WAKELOCK_BIAS, &rpm->wakeref_count); - __assert_rpm_wakelock_held(rpm); + assert_rpm_wakelock_held(rpm); } else { atomic_inc(&rpm->wakeref_count); - __assert_rpm_raw_wakeref_held(rpm); + assert_rpm_raw_wakeref_held(rpm); } } @@ -346,10 +346,10 @@ static void intel_runtime_pm_release(struct i915_runtime_pm *rpm, int wakelock) { if (wakelock) { - __assert_rpm_wakelock_held(rpm); + assert_rpm_wakelock_held(rpm); atomic_sub(INTEL_RPM_WAKELOCK_BIAS, &rpm->wakeref_count); } else { - __assert_rpm_raw_wakeref_held(rpm); + assert_rpm_raw_wakeref_held(rpm); } __intel_wakeref_dec_and_check_tracking(rpm); @@ -465,7 +465,7 @@ intel_wakeref_t intel_runtime_pm_get_noresume(struct drm_i915_private *i915) { struct i915_runtime_pm *rpm = &i915->runtime_pm; - __assert_rpm_wakelock_held(rpm); + assert_rpm_wakelock_held(rpm); pm_runtime_get_noresume(rpm->kdev); intel_runtime_pm_acquire(rpm, true); diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 85171a8b866a..e1de51c4d84d 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -583,7 +583,7 @@ void 
intel_uncore_forcewake_get(struct intel_uncore *uncore, if (!uncore->funcs.force_wake_get) return; - __assert_rpm_wakelock_held(uncore->rpm); + assert_rpm_wakelock_held(uncore->rpm); spin_lock_irqsave(&uncore->lock, irqflags); __intel_uncore_forcewake_get(uncore, fw_domains); @@ -737,7 +737,7 @@ void assert_forcewakes_active(struct intel_uncore *uncore, if (!uncore->funcs.force_wake_get) return; - __assert_rpm_wakelock_held(uncore->rpm); + assert_rpm_wakelock_held(uncore->rpm); fw_domains &= uncore->fw_domains; WARN(fw_domains & ~uncore->fw_domains_active, @@ -1054,7 +1054,7 @@ unclaimed_reg_debug(struct intel_uncore *uncore, #define GEN2_READ_HEADER(x) \ u##x val = 0; \ - __assert_rpm_wakelock_held(uncore->rpm); + assert_rpm_wakelock_held(uncore->rpm); #define GEN2_READ_FOOTER \ trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \ @@ -1096,7 +1096,7 @@ __gen2_read(64) u32 offset = i915_mmio_reg_offset(reg); \ unsigned long irqflags; \ u##x val = 0; \ - __assert_rpm_wakelock_held(uncore->rpm); \ + assert_rpm_wakelock_held(uncore->rpm); \ spin_lock_irqsave(&uncore->lock, irqflags); \ unclaimed_reg_debug(uncore, reg, true, true) @@ -1170,7 +1170,7 @@ __gen6_read(64) #define GEN2_WRITE_HEADER \ trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \ - __assert_rpm_wakelock_held(uncore->rpm); \ + assert_rpm_wakelock_held(uncore->rpm); \ #define GEN2_WRITE_FOOTER @@ -1208,7 +1208,7 @@ __gen2_write(32) u32 offset = i915_mmio_reg_offset(reg); \ unsigned long irqflags; \ trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \ - __assert_rpm_wakelock_held(uncore->rpm); \ + assert_rpm_wakelock_held(uncore->rpm); \ spin_lock_irqsave(&uncore->lock, irqflags); \ unclaimed_reg_debug(uncore, reg, false, true) diff --git a/drivers/gpu/drm/i915/intel_wakeref.c b/drivers/gpu/drm/i915/intel_wakeref.c index c25ba1b5e8ba..b6c7167ce154 100644 --- a/drivers/gpu/drm/i915/intel_wakeref.c +++ b/drivers/gpu/drm/i915/intel_wakeref.c @@ -110,7 +110,7 @@ void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout) } /* Our mission is that we only extend an already active wakeref */ - assert_rpm_wakelock_held(wf->i915); + assert_rpm_wakelock_held(&wf->i915->runtime_pm); if (!refcount_inc_not_zero(&wf->count)) { spin_lock_irqsave(&wf->lock, flags); -- cgit v1.2.3 From 9102650fb97548f8a08120c7d0174b39092257d6 Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Thu, 13 Jun 2019 16:21:51 -0700 Subject: drm/i915: make enable/disable rpm assert function use the rpm structure With this all the rpm assert-related functions consistently work on the i915_runtime_pm structure Signed-off-by: Daniele Ceraolo Spurio Cc: Imre Deak Reviewed-by: Chris Wilson Acked-by: Imre Deak Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190613232156.34940-4-daniele.ceraolospurio@intel.com --- drivers/gpu/drm/i915/i915_drv.c | 48 +++++++++++++++++++------------------ drivers/gpu/drm/i915/i915_irq.c | 32 ++++++++++++------------- drivers/gpu/drm/i915/intel_drv.h | 12 +++++----- drivers/gpu/drm/i915/intel_uncore.c | 12 +++++----- 4 files changed, 53 insertions(+), 51 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 254f7b7df306..118d774506ae 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -1889,7 +1889,7 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret < 0) goto out_pci_disable; - disable_rpm_wakeref_asserts(dev_priv); + 
disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); ret = i915_driver_init_mmio(dev_priv); if (ret < 0) @@ -1905,7 +1905,7 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent) i915_driver_register(dev_priv); - enable_rpm_wakeref_asserts(dev_priv); + enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); i915_welcome_messages(dev_priv); @@ -1917,7 +1917,7 @@ out_cleanup_hw: out_cleanup_mmio: i915_driver_cleanup_mmio(dev_priv); out_runtime_pm_put: - enable_rpm_wakeref_asserts(dev_priv); + enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); i915_driver_cleanup_early(dev_priv); out_pci_disable: pci_disable_device(pdev); @@ -1932,7 +1932,7 @@ void i915_driver_unload(struct drm_device *dev) struct drm_i915_private *dev_priv = to_i915(dev); struct pci_dev *pdev = dev_priv->drm.pdev; - disable_rpm_wakeref_asserts(dev_priv); + disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); i915_driver_unregister(dev_priv); @@ -1971,21 +1971,21 @@ void i915_driver_unload(struct drm_device *dev) i915_driver_cleanup_hw(dev_priv); - enable_rpm_wakeref_asserts(dev_priv); + enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); } static void i915_driver_release(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); - disable_rpm_wakeref_asserts(dev_priv); + disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); i915_gem_fini(dev_priv); i915_ggtt_cleanup_hw(dev_priv); i915_driver_cleanup_mmio(dev_priv); - enable_rpm_wakeref_asserts(dev_priv); + enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); intel_runtime_pm_cleanup(dev_priv); i915_driver_cleanup_early(dev_priv); @@ -2080,7 +2080,7 @@ static int i915_drm_suspend(struct drm_device *dev) struct pci_dev *pdev = dev_priv->drm.pdev; pci_power_t opregion_target_state; - disable_rpm_wakeref_asserts(dev_priv); + disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); /* We do a lot of poking in a lot of registers, make sure they work * properly. 
*/ @@ -2114,7 +2114,7 @@ static int i915_drm_suspend(struct drm_device *dev) intel_csr_ucode_suspend(dev_priv); - enable_rpm_wakeref_asserts(dev_priv); + enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); return 0; } @@ -2137,7 +2137,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation) struct pci_dev *pdev = dev_priv->drm.pdev; int ret; - disable_rpm_wakeref_asserts(dev_priv); + disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); i915_gem_suspend_late(dev_priv); @@ -2178,7 +2178,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation) pci_set_power_state(pdev, PCI_D3hot); out: - enable_rpm_wakeref_asserts(dev_priv); + enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); if (!dev_priv->uncore.user_forcewake.count) intel_runtime_pm_cleanup(dev_priv); @@ -2214,7 +2214,7 @@ static int i915_drm_resume(struct drm_device *dev) struct drm_i915_private *dev_priv = to_i915(dev); int ret; - disable_rpm_wakeref_asserts(dev_priv); + disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); intel_sanitize_gt_powersave(dev_priv); i915_gem_sanitize(dev_priv); @@ -2274,7 +2274,7 @@ static int i915_drm_resume(struct drm_device *dev) intel_power_domains_enable(dev_priv); - enable_rpm_wakeref_asserts(dev_priv); + enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); return 0; } @@ -2329,7 +2329,7 @@ static int i915_drm_resume_early(struct drm_device *dev) pci_set_master(pdev); - disable_rpm_wakeref_asserts(dev_priv); + disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) ret = vlv_resume_prepare(dev_priv, false); @@ -2354,7 +2354,7 @@ static int i915_drm_resume_early(struct drm_device *dev) intel_gt_sanitize(dev_priv, true); - enable_rpm_wakeref_asserts(dev_priv); + enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); return ret; } @@ -2889,6 +2889,7 @@ static int intel_runtime_suspend(struct device *kdev) struct pci_dev *pdev = to_pci_dev(kdev); struct drm_device *dev = pci_get_drvdata(pdev); struct drm_i915_private *dev_priv = to_i915(dev); + struct i915_runtime_pm *rpm = &dev_priv->runtime_pm; int ret; if (WARN_ON_ONCE(!(dev_priv->gt_pm.rc6.enabled && HAS_RC6(dev_priv)))) @@ -2899,7 +2900,7 @@ static int intel_runtime_suspend(struct device *kdev) DRM_DEBUG_KMS("Suspending device\n"); - disable_rpm_wakeref_asserts(dev_priv); + disable_rpm_wakeref_asserts(rpm); /* * We are safe here against re-faults, since the fault handler takes @@ -2937,18 +2938,18 @@ static int intel_runtime_suspend(struct device *kdev) i915_gem_init_swizzling(dev_priv); i915_gem_restore_fences(dev_priv); - enable_rpm_wakeref_asserts(dev_priv); + enable_rpm_wakeref_asserts(rpm); return ret; } - enable_rpm_wakeref_asserts(dev_priv); + enable_rpm_wakeref_asserts(rpm); intel_runtime_pm_cleanup(dev_priv); if (intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore)) DRM_ERROR("Unclaimed access detected prior to suspending\n"); - dev_priv->runtime_pm.suspended = true; + rpm->suspended = true; /* * FIXME: We really should find a document that references the arguments @@ -2987,6 +2988,7 @@ static int intel_runtime_resume(struct device *kdev) struct pci_dev *pdev = to_pci_dev(kdev); struct drm_device *dev = pci_get_drvdata(pdev); struct drm_i915_private *dev_priv = to_i915(dev); + struct i915_runtime_pm *rpm = &dev_priv->runtime_pm; int ret = 0; if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv))) @@ -2994,11 +2996,11 @@ static int intel_runtime_resume(struct device *kdev) DRM_DEBUG_KMS("Resuming device\n"); - 
WARN_ON_ONCE(atomic_read(&dev_priv->runtime_pm.wakeref_count)); - disable_rpm_wakeref_asserts(dev_priv); + WARN_ON_ONCE(atomic_read(&rpm->wakeref_count)); + disable_rpm_wakeref_asserts(rpm); intel_opregion_notify_adapter(dev_priv, PCI_D0); - dev_priv->runtime_pm.suspended = false; + rpm->suspended = false; if (intel_uncore_unclaimed_mmio(&dev_priv->uncore)) DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n"); @@ -3048,7 +3050,7 @@ static int intel_runtime_resume(struct device *kdev) intel_enable_ipc(dev_priv); - enable_rpm_wakeref_asserts(dev_priv); + enable_rpm_wakeref_asserts(rpm); if (ret) DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 3f08302a14e5..6faa6d6c60b8 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -2186,7 +2186,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg) return IRQ_NONE; /* IRQs are synced during runtime_suspend, we don't require a wakeref */ - disable_rpm_wakeref_asserts(dev_priv); + disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); do { u32 iir, gt_iir, pm_iir; @@ -2257,7 +2257,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg) valleyview_pipestat_irq_handler(dev_priv, pipe_stats); } while (0); - enable_rpm_wakeref_asserts(dev_priv); + enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); return ret; } @@ -2272,7 +2272,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg) return IRQ_NONE; /* IRQs are synced during runtime_suspend, we don't require a wakeref */ - disable_rpm_wakeref_asserts(dev_priv); + disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); do { u32 master_ctl, iir; @@ -2338,7 +2338,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg) valleyview_pipestat_irq_handler(dev_priv, pipe_stats); } while (0); - enable_rpm_wakeref_asserts(dev_priv); + enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); return ret; } @@ -2692,7 +2692,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) return IRQ_NONE; /* IRQs are synced during runtime_suspend, we don't require a wakeref */ - disable_rpm_wakeref_asserts(dev_priv); + disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); /* disable master interrupt before clearing iir */ de_ier = I915_READ(DEIER); @@ -2744,7 +2744,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) I915_WRITE(SDEIER, sde_ier); /* IRQs are synced during runtime_suspend, we don't require a wakeref */ - enable_rpm_wakeref_asserts(dev_priv); + enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); return ret; } @@ -3011,9 +3011,9 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) /* IRQs are synced during runtime_suspend, we don't require a wakeref */ if (master_ctl & ~GEN8_GT_IRQS) { - disable_rpm_wakeref_asserts(dev_priv); + disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); gen8_de_irq_handler(dev_priv, master_ctl); - enable_rpm_wakeref_asserts(dev_priv); + enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); } gen8_master_intr_enable(regs); @@ -3212,13 +3212,13 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg) if (master_ctl & GEN11_DISPLAY_IRQ) { const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL); - disable_rpm_wakeref_asserts(i915); + disable_rpm_wakeref_asserts(&i915->runtime_pm); /* * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ * for the display related bits. 
*/ gen8_de_irq_handler(i915, disp_ctl); - enable_rpm_wakeref_asserts(i915); + enable_rpm_wakeref_asserts(&i915->runtime_pm); } gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl); @@ -4442,7 +4442,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg) return IRQ_NONE; /* IRQs are synced during runtime_suspend, we don't require a wakeref */ - disable_rpm_wakeref_asserts(dev_priv); + disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); do { u32 pipe_stats[I915_MAX_PIPES] = {}; @@ -4473,7 +4473,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg) i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats); } while (0); - enable_rpm_wakeref_asserts(dev_priv); + enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); return ret; } @@ -4547,7 +4547,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) return IRQ_NONE; /* IRQs are synced during runtime_suspend, we don't require a wakeref */ - disable_rpm_wakeref_asserts(dev_priv); + disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); do { u32 pipe_stats[I915_MAX_PIPES] = {}; @@ -4586,7 +4586,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) i915_pipestat_irq_handler(dev_priv, iir, pipe_stats); } while (0); - enable_rpm_wakeref_asserts(dev_priv); + enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); return ret; } @@ -4695,7 +4695,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) return IRQ_NONE; /* IRQs are synced during runtime_suspend, we don't require a wakeref */ - disable_rpm_wakeref_asserts(dev_priv); + disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); do { u32 pipe_stats[I915_MAX_PIPES] = {}; @@ -4736,7 +4736,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) i965_pipestat_irq_handler(dev_priv, iir, pipe_stats); } while (0); - enable_rpm_wakeref_asserts(dev_priv); + enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); return ret; } diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index ac0bd6067864..70ef9b7623f0 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -1669,7 +1669,7 @@ assert_rpm_wakelock_held(struct i915_runtime_pm *rpm) /** * disable_rpm_wakeref_asserts - disable the RPM assert checks - * @i915: i915 device instance + * @rpm: the i915_runtime_pm structure * * This function disable asserts that check if we hold an RPM wakelock * reference, while keeping the device-not-suspended checks still enabled. @@ -1686,15 +1686,15 @@ assert_rpm_wakelock_held(struct i915_runtime_pm *rpm) * enable_rpm_wakeref_asserts(). */ static inline void -disable_rpm_wakeref_asserts(struct drm_i915_private *i915) +disable_rpm_wakeref_asserts(struct i915_runtime_pm *rpm) { atomic_add(INTEL_RPM_WAKELOCK_BIAS + 1, - &i915->runtime_pm.wakeref_count); + &rpm->wakeref_count); } /** * enable_rpm_wakeref_asserts - re-enable the RPM assert checks - * @i915: i915 device instance + * @rpm: the i915_runtime_pm structure * * This function re-enables the RPM assert checks after disabling them with * disable_rpm_wakeref_asserts. It's meant to be used only in special @@ -1704,10 +1704,10 @@ disable_rpm_wakeref_asserts(struct drm_i915_private *i915) * disable_rpm_wakeref_asserts(). 
*/ static inline void -enable_rpm_wakeref_asserts(struct drm_i915_private *i915) +enable_rpm_wakeref_asserts(struct i915_runtime_pm *rpm) { atomic_sub(INTEL_RPM_WAKELOCK_BIAS + 1, - &i915->runtime_pm.wakeref_count); + &rpm->wakeref_count); } #endif /* __INTEL_DRV_H__ */ diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index e1de51c4d84d..5eb1d130fc2e 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -1461,8 +1461,8 @@ static void intel_uncore_fw_domains_init(struct intel_uncore *uncore) static int i915_pmic_bus_access_notifier(struct notifier_block *nb, unsigned long action, void *data) { - struct drm_i915_private *dev_priv = container_of(nb, - struct drm_i915_private, uncore.pmic_bus_access_nb); + struct intel_uncore *uncore = container_of(nb, + struct intel_uncore, pmic_bus_access_nb); switch (action) { case MBI_PMIC_BUS_ACCESS_BEGIN: @@ -1479,12 +1479,12 @@ static int i915_pmic_bus_access_notifier(struct notifier_block *nb, * wake reference -> disable wakeref asserts for the time of * the access. */ - disable_rpm_wakeref_asserts(dev_priv); - intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); - enable_rpm_wakeref_asserts(dev_priv); + disable_rpm_wakeref_asserts(uncore->rpm); + intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); + enable_rpm_wakeref_asserts(uncore->rpm); break; case MBI_PMIC_BUS_ACCESS_END: - intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); + intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); break; } -- cgit v1.2.3 From 1bf676cc2dbaeec7a52ea93e71660f746123c2fe Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Thu, 13 Jun 2019 16:21:52 -0700 Subject: drm/i915: move and rename i915_runtime_pm Asserts aside, all the code working on this structure is in intel_runtime_pm.c and uses the intel_ prefix, so move the structure to intel_runtime_pm.h and adopt the same prefix. Since all the asserts are now working on the runtime_pm structure, bring them across as well. 
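For orientation, this is roughly the shape of the renamed structure as the hunks below add it to intel_runtime_pm.h, trimmed to its core fields (the wakeref-tracking debug state under CONFIG_DRM_I915_DEBUG_RUNTIME_PM is omitted here):

	struct intel_runtime_pm {
		atomic_t wakeref_count;
		struct device *kdev;	/* points to i915->drm.pdev->dev */
		bool available;
		bool suspended;
		bool irqs_enabled;
		/* debug wakeref tracking elided */
	};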
v2: drop unneeded include (Chris), don't rename debugfs, rebase Signed-off-by: Daniele Ceraolo Spurio Cc: Chris Wilson Cc: Imre Deak Reviewed-by: Chris Wilson Acked-by: Imre Deak Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190613232156.34940-5-daniele.ceraolospurio@intel.com --- drivers/gpu/drm/i915/i915_drv.c | 4 +- drivers/gpu/drm/i915/i915_drv.h | 52 +---------- drivers/gpu/drm/i915/intel_drv.h | 97 --------------------- drivers/gpu/drm/i915/intel_runtime_pm.c | 42 ++++----- drivers/gpu/drm/i915/intel_runtime_pm.h | 149 ++++++++++++++++++++++++++++++++ drivers/gpu/drm/i915/intel_uncore.h | 4 +- 6 files changed, 175 insertions(+), 173 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 118d774506ae..6236d3323e71 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -2889,7 +2889,7 @@ static int intel_runtime_suspend(struct device *kdev) struct pci_dev *pdev = to_pci_dev(kdev); struct drm_device *dev = pci_get_drvdata(pdev); struct drm_i915_private *dev_priv = to_i915(dev); - struct i915_runtime_pm *rpm = &dev_priv->runtime_pm; + struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; int ret; if (WARN_ON_ONCE(!(dev_priv->gt_pm.rc6.enabled && HAS_RC6(dev_priv)))) @@ -2988,7 +2988,7 @@ static int intel_runtime_resume(struct device *kdev) struct pci_dev *pdev = to_pci_dev(kdev); struct drm_device *dev = pci_get_drvdata(pdev); struct drm_i915_private *dev_priv = to_i915(dev); - struct i915_runtime_pm *rpm = &dev_priv->runtime_pm; + struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; int ret = 0; if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv))) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 0a3cd4bb7c89..9dce1c71bb9d 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1033,56 +1033,6 @@ struct skl_wm_params { u32 dbuf_block_size; }; -/* - * This struct helps tracking the state needed for runtime PM, which puts the - * device in PCI D3 state. Notice that when this happens, nothing on the - * graphics device works, even register access, so we don't get interrupts nor - * anything else. - * - * Every piece of our code that needs to actually touch the hardware needs to - * either call intel_runtime_pm_get or call intel_display_power_get with the - * appropriate power domain. - * - * Our driver uses the autosuspend delay feature, which means we'll only really - * suspend if we stay with zero refcount for a certain amount of time. The - * default value is currently very conservative (see intel_runtime_pm_enable), but - * it can be changed with the standard runtime PM files from sysfs. - * - * The irqs_disabled variable becomes true exactly after we disable the IRQs and - * goes back to false exactly before we reenable the IRQs. We use this variable - * to check if someone is trying to enable/disable IRQs while they're supposed - * to be disabled. This shouldn't happen and we'll print some error messages in - * case it happens. - * - * For more, read the Documentation/power/runtime_pm.txt. - */ -struct i915_runtime_pm { - atomic_t wakeref_count; - struct device *kdev; /* points to i915->drm.pdev->dev */ - bool available; - bool suspended; - bool irqs_enabled; - -#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) - /* - * To aide detection of wakeref leaks and general misuse, we - * track all wakeref holders. With manual markup (i.e. 
returning - * a cookie to each rpm_get caller which they then supply to their - * paired rpm_put) we can remove corresponding pairs of and keep - * the array trimmed to active wakerefs. - */ - struct intel_runtime_pm_debug { - spinlock_t lock; - - depot_stack_handle_t last_acquire; - depot_stack_handle_t last_release; - - depot_stack_handle_t *owners; - unsigned long count; - } debug; -#endif -}; - enum intel_pipe_crc_source { INTEL_PIPE_CRC_SOURCE_NONE, INTEL_PIPE_CRC_SOURCE_PLANE1, @@ -1728,7 +1678,7 @@ struct drm_i915_private { struct drm_private_obj bw_obj; - struct i915_runtime_pm runtime_pm; + struct intel_runtime_pm runtime_pm; struct { bool initialized; diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 70ef9b7623f0..1d58f7ec5d84 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -1613,101 +1613,4 @@ unsigned int i9xx_plane_max_stride(struct intel_plane *plane, unsigned int rotation); int bdw_get_pipemisc_bpp(struct intel_crtc *crtc); -/* intel_runtime_pm.c */ -#define BITS_PER_WAKEREF \ - BITS_PER_TYPE(struct_member(struct i915_runtime_pm, wakeref_count)) -#define INTEL_RPM_WAKELOCK_SHIFT (BITS_PER_WAKEREF / 2) -#define INTEL_RPM_WAKELOCK_BIAS (1 << INTEL_RPM_WAKELOCK_SHIFT) -#define INTEL_RPM_RAW_WAKEREF_MASK (INTEL_RPM_WAKELOCK_BIAS - 1) - -static inline int -intel_rpm_raw_wakeref_count(int wakeref_count) -{ - return wakeref_count & INTEL_RPM_RAW_WAKEREF_MASK; -} - -static inline int -intel_rpm_wakelock_count(int wakeref_count) -{ - return wakeref_count >> INTEL_RPM_WAKELOCK_SHIFT; -} - -static inline void -assert_rpm_device_not_suspended(struct i915_runtime_pm *rpm) -{ - WARN_ONCE(rpm->suspended, - "Device suspended during HW access\n"); -} - -static inline void -__assert_rpm_raw_wakeref_held(struct i915_runtime_pm *rpm, int wakeref_count) -{ - assert_rpm_device_not_suspended(rpm); - WARN_ONCE(!intel_rpm_raw_wakeref_count(wakeref_count), - "RPM raw-wakeref not held\n"); -} - -static inline void -__assert_rpm_wakelock_held(struct i915_runtime_pm *rpm, int wakeref_count) -{ - __assert_rpm_raw_wakeref_held(rpm, wakeref_count); - WARN_ONCE(!intel_rpm_wakelock_count(wakeref_count), - "RPM wakelock ref not held during HW access\n"); -} - -static inline void -assert_rpm_raw_wakeref_held(struct i915_runtime_pm *rpm) -{ - __assert_rpm_raw_wakeref_held(rpm, atomic_read(&rpm->wakeref_count)); -} - -static inline void -assert_rpm_wakelock_held(struct i915_runtime_pm *rpm) -{ - __assert_rpm_wakelock_held(rpm, atomic_read(&rpm->wakeref_count)); -} - -/** - * disable_rpm_wakeref_asserts - disable the RPM assert checks - * @rpm: the i915_runtime_pm structure - * - * This function disable asserts that check if we hold an RPM wakelock - * reference, while keeping the device-not-suspended checks still enabled. - * It's meant to be used only in special circumstances where our rule about - * the wakelock refcount wrt. the device power state doesn't hold. According - * to this rule at any point where we access the HW or want to keep the HW in - * an active state we must hold an RPM wakelock reference acquired via one of - * the intel_runtime_pm_get() helpers. Currently there are a few special spots - * where this rule doesn't hold: the IRQ and suspend/resume handlers, the - * forcewake release timer, and the GPU RPS and hangcheck works. All other - * users should avoid using this function. - * - * Any calls to this function must have a symmetric call to - * enable_rpm_wakeref_asserts(). 
- */ -static inline void -disable_rpm_wakeref_asserts(struct i915_runtime_pm *rpm) -{ - atomic_add(INTEL_RPM_WAKELOCK_BIAS + 1, - &rpm->wakeref_count); -} - -/** - * enable_rpm_wakeref_asserts - re-enable the RPM assert checks - * @rpm: the i915_runtime_pm structure - * - * This function re-enables the RPM assert checks after disabling them with - * disable_rpm_wakeref_asserts. It's meant to be used only in special - * circumstances otherwise its use should be avoided. - * - * Any calls to this function must have a symmetric call to - * disable_rpm_wakeref_asserts(). - */ -static inline void -enable_rpm_wakeref_asserts(struct i915_runtime_pm *rpm) -{ - atomic_sub(INTEL_RPM_WAKELOCK_BIAS + 1, - &rpm->wakeref_count); -} - #endif /* __INTEL_DRV_H__ */ diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index ae60ae1c970e..07aaa3e06587 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -75,13 +75,13 @@ static void __print_depot_stack(depot_stack_handle_t stack, stack_trace_snprint(buf, sz, entries, nr_entries, indent); } -static void init_intel_runtime_pm_wakeref(struct i915_runtime_pm *rpm) +static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm) { spin_lock_init(&rpm->debug.lock); } static noinline depot_stack_handle_t -track_intel_runtime_pm_wakeref(struct i915_runtime_pm *rpm) +track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm) { depot_stack_handle_t stack, *stacks; unsigned long flags; @@ -113,7 +113,7 @@ track_intel_runtime_pm_wakeref(struct i915_runtime_pm *rpm) return stack; } -static void untrack_intel_runtime_pm_wakeref(struct i915_runtime_pm *rpm, +static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm, depot_stack_handle_t stack) { unsigned long flags, n; @@ -233,7 +233,7 @@ dump_and_free_wakeref_tracking(struct intel_runtime_pm_debug *debug) } static noinline void -__intel_wakeref_dec_and_check_tracking(struct i915_runtime_pm *rpm) +__intel_wakeref_dec_and_check_tracking(struct intel_runtime_pm *rpm) { struct intel_runtime_pm_debug dbg = {}; unsigned long flags; @@ -250,7 +250,7 @@ __intel_wakeref_dec_and_check_tracking(struct i915_runtime_pm *rpm) } static noinline void -untrack_all_intel_runtime_pm_wakerefs(struct i915_runtime_pm *rpm) +untrack_all_intel_runtime_pm_wakerefs(struct intel_runtime_pm *rpm) { struct intel_runtime_pm_debug dbg = {}; unsigned long flags; @@ -268,7 +268,7 @@ void print_intel_runtime_pm_wakeref(struct drm_i915_private *i915, struct intel_runtime_pm_debug dbg = {}; do { - struct i915_runtime_pm *rpm = &i915->runtime_pm; + struct intel_runtime_pm *rpm = &i915->runtime_pm; unsigned long alloc = dbg.count; depot_stack_handle_t *s; @@ -302,36 +302,36 @@ out: #else -static void init_intel_runtime_pm_wakeref(struct i915_runtime_pm *rpm) +static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm) { } static depot_stack_handle_t -track_intel_runtime_pm_wakeref(struct i915_runtime_pm *rpm) +track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm) { return -1; } -static void untrack_intel_runtime_pm_wakeref(struct i915_runtime_pm *rpm, +static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm, intel_wakeref_t wref) { } static void -__intel_wakeref_dec_and_check_tracking(struct i915_runtime_pm *rpm) +__intel_wakeref_dec_and_check_tracking(struct intel_runtime_pm *rpm) { atomic_dec(&rpm->wakeref_count); } static void -untrack_all_intel_runtime_pm_wakerefs(struct i915_runtime_pm *rpm) 
+untrack_all_intel_runtime_pm_wakerefs(struct intel_runtime_pm *rpm) { } #endif static void -intel_runtime_pm_acquire(struct i915_runtime_pm *rpm, bool wakelock) +intel_runtime_pm_acquire(struct intel_runtime_pm *rpm, bool wakelock) { if (wakelock) { atomic_add(1 + INTEL_RPM_WAKELOCK_BIAS, &rpm->wakeref_count); @@ -343,7 +343,7 @@ intel_runtime_pm_acquire(struct i915_runtime_pm *rpm, bool wakelock) } static void -intel_runtime_pm_release(struct i915_runtime_pm *rpm, int wakelock) +intel_runtime_pm_release(struct intel_runtime_pm *rpm, int wakelock) { if (wakelock) { assert_rpm_wakelock_held(rpm); @@ -355,7 +355,7 @@ intel_runtime_pm_release(struct i915_runtime_pm *rpm, int wakelock) __intel_wakeref_dec_and_check_tracking(rpm); } -static intel_wakeref_t __intel_runtime_pm_get(struct i915_runtime_pm *rpm, +static intel_wakeref_t __intel_runtime_pm_get(struct intel_runtime_pm *rpm, bool wakelock) { int ret; @@ -424,7 +424,7 @@ intel_wakeref_t intel_runtime_pm_get(struct drm_i915_private *i915) */ intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915) { - struct i915_runtime_pm *rpm = &i915->runtime_pm; + struct intel_runtime_pm *rpm = &i915->runtime_pm; if (IS_ENABLED(CONFIG_PM)) { /* @@ -463,7 +463,7 @@ intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915) */ intel_wakeref_t intel_runtime_pm_get_noresume(struct drm_i915_private *i915) { - struct i915_runtime_pm *rpm = &i915->runtime_pm; + struct intel_runtime_pm *rpm = &i915->runtime_pm; assert_rpm_wakelock_held(rpm); pm_runtime_get_noresume(rpm->kdev); @@ -473,7 +473,7 @@ intel_wakeref_t intel_runtime_pm_get_noresume(struct drm_i915_private *i915) return track_intel_runtime_pm_wakeref(rpm); } -static void __intel_runtime_pm_put(struct i915_runtime_pm *rpm, +static void __intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref, bool wakelock) { @@ -547,7 +547,7 @@ void intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref) */ void intel_runtime_pm_enable(struct drm_i915_private *i915) { - struct i915_runtime_pm *rpm = &i915->runtime_pm; + struct intel_runtime_pm *rpm = &i915->runtime_pm; struct device *kdev = rpm->kdev; /* @@ -589,7 +589,7 @@ void intel_runtime_pm_enable(struct drm_i915_private *i915) void intel_runtime_pm_disable(struct drm_i915_private *i915) { - struct i915_runtime_pm *rpm = &i915->runtime_pm; + struct intel_runtime_pm *rpm = &i915->runtime_pm; struct device *kdev = rpm->kdev; /* Transfer rpm ownership back to core */ @@ -604,7 +604,7 @@ void intel_runtime_pm_disable(struct drm_i915_private *i915) void intel_runtime_pm_cleanup(struct drm_i915_private *i915) { - struct i915_runtime_pm *rpm = &i915->runtime_pm; + struct intel_runtime_pm *rpm = &i915->runtime_pm; int count = atomic_read(&rpm->wakeref_count); WARN(count, @@ -617,7 +617,7 @@ void intel_runtime_pm_cleanup(struct drm_i915_private *i915) void intel_runtime_pm_init_early(struct drm_i915_private *i915) { - struct i915_runtime_pm *rpm = &i915->runtime_pm; + struct intel_runtime_pm *rpm = &i915->runtime_pm; struct pci_dev *pdev = i915->drm.pdev; struct device *kdev = &pdev->dev; diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.h b/drivers/gpu/drm/i915/intel_runtime_pm.h index a7acceb13473..40c6530af5bb 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.h +++ b/drivers/gpu/drm/i915/intel_runtime_pm.h @@ -11,6 +11,9 @@ #include "intel_display.h" #include "intel_wakeref.h" +#include "i915_utils.h" + +struct device; struct drm_i915_private; struct drm_printer; @@ -20,6 +23,152 @@ enum 
i915_drm_suspend_mode { I915_DRM_SUSPEND_HIBERNATE, }; +/* + * This struct helps tracking the state needed for runtime PM, which puts the + * device in PCI D3 state. Notice that when this happens, nothing on the + * graphics device works, even register access, so we don't get interrupts nor + * anything else. + * + * Every piece of our code that needs to actually touch the hardware needs to + * either call intel_runtime_pm_get or call intel_display_power_get with the + * appropriate power domain. + * + * Our driver uses the autosuspend delay feature, which means we'll only really + * suspend if we stay with zero refcount for a certain amount of time. The + * default value is currently very conservative (see intel_runtime_pm_enable), but + * it can be changed with the standard runtime PM files from sysfs. + * + * The irqs_disabled variable becomes true exactly after we disable the IRQs and + * goes back to false exactly before we reenable the IRQs. We use this variable + * to check if someone is trying to enable/disable IRQs while they're supposed + * to be disabled. This shouldn't happen and we'll print some error messages in + * case it happens. + * + * For more, read the Documentation/power/runtime_pm.txt. + */ +struct intel_runtime_pm { + atomic_t wakeref_count; + struct device *kdev; /* points to i915->drm.pdev->dev */ + bool available; + bool suspended; + bool irqs_enabled; + +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) + /* + * To aide detection of wakeref leaks and general misuse, we + * track all wakeref holders. With manual markup (i.e. returning + * a cookie to each rpm_get caller which they then supply to their + * paired rpm_put) we can remove corresponding pairs of and keep + * the array trimmed to active wakerefs. + */ + struct intel_runtime_pm_debug { + spinlock_t lock; + + depot_stack_handle_t last_acquire; + depot_stack_handle_t last_release; + + depot_stack_handle_t *owners; + unsigned long count; + } debug; +#endif +}; + +#define BITS_PER_WAKEREF \ + BITS_PER_TYPE(struct_member(struct intel_runtime_pm, wakeref_count)) +#define INTEL_RPM_WAKELOCK_SHIFT (BITS_PER_WAKEREF / 2) +#define INTEL_RPM_WAKELOCK_BIAS (1 << INTEL_RPM_WAKELOCK_SHIFT) +#define INTEL_RPM_RAW_WAKEREF_MASK (INTEL_RPM_WAKELOCK_BIAS - 1) + +static inline int +intel_rpm_raw_wakeref_count(int wakeref_count) +{ + return wakeref_count & INTEL_RPM_RAW_WAKEREF_MASK; +} + +static inline int +intel_rpm_wakelock_count(int wakeref_count) +{ + return wakeref_count >> INTEL_RPM_WAKELOCK_SHIFT; +} + +static inline void +assert_rpm_device_not_suspended(struct intel_runtime_pm *rpm) +{ + WARN_ONCE(rpm->suspended, + "Device suspended during HW access\n"); +} + +static inline void +__assert_rpm_raw_wakeref_held(struct intel_runtime_pm *rpm, int wakeref_count) +{ + assert_rpm_device_not_suspended(rpm); + WARN_ONCE(!intel_rpm_raw_wakeref_count(wakeref_count), + "RPM raw-wakeref not held\n"); +} + +static inline void +__assert_rpm_wakelock_held(struct intel_runtime_pm *rpm, int wakeref_count) +{ + __assert_rpm_raw_wakeref_held(rpm, wakeref_count); + WARN_ONCE(!intel_rpm_wakelock_count(wakeref_count), + "RPM wakelock ref not held during HW access\n"); +} + +static inline void +assert_rpm_raw_wakeref_held(struct intel_runtime_pm *rpm) +{ + __assert_rpm_raw_wakeref_held(rpm, atomic_read(&rpm->wakeref_count)); +} + +static inline void +assert_rpm_wakelock_held(struct intel_runtime_pm *rpm) +{ + __assert_rpm_wakelock_held(rpm, atomic_read(&rpm->wakeref_count)); +} + +/** + * disable_rpm_wakeref_asserts - disable the RPM 
assert checks + * @rpm: the intel_runtime_pm structure + * + * This function disable asserts that check if we hold an RPM wakelock + * reference, while keeping the device-not-suspended checks still enabled. + * It's meant to be used only in special circumstances where our rule about + * the wakelock refcount wrt. the device power state doesn't hold. According + * to this rule at any point where we access the HW or want to keep the HW in + * an active state we must hold an RPM wakelock reference acquired via one of + * the intel_runtime_pm_get() helpers. Currently there are a few special spots + * where this rule doesn't hold: the IRQ and suspend/resume handlers, the + * forcewake release timer, and the GPU RPS and hangcheck works. All other + * users should avoid using this function. + * + * Any calls to this function must have a symmetric call to + * enable_rpm_wakeref_asserts(). + */ +static inline void +disable_rpm_wakeref_asserts(struct intel_runtime_pm *rpm) +{ + atomic_add(INTEL_RPM_WAKELOCK_BIAS + 1, + &rpm->wakeref_count); +} + +/** + * enable_rpm_wakeref_asserts - re-enable the RPM assert checks + * @rpm: the intel_runtime_pm structure + * + * This function re-enables the RPM assert checks after disabling them with + * disable_rpm_wakeref_asserts. It's meant to be used only in special + * circumstances otherwise its use should be avoided. + * + * Any calls to this function must have a symmetric call to + * disable_rpm_wakeref_asserts(). + */ +static inline void +enable_rpm_wakeref_asserts(struct intel_runtime_pm *rpm) +{ + atomic_sub(INTEL_RPM_WAKELOCK_BIAS + 1, + &rpm->wakeref_count); +} + void intel_runtime_pm_init_early(struct drm_i915_private *dev_priv); void intel_runtime_pm_enable(struct drm_i915_private *dev_priv); void intel_runtime_pm_disable(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h index d6af3de70121..804a0faacc91 100644 --- a/drivers/gpu/drm/i915/intel_uncore.h +++ b/drivers/gpu/drm/i915/intel_uncore.h @@ -33,7 +33,7 @@ #include "i915_reg.h" struct drm_i915_private; -struct i915_runtime_pm; +struct intel_runtime_pm; struct intel_uncore; enum forcewake_domain_id { @@ -97,7 +97,7 @@ struct intel_forcewake_range { struct intel_uncore { void __iomem *regs; - struct i915_runtime_pm *rpm; + struct intel_runtime_pm *rpm; spinlock_t lock; /** lock is also taken in irq contexts. */ -- cgit v1.2.3 From 69c663554452e6589144046331d08e597e7f5823 Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Thu, 13 Jun 2019 16:21:53 -0700 Subject: drm/i915: move a few more functions to accept the rpm structure Focusing on the functions called in few places. 
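As a rough sketch of the resulting convention (illustrative only; the hunks
below are authoritative and the names are taken from them), callers now pass
the embedded runtime PM structure instead of the full device pointer:

	/* before: the entry points took the whole device */
	intel_runtime_pm_init_early(dev_priv);
	intel_runtime_pm_enable(dev_priv);

	/* after: they take the intel_runtime_pm structure directly */
	intel_runtime_pm_init_early(&dev_priv->runtime_pm);
	intel_runtime_pm_enable(&dev_priv->runtime_pm);
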
Signed-off-by: Daniele Ceraolo Spurio Cc: Imre Deak Reviewed-by: Chris Wilson Acked-by: Imre Deak Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190613232156.34940-6-daniele.ceraolospurio@intel.com --- drivers/gpu/drm/i915/i915_debugfs.c | 2 +- drivers/gpu/drm/i915/i915_drv.c | 22 ++++++++++++---------- drivers/gpu/drm/i915/intel_runtime_pm.c | 19 ++++++++----------- drivers/gpu/drm/i915/intel_runtime_pm.h | 12 ++++++------ drivers/gpu/drm/i915/selftests/mock_gem_device.c | 2 +- 5 files changed, 28 insertions(+), 29 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 323863504111..1fc5af4c1905 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -2459,7 +2459,7 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused) if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) { struct drm_printer p = drm_seq_file_printer(m); - print_intel_runtime_pm_wakeref(dev_priv, &p); + print_intel_runtime_pm_wakeref(&dev_priv->runtime_pm, &p); } return 0; diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 6236d3323e71..65d599065709 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -909,7 +909,7 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv) mutex_init(&dev_priv->hdcp_comp_mutex); i915_memcpy_init_early(dev_priv); - intel_runtime_pm_init_early(dev_priv); + intel_runtime_pm_init_early(&dev_priv->runtime_pm); ret = i915_workqueues_init(dev_priv); if (ret < 0) @@ -1751,7 +1751,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv) drm_kms_helper_poll_init(dev); intel_power_domains_enable(dev_priv); - intel_runtime_pm_enable(dev_priv); + intel_runtime_pm_enable(&dev_priv->runtime_pm); } /** @@ -1760,7 +1760,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv) */ static void i915_driver_unregister(struct drm_i915_private *dev_priv) { - intel_runtime_pm_disable(dev_priv); + intel_runtime_pm_disable(&dev_priv->runtime_pm); intel_power_domains_disable(dev_priv); intel_fbdev_unregister(dev_priv); @@ -1977,16 +1977,17 @@ void i915_driver_unload(struct drm_device *dev) static void i915_driver_release(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; - disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); + disable_rpm_wakeref_asserts(rpm); i915_gem_fini(dev_priv); i915_ggtt_cleanup_hw(dev_priv); i915_driver_cleanup_mmio(dev_priv); - enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); - intel_runtime_pm_cleanup(dev_priv); + enable_rpm_wakeref_asserts(rpm); + intel_runtime_pm_cleanup(rpm); i915_driver_cleanup_early(dev_priv); i915_driver_destroy(dev_priv); @@ -2135,9 +2136,10 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation) { struct drm_i915_private *dev_priv = to_i915(dev); struct pci_dev *pdev = dev_priv->drm.pdev; + struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; int ret; - disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); + disable_rpm_wakeref_asserts(rpm); i915_gem_suspend_late(dev_priv); @@ -2178,9 +2180,9 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation) pci_set_power_state(pdev, PCI_D3hot); out: - enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); + enable_rpm_wakeref_asserts(rpm); if (!dev_priv->uncore.user_forcewake.count) - intel_runtime_pm_cleanup(dev_priv); + 
intel_runtime_pm_cleanup(rpm); return ret; } @@ -2944,7 +2946,7 @@ static int intel_runtime_suspend(struct device *kdev) } enable_rpm_wakeref_asserts(rpm); - intel_runtime_pm_cleanup(dev_priv); + intel_runtime_pm_cleanup(rpm); if (intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore)) DRM_ERROR("Unclaimed access detected prior to suspending\n"); diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 07aaa3e06587..3d9ea3498679 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -262,13 +262,12 @@ untrack_all_intel_runtime_pm_wakerefs(struct intel_runtime_pm *rpm) dump_and_free_wakeref_tracking(&dbg); } -void print_intel_runtime_pm_wakeref(struct drm_i915_private *i915, +void print_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm, struct drm_printer *p) { struct intel_runtime_pm_debug dbg = {}; do { - struct intel_runtime_pm *rpm = &i915->runtime_pm; unsigned long alloc = dbg.count; depot_stack_handle_t *s; @@ -537,7 +536,7 @@ void intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref) /** * intel_runtime_pm_enable - enable runtime pm - * @i915: i915 device instance + * @rpm: the intel_runtime_pm structure * * This function enables runtime pm at the end of the driver load sequence. * @@ -545,9 +544,8 @@ void intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref) * subordinate display power domains. That is done by * intel_power_domains_enable(). */ -void intel_runtime_pm_enable(struct drm_i915_private *i915) +void intel_runtime_pm_enable(struct intel_runtime_pm *rpm) { - struct intel_runtime_pm *rpm = &i915->runtime_pm; struct device *kdev = rpm->kdev; /* @@ -587,9 +585,8 @@ void intel_runtime_pm_enable(struct drm_i915_private *i915) pm_runtime_put_autosuspend(kdev); } -void intel_runtime_pm_disable(struct drm_i915_private *i915) +void intel_runtime_pm_disable(struct intel_runtime_pm *rpm) { - struct intel_runtime_pm *rpm = &i915->runtime_pm; struct device *kdev = rpm->kdev; /* Transfer rpm ownership back to core */ @@ -602,9 +599,8 @@ void intel_runtime_pm_disable(struct drm_i915_private *i915) pm_runtime_put(kdev); } -void intel_runtime_pm_cleanup(struct drm_i915_private *i915) +void intel_runtime_pm_cleanup(struct intel_runtime_pm *rpm) { - struct intel_runtime_pm *rpm = &i915->runtime_pm; int count = atomic_read(&rpm->wakeref_count); WARN(count, @@ -615,9 +611,10 @@ void intel_runtime_pm_cleanup(struct drm_i915_private *i915) untrack_all_intel_runtime_pm_wakerefs(rpm); } -void intel_runtime_pm_init_early(struct drm_i915_private *i915) +void intel_runtime_pm_init_early(struct intel_runtime_pm *rpm) { - struct intel_runtime_pm *rpm = &i915->runtime_pm; + struct drm_i915_private *i915 = + container_of(rpm, struct drm_i915_private, runtime_pm); struct pci_dev *pdev = i915->drm.pdev; struct device *kdev = &pdev->dev; diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.h b/drivers/gpu/drm/i915/intel_runtime_pm.h index 40c6530af5bb..0890e698f196 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.h +++ b/drivers/gpu/drm/i915/intel_runtime_pm.h @@ -169,10 +169,10 @@ enable_rpm_wakeref_asserts(struct intel_runtime_pm *rpm) &rpm->wakeref_count); } -void intel_runtime_pm_init_early(struct drm_i915_private *dev_priv); -void intel_runtime_pm_enable(struct drm_i915_private *dev_priv); -void intel_runtime_pm_disable(struct drm_i915_private *dev_priv); -void intel_runtime_pm_cleanup(struct drm_i915_private *dev_priv); +void intel_runtime_pm_init_early(struct 
intel_runtime_pm *rpm); +void intel_runtime_pm_enable(struct intel_runtime_pm *rpm); +void intel_runtime_pm_disable(struct intel_runtime_pm *rpm); +void intel_runtime_pm_cleanup(struct intel_runtime_pm *rpm); intel_wakeref_t intel_runtime_pm_get(struct drm_i915_private *i915); intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915); @@ -200,10 +200,10 @@ intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref) void intel_runtime_pm_put_raw(struct drm_i915_private *i915, intel_wakeref_t wref); #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) -void print_intel_runtime_pm_wakeref(struct drm_i915_private *i915, +void print_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm, struct drm_printer *p); #else -static inline void print_intel_runtime_pm_wakeref(struct drm_i915_private *i915, +static inline void print_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm, struct drm_printer *p) { } diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index 68bae6130729..82f2be6cec5b 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -166,7 +166,7 @@ struct drm_i915_private *mock_gem_device(void) i915->drm.pdev = pdev; i915->drm.dev_private = i915; - intel_runtime_pm_init_early(i915); + intel_runtime_pm_init_early(&i915->runtime_pm); /* Using the global GTT may ask questions about KMS users, so prepare */ drm_mode_config_init(&i915->drm); -- cgit v1.2.3 From d858d5695f3897d55df68452066a90d7560cb845 Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Thu, 13 Jun 2019 16:21:54 -0700 Subject: drm/i915: update rpm_get/put to use the rpm structure The functions were already using only the structure internally, so we just need to flip the interface.
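A typical call site therefore changes as sketched below (illustrative; the
names mirror the pattern in the hunks that follow):

	/* before: get/put operated on the drm_i915_private pointer */
	wakeref = intel_runtime_pm_get(i915);
	/* ... HW access while holding the wakeref ... */
	intel_runtime_pm_put(i915, wakeref);

	/* after: they operate on the runtime PM structure itself */
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
	/* ... HW access while holding the wakeref ... */
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
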
v2: rebase Signed-off-by: Daniele Ceraolo Spurio Cc: Imre Deak Reviewed-by: Chris Wilson Acked-by: Imre Deak Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190613232156.34940-7-daniele.ceraolospurio@intel.com --- drivers/gpu/drm/i915/gem/i915_gem_mman.c | 11 ++--- drivers/gpu/drm/i915/gem/i915_gem_object.c | 4 +- drivers/gpu/drm/i915/gem/i915_gem_shrinker.c | 4 +- drivers/gpu/drm/i915/gem/selftests/huge_pages.c | 4 +- .../drm/i915/gem/selftests/i915_gem_coherency.c | 4 +- .../gpu/drm/i915/gem/selftests/i915_gem_context.c | 12 +++--- drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c | 4 +- drivers/gpu/drm/i915/gt/intel_engine_cs.c | 8 ++-- drivers/gpu/drm/i915/gt/intel_hangcheck.c | 4 +- drivers/gpu/drm/i915/gt/intel_reset.c | 4 +- drivers/gpu/drm/i915/gt/selftest_hangcheck.c | 12 +++--- drivers/gpu/drm/i915/gt/selftest_lrc.c | 36 ++++++++-------- drivers/gpu/drm/i915/gt/selftest_reset.c | 4 +- drivers/gpu/drm/i915/gt/selftest_workarounds.c | 12 +++--- drivers/gpu/drm/i915/gvt/aperture_gm.c | 15 +++---- drivers/gpu/drm/i915/gvt/gvt.h | 4 +- drivers/gpu/drm/i915/gvt/sched_policy.c | 4 +- drivers/gpu/drm/i915/gvt/scheduler.c | 4 +- drivers/gpu/drm/i915/i915_debugfs.c | 49 +++++++++++----------- drivers/gpu/drm/i915/i915_gem.c | 19 +++++---- drivers/gpu/drm/i915/i915_gem_fence_reg.c | 4 +- drivers/gpu/drm/i915/i915_gem_gtt.c | 6 +-- drivers/gpu/drm/i915/i915_perf.c | 6 +-- drivers/gpu/drm/i915/i915_pmu.c | 12 +++--- drivers/gpu/drm/i915/i915_sysfs.c | 12 +++--- drivers/gpu/drm/i915/intel_display.c | 12 +++--- drivers/gpu/drm/i915/intel_display_power.c | 37 ++++++++-------- drivers/gpu/drm/i915/intel_fbdev.c | 6 +-- drivers/gpu/drm/i915/intel_hotplug.c | 4 +- drivers/gpu/drm/i915/intel_runtime_pm.c | 43 +++++++++---------- drivers/gpu/drm/i915/intel_runtime_pm.h | 26 ++++++------ drivers/gpu/drm/i915/intel_wakeref.c | 8 ++-- drivers/gpu/drm/i915/selftests/i915_active.c | 8 ++-- drivers/gpu/drm/i915/selftests/i915_gem.c | 4 +- drivers/gpu/drm/i915/selftests/i915_gem_evict.c | 4 +- drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 8 ++-- drivers/gpu/drm/i915/selftests/i915_request.c | 20 ++++----- drivers/gpu/drm/i915/selftests/i915_timeline.c | 16 +++---- drivers/gpu/drm/i915/selftests/i915_vma.c | 4 +- drivers/gpu/drm/i915/selftests/intel_guc.c | 8 ++-- drivers/gpu/drm/i915/selftests/intel_uncore.c | 4 +- 41 files changed, 236 insertions(+), 234 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c index 7b5841b73588..391621ee3cbb 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c @@ -222,6 +222,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf) struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data); struct drm_device *dev = obj->base.dev; struct drm_i915_private *i915 = to_i915(dev); + struct intel_runtime_pm *rpm = &i915->runtime_pm; struct i915_ggtt *ggtt = &i915->ggtt; bool write = area->vm_flags & VM_WRITE; intel_wakeref_t wakeref; @@ -243,7 +244,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf) if (ret) goto err; - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(rpm); srcu = i915_reset_trylock(i915); if (srcu < 0) { @@ -308,7 +309,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf) goto err_fence; /* Mark as being mmapped into userspace for later revocation */ - assert_rpm_wakelock_held(&i915->runtime_pm); + assert_rpm_wakelock_held(rpm); if (!i915_vma_set_userfault(vma) && 
!obj->userfault_count++) list_add(&obj->userfault_link, &i915->ggtt.userfault_list); if (CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND) @@ -327,7 +328,7 @@ err_unlock: err_reset: i915_reset_unlock(i915, srcu); err_rpm: - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(rpm, wakeref); i915_gem_object_unpin_pages(obj); err: switch (ret) { @@ -410,7 +411,7 @@ void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj) * wakeref. */ lockdep_assert_held(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); if (!obj->userfault_count) goto out; @@ -427,7 +428,7 @@ void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj) wmb(); out: - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); } static int create_mmap_offset(struct drm_i915_gem_object *obj) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c index 36b76c6a0a9d..a4047a585c8b 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c @@ -181,7 +181,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915, struct drm_i915_gem_object *obj, *on; intel_wakeref_t wakeref; - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); llist_for_each_entry_safe(obj, on, freed, freed) { struct i915_vma *vma, *vn; @@ -243,7 +243,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915, cond_resched(); } - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); } void i915_gem_flush_free_objects(struct drm_i915_private *i915) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c index e15f37bef36a..13ff05566a0a 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c @@ -182,7 +182,7 @@ i915_gem_shrink(struct drm_i915_private *i915, * we will force the wake during oom-notifier. 
*/ if (shrink & I915_SHRINK_BOUND) { - wakeref = intel_runtime_pm_get_if_in_use(i915); + wakeref = intel_runtime_pm_get_if_in_use(&i915->runtime_pm); if (!wakeref) shrink &= ~I915_SHRINK_BOUND; } @@ -267,7 +267,7 @@ i915_gem_shrink(struct drm_i915_private *i915, } if (shrink & I915_SHRINK_BOUND) - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); i915_retire_requests(i915); diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c index 73e667b31cc4..b74729b6f353 100644 --- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c @@ -1754,7 +1754,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv) return PTR_ERR(file); mutex_lock(&dev_priv->drm.struct_mutex); - wakeref = intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); ctx = live_context(dev_priv, file); if (IS_ERR(ctx)) { @@ -1768,7 +1768,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv) err = i915_subtests(tests, ctx); out_unlock: - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); mutex_unlock(&dev_priv->drm.struct_mutex); mock_file_free(dev_priv, file); diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c index c72e17da090c..8f22d3f18422 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c @@ -293,7 +293,7 @@ static int igt_gem_coherency(void *arg) values = offsets + ncachelines; mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); for (over = igt_coherency_mode; over->name; over++) { if (!over->set) continue; @@ -371,7 +371,7 @@ static int igt_gem_coherency(void *arg) } } unlock: - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); kfree(offsets); return err; diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c index 9e2878a59023..b85a4a43536a 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c @@ -53,7 +53,7 @@ static int live_nop_switch(void *arg) return PTR_ERR(file); mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); ctx = kcalloc(nctx, sizeof(*ctx), GFP_KERNEL); if (!ctx) { @@ -156,7 +156,7 @@ static int live_nop_switch(void *arg) } out_unlock: - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); mock_file_free(i915, file); return err; @@ -1084,7 +1084,7 @@ __igt_ctx_sseu(struct drm_i915_private *i915, goto out_unlock; } - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); ce = i915_gem_context_get_engine(ctx, RCS0); if (IS_ERR(ce)) { @@ -1124,7 +1124,7 @@ out_fail: out_context: intel_context_put(ce); out_rpm: - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); i915_gem_object_put(obj); out_unlock: @@ -1541,7 +1541,7 @@ static int igt_vm_isolation(void *arg) GEM_BUG_ON(ctx_b->vm->total != vm_total); vm_total -= I915_GTT_PAGE_SIZE; - wakeref = intel_runtime_pm_get(i915); + wakeref = 
intel_runtime_pm_get(&i915->runtime_pm); count = 0; for_each_engine(engine, i915, id) { @@ -1586,7 +1586,7 @@ static int igt_vm_isolation(void *arg) count, RUNTIME_INFO(i915)->num_engines); out_rpm: - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); out_unlock: if (igt_live_test_end(&t)) err = -EIO; diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c index b92809418729..5c81f4b4813a 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c @@ -205,7 +205,7 @@ static int igt_partial_tiling(void *arg) } mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); if (1) { IGT_TIMEOUT(end); @@ -316,7 +316,7 @@ next_tiling: ; } out_unlock: - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); i915_gem_object_unpin_pages(obj); out: diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index c0d986db5a75..39220e16ea26 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -1103,7 +1103,7 @@ static bool ring_is_idle(struct intel_engine_cs *engine) return true; /* If the whole device is asleep, the engine must be idle */ - wakeref = intel_runtime_pm_get_if_in_use(dev_priv); + wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm); if (!wakeref) return true; @@ -1117,7 +1117,7 @@ static bool ring_is_idle(struct intel_engine_cs *engine) !(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE)) idle = false; - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); return idle; } @@ -1531,10 +1531,10 @@ void intel_engine_dump(struct intel_engine_cs *engine, rcu_read_unlock(); - wakeref = intel_runtime_pm_get_if_in_use(engine->i915); + wakeref = intel_runtime_pm_get_if_in_use(&engine->i915->runtime_pm); if (wakeref) { intel_engine_print_registers(engine, m); - intel_runtime_pm_put(engine->i915, wakeref); + intel_runtime_pm_put(&engine->i915->runtime_pm, wakeref); } else { drm_printf(m, "\tDevice is asleep; skipping register dump\n"); } diff --git a/drivers/gpu/drm/i915/gt/intel_hangcheck.c b/drivers/gpu/drm/i915/gt/intel_hangcheck.c index 174bb0a60309..6bcfa6456c45 100644 --- a/drivers/gpu/drm/i915/gt/intel_hangcheck.c +++ b/drivers/gpu/drm/i915/gt/intel_hangcheck.c @@ -273,7 +273,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work) if (i915_terminally_wedged(dev_priv)) return; - wakeref = intel_runtime_pm_get_if_in_use(dev_priv); + wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm); if (!wakeref) return; @@ -324,7 +324,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work) if (hung) hangcheck_declare_hang(dev_priv, hung, stuck); - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); /* Reset timer in case GPU hangs without another request being added */ i915_queue_hangcheck(dev_priv); diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index 41a294f5cc19..9e7d400c083a 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -1311,7 +1311,7 @@ void i915_handle_error(struct drm_i915_private *i915, * isn't the case at least when we get here by doing a * simulated reset via debugfs, so get an RPM 
reference. */ - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); engine_mask &= INTEL_INFO(i915)->engine_mask; @@ -1374,7 +1374,7 @@ void i915_handle_error(struct drm_i915_private *i915, wake_up_all(&error->reset_queue); out: - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); } int i915_reset_trylock(struct drm_i915_private *i915) diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c index 45379a63e013..b0b2998e56b8 100644 --- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c +++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c @@ -394,7 +394,7 @@ static int igt_reset_nop(void *arg) } i915_gem_context_clear_bannable(ctx); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); reset_count = i915_reset_count(&i915->gpu_error); count = 0; do { @@ -441,7 +441,7 @@ static int igt_reset_nop(void *arg) err = igt_flush_test(i915, I915_WAIT_LOCKED); mutex_unlock(&i915->drm.struct_mutex); - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); out: mock_file_free(i915, file); @@ -478,7 +478,7 @@ static int igt_reset_nop_engine(void *arg) } i915_gem_context_clear_bannable(ctx); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); for_each_engine(engine, i915, id) { unsigned int reset_count, reset_engine_count; unsigned int count; @@ -549,7 +549,7 @@ static int igt_reset_nop_engine(void *arg) err = igt_flush_test(i915, I915_WAIT_LOCKED); mutex_unlock(&i915->drm.struct_mutex); - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); out: mock_file_free(i915, file); if (i915_reset_failed(i915)) @@ -1749,7 +1749,7 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915) if (i915_terminally_wedged(i915)) return -EIO; /* we're long past hope of a successful reset */ - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); saved_hangcheck = fetch_and_zero(&i915_modparams.enable_hangcheck); drain_delayed_work(&i915->gpu_error.hangcheck_work); /* flush param */ @@ -1760,7 +1760,7 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915) mutex_unlock(&i915->drm.struct_mutex); i915_modparams.enable_hangcheck = saved_hangcheck; - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); return err; } diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c index f0ca2a09dabd..d84d31e3da19 100644 --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c @@ -33,7 +33,7 @@ static int live_sanitycheck(void *arg) return 0; mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); if (igt_spinner_init(&spin, i915)) goto err_unlock; @@ -74,7 +74,7 @@ err_spin: igt_spinner_fini(&spin); err_unlock: igt_flush_test(i915, I915_WAIT_LOCKED); - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; } @@ -97,7 +97,7 @@ static int live_busywait_preempt(void *arg) */ mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); ctx_hi = kernel_context(i915); if (!ctx_hi) @@ -255,7 +255,7 @@ err_ctx_hi: err_unlock: if (igt_flush_test(i915, I915_WAIT_LOCKED)) err = -EIO; - 
intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; } @@ -277,7 +277,7 @@ static int live_preempt(void *arg) pr_err("Logical preemption supported, but not exposed\n"); mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); if (igt_spinner_init(&spin_hi, i915)) goto err_unlock; @@ -362,7 +362,7 @@ err_spin_hi: igt_spinner_fini(&spin_hi); err_unlock: igt_flush_test(i915, I915_WAIT_LOCKED); - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; } @@ -382,7 +382,7 @@ static int live_late_preempt(void *arg) return 0; mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); if (igt_spinner_init(&spin_hi, i915)) goto err_unlock; @@ -466,7 +466,7 @@ err_spin_hi: igt_spinner_fini(&spin_hi); err_unlock: igt_flush_test(i915, I915_WAIT_LOCKED); - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; @@ -532,7 +532,7 @@ static int live_suppress_self_preempt(void *arg) return 0; /* presume black blox */ mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); if (preempt_client_init(i915, &a)) goto err_unlock; @@ -606,7 +606,7 @@ err_client_a: err_unlock: if (igt_flush_test(i915, I915_WAIT_LOCKED)) err = -EIO; - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; @@ -683,7 +683,7 @@ static int live_suppress_wait_preempt(void *arg) return 0; mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); if (preempt_client_init(i915, &client[0])) /* ELSP[0] */ goto err_unlock; @@ -776,7 +776,7 @@ err_client_0: err_unlock: if (igt_flush_test(i915, I915_WAIT_LOCKED)) err = -EIO; - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; @@ -807,7 +807,7 @@ static int live_chain_preempt(void *arg) return 0; mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); if (preempt_client_init(i915, &hi)) goto err_unlock; @@ -924,7 +924,7 @@ err_client_hi: err_unlock: if (igt_flush_test(i915, I915_WAIT_LOCKED)) err = -EIO; - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; @@ -953,7 +953,7 @@ static int live_preempt_hang(void *arg) return 0; mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); if (igt_spinner_init(&spin_hi, i915)) goto err_unlock; @@ -1050,7 +1050,7 @@ err_spin_hi: igt_spinner_fini(&spin_hi); err_unlock: igt_flush_test(i915, I915_WAIT_LOCKED); - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; } @@ -1256,7 +1256,7 @@ static int live_preempt_smoke(void *arg) return -ENOMEM; mutex_lock(&smoke.i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(smoke.i915); + wakeref = intel_runtime_pm_get(&smoke.i915->runtime_pm); smoke.batch = 
i915_gem_object_create_internal(smoke.i915, PAGE_SIZE); if (IS_ERR(smoke.batch)) { @@ -1309,7 +1309,7 @@ err_ctx: err_batch: i915_gem_object_put(smoke.batch); err_unlock: - intel_runtime_pm_put(smoke.i915, wakeref); + intel_runtime_pm_put(&smoke.i915->runtime_pm, wakeref); mutex_unlock(&smoke.i915->drm.struct_mutex); kfree(smoke.contexts); diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c index 607473439eb0..91f0349882fc 100644 --- a/drivers/gpu/drm/i915/gt/selftest_reset.c +++ b/drivers/gpu/drm/i915/gt/selftest_reset.c @@ -42,14 +42,14 @@ static int igt_wedged_reset(void *arg) /* Check that we can recover a wedged device with a GPU reset */ igt_global_reset_lock(i915); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); i915_gem_set_wedged(i915); GEM_BUG_ON(!i915_reset_failed(i915)); i915_reset(i915, ALL_ENGINES, NULL); - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); igt_global_reset_unlock(i915); return i915_reset_failed(i915) ? -EIO : 0; diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c index 93e9579b8a4f..8891e1a8ac16 100644 --- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c +++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c @@ -636,7 +636,7 @@ static int live_dirty_whitelist(void *arg) if (INTEL_GEN(i915) < 7) /* minimum requirement for LRI, SRM, LRM */ return 0; - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); mutex_unlock(&i915->drm.struct_mutex); file = mock_file(i915); @@ -666,7 +666,7 @@ out_file: mock_file_free(i915, file); mutex_lock(&i915->drm.struct_mutex); out_rpm: - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); return err; } @@ -1055,7 +1055,7 @@ live_gpu_reset_workarounds(void *arg) pr_info("Verifying after GPU reset...\n"); igt_global_reset_lock(i915); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); reference_lists_init(i915, &lists); @@ -1070,7 +1070,7 @@ live_gpu_reset_workarounds(void *arg) out: kernel_context_close(ctx); reference_lists_fini(i915, &lists); - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); igt_global_reset_unlock(i915); return ok ? 
0 : -ESRCH; @@ -1097,7 +1097,7 @@ live_engine_reset_workarounds(void *arg) return PTR_ERR(ctx); igt_global_reset_lock(i915); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); reference_lists_init(i915, &lists); @@ -1154,7 +1154,7 @@ live_engine_reset_workarounds(void *arg) err: reference_lists_fini(i915, &lists); - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); igt_global_reset_unlock(i915); kernel_context_close(ctx); diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c index 716622266fa6..c3d19d88da40 100644 --- a/drivers/gpu/drm/i915/gvt/aperture_gm.c +++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c @@ -170,7 +170,7 @@ static void free_vgpu_fence(struct intel_vgpu *vgpu) if (WARN_ON(!vgpu_fence_sz(vgpu))) return; - intel_runtime_pm_get(dev_priv); + intel_runtime_pm_get(&dev_priv->runtime_pm); mutex_lock(&dev_priv->drm.struct_mutex); _clear_vgpu_fence(vgpu); @@ -181,17 +181,18 @@ static void free_vgpu_fence(struct intel_vgpu *vgpu) } mutex_unlock(&dev_priv->drm.struct_mutex); - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm); } static int alloc_vgpu_fence(struct intel_vgpu *vgpu) { struct intel_gvt *gvt = vgpu->gvt; struct drm_i915_private *dev_priv = gvt->dev_priv; + struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; struct i915_fence_reg *reg; int i; - intel_runtime_pm_get(dev_priv); + intel_runtime_pm_get(rpm); /* Request fences from host */ mutex_lock(&dev_priv->drm.struct_mutex); @@ -207,7 +208,7 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu) _clear_vgpu_fence(vgpu); mutex_unlock(&dev_priv->drm.struct_mutex); - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put_unchecked(rpm); return 0; out_free_fence: gvt_vgpu_err("Failed to alloc fences\n"); @@ -220,7 +221,7 @@ out_free_fence: vgpu->fence.regs[i] = NULL; } mutex_unlock(&dev_priv->drm.struct_mutex); - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put_unchecked(rpm); return -ENOSPC; } @@ -316,9 +317,9 @@ void intel_vgpu_reset_resource(struct intel_vgpu *vgpu) { struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; - intel_runtime_pm_get(dev_priv); + intel_runtime_pm_get(&dev_priv->runtime_pm); _clear_vgpu_fence(vgpu); - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm); } /** diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index dfd10cf82b65..7a1fe44d45af 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -584,12 +584,12 @@ enum { static inline void mmio_hw_access_pre(struct drm_i915_private *dev_priv) { - intel_runtime_pm_get(dev_priv); + intel_runtime_pm_get(&dev_priv->runtime_pm); } static inline void mmio_hw_access_post(struct drm_i915_private *dev_priv) { - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm); } /** diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c index 1c763a27a412..2369d4a9af94 100644 --- a/drivers/gpu/drm/i915/gvt/sched_policy.c +++ b/drivers/gpu/drm/i915/gvt/sched_policy.c @@ -465,7 +465,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu) scheduler->current_vgpu = NULL; } - intel_runtime_pm_get(dev_priv); + intel_runtime_pm_get(&dev_priv->runtime_pm); spin_lock_bh(&scheduler->mmio_context_lock); for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) { if (scheduler->engine_owner[ring_id] == 
vgpu) { @@ -474,6 +474,6 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu) } } spin_unlock_bh(&scheduler->mmio_context_lock); - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm); mutex_unlock(&vgpu->gvt->sched_lock); } diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 080cb740e028..8f0bd1e195d1 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -1501,11 +1501,11 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id, * as there is only one pre-allocated buf-obj for shadow. */ if (list_empty(workload_q_head(vgpu, ring_id))) { - intel_runtime_pm_get(dev_priv); + intel_runtime_pm_get(&dev_priv->runtime_pm); mutex_lock(&dev_priv->drm.struct_mutex); ret = intel_gvt_scan_and_shadow_workload(workload); mutex_unlock(&dev_priv->drm.struct_mutex); - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm); } if (ret) { diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 1fc5af4c1905..5ca41165fb78 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -490,7 +490,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data) intel_wakeref_t wakeref; int i, pipe; - wakeref = intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); if (IS_CHERRYVIEW(dev_priv)) { intel_wakeref_t pref; @@ -696,7 +696,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data) } } - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); return 0; } @@ -833,7 +833,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused) intel_wakeref_t wakeref; int ret = 0; - wakeref = intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); if (IS_GEN(dev_priv, 5)) { u16 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL); @@ -1045,7 +1045,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused) seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq); seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq); - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); return ret; } @@ -1391,7 +1391,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused) if (!HAS_FBC(dev_priv)) return -ENODEV; - wakeref = intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); mutex_lock(&fbc->lock); if (intel_fbc_is_active(dev_priv)) @@ -1418,7 +1418,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused) } mutex_unlock(&fbc->lock); - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); return 0; } @@ -1468,7 +1468,7 @@ static int i915_ips_status(struct seq_file *m, void *unused) if (!HAS_IPS(dev_priv)) return -ENODEV; - wakeref = intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); seq_printf(m, "Enabled by kernel parameter: %s\n", yesno(i915_modparams.enable_ips)); @@ -1482,7 +1482,7 @@ static int i915_ips_status(struct seq_file *m, void *unused) seq_puts(m, "Currently: disabled\n"); } - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); return 0; } @@ -1561,7 +1561,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused) seq_puts(m, "GPU freq (MHz)\tEffective CPU freq 
(MHz)\tEffective Ring freq (MHz)\n"); - wakeref = intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) { ia_freq = gpu_freq; sandybridge_pcode_read(dev_priv, @@ -1575,7 +1575,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused) ((ia_freq >> 0) & 0xff) * 100, ((ia_freq >> 8) & 0xff) * 100); } - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); return 0; } @@ -1752,7 +1752,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data) struct intel_uncore *uncore = &dev_priv->uncore; intel_wakeref_t wakeref; - wakeref = intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); seq_printf(m, "bit6 swizzle for X-tiling = %s\n", swizzle_string(dev_priv->mm.bit_6_swizzle_x)); @@ -1790,7 +1790,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data) if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) seq_puts(m, "L-shaped memory detected\n"); - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); return 0; } @@ -2303,7 +2303,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data) if (!psr->sink_support) return 0; - wakeref = intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); mutex_lock(&psr->lock); if (psr->enabled) @@ -2367,7 +2367,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data) unlock: mutex_unlock(&psr->lock); - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); return 0; } @@ -2384,11 +2384,11 @@ i915_edp_psr_debug_set(void *data, u64 val) DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val); - wakeref = intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); ret = intel_psr_debug_set(dev_priv, val); - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); return ret; } @@ -2504,7 +2504,7 @@ static int i915_dmc_info(struct seq_file *m, void *unused) csr = &dev_priv->csr; - wakeref = intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL)); seq_printf(m, "path: %s\n", csr->fw_path); @@ -2530,7 +2530,7 @@ out: seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE)); seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL)); - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); return 0; } @@ -2814,7 +2814,7 @@ static int i915_display_info(struct seq_file *m, void *unused) struct drm_connector_list_iter conn_iter; intel_wakeref_t wakeref; - wakeref = intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); seq_printf(m, "CRTC info\n"); seq_printf(m, "---------\n"); @@ -2863,7 +2863,7 @@ static int i915_display_info(struct seq_file *m, void *unused) drm_connector_list_iter_end(&conn_iter); mutex_unlock(&dev->mode_config.mutex); - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); return 0; } @@ -2876,7 +2876,7 @@ static int i915_engine_info(struct seq_file *m, void *unused) enum intel_engine_id id; struct drm_printer p; - wakeref = intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); seq_printf(m, "GT awake? 
%s [%d]\n", yesno(dev_priv->gt.awake), @@ -2888,7 +2888,7 @@ static int i915_engine_info(struct seq_file *m, void *unused) for_each_engine(engine, dev_priv, id) intel_engine_dump(engine, &p, "%s\n", engine->name); - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); return 0; } @@ -4051,7 +4051,8 @@ static int i915_forcewake_open(struct inode *inode, struct file *file) if (INTEL_GEN(i915) < 6) return 0; - file->private_data = (void *)(uintptr_t)intel_runtime_pm_get(i915); + file->private_data = + (void *)(uintptr_t)intel_runtime_pm_get(&i915->runtime_pm); intel_uncore_forcewake_user_get(&i915->uncore); return 0; @@ -4065,7 +4066,7 @@ static int i915_forcewake_release(struct inode *inode, struct file *file) return 0; intel_uncore_forcewake_user_put(&i915->uncore); - intel_runtime_pm_put(i915, + intel_runtime_pm_put(&i915->runtime_pm, (intel_wakeref_t)(uintptr_t)file->private_data); return 0; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 7232361973fd..9804e194fa7d 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -374,7 +374,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj, if (ret) return ret; - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE | PIN_NONFAULT | @@ -461,7 +461,7 @@ out_unpin: i915_vma_unpin(vma); } out_unlock: - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); return ret; @@ -561,6 +561,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj, { struct drm_i915_private *i915 = to_i915(obj->base.dev); struct i915_ggtt *ggtt = &i915->ggtt; + struct intel_runtime_pm *rpm = &i915->runtime_pm; intel_wakeref_t wakeref; struct drm_mm_node node; struct dma_fence *fence; @@ -581,14 +582,14 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj, * This easily dwarfs any performance advantage from * using the cache bypass of indirect GGTT access. 
*/ - wakeref = intel_runtime_pm_get_if_in_use(i915); + wakeref = intel_runtime_pm_get_if_in_use(rpm); if (!wakeref) { ret = -EFAULT; goto out_unlock; } } else { /* No backing pages, no fallback, we must force GGTT access */ - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(rpm); } vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, @@ -684,7 +685,7 @@ out_unpin: i915_vma_unpin(vma); } out_rpm: - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(rpm, wakeref); out_unlock: mutex_unlock(&i915->drm.struct_mutex); return ret; @@ -1174,7 +1175,7 @@ void i915_gem_sanitize(struct drm_i915_private *i915) GEM_TRACE("\n"); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL); /* @@ -1197,7 +1198,7 @@ void i915_gem_sanitize(struct drm_i915_private *i915) intel_gt_sanitize(i915, false); intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL); - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_lock(&i915->drm.struct_mutex); i915_gem_contexts_lost(i915); @@ -1815,7 +1816,7 @@ int i915_gem_freeze_late(struct drm_i915_private *i915) * the objects as well, see i915_gem_freeze() */ - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); i915_gem_shrink(i915, -1UL, NULL, ~0); i915_gem_drain_freed_objects(i915); @@ -1826,7 +1827,7 @@ int i915_gem_freeze_late(struct drm_i915_private *i915) i915_gem_object_unlock(obj); } - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); return 0; } diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c index fc4cf908afc3..3b35bb114b14 100644 --- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c +++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c @@ -270,7 +270,7 @@ static int fence_update(struct i915_fence_reg *fence, * be cleared before we can use any other fences to ensure that * the new fences do not overlap the elided clears, confusing HW. 
*/ - wakeref = intel_runtime_pm_get_if_in_use(fence->i915); + wakeref = intel_runtime_pm_get_if_in_use(&fence->i915->runtime_pm); if (!wakeref) { GEM_BUG_ON(vma); return 0; @@ -284,7 +284,7 @@ static int fence_update(struct i915_fence_reg *fence, list_move_tail(&fence->link, &fence->i915->ggtt.fence_list); } - intel_runtime_pm_put(fence->i915, wakeref); + intel_runtime_pm_put(&fence->i915->runtime_pm, wakeref); return 0; } diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 7be72388b052..057ea8303bce 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -1825,7 +1825,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm, unsigned int pde; bool flush = false; - wakeref = intel_runtime_pm_get(vm->i915); + wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm); spin_lock(&ppgtt->base.pd.lock); gen6_for_each_pde(pt, &ppgtt->base.pd, start, length, pde) { @@ -1868,12 +1868,12 @@ static int gen6_alloc_va_range(struct i915_address_space *vm, gen6_ggtt_invalidate(vm->i915); } - intel_runtime_pm_put(vm->i915, wakeref); + intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref); return 0; unwind_out: - intel_runtime_pm_put(vm->i915, wakeref); + intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref); gen6_ppgtt_clear_range(vm, from, start - from); return -ENOMEM; } diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index d92ddfada262..3d8162d28730 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -1375,7 +1375,7 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream) free_oa_buffer(dev_priv); intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); - intel_runtime_pm_put(dev_priv, stream->wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, stream->wakeref); if (stream->ctx) oa_put_render_ctx_id(stream); @@ -2112,7 +2112,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream, * In our case we are expecting that taking pm + FORCEWAKE * references will effectively disable RC6. 
*/ - stream->wakeref = intel_runtime_pm_get(dev_priv); + stream->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); ret = alloc_oa_buffer(dev_priv); @@ -2148,7 +2148,7 @@ err_oa_buf_alloc: put_oa_config(dev_priv, stream->oa_config); intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); - intel_runtime_pm_put(dev_priv, stream->wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, stream->wakeref); err_config: if (stream->ctx) diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index eb9c0e0e545c..507fcdb0f97e 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -171,7 +171,7 @@ engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns) wakeref = 0; if (READ_ONCE(dev_priv->gt.awake)) - wakeref = intel_runtime_pm_get_if_in_use(dev_priv); + wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm); if (!wakeref) return; @@ -207,7 +207,7 @@ engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns) } spin_unlock_irqrestore(&dev_priv->uncore.lock, flags); - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); } static void @@ -443,14 +443,15 @@ static u64 __get_rc6(struct drm_i915_private *i915) static u64 get_rc6(struct drm_i915_private *i915) { #if IS_ENABLED(CONFIG_PM) + struct intel_runtime_pm *rpm = &i915->runtime_pm; intel_wakeref_t wakeref; unsigned long flags; u64 val; - wakeref = intel_runtime_pm_get_if_in_use(i915); + wakeref = intel_runtime_pm_get_if_in_use(rpm); if (wakeref) { val = __get_rc6(i915); - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(rpm, wakeref); /* * If we are coming back from being runtime suspended we must @@ -469,8 +470,7 @@ static u64 get_rc6(struct drm_i915_private *i915) spin_unlock_irqrestore(&i915->pmu.lock, flags); } else { - struct pci_dev *pdev = i915->drm.pdev; - struct device *kdev = &pdev->dev; + struct device *kdev = rpm->kdev; /* * We are runtime suspended. 
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index 3ef07b987d40..75acbf686ec9 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -264,7 +264,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev, intel_wakeref_t wakeref; u32 freq; - wakeref = intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { vlv_punit_get(dev_priv); @@ -276,7 +276,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev, freq = intel_get_cagf(dev_priv, I915_READ(GEN6_RPSTAT1)); } - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); return snprintf(buf, PAGE_SIZE, "%d\n", intel_gpu_freq(dev_priv, freq)); } @@ -364,7 +364,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, if (ret) return ret; - wakeref = intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); mutex_lock(&rps->lock); val = intel_freq_opcode(dev_priv, val); @@ -392,7 +392,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, unlock: mutex_unlock(&rps->lock); - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); return ret ?: count; } @@ -420,7 +420,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, if (ret) return ret; - wakeref = intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); mutex_lock(&rps->lock); val = intel_freq_opcode(dev_priv, val); @@ -444,7 +444,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, unlock: mutex_unlock(&rps->lock); - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); return ret ?: count; } diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index e681ed99cdf2..0bf1e09acf22 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2112,7 +2112,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, * intel_runtime_pm_put(), so it is correct to wrap only the * pin/unpin/fence and not more. 
*/ - wakeref = intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); i915_gem_object_lock(obj); atomic_inc(&dev_priv->gpu_error.pending_fb_pin); @@ -2169,7 +2169,7 @@ err: atomic_dec(&dev_priv->gpu_error.pending_fb_pin); i915_gem_object_unlock(obj); - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); return vma; } @@ -13927,7 +13927,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore); intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref); } - intel_runtime_pm_put(dev_priv, intel_state->wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, intel_state->wakeref); /* * Defer the cleanup of the old state to a separate worker to not @@ -14006,7 +14006,7 @@ static int intel_atomic_commit(struct drm_device *dev, struct drm_i915_private *dev_priv = to_i915(dev); int ret = 0; - intel_state->wakeref = intel_runtime_pm_get(dev_priv); + intel_state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); drm_atomic_state_get(state); i915_sw_fence_init(&intel_state->commit_ready, @@ -14044,7 +14044,7 @@ static int intel_atomic_commit(struct drm_device *dev, if (ret) { DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret); i915_sw_fence_commit(&intel_state->commit_ready); - intel_runtime_pm_put(dev_priv, intel_state->wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, intel_state->wakeref); return ret; } @@ -14056,7 +14056,7 @@ static int intel_atomic_commit(struct drm_device *dev, i915_sw_fence_commit(&intel_state->commit_ready); drm_atomic_helper_cleanup_planes(dev, state); - intel_runtime_pm_put(dev_priv, intel_state->wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, intel_state->wakeref); return ret; } dev_priv->wm.distrust_bios_wm = false; diff --git a/drivers/gpu/drm/i915/intel_display_power.c b/drivers/gpu/drm/i915/intel_display_power.c index 3e52453189ef..c672c8080a93 100644 --- a/drivers/gpu/drm/i915/intel_display_power.c +++ b/drivers/gpu/drm/i915/intel_display_power.c @@ -1644,7 +1644,7 @@ intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv, goto out_verify; cancel_delayed_work(&power_domains->async_put_work); - intel_runtime_pm_put_raw(dev_priv, + intel_runtime_pm_put_raw(&dev_priv->runtime_pm, fetch_and_zero(&power_domains->async_put_wakeref)); out_verify: verify_async_put_domains_state(power_domains); @@ -1684,7 +1684,7 @@ intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain) { struct i915_power_domains *power_domains = &dev_priv->power_domains; - intel_wakeref_t wakeref = intel_runtime_pm_get(dev_priv); + intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); mutex_lock(&power_domains->lock); __intel_display_power_get_domain(dev_priv, domain); @@ -1713,7 +1713,7 @@ intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv, intel_wakeref_t wakeref; bool is_enabled; - wakeref = intel_runtime_pm_get_if_in_use(dev_priv); + wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm); if (!wakeref) return false; @@ -1729,7 +1729,7 @@ intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv, mutex_unlock(&power_domains->lock); if (!is_enabled) { - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); wakeref = 0; } @@ -1786,7 +1786,7 @@ void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv, enum intel_display_power_domain 
domain) { __intel_display_power_put(dev_priv, domain); - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm); } static void @@ -1806,6 +1806,7 @@ release_async_put_domains(struct i915_power_domains *power_domains, u64 mask) struct drm_i915_private *dev_priv = container_of(power_domains, struct drm_i915_private, power_domains); + struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; enum intel_display_power_domain domain; intel_wakeref_t wakeref; @@ -1814,8 +1815,8 @@ release_async_put_domains(struct i915_power_domains *power_domains, u64 mask) * wakeref to make the state checker happy about the HW access during * power well disabling. */ - assert_rpm_raw_wakeref_held(&dev_priv->runtime_pm); - wakeref = intel_runtime_pm_get(dev_priv); + assert_rpm_raw_wakeref_held(rpm); + wakeref = intel_runtime_pm_get(rpm); for_each_power_domain(domain, mask) { /* Clear before put, so put's sanity check is happy. */ @@ -1823,7 +1824,7 @@ release_async_put_domains(struct i915_power_domains *power_domains, u64 mask) __intel_display_power_put_domain(dev_priv, domain); } - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(rpm, wakeref); } static void @@ -1833,7 +1834,8 @@ intel_display_power_put_async_work(struct work_struct *work) container_of(work, struct drm_i915_private, power_domains.async_put_work.work); struct i915_power_domains *power_domains = &dev_priv->power_domains; - intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(dev_priv); + struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; + intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm); intel_wakeref_t old_work_wakeref = 0; mutex_lock(&power_domains->lock); @@ -1863,9 +1865,9 @@ out_verify: mutex_unlock(&power_domains->lock); if (old_work_wakeref) - intel_runtime_pm_put_raw(dev_priv, old_work_wakeref); + intel_runtime_pm_put_raw(rpm, old_work_wakeref); if (new_work_wakeref) - intel_runtime_pm_put_raw(dev_priv, new_work_wakeref); + intel_runtime_pm_put_raw(rpm, new_work_wakeref); } /** @@ -1883,7 +1885,8 @@ void __intel_display_power_put_async(struct drm_i915_private *i915, intel_wakeref_t wakeref) { struct i915_power_domains *power_domains = &i915->power_domains; - intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(i915); + struct intel_runtime_pm *rpm = &i915->runtime_pm; + intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm); mutex_lock(&power_domains->lock); @@ -1910,9 +1913,9 @@ out_verify: mutex_unlock(&power_domains->lock); if (work_wakeref) - intel_runtime_pm_put_raw(i915, work_wakeref); + intel_runtime_pm_put_raw(rpm, work_wakeref); - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(rpm, wakeref); } /** @@ -1948,7 +1951,7 @@ out_verify: mutex_unlock(&power_domains->lock); if (work_wakeref) - intel_runtime_pm_put_raw(i915, work_wakeref); + intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref); } /** @@ -1987,7 +1990,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, intel_wakeref_t wakeref) { __intel_display_power_put(dev_priv, domain); - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); } #endif @@ -4402,7 +4405,7 @@ void intel_power_domains_fini_hw(struct drm_i915_private *i915) intel_power_domains_verify_state(i915); /* Keep the power well enabled, but cancel its rpm wakeref. 
*/ - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); } /** diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index 0d3a6fa674e6..1edd44ee32b2 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c @@ -213,7 +213,7 @@ static int intelfb_create(struct drm_fb_helper *helper, } mutex_lock(&dev->struct_mutex); - wakeref = intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); /* Pin the GGTT vma for our access via info->screen_base. * This also validates that any existing fb inherited from the @@ -272,7 +272,7 @@ static int intelfb_create(struct drm_fb_helper *helper, ifbdev->vma = vma; ifbdev->vma_flags = flags; - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); mutex_unlock(&dev->struct_mutex); vga_switcheroo_client_fb_set(pdev, info); return 0; @@ -280,7 +280,7 @@ static int intelfb_create(struct drm_fb_helper *helper, out_unpin: intel_unpin_fb_vma(vma, flags); out_unlock: - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); mutex_unlock(&dev->struct_mutex); return ret; } diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c index ff9eb3c855d3..ea3de4acc850 100644 --- a/drivers/gpu/drm/i915/intel_hotplug.c +++ b/drivers/gpu/drm/i915/intel_hotplug.c @@ -230,7 +230,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work) intel_wakeref_t wakeref; enum hpd_pin pin; - wakeref = intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); spin_lock_irq(&dev_priv->irq_lock); for_each_hpd_pin(pin) { @@ -263,7 +263,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work) dev_priv->display.hpd_irq_setup(dev_priv); spin_unlock_irq(&dev_priv->irq_lock); - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); } bool intel_encoder_hotplug(struct intel_encoder *encoder, diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 3d9ea3498679..502c54428570 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -369,7 +369,7 @@ static intel_wakeref_t __intel_runtime_pm_get(struct intel_runtime_pm *rpm, /** * intel_runtime_pm_get_raw - grab a raw runtime pm reference - * @i915: i915 device instance + * @rpm: the intel_runtime_pm structure * * This is the unlocked version of intel_display_power_is_enabled() and should * only be used from error capture and recovery code where deadlocks are @@ -384,15 +384,14 @@ static intel_wakeref_t __intel_runtime_pm_get(struct intel_runtime_pm *rpm, * Returns: the wakeref cookie to pass to intel_runtime_pm_put_raw(), evaluates * as True if the wakeref was acquired, or False otherwise. */ - -intel_wakeref_t intel_runtime_pm_get_raw(struct drm_i915_private *i915) +intel_wakeref_t intel_runtime_pm_get_raw(struct intel_runtime_pm *rpm) { - return __intel_runtime_pm_get(&i915->runtime_pm, false); + return __intel_runtime_pm_get(rpm, false); } /** * intel_runtime_pm_get - grab a runtime pm reference - * @i915: i915 device instance + * @rpm: the intel_runtime_pm structure * * This function grabs a device-level runtime pm reference (mostly used for GEM * code to ensure the GTT or GT is on) and ensures that it is powered up. 
@@ -402,14 +401,14 @@ intel_wakeref_t intel_runtime_pm_get_raw(struct drm_i915_private *i915) * * Returns: the wakeref cookie to pass to intel_runtime_pm_put() */ -intel_wakeref_t intel_runtime_pm_get(struct drm_i915_private *i915) +intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm) { - return __intel_runtime_pm_get(&i915->runtime_pm, true); + return __intel_runtime_pm_get(rpm, true); } /** * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use - * @i915: i915 device instance + * @rpm: the intel_runtime_pm structure * * This function grabs a device-level runtime pm reference if the device is * already in use and ensures that it is powered up. It is illegal to try @@ -421,10 +420,8 @@ intel_wakeref_t intel_runtime_pm_get(struct drm_i915_private *i915) * Returns: the wakeref cookie to pass to intel_runtime_pm_put(), evaluates * as True if the wakeref was acquired, or False otherwise. */ -intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915) +intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm) { - struct intel_runtime_pm *rpm = &i915->runtime_pm; - if (IS_ENABLED(CONFIG_PM)) { /* * In cases runtime PM is disabled by the RPM core and we get @@ -443,7 +440,7 @@ intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915) /** * intel_runtime_pm_get_noresume - grab a runtime pm reference - * @i915: i915 device instance + * @rpm: the intel_runtime_pm structure * * This function grabs a device-level runtime pm reference (mostly used for GEM * code to ensure the GTT or GT is on). @@ -460,10 +457,8 @@ intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915) * * Returns: the wakeref cookie to pass to intel_runtime_pm_put() */ -intel_wakeref_t intel_runtime_pm_get_noresume(struct drm_i915_private *i915) +intel_wakeref_t intel_runtime_pm_get_noresume(struct intel_runtime_pm *rpm) { - struct intel_runtime_pm *rpm = &i915->runtime_pm; - assert_rpm_wakelock_held(rpm); pm_runtime_get_noresume(rpm->kdev); @@ -488,7 +483,7 @@ static void __intel_runtime_pm_put(struct intel_runtime_pm *rpm, /** * intel_runtime_pm_put_raw - release a raw runtime pm reference - * @i915: i915 device instance + * @rpm: the intel_runtime_pm structure * @wref: wakeref acquired for the reference that is being released * * This function drops the device-level runtime pm reference obtained by @@ -496,14 +491,14 @@ static void __intel_runtime_pm_put(struct intel_runtime_pm *rpm, * hardware block right away if this is the last reference. */ void -intel_runtime_pm_put_raw(struct drm_i915_private *i915, intel_wakeref_t wref) +intel_runtime_pm_put_raw(struct intel_runtime_pm *rpm, intel_wakeref_t wref) { - __intel_runtime_pm_put(&i915->runtime_pm, wref, false); + __intel_runtime_pm_put(rpm, wref, false); } /** * intel_runtime_pm_put_unchecked - release an unchecked runtime pm reference - * @i915: i915 device instance + * @rpm: the intel_runtime_pm structure * * This function drops the device-level runtime pm reference obtained by * intel_runtime_pm_get() and might power down the corresponding @@ -513,24 +508,24 @@ intel_runtime_pm_put_raw(struct drm_i915_private *i915, intel_wakeref_t wref) * new code, as the correctness of its use cannot be checked. Always use * intel_runtime_pm_put() instead. 
*/ -void intel_runtime_pm_put_unchecked(struct drm_i915_private *i915) +void intel_runtime_pm_put_unchecked(struct intel_runtime_pm *rpm) { - __intel_runtime_pm_put(&i915->runtime_pm, -1, true); + __intel_runtime_pm_put(rpm, -1, true); } #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) /** * intel_runtime_pm_put - release a runtime pm reference - * @i915: i915 device instance + * @rpm: the intel_runtime_pm structure * @wref: wakeref acquired for the reference that is being released * * This function drops the device-level runtime pm reference obtained by * intel_runtime_pm_get() and might power down the corresponding * hardware block right away if this is the last reference. */ -void intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref) +void intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref) { - __intel_runtime_pm_put(&i915->runtime_pm, wref, true); + __intel_runtime_pm_put(rpm, wref, true); } #endif diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.h b/drivers/gpu/drm/i915/intel_runtime_pm.h index 0890e698f196..f6445ca5bbf1 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.h +++ b/drivers/gpu/drm/i915/intel_runtime_pm.h @@ -174,30 +174,30 @@ void intel_runtime_pm_enable(struct intel_runtime_pm *rpm); void intel_runtime_pm_disable(struct intel_runtime_pm *rpm); void intel_runtime_pm_cleanup(struct intel_runtime_pm *rpm); -intel_wakeref_t intel_runtime_pm_get(struct drm_i915_private *i915); -intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915); -intel_wakeref_t intel_runtime_pm_get_noresume(struct drm_i915_private *i915); -intel_wakeref_t intel_runtime_pm_get_raw(struct drm_i915_private *i915); +intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm); +intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm); +intel_wakeref_t intel_runtime_pm_get_noresume(struct intel_runtime_pm *rpm); +intel_wakeref_t intel_runtime_pm_get_raw(struct intel_runtime_pm *rpm); #define with_intel_runtime_pm(i915, wf) \ - for ((wf) = intel_runtime_pm_get(i915); (wf); \ - intel_runtime_pm_put((i915), (wf)), (wf) = 0) + for ((wf) = intel_runtime_pm_get(&(i915)->runtime_pm); (wf); \ + intel_runtime_pm_put(&(i915)->runtime_pm, (wf)), (wf) = 0) #define with_intel_runtime_pm_if_in_use(i915, wf) \ - for ((wf) = intel_runtime_pm_get_if_in_use(i915); (wf); \ - intel_runtime_pm_put((i915), (wf)), (wf) = 0) + for ((wf) = intel_runtime_pm_get_if_in_use(&(i915)->runtime_pm); (wf); \ + intel_runtime_pm_put(&(i915)->runtime_pm, (wf)), (wf) = 0) -void intel_runtime_pm_put_unchecked(struct drm_i915_private *i915); +void intel_runtime_pm_put_unchecked(struct intel_runtime_pm *rpm); #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) -void intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref); +void intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref); #else static inline void -intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref) +intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref) { - intel_runtime_pm_put_unchecked(i915); + intel_runtime_pm_put_unchecked(rpm); } #endif -void intel_runtime_pm_put_raw(struct drm_i915_private *i915, intel_wakeref_t wref); +void intel_runtime_pm_put_raw(struct intel_runtime_pm *rpm, intel_wakeref_t wref); #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) void print_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm, diff --git a/drivers/gpu/drm/i915/intel_wakeref.c b/drivers/gpu/drm/i915/intel_wakeref.c index 
b6c7167ce154..b677ae893d6f 100644 --- a/drivers/gpu/drm/i915/intel_wakeref.c +++ b/drivers/gpu/drm/i915/intel_wakeref.c @@ -9,14 +9,14 @@ static void rpm_get(struct drm_i915_private *i915, struct intel_wakeref *wf) { - wf->wakeref = intel_runtime_pm_get(i915); + wf->wakeref = intel_runtime_pm_get(&i915->runtime_pm); } static void rpm_put(struct drm_i915_private *i915, struct intel_wakeref *wf) { intel_wakeref_t wakeref = fetch_and_zero(&wf->wakeref); - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); GEM_BUG_ON(!wakeref); } @@ -86,7 +86,7 @@ static void wakeref_auto_timeout(struct timer_list *t) wakeref = fetch_and_zero(&wf->wakeref); spin_unlock_irqrestore(&wf->lock, flags); - intel_runtime_pm_put(wf->i915, wakeref); + intel_runtime_pm_put(&wf->i915->runtime_pm, wakeref); } void intel_wakeref_auto_init(struct intel_wakeref_auto *wf, @@ -116,7 +116,7 @@ void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout) spin_lock_irqsave(&wf->lock, flags); if (!refcount_inc_not_zero(&wf->count)) { GEM_BUG_ON(wf->wakeref); - wf->wakeref = intel_runtime_pm_get_if_in_use(wf->i915); + wf->wakeref = intel_runtime_pm_get_if_in_use(&wf->i915->runtime_pm); refcount_set(&wf->count, 1); } spin_unlock_irqrestore(&wf->lock, flags); diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c index cc1ca4be1a00..c0b3537a5fa6 100644 --- a/drivers/gpu/drm/i915/selftests/i915_active.c +++ b/drivers/gpu/drm/i915/selftests/i915_active.c @@ -97,7 +97,7 @@ static int live_active_wait(void *arg) /* Check that we get a callback when requests retire upon waiting */ mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); err = __live_active_setup(i915, &active); @@ -111,7 +111,7 @@ static int live_active_wait(void *arg) if (igt_flush_test(i915, I915_WAIT_LOCKED)) err = -EIO; - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; } @@ -126,7 +126,7 @@ static int live_active_retire(void *arg) /* Check that we get a callback when requests are indirectly retired */ mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); err = __live_active_setup(i915, &active); @@ -140,7 +140,7 @@ static int live_active_retire(void *arg) } i915_active_fini(&active.base); - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; } diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c index 83643929416c..23a54da47ca5 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem.c @@ -63,7 +63,7 @@ static void simulate_hibernate(struct drm_i915_private *i915) { intel_wakeref_t wakeref; - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); /* * As a final sting in the tail, invalidate stolen. 
Under a real S4, @@ -74,7 +74,7 @@ static void simulate_hibernate(struct drm_i915_private *i915) */ trash_stolen(i915); - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); } static int pm_prepare(struct drm_i915_private *i915) diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c index 71c1363ad536..e5f67474dcb4 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c @@ -404,7 +404,7 @@ static int igt_evict_contexts(void *arg) return 0; mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); /* Reserve a block so that we know we have enough to fit a few rq */ memset(&hole, 0, sizeof(hole)); @@ -515,7 +515,7 @@ out_locked: } if (drm_mm_node_allocated(&hole)) drm_mm_remove_node(&hole); - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c index 2093d08a7569..1a60b9fe8221 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c @@ -295,9 +295,9 @@ static int lowlevel_hole(struct drm_i915_private *i915, mock_vma.node.size = BIT_ULL(size); mock_vma.node.start = addr; - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); vm->insert_entries(vm, &mock_vma, I915_CACHE_NONE, 0); - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); } count = n; @@ -1171,7 +1171,7 @@ static int igt_ggtt_page(void *arg) if (err) goto out_unpin; - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); for (n = 0; n < count; n++) { u64 offset = tmp.start + n * PAGE_SIZE; @@ -1218,7 +1218,7 @@ static int igt_ggtt_page(void *arg) kfree(order); out_remove: ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size); - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); drm_mm_remove_node(&tmp); out_unpin: i915_gem_object_unpin_pages(obj); diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index 11278bac3a24..dd4e72eafc6c 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -537,7 +537,7 @@ static int live_nop_request(void *arg) */ mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); for_each_engine(engine, i915, id) { struct i915_request *request = NULL; @@ -597,7 +597,7 @@ static int live_nop_request(void *arg) } out_unlock: - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; } @@ -682,7 +682,7 @@ static int live_empty_request(void *arg) */ mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); batch = empty_batch(i915); if (IS_ERR(batch)) { @@ -746,7 +746,7 @@ out_batch: i915_vma_unpin(batch); i915_vma_put(batch); out_unlock: - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; } @@ -839,7 +839,7 @@ static int live_all_engines(void *arg) */ 
mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); err = igt_live_test_begin(&t, i915, __func__, ""); if (err) @@ -919,7 +919,7 @@ out_request: i915_vma_unpin(batch); i915_vma_put(batch); out_unlock: - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; } @@ -942,7 +942,7 @@ static int live_sequential_engines(void *arg) */ mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); err = igt_live_test_begin(&t, i915, __func__, ""); if (err) @@ -1048,7 +1048,7 @@ out_request: i915_request_put(request[id]); } out_unlock: - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; } @@ -1113,7 +1113,7 @@ static int live_breadcrumbs_smoketest(void *arg) * On real hardware this time. */ - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); file = mock_file(i915); if (IS_ERR(file)) { @@ -1220,7 +1220,7 @@ out_threads: out_file: mock_file_free(i915, file); out_rpm: - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); return ret; } diff --git a/drivers/gpu/drm/i915/selftests/i915_timeline.c b/drivers/gpu/drm/i915/selftests/i915_timeline.c index acb2cc5136b7..724bf3650b3e 100644 --- a/drivers/gpu/drm/i915/selftests/i915_timeline.c +++ b/drivers/gpu/drm/i915/selftests/i915_timeline.c @@ -515,7 +515,7 @@ static int live_hwsp_engine(void *arg) return -ENOMEM; mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); count = 0; for_each_engine(engine, i915, id) { @@ -558,7 +558,7 @@ out: i915_timeline_put(tl); } - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); kvfree(timelines); @@ -591,7 +591,7 @@ static int live_hwsp_alternate(void *arg) return -ENOMEM; mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); count = 0; for (n = 0; n < NUM_TIMELINES; n++) { @@ -634,7 +634,7 @@ out: i915_timeline_put(tl); } - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); kvfree(timelines); @@ -658,7 +658,7 @@ static int live_hwsp_wrap(void *arg) */ mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); tl = i915_timeline_create(i915, NULL); if (IS_ERR(tl)) { @@ -749,7 +749,7 @@ out: out_free: i915_timeline_put(tl); out_rpm: - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; @@ -771,7 +771,7 @@ static int live_hwsp_recycle(void *arg) */ mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); count = 0; for_each_engine(engine, i915, id) { @@ -825,7 +825,7 @@ static int live_hwsp_recycle(void *arg) out: if (igt_flush_test(i915, I915_WAIT_LOCKED)) err = -EIO; - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c 
index a166d9405a94..fbc79b14823a 100644 --- a/drivers/gpu/drm/i915/selftests/i915_vma.c +++ b/drivers/gpu/drm/i915/selftests/i915_vma.c @@ -873,7 +873,7 @@ static int igt_vma_remapped_gtt(void *arg) mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); for (t = types; *t; t++) { for (p = planes; p->width; p++) { @@ -965,7 +965,7 @@ static int igt_vma_remapped_gtt(void *arg) } out: - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); i915_gem_object_put(obj); diff --git a/drivers/gpu/drm/i915/selftests/intel_guc.c b/drivers/gpu/drm/i915/selftests/intel_guc.c index 7fd0321e0947..6ca8584cd64c 100644 --- a/drivers/gpu/drm/i915/selftests/intel_guc.c +++ b/drivers/gpu/drm/i915/selftests/intel_guc.c @@ -144,7 +144,7 @@ static int igt_guc_clients(void *args) GEM_BUG_ON(!HAS_GUC(dev_priv)); mutex_lock(&dev_priv->drm.struct_mutex); - wakeref = intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); guc = &dev_priv->guc; if (!guc) { @@ -227,7 +227,7 @@ out: guc_clients_create(guc); guc_clients_enable(guc); unlock: - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); mutex_unlock(&dev_priv->drm.struct_mutex); return err; } @@ -247,7 +247,7 @@ static int igt_guc_doorbells(void *arg) GEM_BUG_ON(!HAS_GUC(dev_priv)); mutex_lock(&dev_priv->drm.struct_mutex); - wakeref = intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); guc = &dev_priv->guc; if (!guc) { @@ -340,7 +340,7 @@ out: guc_client_free(clients[i]); } unlock: - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); mutex_unlock(&dev_priv->drm.struct_mutex); return err; } diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c index e0d7ebecb215..86815c6072a1 100644 --- a/drivers/gpu/drm/i915/selftests/intel_uncore.c +++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c @@ -176,7 +176,7 @@ static int live_forcewake_ops(void *arg) return 0; } - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); for_each_fw_domain(domain, uncore, tmp) { smp_store_mb(domain->active, false); @@ -247,7 +247,7 @@ static int live_forcewake_ops(void *arg) } out_rpm: - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); return err; } -- cgit v1.2.3 From c447ff7db34807082dcabbdcbbba2445b49211d9 Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Thu, 13 Jun 2019 16:21:55 -0700 Subject: drm/i915: update with_intel_runtime_pm to use the rpm structure Matching the underlying get/put functions. 
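As an editorial illustration (not part of the patch), a caller written against the
updated macro now hands in the runtime_pm struct directly. This is a minimal sketch:
dev_priv is assumed to be the usual struct drm_i915_private pointer, and the
GEN6_RPSTAT1 read is only a hypothetical stand-in for whatever hardware access the
block protects:

	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	intel_wakeref_t wakeref;
	u32 val = 0;

	with_intel_runtime_pm(rpm, wakeref) {
		/* the device is held awake for the duration of the block */
		val = intel_uncore_read(&dev_priv->uncore, GEN6_RPSTAT1);
	}

Per the macro definition in this patch, the wakeref cookie taken by
intel_runtime_pm_get() on entry is handed back to intel_runtime_pm_put() when the
block exits, so the macro mirrors the open-coded get/put call sites converted
earlier in the series.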
Signed-off-by: Daniele Ceraolo Spurio Cc: Imre Deak Reviewed-by: Chris Wilson Acked-by: Imre Deak Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190613232156.34940-8-daniele.ceraolospurio@intel.com --- drivers/gpu/drm/i915/gem/i915_gem_shrinker.c | 8 ++++---- .../gpu/drm/i915/gem/selftests/i915_gem_context.c | 6 +++--- drivers/gpu/drm/i915/gt/intel_context.c | 2 +- drivers/gpu/drm/i915/gt/intel_reset.c | 2 +- drivers/gpu/drm/i915/gt/selftest_reset.c | 2 +- drivers/gpu/drm/i915/gt/selftest_workarounds.c | 4 ++-- drivers/gpu/drm/i915/i915_debugfs.c | 24 +++++++++++----------- drivers/gpu/drm/i915/i915_gem.c | 2 +- drivers/gpu/drm/i915/i915_gem_gtt.c | 8 ++++---- drivers/gpu/drm/i915/i915_pmu.c | 3 ++- drivers/gpu/drm/i915/i915_sysfs.c | 2 +- drivers/gpu/drm/i915/intel_guc_log.c | 6 +++--- drivers/gpu/drm/i915/intel_huc.c | 2 +- drivers/gpu/drm/i915/intel_panel.c | 2 +- drivers/gpu/drm/i915/intel_pm.c | 8 ++++---- drivers/gpu/drm/i915/intel_runtime_pm.h | 12 +++++------ drivers/gpu/drm/i915/intel_uc.c | 2 +- drivers/gpu/drm/i915/intel_uncore.c | 2 +- drivers/gpu/drm/i915/selftests/i915_gem.c | 6 +++--- drivers/gpu/drm/i915/selftests/i915_gem_evict.c | 2 +- drivers/gpu/drm/i915/selftests/i915_request.c | 2 +- 21 files changed, 54 insertions(+), 53 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c index 13ff05566a0a..a521f23c18ad 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c @@ -297,7 +297,7 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *i915) intel_wakeref_t wakeref; unsigned long freed = 0; - with_intel_runtime_pm(i915, wakeref) { + with_intel_runtime_pm(&i915->runtime_pm, wakeref) { freed = i915_gem_shrink(i915, -1UL, NULL, I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | @@ -358,7 +358,7 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc) if (sc->nr_scanned < sc->nr_to_scan && current_is_kswapd()) { intel_wakeref_t wakeref; - with_intel_runtime_pm(i915, wakeref) { + with_intel_runtime_pm(&i915->runtime_pm, wakeref) { freed += i915_gem_shrink(i915, sc->nr_to_scan - sc->nr_scanned, &sc->nr_scanned, @@ -385,7 +385,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr) unsigned long flags; freed_pages = 0; - with_intel_runtime_pm(i915, wakeref) + with_intel_runtime_pm(&i915->runtime_pm, wakeref) freed_pages += i915_gem_shrink(i915, -1UL, NULL, I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | @@ -433,7 +433,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr MAX_SCHEDULE_TIMEOUT)) goto out; - with_intel_runtime_pm(i915, wakeref) + with_intel_runtime_pm(&i915->runtime_pm, wakeref) freed_pages += i915_gem_shrink(i915, -1UL, NULL, I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c index b85a4a43536a..03ac5003abf1 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c @@ -527,7 +527,7 @@ static int igt_ctx_exec(void *arg) } } - with_intel_runtime_pm(i915, wakeref) + with_intel_runtime_pm(&i915->runtime_pm, wakeref) err = gpu_fill(obj, ctx, engine, dw); if (err) { pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? 
%s], err=%d\n", @@ -647,7 +647,7 @@ static int igt_shared_ctx_exec(void *arg) } err = 0; - with_intel_runtime_pm(i915, wakeref) + with_intel_runtime_pm(&i915->runtime_pm, wakeref) err = gpu_fill(obj, ctx, engine, dw); if (err) { pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n", @@ -1230,7 +1230,7 @@ static int igt_ctx_readonly(void *arg) } err = 0; - with_intel_runtime_pm(i915, wakeref) + with_intel_runtime_pm(&i915->runtime_pm, wakeref) err = gpu_fill(obj, ctx, engine, dw); if (err) { pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n", diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index c78ec0b58e77..285e869713e8 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -54,7 +54,7 @@ int __intel_context_do_pin(struct intel_context *ce) intel_wakeref_t wakeref; err = 0; - with_intel_runtime_pm(ce->engine->i915, wakeref) + with_intel_runtime_pm(&ce->engine->i915->runtime_pm, wakeref) err = ce->ops->pin(ce); if (err) goto err; diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index 9e7d400c083a..6e6807a3f748 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -851,7 +851,7 @@ void i915_gem_set_wedged(struct drm_i915_private *i915) intel_wakeref_t wakeref; mutex_lock(&error->wedge_mutex); - with_intel_runtime_pm(i915, wakeref) + with_intel_runtime_pm(&i915->runtime_pm, wakeref) __i915_gem_set_wedged(i915); mutex_unlock(&error->wedge_mutex); } diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c index 91f0349882fc..89da9e7cc1ba 100644 --- a/drivers/gpu/drm/i915/gt/selftest_reset.c +++ b/drivers/gpu/drm/i915/gt/selftest_reset.c @@ -111,7 +111,7 @@ int intel_reset_live_selftests(struct drm_i915_private *i915) if (i915_terminally_wedged(i915)) return -EIO; /* we're long past hope of a successful reset */ - with_intel_runtime_pm(i915, wakeref) + with_intel_runtime_pm(&i915->runtime_pm, wakeref) err = i915_subtests(tests, i915); return err; diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c index 8891e1a8ac16..91449d5157f6 100644 --- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c +++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c @@ -256,7 +256,7 @@ switch_to_scratch_context(struct intel_engine_cs *engine, GEM_BUG_ON(i915_gem_context_is_bannable(ctx)); rq = ERR_PTR(-ENODEV); - with_intel_runtime_pm(engine->i915, wakeref) + with_intel_runtime_pm(&engine->i915->runtime_pm, wakeref) rq = igt_spinner_create_request(spin, ctx, engine, MI_NOOP); kernel_context_close(ctx); @@ -312,7 +312,7 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine, if (err) goto out; - with_intel_runtime_pm(i915, wakeref) + with_intel_runtime_pm(&i915->runtime_pm, wakeref) err = reset(engine); igt_spinner_end(&spin); diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 5ca41165fb78..f58486820823 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -769,7 +769,7 @@ static int i915_gpu_info_open(struct inode *inode, struct file *file) intel_wakeref_t wakeref; gpu = NULL; - with_intel_runtime_pm(i915, wakeref) + with_intel_runtime_pm(&i915->runtime_pm, wakeref) gpu = i915_capture_gpu_state(i915); if (IS_ERR(gpu)) return PTR_ERR(gpu); @@ -1097,7 +1097,7 @@ 
static int i915_hangcheck_info(struct seq_file *m, void *unused) return 0; } - with_intel_runtime_pm(dev_priv, wakeref) { + with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) { for_each_engine(engine, dev_priv, id) acthd[id] = intel_engine_get_active_head(engine); @@ -1357,7 +1357,7 @@ static int i915_drpc_info(struct seq_file *m, void *unused) intel_wakeref_t wakeref; int err = -ENODEV; - with_intel_runtime_pm(dev_priv, wakeref) { + with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) { if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) err = vlv_drpc_info(m); else if (INTEL_GEN(dev_priv) >= 6) @@ -1524,7 +1524,7 @@ static int i915_emon_status(struct seq_file *m, void *unused) if (!IS_GEN(i915, 5)) return -ENODEV; - with_intel_runtime_pm(i915, wakeref) { + with_intel_runtime_pm(&i915->runtime_pm, wakeref) { unsigned long temp, chipset, gfx; temp = i915_mch_val(i915); @@ -1816,7 +1816,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data) u32 act_freq = rps->cur_freq; intel_wakeref_t wakeref; - with_intel_runtime_pm_if_in_use(dev_priv, wakeref) { + with_intel_runtime_pm_if_in_use(&dev_priv->runtime_pm, wakeref) { if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { vlv_punit_get(dev_priv); act_freq = vlv_punit_read(dev_priv, @@ -1899,7 +1899,7 @@ static int i915_huc_load_status_info(struct seq_file *m, void *data) p = drm_seq_file_printer(m); intel_uc_fw_dump(&dev_priv->huc.fw, &p); - with_intel_runtime_pm(dev_priv, wakeref) + with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2)); return 0; @@ -1917,7 +1917,7 @@ static int i915_guc_load_status_info(struct seq_file *m, void *data) p = drm_seq_file_printer(m); intel_uc_fw_dump(&dev_priv->guc.fw, &p); - with_intel_runtime_pm(dev_priv, wakeref) { + with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) { u32 tmp = I915_READ(GUC_STATUS); u32 i; @@ -2423,7 +2423,7 @@ static int i915_energy_uJ(struct seq_file *m, void *data) return -ENODEV; units = (power & 0x1f00) >> 8; - with_intel_runtime_pm(dev_priv, wakeref) + with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) power = I915_READ(MCH_SECP_NRG_STTS); power = (1000000 * power) >> units; /* convert to uJ */ @@ -3009,7 +3009,7 @@ static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf, if (ret < 0) return ret; - with_intel_runtime_pm(dev_priv, wakeref) { + with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) { if (!dev_priv->ipc_enabled && enable) DRM_INFO("Enabling IPC: WM will be proper only after next commit\n"); dev_priv->wm.distrust_bios_wm = true; @@ -3761,7 +3761,7 @@ i915_cache_sharing_get(void *data, u64 *val) if (!(IS_GEN_RANGE(dev_priv, 6, 7))) return -ENODEV; - with_intel_runtime_pm(dev_priv, wakeref) + with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT; @@ -3782,7 +3782,7 @@ i915_cache_sharing_set(void *data, u64 val) return -EINVAL; DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val); - with_intel_runtime_pm(dev_priv, wakeref) { + with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) { u32 snpcr; /* Update the cache sharing policy here as well */ @@ -4028,7 +4028,7 @@ static int i915_sseu_status(struct seq_file *m, void *unused) sseu.max_eus_per_subslice = RUNTIME_INFO(dev_priv)->sseu.max_eus_per_subslice; - with_intel_runtime_pm(dev_priv, wakeref) { + with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) { if (IS_CHERRYVIEW(dev_priv)) 
cherryview_sseu_device_status(dev_priv, &sseu); else if (IS_BROADWELL(dev_priv)) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 9804e194fa7d..217236596202 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -262,7 +262,7 @@ void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv) i915_gem_chipset_flush(dev_priv); - with_intel_runtime_pm(dev_priv, wakeref) { + with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) { struct intel_uncore *uncore = &dev_priv->uncore; spin_lock_irq(&uncore->lock); diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 057ea8303bce..278de04a96aa 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -2619,7 +2619,7 @@ static int ggtt_bind_vma(struct i915_vma *vma, if (i915_gem_object_is_readonly(obj)) pte_flags |= PTE_READ_ONLY; - with_intel_runtime_pm(i915, wakeref) + with_intel_runtime_pm(&i915->runtime_pm, wakeref) vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags); vma->page_sizes.gtt = I915_GTT_PAGE_SIZE; @@ -2639,7 +2639,7 @@ static void ggtt_unbind_vma(struct i915_vma *vma) struct drm_i915_private *i915 = vma->vm->i915; intel_wakeref_t wakeref; - with_intel_runtime_pm(i915, wakeref) + with_intel_runtime_pm(&i915->runtime_pm, wakeref) vma->vm->clear_range(vma->vm, vma->node.start, vma->size); } @@ -2674,7 +2674,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma, if (flags & I915_VMA_GLOBAL_BIND) { intel_wakeref_t wakeref; - with_intel_runtime_pm(i915, wakeref) { + with_intel_runtime_pm(&i915->runtime_pm, wakeref) { vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags); } @@ -2691,7 +2691,7 @@ static void aliasing_gtt_unbind_vma(struct i915_vma *vma) struct i915_address_space *vm = vma->vm; intel_wakeref_t wakeref; - with_intel_runtime_pm(i915, wakeref) + with_intel_runtime_pm(&i915->runtime_pm, wakeref) vm->clear_range(vm, vma->node.start, vma->size); } diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index 507fcdb0f97e..8fe46ee920a0 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -227,7 +227,8 @@ frequency_sample(struct drm_i915_private *dev_priv, unsigned int period_ns) if (dev_priv->gt.awake) { intel_wakeref_t wakeref; - with_intel_runtime_pm_if_in_use(dev_priv, wakeref) { + with_intel_runtime_pm_if_in_use(&dev_priv->runtime_pm, + wakeref) { val = intel_uncore_read_notrace(&dev_priv->uncore, GEN6_RPSTAT1); val = intel_get_cagf(dev_priv, val); diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index 75acbf686ec9..ecac1c386109 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -48,7 +48,7 @@ static u32 calc_residency(struct drm_i915_private *dev_priv, intel_wakeref_t wakeref; u64 res = 0; - with_intel_runtime_pm(dev_priv, wakeref) + with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) res = intel_rc6_residency_us(dev_priv, reg); return DIV_ROUND_CLOSEST_ULL(res, 1000); diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c index 4f9c53681f92..bf1446629703 100644 --- a/drivers/gpu/drm/i915/intel_guc_log.c +++ b/drivers/gpu/drm/i915/intel_guc_log.c @@ -436,7 +436,7 @@ static void guc_log_capture_logs(struct intel_guc_log *log) * Generally device is expected to be active only at this * time, so get/put should be really quick. 
*/ - with_intel_runtime_pm(dev_priv, wakeref) + with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) guc_action_flush_log_complete(guc); } @@ -515,7 +515,7 @@ int intel_guc_log_set_level(struct intel_guc_log *log, u32 level) if (log->level == level) goto out_unlock; - with_intel_runtime_pm(dev_priv, wakeref) + with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) ret = guc_action_control_log(guc, GUC_LOG_LEVEL_IS_VERBOSE(level), GUC_LOG_LEVEL_IS_ENABLED(level), @@ -600,7 +600,7 @@ void intel_guc_log_relay_flush(struct intel_guc_log *log) */ flush_work(&log->relay.flush_work); - with_intel_runtime_pm(i915, wakeref) + with_intel_runtime_pm(&i915->runtime_pm, wakeref) guc_action_flush_log(guc); /* GuC would have updated log buffer by now, so capture it */ diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c index 8572a0588efc..fb6f693d3cac 100644 --- a/drivers/gpu/drm/i915/intel_huc.c +++ b/drivers/gpu/drm/i915/intel_huc.c @@ -174,7 +174,7 @@ int intel_huc_check_status(struct intel_huc *huc) if (!HAS_HUC(dev_priv)) return -ENODEV; - with_intel_runtime_pm(dev_priv, wakeref) + with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) status = (I915_READ(huc->status.reg) & huc->status.mask) == huc->status.value; diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 9cd4e37e3934..39d742094065 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -1288,7 +1288,7 @@ static int intel_backlight_device_get_brightness(struct backlight_device *bd) intel_wakeref_t wakeref; int ret = 0; - with_intel_runtime_pm(dev_priv, wakeref) { + with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) { u32 hw_level; drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 2c7f3ebc0117..b03e2a467e8b 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -8167,7 +8167,7 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv) if (!IS_GEN(dev_priv, 5)) return 0; - with_intel_runtime_pm(dev_priv, wakeref) { + with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) { spin_lock_irq(&mchdev_lock); val = __i915_chipset_val(dev_priv); spin_unlock_irq(&mchdev_lock); @@ -8253,7 +8253,7 @@ void i915_update_gfx_val(struct drm_i915_private *dev_priv) if (!IS_GEN(dev_priv, 5)) return; - with_intel_runtime_pm(dev_priv, wakeref) { + with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) { spin_lock_irq(&mchdev_lock); __i915_update_gfx_val(dev_priv); spin_unlock_irq(&mchdev_lock); @@ -8305,7 +8305,7 @@ unsigned long i915_gfx_val(struct drm_i915_private *dev_priv) if (!IS_GEN(dev_priv, 5)) return 0; - with_intel_runtime_pm(dev_priv, wakeref) { + with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) { spin_lock_irq(&mchdev_lock); val = __i915_gfx_val(dev_priv); spin_unlock_irq(&mchdev_lock); @@ -8346,7 +8346,7 @@ unsigned long i915_read_mch_val(void) if (!i915) return 0; - with_intel_runtime_pm(i915, wakeref) { + with_intel_runtime_pm(&i915->runtime_pm, wakeref) { spin_lock_irq(&mchdev_lock); chipset_val = __i915_chipset_val(i915); graphics_val = __i915_gfx_val(i915); diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.h b/drivers/gpu/drm/i915/intel_runtime_pm.h index f6445ca5bbf1..f2d6299a8161 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.h +++ b/drivers/gpu/drm/i915/intel_runtime_pm.h @@ -179,13 +179,13 @@ intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm); intel_wakeref_t 
intel_runtime_pm_get_noresume(struct intel_runtime_pm *rpm); intel_wakeref_t intel_runtime_pm_get_raw(struct intel_runtime_pm *rpm); -#define with_intel_runtime_pm(i915, wf) \ - for ((wf) = intel_runtime_pm_get(&(i915)->runtime_pm); (wf); \ - intel_runtime_pm_put(&(i915)->runtime_pm, (wf)), (wf) = 0) +#define with_intel_runtime_pm(rpm, wf) \ + for ((wf) = intel_runtime_pm_get(rpm); (wf); \ + intel_runtime_pm_put((rpm), (wf)), (wf) = 0) -#define with_intel_runtime_pm_if_in_use(i915, wf) \ - for ((wf) = intel_runtime_pm_get_if_in_use(&(i915)->runtime_pm); (wf); \ - intel_runtime_pm_put(&(i915)->runtime_pm, (wf)), (wf) = 0) +#define with_intel_runtime_pm_if_in_use(rpm, wf) \ + for ((wf) = intel_runtime_pm_get_if_in_use(rpm); (wf); \ + intel_runtime_pm_put((rpm), (wf)), (wf) = 0) void intel_runtime_pm_put_unchecked(struct intel_runtime_pm *rpm); #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c index a8e7f0ba7c3b..ae45651ac73c 100644 --- a/drivers/gpu/drm/i915/intel_uc.c +++ b/drivers/gpu/drm/i915/intel_uc.c @@ -537,7 +537,7 @@ void intel_uc_suspend(struct drm_i915_private *i915) if (!intel_guc_is_loaded(guc)) return; - with_intel_runtime_pm(i915, wakeref) + with_intel_runtime_pm(&i915->runtime_pm, wakeref) intel_uc_runtime_suspend(i915); } diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 5eb1d130fc2e..da33aa672c3d 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -1702,7 +1702,7 @@ int i915_reg_read_ioctl(struct drm_device *dev, flags = reg->offset & (entry->size - 1); - with_intel_runtime_pm(i915, wakeref) { + with_intel_runtime_pm(&i915->runtime_pm, wakeref) { if (entry->size == 8 && flags == I915_REG_READ_8B_WA) reg->val = intel_uncore_read64_2x32(uncore, entry->offset_ldw, diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c index 23a54da47ca5..c6a01a6e87f1 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem.c @@ -88,7 +88,7 @@ static void pm_suspend(struct drm_i915_private *i915) { intel_wakeref_t wakeref; - with_intel_runtime_pm(i915, wakeref) { + with_intel_runtime_pm(&i915->runtime_pm, wakeref) { i915_gem_suspend_gtt_mappings(i915); i915_gem_suspend_late(i915); } @@ -98,7 +98,7 @@ static void pm_hibernate(struct drm_i915_private *i915) { intel_wakeref_t wakeref; - with_intel_runtime_pm(i915, wakeref) { + with_intel_runtime_pm(&i915->runtime_pm, wakeref) { i915_gem_suspend_gtt_mappings(i915); i915_gem_freeze(i915); @@ -114,7 +114,7 @@ static void pm_resume(struct drm_i915_private *i915) * Both suspend and hibernate follow the same wakeup path and assume * that runtime-pm just works. 
*/ - with_intel_runtime_pm(i915, wakeref) { + with_intel_runtime_pm(&i915->runtime_pm, wakeref) { intel_gt_sanitize(i915, false); i915_gem_sanitize(i915); i915_gem_resume(i915); diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c index e5f67474dcb4..8c69198c7e4e 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c @@ -539,7 +539,7 @@ int i915_gem_evict_mock_selftests(void) return -ENOMEM; mutex_lock(&i915->drm.struct_mutex); - with_intel_runtime_pm(i915, wakeref) + with_intel_runtime_pm(&i915->runtime_pm, wakeref) err = i915_subtests(tests, i915); mutex_unlock(&i915->drm.struct_mutex); diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index dd4e72eafc6c..3de24f3d4ed5 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -514,7 +514,7 @@ int i915_request_mock_selftests(void) if (!i915) return -ENOMEM; - with_intel_runtime_pm(i915, wakeref) + with_intel_runtime_pm(&i915->runtime_pm, wakeref) err = i915_subtests(tests, i915); drm_dev_put(&i915->drm); -- cgit v1.2.3 From 58a111f03a6e3f66f68260e09eb5cf1eefe2a387 Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Thu, 13 Jun 2019 16:21:56 -0700 Subject: drm/i915: make intel_wakeref work on the rpm struct intel_runtime_pm is the only thing they use from the i915 structure, so use that directly. Signed-off-by: Daniele Ceraolo Spurio Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190613232156.34940-9-daniele.ceraolospurio@intel.com --- drivers/gpu/drm/i915/gt/intel_engine_pm.c | 4 ++-- drivers/gpu/drm/i915/gt/intel_gt_pm.c | 4 ++-- drivers/gpu/drm/i915/i915_gem_fence_reg.c | 2 +- drivers/gpu/drm/i915/intel_wakeref.c | 32 +++++++++++++++---------------- drivers/gpu/drm/i915/intel_wakeref.h | 18 ++++++++--------- 5 files changed, 30 insertions(+), 30 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c index ccf034764741..903bee3d6c6d 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c @@ -37,7 +37,7 @@ static int __engine_unpark(struct intel_wakeref *wf) void intel_engine_pm_get(struct intel_engine_cs *engine) { - intel_wakeref_get(engine->i915, &engine->wakeref, __engine_unpark); + intel_wakeref_get(&engine->i915->runtime_pm, &engine->wakeref, __engine_unpark); } void intel_engine_park(struct intel_engine_cs *engine) @@ -131,7 +131,7 @@ static int __engine_park(struct intel_wakeref *wf) void intel_engine_pm_put(struct intel_engine_cs *engine) { - intel_wakeref_put(engine->i915, &engine->wakeref, __engine_park); + intel_wakeref_put(&engine->i915->runtime_pm, &engine->wakeref, __engine_park); } void intel_engine_init__pm(struct intel_engine_cs *engine) diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c index ae7155f0e063..7b5967751762 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c @@ -52,7 +52,7 @@ static int intel_gt_unpark(struct intel_wakeref *wf) void intel_gt_pm_get(struct drm_i915_private *i915) { - intel_wakeref_get(i915, &i915->gt.wakeref, intel_gt_unpark); + intel_wakeref_get(&i915->runtime_pm, &i915->gt.wakeref, intel_gt_unpark); } static int intel_gt_park(struct intel_wakeref *wf) @@ -77,7 +77,7 @@ static int 
intel_gt_park(struct intel_wakeref *wf) void intel_gt_pm_put(struct drm_i915_private *i915) { - intel_wakeref_put(i915, &i915->gt.wakeref, intel_gt_park); + intel_wakeref_put(&i915->runtime_pm, &i915->gt.wakeref, intel_gt_park); } void intel_gt_pm_init(struct drm_i915_private *i915) diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c index 3b35bb114b14..0bf53ac1c835 100644 --- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c +++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c @@ -804,7 +804,7 @@ void i915_ggtt_init_fences(struct i915_ggtt *ggtt) INIT_LIST_HEAD(&ggtt->fence_list); INIT_LIST_HEAD(&ggtt->userfault_list); - intel_wakeref_auto_init(&ggtt->userfault_wakeref, i915); + intel_wakeref_auto_init(&ggtt->userfault_wakeref, &i915->runtime_pm); detect_bit_6_swizzle(i915); diff --git a/drivers/gpu/drm/i915/intel_wakeref.c b/drivers/gpu/drm/i915/intel_wakeref.c index b677ae893d6f..3db6fa682823 100644 --- a/drivers/gpu/drm/i915/intel_wakeref.c +++ b/drivers/gpu/drm/i915/intel_wakeref.c @@ -4,23 +4,23 @@ * Copyright © 2019 Intel Corporation */ -#include "intel_drv.h" -#include "intel_wakeref.h" +#include "intel_runtime_pm.h" +#include "i915_gem.h" -static void rpm_get(struct drm_i915_private *i915, struct intel_wakeref *wf) +static void rpm_get(struct intel_runtime_pm *rpm, struct intel_wakeref *wf) { - wf->wakeref = intel_runtime_pm_get(&i915->runtime_pm); + wf->wakeref = intel_runtime_pm_get(rpm); } -static void rpm_put(struct drm_i915_private *i915, struct intel_wakeref *wf) +static void rpm_put(struct intel_runtime_pm *rpm, struct intel_wakeref *wf) { intel_wakeref_t wakeref = fetch_and_zero(&wf->wakeref); - intel_runtime_pm_put(&i915->runtime_pm, wakeref); + intel_runtime_pm_put(rpm, wakeref); GEM_BUG_ON(!wakeref); } -int __intel_wakeref_get_first(struct drm_i915_private *i915, +int __intel_wakeref_get_first(struct intel_runtime_pm *rpm, struct intel_wakeref *wf, int (*fn)(struct intel_wakeref *wf)) { @@ -34,11 +34,11 @@ int __intel_wakeref_get_first(struct drm_i915_private *i915, if (!atomic_read(&wf->count)) { int err; - rpm_get(i915, wf); + rpm_get(rpm, wf); err = fn(wf); if (unlikely(err)) { - rpm_put(i915, wf); + rpm_put(rpm, wf); mutex_unlock(&wf->mutex); return err; } @@ -51,7 +51,7 @@ int __intel_wakeref_get_first(struct drm_i915_private *i915, return 0; } -int __intel_wakeref_put_last(struct drm_i915_private *i915, +int __intel_wakeref_put_last(struct intel_runtime_pm *rpm, struct intel_wakeref *wf, int (*fn)(struct intel_wakeref *wf)) { @@ -59,7 +59,7 @@ int __intel_wakeref_put_last(struct drm_i915_private *i915, err = fn(wf); if (likely(!err)) - rpm_put(i915, wf); + rpm_put(rpm, wf); else atomic_inc(&wf->count); mutex_unlock(&wf->mutex); @@ -86,17 +86,17 @@ static void wakeref_auto_timeout(struct timer_list *t) wakeref = fetch_and_zero(&wf->wakeref); spin_unlock_irqrestore(&wf->lock, flags); - intel_runtime_pm_put(&wf->i915->runtime_pm, wakeref); + intel_runtime_pm_put(wf->rpm, wakeref); } void intel_wakeref_auto_init(struct intel_wakeref_auto *wf, - struct drm_i915_private *i915) + struct intel_runtime_pm *rpm) { spin_lock_init(&wf->lock); timer_setup(&wf->timer, wakeref_auto_timeout, 0); refcount_set(&wf->count, 0); wf->wakeref = 0; - wf->i915 = i915; + wf->rpm = rpm; } void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout) @@ -110,13 +110,13 @@ void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout) } /* Our mission is that we only extend an already active wakeref */ - 
assert_rpm_wakelock_held(&wf->i915->runtime_pm); + assert_rpm_wakelock_held(wf->rpm); if (!refcount_inc_not_zero(&wf->count)) { spin_lock_irqsave(&wf->lock, flags); if (!refcount_inc_not_zero(&wf->count)) { GEM_BUG_ON(wf->wakeref); - wf->wakeref = intel_runtime_pm_get_if_in_use(&wf->i915->runtime_pm); + wf->wakeref = intel_runtime_pm_get_if_in_use(wf->rpm); refcount_set(&wf->count, 1); } spin_unlock_irqrestore(&wf->lock, flags); diff --git a/drivers/gpu/drm/i915/intel_wakeref.h b/drivers/gpu/drm/i915/intel_wakeref.h index 8a5f85c000ce..9cbb2ebf575b 100644 --- a/drivers/gpu/drm/i915/intel_wakeref.h +++ b/drivers/gpu/drm/i915/intel_wakeref.h @@ -13,7 +13,7 @@ #include #include -struct drm_i915_private; +struct intel_runtime_pm; typedef depot_stack_handle_t intel_wakeref_t; @@ -31,10 +31,10 @@ void __intel_wakeref_init(struct intel_wakeref *wf, __intel_wakeref_init((wf), &__key); \ } while (0) -int __intel_wakeref_get_first(struct drm_i915_private *i915, +int __intel_wakeref_get_first(struct intel_runtime_pm *rpm, struct intel_wakeref *wf, int (*fn)(struct intel_wakeref *wf)); -int __intel_wakeref_put_last(struct drm_i915_private *i915, +int __intel_wakeref_put_last(struct intel_runtime_pm *rpm, struct intel_wakeref *wf, int (*fn)(struct intel_wakeref *wf)); @@ -55,12 +55,12 @@ int __intel_wakeref_put_last(struct drm_i915_private *i915, * code otherwise. */ static inline int -intel_wakeref_get(struct drm_i915_private *i915, +intel_wakeref_get(struct intel_runtime_pm *rpm, struct intel_wakeref *wf, int (*fn)(struct intel_wakeref *wf)) { if (unlikely(!atomic_inc_not_zero(&wf->count))) - return __intel_wakeref_get_first(i915, wf, fn); + return __intel_wakeref_get_first(rpm, wf, fn); return 0; } @@ -82,12 +82,12 @@ intel_wakeref_get(struct drm_i915_private *i915, * code otherwise. */ static inline int -intel_wakeref_put(struct drm_i915_private *i915, +intel_wakeref_put(struct intel_runtime_pm *rpm, struct intel_wakeref *wf, int (*fn)(struct intel_wakeref *wf)) { if (atomic_dec_and_mutex_lock(&wf->count, &wf->mutex)) - return __intel_wakeref_put_last(i915, wf, fn); + return __intel_wakeref_put_last(rpm, wf, fn); return 0; } @@ -133,7 +133,7 @@ intel_wakeref_active(struct intel_wakeref *wf) } struct intel_wakeref_auto { - struct drm_i915_private *i915; + struct intel_runtime_pm *rpm; struct timer_list timer; intel_wakeref_t wakeref; spinlock_t lock; @@ -158,7 +158,7 @@ struct intel_wakeref_auto { void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout); void intel_wakeref_auto_init(struct intel_wakeref_auto *wf, - struct drm_i915_private *i915); + struct intel_runtime_pm *rpm); void intel_wakeref_auto_fini(struct intel_wakeref_auto *wf); #endif /* INTEL_WAKEREF_H */ -- cgit v1.2.3 From ce476c80b8bfa8a8e4c9182cdb686c5aea2431a6 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 14 Jun 2019 17:46:04 +0100 Subject: drm/i915: Keep contexts pinned until after the next kernel context switch We need to keep the context image pinned in memory until after the GPU has finished writing into it. Since it continues to write as we signal the final breadcrumb, we need to keep it pinned until the request after it is complete. Currently we know the order in which requests execute on each engine, and so to remove that presumption we need to identify a request/context-switch we know must occur after our completion. Any request queued after the signal must imply a context switch, for simplicity we use a fresh request from the kernel context. 
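As a standalone sketch of that idea (plain C with invented toy_* names, not i915 code), the scheme amounts to: on its last unpin a context is parked on a per-engine barrier list instead of being released, and everything on that list is released only when a later switch-to-kernel-context request retires:

#include <stdio.h>

/*
 * Toy model of the barrier scheme described above; the names
 * (toy_context, toy_engine, ...) are made up for illustration and
 * are not part of i915.
 */
struct toy_context {
	const char *name;
	int pin_count;			/* stands in for the i915_active count */
	struct toy_context *next;	/* link on the engine's barrier list */
};

struct toy_engine {
	struct toy_context *barriers;	/* contexts whose save we must wait for */
};

/* Last unpin: park the context on the barrier list instead of freeing it. */
static void toy_context_deactivate(struct toy_engine *e, struct toy_context *c)
{
	if (--c->pin_count > 0)
		return;

	c->next = e->barriers;
	e->barriers = c;
	printf("%s: deactivated, waiting for a kernel context switch\n", c->name);
}

/*
 * Retirement of a later switch-to-kernel-context request: every context
 * parked before that switch must have been saved by now, so release all.
 */
static void toy_kernel_switch_retired(struct toy_engine *e)
{
	struct toy_context *c = e->barriers;

	e->barriers = NULL;
	while (c) {
		struct toy_context *next = c->next;

		printf("%s: context image saved, finally unpinned\n", c->name);
		c = next;
	}
}

int main(void)
{
	struct toy_engine engine = { 0 };
	struct toy_context ctx = { .name = "ctx", .pin_count = 1 };

	toy_context_deactivate(&engine, &ctx);	/* last request retired */
	toy_kernel_switch_retired(&engine);	/* barrier request retired */

	return 0;
}
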
The sequence of operations for keeping the context pinned until saved is: - On context activation, we preallocate a node for each physical engine the context may operate on. This is to avoid allocations during unpinning, which may be from inside FS_RECLAIM context (aka the shrinker) - On context deactivation on retirement of the last active request (which is before we know the context has been saved), we add the preallocated node onto a barrier list on each engine - On engine idling, we emit a switch to kernel context. When this switch completes, we know that all previous contexts must have been saved, and so on retiring this request we can finally unpin all the contexts that were marked as deactivated prior to the switch. We can enhance this in future by flushing all the idle contexts on a regular heartbeat pulse of a switch to kernel context, which will also be used to check for hung engines. v2: intel_context_active_acquire/_release Signed-off-by: Chris Wilson Cc: Mika Kuoppala Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190614164606.15633-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_context.c | 24 ++----- drivers/gpu/drm/i915/gem/i915_gem_context.h | 1 - drivers/gpu/drm/i915/gem/i915_gem_pm.c | 20 +++++- drivers/gpu/drm/i915/gem/i915_gem_shrinker.c | 17 ++--- drivers/gpu/drm/i915/gt/intel_context.c | 80 +++++++++++++++++++++--- drivers/gpu/drm/i915/gt/intel_context.h | 3 + drivers/gpu/drm/i915/gt/intel_context_types.h | 6 +- drivers/gpu/drm/i915/gt/intel_engine.h | 2 - drivers/gpu/drm/i915/gt/intel_engine_cs.c | 23 +------ drivers/gpu/drm/i915/gt/intel_engine_pm.c | 2 + drivers/gpu/drm/i915/gt/intel_engine_types.h | 13 +--- drivers/gpu/drm/i915/gt/intel_lrc.c | 62 +++--------------- drivers/gpu/drm/i915/gt/intel_ringbuffer.c | 44 ++----------- drivers/gpu/drm/i915/gt/mock_engine.c | 11 ++-- drivers/gpu/drm/i915/i915_active.c | 78 +++++++++++++++++++++++ drivers/gpu/drm/i915/i915_active.h | 5 ++ drivers/gpu/drm/i915/i915_active_types.h | 3 + drivers/gpu/drm/i915/i915_gem.c | 4 -- drivers/gpu/drm/i915/i915_request.c | 15 ----- drivers/gpu/drm/i915/selftests/mock_gem_device.c | 1 - 20 files changed, 219 insertions(+), 195 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index c86ca9f21532..6200060aef05 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -692,17 +692,6 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv) return 0; } -void i915_gem_contexts_lost(struct drm_i915_private *dev_priv) -{ - struct intel_engine_cs *engine; - enum intel_engine_id id; - - lockdep_assert_held(&dev_priv->drm.struct_mutex); - - for_each_engine(engine, dev_priv, id) - intel_engine_lost_context(engine); -} - void i915_gem_contexts_fini(struct drm_i915_private *i915) { lockdep_assert_held(&i915->drm.struct_mutex); @@ -1203,10 +1192,6 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu) if (ret) goto out_add; - ret = gen8_emit_rpcs_config(rq, ce, sseu); - if (ret) - goto out_add; - /* * Guarantee context image and the timeline remains pinned until the * modifying request is retired by setting the ce activity tracker. @@ -1214,9 +1199,12 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu) * But we only need to take one pin on the account of it. Or in other * words transfer the pinned ce object to tracked active request. 
*/ - if (!i915_active_request_isset(&ce->active_tracker)) - __intel_context_pin(ce); - __i915_active_request_set(&ce->active_tracker, rq); + GEM_BUG_ON(i915_active_is_idle(&ce->active)); + ret = i915_active_ref(&ce->active, rq->fence.context, rq); + if (ret) + goto out_add; + + ret = gen8_emit_rpcs_config(rq, ce, sseu); out_add: i915_request_add(rq); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h index 630392c77e48..9691dd062f72 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h @@ -134,7 +134,6 @@ static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx) /* i915_gem_context.c */ int __must_check i915_gem_contexts_init(struct drm_i915_private *dev_priv); -void i915_gem_contexts_lost(struct drm_i915_private *dev_priv); void i915_gem_contexts_fini(struct drm_i915_private *dev_priv); int i915_gem_context_open(struct drm_i915_private *i915, diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c index 6e75702c5671..141f3ea349a4 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c @@ -10,6 +10,22 @@ #include "i915_drv.h" #include "i915_globals.h" +static void call_idle_barriers(struct intel_engine_cs *engine) +{ + struct llist_node *node, *next; + + llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) { + struct i915_active_request *active = + container_of((struct list_head *)node, + typeof(*active), link); + + INIT_LIST_HEAD(&active->link); + RCU_INIT_POINTER(active->request, NULL); + + active->retire(active, NULL); + } +} + static void i915_gem_park(struct drm_i915_private *i915) { struct intel_engine_cs *engine; @@ -17,8 +33,10 @@ static void i915_gem_park(struct drm_i915_private *i915) lockdep_assert_held(&i915->drm.struct_mutex); - for_each_engine(engine, i915, id) + for_each_engine(engine, i915, id) { + call_idle_barriers(engine); /* cleanup after wedging */ i915_gem_batch_pool_fini(&engine->batch_pool); + } i915_timelines_park(i915); i915_vma_parked(i915); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c index a521f23c18ad..c851c4029597 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c @@ -160,18 +160,13 @@ i915_gem_shrink(struct drm_i915_private *i915, return 0; /* - * When shrinking the active list, also consider active contexts. - * Active contexts are pinned until they are retired, and so can - * not be simply unbound to retire and unpin their pages. To shrink - * the contexts, we must wait until the gpu is idle. - * - * We don't care about errors here; if we cannot wait upon the GPU, - * we will free as much as we can and hope to get a second chance. + * When shrinking the active list, we should also consider active + * contexts. Active contexts are pinned until they are retired, and + * so can not be simply unbound to retire and unpin their pages. To + * shrink the contexts, we must wait until the gpu is idle and + * completed its switch to the kernel context. In short, we do + * not have a good mechanism for idling a specific context. 
*/ - if (shrink & I915_SHRINK_ACTIVE) - i915_gem_wait_for_idle(i915, - I915_WAIT_LOCKED, - MAX_SCHEDULE_TIMEOUT); trace_i915_gem_shrink(i915, target, shrink); i915_retire_requests(i915); diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index 285e869713e8..42f45744d859 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -61,7 +61,6 @@ int __intel_context_do_pin(struct intel_context *ce) i915_gem_context_get(ce->gem_context); /* for ctx->ppgtt */ - intel_context_get(ce); smp_mb__before_atomic(); /* flush pin before it is visible */ } @@ -89,20 +88,45 @@ void intel_context_unpin(struct intel_context *ce) ce->ops->unpin(ce); i915_gem_context_put(ce->gem_context); - intel_context_put(ce); + intel_context_active_release(ce); } mutex_unlock(&ce->pin_mutex); intel_context_put(ce); } -static void intel_context_retire(struct i915_active_request *active, - struct i915_request *rq) +static int __context_pin_state(struct i915_vma *vma, unsigned long flags) { - struct intel_context *ce = - container_of(active, typeof(*ce), active_tracker); + int err; - intel_context_unpin(ce); + err = i915_vma_pin(vma, 0, 0, flags | PIN_GLOBAL); + if (err) + return err; + + /* + * And mark it as a globally pinned object to let the shrinker know + * it cannot reclaim the object until we release it. + */ + vma->obj->pin_global++; + vma->obj->mm.dirty = true; + + return 0; +} + +static void __context_unpin_state(struct i915_vma *vma) +{ + vma->obj->pin_global--; + __i915_vma_unpin(vma); +} + +static void intel_context_retire(struct i915_active *active) +{ + struct intel_context *ce = container_of(active, typeof(*ce), active); + + if (ce->state) + __context_unpin_state(ce->state); + + intel_context_put(ce); } void @@ -125,8 +149,46 @@ intel_context_init(struct intel_context *ce, mutex_init(&ce->pin_mutex); - i915_active_request_init(&ce->active_tracker, - NULL, intel_context_retire); + i915_active_init(ctx->i915, &ce->active, intel_context_retire); +} + +int intel_context_active_acquire(struct intel_context *ce, unsigned long flags) +{ + int err; + + if (!i915_active_acquire(&ce->active)) + return 0; + + intel_context_get(ce); + + if (!ce->state) + return 0; + + err = __context_pin_state(ce->state, flags); + if (err) { + i915_active_cancel(&ce->active); + intel_context_put(ce); + return err; + } + + /* Preallocate tracking nodes */ + if (!i915_gem_context_is_kernel(ce->gem_context)) { + err = i915_active_acquire_preallocate_barrier(&ce->active, + ce->engine); + if (err) { + i915_active_release(&ce->active); + return err; + } + } + + return 0; +} + +void intel_context_active_release(struct intel_context *ce) +{ + /* Nodes preallocated in intel_context_active() */ + i915_active_acquire_barrier(&ce->active); + i915_active_release(&ce->active); } static void i915_global_context_shrink(void) diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h index 6d5453ba2c1e..a47275bc4f01 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.h +++ b/drivers/gpu/drm/i915/gt/intel_context.h @@ -102,6 +102,9 @@ static inline void intel_context_exit(struct intel_context *ce) ce->ops->exit(ce); } +int intel_context_active_acquire(struct intel_context *ce, unsigned long flags); +void intel_context_active_release(struct intel_context *ce); + static inline struct intel_context *intel_context_get(struct intel_context *ce) { kref_get(&ce->ref); diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h 
b/drivers/gpu/drm/i915/gt/intel_context_types.h index 825fcf0ac9c4..e95be4be9612 100644 --- a/drivers/gpu/drm/i915/gt/intel_context_types.h +++ b/drivers/gpu/drm/i915/gt/intel_context_types.h @@ -56,10 +56,10 @@ struct intel_context { intel_engine_mask_t saturated; /* submitting semaphores too late? */ /** - * active_tracker: Active tracker for the external rq activity - * on this intel_context object. + * active: Active tracker for the rq activity (inc. external) on this + * intel_context object. */ - struct i915_active_request active_tracker; + struct i915_active active; const struct intel_context_ops *ops; diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h index 1439fa4093ac..3b5a6d152997 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine.h +++ b/drivers/gpu/drm/i915/gt/intel_engine.h @@ -467,8 +467,6 @@ static inline void intel_engine_reset(struct intel_engine_cs *engine, bool intel_engine_is_idle(struct intel_engine_cs *engine); bool intel_engines_are_idle(struct drm_i915_private *dev_priv); -void intel_engine_lost_context(struct intel_engine_cs *engine); - void intel_engines_reset_default_submission(struct drm_i915_private *i915); unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915); diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index 39220e16ea26..22242e927baa 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -611,6 +611,8 @@ static int intel_engine_setup_common(struct intel_engine_cs *engine) { int err; + init_llist_head(&engine->barrier_tasks); + err = init_status_page(engine); if (err) return err; @@ -870,6 +872,7 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine) if (engine->preempt_context) intel_context_unpin(engine->preempt_context); intel_context_unpin(engine->kernel_context); + GEM_BUG_ON(!llist_empty(&engine->barrier_tasks)); i915_timeline_fini(&engine->timeline); @@ -1201,26 +1204,6 @@ void intel_engines_reset_default_submission(struct drm_i915_private *i915) engine->set_default_submission(engine); } -/** - * intel_engine_lost_context: called when the GPU is reset into unknown state - * @engine: the engine - * - * We have either reset the GPU or otherwise about to lose state tracking of - * the current GPU logical state (e.g. suspend). On next use, it is therefore - * imperative that we make no presumptions about the current state and load - * from scratch. - */ -void intel_engine_lost_context(struct intel_engine_cs *engine) -{ - struct intel_context *ce; - - lockdep_assert_held(&engine->i915->drm.struct_mutex); - - ce = fetch_and_zero(&engine->last_retired_context); - if (ce) - intel_context_unpin(ce); -} - bool intel_engine_can_store_dword(struct intel_engine_cs *engine) { switch (INTEL_GEN(engine->i915)) { diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c index 903bee3d6c6d..d14e352b0b17 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c @@ -88,6 +88,8 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine) /* Check again on the next retirement. 
*/ engine->wakeref_serial = engine->serial + 1; + + i915_request_add_barriers(rq); __i915_request_commit(rq); return false; diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h index 01223864237a..33a31aa2d2ae 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h @@ -11,6 +11,7 @@ #include #include #include +#include #include #include "i915_gem.h" @@ -288,6 +289,7 @@ struct intel_engine_cs { struct intel_ring *buffer; struct i915_timeline timeline; + struct llist_head barrier_tasks; struct intel_context *kernel_context; /* pinned */ struct intel_context *preempt_context; /* pinned; optional */ @@ -435,17 +437,6 @@ struct intel_engine_cs { struct intel_engine_execlists execlists; - /* Contexts are pinned whilst they are active on the GPU. The last - * context executed remains active whilst the GPU is idle - the - * switch away and write to the context object only occurs on the - * next execution. Contexts are only unpinned on retirement of the - * following request ensuring that we can always write to the object - * on the context switch even after idling. Across suspend, we switch - * to the kernel context and trash it as the save may not happen - * before the hardware is powered down. - */ - struct intel_context *last_retired_context; - /* status_notifier: list of callbacks for context-switch changes */ struct atomic_notifier_head context_status_notifier; diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index b8f5592da18f..d0a51752386f 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -1422,60 +1422,11 @@ static void execlists_context_destroy(struct kref *kref) intel_context_free(ce); } -static int __context_pin(struct i915_vma *vma) -{ - unsigned int flags; - int err; - - flags = PIN_GLOBAL | PIN_HIGH; - flags |= PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma); - - err = i915_vma_pin(vma, 0, 0, flags); - if (err) - return err; - - vma->obj->pin_global++; - vma->obj->mm.dirty = true; - - return 0; -} - -static void __context_unpin(struct i915_vma *vma) -{ - vma->obj->pin_global--; - __i915_vma_unpin(vma); -} - static void execlists_context_unpin(struct intel_context *ce) { - struct intel_engine_cs *engine; - - /* - * The tasklet may still be using a pointer to our state, via an - * old request. However, since we know we only unpin the context - * on retirement of the following request, we know that the last - * request referencing us will have had a completion CS interrupt. - * If we see that it is still active, it means that the tasklet hasn't - * had the chance to run yet; let it run before we teardown the - * reference it may use. 
- */ - engine = READ_ONCE(ce->inflight); - if (unlikely(engine)) { - unsigned long flags; - - spin_lock_irqsave(&engine->timeline.lock, flags); - process_csb(engine); - spin_unlock_irqrestore(&engine->timeline.lock, flags); - - GEM_BUG_ON(READ_ONCE(ce->inflight)); - } - i915_gem_context_unpin_hw_id(ce->gem_context); - - intel_ring_unpin(ce->ring); - i915_gem_object_unpin_map(ce->state->obj); - __context_unpin(ce->state); + intel_ring_unpin(ce->ring); } static void @@ -1512,7 +1463,10 @@ __execlists_context_pin(struct intel_context *ce, goto err; GEM_BUG_ON(!ce->state); - ret = __context_pin(ce->state); + ret = intel_context_active_acquire(ce, + engine->i915->ggtt.pin_bias | + PIN_OFFSET_BIAS | + PIN_HIGH); if (ret) goto err; @@ -1521,7 +1475,7 @@ __execlists_context_pin(struct intel_context *ce, I915_MAP_OVERRIDE); if (IS_ERR(vaddr)) { ret = PTR_ERR(vaddr); - goto unpin_vma; + goto unpin_active; } ret = intel_ring_pin(ce->ring); @@ -1542,8 +1496,8 @@ unpin_ring: intel_ring_unpin(ce->ring); unpin_map: i915_gem_object_unpin_map(ce->state->obj); -unpin_vma: - __context_unpin(ce->state); +unpin_active: + intel_context_active_release(ce); err: return ret; } diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c index b3bf47e8162f..cc901edec09a 100644 --- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c @@ -1349,45 +1349,9 @@ static void __context_unpin_ppgtt(struct i915_gem_context *ctx) gen6_ppgtt_unpin(i915_vm_to_ppgtt(vm)); } -static int __context_pin(struct intel_context *ce) -{ - struct i915_vma *vma; - int err; - - vma = ce->state; - if (!vma) - return 0; - - err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH); - if (err) - return err; - - /* - * And mark is as a globally pinned object to let the shrinker know - * it cannot reclaim the object until we release it. 
- */ - vma->obj->pin_global++; - vma->obj->mm.dirty = true; - - return 0; -} - -static void __context_unpin(struct intel_context *ce) -{ - struct i915_vma *vma; - - vma = ce->state; - if (!vma) - return; - - vma->obj->pin_global--; - i915_vma_unpin(vma); -} - static void ring_context_unpin(struct intel_context *ce) { __context_unpin_ppgtt(ce->gem_context); - __context_unpin(ce); } static struct i915_vma * @@ -1477,18 +1441,18 @@ static int ring_context_pin(struct intel_context *ce) ce->state = vma; } - err = __context_pin(ce); + err = intel_context_active_acquire(ce, PIN_HIGH); if (err) return err; err = __context_pin_ppgtt(ce->gem_context); if (err) - goto err_unpin; + goto err_active; return 0; -err_unpin: - __context_unpin(ce); +err_active: + intel_context_active_release(ce); return err; } diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c index 6d7562769eb2..d1ef515bac8d 100644 --- a/drivers/gpu/drm/i915/gt/mock_engine.c +++ b/drivers/gpu/drm/i915/gt/mock_engine.c @@ -146,12 +146,18 @@ static void mock_context_destroy(struct kref *ref) static int mock_context_pin(struct intel_context *ce) { + int ret; + if (!ce->ring) { ce->ring = mock_ring(ce->engine); if (!ce->ring) return -ENOMEM; } + ret = intel_context_active_acquire(ce, PIN_HIGH); + if (ret) + return ret; + mock_timeline_pin(ce->ring->timeline); return 0; } @@ -328,14 +334,9 @@ void mock_engine_free(struct intel_engine_cs *engine) { struct mock_engine *mock = container_of(engine, typeof(*mock), base); - struct intel_context *ce; GEM_BUG_ON(timer_pending(&mock->hw_delay)); - ce = fetch_and_zero(&engine->last_retired_context); - if (ce) - intel_context_unpin(ce); - intel_context_unpin(engine->kernel_context); intel_engine_fini_breadcrumbs(engine); diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c index 863ae12707ba..2d019ac6db20 100644 --- a/drivers/gpu/drm/i915/i915_active.c +++ b/drivers/gpu/drm/i915/i915_active.c @@ -157,6 +157,7 @@ void i915_active_init(struct drm_i915_private *i915, ref->retire = retire; ref->tree = RB_ROOT; i915_active_request_init(&ref->last, NULL, last_retire); + init_llist_head(&ref->barriers); ref->count = 0; } @@ -263,6 +264,83 @@ void i915_active_fini(struct i915_active *ref) } #endif +int i915_active_acquire_preallocate_barrier(struct i915_active *ref, + struct intel_engine_cs *engine) +{ + struct drm_i915_private *i915 = engine->i915; + unsigned long tmp; + int err = 0; + + GEM_BUG_ON(!engine->mask); + for_each_engine_masked(engine, i915, engine->mask, tmp) { + struct intel_context *kctx = engine->kernel_context; + struct active_node *node; + + node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL); + if (unlikely(!node)) { + err = -ENOMEM; + break; + } + + i915_active_request_init(&node->base, + (void *)engine, node_retire); + node->timeline = kctx->ring->timeline->fence_context; + node->ref = ref; + ref->count++; + + llist_add((struct llist_node *)&node->base.link, + &ref->barriers); + } + + return err; +} + +void i915_active_acquire_barrier(struct i915_active *ref) +{ + struct llist_node *pos, *next; + + i915_active_acquire(ref); + + llist_for_each_safe(pos, next, llist_del_all(&ref->barriers)) { + struct intel_engine_cs *engine; + struct active_node *node; + struct rb_node **p, *parent; + + node = container_of((struct list_head *)pos, + typeof(*node), base.link); + + engine = (void *)rcu_access_pointer(node->base.request); + RCU_INIT_POINTER(node->base.request, ERR_PTR(-EAGAIN)); + + parent = NULL; + p = &ref->tree.rb_node; 
+ while (*p) { + parent = *p; + if (rb_entry(parent, + struct active_node, + node)->timeline < node->timeline) + p = &parent->rb_right; + else + p = &parent->rb_left; + } + rb_link_node(&node->node, parent, p); + rb_insert_color(&node->node, &ref->tree); + + llist_add((struct llist_node *)&node->base.link, + &engine->barrier_tasks); + } + i915_active_release(ref); +} + +void i915_request_add_barriers(struct i915_request *rq) +{ + struct intel_engine_cs *engine = rq->engine; + struct llist_node *node, *next; + + llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) + list_add_tail((struct list_head *)node, &rq->active_list); +} + int i915_active_request_set(struct i915_active_request *active, struct i915_request *rq) { diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h index 7d758719ce39..d55d37673944 100644 --- a/drivers/gpu/drm/i915/i915_active.h +++ b/drivers/gpu/drm/i915/i915_active.h @@ -406,4 +406,9 @@ void i915_active_fini(struct i915_active *ref); static inline void i915_active_fini(struct i915_active *ref) { } #endif +int i915_active_acquire_preallocate_barrier(struct i915_active *ref, + struct intel_engine_cs *engine); +void i915_active_acquire_barrier(struct i915_active *ref); +void i915_request_add_barriers(struct i915_request *rq); + #endif /* _I915_ACTIVE_H_ */ diff --git a/drivers/gpu/drm/i915/i915_active_types.h b/drivers/gpu/drm/i915/i915_active_types.h index b679253b53a5..c025991b9233 100644 --- a/drivers/gpu/drm/i915/i915_active_types.h +++ b/drivers/gpu/drm/i915/i915_active_types.h @@ -7,6 +7,7 @@ #ifndef _I915_ACTIVE_TYPES_H_ #define _I915_ACTIVE_TYPES_H_ +#include #include #include @@ -31,6 +32,8 @@ struct i915_active { unsigned int count; void (*retire)(struct i915_active *ref); + + struct llist_head barriers; }; #endif /* _I915_ACTIVE_TYPES_H_ */ diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 217236596202..335efeaad4f1 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1199,10 +1199,6 @@ void i915_gem_sanitize(struct drm_i915_private *i915) intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL); intel_runtime_pm_put(&i915->runtime_pm, wakeref); - - mutex_lock(&i915->drm.struct_mutex); - i915_gem_contexts_lost(i915); - mutex_unlock(&i915->drm.struct_mutex); } void i915_gem_init_swizzling(struct drm_i915_private *dev_priv) diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index da76e4d1c7f1..38d112d8aba7 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -213,18 +213,6 @@ static void __retire_engine_request(struct intel_engine_cs *engine, spin_unlock(&rq->lock); local_irq_enable(); - - /* - * The backing object for the context is done after switching to the - * *next* context. Therefore we cannot retire the previous context until - * the next context has already started running. However, since we - * cannot take the required locks at i915_request_submit() we - * defer the unpinning of the active context to now, retirement of - * the subsequent request. 
- */ - if (engine->last_retired_context) - intel_context_unpin(engine->last_retired_context); - engine->last_retired_context = rq->hw_context; } static void __retire_engine_upto(struct intel_engine_cs *engine, @@ -759,9 +747,6 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp) rq->infix = rq->ring->emit; /* end of header; start of user payload */ - /* Keep a second pin for the dual retirement along engine and ring */ - __intel_context_pin(ce); - intel_context_mark_active(ce); return rq; diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index 82f2be6cec5b..64bc51400ae7 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -56,7 +56,6 @@ static void mock_device_release(struct drm_device *dev) mutex_lock(&i915->drm.struct_mutex); mock_device_flush(i915); - i915_gem_contexts_lost(i915); mutex_unlock(&i915->drm.struct_mutex); flush_work(&i915->gem.idle_work); -- cgit v1.2.3 From 9db0c5caa7471b463627e1a87c58de874be1c55f Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 14 Jun 2019 17:46:05 +0100 Subject: drm/i915: Stop retiring along engine We no longer track the execution order along the engine and so no longer need to enforce ordering of retire along the engine. Signed-off-by: Chris Wilson Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190614164606.15633-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_request.c | 131 +++++++++++++++--------------------- 1 file changed, 53 insertions(+), 78 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 38d112d8aba7..c99136f78af9 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -183,72 +183,23 @@ static void free_capture_list(struct i915_request *request) } } -static void __retire_engine_request(struct intel_engine_cs *engine, - struct i915_request *rq) -{ - GEM_TRACE("%s(%s) fence %llx:%lld, current %d\n", - __func__, engine->name, - rq->fence.context, rq->fence.seqno, - hwsp_seqno(rq)); - - GEM_BUG_ON(!i915_request_completed(rq)); - - local_irq_disable(); - - spin_lock(&engine->timeline.lock); - GEM_BUG_ON(!list_is_first(&rq->link, &engine->timeline.requests)); - list_del_init(&rq->link); - spin_unlock(&engine->timeline.lock); - - spin_lock(&rq->lock); - i915_request_mark_complete(rq); - if (!i915_request_signaled(rq)) - dma_fence_signal_locked(&rq->fence); - if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags)) - i915_request_cancel_breadcrumb(rq); - if (rq->waitboost) { - GEM_BUG_ON(!atomic_read(&rq->i915->gt_pm.rps.num_waiters)); - atomic_dec(&rq->i915->gt_pm.rps.num_waiters); - } - spin_unlock(&rq->lock); - - local_irq_enable(); -} - -static void __retire_engine_upto(struct intel_engine_cs *engine, - struct i915_request *rq) -{ - struct i915_request *tmp; - - if (list_empty(&rq->link)) - return; - - do { - tmp = list_first_entry(&engine->timeline.requests, - typeof(*tmp), link); - - GEM_BUG_ON(tmp->engine != engine); - __retire_engine_request(engine, tmp); - } while (tmp != rq); -} - -static void i915_request_retire(struct i915_request *request) +static bool i915_request_retire(struct i915_request *rq) { struct i915_active_request *active, *next; - GEM_TRACE("%s fence %llx:%lld, current %d\n", - request->engine->name, - request->fence.context, request->fence.seqno, - hwsp_seqno(request)); + 
lockdep_assert_held(&rq->i915->drm.struct_mutex); + if (!i915_request_completed(rq)) + return false; - lockdep_assert_held(&request->i915->drm.struct_mutex); - GEM_BUG_ON(!i915_sw_fence_signaled(&request->submit)); - GEM_BUG_ON(!i915_request_completed(request)); + GEM_TRACE("%s fence %llx:%lld, current %d\n", + rq->engine->name, + rq->fence.context, rq->fence.seqno, + hwsp_seqno(rq)); - trace_i915_request_retire(request); + GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit)); + trace_i915_request_retire(rq); - advance_ring(request); - free_capture_list(request); + advance_ring(rq); /* * Walk through the active list, calling retire on each. This allows @@ -260,7 +211,7 @@ static void i915_request_retire(struct i915_request *request) * pass along the auxiliary information (to avoid dereferencing * the node after the callback). */ - list_for_each_entry_safe(active, next, &request->active_list, link) { + list_for_each_entry_safe(active, next, &rq->active_list, link) { /* * In microbenchmarks or focusing upon time inside the kernel, * we may spend an inordinate amount of time simply handling @@ -276,18 +227,39 @@ static void i915_request_retire(struct i915_request *request) INIT_LIST_HEAD(&active->link); RCU_INIT_POINTER(active->request, NULL); - active->retire(active, request); + active->retire(active, rq); + } + + local_irq_disable(); + + spin_lock(&rq->engine->timeline.lock); + list_del(&rq->link); + spin_unlock(&rq->engine->timeline.lock); + + spin_lock(&rq->lock); + i915_request_mark_complete(rq); + if (!i915_request_signaled(rq)) + dma_fence_signal_locked(&rq->fence); + if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags)) + i915_request_cancel_breadcrumb(rq); + if (rq->waitboost) { + GEM_BUG_ON(!atomic_read(&rq->i915->gt_pm.rps.num_waiters)); + atomic_dec(&rq->i915->gt_pm.rps.num_waiters); } + spin_unlock(&rq->lock); + + local_irq_enable(); - i915_request_remove_from_client(request); + intel_context_exit(rq->hw_context); + intel_context_unpin(rq->hw_context); - __retire_engine_upto(request->engine, request); + i915_request_remove_from_client(rq); - intel_context_exit(request->hw_context); - intel_context_unpin(request->hw_context); + free_capture_list(rq); + i915_sched_node_fini(&rq->sched); + i915_request_put(rq); - i915_sched_node_fini(&request->sched); - i915_request_put(request); + return true; } void i915_request_retire_upto(struct i915_request *rq) @@ -309,9 +281,7 @@ void i915_request_retire_upto(struct i915_request *rq) do { tmp = list_first_entry(&ring->request_list, typeof(*tmp), ring_link); - - i915_request_retire(tmp); - } while (tmp != rq); + } while (i915_request_retire(tmp) && tmp != rq); } static void irq_execute_cb(struct irq_work *wrk) @@ -600,12 +570,9 @@ static void ring_retire_requests(struct intel_ring *ring) { struct i915_request *rq, *rn; - list_for_each_entry_safe(rq, rn, &ring->request_list, ring_link) { - if (!i915_request_completed(rq)) + list_for_each_entry_safe(rq, rn, &ring->request_list, ring_link) + if (!i915_request_retire(rq)) break; - - i915_request_retire(rq); - } } static noinline struct i915_request * @@ -620,6 +587,15 @@ request_alloc_slow(struct intel_context *ce, gfp_t gfp) if (!gfpflags_allow_blocking(gfp)) goto out; + /* Move our oldest request to the slab-cache (if not in use!) 
*/ + rq = list_first_entry(&ring->request_list, typeof(*rq), ring_link); + i915_request_retire(rq); + + rq = kmem_cache_alloc(global.slab_requests, + gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN); + if (rq) + return rq; + /* Ratelimit ourselves to prevent oom from malicious clients */ rq = list_last_entry(&ring->request_list, typeof(*rq), ring_link); cond_synchronize_rcu(rq->rcustate); @@ -777,8 +753,7 @@ i915_request_create(struct intel_context *ce) /* Move our oldest request to the slab-cache (if not in use!) */ rq = list_first_entry(&ce->ring->request_list, typeof(*rq), ring_link); - if (!list_is_last(&rq->ring_link, &ce->ring->request_list) && - i915_request_completed(rq)) + if (!list_is_last(&rq->ring_link, &ce->ring->request_list)) i915_request_retire(rq); intel_context_enter(ce); -- cgit v1.2.3 From 422d7df4f090bbbc4d49e66d533a259ba63ec70d Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 14 Jun 2019 17:46:06 +0100 Subject: drm/i915: Replace engine->timeline with a plain list To continue the onslaught of removing the assumption of a global execution ordering, another casualty is the engine->timeline. Without an actual timeline to track, it is overkill and we can replace it with a much less grand plain list. We still need a list of requests inflight, for the simple purpose of finding inflight requests (for retiring, resetting, preemption etc). Signed-off-by: Chris Wilson Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190614164606.15633-3-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_engine.h | 6 ++ drivers/gpu/drm/i915/gt/intel_engine_cs.c | 62 ++++++++--------- drivers/gpu/drm/i915/gt/intel_engine_types.h | 6 +- drivers/gpu/drm/i915/gt/intel_lrc.c | 95 ++++++++++++++------------ drivers/gpu/drm/i915/gt/intel_reset.c | 10 +-- drivers/gpu/drm/i915/gt/intel_ringbuffer.c | 15 ++-- drivers/gpu/drm/i915/gt/mock_engine.c | 18 ++--- drivers/gpu/drm/i915/i915_gpu_error.c | 5 +- drivers/gpu/drm/i915/i915_request.c | 43 ++++-------- drivers/gpu/drm/i915/i915_request.h | 2 +- drivers/gpu/drm/i915/i915_scheduler.c | 38 +++++------ drivers/gpu/drm/i915/i915_timeline.c | 1 - drivers/gpu/drm/i915/i915_timeline.h | 19 ------ drivers/gpu/drm/i915/i915_timeline_types.h | 4 -- drivers/gpu/drm/i915/intel_guc_submission.c | 16 ++--- drivers/gpu/drm/i915/selftests/mock_timeline.c | 1 - 16 files changed, 153 insertions(+), 188 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h index 3b5a6d152997..2f1c6871ee95 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine.h +++ b/drivers/gpu/drm/i915/gt/intel_engine.h @@ -565,4 +565,10 @@ static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists) #endif +void intel_engine_init_active(struct intel_engine_cs *engine, + unsigned int subclass); +#define ENGINE_PHYSICAL 0 +#define ENGINE_MOCK 1 +#define ENGINE_VIRTUAL 2 + #endif /* _INTEL_RINGBUFFER_H_ */ diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index 22242e927baa..898692989313 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -617,14 +617,7 @@ static int intel_engine_setup_common(struct intel_engine_cs *engine) if (err) return err; - err = i915_timeline_init(engine->i915, - &engine->timeline, - engine->status_page.vma); - if (err) - goto err_hwsp; - - i915_timeline_set_subclass(&engine->timeline, TIMELINE_ENGINE); - + intel_engine_init_active(engine, 
ENGINE_PHYSICAL); intel_engine_init_breadcrumbs(engine); intel_engine_init_execlists(engine); intel_engine_init_hangcheck(engine); @@ -637,10 +630,6 @@ static int intel_engine_setup_common(struct intel_engine_cs *engine) intel_sseu_from_device_info(&RUNTIME_INFO(engine->i915)->sseu); return 0; - -err_hwsp: - cleanup_status_page(engine); - return err; } /** @@ -797,6 +786,27 @@ static int pin_context(struct i915_gem_context *ctx, return 0; } +void +intel_engine_init_active(struct intel_engine_cs *engine, unsigned int subclass) +{ + INIT_LIST_HEAD(&engine->active.requests); + + spin_lock_init(&engine->active.lock); + lockdep_set_subclass(&engine->active.lock, subclass); + + /* + * Due to an interesting quirk in lockdep's internal debug tracking, + * after setting a subclass we must ensure the lock is used. Otherwise, + * nr_unused_locks is incremented once too often. + */ +#ifdef CONFIG_DEBUG_LOCK_ALLOC + local_irq_disable(); + lock_map_acquire(&engine->active.lock.dep_map); + lock_map_release(&engine->active.lock.dep_map); + local_irq_enable(); +#endif +} + /** * intel_engines_init_common - initialize cengine state which might require hw access * @engine: Engine to initialize. @@ -860,6 +870,8 @@ err_unpin: */ void intel_engine_cleanup_common(struct intel_engine_cs *engine) { + GEM_BUG_ON(!list_empty(&engine->active.requests)); + cleanup_status_page(engine); intel_engine_fini_breadcrumbs(engine); @@ -874,8 +886,6 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine) intel_context_unpin(engine->kernel_context); GEM_BUG_ON(!llist_empty(&engine->barrier_tasks)); - i915_timeline_fini(&engine->timeline); - intel_wa_list_free(&engine->ctx_wa_list); intel_wa_list_free(&engine->wa_list); intel_wa_list_free(&engine->whitelist); @@ -1482,16 +1492,6 @@ void intel_engine_dump(struct intel_engine_cs *engine, drm_printf(m, "\tRequests:\n"); - rq = list_first_entry(&engine->timeline.requests, - struct i915_request, link); - if (&rq->link != &engine->timeline.requests) - print_request(m, rq, "\t\tfirst "); - - rq = list_last_entry(&engine->timeline.requests, - struct i915_request, link); - if (&rq->link != &engine->timeline.requests) - print_request(m, rq, "\t\tlast "); - rq = intel_engine_find_active_request(engine); if (rq) { print_request(m, rq, "\t\tactive "); @@ -1572,7 +1572,7 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine) if (!intel_engine_supports_stats(engine)) return -ENODEV; - spin_lock_irqsave(&engine->timeline.lock, flags); + spin_lock_irqsave(&engine->active.lock, flags); write_seqlock(&engine->stats.lock); if (unlikely(engine->stats.enabled == ~0)) { @@ -1598,7 +1598,7 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine) unlock: write_sequnlock(&engine->stats.lock); - spin_unlock_irqrestore(&engine->timeline.lock, flags); + spin_unlock_irqrestore(&engine->active.lock, flags); return err; } @@ -1683,22 +1683,22 @@ intel_engine_find_active_request(struct intel_engine_cs *engine) * At all other times, we must assume the GPU is still running, but * we only care about the snapshot of this moment. */ - spin_lock_irqsave(&engine->timeline.lock, flags); - list_for_each_entry(request, &engine->timeline.requests, link) { + spin_lock_irqsave(&engine->active.lock, flags); + list_for_each_entry(request, &engine->active.requests, sched.link) { if (i915_request_completed(request)) continue; if (!i915_request_started(request)) - break; + continue; /* More than one preemptible request may match! 
*/ if (!match_ring(request)) - break; + continue; active = request; break; } - spin_unlock_irqrestore(&engine->timeline.lock, flags); + spin_unlock_irqrestore(&engine->active.lock, flags); return active; } diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h index 33a31aa2d2ae..b2faca8e5dec 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h @@ -288,7 +288,11 @@ struct intel_engine_cs { struct intel_ring *buffer; - struct i915_timeline timeline; + struct { + spinlock_t lock; + struct list_head requests; + } active; + struct llist_head barrier_tasks; struct intel_context *kernel_context; /* pinned */ diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index d0a51752386f..c400c66d0ee5 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -298,8 +298,8 @@ static inline bool need_preempt(const struct intel_engine_cs *engine, * Check against the first request in ELSP[1], it will, thanks to the * power of PI, be the highest priority of that context. */ - if (!list_is_last(&rq->link, &engine->timeline.requests) && - rq_prio(list_next_entry(rq, link)) > last_prio) + if (!list_is_last(&rq->sched.link, &engine->active.requests) && + rq_prio(list_next_entry(rq, sched.link)) > last_prio) return true; if (rb) { @@ -434,11 +434,11 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine) struct list_head *uninitialized_var(pl); int prio = I915_PRIORITY_INVALID; - lockdep_assert_held(&engine->timeline.lock); + lockdep_assert_held(&engine->active.lock); list_for_each_entry_safe_reverse(rq, rn, - &engine->timeline.requests, - link) { + &engine->active.requests, + sched.link) { struct intel_engine_cs *owner; if (i915_request_completed(rq)) @@ -465,7 +465,7 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine) } GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root)); - list_add(&rq->sched.link, pl); + list_move(&rq->sched.link, pl); active = rq; } else { rq->engine = owner; @@ -933,11 +933,11 @@ static void execlists_dequeue(struct intel_engine_cs *engine) rb_entry(rb, typeof(*ve), nodes[engine->id].rb); struct i915_request *rq; - spin_lock(&ve->base.timeline.lock); + spin_lock(&ve->base.active.lock); rq = ve->request; if (unlikely(!rq)) { /* lost the race to a sibling */ - spin_unlock(&ve->base.timeline.lock); + spin_unlock(&ve->base.active.lock); rb_erase_cached(rb, &execlists->virtual); RB_CLEAR_NODE(rb); rb = rb_first_cached(&execlists->virtual); @@ -950,13 +950,13 @@ static void execlists_dequeue(struct intel_engine_cs *engine) if (rq_prio(rq) >= queue_prio(execlists)) { if (!virtual_matches(ve, rq, engine)) { - spin_unlock(&ve->base.timeline.lock); + spin_unlock(&ve->base.active.lock); rb = rb_next(rb); continue; } if (last && !can_merge_rq(last, rq)) { - spin_unlock(&ve->base.timeline.lock); + spin_unlock(&ve->base.active.lock); return; /* leave this rq for another engine */ } @@ -1011,7 +1011,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine) last = rq; } - spin_unlock(&ve->base.timeline.lock); + spin_unlock(&ve->base.active.lock); break; } @@ -1068,8 +1068,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine) GEM_BUG_ON(port_isset(port)); } - list_del_init(&rq->sched.link); - __i915_request_submit(rq); trace_i915_request_in(rq, port_index(port, execlists)); @@ -1170,7 +1168,7 @@ static void process_csb(struct intel_engine_cs *engine) const u8 num_entries = 
execlists->csb_size; u8 head, tail; - lockdep_assert_held(&engine->timeline.lock); + lockdep_assert_held(&engine->active.lock); /* * Note that csb_write, csb_status may be either in HWSP or mmio. @@ -1330,7 +1328,7 @@ static void process_csb(struct intel_engine_cs *engine) static void __execlists_submission_tasklet(struct intel_engine_cs *const engine) { - lockdep_assert_held(&engine->timeline.lock); + lockdep_assert_held(&engine->active.lock); process_csb(engine); if (!execlists_is_active(&engine->execlists, EXECLISTS_ACTIVE_PREEMPT)) @@ -1351,15 +1349,16 @@ static void execlists_submission_tasklet(unsigned long data) !!intel_wakeref_active(&engine->wakeref), engine->execlists.active); - spin_lock_irqsave(&engine->timeline.lock, flags); + spin_lock_irqsave(&engine->active.lock, flags); __execlists_submission_tasklet(engine); - spin_unlock_irqrestore(&engine->timeline.lock, flags); + spin_unlock_irqrestore(&engine->active.lock, flags); } static void queue_request(struct intel_engine_cs *engine, struct i915_sched_node *node, int prio) { + GEM_BUG_ON(!list_empty(&node->link)); list_add_tail(&node->link, i915_sched_lookup_priolist(engine, prio)); } @@ -1390,7 +1389,7 @@ static void execlists_submit_request(struct i915_request *request) unsigned long flags; /* Will be called from irq-context when using foreign fences. */ - spin_lock_irqsave(&engine->timeline.lock, flags); + spin_lock_irqsave(&engine->active.lock, flags); queue_request(engine, &request->sched, rq_prio(request)); @@ -1399,7 +1398,7 @@ static void execlists_submit_request(struct i915_request *request) submit_queue(engine, rq_prio(request)); - spin_unlock_irqrestore(&engine->timeline.lock, flags); + spin_unlock_irqrestore(&engine->active.lock, flags); } static void __execlists_context_fini(struct intel_context *ce) @@ -2050,8 +2049,8 @@ static void execlists_reset_prepare(struct intel_engine_cs *engine) intel_engine_stop_cs(engine); /* And flush any current direct submission. */ - spin_lock_irqsave(&engine->timeline.lock, flags); - spin_unlock_irqrestore(&engine->timeline.lock, flags); + spin_lock_irqsave(&engine->active.lock, flags); + spin_unlock_irqrestore(&engine->active.lock, flags); } static bool lrc_regs_ok(const struct i915_request *rq) @@ -2094,11 +2093,11 @@ static void reset_csb_pointers(struct intel_engine_execlists *execlists) static struct i915_request *active_request(struct i915_request *rq) { - const struct list_head * const list = &rq->engine->timeline.requests; + const struct list_head * const list = &rq->engine->active.requests; const struct intel_context * const context = rq->hw_context; struct i915_request *active = NULL; - list_for_each_entry_from_reverse(rq, list, link) { + list_for_each_entry_from_reverse(rq, list, sched.link) { if (i915_request_completed(rq)) break; @@ -2215,11 +2214,11 @@ static void execlists_reset(struct intel_engine_cs *engine, bool stalled) GEM_TRACE("%s\n", engine->name); - spin_lock_irqsave(&engine->timeline.lock, flags); + spin_lock_irqsave(&engine->active.lock, flags); __execlists_reset(engine, stalled); - spin_unlock_irqrestore(&engine->timeline.lock, flags); + spin_unlock_irqrestore(&engine->active.lock, flags); } static void nop_submission_tasklet(unsigned long data) @@ -2250,12 +2249,12 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine) * submission's irq state, we also wish to remind ourselves that * it is irq state.) 
*/ - spin_lock_irqsave(&engine->timeline.lock, flags); + spin_lock_irqsave(&engine->active.lock, flags); __execlists_reset(engine, true); /* Mark all executing requests as skipped. */ - list_for_each_entry(rq, &engine->timeline.requests, link) { + list_for_each_entry(rq, &engine->active.requests, sched.link) { if (!i915_request_signaled(rq)) dma_fence_set_error(&rq->fence, -EIO); @@ -2286,7 +2285,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine) rb_erase_cached(rb, &execlists->virtual); RB_CLEAR_NODE(rb); - spin_lock(&ve->base.timeline.lock); + spin_lock(&ve->base.active.lock); if (ve->request) { ve->request->engine = engine; __i915_request_submit(ve->request); @@ -2295,7 +2294,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine) ve->base.execlists.queue_priority_hint = INT_MIN; ve->request = NULL; } - spin_unlock(&ve->base.timeline.lock); + spin_unlock(&ve->base.active.lock); } /* Remaining _unready_ requests will be nop'ed when submitted */ @@ -2307,7 +2306,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine) GEM_BUG_ON(__tasklet_is_enabled(&execlists->tasklet)); execlists->tasklet.func = nop_submission_tasklet; - spin_unlock_irqrestore(&engine->timeline.lock, flags); + spin_unlock_irqrestore(&engine->active.lock, flags); } static void execlists_reset_finish(struct intel_engine_cs *engine) @@ -3010,12 +3009,18 @@ error_deref_obj: return ret; } +static struct list_head *virtual_queue(struct virtual_engine *ve) +{ + return &ve->base.execlists.default_priolist.requests[0]; +} + static void virtual_context_destroy(struct kref *kref) { struct virtual_engine *ve = container_of(kref, typeof(*ve), context.ref); unsigned int n; + GEM_BUG_ON(!list_empty(virtual_queue(ve))); GEM_BUG_ON(ve->request); GEM_BUG_ON(ve->context.inflight); @@ -3026,13 +3031,13 @@ static void virtual_context_destroy(struct kref *kref) if (RB_EMPTY_NODE(node)) continue; - spin_lock_irq(&sibling->timeline.lock); + spin_lock_irq(&sibling->active.lock); /* Detachment is lazily performed in the execlists tasklet */ if (!RB_EMPTY_NODE(node)) rb_erase_cached(node, &sibling->execlists.virtual); - spin_unlock_irq(&sibling->timeline.lock); + spin_unlock_irq(&sibling->active.lock); } GEM_BUG_ON(__tasklet_is_scheduled(&ve->base.execlists.tasklet)); @@ -3040,8 +3045,6 @@ static void virtual_context_destroy(struct kref *kref) __execlists_context_fini(&ve->context); kfree(ve->bonds); - - i915_timeline_fini(&ve->base.timeline); kfree(ve); } @@ -3161,16 +3164,16 @@ static void virtual_submission_tasklet(unsigned long data) if (unlikely(!(mask & sibling->mask))) { if (!RB_EMPTY_NODE(&node->rb)) { - spin_lock(&sibling->timeline.lock); + spin_lock(&sibling->active.lock); rb_erase_cached(&node->rb, &sibling->execlists.virtual); RB_CLEAR_NODE(&node->rb); - spin_unlock(&sibling->timeline.lock); + spin_unlock(&sibling->active.lock); } continue; } - spin_lock(&sibling->timeline.lock); + spin_lock(&sibling->active.lock); if (!RB_EMPTY_NODE(&node->rb)) { /* @@ -3214,7 +3217,7 @@ submit_engine: tasklet_hi_schedule(&sibling->execlists.tasklet); } - spin_unlock(&sibling->timeline.lock); + spin_unlock(&sibling->active.lock); } local_irq_enable(); } @@ -3231,9 +3234,13 @@ static void virtual_submit_request(struct i915_request *rq) GEM_BUG_ON(ve->base.submit_request != virtual_submit_request); GEM_BUG_ON(ve->request); + GEM_BUG_ON(!list_empty(virtual_queue(ve))); + ve->base.execlists.queue_priority_hint = rq_prio(rq); WRITE_ONCE(ve->request, rq); + list_move_tail(&rq->sched.link, 
virtual_queue(ve)); + tasklet_schedule(&ve->base.execlists.tasklet); } @@ -3297,10 +3304,7 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx, snprintf(ve->base.name, sizeof(ve->base.name), "virtual"); - err = i915_timeline_init(ctx->i915, &ve->base.timeline, NULL); - if (err) - goto err_put; - i915_timeline_set_subclass(&ve->base.timeline, TIMELINE_VIRTUAL); + intel_engine_init_active(&ve->base, ENGINE_VIRTUAL); intel_engine_init_execlists(&ve->base); @@ -3311,6 +3315,7 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx, ve->base.submit_request = virtual_submit_request; ve->base.bond_execute = virtual_bond_execute; + INIT_LIST_HEAD(virtual_queue(ve)); ve->base.execlists.queue_priority_hint = INT_MIN; tasklet_init(&ve->base.execlists.tasklet, virtual_submission_tasklet, @@ -3465,11 +3470,11 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine, unsigned int count; struct rb_node *rb; - spin_lock_irqsave(&engine->timeline.lock, flags); + spin_lock_irqsave(&engine->active.lock, flags); last = NULL; count = 0; - list_for_each_entry(rq, &engine->timeline.requests, link) { + list_for_each_entry(rq, &engine->active.requests, sched.link) { if (count++ < max - 1) show_request(m, rq, "\t\tE "); else @@ -3532,7 +3537,7 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine, show_request(m, last, "\t\tV "); } - spin_unlock_irqrestore(&engine->timeline.lock, flags); + spin_unlock_irqrestore(&engine->active.lock, flags); } void intel_lr_context_reset(struct intel_engine_cs *engine, diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index 6e6807a3f748..84c670bdb081 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -49,12 +49,12 @@ static void engine_skip_context(struct i915_request *rq) struct intel_engine_cs *engine = rq->engine; struct i915_gem_context *hung_ctx = rq->gem_context; - lockdep_assert_held(&engine->timeline.lock); + lockdep_assert_held(&engine->active.lock); if (!i915_request_is_active(rq)) return; - list_for_each_entry_continue(rq, &engine->timeline.requests, link) + list_for_each_entry_continue(rq, &engine->active.requests, sched.link) if (rq->gem_context == hung_ctx) i915_request_skip(rq, -EIO); } @@ -130,7 +130,7 @@ void i915_reset_request(struct i915_request *rq, bool guilty) rq->fence.seqno, yesno(guilty)); - lockdep_assert_held(&rq->engine->timeline.lock); + lockdep_assert_held(&rq->engine->active.lock); GEM_BUG_ON(i915_request_completed(rq)); if (guilty) { @@ -785,10 +785,10 @@ static void nop_submit_request(struct i915_request *request) engine->name, request->fence.context, request->fence.seqno); dma_fence_set_error(&request->fence, -EIO); - spin_lock_irqsave(&engine->timeline.lock, flags); + spin_lock_irqsave(&engine->active.lock, flags); __i915_request_submit(request); i915_request_mark_complete(request); - spin_unlock_irqrestore(&engine->timeline.lock, flags); + spin_unlock_irqrestore(&engine->active.lock, flags); intel_engine_queue_breadcrumbs(engine); } diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c index cc901edec09a..019bf039f616 100644 --- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c @@ -730,14 +730,13 @@ static void reset_prepare(struct intel_engine_cs *engine) static void reset_ring(struct intel_engine_cs *engine, bool stalled) { - struct i915_timeline *tl = &engine->timeline; struct i915_request *pos, *rq; unsigned long flags; 
u32 head; rq = NULL; - spin_lock_irqsave(&tl->lock, flags); - list_for_each_entry(pos, &tl->requests, link) { + spin_lock_irqsave(&engine->active.lock, flags); + list_for_each_entry(pos, &engine->active.requests, sched.link) { if (!i915_request_completed(pos)) { rq = pos; break; @@ -791,7 +790,7 @@ static void reset_ring(struct intel_engine_cs *engine, bool stalled) } engine->buffer->head = intel_ring_wrap(engine->buffer, head); - spin_unlock_irqrestore(&tl->lock, flags); + spin_unlock_irqrestore(&engine->active.lock, flags); } static void reset_finish(struct intel_engine_cs *engine) @@ -877,10 +876,10 @@ static void cancel_requests(struct intel_engine_cs *engine) struct i915_request *request; unsigned long flags; - spin_lock_irqsave(&engine->timeline.lock, flags); + spin_lock_irqsave(&engine->active.lock, flags); /* Mark all submitted requests as skipped. */ - list_for_each_entry(request, &engine->timeline.requests, link) { + list_for_each_entry(request, &engine->active.requests, sched.link) { if (!i915_request_signaled(request)) dma_fence_set_error(&request->fence, -EIO); @@ -889,7 +888,7 @@ static void cancel_requests(struct intel_engine_cs *engine) /* Remaining _unready_ requests will be nop'ed when submitted */ - spin_unlock_irqrestore(&engine->timeline.lock, flags); + spin_unlock_irqrestore(&engine->active.lock, flags); } static void i9xx_submit_request(struct i915_request *request) @@ -1267,8 +1266,6 @@ intel_engine_create_ring(struct intel_engine_cs *engine, GEM_BUG_ON(!is_power_of_2(size)); GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES); - GEM_BUG_ON(timeline == &engine->timeline); - lockdep_assert_held(&engine->i915->drm.struct_mutex); ring = kzalloc(sizeof(*ring), GFP_KERNEL); if (!ring) diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c index d1ef515bac8d..086801b51441 100644 --- a/drivers/gpu/drm/i915/gt/mock_engine.c +++ b/drivers/gpu/drm/i915/gt/mock_engine.c @@ -229,17 +229,17 @@ static void mock_cancel_requests(struct intel_engine_cs *engine) struct i915_request *request; unsigned long flags; - spin_lock_irqsave(&engine->timeline.lock, flags); + spin_lock_irqsave(&engine->active.lock, flags); /* Mark all submitted requests as skipped. 
*/ - list_for_each_entry(request, &engine->timeline.requests, sched.link) { + list_for_each_entry(request, &engine->active.requests, sched.link) { if (!i915_request_signaled(request)) dma_fence_set_error(&request->fence, -EIO); i915_request_mark_complete(request); } - spin_unlock_irqrestore(&engine->timeline.lock, flags); + spin_unlock_irqrestore(&engine->active.lock, flags); } struct intel_engine_cs *mock_engine(struct drm_i915_private *i915, @@ -285,28 +285,23 @@ int mock_engine_init(struct intel_engine_cs *engine) struct drm_i915_private *i915 = engine->i915; int err; + intel_engine_init_active(engine, ENGINE_MOCK); intel_engine_init_breadcrumbs(engine); intel_engine_init_execlists(engine); intel_engine_init__pm(engine); - if (i915_timeline_init(i915, &engine->timeline, NULL)) - goto err_breadcrumbs; - i915_timeline_set_subclass(&engine->timeline, TIMELINE_ENGINE); - engine->kernel_context = i915_gem_context_get_engine(i915->kernel_context, engine->id); if (IS_ERR(engine->kernel_context)) - goto err_timeline; + goto err_breadcrumbs; err = intel_context_pin(engine->kernel_context); intel_context_put(engine->kernel_context); if (err) - goto err_timeline; + goto err_breadcrumbs; return 0; -err_timeline: - i915_timeline_fini(&engine->timeline); err_breadcrumbs: intel_engine_fini_breadcrumbs(engine); return -ENOMEM; @@ -340,7 +335,6 @@ void mock_engine_free(struct intel_engine_cs *engine) intel_context_unpin(engine->kernel_context); intel_engine_fini_breadcrumbs(engine); - i915_timeline_fini(&engine->timeline); kfree(engine); } diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 26c9c0595bdf..f411e3244208 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -1275,7 +1275,7 @@ static void engine_record_requests(struct intel_engine_cs *engine, count = 0; request = first; - list_for_each_entry_from(request, &engine->timeline.requests, link) + list_for_each_entry_from(request, &engine->active.requests, sched.link) count++; if (!count) return; @@ -1288,7 +1288,8 @@ static void engine_record_requests(struct intel_engine_cs *engine, count = 0; request = first; - list_for_each_entry_from(request, &engine->timeline.requests, link) { + list_for_each_entry_from(request, + &engine->active.requests, sched.link) { if (count >= ee->num_requests) { /* * If the ring request list was changed in diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index c99136f78af9..9819483d1b5d 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -232,9 +232,9 @@ static bool i915_request_retire(struct i915_request *rq) local_irq_disable(); - spin_lock(&rq->engine->timeline.lock); - list_del(&rq->link); - spin_unlock(&rq->engine->timeline.lock); + spin_lock(&rq->engine->active.lock); + list_del(&rq->sched.link); + spin_unlock(&rq->engine->active.lock); spin_lock(&rq->lock); i915_request_mark_complete(rq); @@ -254,6 +254,7 @@ static bool i915_request_retire(struct i915_request *rq) intel_context_unpin(rq->hw_context); i915_request_remove_from_client(rq); + list_del(&rq->link); free_capture_list(rq); i915_sched_node_fini(&rq->sched); @@ -373,28 +374,17 @@ __i915_request_await_execution(struct i915_request *rq, return 0; } -static void move_to_timeline(struct i915_request *request, - struct i915_timeline *timeline) -{ - GEM_BUG_ON(request->timeline == &request->engine->timeline); - lockdep_assert_held(&request->engine->timeline.lock); - - 
spin_lock(&request->timeline->lock); - list_move_tail(&request->link, &timeline->requests); - spin_unlock(&request->timeline->lock); -} - void __i915_request_submit(struct i915_request *request) { struct intel_engine_cs *engine = request->engine; - GEM_TRACE("%s fence %llx:%lld -> current %d\n", + GEM_TRACE("%s fence %llx:%lld, current %d\n", engine->name, request->fence.context, request->fence.seqno, hwsp_seqno(request)); GEM_BUG_ON(!irqs_disabled()); - lockdep_assert_held(&engine->timeline.lock); + lockdep_assert_held(&engine->active.lock); if (i915_gem_context_is_banned(request->gem_context)) i915_request_skip(request, -EIO); @@ -422,6 +412,8 @@ void __i915_request_submit(struct i915_request *request) /* We may be recursing from the signal callback of another i915 fence */ spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING); + list_move_tail(&request->sched.link, &engine->active.requests); + GEM_BUG_ON(test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags)); set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags); @@ -437,9 +429,6 @@ void __i915_request_submit(struct i915_request *request) engine->emit_fini_breadcrumb(request, request->ring->vaddr + request->postfix); - /* Transfer from per-context onto the global per-engine timeline */ - move_to_timeline(request, &engine->timeline); - engine->serial++; trace_i915_request_execute(request); @@ -451,11 +440,11 @@ void i915_request_submit(struct i915_request *request) unsigned long flags; /* Will be called from irq-context when using foreign fences. */ - spin_lock_irqsave(&engine->timeline.lock, flags); + spin_lock_irqsave(&engine->active.lock, flags); __i915_request_submit(request); - spin_unlock_irqrestore(&engine->timeline.lock, flags); + spin_unlock_irqrestore(&engine->active.lock, flags); } void __i915_request_unsubmit(struct i915_request *request) @@ -468,7 +457,7 @@ void __i915_request_unsubmit(struct i915_request *request) hwsp_seqno(request)); GEM_BUG_ON(!irqs_disabled()); - lockdep_assert_held(&engine->timeline.lock); + lockdep_assert_held(&engine->active.lock); /* * Only unwind in reverse order, required so that the per-context list @@ -486,9 +475,6 @@ void __i915_request_unsubmit(struct i915_request *request) spin_unlock(&request->lock); - /* Transfer back from the global per-engine timeline to per-context */ - move_to_timeline(request, request->timeline); - /* We've already spun, don't charge on resubmitting. */ if (request->sched.semaphores && i915_request_started(request)) { request->sched.attr.priority |= I915_PRIORITY_NOSEMAPHORE; @@ -510,11 +496,11 @@ void i915_request_unsubmit(struct i915_request *request) unsigned long flags; /* Will be called from irq-context when using foreign fences. 
*/ - spin_lock_irqsave(&engine->timeline.lock, flags); + spin_lock_irqsave(&engine->active.lock, flags); __i915_request_unsubmit(request); - spin_unlock_irqrestore(&engine->timeline.lock, flags); + spin_unlock_irqrestore(&engine->active.lock, flags); } static int __i915_sw_fence_call @@ -669,7 +655,6 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp) rq->engine = ce->engine; rq->ring = ce->ring; rq->timeline = tl; - GEM_BUG_ON(rq->timeline == &ce->engine->timeline); rq->hwsp_seqno = tl->hwsp_seqno; rq->hwsp_cacheline = tl->hwsp_cacheline; rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */ @@ -1136,9 +1121,7 @@ __i915_request_add_to_timeline(struct i915_request *rq) 0); } - spin_lock_irq(&timeline->lock); list_add_tail(&rq->link, &timeline->requests); - spin_unlock_irq(&timeline->lock); /* * Make sure that no request gazumped us - if it was allocated after diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h index c9f7d07991c8..edbbdfec24ab 100644 --- a/drivers/gpu/drm/i915/i915_request.h +++ b/drivers/gpu/drm/i915/i915_request.h @@ -217,7 +217,7 @@ struct i915_request { bool waitboost; - /** engine->request_list entry for this request */ + /** timeline->request entry for this request */ struct list_head link; /** ring->request_list entry for this request */ diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c index 78ceb56d7801..2e9b38bdc33c 100644 --- a/drivers/gpu/drm/i915/i915_scheduler.c +++ b/drivers/gpu/drm/i915/i915_scheduler.c @@ -77,7 +77,7 @@ i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio) bool first = true; int idx, i; - lockdep_assert_held(&engine->timeline.lock); + lockdep_assert_held(&engine->active.lock); assert_priolists(execlists); /* buckets sorted from highest [in slot 0] to lowest priority */ @@ -162,9 +162,9 @@ sched_lock_engine(const struct i915_sched_node *node, * check that the rq still belongs to the newly locked engine. */ while (locked != (engine = READ_ONCE(rq->engine))) { - spin_unlock(&locked->timeline.lock); + spin_unlock(&locked->active.lock); memset(cache, 0, sizeof(*cache)); - spin_lock(&engine->timeline.lock); + spin_lock(&engine->active.lock); locked = engine; } @@ -189,7 +189,7 @@ static void kick_submission(struct intel_engine_cs *engine, int prio) * tasklet, i.e. we have not change the priority queue * sufficiently to oust the running context. 
*/ - if (inflight && !i915_scheduler_need_preempt(prio, rq_prio(inflight))) + if (!inflight || !i915_scheduler_need_preempt(prio, rq_prio(inflight))) return; tasklet_hi_schedule(&engine->execlists.tasklet); @@ -278,7 +278,7 @@ static void __i915_schedule(struct i915_sched_node *node, memset(&cache, 0, sizeof(cache)); engine = node_to_request(node)->engine; - spin_lock(&engine->timeline.lock); + spin_lock(&engine->active.lock); /* Fifo and depth-first replacement ensure our deps execute before us */ engine = sched_lock_engine(node, engine, &cache); @@ -287,7 +287,7 @@ static void __i915_schedule(struct i915_sched_node *node, node = dep->signaler; engine = sched_lock_engine(node, engine, &cache); - lockdep_assert_held(&engine->timeline.lock); + lockdep_assert_held(&engine->active.lock); /* Recheck after acquiring the engine->timeline.lock */ if (prio <= node->attr.priority || node_signaled(node)) @@ -296,14 +296,8 @@ static void __i915_schedule(struct i915_sched_node *node, GEM_BUG_ON(node_to_request(node)->engine != engine); node->attr.priority = prio; - if (!list_empty(&node->link)) { - GEM_BUG_ON(intel_engine_is_virtual(engine)); - if (!cache.priolist) - cache.priolist = - i915_sched_lookup_priolist(engine, - prio); - list_move_tail(&node->link, cache.priolist); - } else { + + if (list_empty(&node->link)) { /* * If the request is not in the priolist queue because * it is not yet runnable, then it doesn't contribute @@ -312,8 +306,16 @@ static void __i915_schedule(struct i915_sched_node *node, * queue; but in that case we may still need to reorder * the inflight requests. */ - if (!i915_sw_fence_done(&node_to_request(node)->submit)) - continue; + continue; + } + + if (!intel_engine_is_virtual(engine) && + !i915_request_is_active(node_to_request(node))) { + if (!cache.priolist) + cache.priolist = + i915_sched_lookup_priolist(engine, + prio); + list_move_tail(&node->link, cache.priolist); } if (prio <= engine->execlists.queue_priority_hint) @@ -325,7 +327,7 @@ static void __i915_schedule(struct i915_sched_node *node, kick_submission(engine, prio); } - spin_unlock(&engine->timeline.lock); + spin_unlock(&engine->active.lock); } void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr) @@ -439,8 +441,6 @@ void i915_sched_node_fini(struct i915_sched_node *node) { struct i915_dependency *dep, *tmp; - GEM_BUG_ON(!list_empty(&node->link)); - spin_lock_irq(&schedule_lock); /* diff --git a/drivers/gpu/drm/i915/i915_timeline.c b/drivers/gpu/drm/i915/i915_timeline.c index 000e1a9b6750..c311ce9c6f9d 100644 --- a/drivers/gpu/drm/i915/i915_timeline.c +++ b/drivers/gpu/drm/i915/i915_timeline.c @@ -251,7 +251,6 @@ int i915_timeline_init(struct drm_i915_private *i915, timeline->fence_context = dma_fence_context_alloc(1); - spin_lock_init(&timeline->lock); mutex_init(&timeline->mutex); INIT_ACTIVE_REQUEST(&timeline->last_request); diff --git a/drivers/gpu/drm/i915/i915_timeline.h b/drivers/gpu/drm/i915/i915_timeline.h index 27668a1a69a3..36e5e5a65155 100644 --- a/drivers/gpu/drm/i915/i915_timeline.h +++ b/drivers/gpu/drm/i915/i915_timeline.h @@ -36,25 +36,6 @@ int i915_timeline_init(struct drm_i915_private *i915, struct i915_vma *hwsp); void i915_timeline_fini(struct i915_timeline *tl); -static inline void -i915_timeline_set_subclass(struct i915_timeline *timeline, - unsigned int subclass) -{ - lockdep_set_subclass(&timeline->lock, subclass); - - /* - * Due to an interesting quirk in lockdep's internal debug tracking, - * after setting a subclass we must ensure the lock is used. 
Otherwise, - * nr_unused_locks is incremented once too often. - */ -#ifdef CONFIG_DEBUG_LOCK_ALLOC - local_irq_disable(); - lock_map_acquire(&timeline->lock.dep_map); - lock_map_release(&timeline->lock.dep_map); - local_irq_enable(); -#endif -} - struct i915_timeline * i915_timeline_create(struct drm_i915_private *i915, struct i915_vma *global_hwsp); diff --git a/drivers/gpu/drm/i915/i915_timeline_types.h b/drivers/gpu/drm/i915/i915_timeline_types.h index 1688705f4a2b..fce5cb4f1090 100644 --- a/drivers/gpu/drm/i915/i915_timeline_types.h +++ b/drivers/gpu/drm/i915/i915_timeline_types.h @@ -23,10 +23,6 @@ struct i915_timeline { u64 fence_context; u32 seqno; - spinlock_t lock; -#define TIMELINE_CLIENT 0 /* default subclass */ -#define TIMELINE_ENGINE 1 -#define TIMELINE_VIRTUAL 2 struct mutex mutex; /* protects the flow of requests */ unsigned int pin_count; diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c index 97f6970d8da8..db531ebc7704 100644 --- a/drivers/gpu/drm/i915/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/intel_guc_submission.c @@ -740,7 +740,7 @@ static bool __guc_dequeue(struct intel_engine_cs *engine) bool submit = false; struct rb_node *rb; - lockdep_assert_held(&engine->timeline.lock); + lockdep_assert_held(&engine->active.lock); if (port_isset(port)) { if (intel_engine_has_preemption(engine)) { @@ -822,7 +822,7 @@ static void guc_submission_tasklet(unsigned long data) struct i915_request *rq; unsigned long flags; - spin_lock_irqsave(&engine->timeline.lock, flags); + spin_lock_irqsave(&engine->active.lock, flags); rq = port_request(port); while (rq && i915_request_completed(rq)) { @@ -847,7 +847,7 @@ static void guc_submission_tasklet(unsigned long data) if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT)) guc_dequeue(engine); - spin_unlock_irqrestore(&engine->timeline.lock, flags); + spin_unlock_irqrestore(&engine->active.lock, flags); } static void guc_reset_prepare(struct intel_engine_cs *engine) @@ -884,7 +884,7 @@ static void guc_reset(struct intel_engine_cs *engine, bool stalled) struct i915_request *rq; unsigned long flags; - spin_lock_irqsave(&engine->timeline.lock, flags); + spin_lock_irqsave(&engine->active.lock, flags); execlists_cancel_port_requests(execlists); @@ -900,7 +900,7 @@ static void guc_reset(struct intel_engine_cs *engine, bool stalled) intel_lr_context_reset(engine, rq->hw_context, rq->head, stalled); out_unlock: - spin_unlock_irqrestore(&engine->timeline.lock, flags); + spin_unlock_irqrestore(&engine->active.lock, flags); } static void guc_cancel_requests(struct intel_engine_cs *engine) @@ -926,13 +926,13 @@ static void guc_cancel_requests(struct intel_engine_cs *engine) * submission's irq state, we also wish to remind ourselves that * it is irq state.) */ - spin_lock_irqsave(&engine->timeline.lock, flags); + spin_lock_irqsave(&engine->active.lock, flags); /* Cancel the requests on the HW and clear the ELSP tracker. */ execlists_cancel_port_requests(execlists); /* Mark all executing requests as skipped. 
*/ - list_for_each_entry(rq, &engine->timeline.requests, link) { + list_for_each_entry(rq, &engine->active.requests, sched.link) { if (!i915_request_signaled(rq)) dma_fence_set_error(&rq->fence, -EIO); @@ -961,7 +961,7 @@ static void guc_cancel_requests(struct intel_engine_cs *engine) execlists->queue = RB_ROOT_CACHED; GEM_BUG_ON(port_isset(execlists->port)); - spin_unlock_irqrestore(&engine->timeline.lock, flags); + spin_unlock_irqrestore(&engine->active.lock, flags); } static void guc_reset_finish(struct intel_engine_cs *engine) diff --git a/drivers/gpu/drm/i915/selftests/mock_timeline.c b/drivers/gpu/drm/i915/selftests/mock_timeline.c index e084476469ef..65b52be23d42 100644 --- a/drivers/gpu/drm/i915/selftests/mock_timeline.c +++ b/drivers/gpu/drm/i915/selftests/mock_timeline.c @@ -13,7 +13,6 @@ void mock_timeline_init(struct i915_timeline *timeline, u64 context) timeline->i915 = NULL; timeline->fence_context = context; - spin_lock_init(&timeline->lock); mutex_init(&timeline->mutex); INIT_ACTIVE_REQUEST(&timeline->last_request); -- cgit v1.2.3 From dfdeaff293962a55a9ab8699cfc2ef960fa4b754 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 14 Jun 2019 23:06:16 +0100 Subject: drm/i915: Avoid tainting i915_gem_park() with wakeref.lock While we need to flush the wakeref before parking, we do not need to perform the i915_gem_park() itself underneath the wakeref lock, merely the struct_mutex. If we rearrange the locks, we can avoid the unnecessary tainting. Signed-off-by: Chris Wilson Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190614220616.24932-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_pm.c | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c index 141f3ea349a4..05011d4a3b88 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c @@ -48,23 +48,22 @@ static void idle_work_handler(struct work_struct *work) { struct drm_i915_private *i915 = container_of(work, typeof(*i915), gem.idle_work); - bool restart = true; + bool park; - cancel_delayed_work(&i915->gem.retire_work); + cancel_delayed_work_sync(&i915->gem.retire_work); mutex_lock(&i915->drm.struct_mutex); intel_wakeref_lock(&i915->gt.wakeref); - if (!intel_wakeref_active(&i915->gt.wakeref) && !work_pending(work)) { - i915_gem_park(i915); - restart = false; - } + park = !intel_wakeref_active(&i915->gt.wakeref) && !work_pending(work); intel_wakeref_unlock(&i915->gt.wakeref); - - mutex_unlock(&i915->drm.struct_mutex); - if (restart) + if (park) + i915_gem_park(i915); + else queue_delayed_work(i915->wq, &i915->gem.retire_work, round_jiffies_up_relative(HZ)); + + mutex_unlock(&i915->drm.struct_mutex); } static void retire_work_handler(struct work_struct *work) -- cgit v1.2.3 From f4071997f1de016780ec6b79c63d90cd5886ee83 Mon Sep 17 00:00:00 2001 From: Matt Roper Date: Thu, 30 May 2019 16:40:14 -0700 Subject: drm/i915/ehl: Update MOCS table for EHL MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit EHL defines two new MOCS table entries but is otherwise compatible with the ICL MOCS table. These table entries (16 and 17) should still be considered unused for ICL and as such their behavior remains undefined for that platform. 
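For readers skimming the hunk below: MOCS_ENTRY() builds a sparse array initializer keyed by the table index, so adding entries 16 and 17 does not disturb the existing ICL definitions. A rough sketch of the helper, paraphrased from intel_mocs.c rather than quoted from this patch (field comments are assumptions, shown only to make the hunk easier to read):

	/* Illustrative sketch only -- paraphrased, not part of this patch. */
	struct drm_i915_mocs_entry {
		u32 control_value;	/* LE_* (LLC) cacheability word */
		u16 l3cc_value;		/* L3_* cacheability word */
		u16 used;		/* entry is defined for this platform */
	};

	#define MOCS_ENTRY(__idx, __control_value, __l3cc_value) \
		[__idx] = { \
			.control_value = (__control_value), \
			.l3cc_value = (__l3cc_value), \
			.used = 1, \
		}

The two EHL additions therefore only mark indices 16 and 17 as used with "bypass LLC" settings; every other index is left exactly as it was defined for ICL.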
Signed-off-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/20190530234014.22340-1-matthew.d.roper@intel.com Reviewed-by: Lucas De Marchi Reviewed-by: José Roberto de Souza --- drivers/gpu/drm/i915/gt/intel_mocs.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c index 79df66022d3a..1f9db50b1869 100644 --- a/drivers/gpu/drm/i915/gt/intel_mocs.c +++ b/drivers/gpu/drm/i915/gt/intel_mocs.c @@ -200,6 +200,14 @@ static const struct drm_i915_mocs_entry broxton_mocs_table[] = { MOCS_ENTRY(15, \ LE_3_WB | LE_TC_1_LLC | LE_LRUM(2) | LE_AOM(1), \ L3_3_WB), \ + /* Bypass LLC - Uncached (EHL+) */ \ + MOCS_ENTRY(16, \ + LE_1_UC | LE_TC_1_LLC | LE_SCF(1), \ + L3_1_UC), \ + /* Bypass LLC - L3 (Read-Only) (EHL+) */ \ + MOCS_ENTRY(17, \ + LE_1_UC | LE_TC_1_LLC | LE_SCF(1), \ + L3_3_WB), \ /* Self-Snoop - L3 + LLC */ \ MOCS_ENTRY(18, \ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_SSE(3), \ -- cgit v1.2.3 From ca851bae0f525ba5a9f75b1644acccb57f88aabf Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Thu, 13 Jun 2019 13:08:18 +0300 Subject: drm/i915: make intel_sdvo_regs.h self-contained MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Ensure intel_sdvo_regs.h is self-contained and remains that way. v2: - include for __packed (Chris) Cc: Chris Wilson Reviewed-by: Chris Wilson Acked-by: Maarten Lankhorst Acked-by: Ville Syrjälä Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190613100818.24800-1-jani.nikula@intel.com --- drivers/gpu/drm/i915/Makefile.header-test | 1 + drivers/gpu/drm/i915/intel_sdvo_regs.h | 8 ++++++++ 2 files changed, 9 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/Makefile.header-test b/drivers/gpu/drm/i915/Makefile.header-test index 6ef3b647ac65..c04297ce57b4 100644 --- a/drivers/gpu/drm/i915/Makefile.header-test +++ b/drivers/gpu/drm/i915/Makefile.header-test @@ -56,6 +56,7 @@ header_test := \ intel_quirks.h \ intel_runtime_pm.h \ intel_sdvo.h \ + intel_sdvo_regs.h \ intel_sideband.h \ intel_sprite.h \ intel_tv.h \ diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h index e9ba3b047f93..13b9a8e257bb 100644 --- a/drivers/gpu/drm/i915/intel_sdvo_regs.h +++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h @@ -24,6 +24,12 @@ * Eric Anholt */ +#ifndef __INTEL_SDVO_REGS_H__ +#define __INTEL_SDVO_REGS_H__ + +#include +#include + /* * SDVO command definitions and structures. */ @@ -731,3 +737,5 @@ struct intel_sdvo_encode { u8 dvi_rev; u8 hdmi_rev; } __packed; + +#endif /* __INTEL_SDVO_REGS_H__ */ -- cgit v1.2.3 From 379bc100232acd45b19421bd0748f9f549da8a8a Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Thu, 13 Jun 2019 11:44:15 +0300 Subject: drm/i915: move modesetting output/encoder code under display/ MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a new subdirectory for display code, and start off by moving modesetting output/encoder code. Judging by the include changes, this is a surprisingly clean operation. 
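Since the diffstat below is long, note that the "include changes" are mechanical path updates only. A hypothetical before/after for a source file that stays outside display/ (the header name here is chosen for illustration, not quoted from the patch):

	/* hypothetical example -- not a hunk from this patch */
	/* before: */ #include "intel_hdmi.h"
	/* after:  */ #include "display/intel_hdmi.h"

No function or type names are renamed by the move itself.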
v2: - move intel_sdvo_regs.h too - use tabs for Makefile file lists and sort them Cc: Chris Wilson Cc: Joonas Lahtinen Cc: Rodrigo Vivi Cc: Ville Syrjälä Reviewed-by: Chris Wilson Acked-by: Rodrigo Vivi Acked-by: Maarten Lankhorst Acked-by: Ville Syrjälä Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190613084416.6794-2-jani.nikula@intel.com --- drivers/gpu/drm/i915/Makefile | 56 +- drivers/gpu/drm/i915/Makefile.header-test | 19 - drivers/gpu/drm/i915/display/Makefile | 2 + drivers/gpu/drm/i915/display/Makefile.header-test | 16 + drivers/gpu/drm/i915/display/dvo_ch7017.c | 415 ++ drivers/gpu/drm/i915/display/dvo_ch7xxx.c | 367 + drivers/gpu/drm/i915/display/dvo_ivch.c | 503 ++ drivers/gpu/drm/i915/display/dvo_ns2501.c | 710 ++ drivers/gpu/drm/i915/display/dvo_sil164.c | 280 + drivers/gpu/drm/i915/display/dvo_tfp410.c | 319 + drivers/gpu/drm/i915/display/icl_dsi.c | 1589 ++++ drivers/gpu/drm/i915/display/intel_crt.c | 1069 +++ drivers/gpu/drm/i915/display/intel_crt.h | 21 + drivers/gpu/drm/i915/display/intel_ddi.c | 4335 +++++++++++ drivers/gpu/drm/i915/display/intel_ddi.h | 52 + drivers/gpu/drm/i915/display/intel_dp.c | 7577 ++++++++++++++++++++ drivers/gpu/drm/i915/display/intel_dp.h | 123 + .../gpu/drm/i915/display/intel_dp_aux_backlight.c | 281 + .../gpu/drm/i915/display/intel_dp_aux_backlight.h | 13 + .../gpu/drm/i915/display/intel_dp_link_training.c | 382 + .../gpu/drm/i915/display/intel_dp_link_training.h | 14 + drivers/gpu/drm/i915/display/intel_dp_mst.c | 664 ++ drivers/gpu/drm/i915/display/intel_dp_mst.h | 14 + drivers/gpu/drm/i915/display/intel_dsi.c | 128 + drivers/gpu/drm/i915/display/intel_dsi.h | 204 + .../gpu/drm/i915/display/intel_dsi_dcs_backlight.c | 179 + .../gpu/drm/i915/display/intel_dsi_dcs_backlight.h | 13 + drivers/gpu/drm/i915/display/intel_dsi_vbt.c | 673 ++ drivers/gpu/drm/i915/display/intel_dvo.c | 555 ++ drivers/gpu/drm/i915/display/intel_dvo.h | 13 + drivers/gpu/drm/i915/display/intel_dvo_dev.h | 140 + drivers/gpu/drm/i915/display/intel_gmbus.c | 955 +++ drivers/gpu/drm/i915/display/intel_gmbus.h | 27 + drivers/gpu/drm/i915/display/intel_hdmi.c | 3204 +++++++++ drivers/gpu/drm/i915/display/intel_hdmi.h | 51 + drivers/gpu/drm/i915/display/intel_lspcon.c | 588 ++ drivers/gpu/drm/i915/display/intel_lspcon.h | 38 + drivers/gpu/drm/i915/display/intel_lvds.c | 1008 +++ drivers/gpu/drm/i915/display/intel_lvds.h | 22 + drivers/gpu/drm/i915/display/intel_panel.c | 2051 ++++++ drivers/gpu/drm/i915/display/intel_panel.h | 65 + drivers/gpu/drm/i915/display/intel_sdvo.c | 3333 +++++++++ drivers/gpu/drm/i915/display/intel_sdvo.h | 23 + drivers/gpu/drm/i915/display/intel_sdvo_regs.h | 741 ++ drivers/gpu/drm/i915/display/intel_tv.c | 1991 +++++ drivers/gpu/drm/i915/display/intel_tv.h | 13 + drivers/gpu/drm/i915/display/intel_vdsc.c | 966 +++ drivers/gpu/drm/i915/display/intel_vdsc.h | 21 + drivers/gpu/drm/i915/display/vlv_dsi.c | 1996 ++++++ drivers/gpu/drm/i915/display/vlv_dsi_pll.c | 569 ++ drivers/gpu/drm/i915/dvo_ch7017.c | 415 -- drivers/gpu/drm/i915/dvo_ch7xxx.c | 367 - drivers/gpu/drm/i915/dvo_ivch.c | 503 -- drivers/gpu/drm/i915/dvo_ns2501.c | 710 -- drivers/gpu/drm/i915/dvo_sil164.c | 280 - drivers/gpu/drm/i915/dvo_tfp410.c | 319 - drivers/gpu/drm/i915/i915_debugfs.c | 5 +- drivers/gpu/drm/i915/i915_drv.c | 5 +- drivers/gpu/drm/i915/i915_suspend.c | 3 +- drivers/gpu/drm/i915/icl_dsi.c | 1589 ---- drivers/gpu/drm/i915/intel_bios.c | 3 +- drivers/gpu/drm/i915/intel_connector.c | 3 +- drivers/gpu/drm/i915/intel_crt.c | 1069 --- 
drivers/gpu/drm/i915/intel_crt.h | 21 - drivers/gpu/drm/i915/intel_ddi.c | 4335 ----------- drivers/gpu/drm/i915/intel_ddi.h | 52 - drivers/gpu/drm/i915/intel_display.c | 23 +- drivers/gpu/drm/i915/intel_display_power.c | 5 +- drivers/gpu/drm/i915/intel_dp.c | 7577 -------------------- drivers/gpu/drm/i915/intel_dp.h | 123 - drivers/gpu/drm/i915/intel_dp_aux_backlight.c | 281 - drivers/gpu/drm/i915/intel_dp_aux_backlight.h | 13 - drivers/gpu/drm/i915/intel_dp_link_training.c | 382 - drivers/gpu/drm/i915/intel_dp_link_training.h | 14 - drivers/gpu/drm/i915/intel_dp_mst.c | 664 -- drivers/gpu/drm/i915/intel_dp_mst.h | 14 - drivers/gpu/drm/i915/intel_dpio_phy.c | 3 +- drivers/gpu/drm/i915/intel_dsi.c | 128 - drivers/gpu/drm/i915/intel_dsi.h | 204 - drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c | 179 - drivers/gpu/drm/i915/intel_dsi_dcs_backlight.h | 13 - drivers/gpu/drm/i915/intel_dsi_vbt.c | 673 -- drivers/gpu/drm/i915/intel_dvo.c | 555 -- drivers/gpu/drm/i915/intel_dvo.h | 13 - drivers/gpu/drm/i915/intel_dvo_dev.h | 140 - drivers/gpu/drm/i915/intel_frontbuffer.c | 3 +- drivers/gpu/drm/i915/intel_gmbus.c | 955 --- drivers/gpu/drm/i915/intel_gmbus.h | 27 - drivers/gpu/drm/i915/intel_hdmi.c | 3204 --------- drivers/gpu/drm/i915/intel_hdmi.h | 51 - drivers/gpu/drm/i915/intel_lspcon.c | 588 -- drivers/gpu/drm/i915/intel_lspcon.h | 38 - drivers/gpu/drm/i915/intel_lvds.c | 1008 --- drivers/gpu/drm/i915/intel_lvds.h | 22 - drivers/gpu/drm/i915/intel_opregion.c | 3 +- drivers/gpu/drm/i915/intel_panel.c | 2051 ------ drivers/gpu/drm/i915/intel_panel.h | 65 - drivers/gpu/drm/i915/intel_psr.c | 3 +- drivers/gpu/drm/i915/intel_sdvo.c | 3333 --------- drivers/gpu/drm/i915/intel_sdvo.h | 23 - drivers/gpu/drm/i915/intel_sdvo_regs.h | 741 -- drivers/gpu/drm/i915/intel_tv.c | 1991 ----- drivers/gpu/drm/i915/intel_tv.h | 13 - drivers/gpu/drm/i915/intel_vdsc.c | 966 --- drivers/gpu/drm/i915/intel_vdsc.h | 21 - drivers/gpu/drm/i915/vlv_dsi.c | 1996 ------ drivers/gpu/drm/i915/vlv_dsi_pll.c | 569 -- 107 files changed, 38377 insertions(+), 38365 deletions(-) create mode 100644 drivers/gpu/drm/i915/display/Makefile create mode 100644 drivers/gpu/drm/i915/display/Makefile.header-test create mode 100644 drivers/gpu/drm/i915/display/dvo_ch7017.c create mode 100644 drivers/gpu/drm/i915/display/dvo_ch7xxx.c create mode 100644 drivers/gpu/drm/i915/display/dvo_ivch.c create mode 100644 drivers/gpu/drm/i915/display/dvo_ns2501.c create mode 100644 drivers/gpu/drm/i915/display/dvo_sil164.c create mode 100644 drivers/gpu/drm/i915/display/dvo_tfp410.c create mode 100644 drivers/gpu/drm/i915/display/icl_dsi.c create mode 100644 drivers/gpu/drm/i915/display/intel_crt.c create mode 100644 drivers/gpu/drm/i915/display/intel_crt.h create mode 100644 drivers/gpu/drm/i915/display/intel_ddi.c create mode 100644 drivers/gpu/drm/i915/display/intel_ddi.h create mode 100644 drivers/gpu/drm/i915/display/intel_dp.c create mode 100644 drivers/gpu/drm/i915/display/intel_dp.h create mode 100644 drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c create mode 100644 drivers/gpu/drm/i915/display/intel_dp_aux_backlight.h create mode 100644 drivers/gpu/drm/i915/display/intel_dp_link_training.c create mode 100644 drivers/gpu/drm/i915/display/intel_dp_link_training.h create mode 100644 drivers/gpu/drm/i915/display/intel_dp_mst.c create mode 100644 drivers/gpu/drm/i915/display/intel_dp_mst.h create mode 100644 drivers/gpu/drm/i915/display/intel_dsi.c create mode 100644 drivers/gpu/drm/i915/display/intel_dsi.h create mode 100644 
drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c create mode 100644 drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.h create mode 100644 drivers/gpu/drm/i915/display/intel_dsi_vbt.c create mode 100644 drivers/gpu/drm/i915/display/intel_dvo.c create mode 100644 drivers/gpu/drm/i915/display/intel_dvo.h create mode 100644 drivers/gpu/drm/i915/display/intel_dvo_dev.h create mode 100644 drivers/gpu/drm/i915/display/intel_gmbus.c create mode 100644 drivers/gpu/drm/i915/display/intel_gmbus.h create mode 100644 drivers/gpu/drm/i915/display/intel_hdmi.c create mode 100644 drivers/gpu/drm/i915/display/intel_hdmi.h create mode 100644 drivers/gpu/drm/i915/display/intel_lspcon.c create mode 100644 drivers/gpu/drm/i915/display/intel_lspcon.h create mode 100644 drivers/gpu/drm/i915/display/intel_lvds.c create mode 100644 drivers/gpu/drm/i915/display/intel_lvds.h create mode 100644 drivers/gpu/drm/i915/display/intel_panel.c create mode 100644 drivers/gpu/drm/i915/display/intel_panel.h create mode 100644 drivers/gpu/drm/i915/display/intel_sdvo.c create mode 100644 drivers/gpu/drm/i915/display/intel_sdvo.h create mode 100644 drivers/gpu/drm/i915/display/intel_sdvo_regs.h create mode 100644 drivers/gpu/drm/i915/display/intel_tv.c create mode 100644 drivers/gpu/drm/i915/display/intel_tv.h create mode 100644 drivers/gpu/drm/i915/display/intel_vdsc.c create mode 100644 drivers/gpu/drm/i915/display/intel_vdsc.h create mode 100644 drivers/gpu/drm/i915/display/vlv_dsi.c create mode 100644 drivers/gpu/drm/i915/display/vlv_dsi_pll.c delete mode 100644 drivers/gpu/drm/i915/dvo_ch7017.c delete mode 100644 drivers/gpu/drm/i915/dvo_ch7xxx.c delete mode 100644 drivers/gpu/drm/i915/dvo_ivch.c delete mode 100644 drivers/gpu/drm/i915/dvo_ns2501.c delete mode 100644 drivers/gpu/drm/i915/dvo_sil164.c delete mode 100644 drivers/gpu/drm/i915/dvo_tfp410.c delete mode 100644 drivers/gpu/drm/i915/icl_dsi.c delete mode 100644 drivers/gpu/drm/i915/intel_crt.c delete mode 100644 drivers/gpu/drm/i915/intel_crt.h delete mode 100644 drivers/gpu/drm/i915/intel_ddi.c delete mode 100644 drivers/gpu/drm/i915/intel_ddi.h delete mode 100644 drivers/gpu/drm/i915/intel_dp.c delete mode 100644 drivers/gpu/drm/i915/intel_dp.h delete mode 100644 drivers/gpu/drm/i915/intel_dp_aux_backlight.c delete mode 100644 drivers/gpu/drm/i915/intel_dp_aux_backlight.h delete mode 100644 drivers/gpu/drm/i915/intel_dp_link_training.c delete mode 100644 drivers/gpu/drm/i915/intel_dp_link_training.h delete mode 100644 drivers/gpu/drm/i915/intel_dp_mst.c delete mode 100644 drivers/gpu/drm/i915/intel_dp_mst.h delete mode 100644 drivers/gpu/drm/i915/intel_dsi.c delete mode 100644 drivers/gpu/drm/i915/intel_dsi.h delete mode 100644 drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c delete mode 100644 drivers/gpu/drm/i915/intel_dsi_dcs_backlight.h delete mode 100644 drivers/gpu/drm/i915/intel_dsi_vbt.c delete mode 100644 drivers/gpu/drm/i915/intel_dvo.c delete mode 100644 drivers/gpu/drm/i915/intel_dvo.h delete mode 100644 drivers/gpu/drm/i915/intel_dvo_dev.h delete mode 100644 drivers/gpu/drm/i915/intel_gmbus.c delete mode 100644 drivers/gpu/drm/i915/intel_gmbus.h delete mode 100644 drivers/gpu/drm/i915/intel_hdmi.c delete mode 100644 drivers/gpu/drm/i915/intel_hdmi.h delete mode 100644 drivers/gpu/drm/i915/intel_lspcon.c delete mode 100644 drivers/gpu/drm/i915/intel_lspcon.h delete mode 100644 drivers/gpu/drm/i915/intel_lvds.c delete mode 100644 drivers/gpu/drm/i915/intel_lvds.h delete mode 100644 drivers/gpu/drm/i915/intel_panel.c delete mode 100644 
drivers/gpu/drm/i915/intel_panel.h delete mode 100644 drivers/gpu/drm/i915/intel_sdvo.c delete mode 100644 drivers/gpu/drm/i915/intel_sdvo.h delete mode 100644 drivers/gpu/drm/i915/intel_sdvo_regs.h delete mode 100644 drivers/gpu/drm/i915/intel_tv.c delete mode 100644 drivers/gpu/drm/i915/intel_tv.h delete mode 100644 drivers/gpu/drm/i915/intel_vdsc.c delete mode 100644 drivers/gpu/drm/i915/intel_vdsc.h delete mode 100644 drivers/gpu/drm/i915/vlv_dsi.c delete mode 100644 drivers/gpu/drm/i915/vlv_dsi_pll.c (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index c0a7b2994077..649f286887b7 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -176,33 +176,35 @@ i915-$(CONFIG_ACPI) += intel_acpi.o intel_opregion.o i915-$(CONFIG_DRM_FBDEV_EMULATION) += intel_fbdev.o # modesetting output/encoder code -i915-y += dvo_ch7017.o \ - dvo_ch7xxx.o \ - dvo_ivch.o \ - dvo_ns2501.o \ - dvo_sil164.o \ - dvo_tfp410.o \ - icl_dsi.o \ - intel_crt.o \ - intel_ddi.o \ - intel_dp_aux_backlight.o \ - intel_dp_link_training.o \ - intel_dp_mst.o \ - intel_dp.o \ - intel_dsi.o \ - intel_dsi_dcs_backlight.o \ - intel_dsi_vbt.o \ - intel_dvo.o \ - intel_gmbus.o \ - intel_hdmi.o \ - intel_lspcon.o \ - intel_lvds.o \ - intel_panel.o \ - intel_sdvo.o \ - intel_tv.o \ - vlv_dsi.o \ - vlv_dsi_pll.o \ - intel_vdsc.o +obj-y += display/ +i915-y += \ + display/dvo_ch7017.o \ + display/dvo_ch7xxx.o \ + display/dvo_ivch.o \ + display/dvo_ns2501.o \ + display/dvo_sil164.o \ + display/dvo_tfp410.o \ + display/icl_dsi.o \ + display/intel_crt.o \ + display/intel_ddi.o \ + display/intel_dp.o \ + display/intel_dp_aux_backlight.o \ + display/intel_dp_link_training.o \ + display/intel_dp_mst.o \ + display/intel_dsi.o \ + display/intel_dsi_dcs_backlight.o \ + display/intel_dsi_vbt.o \ + display/intel_dvo.o \ + display/intel_gmbus.o \ + display/intel_hdmi.o \ + display/intel_lspcon.o \ + display/intel_lvds.o \ + display/intel_panel.o \ + display/intel_sdvo.o \ + display/intel_tv.o \ + display/intel_vdsc.o \ + display/vlv_dsi.o \ + display/vlv_dsi_pll.o # Post-mortem debug and GPU hang state capture i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += i915_gpu_error.o diff --git a/drivers/gpu/drm/i915/Makefile.header-test b/drivers/gpu/drm/i915/Makefile.header-test index c04297ce57b4..5a04858c9b7b 100644 --- a/drivers/gpu/drm/i915/Makefile.header-test +++ b/drivers/gpu/drm/i915/Makefile.header-test @@ -22,46 +22,27 @@ header_test := \ intel_color.h \ intel_combo_phy.h \ intel_connector.h \ - intel_crt.h \ intel_csr.h \ - intel_ddi.h \ intel_display_power.h \ - intel_dp.h \ - intel_dp_aux_backlight.h \ - intel_dp_link_training.h \ - intel_dp_mst.h \ intel_dpio_phy.h \ intel_dpll_mgr.h \ intel_drv.h \ - intel_dsi.h \ - intel_dsi_dcs_backlight.h \ - intel_dvo.h \ - intel_dvo_dev.h \ intel_fbc.h \ intel_fbdev.h \ intel_fifo_underrun.h \ intel_frontbuffer.h \ - intel_gmbus.h \ intel_hdcp.h \ - intel_hdmi.h \ intel_hotplug.h \ intel_lpe_audio.h \ - intel_lspcon.h \ - intel_lvds.h \ intel_overlay.h \ - intel_panel.h \ intel_pipe_crc.h \ intel_pm.h \ intel_psr.h \ intel_quirks.h \ intel_runtime_pm.h \ - intel_sdvo.h \ - intel_sdvo_regs.h \ intel_sideband.h \ intel_sprite.h \ - intel_tv.h \ intel_uncore.h \ - intel_vdsc.h \ intel_wakeref.h quiet_cmd_header_test = HDRTEST $@ diff --git a/drivers/gpu/drm/i915/display/Makefile b/drivers/gpu/drm/i915/display/Makefile new file mode 100644 index 000000000000..1c75b5c9790c --- /dev/null +++ b/drivers/gpu/drm/i915/display/Makefile @@ 
-0,0 +1,2 @@ +# Extra header tests +include $(src)/Makefile.header-test diff --git a/drivers/gpu/drm/i915/display/Makefile.header-test b/drivers/gpu/drm/i915/display/Makefile.header-test new file mode 100644 index 000000000000..61e06cbb4b32 --- /dev/null +++ b/drivers/gpu/drm/i915/display/Makefile.header-test @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: MIT +# Copyright © 2019 Intel Corporation + +# Test the headers are compilable as standalone units +header_test := $(notdir $(wildcard $(src)/*.h)) + +quiet_cmd_header_test = HDRTEST $@ + cmd_header_test = echo "\#include \"$( $@ + +header_test_%.c: %.h + $(call cmd,header_test) + +extra-$(CONFIG_DRM_I915_WERROR) += \ + $(foreach h,$(header_test),$(patsubst %.h,header_test_%.o,$(h))) + +clean-files += $(foreach h,$(header_test),$(patsubst %.h,header_test_%.c,$(h))) diff --git a/drivers/gpu/drm/i915/display/dvo_ch7017.c b/drivers/gpu/drm/i915/display/dvo_ch7017.c new file mode 100644 index 000000000000..602380fe74f3 --- /dev/null +++ b/drivers/gpu/drm/i915/display/dvo_ch7017.c @@ -0,0 +1,415 @@ +/* + * Copyright © 2006 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: + * Eric Anholt + * + */ + +#include "intel_drv.h" +#include "intel_dvo_dev.h" + +#define CH7017_TV_DISPLAY_MODE 0x00 +#define CH7017_FLICKER_FILTER 0x01 +#define CH7017_VIDEO_BANDWIDTH 0x02 +#define CH7017_TEXT_ENHANCEMENT 0x03 +#define CH7017_START_ACTIVE_VIDEO 0x04 +#define CH7017_HORIZONTAL_POSITION 0x05 +#define CH7017_VERTICAL_POSITION 0x06 +#define CH7017_BLACK_LEVEL 0x07 +#define CH7017_CONTRAST_ENHANCEMENT 0x08 +#define CH7017_TV_PLL 0x09 +#define CH7017_TV_PLL_M 0x0a +#define CH7017_TV_PLL_N 0x0b +#define CH7017_SUB_CARRIER_0 0x0c +#define CH7017_CIV_CONTROL 0x10 +#define CH7017_CIV_0 0x11 +#define CH7017_CHROMA_BOOST 0x14 +#define CH7017_CLOCK_MODE 0x1c +#define CH7017_INPUT_CLOCK 0x1d +#define CH7017_GPIO_CONTROL 0x1e +#define CH7017_INPUT_DATA_FORMAT 0x1f +#define CH7017_CONNECTION_DETECT 0x20 +#define CH7017_DAC_CONTROL 0x21 +#define CH7017_BUFFERED_CLOCK_OUTPUT 0x22 +#define CH7017_DEFEAT_VSYNC 0x47 +#define CH7017_TEST_PATTERN 0x48 + +#define CH7017_POWER_MANAGEMENT 0x49 +/** Enables the TV output path. 
*/ +#define CH7017_TV_EN (1 << 0) +#define CH7017_DAC0_POWER_DOWN (1 << 1) +#define CH7017_DAC1_POWER_DOWN (1 << 2) +#define CH7017_DAC2_POWER_DOWN (1 << 3) +#define CH7017_DAC3_POWER_DOWN (1 << 4) +/** Powers down the TV out block, and DAC0-3 */ +#define CH7017_TV_POWER_DOWN_EN (1 << 5) + +#define CH7017_VERSION_ID 0x4a + +#define CH7017_DEVICE_ID 0x4b +#define CH7017_DEVICE_ID_VALUE 0x1b +#define CH7018_DEVICE_ID_VALUE 0x1a +#define CH7019_DEVICE_ID_VALUE 0x19 + +#define CH7017_XCLK_D2_ADJUST 0x53 +#define CH7017_UP_SCALER_COEFF_0 0x55 +#define CH7017_UP_SCALER_COEFF_1 0x56 +#define CH7017_UP_SCALER_COEFF_2 0x57 +#define CH7017_UP_SCALER_COEFF_3 0x58 +#define CH7017_UP_SCALER_COEFF_4 0x59 +#define CH7017_UP_SCALER_VERTICAL_INC_0 0x5a +#define CH7017_UP_SCALER_VERTICAL_INC_1 0x5b +#define CH7017_GPIO_INVERT 0x5c +#define CH7017_UP_SCALER_HORIZONTAL_INC_0 0x5d +#define CH7017_UP_SCALER_HORIZONTAL_INC_1 0x5e + +#define CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT 0x5f +/**< Low bits of horizontal active pixel input */ + +#define CH7017_ACTIVE_INPUT_LINE_OUTPUT 0x60 +/** High bits of horizontal active pixel input */ +#define CH7017_LVDS_HAP_INPUT_MASK (0x7 << 0) +/** High bits of vertical active line output */ +#define CH7017_LVDS_VAL_HIGH_MASK (0x7 << 3) + +#define CH7017_VERTICAL_ACTIVE_LINE_OUTPUT 0x61 +/**< Low bits of vertical active line output */ + +#define CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT 0x62 +/**< Low bits of horizontal active pixel output */ + +#define CH7017_LVDS_POWER_DOWN 0x63 +/** High bits of horizontal active pixel output */ +#define CH7017_LVDS_HAP_HIGH_MASK (0x7 << 0) +/** Enables the LVDS power down state transition */ +#define CH7017_LVDS_POWER_DOWN_EN (1 << 6) +/** Enables the LVDS upscaler */ +#define CH7017_LVDS_UPSCALER_EN (1 << 7) +#define CH7017_LVDS_POWER_DOWN_DEFAULT_RESERVED 0x08 + +#define CH7017_LVDS_ENCODING 0x64 +#define CH7017_LVDS_DITHER_2D (1 << 2) +#define CH7017_LVDS_DITHER_DIS (1 << 3) +#define CH7017_LVDS_DUAL_CHANNEL_EN (1 << 4) +#define CH7017_LVDS_24_BIT (1 << 5) + +#define CH7017_LVDS_ENCODING_2 0x65 + +#define CH7017_LVDS_PLL_CONTROL 0x66 +/** Enables the LVDS panel output path */ +#define CH7017_LVDS_PANEN (1 << 0) +/** Enables the LVDS panel backlight */ +#define CH7017_LVDS_BKLEN (1 << 3) + +#define CH7017_POWER_SEQUENCING_T1 0x67 +#define CH7017_POWER_SEQUENCING_T2 0x68 +#define CH7017_POWER_SEQUENCING_T3 0x69 +#define CH7017_POWER_SEQUENCING_T4 0x6a +#define CH7017_POWER_SEQUENCING_T5 0x6b +#define CH7017_GPIO_DRIVER_TYPE 0x6c +#define CH7017_GPIO_DATA 0x6d +#define CH7017_GPIO_DIRECTION_CONTROL 0x6e + +#define CH7017_LVDS_PLL_FEEDBACK_DIV 0x71 +# define CH7017_LVDS_PLL_FEED_BACK_DIVIDER_SHIFT 4 +# define CH7017_LVDS_PLL_FEED_FORWARD_DIVIDER_SHIFT 0 +# define CH7017_LVDS_PLL_FEEDBACK_DEFAULT_RESERVED 0x80 + +#define CH7017_LVDS_PLL_VCO_CONTROL 0x72 +# define CH7017_LVDS_PLL_VCO_DEFAULT_RESERVED 0x80 +# define CH7017_LVDS_PLL_VCO_SHIFT 4 +# define CH7017_LVDS_PLL_POST_SCALE_DIV_SHIFT 0 + +#define CH7017_OUTPUTS_ENABLE 0x73 +# define CH7017_CHARGE_PUMP_LOW 0x0 +# define CH7017_CHARGE_PUMP_HIGH 0x3 +# define CH7017_LVDS_CHANNEL_A (1 << 3) +# define CH7017_LVDS_CHANNEL_B (1 << 4) +# define CH7017_TV_DAC_A (1 << 5) +# define CH7017_TV_DAC_B (1 << 6) +# define CH7017_DDC_SELECT_DC2 (1 << 7) + +#define CH7017_LVDS_OUTPUT_AMPLITUDE 0x74 +#define CH7017_LVDS_PLL_EMI_REDUCTION 0x75 +#define CH7017_LVDS_POWER_DOWN_FLICKER 0x76 + +#define CH7017_LVDS_CONTROL_2 0x78 +# define CH7017_LOOP_FILTER_SHIFT 5 +# define CH7017_PHASE_DETECTOR_SHIFT 0 + +#define 
CH7017_BANG_LIMIT_CONTROL 0x7f + +struct ch7017_priv { + u8 dummy; +}; + +static void ch7017_dump_regs(struct intel_dvo_device *dvo); +static void ch7017_dpms(struct intel_dvo_device *dvo, bool enable); + +static bool ch7017_read(struct intel_dvo_device *dvo, u8 addr, u8 *val) +{ + struct i2c_msg msgs[] = { + { + .addr = dvo->slave_addr, + .flags = 0, + .len = 1, + .buf = &addr, + }, + { + .addr = dvo->slave_addr, + .flags = I2C_M_RD, + .len = 1, + .buf = val, + } + }; + return i2c_transfer(dvo->i2c_bus, msgs, 2) == 2; +} + +static bool ch7017_write(struct intel_dvo_device *dvo, u8 addr, u8 val) +{ + u8 buf[2] = { addr, val }; + struct i2c_msg msg = { + .addr = dvo->slave_addr, + .flags = 0, + .len = 2, + .buf = buf, + }; + return i2c_transfer(dvo->i2c_bus, &msg, 1) == 1; +} + +/** Probes for a CH7017 on the given bus and slave address. */ +static bool ch7017_init(struct intel_dvo_device *dvo, + struct i2c_adapter *adapter) +{ + struct ch7017_priv *priv; + const char *str; + u8 val; + + priv = kzalloc(sizeof(struct ch7017_priv), GFP_KERNEL); + if (priv == NULL) + return false; + + dvo->i2c_bus = adapter; + dvo->dev_priv = priv; + + if (!ch7017_read(dvo, CH7017_DEVICE_ID, &val)) + goto fail; + + switch (val) { + case CH7017_DEVICE_ID_VALUE: + str = "ch7017"; + break; + case CH7018_DEVICE_ID_VALUE: + str = "ch7018"; + break; + case CH7019_DEVICE_ID_VALUE: + str = "ch7019"; + break; + default: + DRM_DEBUG_KMS("ch701x not detected, got %d: from %s " + "slave %d.\n", + val, adapter->name, dvo->slave_addr); + goto fail; + } + + DRM_DEBUG_KMS("%s detected on %s, addr %d\n", + str, adapter->name, dvo->slave_addr); + return true; + +fail: + kfree(priv); + return false; +} + +static enum drm_connector_status ch7017_detect(struct intel_dvo_device *dvo) +{ + return connector_status_connected; +} + +static enum drm_mode_status ch7017_mode_valid(struct intel_dvo_device *dvo, + struct drm_display_mode *mode) +{ + if (mode->clock > 160000) + return MODE_CLOCK_HIGH; + + return MODE_OK; +} + +static void ch7017_mode_set(struct intel_dvo_device *dvo, + const struct drm_display_mode *mode, + const struct drm_display_mode *adjusted_mode) +{ + u8 lvds_pll_feedback_div, lvds_pll_vco_control; + u8 outputs_enable, lvds_control_2, lvds_power_down; + u8 horizontal_active_pixel_input; + u8 horizontal_active_pixel_output, vertical_active_line_output; + u8 active_input_line_output; + + DRM_DEBUG_KMS("Registers before mode setting\n"); + ch7017_dump_regs(dvo); + + /* LVDS PLL settings from page 75 of 7017-7017ds.pdf*/ + if (mode->clock < 100000) { + outputs_enable = CH7017_LVDS_CHANNEL_A | CH7017_CHARGE_PUMP_LOW; + lvds_pll_feedback_div = CH7017_LVDS_PLL_FEEDBACK_DEFAULT_RESERVED | + (2 << CH7017_LVDS_PLL_FEED_BACK_DIVIDER_SHIFT) | + (13 << CH7017_LVDS_PLL_FEED_FORWARD_DIVIDER_SHIFT); + lvds_pll_vco_control = CH7017_LVDS_PLL_VCO_DEFAULT_RESERVED | + (2 << CH7017_LVDS_PLL_VCO_SHIFT) | + (3 << CH7017_LVDS_PLL_POST_SCALE_DIV_SHIFT); + lvds_control_2 = (1 << CH7017_LOOP_FILTER_SHIFT) | + (0 << CH7017_PHASE_DETECTOR_SHIFT); + } else { + outputs_enable = CH7017_LVDS_CHANNEL_A | CH7017_CHARGE_PUMP_HIGH; + lvds_pll_feedback_div = + CH7017_LVDS_PLL_FEEDBACK_DEFAULT_RESERVED | + (2 << CH7017_LVDS_PLL_FEED_BACK_DIVIDER_SHIFT) | + (3 << CH7017_LVDS_PLL_FEED_FORWARD_DIVIDER_SHIFT); + lvds_control_2 = (3 << CH7017_LOOP_FILTER_SHIFT) | + (0 << CH7017_PHASE_DETECTOR_SHIFT); + if (1) { /* XXX: dual channel panel detection. Assume yes for now. 
*/ + outputs_enable |= CH7017_LVDS_CHANNEL_B; + lvds_pll_vco_control = CH7017_LVDS_PLL_VCO_DEFAULT_RESERVED | + (2 << CH7017_LVDS_PLL_VCO_SHIFT) | + (13 << CH7017_LVDS_PLL_POST_SCALE_DIV_SHIFT); + } else { + lvds_pll_vco_control = CH7017_LVDS_PLL_VCO_DEFAULT_RESERVED | + (1 << CH7017_LVDS_PLL_VCO_SHIFT) | + (13 << CH7017_LVDS_PLL_POST_SCALE_DIV_SHIFT); + } + } + + horizontal_active_pixel_input = mode->hdisplay & 0x00ff; + + vertical_active_line_output = mode->vdisplay & 0x00ff; + horizontal_active_pixel_output = mode->hdisplay & 0x00ff; + + active_input_line_output = ((mode->hdisplay & 0x0700) >> 8) | + (((mode->vdisplay & 0x0700) >> 8) << 3); + + lvds_power_down = CH7017_LVDS_POWER_DOWN_DEFAULT_RESERVED | + (mode->hdisplay & 0x0700) >> 8; + + ch7017_dpms(dvo, false); + ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT, + horizontal_active_pixel_input); + ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT, + horizontal_active_pixel_output); + ch7017_write(dvo, CH7017_VERTICAL_ACTIVE_LINE_OUTPUT, + vertical_active_line_output); + ch7017_write(dvo, CH7017_ACTIVE_INPUT_LINE_OUTPUT, + active_input_line_output); + ch7017_write(dvo, CH7017_LVDS_PLL_VCO_CONTROL, lvds_pll_vco_control); + ch7017_write(dvo, CH7017_LVDS_PLL_FEEDBACK_DIV, lvds_pll_feedback_div); + ch7017_write(dvo, CH7017_LVDS_CONTROL_2, lvds_control_2); + ch7017_write(dvo, CH7017_OUTPUTS_ENABLE, outputs_enable); + + /* Turn the LVDS back on with new settings. */ + ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, lvds_power_down); + + DRM_DEBUG_KMS("Registers after mode setting\n"); + ch7017_dump_regs(dvo); +} + +/* set the CH7017 power state */ +static void ch7017_dpms(struct intel_dvo_device *dvo, bool enable) +{ + u8 val; + + ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &val); + + /* Turn off TV/VGA, and never turn it on since we don't support it. 
*/ + ch7017_write(dvo, CH7017_POWER_MANAGEMENT, + CH7017_DAC0_POWER_DOWN | + CH7017_DAC1_POWER_DOWN | + CH7017_DAC2_POWER_DOWN | + CH7017_DAC3_POWER_DOWN | + CH7017_TV_POWER_DOWN_EN); + + if (enable) { + /* Turn on the LVDS */ + ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, + val & ~CH7017_LVDS_POWER_DOWN_EN); + } else { + /* Turn off the LVDS */ + ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, + val | CH7017_LVDS_POWER_DOWN_EN); + } + + /* XXX: Should actually wait for update power status somehow */ + msleep(20); +} + +static bool ch7017_get_hw_state(struct intel_dvo_device *dvo) +{ + u8 val; + + ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &val); + + if (val & CH7017_LVDS_POWER_DOWN_EN) + return false; + else + return true; +} + +static void ch7017_dump_regs(struct intel_dvo_device *dvo) +{ + u8 val; + +#define DUMP(reg) \ +do { \ + ch7017_read(dvo, reg, &val); \ + DRM_DEBUG_KMS(#reg ": %02x\n", val); \ +} while (0) + + DUMP(CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT); + DUMP(CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT); + DUMP(CH7017_VERTICAL_ACTIVE_LINE_OUTPUT); + DUMP(CH7017_ACTIVE_INPUT_LINE_OUTPUT); + DUMP(CH7017_LVDS_PLL_VCO_CONTROL); + DUMP(CH7017_LVDS_PLL_FEEDBACK_DIV); + DUMP(CH7017_LVDS_CONTROL_2); + DUMP(CH7017_OUTPUTS_ENABLE); + DUMP(CH7017_LVDS_POWER_DOWN); +} + +static void ch7017_destroy(struct intel_dvo_device *dvo) +{ + struct ch7017_priv *priv = dvo->dev_priv; + + if (priv) { + kfree(priv); + dvo->dev_priv = NULL; + } +} + +const struct intel_dvo_dev_ops ch7017_ops = { + .init = ch7017_init, + .detect = ch7017_detect, + .mode_valid = ch7017_mode_valid, + .mode_set = ch7017_mode_set, + .dpms = ch7017_dpms, + .get_hw_state = ch7017_get_hw_state, + .dump_regs = ch7017_dump_regs, + .destroy = ch7017_destroy, +}; diff --git a/drivers/gpu/drm/i915/display/dvo_ch7xxx.c b/drivers/gpu/drm/i915/display/dvo_ch7xxx.c new file mode 100644 index 000000000000..e070bebee7b5 --- /dev/null +++ b/drivers/gpu/drm/i915/display/dvo_ch7xxx.c @@ -0,0 +1,367 @@ +/************************************************************************** + +Copyright © 2006 Dave Airlie + +All Rights Reserved. + +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sub license, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice (including the +next paragraph) shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. +IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ +**************************************************************************/ + +#include "intel_drv.h" +#include "intel_dvo_dev.h" + +#define CH7xxx_REG_VID 0x4a +#define CH7xxx_REG_DID 0x4b + +#define CH7011_VID 0x83 /* 7010 as well */ +#define CH7010B_VID 0x05 +#define CH7009A_VID 0x84 +#define CH7009B_VID 0x85 +#define CH7301_VID 0x95 + +#define CH7xxx_VID 0x84 +#define CH7xxx_DID 0x17 +#define CH7010_DID 0x16 + +#define CH7xxx_NUM_REGS 0x4c + +#define CH7xxx_CM 0x1c +#define CH7xxx_CM_XCM (1<<0) +#define CH7xxx_CM_MCP (1<<2) +#define CH7xxx_INPUT_CLOCK 0x1d +#define CH7xxx_GPIO 0x1e +#define CH7xxx_GPIO_HPIR (1<<3) +#define CH7xxx_IDF 0x1f + +#define CH7xxx_IDF_HSP (1<<3) +#define CH7xxx_IDF_VSP (1<<4) + +#define CH7xxx_CONNECTION_DETECT 0x20 +#define CH7xxx_CDET_DVI (1<<5) + +#define CH7301_DAC_CNTL 0x21 +#define CH7301_HOTPLUG 0x23 +#define CH7xxx_TCTL 0x31 +#define CH7xxx_TVCO 0x32 +#define CH7xxx_TPCP 0x33 +#define CH7xxx_TPD 0x34 +#define CH7xxx_TPVT 0x35 +#define CH7xxx_TLPF 0x36 +#define CH7xxx_TCT 0x37 +#define CH7301_TEST_PATTERN 0x48 + +#define CH7xxx_PM 0x49 +#define CH7xxx_PM_FPD (1<<0) +#define CH7301_PM_DACPD0 (1<<1) +#define CH7301_PM_DACPD1 (1<<2) +#define CH7301_PM_DACPD2 (1<<3) +#define CH7xxx_PM_DVIL (1<<6) +#define CH7xxx_PM_DVIP (1<<7) + +#define CH7301_SYNC_POLARITY 0x56 +#define CH7301_SYNC_RGB_YUV (1<<0) +#define CH7301_SYNC_POL_DVI (1<<5) + +/** @file + * driver for the Chrontel 7xxx DVI chip over DVO. + */ + +static struct ch7xxx_id_struct { + u8 vid; + char *name; +} ch7xxx_ids[] = { + { CH7011_VID, "CH7011" }, + { CH7010B_VID, "CH7010B" }, + { CH7009A_VID, "CH7009A" }, + { CH7009B_VID, "CH7009B" }, + { CH7301_VID, "CH7301" }, +}; + +static struct ch7xxx_did_struct { + u8 did; + char *name; +} ch7xxx_dids[] = { + { CH7xxx_DID, "CH7XXX" }, + { CH7010_DID, "CH7010B" }, +}; + +struct ch7xxx_priv { + bool quiet; +}; + +static char *ch7xxx_get_id(u8 vid) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(ch7xxx_ids); i++) { + if (ch7xxx_ids[i].vid == vid) + return ch7xxx_ids[i].name; + } + + return NULL; +} + +static char *ch7xxx_get_did(u8 did) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(ch7xxx_dids); i++) { + if (ch7xxx_dids[i].did == did) + return ch7xxx_dids[i].name; + } + + return NULL; +} + +/** Reads an 8 bit register */ +static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, u8 *ch) +{ + struct ch7xxx_priv *ch7xxx = dvo->dev_priv; + struct i2c_adapter *adapter = dvo->i2c_bus; + u8 out_buf[2]; + u8 in_buf[2]; + + struct i2c_msg msgs[] = { + { + .addr = dvo->slave_addr, + .flags = 0, + .len = 1, + .buf = out_buf, + }, + { + .addr = dvo->slave_addr, + .flags = I2C_M_RD, + .len = 1, + .buf = in_buf, + } + }; + + out_buf[0] = addr; + out_buf[1] = 0; + + if (i2c_transfer(adapter, msgs, 2) == 2) { + *ch = in_buf[0]; + return true; + } + + if (!ch7xxx->quiet) { + DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n", + addr, adapter->name, dvo->slave_addr); + } + return false; +} + +/** Writes an 8 bit register */ +static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, u8 ch) +{ + struct ch7xxx_priv *ch7xxx = dvo->dev_priv; + struct i2c_adapter *adapter = dvo->i2c_bus; + u8 out_buf[2]; + struct i2c_msg msg = { + .addr = dvo->slave_addr, + .flags = 0, + .len = 2, + .buf = out_buf, + }; + + out_buf[0] = addr; + out_buf[1] = ch; + + if (i2c_transfer(adapter, &msg, 1) == 1) + return true; + + if (!ch7xxx->quiet) { + DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n", + addr, adapter->name, dvo->slave_addr); + } + + return false; +} + 
+static bool ch7xxx_init(struct intel_dvo_device *dvo, + struct i2c_adapter *adapter) +{ + /* this will detect the CH7xxx chip on the specified i2c bus */ + struct ch7xxx_priv *ch7xxx; + u8 vendor, device; + char *name, *devid; + + ch7xxx = kzalloc(sizeof(struct ch7xxx_priv), GFP_KERNEL); + if (ch7xxx == NULL) + return false; + + dvo->i2c_bus = adapter; + dvo->dev_priv = ch7xxx; + ch7xxx->quiet = true; + + if (!ch7xxx_readb(dvo, CH7xxx_REG_VID, &vendor)) + goto out; + + name = ch7xxx_get_id(vendor); + if (!name) { + DRM_DEBUG_KMS("ch7xxx not detected; got VID 0x%02x from %s slave %d.\n", + vendor, adapter->name, dvo->slave_addr); + goto out; + } + + + if (!ch7xxx_readb(dvo, CH7xxx_REG_DID, &device)) + goto out; + + devid = ch7xxx_get_did(device); + if (!devid) { + DRM_DEBUG_KMS("ch7xxx not detected; got DID 0x%02x from %s slave %d.\n", + device, adapter->name, dvo->slave_addr); + goto out; + } + + ch7xxx->quiet = false; + DRM_DEBUG_KMS("Detected %s chipset, vendor/device ID 0x%02x/0x%02x\n", + name, vendor, device); + return true; +out: + kfree(ch7xxx); + return false; +} + +static enum drm_connector_status ch7xxx_detect(struct intel_dvo_device *dvo) +{ + u8 cdet, orig_pm, pm; + + ch7xxx_readb(dvo, CH7xxx_PM, &orig_pm); + + pm = orig_pm; + pm &= ~CH7xxx_PM_FPD; + pm |= CH7xxx_PM_DVIL | CH7xxx_PM_DVIP; + + ch7xxx_writeb(dvo, CH7xxx_PM, pm); + + ch7xxx_readb(dvo, CH7xxx_CONNECTION_DETECT, &cdet); + + ch7xxx_writeb(dvo, CH7xxx_PM, orig_pm); + + if (cdet & CH7xxx_CDET_DVI) + return connector_status_connected; + return connector_status_disconnected; +} + +static enum drm_mode_status ch7xxx_mode_valid(struct intel_dvo_device *dvo, + struct drm_display_mode *mode) +{ + if (mode->clock > 165000) + return MODE_CLOCK_HIGH; + + return MODE_OK; +} + +static void ch7xxx_mode_set(struct intel_dvo_device *dvo, + const struct drm_display_mode *mode, + const struct drm_display_mode *adjusted_mode) +{ + u8 tvco, tpcp, tpd, tlpf, idf; + + if (mode->clock <= 65000) { + tvco = 0x23; + tpcp = 0x08; + tpd = 0x16; + tlpf = 0x60; + } else { + tvco = 0x2d; + tpcp = 0x06; + tpd = 0x26; + tlpf = 0xa0; + } + + ch7xxx_writeb(dvo, CH7xxx_TCTL, 0x00); + ch7xxx_writeb(dvo, CH7xxx_TVCO, tvco); + ch7xxx_writeb(dvo, CH7xxx_TPCP, tpcp); + ch7xxx_writeb(dvo, CH7xxx_TPD, tpd); + ch7xxx_writeb(dvo, CH7xxx_TPVT, 0x30); + ch7xxx_writeb(dvo, CH7xxx_TLPF, tlpf); + ch7xxx_writeb(dvo, CH7xxx_TCT, 0x00); + + ch7xxx_readb(dvo, CH7xxx_IDF, &idf); + + idf &= ~(CH7xxx_IDF_HSP | CH7xxx_IDF_VSP); + if (mode->flags & DRM_MODE_FLAG_PHSYNC) + idf |= CH7xxx_IDF_HSP; + + if (mode->flags & DRM_MODE_FLAG_PVSYNC) + idf |= CH7xxx_IDF_VSP; + + ch7xxx_writeb(dvo, CH7xxx_IDF, idf); +} + +/* set the CH7xxx power state */ +static void ch7xxx_dpms(struct intel_dvo_device *dvo, bool enable) +{ + if (enable) + ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_DVIL | CH7xxx_PM_DVIP); + else + ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_FPD); +} + +static bool ch7xxx_get_hw_state(struct intel_dvo_device *dvo) +{ + u8 val; + + ch7xxx_readb(dvo, CH7xxx_PM, &val); + + if (val & (CH7xxx_PM_DVIL | CH7xxx_PM_DVIP)) + return true; + else + return false; +} + +static void ch7xxx_dump_regs(struct intel_dvo_device *dvo) +{ + int i; + + for (i = 0; i < CH7xxx_NUM_REGS; i++) { + u8 val; + if ((i % 8) == 0) + DRM_DEBUG_KMS("\n %02X: ", i); + ch7xxx_readb(dvo, i, &val); + DRM_DEBUG_KMS("%02X ", val); + } +} + +static void ch7xxx_destroy(struct intel_dvo_device *dvo) +{ + struct ch7xxx_priv *ch7xxx = dvo->dev_priv; + + if (ch7xxx) { + kfree(ch7xxx); + dvo->dev_priv = NULL; + } +} + +const 
struct intel_dvo_dev_ops ch7xxx_ops = { + .init = ch7xxx_init, + .detect = ch7xxx_detect, + .mode_valid = ch7xxx_mode_valid, + .mode_set = ch7xxx_mode_set, + .dpms = ch7xxx_dpms, + .get_hw_state = ch7xxx_get_hw_state, + .dump_regs = ch7xxx_dump_regs, + .destroy = ch7xxx_destroy, +}; diff --git a/drivers/gpu/drm/i915/display/dvo_ivch.c b/drivers/gpu/drm/i915/display/dvo_ivch.c new file mode 100644 index 000000000000..09dba35f3ffa --- /dev/null +++ b/drivers/gpu/drm/i915/display/dvo_ivch.c @@ -0,0 +1,503 @@ +/* + * Copyright © 2006 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: + * Eric Anholt + * Thomas Richter + * + * Minor modifications (Dithering enable): + * Thomas Richter + * + */ + +#include "intel_drv.h" +#include "intel_dvo_dev.h" + +/* + * register definitions for the i82807aa. + * + * Documentation on this chipset can be found in datasheet #29069001 at + * intel.com. + */ + +/* + * VCH Revision & GMBus Base Addr + */ +#define VR00 0x00 +# define VR00_BASE_ADDRESS_MASK 0x007f + +/* + * Functionality Enable + */ +#define VR01 0x01 + +/* + * Enable the panel fitter + */ +# define VR01_PANEL_FIT_ENABLE (1 << 3) +/* + * Enables the LCD display. + * + * This must not be set while VR01_DVO_BYPASS_ENABLE is set. + */ +# define VR01_LCD_ENABLE (1 << 2) +/* Enables the DVO repeater. */ +# define VR01_DVO_BYPASS_ENABLE (1 << 1) +/* Enables the DVO clock */ +# define VR01_DVO_ENABLE (1 << 0) +/* Enable dithering for 18bpp panels. Not documented. */ +# define VR01_DITHER_ENABLE (1 << 4) + +/* + * LCD Interface Format + */ +#define VR10 0x10 +/* Enables LVDS output instead of CMOS */ +# define VR10_LVDS_ENABLE (1 << 4) +/* Enables 18-bit LVDS output. */ +# define VR10_INTERFACE_1X18 (0 << 2) +/* Enables 24-bit LVDS or CMOS output */ +# define VR10_INTERFACE_1X24 (1 << 2) +/* Enables 2x18-bit LVDS or CMOS output. */ +# define VR10_INTERFACE_2X18 (2 << 2) +/* Enables 2x24-bit LVDS output */ +# define VR10_INTERFACE_2X24 (3 << 2) +/* Mask that defines the depth of the pipeline */ +# define VR10_INTERFACE_DEPTH_MASK (3 << 2) + +/* + * VR20 LCD Horizontal Display Size + */ +#define VR20 0x20 + +/* + * LCD Vertical Display Size + */ +#define VR21 0x21 + +/* + * Panel power down status + */ +#define VR30 0x30 +/* Read only bit indicating that the panel is not in a safe poweroff state. 
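+ * ivch_dpms() further down polls this bit for up to ~100 ms (100 iterations
+ * of udelay(1000)) to wait for the panel to complete its power transition.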
*/ +# define VR30_PANEL_ON (1 << 15) + +#define VR40 0x40 +# define VR40_STALL_ENABLE (1 << 13) +# define VR40_VERTICAL_INTERP_ENABLE (1 << 12) +# define VR40_ENHANCED_PANEL_FITTING (1 << 11) +# define VR40_HORIZONTAL_INTERP_ENABLE (1 << 10) +# define VR40_AUTO_RATIO_ENABLE (1 << 9) +# define VR40_CLOCK_GATING_ENABLE (1 << 8) + +/* + * Panel Fitting Vertical Ratio + * (((image_height - 1) << 16) / ((panel_height - 1))) >> 2 + */ +#define VR41 0x41 + +/* + * Panel Fitting Horizontal Ratio + * (((image_width - 1) << 16) / ((panel_width - 1))) >> 2 + */ +#define VR42 0x42 + +/* + * Horizontal Image Size + */ +#define VR43 0x43 + +/* VR80 GPIO 0 + */ +#define VR80 0x80 +#define VR81 0x81 +#define VR82 0x82 +#define VR83 0x83 +#define VR84 0x84 +#define VR85 0x85 +#define VR86 0x86 +#define VR87 0x87 + +/* VR88 GPIO 8 + */ +#define VR88 0x88 + +/* Graphics BIOS scratch 0 + */ +#define VR8E 0x8E +# define VR8E_PANEL_TYPE_MASK (0xf << 0) +# define VR8E_PANEL_INTERFACE_CMOS (0 << 4) +# define VR8E_PANEL_INTERFACE_LVDS (1 << 4) +# define VR8E_FORCE_DEFAULT_PANEL (1 << 5) + +/* Graphics BIOS scratch 1 + */ +#define VR8F 0x8F +# define VR8F_VCH_PRESENT (1 << 0) +# define VR8F_DISPLAY_CONN (1 << 1) +# define VR8F_POWER_MASK (0x3c) +# define VR8F_POWER_POS (2) + +/* Some Bios implementations do not restore the DVO state upon + * resume from standby. Thus, this driver has to handle it + * instead. The following list contains all registers that + * require saving. + */ +static const u16 backup_addresses[] = { + 0x11, 0x12, + 0x18, 0x19, 0x1a, 0x1f, + 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, + 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, + 0x8e, 0x8f, + 0x10 /* this must come last */ +}; + + +struct ivch_priv { + bool quiet; + + u16 width, height; + + /* Register backup */ + + u16 reg_backup[ARRAY_SIZE(backup_addresses)]; +}; + + +static void ivch_dump_regs(struct intel_dvo_device *dvo); +/* + * Reads a register on the ivch. + * + * Each of the 256 registers are 16 bits long. 
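+ * The read below is a three-message i2c transfer: a zero-length read, the
+ * register offset sent with I2C_M_NOSTART, and a two-byte NOSTART read of
+ * the data, which is assembled low byte first ((in_buf[1] << 8) | in_buf[0]).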
+ */ +static bool ivch_read(struct intel_dvo_device *dvo, int addr, u16 *data) +{ + struct ivch_priv *priv = dvo->dev_priv; + struct i2c_adapter *adapter = dvo->i2c_bus; + u8 out_buf[1]; + u8 in_buf[2]; + + struct i2c_msg msgs[] = { + { + .addr = dvo->slave_addr, + .flags = I2C_M_RD, + .len = 0, + }, + { + .addr = 0, + .flags = I2C_M_NOSTART, + .len = 1, + .buf = out_buf, + }, + { + .addr = dvo->slave_addr, + .flags = I2C_M_RD | I2C_M_NOSTART, + .len = 2, + .buf = in_buf, + } + }; + + out_buf[0] = addr; + + if (i2c_transfer(adapter, msgs, 3) == 3) { + *data = (in_buf[1] << 8) | in_buf[0]; + return true; + } + + if (!priv->quiet) { + DRM_DEBUG_KMS("Unable to read register 0x%02x from " + "%s:%02x.\n", + addr, adapter->name, dvo->slave_addr); + } + return false; +} + +/* Writes a 16-bit register on the ivch */ +static bool ivch_write(struct intel_dvo_device *dvo, int addr, u16 data) +{ + struct ivch_priv *priv = dvo->dev_priv; + struct i2c_adapter *adapter = dvo->i2c_bus; + u8 out_buf[3]; + struct i2c_msg msg = { + .addr = dvo->slave_addr, + .flags = 0, + .len = 3, + .buf = out_buf, + }; + + out_buf[0] = addr; + out_buf[1] = data & 0xff; + out_buf[2] = data >> 8; + + if (i2c_transfer(adapter, &msg, 1) == 1) + return true; + + if (!priv->quiet) { + DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n", + addr, adapter->name, dvo->slave_addr); + } + + return false; +} + +/* Probes the given bus and slave address for an ivch */ +static bool ivch_init(struct intel_dvo_device *dvo, + struct i2c_adapter *adapter) +{ + struct ivch_priv *priv; + u16 temp; + int i; + + priv = kzalloc(sizeof(struct ivch_priv), GFP_KERNEL); + if (priv == NULL) + return false; + + dvo->i2c_bus = adapter; + dvo->dev_priv = priv; + priv->quiet = true; + + if (!ivch_read(dvo, VR00, &temp)) + goto out; + priv->quiet = false; + + /* Since the identification bits are probably zeroes, which doesn't seem + * very unique, check that the value in the base address field matches + * the address it's responding on. + */ + if ((temp & VR00_BASE_ADDRESS_MASK) != dvo->slave_addr) { + DRM_DEBUG_KMS("ivch detect failed due to address mismatch " + "(%d vs %d)\n", + (temp & VR00_BASE_ADDRESS_MASK), dvo->slave_addr); + goto out; + } + + ivch_read(dvo, VR20, &priv->width); + ivch_read(dvo, VR21, &priv->height); + + /* Make a backup of the registers to be able to restore them + * upon suspend. + */ + for (i = 0; i < ARRAY_SIZE(backup_addresses); i++) + ivch_read(dvo, backup_addresses[i], priv->reg_backup + i); + + ivch_dump_regs(dvo); + + return true; + +out: + kfree(priv); + return false; +} + +static enum drm_connector_status ivch_detect(struct intel_dvo_device *dvo) +{ + return connector_status_connected; +} + +static enum drm_mode_status ivch_mode_valid(struct intel_dvo_device *dvo, + struct drm_display_mode *mode) +{ + if (mode->clock > 112000) + return MODE_CLOCK_HIGH; + + return MODE_OK; +} + +/* Restore the DVO registers after a resume + * from RAM. Registers have been saved during + * the initialization. 
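+ * ivch_reset() first forces VR10 to 0 and then replays the saved registers
+ * in the order of backup_addresses[]; VR10 (0x10) is deliberately the last
+ * entry so the interface configuration is restored only after the rest.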
+ */ +static void ivch_reset(struct intel_dvo_device *dvo) +{ + struct ivch_priv *priv = dvo->dev_priv; + int i; + + DRM_DEBUG_KMS("Resetting the IVCH registers\n"); + + ivch_write(dvo, VR10, 0x0000); + + for (i = 0; i < ARRAY_SIZE(backup_addresses); i++) + ivch_write(dvo, backup_addresses[i], priv->reg_backup[i]); +} + +/* Sets the power state of the panel connected to the ivch */ +static void ivch_dpms(struct intel_dvo_device *dvo, bool enable) +{ + int i; + u16 vr01, vr30, backlight; + + ivch_reset(dvo); + + /* Set the new power state of the panel. */ + if (!ivch_read(dvo, VR01, &vr01)) + return; + + if (enable) + backlight = 1; + else + backlight = 0; + + ivch_write(dvo, VR80, backlight); + + if (enable) + vr01 |= VR01_LCD_ENABLE | VR01_DVO_ENABLE; + else + vr01 &= ~(VR01_LCD_ENABLE | VR01_DVO_ENABLE); + + ivch_write(dvo, VR01, vr01); + + /* Wait for the panel to make its state transition */ + for (i = 0; i < 100; i++) { + if (!ivch_read(dvo, VR30, &vr30)) + break; + + if (((vr30 & VR30_PANEL_ON) != 0) == enable) + break; + udelay(1000); + } + /* wait some more; vch may fail to resync sometimes without this */ + udelay(16 * 1000); +} + +static bool ivch_get_hw_state(struct intel_dvo_device *dvo) +{ + u16 vr01; + + ivch_reset(dvo); + + /* Set the new power state of the panel. */ + if (!ivch_read(dvo, VR01, &vr01)) + return false; + + if (vr01 & VR01_LCD_ENABLE) + return true; + else + return false; +} + +static void ivch_mode_set(struct intel_dvo_device *dvo, + const struct drm_display_mode *mode, + const struct drm_display_mode *adjusted_mode) +{ + struct ivch_priv *priv = dvo->dev_priv; + u16 vr40 = 0; + u16 vr01 = 0; + u16 vr10; + + ivch_reset(dvo); + + vr10 = priv->reg_backup[ARRAY_SIZE(backup_addresses) - 1]; + + /* Enable dithering for 18 bpp pipelines */ + vr10 &= VR10_INTERFACE_DEPTH_MASK; + if (vr10 == VR10_INTERFACE_2X18 || vr10 == VR10_INTERFACE_1X18) + vr01 = VR01_DITHER_ENABLE; + + vr40 = (VR40_STALL_ENABLE | VR40_VERTICAL_INTERP_ENABLE | + VR40_HORIZONTAL_INTERP_ENABLE); + + if (mode->hdisplay != adjusted_mode->crtc_hdisplay || + mode->vdisplay != adjusted_mode->crtc_vdisplay) { + u16 x_ratio, y_ratio; + + vr01 |= VR01_PANEL_FIT_ENABLE; + vr40 |= VR40_CLOCK_GATING_ENABLE; + x_ratio = (((mode->hdisplay - 1) << 16) / + (adjusted_mode->crtc_hdisplay - 1)) >> 2; + y_ratio = (((mode->vdisplay - 1) << 16) / + (adjusted_mode->crtc_vdisplay - 1)) >> 2; + ivch_write(dvo, VR42, x_ratio); + ivch_write(dvo, VR41, y_ratio); + } else { + vr01 &= ~VR01_PANEL_FIT_ENABLE; + vr40 &= ~VR40_CLOCK_GATING_ENABLE; + } + vr40 &= ~VR40_AUTO_RATIO_ENABLE; + + ivch_write(dvo, VR01, vr01); + ivch_write(dvo, VR40, vr40); +} + +static void ivch_dump_regs(struct intel_dvo_device *dvo) +{ + u16 val; + + ivch_read(dvo, VR00, &val); + DRM_DEBUG_KMS("VR00: 0x%04x\n", val); + ivch_read(dvo, VR01, &val); + DRM_DEBUG_KMS("VR01: 0x%04x\n", val); + ivch_read(dvo, VR10, &val); + DRM_DEBUG_KMS("VR10: 0x%04x\n", val); + ivch_read(dvo, VR30, &val); + DRM_DEBUG_KMS("VR30: 0x%04x\n", val); + ivch_read(dvo, VR40, &val); + DRM_DEBUG_KMS("VR40: 0x%04x\n", val); + + /* GPIO registers */ + ivch_read(dvo, VR80, &val); + DRM_DEBUG_KMS("VR80: 0x%04x\n", val); + ivch_read(dvo, VR81, &val); + DRM_DEBUG_KMS("VR81: 0x%04x\n", val); + ivch_read(dvo, VR82, &val); + DRM_DEBUG_KMS("VR82: 0x%04x\n", val); + ivch_read(dvo, VR83, &val); + DRM_DEBUG_KMS("VR83: 0x%04x\n", val); + ivch_read(dvo, VR84, &val); + DRM_DEBUG_KMS("VR84: 0x%04x\n", val); + ivch_read(dvo, VR85, &val); + DRM_DEBUG_KMS("VR85: 0x%04x\n", val); + ivch_read(dvo, VR86, 
&val); + DRM_DEBUG_KMS("VR86: 0x%04x\n", val); + ivch_read(dvo, VR87, &val); + DRM_DEBUG_KMS("VR87: 0x%04x\n", val); + ivch_read(dvo, VR88, &val); + DRM_DEBUG_KMS("VR88: 0x%04x\n", val); + + /* Scratch register 0 - AIM Panel type */ + ivch_read(dvo, VR8E, &val); + DRM_DEBUG_KMS("VR8E: 0x%04x\n", val); + + /* Scratch register 1 - Status register */ + ivch_read(dvo, VR8F, &val); + DRM_DEBUG_KMS("VR8F: 0x%04x\n", val); +} + +static void ivch_destroy(struct intel_dvo_device *dvo) +{ + struct ivch_priv *priv = dvo->dev_priv; + + if (priv) { + kfree(priv); + dvo->dev_priv = NULL; + } +} + +const struct intel_dvo_dev_ops ivch_ops = { + .init = ivch_init, + .dpms = ivch_dpms, + .get_hw_state = ivch_get_hw_state, + .mode_valid = ivch_mode_valid, + .mode_set = ivch_mode_set, + .detect = ivch_detect, + .dump_regs = ivch_dump_regs, + .destroy = ivch_destroy, +}; diff --git a/drivers/gpu/drm/i915/display/dvo_ns2501.c b/drivers/gpu/drm/i915/display/dvo_ns2501.c new file mode 100644 index 000000000000..c83a5d88d62b --- /dev/null +++ b/drivers/gpu/drm/i915/display/dvo_ns2501.c @@ -0,0 +1,710 @@ +/* + * + * Copyright (c) 2012 Gilles Dartiguelongue, Thomas Richter + * + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "i915_drv.h" +#include "i915_reg.h" +#include "intel_drv.h" +#include "intel_dvo_dev.h" + +#define NS2501_VID 0x1305 +#define NS2501_DID 0x6726 + +#define NS2501_VID_LO 0x00 +#define NS2501_VID_HI 0x01 +#define NS2501_DID_LO 0x02 +#define NS2501_DID_HI 0x03 +#define NS2501_REV 0x04 +#define NS2501_RSVD 0x05 +#define NS2501_FREQ_LO 0x06 +#define NS2501_FREQ_HI 0x07 + +#define NS2501_REG8 0x08 +#define NS2501_8_VEN (1<<5) +#define NS2501_8_HEN (1<<4) +#define NS2501_8_DSEL (1<<3) +#define NS2501_8_BPAS (1<<2) +#define NS2501_8_RSVD (1<<1) +#define NS2501_8_PD (1<<0) + +#define NS2501_REG9 0x09 +#define NS2501_9_VLOW (1<<7) +#define NS2501_9_MSEL_MASK (0x7<<4) +#define NS2501_9_TSEL (1<<3) +#define NS2501_9_RSEN (1<<2) +#define NS2501_9_RSVD (1<<1) +#define NS2501_9_MDI (1<<0) + +#define NS2501_REGC 0x0c + +/* + * The following registers are not part of the official datasheet + * and are the result of reverse engineering. + */ + +/* + * Register c0 controls how the DVO synchronizes with + * its input. 
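+ * For instance, the mode table in this file appears to use
+ * ENABLE | VSYNC (0x05) for the 640x480 and 1024x768 configurations and
+ * ENABLE | HSYNC | VSYNC (0x07) for 800x600, matching the note on
+ * register 41 below.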
+ */ +#define NS2501_REGC0 0xc0 +#define NS2501_C0_ENABLE (1<<0) /* enable the DVO sync in general */ +#define NS2501_C0_HSYNC (1<<1) /* synchronize horizontal with input */ +#define NS2501_C0_VSYNC (1<<2) /* synchronize vertical with input */ +#define NS2501_C0_RESET (1<<7) /* reset the synchronization flip/flops */ + +/* + * Register 41 is somehow related to the sync register and sync + * configuration. It should be 0x32 whenever regC0 is 0x05 (hsync off) + * and 0x00 otherwise. + */ +#define NS2501_REG41 0x41 + +/* + * this register controls the dithering of the DVO + * One bit enables it, the other define the dithering depth. + * The higher the value, the lower the dithering depth. + */ +#define NS2501_F9_REG 0xf9 +#define NS2501_F9_ENABLE (1<<0) /* if set, dithering is enabled */ +#define NS2501_F9_DITHER_MASK (0x7f<<1) /* controls the dither depth */ +#define NS2501_F9_DITHER_SHIFT 1 /* shifts the dither mask */ + +/* + * PLL configuration register. This is a pair of registers, + * one single byte register at 1B, and a pair at 1C,1D. + * These registers are counters/dividers. + */ +#define NS2501_REG1B 0x1b /* one byte PLL control register */ +#define NS2501_REG1C 0x1c /* low-part of the second register */ +#define NS2501_REG1D 0x1d /* high-part of the second register */ + +/* + * Scaler control registers. Horizontal at b8,b9, + * vertical at 10,11. The scale factor is computed as + * 2^16/control-value. The low-byte comes first. + */ +#define NS2501_REG10 0x10 /* low-byte vertical scaler */ +#define NS2501_REG11 0x11 /* high-byte vertical scaler */ +#define NS2501_REGB8 0xb8 /* low-byte horizontal scaler */ +#define NS2501_REGB9 0xb9 /* high-byte horizontal scaler */ + +/* + * Display window definition. This consists of four registers + * per dimension. One register pair defines the start of the + * display, one the end. + * As far as I understand, this defines the window within which + * the scaler samples the input. + */ +#define NS2501_REGC1 0xc1 /* low-byte horizontal display start */ +#define NS2501_REGC2 0xc2 /* high-byte horizontal display start */ +#define NS2501_REGC3 0xc3 /* low-byte horizontal display stop */ +#define NS2501_REGC4 0xc4 /* high-byte horizontal display stop */ +#define NS2501_REGC5 0xc5 /* low-byte vertical display start */ +#define NS2501_REGC6 0xc6 /* high-byte vertical display start */ +#define NS2501_REGC7 0xc7 /* low-byte vertical display stop */ +#define NS2501_REGC8 0xc8 /* high-byte vertical display stop */ + +/* + * The following register pair seems to define the start of + * the vertical sync. If automatic syncing is enabled, and the + * register value defines a sync pulse that is later than the + * incoming sync, then the register value is ignored and the + * external hsync triggers the synchronization. + */ +#define NS2501_REG80 0x80 /* low-byte vsync-start */ +#define NS2501_REG81 0x81 /* high-byte vsync-start */ + +/* + * The following register pair seems to define the total number + * of lines created at the output side of the scaler. + * This is again a low-high register pair. + */ +#define NS2501_REG82 0x82 /* output display height, low byte */ +#define NS2501_REG83 0x83 /* output display height, high byte */ + +/* + * The following registers define the end of the front-porch + * in horizontal and vertical position and hence allow to shift + * the image left/right or up/down. 
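+ * The configurations below program 0 into the horizontal position pair and
+ * only small vertical offsets (16, 4 and 7 lines via 8e/8f).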
+ */ +#define NS2501_REG98 0x98 /* horizontal start of display + 256, low */ +#define NS2501_REG99 0x99 /* horizontal start of display + 256, high */ +#define NS2501_REG8E 0x8e /* vertical start of the display, low byte */ +#define NS2501_REG8F 0x8f /* vertical start of the display, high byte */ + +/* + * The following register pair control the function of the + * backlight and the DVO output. To enable the corresponding + * function, the corresponding bit must be set in both registers. + */ +#define NS2501_REG34 0x34 /* DVO enable functions, first register */ +#define NS2501_REG35 0x35 /* DVO enable functions, second register */ +#define NS2501_34_ENABLE_OUTPUT (1<<0) /* enable DVO output */ +#define NS2501_34_ENABLE_BACKLIGHT (1<<1) /* enable backlight */ + +/* + * Registers 9C and 9D define the vertical output offset + * of the visible region. + */ +#define NS2501_REG9C 0x9c +#define NS2501_REG9D 0x9d + +/* + * The register 9F defines the dithering. This requires the + * scaler to be ON. Bit 0 enables dithering, the remaining + * bits control the depth of the dither. The higher the value, + * the LOWER the dithering amplitude. A good value seems to be + * 15 (total register value). + */ +#define NS2501_REGF9 0xf9 +#define NS2501_F9_ENABLE_DITHER (1<<0) /* enable dithering */ +#define NS2501_F9_DITHER_MASK (0x7f<<1) /* dither masking */ +#define NS2501_F9_DITHER_SHIFT 1 /* upshift of the dither mask */ + +enum { + MODE_640x480, + MODE_800x600, + MODE_1024x768, +}; + +struct ns2501_reg { + u8 offset; + u8 value; +}; + +/* + * The following structure keeps the complete configuration of + * the DVO, given a specific output configuration. + * This is pretty much guess-work from reverse-engineering, so + * read all this with a grain of salt. + */ +struct ns2501_configuration { + u8 sync; /* configuration of the C0 register */ + u8 conf; /* configuration register 8 */ + u8 syncb; /* configuration register 41 */ + u8 dither; /* configuration of the dithering */ + u8 pll_a; /* PLL configuration, register A, 1B */ + u16 pll_b; /* PLL configuration, register B, 1C/1D */ + u16 hstart; /* horizontal start, registers C1/C2 */ + u16 hstop; /* horizontal total, registers C3/C4 */ + u16 vstart; /* vertical start, registers C5/C6 */ + u16 vstop; /* vertical total, registers C7/C8 */ + u16 vsync; /* manual vertical sync start, 80/81 */ + u16 vtotal; /* number of lines generated, 82/83 */ + u16 hpos; /* horizontal position + 256, 98/99 */ + u16 vpos; /* vertical position, 8e/8f */ + u16 voffs; /* vertical output offset, 9c/9d */ + u16 hscale; /* horizontal scaling factor, b8/b9 */ + u16 vscale; /* vertical scaling factor, 10/11 */ +}; + +/* + * DVO configuration values, partially based on what the BIOS + * of the Fujitsu Lifebook S6010 writes into registers, + * partially found by manual tweaking. These configurations assume + * a 1024x768 panel. 
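+ * For example, the 640x480 entry below uses hscale/vscale = 40960, which is
+ * 65536 * 640/1024 (and 65536 * 480/768), i.e. the input is presumably
+ * stretched to fill the 1024x768 panel, while the 1024x768 entry uses
+ * ~65535 for an (almost) 1:1 mapping.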
+ */ +static const struct ns2501_configuration ns2501_modes[] = { + [MODE_640x480] = { + .sync = NS2501_C0_ENABLE | NS2501_C0_VSYNC, + .conf = NS2501_8_VEN | NS2501_8_HEN | NS2501_8_PD, + .syncb = 0x32, + .dither = 0x0f, + .pll_a = 17, + .pll_b = 852, + .hstart = 144, + .hstop = 783, + .vstart = 22, + .vstop = 514, + .vsync = 2047, /* actually, ignored with this config */ + .vtotal = 1341, + .hpos = 0, + .vpos = 16, + .voffs = 36, + .hscale = 40960, + .vscale = 40960 + }, + [MODE_800x600] = { + .sync = NS2501_C0_ENABLE | + NS2501_C0_HSYNC | NS2501_C0_VSYNC, + .conf = NS2501_8_VEN | NS2501_8_HEN | NS2501_8_PD, + .syncb = 0x00, + .dither = 0x0f, + .pll_a = 25, + .pll_b = 612, + .hstart = 215, + .hstop = 1016, + .vstart = 26, + .vstop = 627, + .vsync = 807, + .vtotal = 1341, + .hpos = 0, + .vpos = 4, + .voffs = 35, + .hscale = 51248, + .vscale = 51232 + }, + [MODE_1024x768] = { + .sync = NS2501_C0_ENABLE | NS2501_C0_VSYNC, + .conf = NS2501_8_VEN | NS2501_8_HEN | NS2501_8_PD, + .syncb = 0x32, + .dither = 0x0f, + .pll_a = 11, + .pll_b = 1350, + .hstart = 276, + .hstop = 1299, + .vstart = 15, + .vstop = 1056, + .vsync = 2047, + .vtotal = 1341, + .hpos = 0, + .vpos = 7, + .voffs = 27, + .hscale = 65535, + .vscale = 65535 + } +}; + +/* + * Other configuration values left by the BIOS of the + * Fujitsu S6010 in the DVO control registers. Their + * value does not depend on the BIOS and their meaning + * is unknown. + */ + +static const struct ns2501_reg mode_agnostic_values[] = { + /* 08 is mode specific */ + [0] = { .offset = 0x0a, .value = 0x81, }, + /* 10,11 are part of the mode specific configuration */ + [1] = { .offset = 0x12, .value = 0x02, }, + [2] = { .offset = 0x18, .value = 0x07, }, + [3] = { .offset = 0x19, .value = 0x00, }, + [4] = { .offset = 0x1a, .value = 0x00, }, /* PLL?, ignored */ + /* 1b,1c,1d are part of the mode specific configuration */ + [5] = { .offset = 0x1e, .value = 0x02, }, + [6] = { .offset = 0x1f, .value = 0x40, }, + [7] = { .offset = 0x20, .value = 0x00, }, + [8] = { .offset = 0x21, .value = 0x00, }, + [9] = { .offset = 0x22, .value = 0x00, }, + [10] = { .offset = 0x23, .value = 0x00, }, + [11] = { .offset = 0x24, .value = 0x00, }, + [12] = { .offset = 0x25, .value = 0x00, }, + [13] = { .offset = 0x26, .value = 0x00, }, + [14] = { .offset = 0x27, .value = 0x00, }, + [15] = { .offset = 0x7e, .value = 0x18, }, + /* 80-84 are part of the mode-specific configuration */ + [16] = { .offset = 0x84, .value = 0x00, }, + [17] = { .offset = 0x85, .value = 0x00, }, + [18] = { .offset = 0x86, .value = 0x00, }, + [19] = { .offset = 0x87, .value = 0x00, }, + [20] = { .offset = 0x88, .value = 0x00, }, + [21] = { .offset = 0x89, .value = 0x00, }, + [22] = { .offset = 0x8a, .value = 0x00, }, + [23] = { .offset = 0x8b, .value = 0x00, }, + [24] = { .offset = 0x8c, .value = 0x10, }, + [25] = { .offset = 0x8d, .value = 0x02, }, + /* 8e,8f are part of the mode-specific configuration */ + [26] = { .offset = 0x90, .value = 0xff, }, + [27] = { .offset = 0x91, .value = 0x07, }, + [28] = { .offset = 0x92, .value = 0xa0, }, + [29] = { .offset = 0x93, .value = 0x02, }, + [30] = { .offset = 0x94, .value = 0x00, }, + [31] = { .offset = 0x95, .value = 0x00, }, + [32] = { .offset = 0x96, .value = 0x05, }, + [33] = { .offset = 0x97, .value = 0x00, }, + /* 98,99 are part of the mode-specific configuration */ + [34] = { .offset = 0x9a, .value = 0x88, }, + [35] = { .offset = 0x9b, .value = 0x00, }, + /* 9c,9d are part of the mode-specific configuration */ + [36] = { .offset = 0x9e, .value = 0x25, }, + 
[37] = { .offset = 0x9f, .value = 0x03, }, + [38] = { .offset = 0xa0, .value = 0x28, }, + [39] = { .offset = 0xa1, .value = 0x01, }, + [40] = { .offset = 0xa2, .value = 0x28, }, + [41] = { .offset = 0xa3, .value = 0x05, }, + /* register 0xa4 is mode specific, but 0x80..0x84 works always */ + [42] = { .offset = 0xa4, .value = 0x84, }, + [43] = { .offset = 0xa5, .value = 0x00, }, + [44] = { .offset = 0xa6, .value = 0x00, }, + [45] = { .offset = 0xa7, .value = 0x00, }, + [46] = { .offset = 0xa8, .value = 0x00, }, + /* 0xa9 to 0xab are mode specific, but have no visible effect */ + [47] = { .offset = 0xa9, .value = 0x04, }, + [48] = { .offset = 0xaa, .value = 0x70, }, + [49] = { .offset = 0xab, .value = 0x4f, }, + [50] = { .offset = 0xac, .value = 0x00, }, + [51] = { .offset = 0xad, .value = 0x00, }, + [52] = { .offset = 0xb6, .value = 0x09, }, + [53] = { .offset = 0xb7, .value = 0x03, }, + /* b8,b9 are part of the mode-specific configuration */ + [54] = { .offset = 0xba, .value = 0x00, }, + [55] = { .offset = 0xbb, .value = 0x20, }, + [56] = { .offset = 0xf3, .value = 0x90, }, + [57] = { .offset = 0xf4, .value = 0x00, }, + [58] = { .offset = 0xf7, .value = 0x88, }, + /* f8 is mode specific, but the value does not matter */ + [59] = { .offset = 0xf8, .value = 0x0a, }, + [60] = { .offset = 0xf9, .value = 0x00, } +}; + +static const struct ns2501_reg regs_init[] = { + [0] = { .offset = 0x35, .value = 0xff, }, + [1] = { .offset = 0x34, .value = 0x00, }, + [2] = { .offset = 0x08, .value = 0x30, }, +}; + +struct ns2501_priv { + bool quiet; + const struct ns2501_configuration *conf; +}; + +#define NSPTR(d) ((NS2501Ptr)(d->DriverPrivate.ptr)) + +/* +** Read a register from the ns2501. +** Returns true if successful, false otherwise. +** If it returns false, it might be wise to enable the +** DVO with the above function. +*/ +static bool ns2501_readb(struct intel_dvo_device *dvo, int addr, u8 *ch) +{ + struct ns2501_priv *ns = dvo->dev_priv; + struct i2c_adapter *adapter = dvo->i2c_bus; + u8 out_buf[2]; + u8 in_buf[2]; + + struct i2c_msg msgs[] = { + { + .addr = dvo->slave_addr, + .flags = 0, + .len = 1, + .buf = out_buf, + }, + { + .addr = dvo->slave_addr, + .flags = I2C_M_RD, + .len = 1, + .buf = in_buf, + } + }; + + out_buf[0] = addr; + out_buf[1] = 0; + + if (i2c_transfer(adapter, msgs, 2) == 2) { + *ch = in_buf[0]; + return true; + } + + if (!ns->quiet) { + DRM_DEBUG_KMS + ("Unable to read register 0x%02x from %s:0x%02x.\n", addr, + adapter->name, dvo->slave_addr); + } + + return false; +} + +/* +** Write a register to the ns2501. +** Returns true if successful, false otherwise. +** If it returns false, it might be wise to enable the +** DVO with the above function. +*/ +static bool ns2501_writeb(struct intel_dvo_device *dvo, int addr, u8 ch) +{ + struct ns2501_priv *ns = dvo->dev_priv; + struct i2c_adapter *adapter = dvo->i2c_bus; + u8 out_buf[2]; + + struct i2c_msg msg = { + .addr = dvo->slave_addr, + .flags = 0, + .len = 2, + .buf = out_buf, + }; + + out_buf[0] = addr; + out_buf[1] = ch; + + if (i2c_transfer(adapter, &msg, 1) == 1) { + return true; + } + + if (!ns->quiet) { + DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d\n", + addr, adapter->name, dvo->slave_addr); + } + + return false; +} + +/* National Semiconductor 2501 driver for chip on i2c bus + * scan for the chip on the bus. + * Hope the VBIOS initialized the PLL correctly so we can + * talk to it. If not, it will not be seen and not detected. + * Bummer! 
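+ * Note that the probe below only compares the low ID bytes
+ * (VID & 0xff == 0x05, DID & 0xff == 0x26), so detection relies on the
+ * chip actually responding at the expected slave address.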
+ */ +static bool ns2501_init(struct intel_dvo_device *dvo, + struct i2c_adapter *adapter) +{ + /* this will detect the NS2501 chip on the specified i2c bus */ + struct ns2501_priv *ns; + unsigned char ch; + + ns = kzalloc(sizeof(struct ns2501_priv), GFP_KERNEL); + if (ns == NULL) + return false; + + dvo->i2c_bus = adapter; + dvo->dev_priv = ns; + ns->quiet = true; + + if (!ns2501_readb(dvo, NS2501_VID_LO, &ch)) + goto out; + + if (ch != (NS2501_VID & 0xff)) { + DRM_DEBUG_KMS("ns2501 not detected got %d: from %s Slave %d.\n", + ch, adapter->name, dvo->slave_addr); + goto out; + } + + if (!ns2501_readb(dvo, NS2501_DID_LO, &ch)) + goto out; + + if (ch != (NS2501_DID & 0xff)) { + DRM_DEBUG_KMS("ns2501 not detected got %d: from %s Slave %d.\n", + ch, adapter->name, dvo->slave_addr); + goto out; + } + ns->quiet = false; + + DRM_DEBUG_KMS("init ns2501 dvo controller successfully!\n"); + + return true; + +out: + kfree(ns); + return false; +} + +static enum drm_connector_status ns2501_detect(struct intel_dvo_device *dvo) +{ + /* + * This is a Laptop display, it doesn't have hotplugging. + * Even if not, the detection bit of the 2501 is unreliable as + * it only works for some display types. + * It is even more unreliable as the PLL must be active for + * allowing reading from the chiop. + */ + return connector_status_connected; +} + +static enum drm_mode_status ns2501_mode_valid(struct intel_dvo_device *dvo, + struct drm_display_mode *mode) +{ + DRM_DEBUG_KMS + ("is mode valid (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d)\n", + mode->hdisplay, mode->htotal, mode->vdisplay, mode->vtotal); + + /* + * Currently, these are all the modes I have data from. + * More might exist. Unclear how to find the native resolution + * of the panel in here so we could always accept it + * by disabling the scaler. + */ + if ((mode->hdisplay == 640 && mode->vdisplay == 480 && mode->clock == 25175) || + (mode->hdisplay == 800 && mode->vdisplay == 600 && mode->clock == 40000) || + (mode->hdisplay == 1024 && mode->vdisplay == 768 && mode->clock == 65000)) { + return MODE_OK; + } else { + return MODE_ONE_SIZE; /* Is this a reasonable error? 
*/ + } +} + +static void ns2501_mode_set(struct intel_dvo_device *dvo, + const struct drm_display_mode *mode, + const struct drm_display_mode *adjusted_mode) +{ + const struct ns2501_configuration *conf; + struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv); + int mode_idx, i; + + DRM_DEBUG_KMS + ("set mode (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d).\n", + mode->hdisplay, mode->htotal, mode->vdisplay, mode->vtotal); + + DRM_DEBUG_KMS("Detailed requested mode settings are:\n" + "clock : %d kHz\n" + "hdisplay : %d\n" + "hblank start : %d\n" + "hblank end : %d\n" + "hsync start : %d\n" + "hsync end : %d\n" + "htotal : %d\n" + "hskew : %d\n" + "vdisplay : %d\n" + "vblank start : %d\n" + "hblank end : %d\n" + "vsync start : %d\n" + "vsync end : %d\n" + "vtotal : %d\n", + adjusted_mode->crtc_clock, + adjusted_mode->crtc_hdisplay, + adjusted_mode->crtc_hblank_start, + adjusted_mode->crtc_hblank_end, + adjusted_mode->crtc_hsync_start, + adjusted_mode->crtc_hsync_end, + adjusted_mode->crtc_htotal, + adjusted_mode->crtc_hskew, + adjusted_mode->crtc_vdisplay, + adjusted_mode->crtc_vblank_start, + adjusted_mode->crtc_vblank_end, + adjusted_mode->crtc_vsync_start, + adjusted_mode->crtc_vsync_end, + adjusted_mode->crtc_vtotal); + + if (mode->hdisplay == 640 && mode->vdisplay == 480) + mode_idx = MODE_640x480; + else if (mode->hdisplay == 800 && mode->vdisplay == 600) + mode_idx = MODE_800x600; + else if (mode->hdisplay == 1024 && mode->vdisplay == 768) + mode_idx = MODE_1024x768; + else + return; + + /* Hopefully doing it every time won't hurt... */ + for (i = 0; i < ARRAY_SIZE(regs_init); i++) + ns2501_writeb(dvo, regs_init[i].offset, regs_init[i].value); + + /* Write the mode-agnostic values */ + for (i = 0; i < ARRAY_SIZE(mode_agnostic_values); i++) + ns2501_writeb(dvo, mode_agnostic_values[i].offset, + mode_agnostic_values[i].value); + + /* Write now the mode-specific configuration */ + conf = ns2501_modes + mode_idx; + ns->conf = conf; + + ns2501_writeb(dvo, NS2501_REG8, conf->conf); + ns2501_writeb(dvo, NS2501_REG1B, conf->pll_a); + ns2501_writeb(dvo, NS2501_REG1C, conf->pll_b & 0xff); + ns2501_writeb(dvo, NS2501_REG1D, conf->pll_b >> 8); + ns2501_writeb(dvo, NS2501_REGC1, conf->hstart & 0xff); + ns2501_writeb(dvo, NS2501_REGC2, conf->hstart >> 8); + ns2501_writeb(dvo, NS2501_REGC3, conf->hstop & 0xff); + ns2501_writeb(dvo, NS2501_REGC4, conf->hstop >> 8); + ns2501_writeb(dvo, NS2501_REGC5, conf->vstart & 0xff); + ns2501_writeb(dvo, NS2501_REGC6, conf->vstart >> 8); + ns2501_writeb(dvo, NS2501_REGC7, conf->vstop & 0xff); + ns2501_writeb(dvo, NS2501_REGC8, conf->vstop >> 8); + ns2501_writeb(dvo, NS2501_REG80, conf->vsync & 0xff); + ns2501_writeb(dvo, NS2501_REG81, conf->vsync >> 8); + ns2501_writeb(dvo, NS2501_REG82, conf->vtotal & 0xff); + ns2501_writeb(dvo, NS2501_REG83, conf->vtotal >> 8); + ns2501_writeb(dvo, NS2501_REG98, conf->hpos & 0xff); + ns2501_writeb(dvo, NS2501_REG99, conf->hpos >> 8); + ns2501_writeb(dvo, NS2501_REG8E, conf->vpos & 0xff); + ns2501_writeb(dvo, NS2501_REG8F, conf->vpos >> 8); + ns2501_writeb(dvo, NS2501_REG9C, conf->voffs & 0xff); + ns2501_writeb(dvo, NS2501_REG9D, conf->voffs >> 8); + ns2501_writeb(dvo, NS2501_REGB8, conf->hscale & 0xff); + ns2501_writeb(dvo, NS2501_REGB9, conf->hscale >> 8); + ns2501_writeb(dvo, NS2501_REG10, conf->vscale & 0xff); + ns2501_writeb(dvo, NS2501_REG11, conf->vscale >> 8); + ns2501_writeb(dvo, NS2501_REGF9, conf->dither); + ns2501_writeb(dvo, NS2501_REG41, conf->syncb); + ns2501_writeb(dvo, NS2501_REGC0, conf->sync); +} + +/* 
set the NS2501 power state */ +static bool ns2501_get_hw_state(struct intel_dvo_device *dvo) +{ + unsigned char ch; + + if (!ns2501_readb(dvo, NS2501_REG8, &ch)) + return false; + + return ch & NS2501_8_PD; +} + +/* set the NS2501 power state */ +static void ns2501_dpms(struct intel_dvo_device *dvo, bool enable) +{ + struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv); + + DRM_DEBUG_KMS("Trying set the dpms of the DVO to %i\n", enable); + + if (enable) { + ns2501_writeb(dvo, NS2501_REGC0, ns->conf->sync | 0x08); + + ns2501_writeb(dvo, NS2501_REG41, ns->conf->syncb); + + ns2501_writeb(dvo, NS2501_REG34, NS2501_34_ENABLE_OUTPUT); + msleep(15); + + ns2501_writeb(dvo, NS2501_REG8, + ns->conf->conf | NS2501_8_BPAS); + if (!(ns->conf->conf & NS2501_8_BPAS)) + ns2501_writeb(dvo, NS2501_REG8, ns->conf->conf); + msleep(200); + + ns2501_writeb(dvo, NS2501_REG34, + NS2501_34_ENABLE_OUTPUT | NS2501_34_ENABLE_BACKLIGHT); + + ns2501_writeb(dvo, NS2501_REGC0, ns->conf->sync); + } else { + ns2501_writeb(dvo, NS2501_REG34, NS2501_34_ENABLE_OUTPUT); + msleep(200); + + ns2501_writeb(dvo, NS2501_REG8, NS2501_8_VEN | NS2501_8_HEN | + NS2501_8_BPAS); + msleep(15); + + ns2501_writeb(dvo, NS2501_REG34, 0x00); + } +} + +static void ns2501_destroy(struct intel_dvo_device *dvo) +{ + struct ns2501_priv *ns = dvo->dev_priv; + + if (ns) { + kfree(ns); + dvo->dev_priv = NULL; + } +} + +const struct intel_dvo_dev_ops ns2501_ops = { + .init = ns2501_init, + .detect = ns2501_detect, + .mode_valid = ns2501_mode_valid, + .mode_set = ns2501_mode_set, + .dpms = ns2501_dpms, + .get_hw_state = ns2501_get_hw_state, + .destroy = ns2501_destroy, +}; diff --git a/drivers/gpu/drm/i915/display/dvo_sil164.c b/drivers/gpu/drm/i915/display/dvo_sil164.c new file mode 100644 index 000000000000..04698eaeb632 --- /dev/null +++ b/drivers/gpu/drm/i915/display/dvo_sil164.c @@ -0,0 +1,280 @@ +/************************************************************************** + +Copyright © 2006 Dave Airlie + +All Rights Reserved. + +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sub license, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice (including the +next paragraph) shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. +IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ +**************************************************************************/ + +#include "intel_drv.h" +#include "intel_dvo_dev.h" + +#define SIL164_VID 0x0001 +#define SIL164_DID 0x0006 + +#define SIL164_VID_LO 0x00 +#define SIL164_VID_HI 0x01 +#define SIL164_DID_LO 0x02 +#define SIL164_DID_HI 0x03 +#define SIL164_REV 0x04 +#define SIL164_RSVD 0x05 +#define SIL164_FREQ_LO 0x06 +#define SIL164_FREQ_HI 0x07 + +#define SIL164_REG8 0x08 +#define SIL164_8_VEN (1<<5) +#define SIL164_8_HEN (1<<4) +#define SIL164_8_DSEL (1<<3) +#define SIL164_8_BSEL (1<<2) +#define SIL164_8_EDGE (1<<1) +#define SIL164_8_PD (1<<0) + +#define SIL164_REG9 0x09 +#define SIL164_9_VLOW (1<<7) +#define SIL164_9_MSEL_MASK (0x7<<4) +#define SIL164_9_TSEL (1<<3) +#define SIL164_9_RSEN (1<<2) +#define SIL164_9_HTPLG (1<<1) +#define SIL164_9_MDI (1<<0) + +#define SIL164_REGC 0x0c + +struct sil164_priv { + //I2CDevRec d; + bool quiet; +}; + +#define SILPTR(d) ((SIL164Ptr)(d->DriverPrivate.ptr)) + +static bool sil164_readb(struct intel_dvo_device *dvo, int addr, u8 *ch) +{ + struct sil164_priv *sil = dvo->dev_priv; + struct i2c_adapter *adapter = dvo->i2c_bus; + u8 out_buf[2]; + u8 in_buf[2]; + + struct i2c_msg msgs[] = { + { + .addr = dvo->slave_addr, + .flags = 0, + .len = 1, + .buf = out_buf, + }, + { + .addr = dvo->slave_addr, + .flags = I2C_M_RD, + .len = 1, + .buf = in_buf, + } + }; + + out_buf[0] = addr; + out_buf[1] = 0; + + if (i2c_transfer(adapter, msgs, 2) == 2) { + *ch = in_buf[0]; + return true; + } + + if (!sil->quiet) { + DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n", + addr, adapter->name, dvo->slave_addr); + } + return false; +} + +static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, u8 ch) +{ + struct sil164_priv *sil = dvo->dev_priv; + struct i2c_adapter *adapter = dvo->i2c_bus; + u8 out_buf[2]; + struct i2c_msg msg = { + .addr = dvo->slave_addr, + .flags = 0, + .len = 2, + .buf = out_buf, + }; + + out_buf[0] = addr; + out_buf[1] = ch; + + if (i2c_transfer(adapter, &msg, 1) == 1) + return true; + + if (!sil->quiet) { + DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n", + addr, adapter->name, dvo->slave_addr); + } + + return false; +} + +/* Silicon Image 164 driver for chip on i2c bus */ +static bool sil164_init(struct intel_dvo_device *dvo, + struct i2c_adapter *adapter) +{ + /* this will detect the SIL164 chip on the specified i2c bus */ + struct sil164_priv *sil; + unsigned char ch; + + sil = kzalloc(sizeof(struct sil164_priv), GFP_KERNEL); + if (sil == NULL) + return false; + + dvo->i2c_bus = adapter; + dvo->dev_priv = sil; + sil->quiet = true; + + if (!sil164_readb(dvo, SIL164_VID_LO, &ch)) + goto out; + + if (ch != (SIL164_VID & 0xff)) { + DRM_DEBUG_KMS("sil164 not detected got %d: from %s Slave %d.\n", + ch, adapter->name, dvo->slave_addr); + goto out; + } + + if (!sil164_readb(dvo, SIL164_DID_LO, &ch)) + goto out; + + if (ch != (SIL164_DID & 0xff)) { + DRM_DEBUG_KMS("sil164 not detected got %d: from %s Slave %d.\n", + ch, adapter->name, dvo->slave_addr); + goto out; + } + sil->quiet = false; + + DRM_DEBUG_KMS("init sil164 dvo controller successfully!\n"); + return true; + +out: + kfree(sil); + return false; +} + +static enum drm_connector_status sil164_detect(struct intel_dvo_device *dvo) +{ + u8 reg9; + + sil164_readb(dvo, SIL164_REG9, ®9); + + if (reg9 & SIL164_9_HTPLG) + return connector_status_connected; + else + return connector_status_disconnected; +} + +static enum drm_mode_status sil164_mode_valid(struct intel_dvo_device *dvo, + struct drm_display_mode 
*mode) +{ + return MODE_OK; +} + +static void sil164_mode_set(struct intel_dvo_device *dvo, + const struct drm_display_mode *mode, + const struct drm_display_mode *adjusted_mode) +{ + /* As long as the basics are set up, since we don't have clock + * dependencies in the mode setup, we can just leave the + * registers alone and everything will work fine. + */ + /* recommended programming sequence from doc */ + /*sil164_writeb(sil, 0x08, 0x30); + sil164_writeb(sil, 0x09, 0x00); + sil164_writeb(sil, 0x0a, 0x90); + sil164_writeb(sil, 0x0c, 0x89); + sil164_writeb(sil, 0x08, 0x31);*/ + /* don't do much */ + return; +} + +/* set the SIL164 power state */ +static void sil164_dpms(struct intel_dvo_device *dvo, bool enable) +{ + int ret; + unsigned char ch; + + ret = sil164_readb(dvo, SIL164_REG8, &ch); + if (ret == false) + return; + + if (enable) + ch |= SIL164_8_PD; + else + ch &= ~SIL164_8_PD; + + sil164_writeb(dvo, SIL164_REG8, ch); + return; +} + +static bool sil164_get_hw_state(struct intel_dvo_device *dvo) +{ + int ret; + unsigned char ch; + + ret = sil164_readb(dvo, SIL164_REG8, &ch); + if (ret == false) + return false; + + if (ch & SIL164_8_PD) + return true; + else + return false; +} + +static void sil164_dump_regs(struct intel_dvo_device *dvo) +{ + u8 val; + + sil164_readb(dvo, SIL164_FREQ_LO, &val); + DRM_DEBUG_KMS("SIL164_FREQ_LO: 0x%02x\n", val); + sil164_readb(dvo, SIL164_FREQ_HI, &val); + DRM_DEBUG_KMS("SIL164_FREQ_HI: 0x%02x\n", val); + sil164_readb(dvo, SIL164_REG8, &val); + DRM_DEBUG_KMS("SIL164_REG8: 0x%02x\n", val); + sil164_readb(dvo, SIL164_REG9, &val); + DRM_DEBUG_KMS("SIL164_REG9: 0x%02x\n", val); + sil164_readb(dvo, SIL164_REGC, &val); + DRM_DEBUG_KMS("SIL164_REGC: 0x%02x\n", val); +} + +static void sil164_destroy(struct intel_dvo_device *dvo) +{ + struct sil164_priv *sil = dvo->dev_priv; + + if (sil) { + kfree(sil); + dvo->dev_priv = NULL; + } +} + +const struct intel_dvo_dev_ops sil164_ops = { + .init = sil164_init, + .detect = sil164_detect, + .mode_valid = sil164_mode_valid, + .mode_set = sil164_mode_set, + .dpms = sil164_dpms, + .get_hw_state = sil164_get_hw_state, + .dump_regs = sil164_dump_regs, + .destroy = sil164_destroy, +}; diff --git a/drivers/gpu/drm/i915/display/dvo_tfp410.c b/drivers/gpu/drm/i915/display/dvo_tfp410.c new file mode 100644 index 000000000000..623114ee73cd --- /dev/null +++ b/drivers/gpu/drm/i915/display/dvo_tfp410.c @@ -0,0 +1,319 @@ +/* + * Copyright © 2007 Dave Mueller + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Dave Mueller + * + */ + +#include "intel_drv.h" +#include "intel_dvo_dev.h" + +/* register definitions according to the TFP410 data sheet */ +#define TFP410_VID 0x014C +#define TFP410_DID 0x0410 + +#define TFP410_VID_LO 0x00 +#define TFP410_VID_HI 0x01 +#define TFP410_DID_LO 0x02 +#define TFP410_DID_HI 0x03 +#define TFP410_REV 0x04 + +#define TFP410_CTL_1 0x08 +#define TFP410_CTL_1_TDIS (1<<6) +#define TFP410_CTL_1_VEN (1<<5) +#define TFP410_CTL_1_HEN (1<<4) +#define TFP410_CTL_1_DSEL (1<<3) +#define TFP410_CTL_1_BSEL (1<<2) +#define TFP410_CTL_1_EDGE (1<<1) +#define TFP410_CTL_1_PD (1<<0) + +#define TFP410_CTL_2 0x09 +#define TFP410_CTL_2_VLOW (1<<7) +#define TFP410_CTL_2_MSEL_MASK (0x7<<4) +#define TFP410_CTL_2_MSEL (1<<4) +#define TFP410_CTL_2_TSEL (1<<3) +#define TFP410_CTL_2_RSEN (1<<2) +#define TFP410_CTL_2_HTPLG (1<<1) +#define TFP410_CTL_2_MDI (1<<0) + +#define TFP410_CTL_3 0x0A +#define TFP410_CTL_3_DK_MASK (0x7<<5) +#define TFP410_CTL_3_DK (1<<5) +#define TFP410_CTL_3_DKEN (1<<4) +#define TFP410_CTL_3_CTL_MASK (0x7<<1) +#define TFP410_CTL_3_CTL (1<<1) + +#define TFP410_USERCFG 0x0B + +#define TFP410_DE_DLY 0x32 + +#define TFP410_DE_CTL 0x33 +#define TFP410_DE_CTL_DEGEN (1<<6) +#define TFP410_DE_CTL_VSPOL (1<<5) +#define TFP410_DE_CTL_HSPOL (1<<4) +#define TFP410_DE_CTL_DEDLY8 (1<<0) + +#define TFP410_DE_TOP 0x34 + +#define TFP410_DE_CNT_LO 0x36 +#define TFP410_DE_CNT_HI 0x37 + +#define TFP410_DE_LIN_LO 0x38 +#define TFP410_DE_LIN_HI 0x39 + +#define TFP410_H_RES_LO 0x3A +#define TFP410_H_RES_HI 0x3B + +#define TFP410_V_RES_LO 0x3C +#define TFP410_V_RES_HI 0x3D + +struct tfp410_priv { + bool quiet; +}; + +static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, u8 *ch) +{ + struct tfp410_priv *tfp = dvo->dev_priv; + struct i2c_adapter *adapter = dvo->i2c_bus; + u8 out_buf[2]; + u8 in_buf[2]; + + struct i2c_msg msgs[] = { + { + .addr = dvo->slave_addr, + .flags = 0, + .len = 1, + .buf = out_buf, + }, + { + .addr = dvo->slave_addr, + .flags = I2C_M_RD, + .len = 1, + .buf = in_buf, + } + }; + + out_buf[0] = addr; + out_buf[1] = 0; + + if (i2c_transfer(adapter, msgs, 2) == 2) { + *ch = in_buf[0]; + return true; + } + + if (!tfp->quiet) { + DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n", + addr, adapter->name, dvo->slave_addr); + } + return false; +} + +static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, u8 ch) +{ + struct tfp410_priv *tfp = dvo->dev_priv; + struct i2c_adapter *adapter = dvo->i2c_bus; + u8 out_buf[2]; + struct i2c_msg msg = { + .addr = dvo->slave_addr, + .flags = 0, + .len = 2, + .buf = out_buf, + }; + + out_buf[0] = addr; + out_buf[1] = ch; + + if (i2c_transfer(adapter, &msg, 1) == 1) + return true; + + if (!tfp->quiet) { + DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n", + addr, adapter->name, dvo->slave_addr); + } + + return false; +} + +static int tfp410_getid(struct intel_dvo_device *dvo, int addr) +{ + u8 ch1, ch2; + + if (tfp410_readb(dvo, addr+0, &ch1) && + tfp410_readb(dvo, addr+1, &ch2)) + return ((ch2 << 8) & 0xFF00) | (ch1 & 0x00FF); + + return -1; +} + +/* Ti TFP410 driver for chip on i2c bus */ +static bool tfp410_init(struct intel_dvo_device *dvo, + struct i2c_adapter *adapter) +{ + /* this will detect the tfp410 
chip on the specified i2c bus */ + struct tfp410_priv *tfp; + int id; + + tfp = kzalloc(sizeof(struct tfp410_priv), GFP_KERNEL); + if (tfp == NULL) + return false; + + dvo->i2c_bus = adapter; + dvo->dev_priv = tfp; + tfp->quiet = true; + + if ((id = tfp410_getid(dvo, TFP410_VID_LO)) != TFP410_VID) { + DRM_DEBUG_KMS("tfp410 not detected got VID %X: from %s " + "Slave %d.\n", + id, adapter->name, dvo->slave_addr); + goto out; + } + + if ((id = tfp410_getid(dvo, TFP410_DID_LO)) != TFP410_DID) { + DRM_DEBUG_KMS("tfp410 not detected got DID %X: from %s " + "Slave %d.\n", + id, adapter->name, dvo->slave_addr); + goto out; + } + tfp->quiet = false; + return true; +out: + kfree(tfp); + return false; +} + +static enum drm_connector_status tfp410_detect(struct intel_dvo_device *dvo) +{ + enum drm_connector_status ret = connector_status_disconnected; + u8 ctl2; + + if (tfp410_readb(dvo, TFP410_CTL_2, &ctl2)) { + if (ctl2 & TFP410_CTL_2_RSEN) + ret = connector_status_connected; + else + ret = connector_status_disconnected; + } + + return ret; +} + +static enum drm_mode_status tfp410_mode_valid(struct intel_dvo_device *dvo, + struct drm_display_mode *mode) +{ + return MODE_OK; +} + +static void tfp410_mode_set(struct intel_dvo_device *dvo, + const struct drm_display_mode *mode, + const struct drm_display_mode *adjusted_mode) +{ + /* As long as the basics are set up, since we don't have clock dependencies + * in the mode setup, we can just leave the registers alone and everything + * will work fine. + */ + /* don't do much */ + return; +} + +/* set the tfp410 power state */ +static void tfp410_dpms(struct intel_dvo_device *dvo, bool enable) +{ + u8 ctl1; + + if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1)) + return; + + if (enable) + ctl1 |= TFP410_CTL_1_PD; + else + ctl1 &= ~TFP410_CTL_1_PD; + + tfp410_writeb(dvo, TFP410_CTL_1, ctl1); +} + +static bool tfp410_get_hw_state(struct intel_dvo_device *dvo) +{ + u8 ctl1; + + if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1)) + return false; + + if (ctl1 & TFP410_CTL_1_PD) + return true; + else + return false; +} + +static void tfp410_dump_regs(struct intel_dvo_device *dvo) +{ + u8 val, val2; + + tfp410_readb(dvo, TFP410_REV, &val); + DRM_DEBUG_KMS("TFP410_REV: 0x%02X\n", val); + tfp410_readb(dvo, TFP410_CTL_1, &val); + DRM_DEBUG_KMS("TFP410_CTL1: 0x%02X\n", val); + tfp410_readb(dvo, TFP410_CTL_2, &val); + DRM_DEBUG_KMS("TFP410_CTL2: 0x%02X\n", val); + tfp410_readb(dvo, TFP410_CTL_3, &val); + DRM_DEBUG_KMS("TFP410_CTL3: 0x%02X\n", val); + tfp410_readb(dvo, TFP410_USERCFG, &val); + DRM_DEBUG_KMS("TFP410_USERCFG: 0x%02X\n", val); + tfp410_readb(dvo, TFP410_DE_DLY, &val); + DRM_DEBUG_KMS("TFP410_DE_DLY: 0x%02X\n", val); + tfp410_readb(dvo, TFP410_DE_CTL, &val); + DRM_DEBUG_KMS("TFP410_DE_CTL: 0x%02X\n", val); + tfp410_readb(dvo, TFP410_DE_TOP, &val); + DRM_DEBUG_KMS("TFP410_DE_TOP: 0x%02X\n", val); + tfp410_readb(dvo, TFP410_DE_CNT_LO, &val); + tfp410_readb(dvo, TFP410_DE_CNT_HI, &val2); + DRM_DEBUG_KMS("TFP410_DE_CNT: 0x%02X%02X\n", val2, val); + tfp410_readb(dvo, TFP410_DE_LIN_LO, &val); + tfp410_readb(dvo, TFP410_DE_LIN_HI, &val2); + DRM_DEBUG_KMS("TFP410_DE_LIN: 0x%02X%02X\n", val2, val); + tfp410_readb(dvo, TFP410_H_RES_LO, &val); + tfp410_readb(dvo, TFP410_H_RES_HI, &val2); + DRM_DEBUG_KMS("TFP410_H_RES: 0x%02X%02X\n", val2, val); + tfp410_readb(dvo, TFP410_V_RES_LO, &val); + tfp410_readb(dvo, TFP410_V_RES_HI, &val2); + DRM_DEBUG_KMS("TFP410_V_RES: 0x%02X%02X\n", val2, val); +} + +static void tfp410_destroy(struct intel_dvo_device *dvo) +{ + struct tfp410_priv *tfp 
= dvo->dev_priv; + + if (tfp) { + kfree(tfp); + dvo->dev_priv = NULL; + } +} + +const struct intel_dvo_dev_ops tfp410_ops = { + .init = tfp410_init, + .detect = tfp410_detect, + .mode_valid = tfp410_mode_valid, + .mode_set = tfp410_mode_set, + .dpms = tfp410_dpms, + .get_hw_state = tfp410_get_hw_state, + .dump_regs = tfp410_dump_regs, + .destroy = tfp410_destroy, +}; diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c new file mode 100644 index 000000000000..74448e6bf749 --- /dev/null +++ b/drivers/gpu/drm/i915/display/icl_dsi.c @@ -0,0 +1,1589 @@ +/* + * Copyright © 2018 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: + * Madhav Chauhan + * Jani Nikula + */ + +#include +#include + +#include "intel_atomic.h" +#include "intel_combo_phy.h" +#include "intel_connector.h" +#include "intel_ddi.h" +#include "intel_dsi.h" +#include "intel_panel.h" + +static inline int header_credits_available(struct drm_i915_private *dev_priv, + enum transcoder dsi_trans) +{ + return (I915_READ(DSI_CMD_TXCTL(dsi_trans)) & FREE_HEADER_CREDIT_MASK) + >> FREE_HEADER_CREDIT_SHIFT; +} + +static inline int payload_credits_available(struct drm_i915_private *dev_priv, + enum transcoder dsi_trans) +{ + return (I915_READ(DSI_CMD_TXCTL(dsi_trans)) & FREE_PLOAD_CREDIT_MASK) + >> FREE_PLOAD_CREDIT_SHIFT; +} + +static void wait_for_header_credits(struct drm_i915_private *dev_priv, + enum transcoder dsi_trans) +{ + if (wait_for_us(header_credits_available(dev_priv, dsi_trans) >= + MAX_HEADER_CREDIT, 100)) + DRM_ERROR("DSI header credits not released\n"); +} + +static void wait_for_payload_credits(struct drm_i915_private *dev_priv, + enum transcoder dsi_trans) +{ + if (wait_for_us(payload_credits_available(dev_priv, dsi_trans) >= + MAX_PLOAD_CREDIT, 100)) + DRM_ERROR("DSI payload credits not released\n"); +} + +static enum transcoder dsi_port_to_transcoder(enum port port) +{ + if (port == PORT_A) + return TRANSCODER_DSI_0; + else + return TRANSCODER_DSI_1; +} + +static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct mipi_dsi_device *dsi; + enum port port; + enum transcoder dsi_trans; + int ret; + + /* wait for header/payload credits to be released */ + for_each_dsi_port(port, intel_dsi->ports) { + dsi_trans = 
dsi_port_to_transcoder(port); + wait_for_header_credits(dev_priv, dsi_trans); + wait_for_payload_credits(dev_priv, dsi_trans); + } + + /* send nop DCS command */ + for_each_dsi_port(port, intel_dsi->ports) { + dsi = intel_dsi->dsi_hosts[port]->device; + dsi->mode_flags |= MIPI_DSI_MODE_LPM; + dsi->channel = 0; + ret = mipi_dsi_dcs_nop(dsi); + if (ret < 0) + DRM_ERROR("error sending DCS NOP command\n"); + } + + /* wait for header credits to be released */ + for_each_dsi_port(port, intel_dsi->ports) { + dsi_trans = dsi_port_to_transcoder(port); + wait_for_header_credits(dev_priv, dsi_trans); + } + + /* wait for LP TX in progress bit to be cleared */ + for_each_dsi_port(port, intel_dsi->ports) { + dsi_trans = dsi_port_to_transcoder(port); + if (wait_for_us(!(I915_READ(DSI_LP_MSG(dsi_trans)) & + LPTX_IN_PROGRESS), 20)) + DRM_ERROR("LPTX bit not cleared\n"); + } +} + +static bool add_payld_to_queue(struct intel_dsi_host *host, const u8 *data, + u32 len) +{ + struct intel_dsi *intel_dsi = host->intel_dsi; + struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev); + enum transcoder dsi_trans = dsi_port_to_transcoder(host->port); + int free_credits; + int i, j; + + for (i = 0; i < len; i += 4) { + u32 tmp = 0; + + free_credits = payload_credits_available(dev_priv, dsi_trans); + if (free_credits < 1) { + DRM_ERROR("Payload credit not available\n"); + return false; + } + + for (j = 0; j < min_t(u32, len - i, 4); j++) + tmp |= *data++ << 8 * j; + + I915_WRITE(DSI_CMD_TXPYLD(dsi_trans), tmp); + } + + return true; +} + +static int dsi_send_pkt_hdr(struct intel_dsi_host *host, + struct mipi_dsi_packet pkt, bool enable_lpdt) +{ + struct intel_dsi *intel_dsi = host->intel_dsi; + struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev); + enum transcoder dsi_trans = dsi_port_to_transcoder(host->port); + u32 tmp; + int free_credits; + + /* check if header credit available */ + free_credits = header_credits_available(dev_priv, dsi_trans); + if (free_credits < 1) { + DRM_ERROR("send pkt header failed, not enough hdr credits\n"); + return -1; + } + + tmp = I915_READ(DSI_CMD_TXHDR(dsi_trans)); + + if (pkt.payload) + tmp |= PAYLOAD_PRESENT; + else + tmp &= ~PAYLOAD_PRESENT; + + tmp &= ~VBLANK_FENCE; + + if (enable_lpdt) + tmp |= LP_DATA_TRANSFER; + + tmp &= ~(PARAM_WC_MASK | VC_MASK | DT_MASK); + tmp |= ((pkt.header[0] & VC_MASK) << VC_SHIFT); + tmp |= ((pkt.header[0] & DT_MASK) << DT_SHIFT); + tmp |= (pkt.header[1] << PARAM_WC_LOWER_SHIFT); + tmp |= (pkt.header[2] << PARAM_WC_UPPER_SHIFT); + I915_WRITE(DSI_CMD_TXHDR(dsi_trans), tmp); + + return 0; +} + +static int dsi_send_pkt_payld(struct intel_dsi_host *host, + struct mipi_dsi_packet pkt) +{ + /* payload queue can accept *256 bytes*, check limit */ + if (pkt.payload_length > MAX_PLOAD_CREDIT * 4) { + DRM_ERROR("payload size exceeds max queue limit\n"); + return -1; + } + + /* load data into command payload queue */ + if (!add_payld_to_queue(host, pkt.payload, + pkt.payload_length)) { + DRM_ERROR("adding payload to queue failed\n"); + return -1; + } + + return 0; +} + +static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + enum port port; + u32 tmp; + int lane; + + for_each_dsi_port(port, intel_dsi->ports) { + + /* + * Program voltage swing and pre-emphasis level values as per + * table in BSPEC under DDI buffer programing + */ + tmp = I915_READ(ICL_PORT_TX_DW5_LN0(port)); + tmp 
&= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK); + tmp |= SCALING_MODE_SEL(0x2); + tmp |= TAP2_DISABLE | TAP3_DISABLE; + tmp |= RTERM_SELECT(0x6); + I915_WRITE(ICL_PORT_TX_DW5_GRP(port), tmp); + + tmp = I915_READ(ICL_PORT_TX_DW5_AUX(port)); + tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK); + tmp |= SCALING_MODE_SEL(0x2); + tmp |= TAP2_DISABLE | TAP3_DISABLE; + tmp |= RTERM_SELECT(0x6); + I915_WRITE(ICL_PORT_TX_DW5_AUX(port), tmp); + + tmp = I915_READ(ICL_PORT_TX_DW2_LN0(port)); + tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK | + RCOMP_SCALAR_MASK); + tmp |= SWING_SEL_UPPER(0x2); + tmp |= SWING_SEL_LOWER(0x2); + tmp |= RCOMP_SCALAR(0x98); + I915_WRITE(ICL_PORT_TX_DW2_GRP(port), tmp); + + tmp = I915_READ(ICL_PORT_TX_DW2_AUX(port)); + tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK | + RCOMP_SCALAR_MASK); + tmp |= SWING_SEL_UPPER(0x2); + tmp |= SWING_SEL_LOWER(0x2); + tmp |= RCOMP_SCALAR(0x98); + I915_WRITE(ICL_PORT_TX_DW2_AUX(port), tmp); + + tmp = I915_READ(ICL_PORT_TX_DW4_AUX(port)); + tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | + CURSOR_COEFF_MASK); + tmp |= POST_CURSOR_1(0x0); + tmp |= POST_CURSOR_2(0x0); + tmp |= CURSOR_COEFF(0x3f); + I915_WRITE(ICL_PORT_TX_DW4_AUX(port), tmp); + + for (lane = 0; lane <= 3; lane++) { + /* Bspec: must not use GRP register for write */ + tmp = I915_READ(ICL_PORT_TX_DW4_LN(lane, port)); + tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | + CURSOR_COEFF_MASK); + tmp |= POST_CURSOR_1(0x0); + tmp |= POST_CURSOR_2(0x0); + tmp |= CURSOR_COEFF(0x3f); + I915_WRITE(ICL_PORT_TX_DW4_LN(lane, port), tmp); + } + } +} + +static void configure_dual_link_mode(struct intel_encoder *encoder, + const struct intel_crtc_state *pipe_config) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + u32 dss_ctl1; + + dss_ctl1 = I915_READ(DSS_CTL1); + dss_ctl1 |= SPLITTER_ENABLE; + dss_ctl1 &= ~OVERLAP_PIXELS_MASK; + dss_ctl1 |= OVERLAP_PIXELS(intel_dsi->pixel_overlap); + + if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) { + const struct drm_display_mode *adjusted_mode = + &pipe_config->base.adjusted_mode; + u32 dss_ctl2; + u16 hactive = adjusted_mode->crtc_hdisplay; + u16 dl_buffer_depth; + + dss_ctl1 &= ~DUAL_LINK_MODE_INTERLEAVE; + dl_buffer_depth = hactive / 2 + intel_dsi->pixel_overlap; + + if (dl_buffer_depth > MAX_DL_BUFFER_TARGET_DEPTH) + DRM_ERROR("DL buffer depth exceed max value\n"); + + dss_ctl1 &= ~LEFT_DL_BUF_TARGET_DEPTH_MASK; + dss_ctl1 |= LEFT_DL_BUF_TARGET_DEPTH(dl_buffer_depth); + dss_ctl2 = I915_READ(DSS_CTL2); + dss_ctl2 &= ~RIGHT_DL_BUF_TARGET_DEPTH_MASK; + dss_ctl2 |= RIGHT_DL_BUF_TARGET_DEPTH(dl_buffer_depth); + I915_WRITE(DSS_CTL2, dss_ctl2); + } else { + /* Interleave */ + dss_ctl1 |= DUAL_LINK_MODE_INTERLEAVE; + } + + I915_WRITE(DSS_CTL1, dss_ctl1); +} + +static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + enum port port; + u32 bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); + u32 afe_clk_khz; /* 8X Clock */ + u32 esc_clk_div_m; + + afe_clk_khz = DIV_ROUND_CLOSEST(intel_dsi->pclk * bpp, + intel_dsi->lane_count); + + esc_clk_div_m = DIV_ROUND_UP(afe_clk_khz, DSI_MAX_ESC_CLK); + + for_each_dsi_port(port, intel_dsi->ports) { + I915_WRITE(ICL_DSI_ESC_CLK_DIV(port), + esc_clk_div_m & ICL_ESC_CLK_DIV_MASK); + POSTING_READ(ICL_DSI_ESC_CLK_DIV(port)); + } + + for_each_dsi_port(port, 
intel_dsi->ports) { + I915_WRITE(ICL_DPHY_ESC_CLK_DIV(port), + esc_clk_div_m & ICL_ESC_CLK_DIV_MASK); + POSTING_READ(ICL_DPHY_ESC_CLK_DIV(port)); + } +} + +static void get_dsi_io_power_domains(struct drm_i915_private *dev_priv, + struct intel_dsi *intel_dsi) +{ + enum port port; + + for_each_dsi_port(port, intel_dsi->ports) { + WARN_ON(intel_dsi->io_wakeref[port]); + intel_dsi->io_wakeref[port] = + intel_display_power_get(dev_priv, + port == PORT_A ? + POWER_DOMAIN_PORT_DDI_A_IO : + POWER_DOMAIN_PORT_DDI_B_IO); + } +} + +static void gen11_dsi_enable_io_power(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + enum port port; + u32 tmp; + + for_each_dsi_port(port, intel_dsi->ports) { + tmp = I915_READ(ICL_DSI_IO_MODECTL(port)); + tmp |= COMBO_PHY_MODE_DSI; + I915_WRITE(ICL_DSI_IO_MODECTL(port), tmp); + } + + get_dsi_io_power_domains(dev_priv, intel_dsi); +} + +static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + enum port port; + + for_each_dsi_port(port, intel_dsi->ports) + intel_combo_phy_power_up_lanes(dev_priv, port, true, + intel_dsi->lane_count, false); +} + +static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + enum port port; + u32 tmp; + int lane; + + /* Step 4b(i) set loadgen select for transmit and aux lanes */ + for_each_dsi_port(port, intel_dsi->ports) { + tmp = I915_READ(ICL_PORT_TX_DW4_AUX(port)); + tmp &= ~LOADGEN_SELECT; + I915_WRITE(ICL_PORT_TX_DW4_AUX(port), tmp); + for (lane = 0; lane <= 3; lane++) { + tmp = I915_READ(ICL_PORT_TX_DW4_LN(lane, port)); + tmp &= ~LOADGEN_SELECT; + if (lane != 2) + tmp |= LOADGEN_SELECT; + I915_WRITE(ICL_PORT_TX_DW4_LN(lane, port), tmp); + } + } + + /* Step 4b(ii) set latency optimization for transmit and aux lanes */ + for_each_dsi_port(port, intel_dsi->ports) { + tmp = I915_READ(ICL_PORT_TX_DW2_AUX(port)); + tmp &= ~FRC_LATENCY_OPTIM_MASK; + tmp |= FRC_LATENCY_OPTIM_VAL(0x5); + I915_WRITE(ICL_PORT_TX_DW2_AUX(port), tmp); + tmp = I915_READ(ICL_PORT_TX_DW2_LN0(port)); + tmp &= ~FRC_LATENCY_OPTIM_MASK; + tmp |= FRC_LATENCY_OPTIM_VAL(0x5); + I915_WRITE(ICL_PORT_TX_DW2_GRP(port), tmp); + } + +} + +static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + u32 tmp; + enum port port; + + /* clear common keeper enable bit */ + for_each_dsi_port(port, intel_dsi->ports) { + tmp = I915_READ(ICL_PORT_PCS_DW1_LN0(port)); + tmp &= ~COMMON_KEEPER_EN; + I915_WRITE(ICL_PORT_PCS_DW1_GRP(port), tmp); + tmp = I915_READ(ICL_PORT_PCS_DW1_AUX(port)); + tmp &= ~COMMON_KEEPER_EN; + I915_WRITE(ICL_PORT_PCS_DW1_AUX(port), tmp); + } + + /* + * Set SUS Clock Config bitfield to 11b + * Note: loadgen select program is done + * as part of lane phy sequence configuration + */ + for_each_dsi_port(port, intel_dsi->ports) { + tmp = I915_READ(ICL_PORT_CL_DW5(port)); + tmp |= SUS_CLOCK_CONFIG; + I915_WRITE(ICL_PORT_CL_DW5(port), tmp); + } + + /* Clear training enable to change swing values */ + for_each_dsi_port(port, intel_dsi->ports) { + tmp = I915_READ(ICL_PORT_TX_DW5_LN0(port)); + tmp &= 
~TX_TRAINING_EN;
+		I915_WRITE(ICL_PORT_TX_DW5_GRP(port), tmp);
+		tmp = I915_READ(ICL_PORT_TX_DW5_AUX(port));
+		tmp &= ~TX_TRAINING_EN;
+		I915_WRITE(ICL_PORT_TX_DW5_AUX(port), tmp);
+	}
+
+	/* Program swing and de-emphasis */
+	dsi_program_swing_and_deemphasis(encoder);
+
+	/* Set training enable to trigger update */
+	for_each_dsi_port(port, intel_dsi->ports) {
+		tmp = I915_READ(ICL_PORT_TX_DW5_LN0(port));
+		tmp |= TX_TRAINING_EN;
+		I915_WRITE(ICL_PORT_TX_DW5_GRP(port), tmp);
+		tmp = I915_READ(ICL_PORT_TX_DW5_AUX(port));
+		tmp |= TX_TRAINING_EN;
+		I915_WRITE(ICL_PORT_TX_DW5_AUX(port), tmp);
+	}
+}
+
+static void gen11_dsi_enable_ddi_buffer(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	u32 tmp;
+	enum port port;
+
+	for_each_dsi_port(port, intel_dsi->ports) {
+		tmp = I915_READ(DDI_BUF_CTL(port));
+		tmp |= DDI_BUF_CTL_ENABLE;
+		I915_WRITE(DDI_BUF_CTL(port), tmp);
+
+		if (wait_for_us(!(I915_READ(DDI_BUF_CTL(port)) &
+				  DDI_BUF_IS_IDLE),
+				  500))
+			DRM_ERROR("DDI port:%c buffer idle\n", port_name(port));
+	}
+}
+
+static void gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	u32 tmp;
+	enum port port;
+
+	/* Program T-INIT master registers */
+	for_each_dsi_port(port, intel_dsi->ports) {
+		tmp = I915_READ(ICL_DSI_T_INIT_MASTER(port));
+		tmp &= ~MASTER_INIT_TIMER_MASK;
+		tmp |= intel_dsi->init_count;
+		I915_WRITE(ICL_DSI_T_INIT_MASTER(port), tmp);
+	}
+
+	/* Program DPHY clock lanes timings */
+	for_each_dsi_port(port, intel_dsi->ports) {
+		I915_WRITE(DPHY_CLK_TIMING_PARAM(port), intel_dsi->dphy_reg);
+
+		/* shadow register inside display core */
+		I915_WRITE(DSI_CLK_TIMING_PARAM(port), intel_dsi->dphy_reg);
+	}
+
+	/* Program DPHY data lanes timings */
+	for_each_dsi_port(port, intel_dsi->ports) {
+		I915_WRITE(DPHY_DATA_TIMING_PARAM(port),
+			   intel_dsi->dphy_data_lane_reg);
+
+		/* shadow register inside display core */
+		I915_WRITE(DSI_DATA_TIMING_PARAM(port),
+			   intel_dsi->dphy_data_lane_reg);
+	}
+
+	/*
+	 * If the DSI link is operating at or below 800 MHz,
+	 * TA_SURE should be overridden and programmed to
+	 * a value of '0' inside TA_PARAM_REGISTERS; otherwise
+	 * leave all fields at HW default values.
+ */ + if (intel_dsi_bitrate(intel_dsi) <= 800000) { + for_each_dsi_port(port, intel_dsi->ports) { + tmp = I915_READ(DPHY_TA_TIMING_PARAM(port)); + tmp &= ~TA_SURE_MASK; + tmp |= TA_SURE_OVERRIDE | TA_SURE(0); + I915_WRITE(DPHY_TA_TIMING_PARAM(port), tmp); + + /* shadow register inside display core */ + tmp = I915_READ(DSI_TA_TIMING_PARAM(port)); + tmp &= ~TA_SURE_MASK; + tmp |= TA_SURE_OVERRIDE | TA_SURE(0); + I915_WRITE(DSI_TA_TIMING_PARAM(port), tmp); + } + } +} + +static void gen11_dsi_gate_clocks(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + u32 tmp; + enum port port; + + mutex_lock(&dev_priv->dpll_lock); + tmp = I915_READ(DPCLKA_CFGCR0_ICL); + for_each_dsi_port(port, intel_dsi->ports) { + tmp |= DPCLKA_CFGCR0_DDI_CLK_OFF(port); + } + + I915_WRITE(DPCLKA_CFGCR0_ICL, tmp); + mutex_unlock(&dev_priv->dpll_lock); +} + +static void gen11_dsi_ungate_clocks(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + u32 tmp; + enum port port; + + mutex_lock(&dev_priv->dpll_lock); + tmp = I915_READ(DPCLKA_CFGCR0_ICL); + for_each_dsi_port(port, intel_dsi->ports) { + tmp &= ~DPCLKA_CFGCR0_DDI_CLK_OFF(port); + } + + I915_WRITE(DPCLKA_CFGCR0_ICL, tmp); + mutex_unlock(&dev_priv->dpll_lock); +} + +static void gen11_dsi_map_pll(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_shared_dpll *pll = crtc_state->shared_dpll; + enum port port; + u32 val; + + mutex_lock(&dev_priv->dpll_lock); + + val = I915_READ(DPCLKA_CFGCR0_ICL); + for_each_dsi_port(port, intel_dsi->ports) { + val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port); + val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port); + } + I915_WRITE(DPCLKA_CFGCR0_ICL, val); + + for_each_dsi_port(port, intel_dsi->ports) { + val &= ~DPCLKA_CFGCR0_DDI_CLK_OFF(port); + } + I915_WRITE(DPCLKA_CFGCR0_ICL, val); + + POSTING_READ(DPCLKA_CFGCR0_ICL); + + mutex_unlock(&dev_priv->dpll_lock); +} + +static void +gen11_dsi_configure_transcoder(struct intel_encoder *encoder, + const struct intel_crtc_state *pipe_config) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc); + enum pipe pipe = intel_crtc->pipe; + u32 tmp; + enum port port; + enum transcoder dsi_trans; + + for_each_dsi_port(port, intel_dsi->ports) { + dsi_trans = dsi_port_to_transcoder(port); + tmp = I915_READ(DSI_TRANS_FUNC_CONF(dsi_trans)); + + if (intel_dsi->eotp_pkt) + tmp &= ~EOTP_DISABLED; + else + tmp |= EOTP_DISABLED; + + /* enable link calibration if freq > 1.5Gbps */ + if (intel_dsi_bitrate(intel_dsi) >= 1500 * 1000) { + tmp &= ~LINK_CALIBRATION_MASK; + tmp |= CALIBRATION_ENABLED_INITIAL_ONLY; + } + + /* configure continuous clock */ + tmp &= ~CONTINUOUS_CLK_MASK; + if (intel_dsi->clock_stop) + tmp |= CLK_ENTER_LP_AFTER_DATA; + else + tmp |= CLK_HS_CONTINUOUS; + + /* configure buffer threshold limit to minimum */ + tmp &= ~PIX_BUF_THRESHOLD_MASK; + tmp |= PIX_BUF_THRESHOLD_1_4; + + /* set virtual channel to '0' */ + tmp &= ~PIX_VIRT_CHAN_MASK; + tmp |= PIX_VIRT_CHAN(0); + + /* program BGR transmission */ + if (intel_dsi->bgr_enabled) + tmp |= 
BGR_TRANSMISSION; + + /* select pixel format */ + tmp &= ~PIX_FMT_MASK; + switch (intel_dsi->pixel_format) { + default: + MISSING_CASE(intel_dsi->pixel_format); + /* fallthrough */ + case MIPI_DSI_FMT_RGB565: + tmp |= PIX_FMT_RGB565; + break; + case MIPI_DSI_FMT_RGB666_PACKED: + tmp |= PIX_FMT_RGB666_PACKED; + break; + case MIPI_DSI_FMT_RGB666: + tmp |= PIX_FMT_RGB666_LOOSE; + break; + case MIPI_DSI_FMT_RGB888: + tmp |= PIX_FMT_RGB888; + break; + } + + /* program DSI operation mode */ + if (is_vid_mode(intel_dsi)) { + tmp &= ~OP_MODE_MASK; + switch (intel_dsi->video_mode_format) { + default: + MISSING_CASE(intel_dsi->video_mode_format); + /* fallthrough */ + case VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS: + tmp |= VIDEO_MODE_SYNC_EVENT; + break; + case VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE: + tmp |= VIDEO_MODE_SYNC_PULSE; + break; + } + } + + I915_WRITE(DSI_TRANS_FUNC_CONF(dsi_trans), tmp); + } + + /* enable port sync mode if dual link */ + if (intel_dsi->dual_link) { + for_each_dsi_port(port, intel_dsi->ports) { + dsi_trans = dsi_port_to_transcoder(port); + tmp = I915_READ(TRANS_DDI_FUNC_CTL2(dsi_trans)); + tmp |= PORT_SYNC_MODE_ENABLE; + I915_WRITE(TRANS_DDI_FUNC_CTL2(dsi_trans), tmp); + } + + /* configure stream splitting */ + configure_dual_link_mode(encoder, pipe_config); + } + + for_each_dsi_port(port, intel_dsi->ports) { + dsi_trans = dsi_port_to_transcoder(port); + + /* select data lane width */ + tmp = I915_READ(TRANS_DDI_FUNC_CTL(dsi_trans)); + tmp &= ~DDI_PORT_WIDTH_MASK; + tmp |= DDI_PORT_WIDTH(intel_dsi->lane_count); + + /* select input pipe */ + tmp &= ~TRANS_DDI_EDP_INPUT_MASK; + switch (pipe) { + default: + MISSING_CASE(pipe); + /* fallthrough */ + case PIPE_A: + tmp |= TRANS_DDI_EDP_INPUT_A_ON; + break; + case PIPE_B: + tmp |= TRANS_DDI_EDP_INPUT_B_ONOFF; + break; + case PIPE_C: + tmp |= TRANS_DDI_EDP_INPUT_C_ONOFF; + break; + } + + /* enable DDI buffer */ + tmp |= TRANS_DDI_FUNC_ENABLE; + I915_WRITE(TRANS_DDI_FUNC_CTL(dsi_trans), tmp); + } + + /* wait for link ready */ + for_each_dsi_port(port, intel_dsi->ports) { + dsi_trans = dsi_port_to_transcoder(port); + if (wait_for_us((I915_READ(DSI_TRANS_FUNC_CONF(dsi_trans)) & + LINK_READY), 2500)) + DRM_ERROR("DSI link not ready\n"); + } +} + +static void +gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder, + const struct intel_crtc_state *pipe_config) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + const struct drm_display_mode *adjusted_mode = + &pipe_config->base.adjusted_mode; + enum port port; + enum transcoder dsi_trans; + /* horizontal timings */ + u16 htotal, hactive, hsync_start, hsync_end, hsync_size; + u16 hfront_porch, hback_porch; + /* vertical timings */ + u16 vtotal, vactive, vsync_start, vsync_end, vsync_shift; + + hactive = adjusted_mode->crtc_hdisplay; + htotal = adjusted_mode->crtc_htotal; + hsync_start = adjusted_mode->crtc_hsync_start; + hsync_end = adjusted_mode->crtc_hsync_end; + hsync_size = hsync_end - hsync_start; + hfront_porch = (adjusted_mode->crtc_hsync_start - + adjusted_mode->crtc_hdisplay); + hback_porch = (adjusted_mode->crtc_htotal - + adjusted_mode->crtc_hsync_end); + vactive = adjusted_mode->crtc_vdisplay; + vtotal = adjusted_mode->crtc_vtotal; + vsync_start = adjusted_mode->crtc_vsync_start; + vsync_end = adjusted_mode->crtc_vsync_end; + vsync_shift = hsync_start - htotal / 2; + + if (intel_dsi->dual_link) { + hactive /= 2; + if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) + hactive += 
intel_dsi->pixel_overlap;
+		htotal /= 2;
+	}
+
+	/* minimum hactive as per bspec: 256 pixels */
+	if (adjusted_mode->crtc_hdisplay < 256)
+		DRM_ERROR("hactive is less than 256 pixels\n");
+
+	/* if RGB666 format, then hactive must be a multiple of 4 pixels */
+	if (intel_dsi->pixel_format == MIPI_DSI_FMT_RGB666 && hactive % 4 != 0)
+		DRM_ERROR("hactive pixels are not a multiple of 4\n");
+
+	/* program TRANS_HTOTAL register */
+	for_each_dsi_port(port, intel_dsi->ports) {
+		dsi_trans = dsi_port_to_transcoder(port);
+		I915_WRITE(HTOTAL(dsi_trans),
+			   (hactive - 1) | ((htotal - 1) << 16));
+	}
+
+	/* TRANS_HSYNC register to be programmed only for video mode */
+	if (intel_dsi->operation_mode == INTEL_DSI_VIDEO_MODE) {
+		if (intel_dsi->video_mode_format ==
+		    VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE) {
+			/* BSPEC: hsync size should be at least 16 pixels */
+			if (hsync_size < 16)
+				DRM_ERROR("hsync size < 16 pixels\n");
+		}
+
+		if (hback_porch < 16)
+			DRM_ERROR("hback porch < 16 pixels\n");
+
+		if (intel_dsi->dual_link) {
+			hsync_start /= 2;
+			hsync_end /= 2;
+		}
+
+		for_each_dsi_port(port, intel_dsi->ports) {
+			dsi_trans = dsi_port_to_transcoder(port);
+			I915_WRITE(HSYNC(dsi_trans),
+				   (hsync_start - 1) | ((hsync_end - 1) << 16));
+		}
+	}
+
+	/* program TRANS_VTOTAL register */
+	for_each_dsi_port(port, intel_dsi->ports) {
+		dsi_trans = dsi_port_to_transcoder(port);
+		/*
+		 * FIXME: Programming this by assuming progressive mode, since
+		 * non-interlaced info from VBT is not saved inside
+		 * struct drm_display_mode.
+		 * For interlace mode: program required pixel minus 2
+		 */
+		I915_WRITE(VTOTAL(dsi_trans),
+			   (vactive - 1) | ((vtotal - 1) << 16));
+	}
+
+	if (vsync_end < vsync_start || vsync_end > vtotal)
+		DRM_ERROR("Invalid vsync_end value\n");
+
+	if (vsync_start < vactive)
+		DRM_ERROR("vsync_start less than vactive\n");
+
+	/* program TRANS_VSYNC register */
+	for_each_dsi_port(port, intel_dsi->ports) {
+		dsi_trans = dsi_port_to_transcoder(port);
+		I915_WRITE(VSYNC(dsi_trans),
+			   (vsync_start - 1) | ((vsync_end - 1) << 16));
+	}
+
+	/*
+	 * FIXME: It has to be programmed only for interlaced
+	 * modes. Put the check condition here once interlaced
+	 * info is available as described above.
+ * program TRANS_VSYNCSHIFT register + */ + for_each_dsi_port(port, intel_dsi->ports) { + dsi_trans = dsi_port_to_transcoder(port); + I915_WRITE(VSYNCSHIFT(dsi_trans), vsync_shift); + } +} + +static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + enum port port; + enum transcoder dsi_trans; + u32 tmp; + + for_each_dsi_port(port, intel_dsi->ports) { + dsi_trans = dsi_port_to_transcoder(port); + tmp = I915_READ(PIPECONF(dsi_trans)); + tmp |= PIPECONF_ENABLE; + I915_WRITE(PIPECONF(dsi_trans), tmp); + + /* wait for transcoder to be enabled */ + if (intel_wait_for_register(&dev_priv->uncore, + PIPECONF(dsi_trans), + I965_PIPECONF_ACTIVE, + I965_PIPECONF_ACTIVE, 10)) + DRM_ERROR("DSI transcoder not enabled\n"); + } +} + +static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + enum port port; + enum transcoder dsi_trans; + u32 tmp, hs_tx_timeout, lp_rx_timeout, ta_timeout, divisor, mul; + + /* + * escape clock count calculation: + * BYTE_CLK_COUNT = TIME_NS/(8 * UI) + * UI (nsec) = (10^6)/Bitrate + * TIME_NS = (BYTE_CLK_COUNT * 8 * 10^6)/ Bitrate + * ESCAPE_CLK_COUNT = TIME_NS/ESC_CLK_NS + */ + divisor = intel_dsi_tlpx_ns(intel_dsi) * intel_dsi_bitrate(intel_dsi) * 1000; + mul = 8 * 1000000; + hs_tx_timeout = DIV_ROUND_UP(intel_dsi->hs_tx_timeout * mul, + divisor); + lp_rx_timeout = DIV_ROUND_UP(intel_dsi->lp_rx_timeout * mul, divisor); + ta_timeout = DIV_ROUND_UP(intel_dsi->turn_arnd_val * mul, divisor); + + for_each_dsi_port(port, intel_dsi->ports) { + dsi_trans = dsi_port_to_transcoder(port); + + /* program hst_tx_timeout */ + tmp = I915_READ(DSI_HSTX_TO(dsi_trans)); + tmp &= ~HSTX_TIMEOUT_VALUE_MASK; + tmp |= HSTX_TIMEOUT_VALUE(hs_tx_timeout); + I915_WRITE(DSI_HSTX_TO(dsi_trans), tmp); + + /* FIXME: DSI_CALIB_TO */ + + /* program lp_rx_host timeout */ + tmp = I915_READ(DSI_LPRX_HOST_TO(dsi_trans)); + tmp &= ~LPRX_TIMEOUT_VALUE_MASK; + tmp |= LPRX_TIMEOUT_VALUE(lp_rx_timeout); + I915_WRITE(DSI_LPRX_HOST_TO(dsi_trans), tmp); + + /* FIXME: DSI_PWAIT_TO */ + + /* program turn around timeout */ + tmp = I915_READ(DSI_TA_TO(dsi_trans)); + tmp &= ~TA_TIMEOUT_VALUE_MASK; + tmp |= TA_TIMEOUT_VALUE(ta_timeout); + I915_WRITE(DSI_TA_TO(dsi_trans), tmp); + } +} + +static void +gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder, + const struct intel_crtc_state *pipe_config) +{ + /* step 4a: power up all lanes of the DDI used by DSI */ + gen11_dsi_power_up_lanes(encoder); + + /* step 4b: configure lane sequencing of the Combo-PHY transmitters */ + gen11_dsi_config_phy_lanes_sequence(encoder); + + /* step 4c: configure voltage swing and skew */ + gen11_dsi_voltage_swing_program_seq(encoder); + + /* enable DDI buffer */ + gen11_dsi_enable_ddi_buffer(encoder); + + /* setup D-PHY timings */ + gen11_dsi_setup_dphy_timings(encoder); + + /* step 4h: setup DSI protocol timeouts */ + gen11_dsi_setup_timeouts(encoder); + + /* Step (4h, 4i, 4j, 4k): Configure transcoder */ + gen11_dsi_configure_transcoder(encoder, pipe_config); + + /* Step 4l: Gate DDI clocks */ + gen11_dsi_gate_clocks(encoder); +} + +static void gen11_dsi_powerup_panel(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct 
mipi_dsi_device *dsi; + enum port port; + enum transcoder dsi_trans; + u32 tmp; + int ret; + + /* set maximum return packet size */ + for_each_dsi_port(port, intel_dsi->ports) { + dsi_trans = dsi_port_to_transcoder(port); + + /* + * FIXME: This uses the number of DW's currently in the payload + * receive queue. This is probably not what we want here. + */ + tmp = I915_READ(DSI_CMD_RXCTL(dsi_trans)); + tmp &= NUMBER_RX_PLOAD_DW_MASK; + /* multiply "Number Rx Payload DW" by 4 to get max value */ + tmp = tmp * 4; + dsi = intel_dsi->dsi_hosts[port]->device; + ret = mipi_dsi_set_maximum_return_packet_size(dsi, tmp); + if (ret < 0) + DRM_ERROR("error setting max return pkt size%d\n", tmp); + } + + /* panel power on related mipi dsi vbt sequences */ + intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_ON); + intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay); + intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET); + intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_INIT_OTP); + intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON); + + /* ensure all panel commands dispatched before enabling transcoder */ + wait_for_cmds_dispatched_to_panel(encoder); +} + +static void gen11_dsi_pre_pll_enable(struct intel_encoder *encoder, + const struct intel_crtc_state *pipe_config, + const struct drm_connector_state *conn_state) +{ + /* step2: enable IO power */ + gen11_dsi_enable_io_power(encoder); + + /* step3: enable DSI PLL */ + gen11_dsi_program_esc_clk_div(encoder); +} + +static void gen11_dsi_pre_enable(struct intel_encoder *encoder, + const struct intel_crtc_state *pipe_config, + const struct drm_connector_state *conn_state) +{ + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + + /* step3b */ + gen11_dsi_map_pll(encoder, pipe_config); + + /* step4: enable DSI port and DPHY */ + gen11_dsi_enable_port_and_phy(encoder, pipe_config); + + /* step5: program and powerup panel */ + gen11_dsi_powerup_panel(encoder); + + /* step6c: configure transcoder timings */ + gen11_dsi_set_transcoder_timings(encoder, pipe_config); + + /* step6d: enable dsi transcoder */ + gen11_dsi_enable_transcoder(encoder); + + /* step7: enable backlight */ + intel_panel_enable_backlight(pipe_config, conn_state); + intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON); +} + +static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + enum port port; + enum transcoder dsi_trans; + u32 tmp; + + for_each_dsi_port(port, intel_dsi->ports) { + dsi_trans = dsi_port_to_transcoder(port); + + /* disable transcoder */ + tmp = I915_READ(PIPECONF(dsi_trans)); + tmp &= ~PIPECONF_ENABLE; + I915_WRITE(PIPECONF(dsi_trans), tmp); + + /* wait for transcoder to be disabled */ + if (intel_wait_for_register(&dev_priv->uncore, + PIPECONF(dsi_trans), + I965_PIPECONF_ACTIVE, 0, 50)) + DRM_ERROR("DSI trancoder not disabled\n"); + } +} + +static void gen11_dsi_powerdown_panel(struct intel_encoder *encoder) +{ + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + + intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_OFF); + intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_ASSERT_RESET); + intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_OFF); + + /* ensure cmds dispatched to panel */ + wait_for_cmds_dispatched_to_panel(encoder); +} + +static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = 
to_i915(encoder->base.dev); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + enum port port; + enum transcoder dsi_trans; + u32 tmp; + + /* put dsi link in ULPS */ + for_each_dsi_port(port, intel_dsi->ports) { + dsi_trans = dsi_port_to_transcoder(port); + tmp = I915_READ(DSI_LP_MSG(dsi_trans)); + tmp |= LINK_ENTER_ULPS; + tmp &= ~LINK_ULPS_TYPE_LP11; + I915_WRITE(DSI_LP_MSG(dsi_trans), tmp); + + if (wait_for_us((I915_READ(DSI_LP_MSG(dsi_trans)) & + LINK_IN_ULPS), + 10)) + DRM_ERROR("DSI link not in ULPS\n"); + } + + /* disable ddi function */ + for_each_dsi_port(port, intel_dsi->ports) { + dsi_trans = dsi_port_to_transcoder(port); + tmp = I915_READ(TRANS_DDI_FUNC_CTL(dsi_trans)); + tmp &= ~TRANS_DDI_FUNC_ENABLE; + I915_WRITE(TRANS_DDI_FUNC_CTL(dsi_trans), tmp); + } + + /* disable port sync mode if dual link */ + if (intel_dsi->dual_link) { + for_each_dsi_port(port, intel_dsi->ports) { + dsi_trans = dsi_port_to_transcoder(port); + tmp = I915_READ(TRANS_DDI_FUNC_CTL2(dsi_trans)); + tmp &= ~PORT_SYNC_MODE_ENABLE; + I915_WRITE(TRANS_DDI_FUNC_CTL2(dsi_trans), tmp); + } + } +} + +static void gen11_dsi_disable_port(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + u32 tmp; + enum port port; + + gen11_dsi_ungate_clocks(encoder); + for_each_dsi_port(port, intel_dsi->ports) { + tmp = I915_READ(DDI_BUF_CTL(port)); + tmp &= ~DDI_BUF_CTL_ENABLE; + I915_WRITE(DDI_BUF_CTL(port), tmp); + + if (wait_for_us((I915_READ(DDI_BUF_CTL(port)) & + DDI_BUF_IS_IDLE), + 8)) + DRM_ERROR("DDI port:%c buffer not idle\n", + port_name(port)); + } + gen11_dsi_gate_clocks(encoder); +} + +static void gen11_dsi_disable_io_power(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + enum port port; + u32 tmp; + + for_each_dsi_port(port, intel_dsi->ports) { + intel_wakeref_t wakeref; + + wakeref = fetch_and_zero(&intel_dsi->io_wakeref[port]); + intel_display_power_put(dev_priv, + port == PORT_A ? 
+ POWER_DOMAIN_PORT_DDI_A_IO : + POWER_DOMAIN_PORT_DDI_B_IO, + wakeref); + } + + /* set mode to DDI */ + for_each_dsi_port(port, intel_dsi->ports) { + tmp = I915_READ(ICL_DSI_IO_MODECTL(port)); + tmp &= ~COMBO_PHY_MODE_DSI; + I915_WRITE(ICL_DSI_IO_MODECTL(port), tmp); + } +} + +static void gen11_dsi_disable(struct intel_encoder *encoder, + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) +{ + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + + /* step1: turn off backlight */ + intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_OFF); + intel_panel_disable_backlight(old_conn_state); + + /* step2d,e: disable transcoder and wait */ + gen11_dsi_disable_transcoder(encoder); + + /* step2f,g: powerdown panel */ + gen11_dsi_powerdown_panel(encoder); + + /* step2h,i,j: deconfig trancoder */ + gen11_dsi_deconfigure_trancoder(encoder); + + /* step3: disable port */ + gen11_dsi_disable_port(encoder); + + /* step4: disable IO power */ + gen11_dsi_disable_io_power(encoder); +} + +static void gen11_dsi_get_timings(struct intel_encoder *encoder, + struct intel_crtc_state *pipe_config) +{ + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct drm_display_mode *adjusted_mode = + &pipe_config->base.adjusted_mode; + + if (intel_dsi->dual_link) { + adjusted_mode->crtc_hdisplay *= 2; + if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) + adjusted_mode->crtc_hdisplay -= + intel_dsi->pixel_overlap; + adjusted_mode->crtc_htotal *= 2; + } + adjusted_mode->crtc_hblank_start = adjusted_mode->crtc_hdisplay; + adjusted_mode->crtc_hblank_end = adjusted_mode->crtc_htotal; + + if (intel_dsi->operation_mode == INTEL_DSI_VIDEO_MODE) { + if (intel_dsi->dual_link) { + adjusted_mode->crtc_hsync_start *= 2; + adjusted_mode->crtc_hsync_end *= 2; + } + } + adjusted_mode->crtc_vblank_start = adjusted_mode->crtc_vdisplay; + adjusted_mode->crtc_vblank_end = adjusted_mode->crtc_vtotal; +} + +static void gen11_dsi_get_config(struct intel_encoder *encoder, + struct intel_crtc_state *pipe_config) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + + /* FIXME: adapt icl_ddi_clock_get() for DSI and use that? 
*/ + pipe_config->port_clock = + cnl_calc_wrpll_link(dev_priv, &pipe_config->dpll_hw_state); + + pipe_config->base.adjusted_mode.crtc_clock = intel_dsi->pclk; + if (intel_dsi->dual_link) + pipe_config->base.adjusted_mode.crtc_clock *= 2; + + gen11_dsi_get_timings(encoder, pipe_config); + pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI); + pipe_config->pipe_bpp = bdw_get_pipemisc_bpp(crtc); +} + +static int gen11_dsi_compute_config(struct intel_encoder *encoder, + struct intel_crtc_state *pipe_config, + struct drm_connector_state *conn_state) +{ + struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi, + base); + struct intel_connector *intel_connector = intel_dsi->attached_connector; + struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc); + const struct drm_display_mode *fixed_mode = + intel_connector->panel.fixed_mode; + struct drm_display_mode *adjusted_mode = + &pipe_config->base.adjusted_mode; + + pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; + intel_fixed_panel_mode(fixed_mode, adjusted_mode); + intel_pch_panel_fitting(crtc, pipe_config, conn_state->scaling_mode); + + adjusted_mode->flags = 0; + + /* Dual link goes to trancoder DSI'0' */ + if (intel_dsi->ports == BIT(PORT_B)) + pipe_config->cpu_transcoder = TRANSCODER_DSI_1; + else + pipe_config->cpu_transcoder = TRANSCODER_DSI_0; + + pipe_config->clock_set = true; + pipe_config->port_clock = intel_dsi_bitrate(intel_dsi) / 5; + + return 0; +} + +static void gen11_dsi_get_power_domains(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state) +{ + get_dsi_io_power_domains(to_i915(encoder->base.dev), + enc_to_intel_dsi(&encoder->base)); +} + +static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder, + enum pipe *pipe) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + enum transcoder dsi_trans; + intel_wakeref_t wakeref; + enum port port; + bool ret = false; + u32 tmp; + + wakeref = intel_display_power_get_if_enabled(dev_priv, + encoder->power_domain); + if (!wakeref) + return false; + + for_each_dsi_port(port, intel_dsi->ports) { + dsi_trans = dsi_port_to_transcoder(port); + tmp = I915_READ(TRANS_DDI_FUNC_CTL(dsi_trans)); + switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { + case TRANS_DDI_EDP_INPUT_A_ON: + *pipe = PIPE_A; + break; + case TRANS_DDI_EDP_INPUT_B_ONOFF: + *pipe = PIPE_B; + break; + case TRANS_DDI_EDP_INPUT_C_ONOFF: + *pipe = PIPE_C; + break; + default: + DRM_ERROR("Invalid PIPE input\n"); + goto out; + } + + tmp = I915_READ(PIPECONF(dsi_trans)); + ret = tmp & PIPECONF_ENABLE; + } +out: + intel_display_power_put(dev_priv, encoder->power_domain, wakeref); + return ret; +} + +static void gen11_dsi_encoder_destroy(struct drm_encoder *encoder) +{ + intel_encoder_destroy(encoder); +} + +static const struct drm_encoder_funcs gen11_dsi_encoder_funcs = { + .destroy = gen11_dsi_encoder_destroy, +}; + +static const struct drm_connector_funcs gen11_dsi_connector_funcs = { + .late_register = intel_connector_register, + .early_unregister = intel_connector_unregister, + .destroy = intel_connector_destroy, + .fill_modes = drm_helper_probe_single_connector_modes, + .atomic_get_property = intel_digital_connector_atomic_get_property, + .atomic_set_property = intel_digital_connector_atomic_set_property, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, + .atomic_duplicate_state = intel_digital_connector_duplicate_state, +}; + +static const struct drm_connector_helper_funcs 
gen11_dsi_connector_helper_funcs = { + .get_modes = intel_dsi_get_modes, + .mode_valid = intel_dsi_mode_valid, + .atomic_check = intel_digital_connector_atomic_check, +}; + +static int gen11_dsi_host_attach(struct mipi_dsi_host *host, + struct mipi_dsi_device *dsi) +{ + return 0; +} + +static int gen11_dsi_host_detach(struct mipi_dsi_host *host, + struct mipi_dsi_device *dsi) +{ + return 0; +} + +static ssize_t gen11_dsi_host_transfer(struct mipi_dsi_host *host, + const struct mipi_dsi_msg *msg) +{ + struct intel_dsi_host *intel_dsi_host = to_intel_dsi_host(host); + struct mipi_dsi_packet dsi_pkt; + ssize_t ret; + bool enable_lpdt = false; + + ret = mipi_dsi_create_packet(&dsi_pkt, msg); + if (ret < 0) + return ret; + + if (msg->flags & MIPI_DSI_MSG_USE_LPM) + enable_lpdt = true; + + /* send packet header */ + ret = dsi_send_pkt_hdr(intel_dsi_host, dsi_pkt, enable_lpdt); + if (ret < 0) + return ret; + + /* only long packet contains payload */ + if (mipi_dsi_packet_format_is_long(msg->type)) { + ret = dsi_send_pkt_payld(intel_dsi_host, dsi_pkt); + if (ret < 0) + return ret; + } + + //TODO: add payload receive code if needed + + ret = sizeof(dsi_pkt.header) + dsi_pkt.payload_length; + + return ret; +} + +static const struct mipi_dsi_host_ops gen11_dsi_host_ops = { + .attach = gen11_dsi_host_attach, + .detach = gen11_dsi_host_detach, + .transfer = gen11_dsi_host_transfer, +}; + +#define ICL_PREPARE_CNT_MAX 0x7 +#define ICL_CLK_ZERO_CNT_MAX 0xf +#define ICL_TRAIL_CNT_MAX 0x7 +#define ICL_TCLK_PRE_CNT_MAX 0x3 +#define ICL_TCLK_POST_CNT_MAX 0x7 +#define ICL_HS_ZERO_CNT_MAX 0xf +#define ICL_EXIT_ZERO_CNT_MAX 0x7 + +static void icl_dphy_param_init(struct intel_dsi *intel_dsi) +{ + struct drm_device *dev = intel_dsi->base.base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); + struct mipi_config *mipi_config = dev_priv->vbt.dsi.config; + u32 tlpx_ns; + u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt; + u32 ths_prepare_ns, tclk_trail_ns; + u32 hs_zero_cnt; + u32 tclk_pre_cnt, tclk_post_cnt; + + tlpx_ns = intel_dsi_tlpx_ns(intel_dsi); + + tclk_trail_ns = max(mipi_config->tclk_trail, mipi_config->ths_trail); + ths_prepare_ns = max(mipi_config->ths_prepare, + mipi_config->tclk_prepare); + + /* + * prepare cnt in escape clocks + * this field represents a hexadecimal value with a precision + * of 1.2 – i.e. the most significant bit is the integer + * and the least significant 2 bits are fraction bits. 
+ * so, the field can represent a range of 0.25 to 1.75 + */ + prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * 4, tlpx_ns); + if (prepare_cnt > ICL_PREPARE_CNT_MAX) { + DRM_DEBUG_KMS("prepare_cnt out of range (%d)\n", prepare_cnt); + prepare_cnt = ICL_PREPARE_CNT_MAX; + } + + /* clk zero count in escape clocks */ + clk_zero_cnt = DIV_ROUND_UP(mipi_config->tclk_prepare_clkzero - + ths_prepare_ns, tlpx_ns); + if (clk_zero_cnt > ICL_CLK_ZERO_CNT_MAX) { + DRM_DEBUG_KMS("clk_zero_cnt out of range (%d)\n", clk_zero_cnt); + clk_zero_cnt = ICL_CLK_ZERO_CNT_MAX; + } + + /* trail cnt in escape clocks*/ + trail_cnt = DIV_ROUND_UP(tclk_trail_ns, tlpx_ns); + if (trail_cnt > ICL_TRAIL_CNT_MAX) { + DRM_DEBUG_KMS("trail_cnt out of range (%d)\n", trail_cnt); + trail_cnt = ICL_TRAIL_CNT_MAX; + } + + /* tclk pre count in escape clocks */ + tclk_pre_cnt = DIV_ROUND_UP(mipi_config->tclk_pre, tlpx_ns); + if (tclk_pre_cnt > ICL_TCLK_PRE_CNT_MAX) { + DRM_DEBUG_KMS("tclk_pre_cnt out of range (%d)\n", tclk_pre_cnt); + tclk_pre_cnt = ICL_TCLK_PRE_CNT_MAX; + } + + /* tclk post count in escape clocks */ + tclk_post_cnt = DIV_ROUND_UP(mipi_config->tclk_post, tlpx_ns); + if (tclk_post_cnt > ICL_TCLK_POST_CNT_MAX) { + DRM_DEBUG_KMS("tclk_post_cnt out of range (%d)\n", tclk_post_cnt); + tclk_post_cnt = ICL_TCLK_POST_CNT_MAX; + } + + /* hs zero cnt in escape clocks */ + hs_zero_cnt = DIV_ROUND_UP(mipi_config->ths_prepare_hszero - + ths_prepare_ns, tlpx_ns); + if (hs_zero_cnt > ICL_HS_ZERO_CNT_MAX) { + DRM_DEBUG_KMS("hs_zero_cnt out of range (%d)\n", hs_zero_cnt); + hs_zero_cnt = ICL_HS_ZERO_CNT_MAX; + } + + /* hs exit zero cnt in escape clocks */ + exit_zero_cnt = DIV_ROUND_UP(mipi_config->ths_exit, tlpx_ns); + if (exit_zero_cnt > ICL_EXIT_ZERO_CNT_MAX) { + DRM_DEBUG_KMS("exit_zero_cnt out of range (%d)\n", exit_zero_cnt); + exit_zero_cnt = ICL_EXIT_ZERO_CNT_MAX; + } + + /* clock lane dphy timings */ + intel_dsi->dphy_reg = (CLK_PREPARE_OVERRIDE | + CLK_PREPARE(prepare_cnt) | + CLK_ZERO_OVERRIDE | + CLK_ZERO(clk_zero_cnt) | + CLK_PRE_OVERRIDE | + CLK_PRE(tclk_pre_cnt) | + CLK_POST_OVERRIDE | + CLK_POST(tclk_post_cnt) | + CLK_TRAIL_OVERRIDE | + CLK_TRAIL(trail_cnt)); + + /* data lanes dphy timings */ + intel_dsi->dphy_data_lane_reg = (HS_PREPARE_OVERRIDE | + HS_PREPARE(prepare_cnt) | + HS_ZERO_OVERRIDE | + HS_ZERO(hs_zero_cnt) | + HS_TRAIL_OVERRIDE | + HS_TRAIL(trail_cnt) | + HS_EXIT_OVERRIDE | + HS_EXIT(exit_zero_cnt)); + + intel_dsi_log_params(intel_dsi); +} + +void icl_dsi_init(struct drm_i915_private *dev_priv) +{ + struct drm_device *dev = &dev_priv->drm; + struct intel_dsi *intel_dsi; + struct intel_encoder *encoder; + struct intel_connector *intel_connector; + struct drm_connector *connector; + struct drm_display_mode *fixed_mode; + enum port port; + + if (!intel_bios_is_dsi_present(dev_priv, &port)) + return; + + intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL); + if (!intel_dsi) + return; + + intel_connector = intel_connector_alloc(); + if (!intel_connector) { + kfree(intel_dsi); + return; + } + + encoder = &intel_dsi->base; + intel_dsi->attached_connector = intel_connector; + connector = &intel_connector->base; + + /* register DSI encoder with DRM subsystem */ + drm_encoder_init(dev, &encoder->base, &gen11_dsi_encoder_funcs, + DRM_MODE_ENCODER_DSI, "DSI %c", port_name(port)); + + encoder->pre_pll_enable = gen11_dsi_pre_pll_enable; + encoder->pre_enable = gen11_dsi_pre_enable; + encoder->disable = gen11_dsi_disable; + encoder->port = port; + encoder->get_config = gen11_dsi_get_config; + encoder->update_pipe = 
intel_panel_update_backlight; + encoder->compute_config = gen11_dsi_compute_config; + encoder->get_hw_state = gen11_dsi_get_hw_state; + encoder->type = INTEL_OUTPUT_DSI; + encoder->cloneable = 0; + encoder->crtc_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C); + encoder->power_domain = POWER_DOMAIN_PORT_DSI; + encoder->get_power_domains = gen11_dsi_get_power_domains; + + /* register DSI connector with DRM subsystem */ + drm_connector_init(dev, connector, &gen11_dsi_connector_funcs, + DRM_MODE_CONNECTOR_DSI); + drm_connector_helper_add(connector, &gen11_dsi_connector_helper_funcs); + connector->display_info.subpixel_order = SubPixelHorizontalRGB; + connector->interlace_allowed = false; + connector->doublescan_allowed = false; + intel_connector->get_hw_state = intel_connector_get_hw_state; + + /* attach connector to encoder */ + intel_connector_attach_encoder(intel_connector, encoder); + + mutex_lock(&dev->mode_config.mutex); + fixed_mode = intel_panel_vbt_fixed_mode(intel_connector); + mutex_unlock(&dev->mode_config.mutex); + + if (!fixed_mode) { + DRM_ERROR("DSI fixed mode info missing\n"); + goto err; + } + + intel_panel_init(&intel_connector->panel, fixed_mode, NULL); + intel_panel_setup_backlight(connector, INVALID_PIPE); + + if (dev_priv->vbt.dsi.config->dual_link) + intel_dsi->ports = BIT(PORT_A) | BIT(PORT_B); + else + intel_dsi->ports = BIT(port); + + intel_dsi->dcs_backlight_ports = dev_priv->vbt.dsi.bl_ports; + intel_dsi->dcs_cabc_ports = dev_priv->vbt.dsi.cabc_ports; + + for_each_dsi_port(port, intel_dsi->ports) { + struct intel_dsi_host *host; + + host = intel_dsi_host_init(intel_dsi, &gen11_dsi_host_ops, port); + if (!host) + goto err; + + intel_dsi->dsi_hosts[port] = host; + } + + if (!intel_dsi_vbt_init(intel_dsi, MIPI_DSI_GENERIC_PANEL_ID)) { + DRM_DEBUG_KMS("no device found\n"); + goto err; + } + + icl_dphy_param_init(intel_dsi); + return; + +err: + drm_encoder_cleanup(&encoder->base); + kfree(intel_dsi); + kfree(intel_connector); +} diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c new file mode 100644 index 000000000000..3fcf2f84bcce --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_crt.c @@ -0,0 +1,1069 @@ +/* + * Copyright © 2006-2007 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ * + * Authors: + * Eric Anholt + */ + +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "i915_drv.h" +#include "intel_connector.h" +#include "intel_crt.h" +#include "intel_ddi.h" +#include "intel_drv.h" +#include "intel_fifo_underrun.h" +#include "intel_gmbus.h" +#include "intel_hotplug.h" + +/* Here's the desired hotplug mode */ +#define ADPA_HOTPLUG_BITS (ADPA_CRT_HOTPLUG_PERIOD_128 | \ + ADPA_CRT_HOTPLUG_WARMUP_10MS | \ + ADPA_CRT_HOTPLUG_SAMPLE_4S | \ + ADPA_CRT_HOTPLUG_VOLTAGE_50 | \ + ADPA_CRT_HOTPLUG_VOLREF_325MV | \ + ADPA_CRT_HOTPLUG_ENABLE) + +struct intel_crt { + struct intel_encoder base; + /* DPMS state is stored in the connector, which we need in the + * encoder's enable/disable callbacks */ + struct intel_connector *connector; + bool force_hotplug_required; + i915_reg_t adpa_reg; +}; + +static struct intel_crt *intel_encoder_to_crt(struct intel_encoder *encoder) +{ + return container_of(encoder, struct intel_crt, base); +} + +static struct intel_crt *intel_attached_crt(struct drm_connector *connector) +{ + return intel_encoder_to_crt(intel_attached_encoder(connector)); +} + +bool intel_crt_port_enabled(struct drm_i915_private *dev_priv, + i915_reg_t adpa_reg, enum pipe *pipe) +{ + u32 val; + + val = I915_READ(adpa_reg); + + /* asserts want to know the pipe even if the port is disabled */ + if (HAS_PCH_CPT(dev_priv)) + *pipe = (val & ADPA_PIPE_SEL_MASK_CPT) >> ADPA_PIPE_SEL_SHIFT_CPT; + else + *pipe = (val & ADPA_PIPE_SEL_MASK) >> ADPA_PIPE_SEL_SHIFT; + + return val & ADPA_DAC_ENABLE; +} + +static bool intel_crt_get_hw_state(struct intel_encoder *encoder, + enum pipe *pipe) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_crt *crt = intel_encoder_to_crt(encoder); + intel_wakeref_t wakeref; + bool ret; + + wakeref = intel_display_power_get_if_enabled(dev_priv, + encoder->power_domain); + if (!wakeref) + return false; + + ret = intel_crt_port_enabled(dev_priv, crt->adpa_reg, pipe); + + intel_display_power_put(dev_priv, encoder->power_domain, wakeref); + + return ret; +} + +static unsigned int intel_crt_get_flags(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_crt *crt = intel_encoder_to_crt(encoder); + u32 tmp, flags = 0; + + tmp = I915_READ(crt->adpa_reg); + + if (tmp & ADPA_HSYNC_ACTIVE_HIGH) + flags |= DRM_MODE_FLAG_PHSYNC; + else + flags |= DRM_MODE_FLAG_NHSYNC; + + if (tmp & ADPA_VSYNC_ACTIVE_HIGH) + flags |= DRM_MODE_FLAG_PVSYNC; + else + flags |= DRM_MODE_FLAG_NVSYNC; + + return flags; +} + +static void intel_crt_get_config(struct intel_encoder *encoder, + struct intel_crtc_state *pipe_config) +{ + pipe_config->output_types |= BIT(INTEL_OUTPUT_ANALOG); + + pipe_config->base.adjusted_mode.flags |= intel_crt_get_flags(encoder); + + pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock; +} + +static void hsw_crt_get_config(struct intel_encoder *encoder, + struct intel_crtc_state *pipe_config) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + + intel_ddi_get_config(encoder, pipe_config); + + pipe_config->base.adjusted_mode.flags &= ~(DRM_MODE_FLAG_PHSYNC | + DRM_MODE_FLAG_NHSYNC | + DRM_MODE_FLAG_PVSYNC | + DRM_MODE_FLAG_NVSYNC); + pipe_config->base.adjusted_mode.flags |= intel_crt_get_flags(encoder); + + pipe_config->base.adjusted_mode.crtc_clock = lpt_get_iclkip(dev_priv); +} + +/* Note: The caller is required to filter out dpms modes not supported by the + * platform. 
*/ +static void intel_crt_set_dpms(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + int mode) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_crt *crt = intel_encoder_to_crt(encoder); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode; + u32 adpa; + + if (INTEL_GEN(dev_priv) >= 5) + adpa = ADPA_HOTPLUG_BITS; + else + adpa = 0; + + if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) + adpa |= ADPA_HSYNC_ACTIVE_HIGH; + if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) + adpa |= ADPA_VSYNC_ACTIVE_HIGH; + + /* For CPT allow 3 pipe config, for others just use A or B */ + if (HAS_PCH_LPT(dev_priv)) + ; /* Those bits don't exist here */ + else if (HAS_PCH_CPT(dev_priv)) + adpa |= ADPA_PIPE_SEL_CPT(crtc->pipe); + else + adpa |= ADPA_PIPE_SEL(crtc->pipe); + + if (!HAS_PCH_SPLIT(dev_priv)) + I915_WRITE(BCLRPAT(crtc->pipe), 0); + + switch (mode) { + case DRM_MODE_DPMS_ON: + adpa |= ADPA_DAC_ENABLE; + break; + case DRM_MODE_DPMS_STANDBY: + adpa |= ADPA_DAC_ENABLE | ADPA_HSYNC_CNTL_DISABLE; + break; + case DRM_MODE_DPMS_SUSPEND: + adpa |= ADPA_DAC_ENABLE | ADPA_VSYNC_CNTL_DISABLE; + break; + case DRM_MODE_DPMS_OFF: + adpa |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE; + break; + } + + I915_WRITE(crt->adpa_reg, adpa); +} + +static void intel_disable_crt(struct intel_encoder *encoder, + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) +{ + intel_crt_set_dpms(encoder, old_crtc_state, DRM_MODE_DPMS_OFF); +} + +static void pch_disable_crt(struct intel_encoder *encoder, + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) +{ +} + +static void pch_post_disable_crt(struct intel_encoder *encoder, + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) +{ + intel_disable_crt(encoder, old_crtc_state, old_conn_state); +} + +static void hsw_disable_crt(struct intel_encoder *encoder, + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + + WARN_ON(!old_crtc_state->has_pch_encoder); + + intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false); +} + +static void hsw_post_disable_crt(struct intel_encoder *encoder, + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + + intel_ddi_disable_pipe_clock(old_crtc_state); + + pch_post_disable_crt(encoder, old_crtc_state, old_conn_state); + + lpt_disable_pch_transcoder(dev_priv); + lpt_disable_iclkip(dev_priv); + + intel_ddi_fdi_post_disable(encoder, old_crtc_state, old_conn_state); + + WARN_ON(!old_crtc_state->has_pch_encoder); + + intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true); +} + +static void hsw_pre_pll_enable_crt(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + + WARN_ON(!crtc_state->has_pch_encoder); + + intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false); +} + +static void hsw_pre_enable_crt(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + struct drm_i915_private *dev_priv = 
to_i915(encoder->base.dev); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + enum pipe pipe = crtc->pipe; + + WARN_ON(!crtc_state->has_pch_encoder); + + intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); + + dev_priv->display.fdi_link_train(crtc, crtc_state); + + intel_ddi_enable_pipe_clock(crtc_state); +} + +static void hsw_enable_crt(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + enum pipe pipe = crtc->pipe; + + WARN_ON(!crtc_state->has_pch_encoder); + + intel_crt_set_dpms(encoder, crtc_state, DRM_MODE_DPMS_ON); + + intel_wait_for_vblank(dev_priv, pipe); + intel_wait_for_vblank(dev_priv, pipe); + intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); + intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true); +} + +static void intel_enable_crt(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + intel_crt_set_dpms(encoder, crtc_state, DRM_MODE_DPMS_ON); +} + +static enum drm_mode_status +intel_crt_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + struct drm_device *dev = connector->dev; + struct drm_i915_private *dev_priv = to_i915(dev); + int max_dotclk = dev_priv->max_dotclk_freq; + int max_clock; + + if (mode->flags & DRM_MODE_FLAG_DBLSCAN) + return MODE_NO_DBLESCAN; + + if (mode->clock < 25000) + return MODE_CLOCK_LOW; + + if (HAS_PCH_LPT(dev_priv)) + max_clock = 180000; + else if (IS_VALLEYVIEW(dev_priv)) + /* + * 270 MHz due to current DPLL limits, + * DAC limit supposedly 355 MHz. + */ + max_clock = 270000; + else if (IS_GEN_RANGE(dev_priv, 3, 4)) + max_clock = 400000; + else + max_clock = 350000; + if (mode->clock > max_clock) + return MODE_CLOCK_HIGH; + + if (mode->clock > max_dotclk) + return MODE_CLOCK_HIGH; + + /* The FDI receiver on LPT only supports 8bpc and only has 2 lanes. 
*/ + if (HAS_PCH_LPT(dev_priv) && + (ironlake_get_lanes_required(mode->clock, 270000, 24) > 2)) + return MODE_CLOCK_HIGH; + + /* HSW/BDW FDI limited to 4k */ + if (mode->hdisplay > 4096) + return MODE_H_ILLEGAL; + + return MODE_OK; +} + +static int intel_crt_compute_config(struct intel_encoder *encoder, + struct intel_crtc_state *pipe_config, + struct drm_connector_state *conn_state) +{ + struct drm_display_mode *adjusted_mode = + &pipe_config->base.adjusted_mode; + + if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) + return -EINVAL; + + pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; + + return 0; +} + +static int pch_crt_compute_config(struct intel_encoder *encoder, + struct intel_crtc_state *pipe_config, + struct drm_connector_state *conn_state) +{ + struct drm_display_mode *adjusted_mode = + &pipe_config->base.adjusted_mode; + + if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) + return -EINVAL; + + pipe_config->has_pch_encoder = true; + pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; + + return 0; +} + +static int hsw_crt_compute_config(struct intel_encoder *encoder, + struct intel_crtc_state *pipe_config, + struct drm_connector_state *conn_state) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct drm_display_mode *adjusted_mode = + &pipe_config->base.adjusted_mode; + + if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) + return -EINVAL; + + /* HSW/BDW FDI limited to 4k */ + if (adjusted_mode->crtc_hdisplay > 4096 || + adjusted_mode->crtc_hblank_start > 4096) + return -EINVAL; + + pipe_config->has_pch_encoder = true; + pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; + + /* LPT FDI RX only supports 8bpc. */ + if (HAS_PCH_LPT(dev_priv)) { + if (pipe_config->bw_constrained && pipe_config->pipe_bpp < 24) { + DRM_DEBUG_KMS("LPT only supports 24bpp\n"); + return -EINVAL; + } + + pipe_config->pipe_bpp = 24; + } + + /* FDI must always be 2.7 GHz */ + pipe_config->port_clock = 135000 * 2; + + return 0; +} + +static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + struct intel_crt *crt = intel_attached_crt(connector); + struct drm_i915_private *dev_priv = to_i915(dev); + u32 adpa; + bool ret; + + /* The first time through, trigger an explicit detection cycle */ + if (crt->force_hotplug_required) { + bool turn_off_dac = HAS_PCH_SPLIT(dev_priv); + u32 save_adpa; + + crt->force_hotplug_required = 0; + + save_adpa = adpa = I915_READ(crt->adpa_reg); + DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa); + + adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER; + if (turn_off_dac) + adpa &= ~ADPA_DAC_ENABLE; + + I915_WRITE(crt->adpa_reg, adpa); + + if (intel_wait_for_register(&dev_priv->uncore, + crt->adpa_reg, + ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 0, + 1000)) + DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER"); + + if (turn_off_dac) { + I915_WRITE(crt->adpa_reg, save_adpa); + POSTING_READ(crt->adpa_reg); + } + } + + /* Check the status to see if both blue and green are on now */ + adpa = I915_READ(crt->adpa_reg); + if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0) + ret = true; + else + ret = false; + DRM_DEBUG_KMS("ironlake hotplug adpa=0x%x, result %d\n", adpa, ret); + + return ret; +} + +static bool valleyview_crt_detect_hotplug(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + struct intel_crt *crt = intel_attached_crt(connector); + struct drm_i915_private *dev_priv = to_i915(dev); + bool reenable_hpd; + u32 adpa; + bool ret; + u32 save_adpa; + + /* + * 
Doing a force trigger causes a hpd interrupt to get sent, which can + * get us stuck in a loop if we're polling: + * - We enable power wells and reset the ADPA + * - output_poll_exec does force probe on VGA, triggering a hpd + * - HPD handler waits for poll to unlock dev->mode_config.mutex + * - output_poll_exec shuts off the ADPA, unlocks + * dev->mode_config.mutex + * - HPD handler runs, resets ADPA and brings us back to the start + * + * Just disable HPD interrupts here to prevent this + */ + reenable_hpd = intel_hpd_disable(dev_priv, crt->base.hpd_pin); + + save_adpa = adpa = I915_READ(crt->adpa_reg); + DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa); + + adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER; + + I915_WRITE(crt->adpa_reg, adpa); + + if (intel_wait_for_register(&dev_priv->uncore, + crt->adpa_reg, + ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 0, + 1000)) { + DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER"); + I915_WRITE(crt->adpa_reg, save_adpa); + } + + /* Check the status to see if both blue and green are on now */ + adpa = I915_READ(crt->adpa_reg); + if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0) + ret = true; + else + ret = false; + + DRM_DEBUG_KMS("valleyview hotplug adpa=0x%x, result %d\n", adpa, ret); + + if (reenable_hpd) + intel_hpd_enable(dev_priv, crt->base.hpd_pin); + + return ret; +} + +static bool intel_crt_detect_hotplug(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + struct drm_i915_private *dev_priv = to_i915(dev); + u32 stat; + bool ret = false; + int i, tries = 0; + + if (HAS_PCH_SPLIT(dev_priv)) + return intel_ironlake_crt_detect_hotplug(connector); + + if (IS_VALLEYVIEW(dev_priv)) + return valleyview_crt_detect_hotplug(connector); + + /* + * On 4 series desktop, CRT detect sequence need to be done twice + * to get a reliable result. 
+ */ + + if (IS_G45(dev_priv)) + tries = 2; + else + tries = 1; + + for (i = 0; i < tries ; i++) { + /* turn on the FORCE_DETECT */ + i915_hotplug_interrupt_update(dev_priv, + CRT_HOTPLUG_FORCE_DETECT, + CRT_HOTPLUG_FORCE_DETECT); + /* wait for FORCE_DETECT to go off */ + if (intel_wait_for_register(&dev_priv->uncore, PORT_HOTPLUG_EN, + CRT_HOTPLUG_FORCE_DETECT, 0, + 1000)) + DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off"); + } + + stat = I915_READ(PORT_HOTPLUG_STAT); + if ((stat & CRT_HOTPLUG_MONITOR_MASK) != CRT_HOTPLUG_MONITOR_NONE) + ret = true; + + /* clear the interrupt we just generated, if any */ + I915_WRITE(PORT_HOTPLUG_STAT, CRT_HOTPLUG_INT_STATUS); + + i915_hotplug_interrupt_update(dev_priv, CRT_HOTPLUG_FORCE_DETECT, 0); + + return ret; +} + +static struct edid *intel_crt_get_edid(struct drm_connector *connector, + struct i2c_adapter *i2c) +{ + struct edid *edid; + + edid = drm_get_edid(connector, i2c); + + if (!edid && !intel_gmbus_is_forced_bit(i2c)) { + DRM_DEBUG_KMS("CRT GMBUS EDID read failed, retry using GPIO bit-banging\n"); + intel_gmbus_force_bit(i2c, true); + edid = drm_get_edid(connector, i2c); + intel_gmbus_force_bit(i2c, false); + } + + return edid; +} + +/* local version of intel_ddc_get_modes() to use intel_crt_get_edid() */ +static int intel_crt_ddc_get_modes(struct drm_connector *connector, + struct i2c_adapter *adapter) +{ + struct edid *edid; + int ret; + + edid = intel_crt_get_edid(connector, adapter); + if (!edid) + return 0; + + ret = intel_connector_update_modes(connector, edid); + kfree(edid); + + return ret; +} + +static bool intel_crt_detect_ddc(struct drm_connector *connector) +{ + struct intel_crt *crt = intel_attached_crt(connector); + struct drm_i915_private *dev_priv = to_i915(crt->base.base.dev); + struct edid *edid; + struct i2c_adapter *i2c; + bool ret = false; + + BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG); + + i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->vbt.crt_ddc_pin); + edid = intel_crt_get_edid(connector, i2c); + + if (edid) { + bool is_digital = edid->input & DRM_EDID_INPUT_DIGITAL; + + /* + * This may be a DVI-I connector with a shared DDC + * link between analog and digital outputs, so we + * have to check the EDID input spec of the attached device. 
+ */ + if (!is_digital) { + DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n"); + ret = true; + } else { + DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n"); + } + } else { + DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [no valid EDID found]\n"); + } + + kfree(edid); + + return ret; +} + +static enum drm_connector_status +intel_crt_load_detect(struct intel_crt *crt, u32 pipe) +{ + struct drm_device *dev = crt->base.base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_uncore *uncore = &dev_priv->uncore; + u32 save_bclrpat; + u32 save_vtotal; + u32 vtotal, vactive; + u32 vsample; + u32 vblank, vblank_start, vblank_end; + u32 dsl; + i915_reg_t bclrpat_reg, vtotal_reg, + vblank_reg, vsync_reg, pipeconf_reg, pipe_dsl_reg; + u8 st00; + enum drm_connector_status status; + + DRM_DEBUG_KMS("starting load-detect on CRT\n"); + + bclrpat_reg = BCLRPAT(pipe); + vtotal_reg = VTOTAL(pipe); + vblank_reg = VBLANK(pipe); + vsync_reg = VSYNC(pipe); + pipeconf_reg = PIPECONF(pipe); + pipe_dsl_reg = PIPEDSL(pipe); + + save_bclrpat = intel_uncore_read(uncore, bclrpat_reg); + save_vtotal = intel_uncore_read(uncore, vtotal_reg); + vblank = intel_uncore_read(uncore, vblank_reg); + + vtotal = ((save_vtotal >> 16) & 0xfff) + 1; + vactive = (save_vtotal & 0x7ff) + 1; + + vblank_start = (vblank & 0xfff) + 1; + vblank_end = ((vblank >> 16) & 0xfff) + 1; + + /* Set the border color to purple. */ + intel_uncore_write(uncore, bclrpat_reg, 0x500050); + + if (!IS_GEN(dev_priv, 2)) { + u32 pipeconf = intel_uncore_read(uncore, pipeconf_reg); + intel_uncore_write(uncore, + pipeconf_reg, + pipeconf | PIPECONF_FORCE_BORDER); + intel_uncore_posting_read(uncore, pipeconf_reg); + /* Wait for next Vblank to substitue + * border color for Color info */ + intel_wait_for_vblank(dev_priv, pipe); + st00 = intel_uncore_read8(uncore, _VGA_MSR_WRITE); + status = ((st00 & (1 << 4)) != 0) ? + connector_status_connected : + connector_status_disconnected; + + intel_uncore_write(uncore, pipeconf_reg, pipeconf); + } else { + bool restore_vblank = false; + int count, detect; + + /* + * If there isn't any border, add some. + * Yes, this will flicker + */ + if (vblank_start <= vactive && vblank_end >= vtotal) { + u32 vsync = I915_READ(vsync_reg); + u32 vsync_start = (vsync & 0xffff) + 1; + + vblank_start = vsync_start; + intel_uncore_write(uncore, + vblank_reg, + (vblank_start - 1) | + ((vblank_end - 1) << 16)); + restore_vblank = true; + } + /* sample in the vertical border, selecting the larger one */ + if (vblank_start - vactive >= vtotal - vblank_end) + vsample = (vblank_start + vactive) >> 1; + else + vsample = (vtotal + vblank_end) >> 1; + + /* + * Wait for the border to be displayed + */ + while (intel_uncore_read(uncore, pipe_dsl_reg) >= vactive) + ; + while ((dsl = intel_uncore_read(uncore, pipe_dsl_reg)) <= + vsample) + ; + /* + * Watch ST00 for an entire scanline + */ + detect = 0; + count = 0; + do { + count++; + /* Read the ST00 VGA status register */ + st00 = intel_uncore_read8(uncore, _VGA_MSR_WRITE); + if (st00 & (1 << 4)) + detect++; + } while ((intel_uncore_read(uncore, pipe_dsl_reg) == dsl)); + + /* restore vblank if necessary */ + if (restore_vblank) + intel_uncore_write(uncore, vblank_reg, vblank); + /* + * If more than 3/4 of the scanline detected a monitor, + * then it is assumed to be present. This works even on i830, + * where there isn't any way to force the border color across + * the screen + */ + status = detect * 4 > count * 3 ? 
+ connector_status_connected : + connector_status_disconnected; + } + + /* Restore previous settings */ + intel_uncore_write(uncore, bclrpat_reg, save_bclrpat); + + return status; +} + +static int intel_spurious_crt_detect_dmi_callback(const struct dmi_system_id *id) +{ + DRM_DEBUG_DRIVER("Skipping CRT detection for %s\n", id->ident); + return 1; +} + +static const struct dmi_system_id intel_spurious_crt_detect[] = { + { + .callback = intel_spurious_crt_detect_dmi_callback, + .ident = "ACER ZGB", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ACER"), + DMI_MATCH(DMI_PRODUCT_NAME, "ZGB"), + }, + }, + { + .callback = intel_spurious_crt_detect_dmi_callback, + .ident = "Intel DZ77BH-55K", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"), + DMI_MATCH(DMI_BOARD_NAME, "DZ77BH-55K"), + }, + }, + { } +}; + +static int +intel_crt_detect(struct drm_connector *connector, + struct drm_modeset_acquire_ctx *ctx, + bool force) +{ + struct drm_i915_private *dev_priv = to_i915(connector->dev); + struct intel_crt *crt = intel_attached_crt(connector); + struct intel_encoder *intel_encoder = &crt->base; + intel_wakeref_t wakeref; + int status, ret; + struct intel_load_detect_pipe tmp; + + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n", + connector->base.id, connector->name, + force); + + if (i915_modparams.load_detect_test) { + wakeref = intel_display_power_get(dev_priv, + intel_encoder->power_domain); + goto load_detect; + } + + /* Skip machines without VGA that falsely report hotplug events */ + if (dmi_check_system(intel_spurious_crt_detect)) + return connector_status_disconnected; + + wakeref = intel_display_power_get(dev_priv, + intel_encoder->power_domain); + + if (I915_HAS_HOTPLUG(dev_priv)) { + /* We can not rely on the HPD pin always being correctly wired + * up, for example many KVM do not pass it through, and so + * only trust an assertion that the monitor is connected. + */ + if (intel_crt_detect_hotplug(connector)) { + DRM_DEBUG_KMS("CRT detected via hotplug\n"); + status = connector_status_connected; + goto out; + } else + DRM_DEBUG_KMS("CRT not detected via hotplug\n"); + } + + if (intel_crt_detect_ddc(connector)) { + status = connector_status_connected; + goto out; + } + + /* Load detection is broken on HPD capable machines. Whoever wants a + * broken monitor (without edid) to work behind a broken kvm (that fails + * to have the right resistors for HP detection) needs to fix this up. + * For now just bail out. 
*/ + if (I915_HAS_HOTPLUG(dev_priv)) { + status = connector_status_disconnected; + goto out; + } + +load_detect: + if (!force) { + status = connector->status; + goto out; + } + + /* for pre-945g platforms use load detect */ + ret = intel_get_load_detect_pipe(connector, NULL, &tmp, ctx); + if (ret > 0) { + if (intel_crt_detect_ddc(connector)) + status = connector_status_connected; + else if (INTEL_GEN(dev_priv) < 4) + status = intel_crt_load_detect(crt, + to_intel_crtc(connector->state->crtc)->pipe); + else if (i915_modparams.load_detect_test) + status = connector_status_disconnected; + else + status = connector_status_unknown; + intel_release_load_detect_pipe(connector, &tmp, ctx); + } else if (ret == 0) { + status = connector_status_unknown; + } else { + status = ret; + } + +out: + intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref); + return status; +} + +static int intel_crt_get_modes(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_crt *crt = intel_attached_crt(connector); + struct intel_encoder *intel_encoder = &crt->base; + intel_wakeref_t wakeref; + struct i2c_adapter *i2c; + int ret; + + wakeref = intel_display_power_get(dev_priv, + intel_encoder->power_domain); + + i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->vbt.crt_ddc_pin); + ret = intel_crt_ddc_get_modes(connector, i2c); + if (ret || !IS_G4X(dev_priv)) + goto out; + + /* Try to probe digital port for output in DVI-I -> VGA mode. */ + i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PIN_DPB); + ret = intel_crt_ddc_get_modes(connector, i2c); + +out: + intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref); + + return ret; +} + +void intel_crt_reset(struct drm_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->dev); + struct intel_crt *crt = intel_encoder_to_crt(to_intel_encoder(encoder)); + + if (INTEL_GEN(dev_priv) >= 5) { + u32 adpa; + + adpa = I915_READ(crt->adpa_reg); + adpa &= ~ADPA_CRT_HOTPLUG_MASK; + adpa |= ADPA_HOTPLUG_BITS; + I915_WRITE(crt->adpa_reg, adpa); + POSTING_READ(crt->adpa_reg); + + DRM_DEBUG_KMS("crt adpa set to 0x%x\n", adpa); + crt->force_hotplug_required = 1; + } + +} + +/* + * Routines for controlling stuff on the analog port + */ + +static const struct drm_connector_funcs intel_crt_connector_funcs = { + .fill_modes = drm_helper_probe_single_connector_modes, + .late_register = intel_connector_register, + .early_unregister = intel_connector_unregister, + .destroy = intel_connector_destroy, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, +}; + +static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = { + .detect_ctx = intel_crt_detect, + .mode_valid = intel_crt_mode_valid, + .get_modes = intel_crt_get_modes, +}; + +static const struct drm_encoder_funcs intel_crt_enc_funcs = { + .reset = intel_crt_reset, + .destroy = intel_encoder_destroy, +}; + +void intel_crt_init(struct drm_i915_private *dev_priv) +{ + struct drm_connector *connector; + struct intel_crt *crt; + struct intel_connector *intel_connector; + i915_reg_t adpa_reg; + u32 adpa; + + if (HAS_PCH_SPLIT(dev_priv)) + adpa_reg = PCH_ADPA; + else if (IS_VALLEYVIEW(dev_priv)) + adpa_reg = VLV_ADPA; + else + adpa_reg = ADPA; + + adpa = I915_READ(adpa_reg); + if ((adpa & ADPA_DAC_ENABLE) == 0) { + /* + * On some machines (some IVB at least) CRT can be + * fused off, but there's no 
known fuse bit to + * indicate that. On these machine the ADPA register + * works normally, except the DAC enable bit won't + * take. So the only way to tell is attempt to enable + * it and see what happens. + */ + I915_WRITE(adpa_reg, adpa | ADPA_DAC_ENABLE | + ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE); + if ((I915_READ(adpa_reg) & ADPA_DAC_ENABLE) == 0) + return; + I915_WRITE(adpa_reg, adpa); + } + + crt = kzalloc(sizeof(struct intel_crt), GFP_KERNEL); + if (!crt) + return; + + intel_connector = intel_connector_alloc(); + if (!intel_connector) { + kfree(crt); + return; + } + + connector = &intel_connector->base; + crt->connector = intel_connector; + drm_connector_init(&dev_priv->drm, &intel_connector->base, + &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); + + drm_encoder_init(&dev_priv->drm, &crt->base.base, &intel_crt_enc_funcs, + DRM_MODE_ENCODER_DAC, "CRT"); + + intel_connector_attach_encoder(intel_connector, &crt->base); + + crt->base.type = INTEL_OUTPUT_ANALOG; + crt->base.cloneable = (1 << INTEL_OUTPUT_DVO) | (1 << INTEL_OUTPUT_HDMI); + if (IS_I830(dev_priv)) + crt->base.crtc_mask = (1 << 0); + else + crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); + + if (IS_GEN(dev_priv, 2)) + connector->interlace_allowed = 0; + else + connector->interlace_allowed = 1; + connector->doublescan_allowed = 0; + + crt->adpa_reg = adpa_reg; + + crt->base.power_domain = POWER_DOMAIN_PORT_CRT; + + if (I915_HAS_HOTPLUG(dev_priv) && + !dmi_check_system(intel_spurious_crt_detect)) { + crt->base.hpd_pin = HPD_CRT; + crt->base.hotplug = intel_encoder_hotplug; + } + + if (HAS_DDI(dev_priv)) { + crt->base.port = PORT_E; + crt->base.get_config = hsw_crt_get_config; + crt->base.get_hw_state = intel_ddi_get_hw_state; + crt->base.compute_config = hsw_crt_compute_config; + crt->base.pre_pll_enable = hsw_pre_pll_enable_crt; + crt->base.pre_enable = hsw_pre_enable_crt; + crt->base.enable = hsw_enable_crt; + crt->base.disable = hsw_disable_crt; + crt->base.post_disable = hsw_post_disable_crt; + } else { + if (HAS_PCH_SPLIT(dev_priv)) { + crt->base.compute_config = pch_crt_compute_config; + crt->base.disable = pch_disable_crt; + crt->base.post_disable = pch_post_disable_crt; + } else { + crt->base.compute_config = intel_crt_compute_config; + crt->base.disable = intel_disable_crt; + } + crt->base.port = PORT_NONE; + crt->base.get_config = intel_crt_get_config; + crt->base.get_hw_state = intel_crt_get_hw_state; + crt->base.enable = intel_enable_crt; + } + intel_connector->get_hw_state = intel_connector_get_hw_state; + + drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); + + if (!I915_HAS_HOTPLUG(dev_priv)) + intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT; + + /* + * Configure the automatic hotplug detection stuff + */ + crt->force_hotplug_required = 0; + + /* + * TODO: find a proper way to discover whether we need to set the the + * polarity and link reversal bits or not, instead of relying on the + * BIOS. 
+ */ + if (HAS_PCH_LPT(dev_priv)) { + u32 fdi_config = FDI_RX_POLARITY_REVERSED_LPT | + FDI_RX_LINK_REVERSAL_OVERRIDE; + + dev_priv->fdi_rx_config = I915_READ(FDI_RX_CTL(PIPE_A)) & fdi_config; + } + + intel_crt_reset(&crt->base.base); +} diff --git a/drivers/gpu/drm/i915/display/intel_crt.h b/drivers/gpu/drm/i915/display/intel_crt.h new file mode 100644 index 000000000000..1b3fba359efc --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_crt.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2019 Intel Corporation + */ + +#ifndef __INTEL_CRT_H__ +#define __INTEL_CRT_H__ + +#include "i915_reg.h" + +enum pipe; +struct drm_encoder; +struct drm_i915_private; +struct drm_i915_private; + +bool intel_crt_port_enabled(struct drm_i915_private *dev_priv, + i915_reg_t adpa_reg, enum pipe *pipe); +void intel_crt_init(struct drm_i915_private *dev_priv); +void intel_crt_reset(struct drm_encoder *encoder); + +#endif /* __INTEL_CRT_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c new file mode 100644 index 000000000000..7925a176f900 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -0,0 +1,4335 @@ +/* + * Copyright © 2012 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + * Authors: + * Eugeni Dodonov + * + */ + +#include + +#include "i915_drv.h" +#include "intel_audio.h" +#include "intel_combo_phy.h" +#include "intel_connector.h" +#include "intel_ddi.h" +#include "intel_dp.h" +#include "intel_dp_link_training.h" +#include "intel_dpio_phy.h" +#include "intel_drv.h" +#include "intel_dsi.h" +#include "intel_fifo_underrun.h" +#include "intel_gmbus.h" +#include "intel_hdcp.h" +#include "intel_hdmi.h" +#include "intel_hotplug.h" +#include "intel_lspcon.h" +#include "intel_panel.h" +#include "intel_psr.h" +#include "intel_vdsc.h" + +struct ddi_buf_trans { + u32 trans1; /* balance leg enable, de-emph level */ + u32 trans2; /* vref sel, vswing */ + u8 i_boost; /* SKL: I_boost; valid: 0x0, 0x1, 0x3, 0x7 */ +}; + +static const u8 index_to_dp_signal_levels[] = { + [0] = DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0, + [1] = DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1, + [2] = DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2, + [3] = DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3, + [4] = DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0, + [5] = DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1, + [6] = DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2, + [7] = DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0, + [8] = DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1, + [9] = DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0, +}; + +/* HDMI/DVI modes ignore everything but the last 2 items. So we share + * them for both DP and FDI transports, allowing those ports to + * automatically adapt to HDMI connections as well + */ +static const struct ddi_buf_trans hsw_ddi_translations_dp[] = { + { 0x00FFFFFF, 0x0006000E, 0x0 }, + { 0x00D75FFF, 0x0005000A, 0x0 }, + { 0x00C30FFF, 0x00040006, 0x0 }, + { 0x80AAAFFF, 0x000B0000, 0x0 }, + { 0x00FFFFFF, 0x0005000A, 0x0 }, + { 0x00D75FFF, 0x000C0004, 0x0 }, + { 0x80C30FFF, 0x000B0000, 0x0 }, + { 0x00FFFFFF, 0x00040006, 0x0 }, + { 0x80D75FFF, 0x000B0000, 0x0 }, +}; + +static const struct ddi_buf_trans hsw_ddi_translations_fdi[] = { + { 0x00FFFFFF, 0x0007000E, 0x0 }, + { 0x00D75FFF, 0x000F000A, 0x0 }, + { 0x00C30FFF, 0x00060006, 0x0 }, + { 0x00AAAFFF, 0x001E0000, 0x0 }, + { 0x00FFFFFF, 0x000F000A, 0x0 }, + { 0x00D75FFF, 0x00160004, 0x0 }, + { 0x00C30FFF, 0x001E0000, 0x0 }, + { 0x00FFFFFF, 0x00060006, 0x0 }, + { 0x00D75FFF, 0x001E0000, 0x0 }, +}; + +static const struct ddi_buf_trans hsw_ddi_translations_hdmi[] = { + /* Idx NT mV d T mV d db */ + { 0x00FFFFFF, 0x0006000E, 0x0 },/* 0: 400 400 0 */ + { 0x00E79FFF, 0x000E000C, 0x0 },/* 1: 400 500 2 */ + { 0x00D75FFF, 0x0005000A, 0x0 },/* 2: 400 600 3.5 */ + { 0x00FFFFFF, 0x0005000A, 0x0 },/* 3: 600 600 0 */ + { 0x00E79FFF, 0x001D0007, 0x0 },/* 4: 600 750 2 */ + { 0x00D75FFF, 0x000C0004, 0x0 },/* 5: 600 900 3.5 */ + { 0x00FFFFFF, 0x00040006, 0x0 },/* 6: 800 800 0 */ + { 0x80E79FFF, 0x00030002, 0x0 },/* 7: 800 1000 2 */ + { 0x00FFFFFF, 0x00140005, 0x0 },/* 8: 850 850 0 */ + { 0x00FFFFFF, 0x000C0004, 0x0 },/* 9: 900 900 0 */ + { 0x00FFFFFF, 0x001C0003, 0x0 },/* 10: 950 950 0 */ + { 0x80FFFFFF, 0x00030002, 0x0 },/* 11: 1000 1000 0 */ +}; + +static const struct ddi_buf_trans bdw_ddi_translations_edp[] = { + { 0x00FFFFFF, 0x00000012, 0x0 }, + { 0x00EBAFFF, 0x00020011, 0x0 }, + { 0x00C71FFF, 0x0006000F, 0x0 }, + { 0x00AAAFFF, 0x000E000A, 0x0 }, + { 0x00FFFFFF, 0x00020011, 0x0 }, + { 0x00DB6FFF, 0x0005000F, 0x0 }, + { 0x00BEEFFF, 0x000A000C, 0x0 }, + { 0x00FFFFFF, 0x0005000F, 0x0 }, + { 
0x00DB6FFF, 0x000A000C, 0x0 }, +}; + +static const struct ddi_buf_trans bdw_ddi_translations_dp[] = { + { 0x00FFFFFF, 0x0007000E, 0x0 }, + { 0x00D75FFF, 0x000E000A, 0x0 }, + { 0x00BEFFFF, 0x00140006, 0x0 }, + { 0x80B2CFFF, 0x001B0002, 0x0 }, + { 0x00FFFFFF, 0x000E000A, 0x0 }, + { 0x00DB6FFF, 0x00160005, 0x0 }, + { 0x80C71FFF, 0x001A0002, 0x0 }, + { 0x00F7DFFF, 0x00180004, 0x0 }, + { 0x80D75FFF, 0x001B0002, 0x0 }, +}; + +static const struct ddi_buf_trans bdw_ddi_translations_fdi[] = { + { 0x00FFFFFF, 0x0001000E, 0x0 }, + { 0x00D75FFF, 0x0004000A, 0x0 }, + { 0x00C30FFF, 0x00070006, 0x0 }, + { 0x00AAAFFF, 0x000C0000, 0x0 }, + { 0x00FFFFFF, 0x0004000A, 0x0 }, + { 0x00D75FFF, 0x00090004, 0x0 }, + { 0x00C30FFF, 0x000C0000, 0x0 }, + { 0x00FFFFFF, 0x00070006, 0x0 }, + { 0x00D75FFF, 0x000C0000, 0x0 }, +}; + +static const struct ddi_buf_trans bdw_ddi_translations_hdmi[] = { + /* Idx NT mV d T mV df db */ + { 0x00FFFFFF, 0x0007000E, 0x0 },/* 0: 400 400 0 */ + { 0x00D75FFF, 0x000E000A, 0x0 },/* 1: 400 600 3.5 */ + { 0x00BEFFFF, 0x00140006, 0x0 },/* 2: 400 800 6 */ + { 0x00FFFFFF, 0x0009000D, 0x0 },/* 3: 450 450 0 */ + { 0x00FFFFFF, 0x000E000A, 0x0 },/* 4: 600 600 0 */ + { 0x00D7FFFF, 0x00140006, 0x0 },/* 5: 600 800 2.5 */ + { 0x80CB2FFF, 0x001B0002, 0x0 },/* 6: 600 1000 4.5 */ + { 0x00FFFFFF, 0x00140006, 0x0 },/* 7: 800 800 0 */ + { 0x80E79FFF, 0x001B0002, 0x0 },/* 8: 800 1000 2 */ + { 0x80FFFFFF, 0x001B0002, 0x0 },/* 9: 1000 1000 0 */ +}; + +/* Skylake H and S */ +static const struct ddi_buf_trans skl_ddi_translations_dp[] = { + { 0x00002016, 0x000000A0, 0x0 }, + { 0x00005012, 0x0000009B, 0x0 }, + { 0x00007011, 0x00000088, 0x0 }, + { 0x80009010, 0x000000C0, 0x1 }, + { 0x00002016, 0x0000009B, 0x0 }, + { 0x00005012, 0x00000088, 0x0 }, + { 0x80007011, 0x000000C0, 0x1 }, + { 0x00002016, 0x000000DF, 0x0 }, + { 0x80005012, 0x000000C0, 0x1 }, +}; + +/* Skylake U */ +static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = { + { 0x0000201B, 0x000000A2, 0x0 }, + { 0x00005012, 0x00000088, 0x0 }, + { 0x80007011, 0x000000CD, 0x1 }, + { 0x80009010, 0x000000C0, 0x1 }, + { 0x0000201B, 0x0000009D, 0x0 }, + { 0x80005012, 0x000000C0, 0x1 }, + { 0x80007011, 0x000000C0, 0x1 }, + { 0x00002016, 0x00000088, 0x0 }, + { 0x80005012, 0x000000C0, 0x1 }, +}; + +/* Skylake Y */ +static const struct ddi_buf_trans skl_y_ddi_translations_dp[] = { + { 0x00000018, 0x000000A2, 0x0 }, + { 0x00005012, 0x00000088, 0x0 }, + { 0x80007011, 0x000000CD, 0x3 }, + { 0x80009010, 0x000000C0, 0x3 }, + { 0x00000018, 0x0000009D, 0x0 }, + { 0x80005012, 0x000000C0, 0x3 }, + { 0x80007011, 0x000000C0, 0x3 }, + { 0x00000018, 0x00000088, 0x0 }, + { 0x80005012, 0x000000C0, 0x3 }, +}; + +/* Kabylake H and S */ +static const struct ddi_buf_trans kbl_ddi_translations_dp[] = { + { 0x00002016, 0x000000A0, 0x0 }, + { 0x00005012, 0x0000009B, 0x0 }, + { 0x00007011, 0x00000088, 0x0 }, + { 0x80009010, 0x000000C0, 0x1 }, + { 0x00002016, 0x0000009B, 0x0 }, + { 0x00005012, 0x00000088, 0x0 }, + { 0x80007011, 0x000000C0, 0x1 }, + { 0x00002016, 0x00000097, 0x0 }, + { 0x80005012, 0x000000C0, 0x1 }, +}; + +/* Kabylake U */ +static const struct ddi_buf_trans kbl_u_ddi_translations_dp[] = { + { 0x0000201B, 0x000000A1, 0x0 }, + { 0x00005012, 0x00000088, 0x0 }, + { 0x80007011, 0x000000CD, 0x3 }, + { 0x80009010, 0x000000C0, 0x3 }, + { 0x0000201B, 0x0000009D, 0x0 }, + { 0x80005012, 0x000000C0, 0x3 }, + { 0x80007011, 0x000000C0, 0x3 }, + { 0x00002016, 0x0000004F, 0x0 }, + { 0x80005012, 0x000000C0, 0x3 }, +}; + +/* Kabylake Y */ +static const struct ddi_buf_trans 
kbl_y_ddi_translations_dp[] = { + { 0x00001017, 0x000000A1, 0x0 }, + { 0x00005012, 0x00000088, 0x0 }, + { 0x80007011, 0x000000CD, 0x3 }, + { 0x8000800F, 0x000000C0, 0x3 }, + { 0x00001017, 0x0000009D, 0x0 }, + { 0x80005012, 0x000000C0, 0x3 }, + { 0x80007011, 0x000000C0, 0x3 }, + { 0x00001017, 0x0000004C, 0x0 }, + { 0x80005012, 0x000000C0, 0x3 }, +}; + +/* + * Skylake/Kabylake H and S + * eDP 1.4 low vswing translation parameters + */ +static const struct ddi_buf_trans skl_ddi_translations_edp[] = { + { 0x00000018, 0x000000A8, 0x0 }, + { 0x00004013, 0x000000A9, 0x0 }, + { 0x00007011, 0x000000A2, 0x0 }, + { 0x00009010, 0x0000009C, 0x0 }, + { 0x00000018, 0x000000A9, 0x0 }, + { 0x00006013, 0x000000A2, 0x0 }, + { 0x00007011, 0x000000A6, 0x0 }, + { 0x00000018, 0x000000AB, 0x0 }, + { 0x00007013, 0x0000009F, 0x0 }, + { 0x00000018, 0x000000DF, 0x0 }, +}; + +/* + * Skylake/Kabylake U + * eDP 1.4 low vswing translation parameters + */ +static const struct ddi_buf_trans skl_u_ddi_translations_edp[] = { + { 0x00000018, 0x000000A8, 0x0 }, + { 0x00004013, 0x000000A9, 0x0 }, + { 0x00007011, 0x000000A2, 0x0 }, + { 0x00009010, 0x0000009C, 0x0 }, + { 0x00000018, 0x000000A9, 0x0 }, + { 0x00006013, 0x000000A2, 0x0 }, + { 0x00007011, 0x000000A6, 0x0 }, + { 0x00002016, 0x000000AB, 0x0 }, + { 0x00005013, 0x0000009F, 0x0 }, + { 0x00000018, 0x000000DF, 0x0 }, +}; + +/* + * Skylake/Kabylake Y + * eDP 1.4 low vswing translation parameters + */ +static const struct ddi_buf_trans skl_y_ddi_translations_edp[] = { + { 0x00000018, 0x000000A8, 0x0 }, + { 0x00004013, 0x000000AB, 0x0 }, + { 0x00007011, 0x000000A4, 0x0 }, + { 0x00009010, 0x000000DF, 0x0 }, + { 0x00000018, 0x000000AA, 0x0 }, + { 0x00006013, 0x000000A4, 0x0 }, + { 0x00007011, 0x0000009D, 0x0 }, + { 0x00000018, 0x000000A0, 0x0 }, + { 0x00006012, 0x000000DF, 0x0 }, + { 0x00000018, 0x0000008A, 0x0 }, +}; + +/* Skylake/Kabylake U, H and S */ +static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = { + { 0x00000018, 0x000000AC, 0x0 }, + { 0x00005012, 0x0000009D, 0x0 }, + { 0x00007011, 0x00000088, 0x0 }, + { 0x00000018, 0x000000A1, 0x0 }, + { 0x00000018, 0x00000098, 0x0 }, + { 0x00004013, 0x00000088, 0x0 }, + { 0x80006012, 0x000000CD, 0x1 }, + { 0x00000018, 0x000000DF, 0x0 }, + { 0x80003015, 0x000000CD, 0x1 }, /* Default */ + { 0x80003015, 0x000000C0, 0x1 }, + { 0x80000018, 0x000000C0, 0x1 }, +}; + +/* Skylake/Kabylake Y */ +static const struct ddi_buf_trans skl_y_ddi_translations_hdmi[] = { + { 0x00000018, 0x000000A1, 0x0 }, + { 0x00005012, 0x000000DF, 0x0 }, + { 0x80007011, 0x000000CB, 0x3 }, + { 0x00000018, 0x000000A4, 0x0 }, + { 0x00000018, 0x0000009D, 0x0 }, + { 0x00004013, 0x00000080, 0x0 }, + { 0x80006013, 0x000000C0, 0x3 }, + { 0x00000018, 0x0000008A, 0x0 }, + { 0x80003015, 0x000000C0, 0x3 }, /* Default */ + { 0x80003015, 0x000000C0, 0x3 }, + { 0x80000018, 0x000000C0, 0x3 }, +}; + +struct bxt_ddi_buf_trans { + u8 margin; /* swing value */ + u8 scale; /* scale value */ + u8 enable; /* scale enable */ + u8 deemphasis; +}; + +static const struct bxt_ddi_buf_trans bxt_ddi_translations_dp[] = { + /* Idx NT mV diff db */ + { 52, 0x9A, 0, 128, }, /* 0: 400 0 */ + { 78, 0x9A, 0, 85, }, /* 1: 400 3.5 */ + { 104, 0x9A, 0, 64, }, /* 2: 400 6 */ + { 154, 0x9A, 0, 43, }, /* 3: 400 9.5 */ + { 77, 0x9A, 0, 128, }, /* 4: 600 0 */ + { 116, 0x9A, 0, 85, }, /* 5: 600 3.5 */ + { 154, 0x9A, 0, 64, }, /* 6: 600 6 */ + { 102, 0x9A, 0, 128, }, /* 7: 800 0 */ + { 154, 0x9A, 0, 85, }, /* 8: 800 3.5 */ + { 154, 0x9A, 1, 128, }, /* 9: 1200 0 */ +}; + +static const struct 
bxt_ddi_buf_trans bxt_ddi_translations_edp[] = { + /* Idx NT mV diff db */ + { 26, 0, 0, 128, }, /* 0: 200 0 */ + { 38, 0, 0, 112, }, /* 1: 200 1.5 */ + { 48, 0, 0, 96, }, /* 2: 200 4 */ + { 54, 0, 0, 69, }, /* 3: 200 6 */ + { 32, 0, 0, 128, }, /* 4: 250 0 */ + { 48, 0, 0, 104, }, /* 5: 250 1.5 */ + { 54, 0, 0, 85, }, /* 6: 250 4 */ + { 43, 0, 0, 128, }, /* 7: 300 0 */ + { 54, 0, 0, 101, }, /* 8: 300 1.5 */ + { 48, 0, 0, 128, }, /* 9: 300 0 */ +}; + +/* BSpec has 2 recommended values - entries 0 and 8. + * Using the entry with higher vswing. + */ +static const struct bxt_ddi_buf_trans bxt_ddi_translations_hdmi[] = { + /* Idx NT mV diff db */ + { 52, 0x9A, 0, 128, }, /* 0: 400 0 */ + { 52, 0x9A, 0, 85, }, /* 1: 400 3.5 */ + { 52, 0x9A, 0, 64, }, /* 2: 400 6 */ + { 42, 0x9A, 0, 43, }, /* 3: 400 9.5 */ + { 77, 0x9A, 0, 128, }, /* 4: 600 0 */ + { 77, 0x9A, 0, 85, }, /* 5: 600 3.5 */ + { 77, 0x9A, 0, 64, }, /* 6: 600 6 */ + { 102, 0x9A, 0, 128, }, /* 7: 800 0 */ + { 102, 0x9A, 0, 85, }, /* 8: 800 3.5 */ + { 154, 0x9A, 1, 128, }, /* 9: 1200 0 */ +}; + +struct cnl_ddi_buf_trans { + u8 dw2_swing_sel; + u8 dw7_n_scalar; + u8 dw4_cursor_coeff; + u8 dw4_post_cursor_2; + u8 dw4_post_cursor_1; +}; + +/* Voltage Swing Programming for VccIO 0.85V for DP */ +static const struct cnl_ddi_buf_trans cnl_ddi_translations_dp_0_85V[] = { + /* NT mV Trans mV db */ + { 0xA, 0x5D, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ + { 0xA, 0x6A, 0x38, 0x00, 0x07 }, /* 350 500 3.1 */ + { 0xB, 0x7A, 0x32, 0x00, 0x0D }, /* 350 700 6.0 */ + { 0x6, 0x7C, 0x2D, 0x00, 0x12 }, /* 350 900 8.2 */ + { 0xA, 0x69, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */ + { 0xB, 0x7A, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */ + { 0x6, 0x7C, 0x30, 0x00, 0x0F }, /* 500 900 5.1 */ + { 0xB, 0x7D, 0x3C, 0x00, 0x03 }, /* 650 725 0.9 */ + { 0x6, 0x7C, 0x34, 0x00, 0x0B }, /* 600 900 3.5 */ + { 0x6, 0x7B, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ +}; + +/* Voltage Swing Programming for VccIO 0.85V for HDMI */ +static const struct cnl_ddi_buf_trans cnl_ddi_translations_hdmi_0_85V[] = { + /* NT mV Trans mV db */ + { 0xA, 0x60, 0x3F, 0x00, 0x00 }, /* 450 450 0.0 */ + { 0xB, 0x73, 0x36, 0x00, 0x09 }, /* 450 650 3.2 */ + { 0x6, 0x7F, 0x31, 0x00, 0x0E }, /* 450 850 5.5 */ + { 0xB, 0x73, 0x3F, 0x00, 0x00 }, /* 650 650 0.0 */ + { 0x6, 0x7F, 0x37, 0x00, 0x08 }, /* 650 850 2.3 */ + { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 850 850 0.0 */ + { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */ +}; + +/* Voltage Swing Programming for VccIO 0.85V for eDP */ +static const struct cnl_ddi_buf_trans cnl_ddi_translations_edp_0_85V[] = { + /* NT mV Trans mV db */ + { 0xA, 0x66, 0x3A, 0x00, 0x05 }, /* 384 500 2.3 */ + { 0x0, 0x7F, 0x38, 0x00, 0x07 }, /* 153 200 2.3 */ + { 0x8, 0x7F, 0x38, 0x00, 0x07 }, /* 192 250 2.3 */ + { 0x1, 0x7F, 0x38, 0x00, 0x07 }, /* 230 300 2.3 */ + { 0x9, 0x7F, 0x38, 0x00, 0x07 }, /* 269 350 2.3 */ + { 0xA, 0x66, 0x3C, 0x00, 0x03 }, /* 446 500 1.0 */ + { 0xB, 0x70, 0x3C, 0x00, 0x03 }, /* 460 600 2.3 */ + { 0xC, 0x75, 0x3C, 0x00, 0x03 }, /* 537 700 2.3 */ + { 0x2, 0x7F, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */ +}; + +/* Voltage Swing Programming for VccIO 0.95V for DP */ +static const struct cnl_ddi_buf_trans cnl_ddi_translations_dp_0_95V[] = { + /* NT mV Trans mV db */ + { 0xA, 0x5D, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ + { 0xA, 0x6A, 0x38, 0x00, 0x07 }, /* 350 500 3.1 */ + { 0xB, 0x7A, 0x32, 0x00, 0x0D }, /* 350 700 6.0 */ + { 0x6, 0x7C, 0x2D, 0x00, 0x12 }, /* 350 900 8.2 */ + { 0xA, 0x69, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */ + { 0xB, 0x7A, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */ 
+ { 0x6, 0x7C, 0x30, 0x00, 0x0F }, /* 500 900 5.1 */ + { 0xB, 0x7D, 0x3C, 0x00, 0x03 }, /* 650 725 0.9 */ + { 0x6, 0x7C, 0x34, 0x00, 0x0B }, /* 600 900 3.5 */ + { 0x6, 0x7B, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ +}; + +/* Voltage Swing Programming for VccIO 0.95V for HDMI */ +static const struct cnl_ddi_buf_trans cnl_ddi_translations_hdmi_0_95V[] = { + /* NT mV Trans mV db */ + { 0xA, 0x5C, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */ + { 0xB, 0x69, 0x37, 0x00, 0x08 }, /* 400 600 3.5 */ + { 0x5, 0x76, 0x31, 0x00, 0x0E }, /* 400 800 6.0 */ + { 0xA, 0x5E, 0x3F, 0x00, 0x00 }, /* 450 450 0.0 */ + { 0xB, 0x69, 0x3F, 0x00, 0x00 }, /* 600 600 0.0 */ + { 0xB, 0x79, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */ + { 0x6, 0x7D, 0x32, 0x00, 0x0D }, /* 600 1000 4.4 */ + { 0x5, 0x76, 0x3F, 0x00, 0x00 }, /* 800 800 0.0 */ + { 0x6, 0x7D, 0x39, 0x00, 0x06 }, /* 800 1000 1.9 */ + { 0x6, 0x7F, 0x39, 0x00, 0x06 }, /* 850 1050 1.8 */ + { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1050 1050 0.0 */ +}; + +/* Voltage Swing Programming for VccIO 0.95V for eDP */ +static const struct cnl_ddi_buf_trans cnl_ddi_translations_edp_0_95V[] = { + /* NT mV Trans mV db */ + { 0xA, 0x61, 0x3A, 0x00, 0x05 }, /* 384 500 2.3 */ + { 0x0, 0x7F, 0x38, 0x00, 0x07 }, /* 153 200 2.3 */ + { 0x8, 0x7F, 0x38, 0x00, 0x07 }, /* 192 250 2.3 */ + { 0x1, 0x7F, 0x38, 0x00, 0x07 }, /* 230 300 2.3 */ + { 0x9, 0x7F, 0x38, 0x00, 0x07 }, /* 269 350 2.3 */ + { 0xA, 0x61, 0x3C, 0x00, 0x03 }, /* 446 500 1.0 */ + { 0xB, 0x68, 0x39, 0x00, 0x06 }, /* 460 600 2.3 */ + { 0xC, 0x6E, 0x39, 0x00, 0x06 }, /* 537 700 2.3 */ + { 0x4, 0x7F, 0x3A, 0x00, 0x05 }, /* 460 600 2.3 */ + { 0x2, 0x7F, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */ +}; + +/* Voltage Swing Programming for VccIO 1.05V for DP */ +static const struct cnl_ddi_buf_trans cnl_ddi_translations_dp_1_05V[] = { + /* NT mV Trans mV db */ + { 0xA, 0x58, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */ + { 0xB, 0x64, 0x37, 0x00, 0x08 }, /* 400 600 3.5 */ + { 0x5, 0x70, 0x31, 0x00, 0x0E }, /* 400 800 6.0 */ + { 0x6, 0x7F, 0x2C, 0x00, 0x13 }, /* 400 1050 8.4 */ + { 0xB, 0x64, 0x3F, 0x00, 0x00 }, /* 600 600 0.0 */ + { 0x5, 0x73, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */ + { 0x6, 0x7F, 0x30, 0x00, 0x0F }, /* 550 1050 5.6 */ + { 0x5, 0x76, 0x3E, 0x00, 0x01 }, /* 850 900 0.5 */ + { 0x6, 0x7F, 0x36, 0x00, 0x09 }, /* 750 1050 2.9 */ + { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1050 1050 0.0 */ +}; + +/* Voltage Swing Programming for VccIO 1.05V for HDMI */ +static const struct cnl_ddi_buf_trans cnl_ddi_translations_hdmi_1_05V[] = { + /* NT mV Trans mV db */ + { 0xA, 0x58, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */ + { 0xB, 0x64, 0x37, 0x00, 0x08 }, /* 400 600 3.5 */ + { 0x5, 0x70, 0x31, 0x00, 0x0E }, /* 400 800 6.0 */ + { 0xA, 0x5B, 0x3F, 0x00, 0x00 }, /* 450 450 0.0 */ + { 0xB, 0x64, 0x3F, 0x00, 0x00 }, /* 600 600 0.0 */ + { 0x5, 0x73, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */ + { 0x6, 0x7C, 0x32, 0x00, 0x0D }, /* 600 1000 4.4 */ + { 0x5, 0x70, 0x3F, 0x00, 0x00 }, /* 800 800 0.0 */ + { 0x6, 0x7C, 0x39, 0x00, 0x06 }, /* 800 1000 1.9 */ + { 0x6, 0x7F, 0x39, 0x00, 0x06 }, /* 850 1050 1.8 */ + { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 1050 1050 0.0 */ +}; + +/* Voltage Swing Programming for VccIO 1.05V for eDP */ +static const struct cnl_ddi_buf_trans cnl_ddi_translations_edp_1_05V[] = { + /* NT mV Trans mV db */ + { 0xA, 0x5E, 0x3A, 0x00, 0x05 }, /* 384 500 2.3 */ + { 0x0, 0x7F, 0x38, 0x00, 0x07 }, /* 153 200 2.3 */ + { 0x8, 0x7F, 0x38, 0x00, 0x07 }, /* 192 250 2.3 */ + { 0x1, 0x7F, 0x38, 0x00, 0x07 }, /* 230 300 2.3 */ + { 0x9, 0x7F, 0x38, 0x00, 0x07 }, /* 269 350 2.3 */ + { 0xA, 
0x5E, 0x3C, 0x00, 0x03 }, /* 446 500 1.0 */ + { 0xB, 0x64, 0x39, 0x00, 0x06 }, /* 460 600 2.3 */ + { 0xE, 0x6A, 0x39, 0x00, 0x06 }, /* 537 700 2.3 */ + { 0x2, 0x7F, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */ +}; + +/* icl_combo_phy_ddi_translations */ +static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hbr2[] = { + /* NT mV Trans mV db */ + { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ + { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */ + { 0xC, 0x71, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */ + { 0x6, 0x7F, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */ + { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */ + { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */ + { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */ + { 0xC, 0x6C, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */ + { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */ + { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ +}; + +static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_edp_hbr2[] = { + /* NT mV Trans mV db */ + { 0x0, 0x7F, 0x3F, 0x00, 0x00 }, /* 200 200 0.0 */ + { 0x8, 0x7F, 0x38, 0x00, 0x07 }, /* 200 250 1.9 */ + { 0x1, 0x7F, 0x33, 0x00, 0x0C }, /* 200 300 3.5 */ + { 0x9, 0x7F, 0x31, 0x00, 0x0E }, /* 200 350 4.9 */ + { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 250 250 0.0 */ + { 0x1, 0x7F, 0x38, 0x00, 0x07 }, /* 250 300 1.6 */ + { 0x9, 0x7F, 0x35, 0x00, 0x0A }, /* 250 350 2.9 */ + { 0x1, 0x7F, 0x3F, 0x00, 0x00 }, /* 300 300 0.0 */ + { 0x9, 0x7F, 0x38, 0x00, 0x07 }, /* 300 350 1.3 */ + { 0x9, 0x7F, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ +}; + +static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_edp_hbr3[] = { + /* NT mV Trans mV db */ + { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ + { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */ + { 0xC, 0x71, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */ + { 0x6, 0x7F, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */ + { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */ + { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */ + { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */ + { 0xC, 0x6C, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */ + { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */ + { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ +}; + +static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_hdmi[] = { + /* NT mV Trans mV db */ + { 0xA, 0x60, 0x3F, 0x00, 0x00 }, /* 450 450 0.0 */ + { 0xB, 0x73, 0x36, 0x00, 0x09 }, /* 450 650 3.2 */ + { 0x6, 0x7F, 0x31, 0x00, 0x0E }, /* 450 850 5.5 */ + { 0xB, 0x73, 0x3F, 0x00, 0x00 }, /* 650 650 0.0 ALS */ + { 0x6, 0x7F, 0x37, 0x00, 0x08 }, /* 650 850 2.3 */ + { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 850 850 0.0 */ + { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */ +}; + +struct icl_mg_phy_ddi_buf_trans { + u32 cri_txdeemph_override_5_0; + u32 cri_txdeemph_override_11_6; + u32 cri_txdeemph_override_17_12; +}; + +static const struct icl_mg_phy_ddi_buf_trans icl_mg_phy_ddi_translations[] = { + /* Voltage swing pre-emphasis */ + { 0x0, 0x1B, 0x00 }, /* 0 0 */ + { 0x0, 0x23, 0x08 }, /* 0 1 */ + { 0x0, 0x2D, 0x12 }, /* 0 2 */ + { 0x0, 0x00, 0x00 }, /* 0 3 */ + { 0x0, 0x23, 0x00 }, /* 1 0 */ + { 0x0, 0x2B, 0x09 }, /* 1 1 */ + { 0x0, 0x2E, 0x11 }, /* 1 2 */ + { 0x0, 0x2F, 0x00 }, /* 2 0 */ + { 0x0, 0x33, 0x0C }, /* 2 1 */ + { 0x0, 0x00, 0x00 }, /* 3 0 */ +}; + +static const struct ddi_buf_trans * +bdw_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries) +{ + if (dev_priv->vbt.edp.low_vswing) { + *n_entries = ARRAY_SIZE(bdw_ddi_translations_edp); + return bdw_ddi_translations_edp; + } else { + *n_entries = 
ARRAY_SIZE(bdw_ddi_translations_dp); + return bdw_ddi_translations_dp; + } +} + +static const struct ddi_buf_trans * +skl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries) +{ + if (IS_SKL_ULX(dev_priv)) { + *n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp); + return skl_y_ddi_translations_dp; + } else if (IS_SKL_ULT(dev_priv)) { + *n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp); + return skl_u_ddi_translations_dp; + } else { + *n_entries = ARRAY_SIZE(skl_ddi_translations_dp); + return skl_ddi_translations_dp; + } +} + +static const struct ddi_buf_trans * +kbl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries) +{ + if (IS_KBL_ULX(dev_priv) || IS_CFL_ULX(dev_priv)) { + *n_entries = ARRAY_SIZE(kbl_y_ddi_translations_dp); + return kbl_y_ddi_translations_dp; + } else if (IS_KBL_ULT(dev_priv) || IS_CFL_ULT(dev_priv)) { + *n_entries = ARRAY_SIZE(kbl_u_ddi_translations_dp); + return kbl_u_ddi_translations_dp; + } else { + *n_entries = ARRAY_SIZE(kbl_ddi_translations_dp); + return kbl_ddi_translations_dp; + } +} + +static const struct ddi_buf_trans * +skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries) +{ + if (dev_priv->vbt.edp.low_vswing) { + if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv) || + IS_CFL_ULX(dev_priv)) { + *n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp); + return skl_y_ddi_translations_edp; + } else if (IS_SKL_ULT(dev_priv) || IS_KBL_ULT(dev_priv) || + IS_CFL_ULT(dev_priv)) { + *n_entries = ARRAY_SIZE(skl_u_ddi_translations_edp); + return skl_u_ddi_translations_edp; + } else { + *n_entries = ARRAY_SIZE(skl_ddi_translations_edp); + return skl_ddi_translations_edp; + } + } + + if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) + return kbl_get_buf_trans_dp(dev_priv, n_entries); + else + return skl_get_buf_trans_dp(dev_priv, n_entries); +} + +static const struct ddi_buf_trans * +skl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries) +{ + if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv) || + IS_CFL_ULX(dev_priv)) { + *n_entries = ARRAY_SIZE(skl_y_ddi_translations_hdmi); + return skl_y_ddi_translations_hdmi; + } else { + *n_entries = ARRAY_SIZE(skl_ddi_translations_hdmi); + return skl_ddi_translations_hdmi; + } +} + +static int skl_buf_trans_num_entries(enum port port, int n_entries) +{ + /* Only DDIA and DDIE can select the 10th register with DP */ + if (port == PORT_A || port == PORT_E) + return min(n_entries, 10); + else + return min(n_entries, 9); +} + +static const struct ddi_buf_trans * +intel_ddi_get_buf_trans_dp(struct drm_i915_private *dev_priv, + enum port port, int *n_entries) +{ + if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) { + const struct ddi_buf_trans *ddi_translations = + kbl_get_buf_trans_dp(dev_priv, n_entries); + *n_entries = skl_buf_trans_num_entries(port, *n_entries); + return ddi_translations; + } else if (IS_SKYLAKE(dev_priv)) { + const struct ddi_buf_trans *ddi_translations = + skl_get_buf_trans_dp(dev_priv, n_entries); + *n_entries = skl_buf_trans_num_entries(port, *n_entries); + return ddi_translations; + } else if (IS_BROADWELL(dev_priv)) { + *n_entries = ARRAY_SIZE(bdw_ddi_translations_dp); + return bdw_ddi_translations_dp; + } else if (IS_HASWELL(dev_priv)) { + *n_entries = ARRAY_SIZE(hsw_ddi_translations_dp); + return hsw_ddi_translations_dp; + } + + *n_entries = 0; + return NULL; +} + +static const struct ddi_buf_trans * +intel_ddi_get_buf_trans_edp(struct drm_i915_private *dev_priv, + enum port port, int *n_entries) +{ + if (IS_GEN9_BC(dev_priv)) { + const 
struct ddi_buf_trans *ddi_translations = + skl_get_buf_trans_edp(dev_priv, n_entries); + *n_entries = skl_buf_trans_num_entries(port, *n_entries); + return ddi_translations; + } else if (IS_BROADWELL(dev_priv)) { + return bdw_get_buf_trans_edp(dev_priv, n_entries); + } else if (IS_HASWELL(dev_priv)) { + *n_entries = ARRAY_SIZE(hsw_ddi_translations_dp); + return hsw_ddi_translations_dp; + } + + *n_entries = 0; + return NULL; +} + +static const struct ddi_buf_trans * +intel_ddi_get_buf_trans_fdi(struct drm_i915_private *dev_priv, + int *n_entries) +{ + if (IS_BROADWELL(dev_priv)) { + *n_entries = ARRAY_SIZE(bdw_ddi_translations_fdi); + return bdw_ddi_translations_fdi; + } else if (IS_HASWELL(dev_priv)) { + *n_entries = ARRAY_SIZE(hsw_ddi_translations_fdi); + return hsw_ddi_translations_fdi; + } + + *n_entries = 0; + return NULL; +} + +static const struct ddi_buf_trans * +intel_ddi_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, + int *n_entries) +{ + if (IS_GEN9_BC(dev_priv)) { + return skl_get_buf_trans_hdmi(dev_priv, n_entries); + } else if (IS_BROADWELL(dev_priv)) { + *n_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi); + return bdw_ddi_translations_hdmi; + } else if (IS_HASWELL(dev_priv)) { + *n_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi); + return hsw_ddi_translations_hdmi; + } + + *n_entries = 0; + return NULL; +} + +static const struct bxt_ddi_buf_trans * +bxt_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries) +{ + *n_entries = ARRAY_SIZE(bxt_ddi_translations_dp); + return bxt_ddi_translations_dp; +} + +static const struct bxt_ddi_buf_trans * +bxt_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries) +{ + if (dev_priv->vbt.edp.low_vswing) { + *n_entries = ARRAY_SIZE(bxt_ddi_translations_edp); + return bxt_ddi_translations_edp; + } + + return bxt_get_buf_trans_dp(dev_priv, n_entries); +} + +static const struct bxt_ddi_buf_trans * +bxt_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries) +{ + *n_entries = ARRAY_SIZE(bxt_ddi_translations_hdmi); + return bxt_ddi_translations_hdmi; +} + +static const struct cnl_ddi_buf_trans * +cnl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries) +{ + u32 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK; + + if (voltage == VOLTAGE_INFO_0_85V) { + *n_entries = ARRAY_SIZE(cnl_ddi_translations_hdmi_0_85V); + return cnl_ddi_translations_hdmi_0_85V; + } else if (voltage == VOLTAGE_INFO_0_95V) { + *n_entries = ARRAY_SIZE(cnl_ddi_translations_hdmi_0_95V); + return cnl_ddi_translations_hdmi_0_95V; + } else if (voltage == VOLTAGE_INFO_1_05V) { + *n_entries = ARRAY_SIZE(cnl_ddi_translations_hdmi_1_05V); + return cnl_ddi_translations_hdmi_1_05V; + } else { + *n_entries = 1; /* shut up gcc */ + MISSING_CASE(voltage); + } + return NULL; +} + +static const struct cnl_ddi_buf_trans * +cnl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries) +{ + u32 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK; + + if (voltage == VOLTAGE_INFO_0_85V) { + *n_entries = ARRAY_SIZE(cnl_ddi_translations_dp_0_85V); + return cnl_ddi_translations_dp_0_85V; + } else if (voltage == VOLTAGE_INFO_0_95V) { + *n_entries = ARRAY_SIZE(cnl_ddi_translations_dp_0_95V); + return cnl_ddi_translations_dp_0_95V; + } else if (voltage == VOLTAGE_INFO_1_05V) { + *n_entries = ARRAY_SIZE(cnl_ddi_translations_dp_1_05V); + return cnl_ddi_translations_dp_1_05V; + } else { + *n_entries = 1; /* shut up gcc */ + MISSING_CASE(voltage); + } + return NULL; +} + +static const struct cnl_ddi_buf_trans * 
+cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries) +{ + u32 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK; + + if (dev_priv->vbt.edp.low_vswing) { + if (voltage == VOLTAGE_INFO_0_85V) { + *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_85V); + return cnl_ddi_translations_edp_0_85V; + } else if (voltage == VOLTAGE_INFO_0_95V) { + *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_95V); + return cnl_ddi_translations_edp_0_95V; + } else if (voltage == VOLTAGE_INFO_1_05V) { + *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_1_05V); + return cnl_ddi_translations_edp_1_05V; + } else { + *n_entries = 1; /* shut up gcc */ + MISSING_CASE(voltage); + } + return NULL; + } else { + return cnl_get_buf_trans_dp(dev_priv, n_entries); + } +} + +static const struct cnl_ddi_buf_trans * +icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, enum port port, + int type, int rate, int *n_entries) +{ + if (type == INTEL_OUTPUT_HDMI) { + *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi); + return icl_combo_phy_ddi_translations_hdmi; + } else if (rate > 540000 && type == INTEL_OUTPUT_EDP) { + *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr3); + return icl_combo_phy_ddi_translations_edp_hbr3; + } else if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.low_vswing) { + *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr2); + return icl_combo_phy_ddi_translations_edp_hbr2; + } + + *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hbr2); + return icl_combo_phy_ddi_translations_dp_hbr2; +} + +static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port) +{ + int n_entries, level, default_entry; + + level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift; + + if (INTEL_GEN(dev_priv) >= 11) { + if (intel_port_is_combophy(dev_priv, port)) + icl_get_combo_buf_trans(dev_priv, port, INTEL_OUTPUT_HDMI, + 0, &n_entries); + else + n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations); + default_entry = n_entries - 1; + } else if (IS_CANNONLAKE(dev_priv)) { + cnl_get_buf_trans_hdmi(dev_priv, &n_entries); + default_entry = n_entries - 1; + } else if (IS_GEN9_LP(dev_priv)) { + bxt_get_buf_trans_hdmi(dev_priv, &n_entries); + default_entry = n_entries - 1; + } else if (IS_GEN9_BC(dev_priv)) { + intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries); + default_entry = 8; + } else if (IS_BROADWELL(dev_priv)) { + intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries); + default_entry = 7; + } else if (IS_HASWELL(dev_priv)) { + intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries); + default_entry = 6; + } else { + WARN(1, "ddi translation table missing\n"); + return 0; + } + + /* Choose a good default if VBT is badly populated */ + if (level == HDMI_LEVEL_SHIFT_UNKNOWN || level >= n_entries) + level = default_entry; + + if (WARN_ON_ONCE(n_entries == 0)) + return 0; + if (WARN_ON_ONCE(level >= n_entries)) + level = n_entries - 1; + + return level; +} + +/* + * Starting with Haswell, DDI port buffers must be programmed with correct + * values in advance. This function programs the correct values for + * DP/eDP/FDI use cases. 
+ */ +static void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + u32 iboost_bit = 0; + int i, n_entries; + enum port port = encoder->port; + const struct ddi_buf_trans *ddi_translations; + + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) + ddi_translations = intel_ddi_get_buf_trans_fdi(dev_priv, + &n_entries); + else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) + ddi_translations = intel_ddi_get_buf_trans_edp(dev_priv, port, + &n_entries); + else + ddi_translations = intel_ddi_get_buf_trans_dp(dev_priv, port, + &n_entries); + + /* If we're boosting the current, set bit 31 of trans1 */ + if (IS_GEN9_BC(dev_priv) && + dev_priv->vbt.ddi_port_info[port].dp_boost_level) + iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE; + + for (i = 0; i < n_entries; i++) { + I915_WRITE(DDI_BUF_TRANS_LO(port, i), + ddi_translations[i].trans1 | iboost_bit); + I915_WRITE(DDI_BUF_TRANS_HI(port, i), + ddi_translations[i].trans2); + } +} + +/* + * Starting with Haswell, DDI port buffers must be programmed with correct + * values in advance. This function programs the correct values for + * HDMI/DVI use cases. + */ +static void intel_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder, + int level) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + u32 iboost_bit = 0; + int n_entries; + enum port port = encoder->port; + const struct ddi_buf_trans *ddi_translations; + + ddi_translations = intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries); + + if (WARN_ON_ONCE(!ddi_translations)) + return; + if (WARN_ON_ONCE(level >= n_entries)) + level = n_entries - 1; + + /* If we're boosting the current, set bit 31 of trans1 */ + if (IS_GEN9_BC(dev_priv) && + dev_priv->vbt.ddi_port_info[port].hdmi_boost_level) + iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE; + + /* Entry 9 is for HDMI: */ + I915_WRITE(DDI_BUF_TRANS_LO(port, 9), + ddi_translations[level].trans1 | iboost_bit); + I915_WRITE(DDI_BUF_TRANS_HI(port, 9), + ddi_translations[level].trans2); +} + +static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv, + enum port port) +{ + i915_reg_t reg = DDI_BUF_CTL(port); + int i; + + for (i = 0; i < 16; i++) { + udelay(1); + if (I915_READ(reg) & DDI_BUF_IS_IDLE) + return; + } + DRM_ERROR("Timeout waiting for DDI BUF %c idle bit\n", port_name(port)); +} + +static u32 hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll) +{ + switch (pll->info->id) { + case DPLL_ID_WRPLL1: + return PORT_CLK_SEL_WRPLL1; + case DPLL_ID_WRPLL2: + return PORT_CLK_SEL_WRPLL2; + case DPLL_ID_SPLL: + return PORT_CLK_SEL_SPLL; + case DPLL_ID_LCPLL_810: + return PORT_CLK_SEL_LCPLL_810; + case DPLL_ID_LCPLL_1350: + return PORT_CLK_SEL_LCPLL_1350; + case DPLL_ID_LCPLL_2700: + return PORT_CLK_SEL_LCPLL_2700; + default: + MISSING_CASE(pll->info->id); + return PORT_CLK_SEL_NONE; + } +} + +static u32 icl_pll_to_ddi_clk_sel(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ + const struct intel_shared_dpll *pll = crtc_state->shared_dpll; + int clock = crtc_state->port_clock; + const enum intel_dpll_id id = pll->info->id; + + switch (id) { + default: + /* + * DPLL_ID_ICL_DPLL0 and DPLL_ID_ICL_DPLL1 should not be used + * here, so do warn if this get passed in + */ + MISSING_CASE(id); + return DDI_CLK_SEL_NONE; + case DPLL_ID_ICL_TBTPLL: + switch (clock) { + case 162000: + return DDI_CLK_SEL_TBT_162; + case 270000: + return DDI_CLK_SEL_TBT_270; + case 540000: + return 
DDI_CLK_SEL_TBT_540; + case 810000: + return DDI_CLK_SEL_TBT_810; + default: + MISSING_CASE(clock); + return DDI_CLK_SEL_NONE; + } + case DPLL_ID_ICL_MGPLL1: + case DPLL_ID_ICL_MGPLL2: + case DPLL_ID_ICL_MGPLL3: + case DPLL_ID_ICL_MGPLL4: + return DDI_CLK_SEL_MG; + } +} + +/* Starting with Haswell, different DDI ports can work in FDI mode for + * connection to the PCH-located connectors. For this, it is necessary to train + * both the DDI port and PCH receiver for the desired DDI buffer settings. + * + * The recommended port to work in FDI mode is DDI E, which we use here. Also, + * please note that when FDI mode is active on DDI E, it shares 2 lines with + * DDI A (which is used for eDP) + */ + +void hsw_fdi_link_train(struct intel_crtc *crtc, + const struct intel_crtc_state *crtc_state) +{ + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_encoder *encoder; + u32 temp, i, rx_ctl_val, ddi_pll_sel; + + for_each_encoder_on_crtc(dev, &crtc->base, encoder) { + WARN_ON(encoder->type != INTEL_OUTPUT_ANALOG); + intel_prepare_dp_ddi_buffers(encoder, crtc_state); + } + + /* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the + * mode set "sequence for CRT port" document: + * - TP1 to TP2 time with the default value + * - FDI delay to 90h + * + * WaFDIAutoLinkSetTimingOverrride:hsw + */ + I915_WRITE(FDI_RX_MISC(PIPE_A), FDI_RX_PWRDN_LANE1_VAL(2) | + FDI_RX_PWRDN_LANE0_VAL(2) | + FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); + + /* Enable the PCH Receiver FDI PLL */ + rx_ctl_val = dev_priv->fdi_rx_config | FDI_RX_ENHANCE_FRAME_ENABLE | + FDI_RX_PLL_ENABLE | + FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes); + I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val); + POSTING_READ(FDI_RX_CTL(PIPE_A)); + udelay(220); + + /* Switch from Rawclk to PCDclk */ + rx_ctl_val |= FDI_PCDCLK; + I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val); + + /* Configure Port Clock Select */ + ddi_pll_sel = hsw_pll_to_ddi_pll_sel(crtc_state->shared_dpll); + I915_WRITE(PORT_CLK_SEL(PORT_E), ddi_pll_sel); + WARN_ON(ddi_pll_sel != PORT_CLK_SEL_SPLL); + + /* Start the training iterating through available voltages and emphasis, + * testing each value twice. */ + for (i = 0; i < ARRAY_SIZE(hsw_ddi_translations_fdi) * 2; i++) { + /* Configure DP_TP_CTL with auto-training */ + I915_WRITE(DP_TP_CTL(PORT_E), + DP_TP_CTL_FDI_AUTOTRAIN | + DP_TP_CTL_ENHANCED_FRAME_ENABLE | + DP_TP_CTL_LINK_TRAIN_PAT1 | + DP_TP_CTL_ENABLE); + + /* Configure and enable DDI_BUF_CTL for DDI E with next voltage. 
+ * DDI E does not support port reversal, the functionality is + * achieved on the PCH side in FDI_RX_CTL, so no need to set the + * port reversal bit */ + I915_WRITE(DDI_BUF_CTL(PORT_E), + DDI_BUF_CTL_ENABLE | + ((crtc_state->fdi_lanes - 1) << 1) | + DDI_BUF_TRANS_SELECT(i / 2)); + POSTING_READ(DDI_BUF_CTL(PORT_E)); + + udelay(600); + + /* Program PCH FDI Receiver TU */ + I915_WRITE(FDI_RX_TUSIZE1(PIPE_A), TU_SIZE(64)); + + /* Enable PCH FDI Receiver with auto-training */ + rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO; + I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val); + POSTING_READ(FDI_RX_CTL(PIPE_A)); + + /* Wait for FDI receiver lane calibration */ + udelay(30); + + /* Unset FDI_RX_MISC pwrdn lanes */ + temp = I915_READ(FDI_RX_MISC(PIPE_A)); + temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK); + I915_WRITE(FDI_RX_MISC(PIPE_A), temp); + POSTING_READ(FDI_RX_MISC(PIPE_A)); + + /* Wait for FDI auto training time */ + udelay(5); + + temp = I915_READ(DP_TP_STATUS(PORT_E)); + if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) { + DRM_DEBUG_KMS("FDI link training done on step %d\n", i); + break; + } + + /* + * Leave things enabled even if we failed to train FDI. + * Results in less fireworks from the state checker. + */ + if (i == ARRAY_SIZE(hsw_ddi_translations_fdi) * 2 - 1) { + DRM_ERROR("FDI link training failed!\n"); + break; + } + + rx_ctl_val &= ~FDI_RX_ENABLE; + I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val); + POSTING_READ(FDI_RX_CTL(PIPE_A)); + + temp = I915_READ(DDI_BUF_CTL(PORT_E)); + temp &= ~DDI_BUF_CTL_ENABLE; + I915_WRITE(DDI_BUF_CTL(PORT_E), temp); + POSTING_READ(DDI_BUF_CTL(PORT_E)); + + /* Disable DP_TP_CTL and FDI_RX_CTL and retry */ + temp = I915_READ(DP_TP_CTL(PORT_E)); + temp &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK); + temp |= DP_TP_CTL_LINK_TRAIN_PAT1; + I915_WRITE(DP_TP_CTL(PORT_E), temp); + POSTING_READ(DP_TP_CTL(PORT_E)); + + intel_wait_ddi_buf_idle(dev_priv, PORT_E); + + /* Reset FDI_RX_MISC pwrdn lanes */ + temp = I915_READ(FDI_RX_MISC(PIPE_A)); + temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK); + temp |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2); + I915_WRITE(FDI_RX_MISC(PIPE_A), temp); + POSTING_READ(FDI_RX_MISC(PIPE_A)); + } + + /* Enable normal pixel sending for FDI */ + I915_WRITE(DP_TP_CTL(PORT_E), + DP_TP_CTL_FDI_AUTOTRAIN | + DP_TP_CTL_LINK_TRAIN_NORMAL | + DP_TP_CTL_ENHANCED_FRAME_ENABLE | + DP_TP_CTL_ENABLE); +} + +static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_digital_port *intel_dig_port = + enc_to_dig_port(&encoder->base); + + intel_dp->DP = intel_dig_port->saved_port_bits | + DDI_BUF_CTL_ENABLE | DDI_BUF_TRANS_SELECT(0); + intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count); +} + +static struct intel_encoder * +intel_ddi_get_crtc_encoder(struct intel_crtc *crtc) +{ + struct drm_device *dev = crtc->base.dev; + struct intel_encoder *encoder, *ret = NULL; + int num_encoders = 0; + + for_each_encoder_on_crtc(dev, &crtc->base, encoder) { + ret = encoder; + num_encoders++; + } + + if (num_encoders != 1) + WARN(1, "%d encoders on crtc for pipe %c\n", num_encoders, + pipe_name(crtc->pipe)); + + BUG_ON(ret == NULL); + return ret; +} + +static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv, + i915_reg_t reg) +{ + int refclk; + int n, p, r; + u32 wrpll; + + wrpll = I915_READ(reg); + switch (wrpll & WRPLL_REF_MASK) { + case WRPLL_REF_SPECIAL_HSW: + /* + * muxed-SSC for BDW. + * non-SSC for non-ULT HSW. 
Check FUSE_STRAP3 + * for the non-SSC reference frequency. + */ + if (IS_HASWELL(dev_priv) && !IS_HSW_ULT(dev_priv)) { + if (I915_READ(FUSE_STRAP3) & HSW_REF_CLK_SELECT) + refclk = 24; + else + refclk = 135; + break; + } + /* fall through */ + case WRPLL_REF_PCH_SSC: + /* + * We could calculate spread here, but our checking + * code only cares about 5% accuracy, and spread is a max of + * 0.5% downspread. + */ + refclk = 135; + break; + case WRPLL_REF_LCPLL: + refclk = 2700; + break; + default: + MISSING_CASE(wrpll); + return 0; + } + + r = wrpll & WRPLL_DIVIDER_REF_MASK; + p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT; + n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT; + + /* Convert to KHz, p & r have a fixed point portion */ + return (refclk * n * 100) / (p * r); +} + +static int skl_calc_wrpll_link(const struct intel_dpll_hw_state *pll_state) +{ + u32 p0, p1, p2, dco_freq; + + p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK; + p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK; + + if (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_MODE(1)) + p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8; + else + p1 = 1; + + + switch (p0) { + case DPLL_CFGCR2_PDIV_1: + p0 = 1; + break; + case DPLL_CFGCR2_PDIV_2: + p0 = 2; + break; + case DPLL_CFGCR2_PDIV_3: + p0 = 3; + break; + case DPLL_CFGCR2_PDIV_7: + p0 = 7; + break; + } + + switch (p2) { + case DPLL_CFGCR2_KDIV_5: + p2 = 5; + break; + case DPLL_CFGCR2_KDIV_2: + p2 = 2; + break; + case DPLL_CFGCR2_KDIV_3: + p2 = 3; + break; + case DPLL_CFGCR2_KDIV_1: + p2 = 1; + break; + } + + dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) + * 24 * 1000; + + dco_freq += (((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) + * 24 * 1000) / 0x8000; + + if (WARN_ON(p0 == 0 || p1 == 0 || p2 == 0)) + return 0; + + return dco_freq / (p0 * p1 * p2 * 5); +} + +int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv, + struct intel_dpll_hw_state *pll_state) +{ + u32 p0, p1, p2, dco_freq, ref_clock; + + p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK; + p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK; + + if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1)) + p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >> + DPLL_CFGCR1_QDIV_RATIO_SHIFT; + else + p1 = 1; + + + switch (p0) { + case DPLL_CFGCR1_PDIV_2: + p0 = 2; + break; + case DPLL_CFGCR1_PDIV_3: + p0 = 3; + break; + case DPLL_CFGCR1_PDIV_5: + p0 = 5; + break; + case DPLL_CFGCR1_PDIV_7: + p0 = 7; + break; + } + + switch (p2) { + case DPLL_CFGCR1_KDIV_1: + p2 = 1; + break; + case DPLL_CFGCR1_KDIV_2: + p2 = 2; + break; + case DPLL_CFGCR1_KDIV_3: + p2 = 3; + break; + } + + ref_clock = cnl_hdmi_pll_ref_clock(dev_priv); + + dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) + * ref_clock; + + dco_freq += (((pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >> + DPLL_CFGCR0_DCO_FRACTION_SHIFT) * ref_clock) / 0x8000; + + if (WARN_ON(p0 == 0 || p1 == 0 || p2 == 0)) + return 0; + + return dco_freq / (p0 * p1 * p2 * 5); +} + +static int icl_calc_tbt_pll_link(struct drm_i915_private *dev_priv, + enum port port) +{ + u32 val = I915_READ(DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK; + + switch (val) { + case DDI_CLK_SEL_NONE: + return 0; + case DDI_CLK_SEL_TBT_162: + return 162000; + case DDI_CLK_SEL_TBT_270: + return 270000; + case DDI_CLK_SEL_TBT_540: + return 540000; + case DDI_CLK_SEL_TBT_810: + return 810000; + default: + MISSING_CASE(val); + return 0; + } +} + +static int icl_calc_mg_pll_link(struct drm_i915_private *dev_priv, + const struct intel_dpll_hw_state 
*pll_state) +{ + u32 m1, m2_int, m2_frac, div1, div2, ref_clock; + u64 tmp; + + ref_clock = dev_priv->cdclk.hw.ref; + + m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK; + m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK; + m2_frac = (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) ? + (pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_FRAC_MASK) >> + MG_PLL_DIV0_FBDIV_FRAC_SHIFT : 0; + + switch (pll_state->mg_clktop2_hsclkctl & + MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) { + case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2: + div1 = 2; + break; + case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3: + div1 = 3; + break; + case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5: + div1 = 5; + break; + case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7: + div1 = 7; + break; + default: + MISSING_CASE(pll_state->mg_clktop2_hsclkctl); + return 0; + } + + div2 = (pll_state->mg_clktop2_hsclkctl & + MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >> + MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT; + + /* div2 value of 0 is same as 1 means no div */ + if (div2 == 0) + div2 = 1; + + /* + * Adjust the original formula to delay the division by 2^22 in order to + * minimize possible rounding errors. + */ + tmp = (u64)m1 * m2_int * ref_clock + + (((u64)m1 * m2_frac * ref_clock) >> 22); + tmp = div_u64(tmp, 5 * div1 * div2); + + return tmp; +} + +static void ddi_dotclock_get(struct intel_crtc_state *pipe_config) +{ + int dotclock; + + if (pipe_config->has_pch_encoder) + dotclock = intel_dotclock_calculate(pipe_config->port_clock, + &pipe_config->fdi_m_n); + else if (intel_crtc_has_dp_encoder(pipe_config)) + dotclock = intel_dotclock_calculate(pipe_config->port_clock, + &pipe_config->dp_m_n); + else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp == 36) + dotclock = pipe_config->port_clock * 2 / 3; + else + dotclock = pipe_config->port_clock; + + if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 && + !intel_crtc_has_dp_encoder(pipe_config)) + dotclock *= 2; + + if (pipe_config->pixel_multiplier) + dotclock /= pipe_config->pixel_multiplier; + + pipe_config->base.adjusted_mode.crtc_clock = dotclock; +} + +static void icl_ddi_clock_get(struct intel_encoder *encoder, + struct intel_crtc_state *pipe_config) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dpll_hw_state *pll_state = &pipe_config->dpll_hw_state; + enum port port = encoder->port; + int link_clock; + + if (intel_port_is_combophy(dev_priv, port)) { + link_clock = cnl_calc_wrpll_link(dev_priv, pll_state); + } else { + enum intel_dpll_id pll_id = intel_get_shared_dpll_id(dev_priv, + pipe_config->shared_dpll); + + if (pll_id == DPLL_ID_ICL_TBTPLL) + link_clock = icl_calc_tbt_pll_link(dev_priv, port); + else + link_clock = icl_calc_mg_pll_link(dev_priv, pll_state); + } + + pipe_config->port_clock = link_clock; + + ddi_dotclock_get(pipe_config); +} + +static void cnl_ddi_clock_get(struct intel_encoder *encoder, + struct intel_crtc_state *pipe_config) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dpll_hw_state *pll_state = &pipe_config->dpll_hw_state; + int link_clock; + + if (pll_state->cfgcr0 & DPLL_CFGCR0_HDMI_MODE) { + link_clock = cnl_calc_wrpll_link(dev_priv, pll_state); + } else { + link_clock = pll_state->cfgcr0 & DPLL_CFGCR0_LINK_RATE_MASK; + + switch (link_clock) { + case DPLL_CFGCR0_LINK_RATE_810: + link_clock = 81000; + break; + case DPLL_CFGCR0_LINK_RATE_1080: + link_clock = 108000; + break; + case DPLL_CFGCR0_LINK_RATE_1350: + link_clock = 135000; + break; + case DPLL_CFGCR0_LINK_RATE_1620: + link_clock = 
162000; + break; + case DPLL_CFGCR0_LINK_RATE_2160: + link_clock = 216000; + break; + case DPLL_CFGCR0_LINK_RATE_2700: + link_clock = 270000; + break; + case DPLL_CFGCR0_LINK_RATE_3240: + link_clock = 324000; + break; + case DPLL_CFGCR0_LINK_RATE_4050: + link_clock = 405000; + break; + default: + WARN(1, "Unsupported link rate\n"); + break; + } + link_clock *= 2; + } + + pipe_config->port_clock = link_clock; + + ddi_dotclock_get(pipe_config); +} + +static void skl_ddi_clock_get(struct intel_encoder *encoder, + struct intel_crtc_state *pipe_config) +{ + struct intel_dpll_hw_state *pll_state = &pipe_config->dpll_hw_state; + int link_clock; + + /* + * ctrl1 register is already shifted for each pll, just use 0 to get + * the internal shift for each field + */ + if (pll_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0)) { + link_clock = skl_calc_wrpll_link(pll_state); + } else { + link_clock = pll_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0); + link_clock >>= DPLL_CTRL1_LINK_RATE_SHIFT(0); + + switch (link_clock) { + case DPLL_CTRL1_LINK_RATE_810: + link_clock = 81000; + break; + case DPLL_CTRL1_LINK_RATE_1080: + link_clock = 108000; + break; + case DPLL_CTRL1_LINK_RATE_1350: + link_clock = 135000; + break; + case DPLL_CTRL1_LINK_RATE_1620: + link_clock = 162000; + break; + case DPLL_CTRL1_LINK_RATE_2160: + link_clock = 216000; + break; + case DPLL_CTRL1_LINK_RATE_2700: + link_clock = 270000; + break; + default: + WARN(1, "Unsupported link rate\n"); + break; + } + link_clock *= 2; + } + + pipe_config->port_clock = link_clock; + + ddi_dotclock_get(pipe_config); +} + +static void hsw_ddi_clock_get(struct intel_encoder *encoder, + struct intel_crtc_state *pipe_config) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + int link_clock = 0; + u32 val, pll; + + val = hsw_pll_to_ddi_pll_sel(pipe_config->shared_dpll); + switch (val & PORT_CLK_SEL_MASK) { + case PORT_CLK_SEL_LCPLL_810: + link_clock = 81000; + break; + case PORT_CLK_SEL_LCPLL_1350: + link_clock = 135000; + break; + case PORT_CLK_SEL_LCPLL_2700: + link_clock = 270000; + break; + case PORT_CLK_SEL_WRPLL1: + link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL(0)); + break; + case PORT_CLK_SEL_WRPLL2: + link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL(1)); + break; + case PORT_CLK_SEL_SPLL: + pll = I915_READ(SPLL_CTL) & SPLL_FREQ_MASK; + if (pll == SPLL_FREQ_810MHz) + link_clock = 81000; + else if (pll == SPLL_FREQ_1350MHz) + link_clock = 135000; + else if (pll == SPLL_FREQ_2700MHz) + link_clock = 270000; + else { + WARN(1, "bad spll freq\n"); + return; + } + break; + default: + WARN(1, "bad port clock sel\n"); + return; + } + + pipe_config->port_clock = link_clock * 2; + + ddi_dotclock_get(pipe_config); +} + +static int bxt_calc_pll_link(const struct intel_dpll_hw_state *pll_state) +{ + struct dpll clock; + + clock.m1 = 2; + clock.m2 = (pll_state->pll0 & PORT_PLL_M2_MASK) << 22; + if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE) + clock.m2 |= pll_state->pll2 & PORT_PLL_M2_FRAC_MASK; + clock.n = (pll_state->pll1 & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT; + clock.p1 = (pll_state->ebb0 & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT; + clock.p2 = (pll_state->ebb0 & PORT_PLL_P2_MASK) >> PORT_PLL_P2_SHIFT; + + return chv_calc_dpll_params(100000, &clock); +} + +static void bxt_ddi_clock_get(struct intel_encoder *encoder, + struct intel_crtc_state *pipe_config) +{ + pipe_config->port_clock = + bxt_calc_pll_link(&pipe_config->dpll_hw_state); + + ddi_dotclock_get(pipe_config); +} + +static void intel_ddi_clock_get(struct intel_encoder 
*encoder, + struct intel_crtc_state *pipe_config) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + + if (INTEL_GEN(dev_priv) >= 11) + icl_ddi_clock_get(encoder, pipe_config); + else if (IS_CANNONLAKE(dev_priv)) + cnl_ddi_clock_get(encoder, pipe_config); + else if (IS_GEN9_LP(dev_priv)) + bxt_ddi_clock_get(encoder, pipe_config); + else if (IS_GEN9_BC(dev_priv)) + skl_ddi_clock_get(encoder, pipe_config); + else if (INTEL_GEN(dev_priv) <= 8) + hsw_ddi_clock_get(encoder, pipe_config); +} + +void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; + u32 temp; + + if (!intel_crtc_has_dp_encoder(crtc_state)) + return; + + WARN_ON(transcoder_is_dsi(cpu_transcoder)); + + temp = TRANS_MSA_SYNC_CLK; + + if (crtc_state->limited_color_range) + temp |= TRANS_MSA_CEA_RANGE; + + switch (crtc_state->pipe_bpp) { + case 18: + temp |= TRANS_MSA_6_BPC; + break; + case 24: + temp |= TRANS_MSA_8_BPC; + break; + case 30: + temp |= TRANS_MSA_10_BPC; + break; + case 36: + temp |= TRANS_MSA_12_BPC; + break; + default: + MISSING_CASE(crtc_state->pipe_bpp); + break; + } + + /* + * As per DP 1.2 spec section 2.3.4.3 while sending + * YCBCR 444 signals we should program MSA MISC1/0 fields with + * colorspace information. The output colorspace encoding is BT601. + */ + if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) + temp |= TRANS_MSA_SAMPLING_444 | TRANS_MSA_CLRSP_YCBCR; + /* + * As per DP 1.4a spec section 2.2.4.3 [MSA Field for Indication + * of Color Encoding Format and Content Color Gamut] while sending + * YCBCR 420 signals we should program MSA MISC1 fields which + * indicate VSC SDP for the Pixel Encoding/Colorimetry Format. 
+ */ + if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) + temp |= TRANS_MSA_USE_VSC_SDP; + I915_WRITE(TRANS_MSA_MISC(cpu_transcoder), temp); +} + +void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state, + bool state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; + u32 temp; + + temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); + if (state == true) + temp |= TRANS_DDI_DP_VC_PAYLOAD_ALLOC; + else + temp &= ~TRANS_DDI_DP_VC_PAYLOAD_ALLOC; + I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp); +} + +void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_encoder *encoder = intel_ddi_get_crtc_encoder(crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; + enum port port = encoder->port; + u32 temp; + + /* Enable TRANS_DDI_FUNC_CTL for the pipe to work in HDMI mode */ + temp = TRANS_DDI_FUNC_ENABLE; + temp |= TRANS_DDI_SELECT_PORT(port); + + switch (crtc_state->pipe_bpp) { + case 18: + temp |= TRANS_DDI_BPC_6; + break; + case 24: + temp |= TRANS_DDI_BPC_8; + break; + case 30: + temp |= TRANS_DDI_BPC_10; + break; + case 36: + temp |= TRANS_DDI_BPC_12; + break; + default: + BUG(); + } + + if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_PVSYNC) + temp |= TRANS_DDI_PVSYNC; + if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_PHSYNC) + temp |= TRANS_DDI_PHSYNC; + + if (cpu_transcoder == TRANSCODER_EDP) { + switch (pipe) { + case PIPE_A: + /* On Haswell, can only use the always-on power well for + * eDP when not using the panel fitter, and when not + * using motion blur mitigation (which we don't + * support). 
*/ + if (crtc_state->pch_pfit.force_thru) + temp |= TRANS_DDI_EDP_INPUT_A_ONOFF; + else + temp |= TRANS_DDI_EDP_INPUT_A_ON; + break; + case PIPE_B: + temp |= TRANS_DDI_EDP_INPUT_B_ONOFF; + break; + case PIPE_C: + temp |= TRANS_DDI_EDP_INPUT_C_ONOFF; + break; + default: + BUG(); + break; + } + } + + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { + if (crtc_state->has_hdmi_sink) + temp |= TRANS_DDI_MODE_SELECT_HDMI; + else + temp |= TRANS_DDI_MODE_SELECT_DVI; + + if (crtc_state->hdmi_scrambling) + temp |= TRANS_DDI_HDMI_SCRAMBLING; + if (crtc_state->hdmi_high_tmds_clock_ratio) + temp |= TRANS_DDI_HIGH_TMDS_CHAR_RATE; + } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) { + temp |= TRANS_DDI_MODE_SELECT_FDI; + temp |= (crtc_state->fdi_lanes - 1) << 1; + } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) { + temp |= TRANS_DDI_MODE_SELECT_DP_MST; + temp |= DDI_PORT_WIDTH(crtc_state->lane_count); + } else { + temp |= TRANS_DDI_MODE_SELECT_DP_SST; + temp |= DDI_PORT_WIDTH(crtc_state->lane_count); + } + + I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp); +} + +void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; + i915_reg_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder); + u32 val = I915_READ(reg); + + val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK | TRANS_DDI_DP_VC_PAYLOAD_ALLOC); + val |= TRANS_DDI_PORT_NONE; + I915_WRITE(reg, val); + + if (dev_priv->quirks & QUIRK_INCREASE_DDI_DISABLED_TIME && + intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { + DRM_DEBUG_KMS("Quirk Increase DDI disabled time\n"); + /* Quirk time at 100ms for reliable operation */ + msleep(100); + } +} + +int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder, + bool enable) +{ + struct drm_device *dev = intel_encoder->base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); + intel_wakeref_t wakeref; + enum pipe pipe = 0; + int ret = 0; + u32 tmp; + + wakeref = intel_display_power_get_if_enabled(dev_priv, + intel_encoder->power_domain); + if (WARN_ON(!wakeref)) + return -ENXIO; + + if (WARN_ON(!intel_encoder->get_hw_state(intel_encoder, &pipe))) { + ret = -EIO; + goto out; + } + + tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe)); + if (enable) + tmp |= TRANS_DDI_HDCP_SIGNALLING; + else + tmp &= ~TRANS_DDI_HDCP_SIGNALLING; + I915_WRITE(TRANS_DDI_FUNC_CTL(pipe), tmp); +out: + intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref); + return ret; +} + +bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector) +{ + struct drm_device *dev = intel_connector->base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_encoder *encoder = intel_connector->encoder; + int type = intel_connector->base.connector_type; + enum port port = encoder->port; + enum transcoder cpu_transcoder; + intel_wakeref_t wakeref; + enum pipe pipe = 0; + u32 tmp; + bool ret; + + wakeref = intel_display_power_get_if_enabled(dev_priv, + encoder->power_domain); + if (!wakeref) + return false; + + if (!encoder->get_hw_state(encoder, &pipe)) { + ret = false; + goto out; + } + + if (HAS_TRANSCODER_EDP(dev_priv) && port == PORT_A) + cpu_transcoder = TRANSCODER_EDP; + else + cpu_transcoder = (enum transcoder) pipe; + + tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); + + switch (tmp & TRANS_DDI_MODE_SELECT_MASK) { + case 
TRANS_DDI_MODE_SELECT_HDMI: + case TRANS_DDI_MODE_SELECT_DVI: + ret = type == DRM_MODE_CONNECTOR_HDMIA; + break; + + case TRANS_DDI_MODE_SELECT_DP_SST: + ret = type == DRM_MODE_CONNECTOR_eDP || + type == DRM_MODE_CONNECTOR_DisplayPort; + break; + + case TRANS_DDI_MODE_SELECT_DP_MST: + /* if the transcoder is in MST state then + * connector isn't connected */ + ret = false; + break; + + case TRANS_DDI_MODE_SELECT_FDI: + ret = type == DRM_MODE_CONNECTOR_VGA; + break; + + default: + ret = false; + break; + } + +out: + intel_display_power_put(dev_priv, encoder->power_domain, wakeref); + + return ret; +} + +static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder, + u8 *pipe_mask, bool *is_dp_mst) +{ + struct drm_device *dev = encoder->base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); + enum port port = encoder->port; + intel_wakeref_t wakeref; + enum pipe p; + u32 tmp; + u8 mst_pipe_mask; + + *pipe_mask = 0; + *is_dp_mst = false; + + wakeref = intel_display_power_get_if_enabled(dev_priv, + encoder->power_domain); + if (!wakeref) + return; + + tmp = I915_READ(DDI_BUF_CTL(port)); + if (!(tmp & DDI_BUF_CTL_ENABLE)) + goto out; + + if (HAS_TRANSCODER_EDP(dev_priv) && port == PORT_A) { + tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)); + + switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { + default: + MISSING_CASE(tmp & TRANS_DDI_EDP_INPUT_MASK); + /* fallthrough */ + case TRANS_DDI_EDP_INPUT_A_ON: + case TRANS_DDI_EDP_INPUT_A_ONOFF: + *pipe_mask = BIT(PIPE_A); + break; + case TRANS_DDI_EDP_INPUT_B_ONOFF: + *pipe_mask = BIT(PIPE_B); + break; + case TRANS_DDI_EDP_INPUT_C_ONOFF: + *pipe_mask = BIT(PIPE_C); + break; + } + + goto out; + } + + mst_pipe_mask = 0; + for_each_pipe(dev_priv, p) { + enum transcoder cpu_transcoder = (enum transcoder)p; + + tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); + + if ((tmp & TRANS_DDI_PORT_MASK) != TRANS_DDI_SELECT_PORT(port)) + continue; + + if ((tmp & TRANS_DDI_MODE_SELECT_MASK) == + TRANS_DDI_MODE_SELECT_DP_MST) + mst_pipe_mask |= BIT(p); + + *pipe_mask |= BIT(p); + } + + if (!*pipe_mask) + DRM_DEBUG_KMS("No pipe for ddi port %c found\n", + port_name(port)); + + if (!mst_pipe_mask && hweight8(*pipe_mask) > 1) { + DRM_DEBUG_KMS("Multiple pipes for non DP-MST port %c (pipe_mask %02x)\n", + port_name(port), *pipe_mask); + *pipe_mask = BIT(ffs(*pipe_mask) - 1); + } + + if (mst_pipe_mask && mst_pipe_mask != *pipe_mask) + DRM_DEBUG_KMS("Conflicting MST and non-MST encoders for port %c (pipe_mask %02x mst_pipe_mask %02x)\n", + port_name(port), *pipe_mask, mst_pipe_mask); + else + *is_dp_mst = mst_pipe_mask; + +out: + if (*pipe_mask && IS_GEN9_LP(dev_priv)) { + tmp = I915_READ(BXT_PHY_CTL(port)); + if ((tmp & (BXT_PHY_CMNLANE_POWERDOWN_ACK | + BXT_PHY_LANE_POWERDOWN_ACK | + BXT_PHY_LANE_ENABLED)) != BXT_PHY_LANE_ENABLED) + DRM_ERROR("Port %c enabled but PHY powered down? 
" + "(PHY_CTL %08x)\n", port_name(port), tmp); + } + + intel_display_power_put(dev_priv, encoder->power_domain, wakeref); +} + +bool intel_ddi_get_hw_state(struct intel_encoder *encoder, + enum pipe *pipe) +{ + u8 pipe_mask; + bool is_mst; + + intel_ddi_get_encoder_pipes(encoder, &pipe_mask, &is_mst); + + if (is_mst || !pipe_mask) + return false; + + *pipe = ffs(pipe_mask) - 1; + + return true; +} + +static inline enum intel_display_power_domain +intel_ddi_main_link_aux_domain(struct intel_digital_port *dig_port) +{ + /* CNL+ HW requires corresponding AUX IOs to be powered up for PSR with + * DC states enabled at the same time, while for driver initiated AUX + * transfers we need the same AUX IOs to be powered but with DC states + * disabled. Accordingly use the AUX power domain here which leaves DC + * states enabled. + * However, for non-A AUX ports the corresponding non-EDP transcoders + * would have already enabled power well 2 and DC_OFF. This means we can + * acquire a wider POWER_DOMAIN_AUX_{B,C,D,F} reference instead of a + * specific AUX_IO reference without powering up any extra wells. + * Note that PSR is enabled only on Port A even though this function + * returns the correct domain for other ports too. + */ + return dig_port->aux_ch == AUX_CH_A ? POWER_DOMAIN_AUX_IO_A : + intel_aux_power_domain(dig_port); +} + +static void intel_ddi_get_power_domains(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_digital_port *dig_port; + + /* + * TODO: Add support for MST encoders. Atm, the following should never + * happen since fake-MST encoders don't set their get_power_domains() + * hook. + */ + if (WARN_ON(intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST))) + return; + + dig_port = enc_to_dig_port(&encoder->base); + intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain); + + /* + * AUX power is only needed for (e)DP mode, and for HDMI mode on TC + * ports. 
+ */ + if (intel_crtc_has_dp_encoder(crtc_state) || + intel_port_is_tc(dev_priv, encoder->port)) + intel_display_power_get(dev_priv, + intel_ddi_main_link_aux_domain(dig_port)); + + /* + * VDSC power is needed when DSC is enabled + */ + if (crtc_state->dsc_params.compression_enable) + intel_display_power_get(dev_priv, + intel_dsc_power_domain(crtc_state)); +} + +void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_encoder *encoder = intel_ddi_get_crtc_encoder(crtc); + enum port port = encoder->port; + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; + + if (cpu_transcoder != TRANSCODER_EDP) + I915_WRITE(TRANS_CLK_SEL(cpu_transcoder), + TRANS_CLK_SEL_PORT(port)); +} + +void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; + + if (cpu_transcoder != TRANSCODER_EDP) + I915_WRITE(TRANS_CLK_SEL(cpu_transcoder), + TRANS_CLK_SEL_DISABLED); +} + +static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv, + enum port port, u8 iboost) +{ + u32 tmp; + + tmp = I915_READ(DISPIO_CR_TX_BMU_CR0); + tmp &= ~(BALANCE_LEG_MASK(port) | BALANCE_LEG_DISABLE(port)); + if (iboost) + tmp |= iboost << BALANCE_LEG_SHIFT(port); + else + tmp |= BALANCE_LEG_DISABLE(port); + I915_WRITE(DISPIO_CR_TX_BMU_CR0, tmp); +} + +static void skl_ddi_set_iboost(struct intel_encoder *encoder, + int level, enum intel_output_type type) +{ + struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base); + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + enum port port = encoder->port; + u8 iboost; + + if (type == INTEL_OUTPUT_HDMI) + iboost = dev_priv->vbt.ddi_port_info[port].hdmi_boost_level; + else + iboost = dev_priv->vbt.ddi_port_info[port].dp_boost_level; + + if (iboost == 0) { + const struct ddi_buf_trans *ddi_translations; + int n_entries; + + if (type == INTEL_OUTPUT_HDMI) + ddi_translations = intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries); + else if (type == INTEL_OUTPUT_EDP) + ddi_translations = intel_ddi_get_buf_trans_edp(dev_priv, port, &n_entries); + else + ddi_translations = intel_ddi_get_buf_trans_dp(dev_priv, port, &n_entries); + + if (WARN_ON_ONCE(!ddi_translations)) + return; + if (WARN_ON_ONCE(level >= n_entries)) + level = n_entries - 1; + + iboost = ddi_translations[level].i_boost; + } + + /* Make sure that the requested I_boost is valid */ + if (iboost && iboost != 0x1 && iboost != 0x3 && iboost != 0x7) { + DRM_ERROR("Invalid I_boost value %u\n", iboost); + return; + } + + _skl_ddi_set_iboost(dev_priv, port, iboost); + + if (port == PORT_A && intel_dig_port->max_lanes == 4) + _skl_ddi_set_iboost(dev_priv, PORT_E, iboost); +} + +static void bxt_ddi_vswing_sequence(struct intel_encoder *encoder, + int level, enum intel_output_type type) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + const struct bxt_ddi_buf_trans *ddi_translations; + enum port port = encoder->port; + int n_entries; + + if (type == INTEL_OUTPUT_HDMI) + ddi_translations = bxt_get_buf_trans_hdmi(dev_priv, &n_entries); + else if (type == INTEL_OUTPUT_EDP) + ddi_translations = bxt_get_buf_trans_edp(dev_priv, &n_entries); + else + ddi_translations = bxt_get_buf_trans_dp(dev_priv, &n_entries); + + if (WARN_ON_ONCE(!ddi_translations)) + return; + if 
(WARN_ON_ONCE(level >= n_entries)) + level = n_entries - 1; + + bxt_ddi_phy_set_signal_level(dev_priv, port, + ddi_translations[level].margin, + ddi_translations[level].scale, + ddi_translations[level].enable, + ddi_translations[level].deemphasis); +} + +u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + enum port port = encoder->port; + int n_entries; + + if (INTEL_GEN(dev_priv) >= 11) { + if (intel_port_is_combophy(dev_priv, port)) + icl_get_combo_buf_trans(dev_priv, port, encoder->type, + intel_dp->link_rate, &n_entries); + else + n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations); + } else if (IS_CANNONLAKE(dev_priv)) { + if (encoder->type == INTEL_OUTPUT_EDP) + cnl_get_buf_trans_edp(dev_priv, &n_entries); + else + cnl_get_buf_trans_dp(dev_priv, &n_entries); + } else if (IS_GEN9_LP(dev_priv)) { + if (encoder->type == INTEL_OUTPUT_EDP) + bxt_get_buf_trans_edp(dev_priv, &n_entries); + else + bxt_get_buf_trans_dp(dev_priv, &n_entries); + } else { + if (encoder->type == INTEL_OUTPUT_EDP) + intel_ddi_get_buf_trans_edp(dev_priv, port, &n_entries); + else + intel_ddi_get_buf_trans_dp(dev_priv, port, &n_entries); + } + + if (WARN_ON(n_entries < 1)) + n_entries = 1; + if (WARN_ON(n_entries > ARRAY_SIZE(index_to_dp_signal_levels))) + n_entries = ARRAY_SIZE(index_to_dp_signal_levels); + + return index_to_dp_signal_levels[n_entries - 1] & + DP_TRAIN_VOLTAGE_SWING_MASK; +} + +/* + * We assume that the full set of pre-emphasis values can be + * used on all DDI platforms. Should that change we need to + * rethink this code. + */ +u8 intel_ddi_dp_pre_emphasis_max(struct intel_encoder *encoder, u8 voltage_swing) +{ + switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { + case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: + return DP_TRAIN_PRE_EMPH_LEVEL_3; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: + return DP_TRAIN_PRE_EMPH_LEVEL_2; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: + return DP_TRAIN_PRE_EMPH_LEVEL_1; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: + default: + return DP_TRAIN_PRE_EMPH_LEVEL_0; + } +} + +static void cnl_ddi_vswing_program(struct intel_encoder *encoder, + int level, enum intel_output_type type) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + const struct cnl_ddi_buf_trans *ddi_translations; + enum port port = encoder->port; + int n_entries, ln; + u32 val; + + if (type == INTEL_OUTPUT_HDMI) + ddi_translations = cnl_get_buf_trans_hdmi(dev_priv, &n_entries); + else if (type == INTEL_OUTPUT_EDP) + ddi_translations = cnl_get_buf_trans_edp(dev_priv, &n_entries); + else + ddi_translations = cnl_get_buf_trans_dp(dev_priv, &n_entries); + + if (WARN_ON_ONCE(!ddi_translations)) + return; + if (WARN_ON_ONCE(level >= n_entries)) + level = n_entries - 1; + + /* Set PORT_TX_DW5 Scaling Mode Sel to 010b. */ + val = I915_READ(CNL_PORT_TX_DW5_LN0(port)); + val &= ~SCALING_MODE_SEL_MASK; + val |= SCALING_MODE_SEL(2); + I915_WRITE(CNL_PORT_TX_DW5_GRP(port), val); + + /* Program PORT_TX_DW2 */ + val = I915_READ(CNL_PORT_TX_DW2_LN0(port)); + val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK | + RCOMP_SCALAR_MASK); + val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_sel); + val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_sel); + /* Rcomp scalar is fixed as 0x98 for every table entry */ + val |= RCOMP_SCALAR(0x98); + I915_WRITE(CNL_PORT_TX_DW2_GRP(port), val); + + /* Program PORT_TX_DW4 */ + /* We cannot write to GRP. 
It would overrite individual loadgen */ + for (ln = 0; ln < 4; ln++) { + val = I915_READ(CNL_PORT_TX_DW4_LN(ln, port)); + val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | + CURSOR_COEFF_MASK); + val |= POST_CURSOR_1(ddi_translations[level].dw4_post_cursor_1); + val |= POST_CURSOR_2(ddi_translations[level].dw4_post_cursor_2); + val |= CURSOR_COEFF(ddi_translations[level].dw4_cursor_coeff); + I915_WRITE(CNL_PORT_TX_DW4_LN(ln, port), val); + } + + /* Program PORT_TX_DW5 */ + /* All DW5 values are fixed for every table entry */ + val = I915_READ(CNL_PORT_TX_DW5_LN0(port)); + val &= ~RTERM_SELECT_MASK; + val |= RTERM_SELECT(6); + val |= TAP3_DISABLE; + I915_WRITE(CNL_PORT_TX_DW5_GRP(port), val); + + /* Program PORT_TX_DW7 */ + val = I915_READ(CNL_PORT_TX_DW7_LN0(port)); + val &= ~N_SCALAR_MASK; + val |= N_SCALAR(ddi_translations[level].dw7_n_scalar); + I915_WRITE(CNL_PORT_TX_DW7_GRP(port), val); +} + +static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder, + int level, enum intel_output_type type) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + enum port port = encoder->port; + int width, rate, ln; + u32 val; + + if (type == INTEL_OUTPUT_HDMI) { + width = 4; + rate = 0; /* Rate is always < than 6GHz for HDMI */ + } else { + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + + width = intel_dp->lane_count; + rate = intel_dp->link_rate; + } + + /* + * 1. If port type is eDP or DP, + * set PORT_PCS_DW1 cmnkeeper_enable to 1b, + * else clear to 0b. + */ + val = I915_READ(CNL_PORT_PCS_DW1_LN0(port)); + if (type != INTEL_OUTPUT_HDMI) + val |= COMMON_KEEPER_EN; + else + val &= ~COMMON_KEEPER_EN; + I915_WRITE(CNL_PORT_PCS_DW1_GRP(port), val); + + /* 2. Program loadgen select */ + /* + * Program PORT_TX_DW4_LN depending on Bit rate and used lanes + * <= 6 GHz and 4 lanes (LN0=0, LN1=1, LN2=1, LN3=1) + * <= 6 GHz and 1,2 lanes (LN0=0, LN1=1, LN2=1, LN3=0) + * > 6 GHz (LN0=0, LN1=0, LN2=0, LN3=0) + */ + for (ln = 0; ln <= 3; ln++) { + val = I915_READ(CNL_PORT_TX_DW4_LN(ln, port)); + val &= ~LOADGEN_SELECT; + + if ((rate <= 600000 && width == 4 && ln >= 1) || + (rate <= 600000 && width < 4 && (ln == 1 || ln == 2))) { + val |= LOADGEN_SELECT; + } + I915_WRITE(CNL_PORT_TX_DW4_LN(ln, port), val); + } + + /* 3. Set PORT_CL_DW5 SUS Clock Config to 11b */ + val = I915_READ(CNL_PORT_CL1CM_DW5); + val |= SUS_CLOCK_CONFIG; + I915_WRITE(CNL_PORT_CL1CM_DW5, val); + + /* 4. Clear training enable to change swing values */ + val = I915_READ(CNL_PORT_TX_DW5_LN0(port)); + val &= ~TX_TRAINING_EN; + I915_WRITE(CNL_PORT_TX_DW5_GRP(port), val); + + /* 5. Program swing and de-emphasis */ + cnl_ddi_vswing_program(encoder, level, type); + + /* 6. Set training enable to trigger update */ + val = I915_READ(CNL_PORT_TX_DW5_LN0(port)); + val |= TX_TRAINING_EN; + I915_WRITE(CNL_PORT_TX_DW5_GRP(port), val); +} + +static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv, + u32 level, enum port port, int type, + int rate) +{ + const struct cnl_ddi_buf_trans *ddi_translations = NULL; + u32 n_entries, val; + int ln; + + ddi_translations = icl_get_combo_buf_trans(dev_priv, port, type, + rate, &n_entries); + if (!ddi_translations) + return; + + if (level >= n_entries) { + DRM_DEBUG_KMS("DDI translation not found for level %d. 
Using %d instead.", level, n_entries - 1); + level = n_entries - 1; + } + + /* Set PORT_TX_DW5 */ + val = I915_READ(ICL_PORT_TX_DW5_LN0(port)); + val &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK | + TAP2_DISABLE | TAP3_DISABLE); + val |= SCALING_MODE_SEL(0x2); + val |= RTERM_SELECT(0x6); + val |= TAP3_DISABLE; + I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val); + + /* Program PORT_TX_DW2 */ + val = I915_READ(ICL_PORT_TX_DW2_LN0(port)); + val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK | + RCOMP_SCALAR_MASK); + val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_sel); + val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_sel); + /* Program Rcomp scalar for every table entry */ + val |= RCOMP_SCALAR(0x98); + I915_WRITE(ICL_PORT_TX_DW2_GRP(port), val); + + /* Program PORT_TX_DW4 */ + /* We cannot write to GRP. It would overwrite individual loadgen. */ + for (ln = 0; ln <= 3; ln++) { + val = I915_READ(ICL_PORT_TX_DW4_LN(ln, port)); + val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | + CURSOR_COEFF_MASK); + val |= POST_CURSOR_1(ddi_translations[level].dw4_post_cursor_1); + val |= POST_CURSOR_2(ddi_translations[level].dw4_post_cursor_2); + val |= CURSOR_COEFF(ddi_translations[level].dw4_cursor_coeff); + I915_WRITE(ICL_PORT_TX_DW4_LN(ln, port), val); + } + + /* Program PORT_TX_DW7 */ + val = I915_READ(ICL_PORT_TX_DW7_LN0(port)); + val &= ~N_SCALAR_MASK; + val |= N_SCALAR(ddi_translations[level].dw7_n_scalar); + I915_WRITE(ICL_PORT_TX_DW7_GRP(port), val); +} + +static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder, + u32 level, + enum intel_output_type type) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + enum port port = encoder->port; + int width = 0; + int rate = 0; + u32 val; + int ln = 0; + + if (type == INTEL_OUTPUT_HDMI) { + width = 4; + /* Rate is always < than 6GHz for HDMI */ + } else { + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + + width = intel_dp->lane_count; + rate = intel_dp->link_rate; + } + + /* + * 1. If port type is eDP or DP, + * set PORT_PCS_DW1 cmnkeeper_enable to 1b, + * else clear to 0b. + */ + val = I915_READ(ICL_PORT_PCS_DW1_LN0(port)); + if (type == INTEL_OUTPUT_HDMI) + val &= ~COMMON_KEEPER_EN; + else + val |= COMMON_KEEPER_EN; + I915_WRITE(ICL_PORT_PCS_DW1_GRP(port), val); + + /* 2. Program loadgen select */ + /* + * Program PORT_TX_DW4_LN depending on Bit rate and used lanes + * <= 6 GHz and 4 lanes (LN0=0, LN1=1, LN2=1, LN3=1) + * <= 6 GHz and 1,2 lanes (LN0=0, LN1=1, LN2=1, LN3=0) + * > 6 GHz (LN0=0, LN1=0, LN2=0, LN3=0) + */ + for (ln = 0; ln <= 3; ln++) { + val = I915_READ(ICL_PORT_TX_DW4_LN(ln, port)); + val &= ~LOADGEN_SELECT; + + if ((rate <= 600000 && width == 4 && ln >= 1) || + (rate <= 600000 && width < 4 && (ln == 1 || ln == 2))) { + val |= LOADGEN_SELECT; + } + I915_WRITE(ICL_PORT_TX_DW4_LN(ln, port), val); + } + + /* 3. Set PORT_CL_DW5 SUS Clock Config to 11b */ + val = I915_READ(ICL_PORT_CL_DW5(port)); + val |= SUS_CLOCK_CONFIG; + I915_WRITE(ICL_PORT_CL_DW5(port), val); + + /* 4. Clear training enable to change swing values */ + val = I915_READ(ICL_PORT_TX_DW5_LN0(port)); + val &= ~TX_TRAINING_EN; + I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val); + + /* 5. Program swing and de-emphasis */ + icl_ddi_combo_vswing_program(dev_priv, level, port, type, rate); + + /* 6. 
Set training enable to trigger update */ + val = I915_READ(ICL_PORT_TX_DW5_LN0(port)); + val |= TX_TRAINING_EN; + I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val); +} + +static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder, + int link_clock, + u32 level) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + enum port port = encoder->port; + const struct icl_mg_phy_ddi_buf_trans *ddi_translations; + u32 n_entries, val; + int ln; + + n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations); + ddi_translations = icl_mg_phy_ddi_translations; + /* The table does not have values for level 3 and level 9. */ + if (level >= n_entries || level == 3 || level == 9) { + DRM_DEBUG_KMS("DDI translation not found for level %d. Using %d instead.", + level, n_entries - 2); + level = n_entries - 2; + } + + /* Set MG_TX_LINK_PARAMS cri_use_fs32 to 0. */ + for (ln = 0; ln < 2; ln++) { + val = I915_READ(MG_TX1_LINK_PARAMS(ln, port)); + val &= ~CRI_USE_FS32; + I915_WRITE(MG_TX1_LINK_PARAMS(ln, port), val); + + val = I915_READ(MG_TX2_LINK_PARAMS(ln, port)); + val &= ~CRI_USE_FS32; + I915_WRITE(MG_TX2_LINK_PARAMS(ln, port), val); + } + + /* Program MG_TX_SWINGCTRL with values from vswing table */ + for (ln = 0; ln < 2; ln++) { + val = I915_READ(MG_TX1_SWINGCTRL(ln, port)); + val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK; + val |= CRI_TXDEEMPH_OVERRIDE_17_12( + ddi_translations[level].cri_txdeemph_override_17_12); + I915_WRITE(MG_TX1_SWINGCTRL(ln, port), val); + + val = I915_READ(MG_TX2_SWINGCTRL(ln, port)); + val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK; + val |= CRI_TXDEEMPH_OVERRIDE_17_12( + ddi_translations[level].cri_txdeemph_override_17_12); + I915_WRITE(MG_TX2_SWINGCTRL(ln, port), val); + } + + /* Program MG_TX_DRVCTRL with values from vswing table */ + for (ln = 0; ln < 2; ln++) { + val = I915_READ(MG_TX1_DRVCTRL(ln, port)); + val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK | + CRI_TXDEEMPH_OVERRIDE_5_0_MASK); + val |= CRI_TXDEEMPH_OVERRIDE_5_0( + ddi_translations[level].cri_txdeemph_override_5_0) | + CRI_TXDEEMPH_OVERRIDE_11_6( + ddi_translations[level].cri_txdeemph_override_11_6) | + CRI_TXDEEMPH_OVERRIDE_EN; + I915_WRITE(MG_TX1_DRVCTRL(ln, port), val); + + val = I915_READ(MG_TX2_DRVCTRL(ln, port)); + val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK | + CRI_TXDEEMPH_OVERRIDE_5_0_MASK); + val |= CRI_TXDEEMPH_OVERRIDE_5_0( + ddi_translations[level].cri_txdeemph_override_5_0) | + CRI_TXDEEMPH_OVERRIDE_11_6( + ddi_translations[level].cri_txdeemph_override_11_6) | + CRI_TXDEEMPH_OVERRIDE_EN; + I915_WRITE(MG_TX2_DRVCTRL(ln, port), val); + + /* FIXME: Program CRI_LOADGEN_SEL after the spec is updated */ + } + + /* + * Program MG_CLKHUB with value from frequency table + * In case of Legacy mode on MG PHY, both TX1 and TX2 enabled so use the + * values from table for which TX1 and TX2 enabled. 
+ */ + for (ln = 0; ln < 2; ln++) { + val = I915_READ(MG_CLKHUB(ln, port)); + if (link_clock < 300000) + val |= CFG_LOW_RATE_LKREN_EN; + else + val &= ~CFG_LOW_RATE_LKREN_EN; + I915_WRITE(MG_CLKHUB(ln, port), val); + } + + /* Program the MG_TX_DCC based on the link frequency */ + for (ln = 0; ln < 2; ln++) { + val = I915_READ(MG_TX1_DCC(ln, port)); + val &= ~CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK; + if (link_clock <= 500000) { + val &= ~CFG_AMI_CK_DIV_OVERRIDE_EN; + } else { + val |= CFG_AMI_CK_DIV_OVERRIDE_EN | + CFG_AMI_CK_DIV_OVERRIDE_VAL(1); + } + I915_WRITE(MG_TX1_DCC(ln, port), val); + + val = I915_READ(MG_TX2_DCC(ln, port)); + val &= ~CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK; + if (link_clock <= 500000) { + val &= ~CFG_AMI_CK_DIV_OVERRIDE_EN; + } else { + val |= CFG_AMI_CK_DIV_OVERRIDE_EN | + CFG_AMI_CK_DIV_OVERRIDE_VAL(1); + } + I915_WRITE(MG_TX2_DCC(ln, port), val); + } + + /* Program MG_TX_PISO_READLOAD with values from vswing table */ + for (ln = 0; ln < 2; ln++) { + val = I915_READ(MG_TX1_PISO_READLOAD(ln, port)); + val |= CRI_CALCINIT; + I915_WRITE(MG_TX1_PISO_READLOAD(ln, port), val); + + val = I915_READ(MG_TX2_PISO_READLOAD(ln, port)); + val |= CRI_CALCINIT; + I915_WRITE(MG_TX2_PISO_READLOAD(ln, port), val); + } +} + +static void icl_ddi_vswing_sequence(struct intel_encoder *encoder, + int link_clock, + u32 level, + enum intel_output_type type) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + enum port port = encoder->port; + + if (intel_port_is_combophy(dev_priv, port)) + icl_combo_phy_ddi_vswing_sequence(encoder, level, type); + else + icl_mg_phy_ddi_vswing_sequence(encoder, link_clock, level); +} + +static u32 translate_signal_level(int signal_levels) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(index_to_dp_signal_levels); i++) { + if (index_to_dp_signal_levels[i] == signal_levels) + return i; + } + + WARN(1, "Unsupported voltage swing/pre-emphasis level: 0x%x\n", + signal_levels); + + return 0; +} + +static u32 intel_ddi_dp_level(struct intel_dp *intel_dp) +{ + u8 train_set = intel_dp->train_set[0]; + int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | + DP_TRAIN_PRE_EMPHASIS_MASK); + + return translate_signal_level(signal_levels); +} + +u32 bxt_signal_levels(struct intel_dp *intel_dp) +{ + struct intel_digital_port *dport = dp_to_dig_port(intel_dp); + struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev); + struct intel_encoder *encoder = &dport->base; + int level = intel_ddi_dp_level(intel_dp); + + if (INTEL_GEN(dev_priv) >= 11) + icl_ddi_vswing_sequence(encoder, intel_dp->link_rate, + level, encoder->type); + else if (IS_CANNONLAKE(dev_priv)) + cnl_ddi_vswing_sequence(encoder, level, encoder->type); + else + bxt_ddi_vswing_sequence(encoder, level, encoder->type); + + return 0; +} + +u32 ddi_signal_levels(struct intel_dp *intel_dp) +{ + struct intel_digital_port *dport = dp_to_dig_port(intel_dp); + struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev); + struct intel_encoder *encoder = &dport->base; + int level = intel_ddi_dp_level(intel_dp); + + if (IS_GEN9_BC(dev_priv)) + skl_ddi_set_iboost(encoder, level, encoder->type); + + return DDI_BUF_TRANS_SELECT(level); +} + +static inline +u32 icl_dpclka_cfgcr0_clk_off(struct drm_i915_private *dev_priv, + enum port port) +{ + if (intel_port_is_combophy(dev_priv, port)) { + return ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(port); + } else if (intel_port_is_tc(dev_priv, port)) { + enum tc_port tc_port = intel_port_to_tc(dev_priv, port); + + return ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port); + } + + 
return 0; +} + +static void icl_map_plls_to_ports(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_shared_dpll *pll = crtc_state->shared_dpll; + enum port port = encoder->port; + u32 val; + + mutex_lock(&dev_priv->dpll_lock); + + val = I915_READ(DPCLKA_CFGCR0_ICL); + WARN_ON((val & icl_dpclka_cfgcr0_clk_off(dev_priv, port)) == 0); + + if (intel_port_is_combophy(dev_priv, port)) { + val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port); + val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port); + I915_WRITE(DPCLKA_CFGCR0_ICL, val); + POSTING_READ(DPCLKA_CFGCR0_ICL); + } + + val &= ~icl_dpclka_cfgcr0_clk_off(dev_priv, port); + I915_WRITE(DPCLKA_CFGCR0_ICL, val); + + mutex_unlock(&dev_priv->dpll_lock); +} + +static void icl_unmap_plls_to_ports(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + enum port port = encoder->port; + u32 val; + + mutex_lock(&dev_priv->dpll_lock); + + val = I915_READ(DPCLKA_CFGCR0_ICL); + val |= icl_dpclka_cfgcr0_clk_off(dev_priv, port); + I915_WRITE(DPCLKA_CFGCR0_ICL, val); + + mutex_unlock(&dev_priv->dpll_lock); +} + +void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + u32 val; + enum port port; + u32 port_mask; + bool ddi_clk_needed; + + /* + * In case of DP MST, we sanitize the primary encoder only, not the + * virtual ones. + */ + if (encoder->type == INTEL_OUTPUT_DP_MST) + return; + + if (!encoder->base.crtc && intel_encoder_is_dp(encoder)) { + u8 pipe_mask; + bool is_mst; + + intel_ddi_get_encoder_pipes(encoder, &pipe_mask, &is_mst); + /* + * In the unlikely case that BIOS enables DP in MST mode, just + * warn since our MST HW readout is incomplete. + */ + if (WARN_ON(is_mst)) + return; + } + + port_mask = BIT(encoder->port); + ddi_clk_needed = encoder->base.crtc; + + if (encoder->type == INTEL_OUTPUT_DSI) { + struct intel_encoder *other_encoder; + + port_mask = intel_dsi_encoder_ports(encoder); + /* + * Sanity check that we haven't incorrectly registered another + * encoder using any of the ports of this DSI encoder. + */ + for_each_intel_encoder(&dev_priv->drm, other_encoder) { + if (other_encoder == encoder) + continue; + + if (WARN_ON(port_mask & BIT(other_encoder->port))) + return; + } + /* + * For DSI we keep the ddi clocks gated + * except during enable/disable sequence. + */ + ddi_clk_needed = false; + } + + val = I915_READ(DPCLKA_CFGCR0_ICL); + for_each_port_masked(port, port_mask) { + bool ddi_clk_ungated = !(val & + icl_dpclka_cfgcr0_clk_off(dev_priv, + port)); + + if (ddi_clk_needed == ddi_clk_ungated) + continue; + + /* + * Punt on the case now where clock is gated, but it would + * be needed by the port. Something else is really broken then. 
+ */ + if (WARN_ON(ddi_clk_needed)) + continue; + + DRM_NOTE("Port %c is disabled/in DSI mode with an ungated DDI clock, gate it\n", + port_name(port)); + val |= icl_dpclka_cfgcr0_clk_off(dev_priv, port); + I915_WRITE(DPCLKA_CFGCR0_ICL, val); + } +} + +static void intel_ddi_clk_select(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + enum port port = encoder->port; + u32 val; + const struct intel_shared_dpll *pll = crtc_state->shared_dpll; + + if (WARN_ON(!pll)) + return; + + mutex_lock(&dev_priv->dpll_lock); + + if (INTEL_GEN(dev_priv) >= 11) { + if (!intel_port_is_combophy(dev_priv, port)) + I915_WRITE(DDI_CLK_SEL(port), + icl_pll_to_ddi_clk_sel(encoder, crtc_state)); + } else if (IS_CANNONLAKE(dev_priv)) { + /* Configure DPCLKA_CFGCR0 to map the DPLL to the DDI. */ + val = I915_READ(DPCLKA_CFGCR0); + val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port); + val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port); + I915_WRITE(DPCLKA_CFGCR0, val); + + /* + * Configure DPCLKA_CFGCR0 to turn on the clock for the DDI. + * This step and the step before must be done with separate + * register writes. + */ + val = I915_READ(DPCLKA_CFGCR0); + val &= ~DPCLKA_CFGCR0_DDI_CLK_OFF(port); + I915_WRITE(DPCLKA_CFGCR0, val); + } else if (IS_GEN9_BC(dev_priv)) { + /* DDI -> PLL mapping */ + val = I915_READ(DPLL_CTRL2); + + val &= ~(DPLL_CTRL2_DDI_CLK_OFF(port) | + DPLL_CTRL2_DDI_CLK_SEL_MASK(port)); + val |= (DPLL_CTRL2_DDI_CLK_SEL(pll->info->id, port) | + DPLL_CTRL2_DDI_SEL_OVERRIDE(port)); + + I915_WRITE(DPLL_CTRL2, val); + + } else if (INTEL_GEN(dev_priv) < 9) { + I915_WRITE(PORT_CLK_SEL(port), hsw_pll_to_ddi_pll_sel(pll)); + } + + mutex_unlock(&dev_priv->dpll_lock); +} + +static void intel_ddi_clk_disable(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + enum port port = encoder->port; + + if (INTEL_GEN(dev_priv) >= 11) { + if (!intel_port_is_combophy(dev_priv, port)) + I915_WRITE(DDI_CLK_SEL(port), DDI_CLK_SEL_NONE); + } else if (IS_CANNONLAKE(dev_priv)) { + I915_WRITE(DPCLKA_CFGCR0, I915_READ(DPCLKA_CFGCR0) | + DPCLKA_CFGCR0_DDI_CLK_OFF(port)); + } else if (IS_GEN9_BC(dev_priv)) { + I915_WRITE(DPLL_CTRL2, I915_READ(DPLL_CTRL2) | + DPLL_CTRL2_DDI_CLK_OFF(port)); + } else if (INTEL_GEN(dev_priv) < 9) { + I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE); + } +} + +static void icl_enable_phy_clock_gating(struct intel_digital_port *dig_port) +{ + struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); + enum port port = dig_port->base.port; + enum tc_port tc_port = intel_port_to_tc(dev_priv, port); + u32 val; + int ln; + + if (tc_port == PORT_TC_NONE) + return; + + for (ln = 0; ln < 2; ln++) { + val = I915_READ(MG_DP_MODE(ln, port)); + val |= MG_DP_MODE_CFG_TR2PWR_GATING | + MG_DP_MODE_CFG_TRPWR_GATING | + MG_DP_MODE_CFG_CLNPWR_GATING | + MG_DP_MODE_CFG_DIGPWR_GATING | + MG_DP_MODE_CFG_GAONPWR_GATING; + I915_WRITE(MG_DP_MODE(ln, port), val); + } + + val = I915_READ(MG_MISC_SUS0(tc_port)); + val |= MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE(3) | + MG_MISC_SUS0_CFG_TR2PWR_GATING | + MG_MISC_SUS0_CFG_CL2PWR_GATING | + MG_MISC_SUS0_CFG_GAONPWR_GATING | + MG_MISC_SUS0_CFG_TRPWR_GATING | + MG_MISC_SUS0_CFG_CL1PWR_GATING | + MG_MISC_SUS0_CFG_DGPWR_GATING; + I915_WRITE(MG_MISC_SUS0(tc_port), val); +} + +static void icl_disable_phy_clock_gating(struct intel_digital_port *dig_port) +{ + struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); + enum port 
port = dig_port->base.port; + enum tc_port tc_port = intel_port_to_tc(dev_priv, port); + u32 val; + int ln; + + if (tc_port == PORT_TC_NONE) + return; + + for (ln = 0; ln < 2; ln++) { + val = I915_READ(MG_DP_MODE(ln, port)); + val &= ~(MG_DP_MODE_CFG_TR2PWR_GATING | + MG_DP_MODE_CFG_TRPWR_GATING | + MG_DP_MODE_CFG_CLNPWR_GATING | + MG_DP_MODE_CFG_DIGPWR_GATING | + MG_DP_MODE_CFG_GAONPWR_GATING); + I915_WRITE(MG_DP_MODE(ln, port), val); + } + + val = I915_READ(MG_MISC_SUS0(tc_port)); + val &= ~(MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE_MASK | + MG_MISC_SUS0_CFG_TR2PWR_GATING | + MG_MISC_SUS0_CFG_CL2PWR_GATING | + MG_MISC_SUS0_CFG_GAONPWR_GATING | + MG_MISC_SUS0_CFG_TRPWR_GATING | + MG_MISC_SUS0_CFG_CL1PWR_GATING | + MG_MISC_SUS0_CFG_DGPWR_GATING); + I915_WRITE(MG_MISC_SUS0(tc_port), val); +} + +static void icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port) +{ + struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); + enum port port = intel_dig_port->base.port; + enum tc_port tc_port = intel_port_to_tc(dev_priv, port); + u32 ln0, ln1, lane_info; + + if (tc_port == PORT_TC_NONE || intel_dig_port->tc_type == TC_PORT_TBT) + return; + + ln0 = I915_READ(MG_DP_MODE(0, port)); + ln1 = I915_READ(MG_DP_MODE(1, port)); + + switch (intel_dig_port->tc_type) { + case TC_PORT_TYPEC: + ln0 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE); + ln1 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE); + + lane_info = (I915_READ(PORT_TX_DFLEXDPSP) & + DP_LANE_ASSIGNMENT_MASK(tc_port)) >> + DP_LANE_ASSIGNMENT_SHIFT(tc_port); + + switch (lane_info) { + case 0x1: + case 0x4: + break; + case 0x2: + ln0 |= MG_DP_MODE_CFG_DP_X1_MODE; + break; + case 0x3: + ln0 |= MG_DP_MODE_CFG_DP_X1_MODE | + MG_DP_MODE_CFG_DP_X2_MODE; + break; + case 0x8: + ln1 |= MG_DP_MODE_CFG_DP_X1_MODE; + break; + case 0xC: + ln1 |= MG_DP_MODE_CFG_DP_X1_MODE | + MG_DP_MODE_CFG_DP_X2_MODE; + break; + case 0xF: + ln0 |= MG_DP_MODE_CFG_DP_X1_MODE | + MG_DP_MODE_CFG_DP_X2_MODE; + ln1 |= MG_DP_MODE_CFG_DP_X1_MODE | + MG_DP_MODE_CFG_DP_X2_MODE; + break; + default: + MISSING_CASE(lane_info); + } + break; + + case TC_PORT_LEGACY: + ln0 |= MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE; + ln1 |= MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE; + break; + + default: + MISSING_CASE(intel_dig_port->tc_type); + return; + } + + I915_WRITE(MG_DP_MODE(0, port), ln0); + I915_WRITE(MG_DP_MODE(1, port), ln1); +} + +static void intel_dp_sink_set_fec_ready(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) +{ + if (!crtc_state->fec_enable) + return; + + if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_FEC_CONFIGURATION, DP_FEC_READY) <= 0) + DRM_DEBUG_KMS("Failed to set FEC_READY in the sink\n"); +} + +static void intel_ddi_enable_fec(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + enum port port = encoder->port; + u32 val; + + if (!crtc_state->fec_enable) + return; + + val = I915_READ(DP_TP_CTL(port)); + val |= DP_TP_CTL_FEC_ENABLE; + I915_WRITE(DP_TP_CTL(port), val); + + if (intel_wait_for_register(&dev_priv->uncore, DP_TP_STATUS(port), + DP_TP_STATUS_FEC_ENABLE_LIVE, + DP_TP_STATUS_FEC_ENABLE_LIVE, + 1)) + DRM_ERROR("Timed out waiting for FEC Enable Status\n"); +} + +static void intel_ddi_disable_fec_state(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + enum port port = encoder->port; + u32 val; 
+ + if (!crtc_state->fec_enable) + return; + + val = I915_READ(DP_TP_CTL(port)); + val &= ~DP_TP_CTL_FEC_ENABLE; + I915_WRITE(DP_TP_CTL(port), val); + POSTING_READ(DP_TP_CTL(port)); +} + +static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + enum port port = encoder->port; + struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); + bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST); + int level = intel_ddi_dp_level(intel_dp); + + WARN_ON(is_mst && (port == PORT_A || port == PORT_E)); + + intel_dp_set_link_params(intel_dp, crtc_state->port_clock, + crtc_state->lane_count, is_mst); + + intel_edp_panel_on(intel_dp); + + intel_ddi_clk_select(encoder, crtc_state); + + intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain); + + icl_program_mg_dp_mode(dig_port); + icl_disable_phy_clock_gating(dig_port); + + if (INTEL_GEN(dev_priv) >= 11) + icl_ddi_vswing_sequence(encoder, crtc_state->port_clock, + level, encoder->type); + else if (IS_CANNONLAKE(dev_priv)) + cnl_ddi_vswing_sequence(encoder, level, encoder->type); + else if (IS_GEN9_LP(dev_priv)) + bxt_ddi_vswing_sequence(encoder, level, encoder->type); + else + intel_prepare_dp_ddi_buffers(encoder, crtc_state); + + if (intel_port_is_combophy(dev_priv, port)) { + bool lane_reversal = + dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL; + + intel_combo_phy_power_up_lanes(dev_priv, port, false, + crtc_state->lane_count, + lane_reversal); + } + + intel_ddi_init_dp_buf_reg(encoder); + if (!is_mst) + intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); + intel_dp_sink_set_decompression_state(intel_dp, crtc_state, + true); + intel_dp_sink_set_fec_ready(intel_dp, crtc_state); + intel_dp_start_link_train(intel_dp); + if (port != PORT_A || INTEL_GEN(dev_priv) >= 9) + intel_dp_stop_link_train(intel_dp); + + intel_ddi_enable_fec(encoder, crtc_state); + + icl_enable_phy_clock_gating(dig_port); + + if (!is_mst) + intel_ddi_enable_pipe_clock(crtc_state); + + intel_dsc_enable(encoder, crtc_state); +} + +static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base); + struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + enum port port = encoder->port; + int level = intel_ddi_hdmi_level(dev_priv, port); + struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); + + intel_dp_dual_mode_set_tmds_output(intel_hdmi, true); + intel_ddi_clk_select(encoder, crtc_state); + + intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain); + + icl_program_mg_dp_mode(dig_port); + icl_disable_phy_clock_gating(dig_port); + + if (INTEL_GEN(dev_priv) >= 11) + icl_ddi_vswing_sequence(encoder, crtc_state->port_clock, + level, INTEL_OUTPUT_HDMI); + else if (IS_CANNONLAKE(dev_priv)) + cnl_ddi_vswing_sequence(encoder, level, INTEL_OUTPUT_HDMI); + else if (IS_GEN9_LP(dev_priv)) + bxt_ddi_vswing_sequence(encoder, level, INTEL_OUTPUT_HDMI); + else + intel_prepare_hdmi_ddi_buffers(encoder, level); + + icl_enable_phy_clock_gating(dig_port); + + if (IS_GEN9_BC(dev_priv)) + skl_ddi_set_iboost(encoder, level, INTEL_OUTPUT_HDMI); + + 
intel_ddi_enable_pipe_clock(crtc_state); + + intel_dig_port->set_infoframes(encoder, + crtc_state->has_infoframe, + crtc_state, conn_state); +} + +static void intel_ddi_pre_enable(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + + /* + * When called from DP MST code: + * - conn_state will be NULL + * - encoder will be the main encoder (ie. mst->primary) + * - the main connector associated with this port + * won't be active or linked to a crtc + * - crtc_state will be the state of the first stream to + * be activated on this port, and it may not be the same + * stream that will be deactivated last, but each stream + * should have a state that is identical when it comes to + * the DP link parameteres + */ + + WARN_ON(crtc_state->has_pch_encoder); + + if (INTEL_GEN(dev_priv) >= 11) + icl_map_plls_to_ports(encoder, crtc_state); + + intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); + + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { + intel_ddi_pre_enable_hdmi(encoder, crtc_state, conn_state); + } else { + struct intel_lspcon *lspcon = + enc_to_intel_lspcon(&encoder->base); + + intel_ddi_pre_enable_dp(encoder, crtc_state, conn_state); + if (lspcon->active) { + struct intel_digital_port *dig_port = + enc_to_dig_port(&encoder->base); + + dig_port->set_infoframes(encoder, + crtc_state->has_infoframe, + crtc_state, conn_state); + } + } +} + +static void intel_disable_ddi_buf(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + enum port port = encoder->port; + bool wait = false; + u32 val; + + val = I915_READ(DDI_BUF_CTL(port)); + if (val & DDI_BUF_CTL_ENABLE) { + val &= ~DDI_BUF_CTL_ENABLE; + I915_WRITE(DDI_BUF_CTL(port), val); + wait = true; + } + + val = I915_READ(DP_TP_CTL(port)); + val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK); + val |= DP_TP_CTL_LINK_TRAIN_PAT1; + I915_WRITE(DP_TP_CTL(port), val); + + /* Disable FEC in DP Sink */ + intel_ddi_disable_fec_state(encoder, crtc_state); + + if (wait) + intel_wait_ddi_buf_idle(dev_priv, port); +} + +static void intel_ddi_post_disable_dp(struct intel_encoder *encoder, + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); + struct intel_dp *intel_dp = &dig_port->dp; + bool is_mst = intel_crtc_has_type(old_crtc_state, + INTEL_OUTPUT_DP_MST); + + if (!is_mst) { + intel_ddi_disable_pipe_clock(old_crtc_state); + /* + * Power down sink before disabling the port, otherwise we end + * up getting interrupts from the sink on detecting link loss. 
+ */ + intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); + } + + intel_disable_ddi_buf(encoder, old_crtc_state); + + intel_edp_panel_vdd_on(intel_dp); + intel_edp_panel_off(intel_dp); + + intel_display_power_put_unchecked(dev_priv, + dig_port->ddi_io_power_domain); + + intel_ddi_clk_disable(encoder); +} + +static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder, + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); + struct intel_hdmi *intel_hdmi = &dig_port->hdmi; + + dig_port->set_infoframes(encoder, false, + old_crtc_state, old_conn_state); + + intel_ddi_disable_pipe_clock(old_crtc_state); + + intel_disable_ddi_buf(encoder, old_crtc_state); + + intel_display_power_put_unchecked(dev_priv, + dig_port->ddi_io_power_domain); + + intel_ddi_clk_disable(encoder); + + intel_dp_dual_mode_set_tmds_output(intel_hdmi, false); +} + +static void intel_ddi_post_disable(struct intel_encoder *encoder, + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + + /* + * When called from DP MST code: + * - old_conn_state will be NULL + * - encoder will be the main encoder (ie. mst->primary) + * - the main connector associated with this port + * won't be active or linked to a crtc + * - old_crtc_state will be the state of the last stream to + * be deactivated on this port, and it may not be the same + * stream that was activated last, but each stream + * should have a state that is identical when it comes to + * the DP link parameteres + */ + + if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_HDMI)) + intel_ddi_post_disable_hdmi(encoder, + old_crtc_state, old_conn_state); + else + intel_ddi_post_disable_dp(encoder, + old_crtc_state, old_conn_state); + + if (INTEL_GEN(dev_priv) >= 11) + icl_unmap_plls_to_ports(encoder); +} + +void intel_ddi_fdi_post_disable(struct intel_encoder *encoder, + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + u32 val; + + /* + * Bspec lists this as both step 13 (before DDI_BUF_CTL disable) + * and step 18 (after clearing PORT_CLK_SEL). Based on a BUN, + * step 13 is the correct place for it. Step 18 is where it was + * originally before the BUN. 
+ */ + val = I915_READ(FDI_RX_CTL(PIPE_A)); + val &= ~FDI_RX_ENABLE; + I915_WRITE(FDI_RX_CTL(PIPE_A), val); + + intel_disable_ddi_buf(encoder, old_crtc_state); + intel_ddi_clk_disable(encoder); + + val = I915_READ(FDI_RX_MISC(PIPE_A)); + val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK); + val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2); + I915_WRITE(FDI_RX_MISC(PIPE_A), val); + + val = I915_READ(FDI_RX_CTL(PIPE_A)); + val &= ~FDI_PCDCLK; + I915_WRITE(FDI_RX_CTL(PIPE_A), val); + + val = I915_READ(FDI_RX_CTL(PIPE_A)); + val &= ~FDI_RX_PLL_ENABLE; + I915_WRITE(FDI_RX_CTL(PIPE_A), val); +} + +static void intel_enable_ddi_dp(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + enum port port = encoder->port; + + if (port == PORT_A && INTEL_GEN(dev_priv) < 9) + intel_dp_stop_link_train(intel_dp); + + intel_edp_backlight_on(crtc_state, conn_state); + intel_psr_enable(intel_dp, crtc_state); + intel_dp_ycbcr_420_enable(intel_dp, crtc_state); + intel_edp_drrs_enable(intel_dp, crtc_state); + + if (crtc_state->has_audio) + intel_audio_codec_enable(encoder, crtc_state, conn_state); +} + +static i915_reg_t +gen9_chicken_trans_reg_by_port(struct drm_i915_private *dev_priv, + enum port port) +{ + static const i915_reg_t regs[] = { + [PORT_A] = CHICKEN_TRANS_EDP, + [PORT_B] = CHICKEN_TRANS_A, + [PORT_C] = CHICKEN_TRANS_B, + [PORT_D] = CHICKEN_TRANS_C, + [PORT_E] = CHICKEN_TRANS_A, + }; + + WARN_ON(INTEL_GEN(dev_priv) < 9); + + if (WARN_ON(port < PORT_A || port > PORT_E)) + port = PORT_A; + + return regs[port]; +} + +static void intel_enable_ddi_hdmi(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); + struct drm_connector *connector = conn_state->connector; + enum port port = encoder->port; + + if (!intel_hdmi_handle_sink_scrambling(encoder, connector, + crtc_state->hdmi_high_tmds_clock_ratio, + crtc_state->hdmi_scrambling)) + DRM_ERROR("[CONNECTOR:%d:%s] Failed to configure sink scrambling/TMDS bit clock ratio\n", + connector->base.id, connector->name); + + /* Display WA #1143: skl,kbl,cfl */ + if (IS_GEN9_BC(dev_priv)) { + /* + * For some reason these chicken bits have been + * stuffed into a transcoder register, event though + * the bits affect a specific DDI port rather than + * a specific transcoder. + */ + i915_reg_t reg = gen9_chicken_trans_reg_by_port(dev_priv, port); + u32 val; + + val = I915_READ(reg); + + if (port == PORT_E) + val |= DDIE_TRAINING_OVERRIDE_ENABLE | + DDIE_TRAINING_OVERRIDE_VALUE; + else + val |= DDI_TRAINING_OVERRIDE_ENABLE | + DDI_TRAINING_OVERRIDE_VALUE; + + I915_WRITE(reg, val); + POSTING_READ(reg); + + udelay(1); + + if (port == PORT_E) + val &= ~(DDIE_TRAINING_OVERRIDE_ENABLE | + DDIE_TRAINING_OVERRIDE_VALUE); + else + val &= ~(DDI_TRAINING_OVERRIDE_ENABLE | + DDI_TRAINING_OVERRIDE_VALUE); + + I915_WRITE(reg, val); + } + + /* In HDMI/DVI mode, the port width, and swing/emphasis values + * are ignored so nothing special needs to be done besides + * enabling the port. 
+ */ + I915_WRITE(DDI_BUF_CTL(port), + dig_port->saved_port_bits | DDI_BUF_CTL_ENABLE); + + if (crtc_state->has_audio) + intel_audio_codec_enable(encoder, crtc_state, conn_state); +} + +static void intel_enable_ddi(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) + intel_enable_ddi_hdmi(encoder, crtc_state, conn_state); + else + intel_enable_ddi_dp(encoder, crtc_state, conn_state); + + /* Enable hdcp if it's desired */ + if (conn_state->content_protection == + DRM_MODE_CONTENT_PROTECTION_DESIRED) + intel_hdcp_enable(to_intel_connector(conn_state->connector)); +} + +static void intel_disable_ddi_dp(struct intel_encoder *encoder, + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + + intel_dp->link_trained = false; + + if (old_crtc_state->has_audio) + intel_audio_codec_disable(encoder, + old_crtc_state, old_conn_state); + + intel_edp_drrs_disable(intel_dp, old_crtc_state); + intel_psr_disable(intel_dp, old_crtc_state); + intel_edp_backlight_off(old_conn_state); + /* Disable the decompression in DP Sink */ + intel_dp_sink_set_decompression_state(intel_dp, old_crtc_state, + false); +} + +static void intel_disable_ddi_hdmi(struct intel_encoder *encoder, + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) +{ + struct drm_connector *connector = old_conn_state->connector; + + if (old_crtc_state->has_audio) + intel_audio_codec_disable(encoder, + old_crtc_state, old_conn_state); + + if (!intel_hdmi_handle_sink_scrambling(encoder, connector, + false, false)) + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Failed to reset sink scrambling/TMDS bit clock ratio\n", + connector->base.id, connector->name); +} + +static void intel_disable_ddi(struct intel_encoder *encoder, + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) +{ + intel_hdcp_disable(to_intel_connector(old_conn_state->connector)); + + if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_HDMI)) + intel_disable_ddi_hdmi(encoder, old_crtc_state, old_conn_state); + else + intel_disable_ddi_dp(encoder, old_crtc_state, old_conn_state); +} + +static void intel_ddi_update_pipe_dp(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + + intel_ddi_set_pipe_settings(crtc_state); + + intel_psr_update(intel_dp, crtc_state); + intel_edp_drrs_enable(intel_dp, crtc_state); + + intel_panel_update_backlight(encoder, crtc_state, conn_state); +} + +static void intel_ddi_update_pipe(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) + intel_ddi_update_pipe_dp(encoder, crtc_state, conn_state); + + if (conn_state->content_protection == + DRM_MODE_CONTENT_PROTECTION_DESIRED) + intel_hdcp_enable(to_intel_connector(conn_state->connector)); + else if (conn_state->content_protection == + DRM_MODE_CONTENT_PROTECTION_UNDESIRED) + intel_hdcp_disable(to_intel_connector(conn_state->connector)); +} + +static void intel_ddi_set_fia_lane_count(struct intel_encoder *encoder, + const struct intel_crtc_state *pipe_config, + enum port port) +{ + struct drm_i915_private *dev_priv = 
to_i915(encoder->base.dev); + struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); + enum tc_port tc_port = intel_port_to_tc(dev_priv, port); + u32 val = I915_READ(PORT_TX_DFLEXDPMLE1); + bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL; + + val &= ~DFLEXDPMLE1_DPMLETC_MASK(tc_port); + switch (pipe_config->lane_count) { + case 1: + val |= (lane_reversal) ? DFLEXDPMLE1_DPMLETC_ML3(tc_port) : + DFLEXDPMLE1_DPMLETC_ML0(tc_port); + break; + case 2: + val |= (lane_reversal) ? DFLEXDPMLE1_DPMLETC_ML3_2(tc_port) : + DFLEXDPMLE1_DPMLETC_ML1_0(tc_port); + break; + case 4: + val |= DFLEXDPMLE1_DPMLETC_ML3_0(tc_port); + break; + default: + MISSING_CASE(pipe_config->lane_count); + } + I915_WRITE(PORT_TX_DFLEXDPMLE1, val); +} + +static void +intel_ddi_pre_pll_enable(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); + enum port port = encoder->port; + + if (intel_crtc_has_dp_encoder(crtc_state) || + intel_port_is_tc(dev_priv, encoder->port)) + intel_display_power_get(dev_priv, + intel_ddi_main_link_aux_domain(dig_port)); + + if (IS_GEN9_LP(dev_priv)) + bxt_ddi_phy_set_lane_optim_mask(encoder, + crtc_state->lane_lat_optim_mask); + + /* + * Program the lane count for static/dynamic connections on Type-C ports. + * Skip this step for TBT. + */ + if (dig_port->tc_type == TC_PORT_UNKNOWN || + dig_port->tc_type == TC_PORT_TBT) + return; + + intel_ddi_set_fia_lane_count(encoder, crtc_state, port); +} + +static void +intel_ddi_post_pll_disable(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); + + if (intel_crtc_has_dp_encoder(crtc_state) || + intel_port_is_tc(dev_priv, encoder->port)) + intel_display_power_put_unchecked(dev_priv, + intel_ddi_main_link_aux_domain(dig_port)); +} + +static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp) +{ + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct drm_i915_private *dev_priv = + to_i915(intel_dig_port->base.base.dev); + enum port port = intel_dig_port->base.port; + u32 val; + bool wait = false; + + if (I915_READ(DP_TP_CTL(port)) & DP_TP_CTL_ENABLE) { + val = I915_READ(DDI_BUF_CTL(port)); + if (val & DDI_BUF_CTL_ENABLE) { + val &= ~DDI_BUF_CTL_ENABLE; + I915_WRITE(DDI_BUF_CTL(port), val); + wait = true; + } + + val = I915_READ(DP_TP_CTL(port)); + val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK); + val |= DP_TP_CTL_LINK_TRAIN_PAT1; + I915_WRITE(DP_TP_CTL(port), val); + POSTING_READ(DP_TP_CTL(port)); + + if (wait) + intel_wait_ddi_buf_idle(dev_priv, port); + } + + val = DP_TP_CTL_ENABLE | + DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE; + if (intel_dp->link_mst) + val |= DP_TP_CTL_MODE_MST; + else { + val |= DP_TP_CTL_MODE_SST; + if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) + val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE; + } + I915_WRITE(DP_TP_CTL(port), val); + POSTING_READ(DP_TP_CTL(port)); + + intel_dp->DP |= DDI_BUF_CTL_ENABLE; + I915_WRITE(DDI_BUF_CTL(port), intel_dp->DP); + POSTING_READ(DDI_BUF_CTL(port)); + + udelay(600); +} + +static bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv, + enum transcoder cpu_transcoder) +{ + if 
(cpu_transcoder == TRANSCODER_EDP) + return false; + + if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO)) + return false; + + return I915_READ(HSW_AUD_PIN_ELD_CP_VLD) & + AUDIO_OUTPUT_ENABLE(cpu_transcoder); +} + +void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv, + struct intel_crtc_state *crtc_state) +{ + if (INTEL_GEN(dev_priv) >= 11 && crtc_state->port_clock > 594000) + crtc_state->min_voltage_level = 1; + else if (IS_CANNONLAKE(dev_priv) && crtc_state->port_clock > 594000) + crtc_state->min_voltage_level = 2; +} + +void intel_ddi_get_config(struct intel_encoder *encoder, + struct intel_crtc_state *pipe_config) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc); + enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; + struct intel_digital_port *intel_dig_port; + u32 temp, flags = 0; + + /* XXX: DSI transcoder paranoia */ + if (WARN_ON(transcoder_is_dsi(cpu_transcoder))) + return; + + temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); + if (temp & TRANS_DDI_PHSYNC) + flags |= DRM_MODE_FLAG_PHSYNC; + else + flags |= DRM_MODE_FLAG_NHSYNC; + if (temp & TRANS_DDI_PVSYNC) + flags |= DRM_MODE_FLAG_PVSYNC; + else + flags |= DRM_MODE_FLAG_NVSYNC; + + pipe_config->base.adjusted_mode.flags |= flags; + + switch (temp & TRANS_DDI_BPC_MASK) { + case TRANS_DDI_BPC_6: + pipe_config->pipe_bpp = 18; + break; + case TRANS_DDI_BPC_8: + pipe_config->pipe_bpp = 24; + break; + case TRANS_DDI_BPC_10: + pipe_config->pipe_bpp = 30; + break; + case TRANS_DDI_BPC_12: + pipe_config->pipe_bpp = 36; + break; + default: + break; + } + + switch (temp & TRANS_DDI_MODE_SELECT_MASK) { + case TRANS_DDI_MODE_SELECT_HDMI: + pipe_config->has_hdmi_sink = true; + intel_dig_port = enc_to_dig_port(&encoder->base); + + pipe_config->infoframes.enable |= + intel_hdmi_infoframes_enabled(encoder, pipe_config); + + if (pipe_config->infoframes.enable) + pipe_config->has_infoframe = true; + + if (temp & TRANS_DDI_HDMI_SCRAMBLING) + pipe_config->hdmi_scrambling = true; + if (temp & TRANS_DDI_HIGH_TMDS_CHAR_RATE) + pipe_config->hdmi_high_tmds_clock_ratio = true; + /* fall through */ + case TRANS_DDI_MODE_SELECT_DVI: + pipe_config->output_types |= BIT(INTEL_OUTPUT_HDMI); + pipe_config->lane_count = 4; + break; + case TRANS_DDI_MODE_SELECT_FDI: + pipe_config->output_types |= BIT(INTEL_OUTPUT_ANALOG); + break; + case TRANS_DDI_MODE_SELECT_DP_SST: + if (encoder->type == INTEL_OUTPUT_EDP) + pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP); + else + pipe_config->output_types |= BIT(INTEL_OUTPUT_DP); + pipe_config->lane_count = + ((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1; + intel_dp_get_m_n(intel_crtc, pipe_config); + break; + case TRANS_DDI_MODE_SELECT_DP_MST: + pipe_config->output_types |= BIT(INTEL_OUTPUT_DP_MST); + pipe_config->lane_count = + ((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1; + intel_dp_get_m_n(intel_crtc, pipe_config); + break; + default: + break; + } + + pipe_config->has_audio = + intel_ddi_is_audio_enabled(dev_priv, cpu_transcoder); + + if (encoder->type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.bpp && + pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) { + /* + * This is a big fat ugly hack. + * + * Some machines in UEFI boot mode provide us a VBT that has 18 + * bpp and 1.62 GHz link bandwidth for eDP, which for reasons + * unknown we fail to light up. Yet the same BIOS boots up with + * 24 bpp and 2.7 GHz link. 
Use the same bpp as the BIOS uses as + * max, not what it tells us to use. + * + * Note: This will still be broken if the eDP panel is not lit + * up by the BIOS, and thus we can't get the mode at module + * load. + */ + DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n", + pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp); + dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp; + } + + intel_ddi_clock_get(encoder, pipe_config); + + if (IS_GEN9_LP(dev_priv)) + pipe_config->lane_lat_optim_mask = + bxt_ddi_phy_get_lane_lat_optim_mask(encoder); + + intel_ddi_compute_min_voltage_level(dev_priv, pipe_config); + + intel_hdmi_read_gcp_infoframe(encoder, pipe_config); + + intel_read_infoframe(encoder, pipe_config, + HDMI_INFOFRAME_TYPE_AVI, + &pipe_config->infoframes.avi); + intel_read_infoframe(encoder, pipe_config, + HDMI_INFOFRAME_TYPE_SPD, + &pipe_config->infoframes.spd); + intel_read_infoframe(encoder, pipe_config, + HDMI_INFOFRAME_TYPE_VENDOR, + &pipe_config->infoframes.hdmi); + intel_read_infoframe(encoder, pipe_config, + HDMI_INFOFRAME_TYPE_DRM, + &pipe_config->infoframes.drm); +} + +static enum intel_output_type +intel_ddi_compute_output_type(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + switch (conn_state->connector->connector_type) { + case DRM_MODE_CONNECTOR_HDMIA: + return INTEL_OUTPUT_HDMI; + case DRM_MODE_CONNECTOR_eDP: + return INTEL_OUTPUT_EDP; + case DRM_MODE_CONNECTOR_DisplayPort: + return INTEL_OUTPUT_DP; + default: + MISSING_CASE(conn_state->connector->connector_type); + return INTEL_OUTPUT_UNUSED; + } +} + +static int intel_ddi_compute_config(struct intel_encoder *encoder, + struct intel_crtc_state *pipe_config, + struct drm_connector_state *conn_state) +{ + struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc); + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + enum port port = encoder->port; + int ret; + + if (HAS_TRANSCODER_EDP(dev_priv) && port == PORT_A) + pipe_config->cpu_transcoder = TRANSCODER_EDP; + + if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI)) + ret = intel_hdmi_compute_config(encoder, pipe_config, conn_state); + else + ret = intel_dp_compute_config(encoder, pipe_config, conn_state); + if (ret) + return ret; + + if (IS_HASWELL(dev_priv) && crtc->pipe == PIPE_A && + pipe_config->cpu_transcoder == TRANSCODER_EDP) + pipe_config->pch_pfit.force_thru = + pipe_config->pch_pfit.enabled || + pipe_config->crc_enabled; + + if (IS_GEN9_LP(dev_priv)) + pipe_config->lane_lat_optim_mask = + bxt_ddi_phy_calc_lane_lat_optim_mask(pipe_config->lane_count); + + intel_ddi_compute_min_voltage_level(dev_priv, pipe_config); + + return 0; +} + +static void intel_ddi_encoder_suspend(struct intel_encoder *encoder) +{ + struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + + intel_dp_encoder_suspend(encoder); + + /* + * TODO: disconnect also from USB DP alternate mode once we have a + * way to handle the modeset restore in that mode during resume + * even if the sink has disappeared while being suspended. 
+ */ + if (dig_port->tc_legacy_port) + icl_tc_phy_disconnect(i915, dig_port); +} + +static void intel_ddi_encoder_reset(struct drm_encoder *drm_encoder) +{ + struct intel_digital_port *dig_port = enc_to_dig_port(drm_encoder); + struct drm_i915_private *i915 = to_i915(drm_encoder->dev); + + if (intel_port_is_tc(i915, dig_port->base.port)) + intel_digital_port_connected(&dig_port->base); + + intel_dp_encoder_reset(drm_encoder); +} + +static void intel_ddi_encoder_destroy(struct drm_encoder *encoder) +{ + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); + struct drm_i915_private *i915 = to_i915(encoder->dev); + + intel_dp_encoder_flush_work(encoder); + + if (intel_port_is_tc(i915, dig_port->base.port)) + icl_tc_phy_disconnect(i915, dig_port); + + drm_encoder_cleanup(encoder); + kfree(dig_port); +} + +static const struct drm_encoder_funcs intel_ddi_funcs = { + .reset = intel_ddi_encoder_reset, + .destroy = intel_ddi_encoder_destroy, +}; + +static struct intel_connector * +intel_ddi_init_dp_connector(struct intel_digital_port *intel_dig_port) +{ + struct intel_connector *connector; + enum port port = intel_dig_port->base.port; + + connector = intel_connector_alloc(); + if (!connector) + return NULL; + + intel_dig_port->dp.output_reg = DDI_BUF_CTL(port); + intel_dig_port->dp.prepare_link_retrain = + intel_ddi_prepare_link_retrain; + + if (!intel_dp_init_connector(intel_dig_port, connector)) { + kfree(connector); + return NULL; + } + + return connector; +} + +static int modeset_pipe(struct drm_crtc *crtc, + struct drm_modeset_acquire_ctx *ctx) +{ + struct drm_atomic_state *state; + struct drm_crtc_state *crtc_state; + int ret; + + state = drm_atomic_state_alloc(crtc->dev); + if (!state) + return -ENOMEM; + + state->acquire_ctx = ctx; + + crtc_state = drm_atomic_get_crtc_state(state, crtc); + if (IS_ERR(crtc_state)) { + ret = PTR_ERR(crtc_state); + goto out; + } + + crtc_state->connectors_changed = true; + + ret = drm_atomic_commit(state); +out: + drm_atomic_state_put(state); + + return ret; +} + +static int intel_hdmi_reset_link(struct intel_encoder *encoder, + struct drm_modeset_acquire_ctx *ctx) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_hdmi *hdmi = enc_to_intel_hdmi(&encoder->base); + struct intel_connector *connector = hdmi->attached_connector; + struct i2c_adapter *adapter = + intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus); + struct drm_connector_state *conn_state; + struct intel_crtc_state *crtc_state; + struct intel_crtc *crtc; + u8 config; + int ret; + + if (!connector || connector->base.status != connector_status_connected) + return 0; + + ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, + ctx); + if (ret) + return ret; + + conn_state = connector->base.state; + + crtc = to_intel_crtc(conn_state->crtc); + if (!crtc) + return 0; + + ret = drm_modeset_lock(&crtc->base.mutex, ctx); + if (ret) + return ret; + + crtc_state = to_intel_crtc_state(crtc->base.state); + + WARN_ON(!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)); + + if (!crtc_state->base.active) + return 0; + + if (!crtc_state->hdmi_high_tmds_clock_ratio && + !crtc_state->hdmi_scrambling) + return 0; + + if (conn_state->commit && + !try_wait_for_completion(&conn_state->commit->hw_done)) + return 0; + + ret = drm_scdc_readb(adapter, SCDC_TMDS_CONFIG, &config); + if (ret < 0) { + DRM_ERROR("Failed to read TMDS config: %d\n", ret); + return 0; + } + + if (!!(config & SCDC_TMDS_BIT_CLOCK_RATIO_BY_40) == + crtc_state->hdmi_high_tmds_clock_ratio && + 
!!(config & SCDC_SCRAMBLING_ENABLE) == + crtc_state->hdmi_scrambling) + return 0; + + /* + * HDMI 2.0 says that one should not send scrambled data + * prior to configuring the sink scrambling, and that + * TMDS clock/data transmission should be suspended when + * changing the TMDS clock rate in the sink. So let's + * just do a full modeset here, even though some sinks + * would be perfectly happy if were to just reconfigure + * the SCDC settings on the fly. + */ + return modeset_pipe(&crtc->base, ctx); +} + +static bool intel_ddi_hotplug(struct intel_encoder *encoder, + struct intel_connector *connector) +{ + struct drm_modeset_acquire_ctx ctx; + bool changed; + int ret; + + changed = intel_encoder_hotplug(encoder, connector); + + drm_modeset_acquire_init(&ctx, 0); + + for (;;) { + if (connector->base.connector_type == DRM_MODE_CONNECTOR_HDMIA) + ret = intel_hdmi_reset_link(encoder, &ctx); + else + ret = intel_dp_retrain_link(encoder, &ctx); + + if (ret == -EDEADLK) { + drm_modeset_backoff(&ctx); + continue; + } + + break; + } + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); + WARN(ret, "Acquiring modeset locks failed with %i\n", ret); + + return changed; +} + +static struct intel_connector * +intel_ddi_init_hdmi_connector(struct intel_digital_port *intel_dig_port) +{ + struct intel_connector *connector; + enum port port = intel_dig_port->base.port; + + connector = intel_connector_alloc(); + if (!connector) + return NULL; + + intel_dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port); + intel_hdmi_init_connector(intel_dig_port, connector); + + return connector; +} + +static bool intel_ddi_a_force_4_lanes(struct intel_digital_port *dport) +{ + struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev); + + if (dport->base.port != PORT_A) + return false; + + if (dport->saved_port_bits & DDI_A_4_LANES) + return false; + + /* Broxton/Geminilake: Bspec says that DDI_A_4_LANES is the only + * supported configuration + */ + if (IS_GEN9_LP(dev_priv)) + return true; + + /* Cannonlake: Most of SKUs don't support DDI_E, and the only + * one who does also have a full A/E split called + * DDI_F what makes DDI_E useless. However for this + * case let's trust VBT info. + */ + if (IS_CANNONLAKE(dev_priv) && + !intel_bios_is_port_present(dev_priv, PORT_E)) + return true; + + return false; +} + +static int +intel_ddi_max_lanes(struct intel_digital_port *intel_dport) +{ + struct drm_i915_private *dev_priv = to_i915(intel_dport->base.base.dev); + enum port port = intel_dport->base.port; + int max_lanes = 4; + + if (INTEL_GEN(dev_priv) >= 11) + return max_lanes; + + if (port == PORT_A || port == PORT_E) { + if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES) + max_lanes = port == PORT_A ? 4 : 0; + else + /* Both A and E share 2 lanes */ + max_lanes = 2; + } + + /* + * Some BIOS might fail to set this bit on port A if eDP + * wasn't lit up at boot. Force this bit set when needed + * so we use the proper lane count for our calculations. 
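+	 *
+	 * For example: with DDI_A_4_LANES set, port A owns all 4 lanes and
+	 * port E gets none; with it clear, both A and E are limited to
+	 * 2 lanes each (see the check above).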
+ */ + if (intel_ddi_a_force_4_lanes(intel_dport)) { + DRM_DEBUG_KMS("Forcing DDI_A_4_LANES for port A\n"); + intel_dport->saved_port_bits |= DDI_A_4_LANES; + max_lanes = 4; + } + + return max_lanes; +} + +void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) +{ + struct ddi_vbt_port_info *port_info = + &dev_priv->vbt.ddi_port_info[port]; + struct intel_digital_port *intel_dig_port; + struct intel_encoder *intel_encoder; + struct drm_encoder *encoder; + bool init_hdmi, init_dp, init_lspcon = false; + enum pipe pipe; + + init_hdmi = port_info->supports_dvi || port_info->supports_hdmi; + init_dp = port_info->supports_dp; + + if (intel_bios_is_lspcon_present(dev_priv, port)) { + /* + * Lspcon device needs to be driven with DP connector + * with special detection sequence. So make sure DP + * is initialized before lspcon. + */ + init_dp = true; + init_lspcon = true; + init_hdmi = false; + DRM_DEBUG_KMS("VBT says port %c has lspcon\n", port_name(port)); + } + + if (!init_dp && !init_hdmi) { + DRM_DEBUG_KMS("VBT says port %c is not DVI/HDMI/DP compatible, respect it\n", + port_name(port)); + return; + } + + intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL); + if (!intel_dig_port) + return; + + intel_encoder = &intel_dig_port->base; + encoder = &intel_encoder->base; + + drm_encoder_init(&dev_priv->drm, encoder, &intel_ddi_funcs, + DRM_MODE_ENCODER_TMDS, "DDI %c", port_name(port)); + + intel_encoder->hotplug = intel_ddi_hotplug; + intel_encoder->compute_output_type = intel_ddi_compute_output_type; + intel_encoder->compute_config = intel_ddi_compute_config; + intel_encoder->enable = intel_enable_ddi; + intel_encoder->pre_pll_enable = intel_ddi_pre_pll_enable; + intel_encoder->post_pll_disable = intel_ddi_post_pll_disable; + intel_encoder->pre_enable = intel_ddi_pre_enable; + intel_encoder->disable = intel_disable_ddi; + intel_encoder->post_disable = intel_ddi_post_disable; + intel_encoder->update_pipe = intel_ddi_update_pipe; + intel_encoder->get_hw_state = intel_ddi_get_hw_state; + intel_encoder->get_config = intel_ddi_get_config; + intel_encoder->suspend = intel_ddi_encoder_suspend; + intel_encoder->get_power_domains = intel_ddi_get_power_domains; + intel_encoder->type = INTEL_OUTPUT_DDI; + intel_encoder->power_domain = intel_port_to_power_domain(port); + intel_encoder->port = port; + intel_encoder->cloneable = 0; + for_each_pipe(dev_priv, pipe) + intel_encoder->crtc_mask |= BIT(pipe); + + if (INTEL_GEN(dev_priv) >= 11) + intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) & + DDI_BUF_PORT_REVERSAL; + else + intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) & + (DDI_BUF_PORT_REVERSAL | DDI_A_4_LANES); + intel_dig_port->dp.output_reg = INVALID_MMIO_REG; + intel_dig_port->max_lanes = intel_ddi_max_lanes(intel_dig_port); + intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port); + + intel_dig_port->tc_legacy_port = intel_port_is_tc(dev_priv, port) && + !port_info->supports_typec_usb && + !port_info->supports_tbt; + + switch (port) { + case PORT_A: + intel_dig_port->ddi_io_power_domain = + POWER_DOMAIN_PORT_DDI_A_IO; + break; + case PORT_B: + intel_dig_port->ddi_io_power_domain = + POWER_DOMAIN_PORT_DDI_B_IO; + break; + case PORT_C: + intel_dig_port->ddi_io_power_domain = + POWER_DOMAIN_PORT_DDI_C_IO; + break; + case PORT_D: + intel_dig_port->ddi_io_power_domain = + POWER_DOMAIN_PORT_DDI_D_IO; + break; + case PORT_E: + intel_dig_port->ddi_io_power_domain = + POWER_DOMAIN_PORT_DDI_E_IO; + break; + case PORT_F: + 
intel_dig_port->ddi_io_power_domain = + POWER_DOMAIN_PORT_DDI_F_IO; + break; + default: + MISSING_CASE(port); + } + + if (init_dp) { + if (!intel_ddi_init_dp_connector(intel_dig_port)) + goto err; + + intel_dig_port->hpd_pulse = intel_dp_hpd_pulse; + } + + /* In theory we don't need the encoder->type check, but leave it just in + * case we have some really bad VBTs... */ + if (intel_encoder->type != INTEL_OUTPUT_EDP && init_hdmi) { + if (!intel_ddi_init_hdmi_connector(intel_dig_port)) + goto err; + } + + if (init_lspcon) { + if (lspcon_init(intel_dig_port)) + /* TODO: handle hdmi info frame part */ + DRM_DEBUG_KMS("LSPCON init success on port %c\n", + port_name(port)); + else + /* + * LSPCON init faied, but DP init was success, so + * lets try to drive as DP++ port. + */ + DRM_ERROR("LSPCON init failed on port %c\n", + port_name(port)); + } + + intel_infoframe_init(intel_dig_port); + + if (intel_port_is_tc(dev_priv, port)) + intel_digital_port_connected(intel_encoder); + + return; + +err: + drm_encoder_cleanup(encoder); + kfree(intel_dig_port); +} diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h new file mode 100644 index 000000000000..a08365da2643 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_ddi.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2019 Intel Corporation + */ + +#ifndef __INTEL_DDI_H__ +#define __INTEL_DDI_H__ + +#include + +#include "intel_display.h" + +struct drm_connector_state; +struct drm_i915_private; +struct intel_connector; +struct intel_crtc; +struct intel_crtc_state; +struct intel_dp; +struct intel_dpll_hw_state; +struct intel_encoder; + +void intel_ddi_fdi_post_disable(struct intel_encoder *intel_encoder, + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state); +void hsw_fdi_link_train(struct intel_crtc *crtc, + const struct intel_crtc_state *crtc_state); +void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port); +bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe); +void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state); +void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state); +void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state); +void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state); +void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state); +bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector); +void intel_ddi_get_config(struct intel_encoder *encoder, + struct intel_crtc_state *pipe_config); +void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state, + bool state); +void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv, + struct intel_crtc_state *crtc_state); +u32 bxt_signal_levels(struct intel_dp *intel_dp); +u32 ddi_signal_levels(struct intel_dp *intel_dp); +u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder); +u8 intel_ddi_dp_pre_emphasis_max(struct intel_encoder *encoder, + u8 voltage_swing); +int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder, + bool enable); +void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder); +int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv, + struct intel_dpll_hw_state *state); + +#endif /* __INTEL_DDI_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c new file mode 100644 index 
000000000000..4336df46fe78 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -0,0 +1,7577 @@ +/* + * Copyright © 2008 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Keith Packard + * + */ + +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "i915_debugfs.h" +#include "i915_drv.h" +#include "intel_atomic.h" +#include "intel_audio.h" +#include "intel_connector.h" +#include "intel_ddi.h" +#include "intel_dp.h" +#include "intel_dp_link_training.h" +#include "intel_dp_mst.h" +#include "intel_dpio_phy.h" +#include "intel_drv.h" +#include "intel_fifo_underrun.h" +#include "intel_hdcp.h" +#include "intel_hdmi.h" +#include "intel_hotplug.h" +#include "intel_lspcon.h" +#include "intel_lvds.h" +#include "intel_panel.h" +#include "intel_psr.h" +#include "intel_sideband.h" +#include "intel_vdsc.h" + +#define DP_DPRX_ESI_LEN 14 + +/* DP DSC small joiner has 2 FIFOs each of 640 x 6 bytes */ +#define DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER 61440 +#define DP_DSC_MIN_SUPPORTED_BPC 8 +#define DP_DSC_MAX_SUPPORTED_BPC 10 + +/* DP DSC throughput values used for slice count calculations KPixels/s */ +#define DP_DSC_PEAK_PIXEL_RATE 2720000 +#define DP_DSC_MAX_ENC_THROUGHPUT_0 340000 +#define DP_DSC_MAX_ENC_THROUGHPUT_1 400000 + +/* DP DSC FEC Overhead factor = (100 - 2.4)/100 */ +#define DP_DSC_FEC_OVERHEAD_FACTOR 976 + +/* Compliance test status bits */ +#define INTEL_DP_RESOLUTION_SHIFT_MASK 0 +#define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK) +#define INTEL_DP_RESOLUTION_STANDARD (2 << INTEL_DP_RESOLUTION_SHIFT_MASK) +#define INTEL_DP_RESOLUTION_FAILSAFE (3 << INTEL_DP_RESOLUTION_SHIFT_MASK) + +struct dp_link_dpll { + int clock; + struct dpll dpll; +}; + +static const struct dp_link_dpll g4x_dpll[] = { + { 162000, + { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } }, + { 270000, + { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } } +}; + +static const struct dp_link_dpll pch_dpll[] = { + { 162000, + { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } }, + { 270000, + { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } } +}; + +static const struct dp_link_dpll vlv_dpll[] = { + { 162000, + { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } }, + { 270000, + { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } } +}; + +/* + * CHV supports eDP 1.4 that have more link rates. 
+ * Below only provides the fixed rate but exclude variable rate. + */ +static const struct dp_link_dpll chv_dpll[] = { + /* + * CHV requires to program fractional division for m2. + * m2 is stored in fixed point format using formula below + * (m2_int << 22) | m2_fraction + */ + { 162000, /* m2_int = 32, m2_fraction = 1677722 */ + { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } }, + { 270000, /* m2_int = 27, m2_fraction = 0 */ + { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }, +}; + +/* Constants for DP DSC configurations */ +static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15}; + +/* With Single pipe configuration, HW is capable of supporting maximum + * of 4 slices per line. + */ +static const u8 valid_dsc_slicecount[] = {1, 2, 4}; + +/** + * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH) + * @intel_dp: DP struct + * + * If a CPU or PCH DP output is attached to an eDP panel, this function + * will return true, and false otherwise. + */ +bool intel_dp_is_edp(struct intel_dp *intel_dp) +{ + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + + return intel_dig_port->base.type == INTEL_OUTPUT_EDP; +} + +static struct intel_dp *intel_attached_dp(struct drm_connector *connector) +{ + return enc_to_intel_dp(&intel_attached_encoder(connector)->base); +} + +static void intel_dp_link_down(struct intel_encoder *encoder, + const struct intel_crtc_state *old_crtc_state); +static bool edp_panel_vdd_on(struct intel_dp *intel_dp); +static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync); +static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state); +static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv, + enum pipe pipe); +static void intel_dp_unset_edid(struct intel_dp *intel_dp); + +/* update sink rates from dpcd */ +static void intel_dp_set_sink_rates(struct intel_dp *intel_dp) +{ + static const int dp_rates[] = { + 162000, 270000, 540000, 810000 + }; + int i, max_rate; + + max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]); + + for (i = 0; i < ARRAY_SIZE(dp_rates); i++) { + if (dp_rates[i] > max_rate) + break; + intel_dp->sink_rates[i] = dp_rates[i]; + } + + intel_dp->num_sink_rates = i; +} + +/* Get length of rates array potentially limited by max_rate. */ +static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate) +{ + int i; + + /* Limit results by potentially reduced max rate */ + for (i = 0; i < len; i++) { + if (rates[len - i - 1] <= max_rate) + return len - i; + } + + return 0; +} + +/* Get length of common rates array potentially limited by max_rate. 
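+ * For example, common_rates = { 162000, 270000, 540000 } with
+ * max_rate = 270000 yields a length of 2, i.e. only 162000 and
+ * 270000 remain usable.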
*/ +static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp, + int max_rate) +{ + return intel_dp_rate_limit_len(intel_dp->common_rates, + intel_dp->num_common_rates, max_rate); +} + +/* Theoretical max between source and sink */ +static int intel_dp_max_common_rate(struct intel_dp *intel_dp) +{ + return intel_dp->common_rates[intel_dp->num_common_rates - 1]; +} + +static int intel_dp_get_fia_supported_lane_count(struct intel_dp *intel_dp) +{ + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); + enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port); + intel_wakeref_t wakeref; + u32 lane_info; + + if (tc_port == PORT_TC_NONE || dig_port->tc_type != TC_PORT_TYPEC) + return 4; + + lane_info = 0; + with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref) + lane_info = (I915_READ(PORT_TX_DFLEXDPSP) & + DP_LANE_ASSIGNMENT_MASK(tc_port)) >> + DP_LANE_ASSIGNMENT_SHIFT(tc_port); + + switch (lane_info) { + default: + MISSING_CASE(lane_info); + case 1: + case 2: + case 4: + case 8: + return 1; + case 3: + case 12: + return 2; + case 15: + return 4; + } +} + +/* Theoretical max between source and sink */ +static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp) +{ + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + int source_max = intel_dig_port->max_lanes; + int sink_max = drm_dp_max_lane_count(intel_dp->dpcd); + int fia_max = intel_dp_get_fia_supported_lane_count(intel_dp); + + return min3(source_max, sink_max, fia_max); +} + +int intel_dp_max_lane_count(struct intel_dp *intel_dp) +{ + return intel_dp->max_link_lane_count; +} + +int +intel_dp_link_required(int pixel_clock, int bpp) +{ + /* pixel_clock is in kHz, divide bpp by 8 for bit to Byte conversion */ + return DIV_ROUND_UP(pixel_clock * bpp, 8); +} + +int +intel_dp_max_data_rate(int max_link_clock, int max_lanes) +{ + /* max_link_clock is the link symbol clock (LS_Clk) in kHz and not the + * link rate that is generally expressed in Gbps. Since, 8 bits of data + * is transmitted every LS_Clk per lane, there is no need to account for + * the channel encoding that is done in the PHY layer here. 
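+ *
+ * Worked example: an HBR2 link (540000 kHz symbol clock) over 4 lanes
+ * gives 540000 * 4 = 2160000 kBps of payload bandwidth, which is
+ * compared against intel_dp_link_required() (pixel_clock * bpp / 8,
+ * also in kBps).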
+ */ + + return max_link_clock * max_lanes; +} + +static int +intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp) +{ + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct intel_encoder *encoder = &intel_dig_port->base; + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + int max_dotclk = dev_priv->max_dotclk_freq; + int ds_max_dotclk; + + int type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK; + + if (type != DP_DS_PORT_TYPE_VGA) + return max_dotclk; + + ds_max_dotclk = drm_dp_downstream_max_clock(intel_dp->dpcd, + intel_dp->downstream_ports); + + if (ds_max_dotclk != 0) + max_dotclk = min(max_dotclk, ds_max_dotclk); + + return max_dotclk; +} + +static int cnl_max_source_rate(struct intel_dp *intel_dp) +{ + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); + enum port port = dig_port->base.port; + + u32 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK; + + /* Low voltage SKUs are limited to max of 5.4G */ + if (voltage == VOLTAGE_INFO_0_85V) + return 540000; + + /* For this SKU 8.1G is supported in all ports */ + if (IS_CNL_WITH_PORT_F(dev_priv)) + return 810000; + + /* For other SKUs, max rate on ports A and D is 5.4G */ + if (port == PORT_A || port == PORT_D) + return 540000; + + return 810000; +} + +static int icl_max_source_rate(struct intel_dp *intel_dp) +{ + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); + enum port port = dig_port->base.port; + + if (intel_port_is_combophy(dev_priv, port) && + !IS_ELKHARTLAKE(dev_priv) && + !intel_dp_is_edp(intel_dp)) + return 540000; + + return 810000; +} + +static void +intel_dp_set_source_rates(struct intel_dp *intel_dp) +{ + /* The values must be in increasing order */ + static const int cnl_rates[] = { + 162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000 + }; + static const int bxt_rates[] = { + 162000, 216000, 243000, 270000, 324000, 432000, 540000 + }; + static const int skl_rates[] = { + 162000, 216000, 270000, 324000, 432000, 540000 + }; + static const int hsw_rates[] = { + 162000, 270000, 540000 + }; + static const int g4x_rates[] = { + 162000, 270000 + }; + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); + const struct ddi_vbt_port_info *info = + &dev_priv->vbt.ddi_port_info[dig_port->base.port]; + const int *source_rates; + int size, max_rate = 0, vbt_max_rate = info->dp_max_link_rate; + + /* This should only be done once */ + WARN_ON(intel_dp->source_rates || intel_dp->num_source_rates); + + if (INTEL_GEN(dev_priv) >= 10) { + source_rates = cnl_rates; + size = ARRAY_SIZE(cnl_rates); + if (IS_GEN(dev_priv, 10)) + max_rate = cnl_max_source_rate(intel_dp); + else + max_rate = icl_max_source_rate(intel_dp); + } else if (IS_GEN9_LP(dev_priv)) { + source_rates = bxt_rates; + size = ARRAY_SIZE(bxt_rates); + } else if (IS_GEN9_BC(dev_priv)) { + source_rates = skl_rates; + size = ARRAY_SIZE(skl_rates); + } else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) || + IS_BROADWELL(dev_priv)) { + source_rates = hsw_rates; + size = ARRAY_SIZE(hsw_rates); + } else { + source_rates = g4x_rates; + size = ARRAY_SIZE(g4x_rates); + } + + if (max_rate && vbt_max_rate) + max_rate = min(max_rate, vbt_max_rate); + else if (vbt_max_rate) + max_rate = vbt_max_rate; + + if (max_rate) + size = 
intel_dp_rate_limit_len(source_rates, size, max_rate); + + intel_dp->source_rates = source_rates; + intel_dp->num_source_rates = size; +} + +static int intersect_rates(const int *source_rates, int source_len, + const int *sink_rates, int sink_len, + int *common_rates) +{ + int i = 0, j = 0, k = 0; + + while (i < source_len && j < sink_len) { + if (source_rates[i] == sink_rates[j]) { + if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES)) + return k; + common_rates[k] = source_rates[i]; + ++k; + ++i; + ++j; + } else if (source_rates[i] < sink_rates[j]) { + ++i; + } else { + ++j; + } + } + return k; +} + +/* return index of rate in rates array, or -1 if not found */ +static int intel_dp_rate_index(const int *rates, int len, int rate) +{ + int i; + + for (i = 0; i < len; i++) + if (rate == rates[i]) + return i; + + return -1; +} + +static void intel_dp_set_common_rates(struct intel_dp *intel_dp) +{ + WARN_ON(!intel_dp->num_source_rates || !intel_dp->num_sink_rates); + + intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates, + intel_dp->num_source_rates, + intel_dp->sink_rates, + intel_dp->num_sink_rates, + intel_dp->common_rates); + + /* Paranoia, there should always be something in common. */ + if (WARN_ON(intel_dp->num_common_rates == 0)) { + intel_dp->common_rates[0] = 162000; + intel_dp->num_common_rates = 1; + } +} + +static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate, + u8 lane_count) +{ + /* + * FIXME: we need to synchronize the current link parameters with + * hardware readout. Currently fast link training doesn't work on + * boot-up. + */ + if (link_rate == 0 || + link_rate > intel_dp->max_link_rate) + return false; + + if (lane_count == 0 || + lane_count > intel_dp_max_lane_count(intel_dp)) + return false; + + return true; +} + +static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp, + int link_rate, + u8 lane_count) +{ + const struct drm_display_mode *fixed_mode = + intel_dp->attached_connector->panel.fixed_mode; + int mode_rate, max_rate; + + mode_rate = intel_dp_link_required(fixed_mode->clock, 18); + max_rate = intel_dp_max_data_rate(link_rate, lane_count); + if (mode_rate > max_rate) + return false; + + return true; +} + +int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp, + int link_rate, u8 lane_count) +{ + int index; + + index = intel_dp_rate_index(intel_dp->common_rates, + intel_dp->num_common_rates, + link_rate); + if (index > 0) { + if (intel_dp_is_edp(intel_dp) && + !intel_dp_can_link_train_fallback_for_edp(intel_dp, + intel_dp->common_rates[index - 1], + lane_count)) { + DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n"); + return 0; + } + intel_dp->max_link_rate = intel_dp->common_rates[index - 1]; + intel_dp->max_link_lane_count = lane_count; + } else if (lane_count > 1) { + if (intel_dp_is_edp(intel_dp) && + !intel_dp_can_link_train_fallback_for_edp(intel_dp, + intel_dp_max_common_rate(intel_dp), + lane_count >> 1)) { + DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n"); + return 0; + } + intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp); + intel_dp->max_link_lane_count = lane_count >> 1; + } else { + DRM_ERROR("Link Training Unsuccessful\n"); + return -1; + } + + return 0; +} + +static enum drm_mode_status +intel_dp_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + struct intel_dp *intel_dp = intel_attached_dp(connector); + struct intel_connector *intel_connector = to_intel_connector(connector); + struct 
drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; + struct drm_i915_private *dev_priv = to_i915(connector->dev); + int target_clock = mode->clock; + int max_rate, mode_rate, max_lanes, max_link_clock; + int max_dotclk; + u16 dsc_max_output_bpp = 0; + u8 dsc_slice_count = 0; + + if (mode->flags & DRM_MODE_FLAG_DBLSCAN) + return MODE_NO_DBLESCAN; + + max_dotclk = intel_dp_downstream_max_dotclock(intel_dp); + + if (intel_dp_is_edp(intel_dp) && fixed_mode) { + if (mode->hdisplay > fixed_mode->hdisplay) + return MODE_PANEL; + + if (mode->vdisplay > fixed_mode->vdisplay) + return MODE_PANEL; + + target_clock = fixed_mode->clock; + } + + max_link_clock = intel_dp_max_link_rate(intel_dp); + max_lanes = intel_dp_max_lane_count(intel_dp); + + max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes); + mode_rate = intel_dp_link_required(target_clock, 18); + + /* + * Output bpp is stored in 6.4 format so right shift by 4 to get the + * integer value since we support only integer values of bpp. + */ + if ((INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) && + drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) { + if (intel_dp_is_edp(intel_dp)) { + dsc_max_output_bpp = + drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4; + dsc_slice_count = + drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd, + true); + } else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) { + dsc_max_output_bpp = + intel_dp_dsc_get_output_bpp(max_link_clock, + max_lanes, + target_clock, + mode->hdisplay) >> 4; + dsc_slice_count = + intel_dp_dsc_get_slice_count(intel_dp, + target_clock, + mode->hdisplay); + } + } + + if ((mode_rate > max_rate && !(dsc_max_output_bpp && dsc_slice_count)) || + target_clock > max_dotclk) + return MODE_CLOCK_HIGH; + + if (mode->clock < 10000) + return MODE_CLOCK_LOW; + + if (mode->flags & DRM_MODE_FLAG_DBLCLK) + return MODE_H_ILLEGAL; + + return MODE_OK; +} + +u32 intel_dp_pack_aux(const u8 *src, int src_bytes) +{ + int i; + u32 v = 0; + + if (src_bytes > 4) + src_bytes = 4; + for (i = 0; i < src_bytes; i++) + v |= ((u32)src[i]) << ((3 - i) * 8); + return v; +} + +static void intel_dp_unpack_aux(u32 src, u8 *dst, int dst_bytes) +{ + int i; + if (dst_bytes > 4) + dst_bytes = 4; + for (i = 0; i < dst_bytes; i++) + dst[i] = src >> ((3-i) * 8); +} + +static void +intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp); +static void +intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp, + bool force_disable_vdd); +static void +intel_dp_pps_init(struct intel_dp *intel_dp); + +static intel_wakeref_t +pps_lock(struct intel_dp *intel_dp) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + intel_wakeref_t wakeref; + + /* + * See intel_power_sequencer_reset() why we need + * a power domain reference here. 
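+	 * In short: the reference get/put must happen while pps_mutex is
+	 * not held, so it is taken here before the mutex.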
+ */ + wakeref = intel_display_power_get(dev_priv, + intel_aux_power_domain(dp_to_dig_port(intel_dp))); + + mutex_lock(&dev_priv->pps_mutex); + + return wakeref; +} + +static intel_wakeref_t +pps_unlock(struct intel_dp *intel_dp, intel_wakeref_t wakeref) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + + mutex_unlock(&dev_priv->pps_mutex); + intel_display_power_put(dev_priv, + intel_aux_power_domain(dp_to_dig_port(intel_dp)), + wakeref); + return 0; +} + +#define with_pps_lock(dp, wf) \ + for ((wf) = pps_lock(dp); (wf); (wf) = pps_unlock((dp), (wf))) + +static void +vlv_power_sequencer_kick(struct intel_dp *intel_dp) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + enum pipe pipe = intel_dp->pps_pipe; + bool pll_enabled, release_cl_override = false; + enum dpio_phy phy = DPIO_PHY(pipe); + enum dpio_channel ch = vlv_pipe_to_channel(pipe); + u32 DP; + + if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN, + "skipping pipe %c power sequencer kick due to port %c being active\n", + pipe_name(pipe), port_name(intel_dig_port->base.port))) + return; + + DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n", + pipe_name(pipe), port_name(intel_dig_port->base.port)); + + /* Preserve the BIOS-computed detected bit. This is + * supposed to be read-only. + */ + DP = I915_READ(intel_dp->output_reg) & DP_DETECTED; + DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; + DP |= DP_PORT_WIDTH(1); + DP |= DP_LINK_TRAIN_PAT_1; + + if (IS_CHERRYVIEW(dev_priv)) + DP |= DP_PIPE_SEL_CHV(pipe); + else + DP |= DP_PIPE_SEL(pipe); + + pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE; + + /* + * The DPLL for the pipe must be enabled for this to work. + * So enable temporarily it if it's not already enabled. + */ + if (!pll_enabled) { + release_cl_override = IS_CHERRYVIEW(dev_priv) && + !chv_phy_powergate_ch(dev_priv, phy, ch, true); + + if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ? + &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) { + DRM_ERROR("Failed to force on pll for pipe %c!\n", + pipe_name(pipe)); + return; + } + } + + /* + * Similar magic as in intel_dp_enable_port(). + * We _must_ do this port enable + disable trick + * to make this power sequencer lock onto the port. + * Otherwise even VDD force bit won't work. + */ + I915_WRITE(intel_dp->output_reg, DP); + POSTING_READ(intel_dp->output_reg); + + I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN); + POSTING_READ(intel_dp->output_reg); + + I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN); + POSTING_READ(intel_dp->output_reg); + + if (!pll_enabled) { + vlv_force_pll_off(dev_priv, pipe); + + if (release_cl_override) + chv_phy_powergate_ch(dev_priv, phy, ch, false); + } +} + +static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv) +{ + struct intel_encoder *encoder; + unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B); + + /* + * We don't have power sequencer currently. + * Pick one that's not used by other ports. 
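+	 *
+	 * For example, starting from pipes = (1 << PIPE_A) | (1 << PIPE_B),
+	 * if the pipe A sequencer is already claimed the mask becomes
+	 * (1 << PIPE_B) and ffs(pipes) - 1 below returns PIPE_B.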
+ */ + for_each_intel_dp(&dev_priv->drm, encoder) { + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + + if (encoder->type == INTEL_OUTPUT_EDP) { + WARN_ON(intel_dp->active_pipe != INVALID_PIPE && + intel_dp->active_pipe != intel_dp->pps_pipe); + + if (intel_dp->pps_pipe != INVALID_PIPE) + pipes &= ~(1 << intel_dp->pps_pipe); + } else { + WARN_ON(intel_dp->pps_pipe != INVALID_PIPE); + + if (intel_dp->active_pipe != INVALID_PIPE) + pipes &= ~(1 << intel_dp->active_pipe); + } + } + + if (pipes == 0) + return INVALID_PIPE; + + return ffs(pipes) - 1; +} + +static enum pipe +vlv_power_sequencer_pipe(struct intel_dp *intel_dp) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + enum pipe pipe; + + lockdep_assert_held(&dev_priv->pps_mutex); + + /* We should never land here with regular DP ports */ + WARN_ON(!intel_dp_is_edp(intel_dp)); + + WARN_ON(intel_dp->active_pipe != INVALID_PIPE && + intel_dp->active_pipe != intel_dp->pps_pipe); + + if (intel_dp->pps_pipe != INVALID_PIPE) + return intel_dp->pps_pipe; + + pipe = vlv_find_free_pps(dev_priv); + + /* + * Didn't find one. This should not happen since there + * are two power sequencers and up to two eDP ports. + */ + if (WARN_ON(pipe == INVALID_PIPE)) + pipe = PIPE_A; + + vlv_steal_power_sequencer(dev_priv, pipe); + intel_dp->pps_pipe = pipe; + + DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n", + pipe_name(intel_dp->pps_pipe), + port_name(intel_dig_port->base.port)); + + /* init power sequencer on this pipe and port */ + intel_dp_init_panel_power_sequencer(intel_dp); + intel_dp_init_panel_power_sequencer_registers(intel_dp, true); + + /* + * Even vdd force doesn't work until we've made + * the power sequencer lock in on the port. + */ + vlv_power_sequencer_kick(intel_dp); + + return intel_dp->pps_pipe; +} + +static int +bxt_power_sequencer_idx(struct intel_dp *intel_dp) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + int backlight_controller = dev_priv->vbt.backlight.controller; + + lockdep_assert_held(&dev_priv->pps_mutex); + + /* We should never land here with regular DP ports */ + WARN_ON(!intel_dp_is_edp(intel_dp)); + + if (!intel_dp->pps_reset) + return backlight_controller; + + intel_dp->pps_reset = false; + + /* + * Only the HW needs to be reprogrammed, the SW state is fixed and + * has been setup during connector init. 
+ */ + intel_dp_init_panel_power_sequencer_registers(intel_dp, false); + + return backlight_controller; +} + +typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv, + enum pipe pipe); + +static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv, + enum pipe pipe) +{ + return I915_READ(PP_STATUS(pipe)) & PP_ON; +} + +static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv, + enum pipe pipe) +{ + return I915_READ(PP_CONTROL(pipe)) & EDP_FORCE_VDD; +} + +static bool vlv_pipe_any(struct drm_i915_private *dev_priv, + enum pipe pipe) +{ + return true; +} + +static enum pipe +vlv_initial_pps_pipe(struct drm_i915_private *dev_priv, + enum port port, + vlv_pipe_check pipe_check) +{ + enum pipe pipe; + + for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) { + u32 port_sel = I915_READ(PP_ON_DELAYS(pipe)) & + PANEL_PORT_SELECT_MASK; + + if (port_sel != PANEL_PORT_SELECT_VLV(port)) + continue; + + if (!pipe_check(dev_priv, pipe)) + continue; + + return pipe; + } + + return INVALID_PIPE; +} + +static void +vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + enum port port = intel_dig_port->base.port; + + lockdep_assert_held(&dev_priv->pps_mutex); + + /* try to find a pipe with this port selected */ + /* first pick one where the panel is on */ + intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port, + vlv_pipe_has_pp_on); + /* didn't find one? pick one where vdd is on */ + if (intel_dp->pps_pipe == INVALID_PIPE) + intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port, + vlv_pipe_has_vdd_on); + /* didn't find one? pick one with just the correct port */ + if (intel_dp->pps_pipe == INVALID_PIPE) + intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port, + vlv_pipe_any); + + /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */ + if (intel_dp->pps_pipe == INVALID_PIPE) { + DRM_DEBUG_KMS("no initial power sequencer for port %c\n", + port_name(port)); + return; + } + + DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n", + port_name(port), pipe_name(intel_dp->pps_pipe)); + + intel_dp_init_panel_power_sequencer(intel_dp); + intel_dp_init_panel_power_sequencer_registers(intel_dp, false); +} + +void intel_power_sequencer_reset(struct drm_i915_private *dev_priv) +{ + struct intel_encoder *encoder; + + if (WARN_ON(!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) && + !IS_GEN9_LP(dev_priv))) + return; + + /* + * We can't grab pps_mutex here due to deadlock with power_domain + * mutex when power_domain functions are called while holding pps_mutex. + * That also means that in order to use pps_pipe the code needs to + * hold both a power domain reference and pps_mutex, and the power domain + * reference get/put must be done while _not_ holding pps_mutex. + * pps_{lock,unlock}() do these steps in the correct order, so one + * should use them always. 
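+	 *
+	 * The usual pattern is therefore:
+	 *
+	 *	with_pps_lock(intel_dp, wakeref) {
+	 *		... poke PP_CONTROL / PP_STATUS ...
+	 *	}
+	 *
+	 * which grabs the AUX power domain reference first, then
+	 * pps_mutex, and drops them again in the reverse order.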
+ */ + + for_each_intel_dp(&dev_priv->drm, encoder) { + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + + WARN_ON(intel_dp->active_pipe != INVALID_PIPE); + + if (encoder->type != INTEL_OUTPUT_EDP) + continue; + + if (IS_GEN9_LP(dev_priv)) + intel_dp->pps_reset = true; + else + intel_dp->pps_pipe = INVALID_PIPE; + } +} + +struct pps_registers { + i915_reg_t pp_ctrl; + i915_reg_t pp_stat; + i915_reg_t pp_on; + i915_reg_t pp_off; + i915_reg_t pp_div; +}; + +static void intel_pps_get_registers(struct intel_dp *intel_dp, + struct pps_registers *regs) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + int pps_idx = 0; + + memset(regs, 0, sizeof(*regs)); + + if (IS_GEN9_LP(dev_priv)) + pps_idx = bxt_power_sequencer_idx(intel_dp); + else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + pps_idx = vlv_power_sequencer_pipe(intel_dp); + + regs->pp_ctrl = PP_CONTROL(pps_idx); + regs->pp_stat = PP_STATUS(pps_idx); + regs->pp_on = PP_ON_DELAYS(pps_idx); + regs->pp_off = PP_OFF_DELAYS(pps_idx); + + /* Cycle delay moved from PP_DIVISOR to PP_CONTROL */ + if (IS_GEN9_LP(dev_priv) || INTEL_PCH_TYPE(dev_priv) >= PCH_CNP) + regs->pp_div = INVALID_MMIO_REG; + else + regs->pp_div = PP_DIVISOR(pps_idx); +} + +static i915_reg_t +_pp_ctrl_reg(struct intel_dp *intel_dp) +{ + struct pps_registers regs; + + intel_pps_get_registers(intel_dp, ®s); + + return regs.pp_ctrl; +} + +static i915_reg_t +_pp_stat_reg(struct intel_dp *intel_dp) +{ + struct pps_registers regs; + + intel_pps_get_registers(intel_dp, ®s); + + return regs.pp_stat; +} + +/* Reboot notifier handler to shutdown panel power to guarantee T12 timing + This function only applicable when panel PM state is not to be tracked */ +static int edp_notify_handler(struct notifier_block *this, unsigned long code, + void *unused) +{ + struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp), + edp_notifier); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + intel_wakeref_t wakeref; + + if (!intel_dp_is_edp(intel_dp) || code != SYS_RESTART) + return 0; + + with_pps_lock(intel_dp, wakeref) { + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { + enum pipe pipe = vlv_power_sequencer_pipe(intel_dp); + i915_reg_t pp_ctrl_reg, pp_div_reg; + u32 pp_div; + + pp_ctrl_reg = PP_CONTROL(pipe); + pp_div_reg = PP_DIVISOR(pipe); + pp_div = I915_READ(pp_div_reg); + pp_div &= PP_REFERENCE_DIVIDER_MASK; + + /* 0x1F write to PP_DIV_REG sets max cycle delay */ + I915_WRITE(pp_div_reg, pp_div | 0x1F); + I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS); + msleep(intel_dp->panel_power_cycle_delay); + } + } + + return 0; +} + +static bool edp_have_panel_power(struct intel_dp *intel_dp) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + + lockdep_assert_held(&dev_priv->pps_mutex); + + if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && + intel_dp->pps_pipe == INVALID_PIPE) + return false; + + return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0; +} + +static bool edp_have_panel_vdd(struct intel_dp *intel_dp) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + + lockdep_assert_held(&dev_priv->pps_mutex); + + if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && + intel_dp->pps_pipe == INVALID_PIPE) + return false; + + return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD; +} + +static void +intel_dp_check_edp(struct intel_dp *intel_dp) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + + if (!intel_dp_is_edp(intel_dp)) + return; + + if (!edp_have_panel_power(intel_dp) && 
!edp_have_panel_vdd(intel_dp)) { + WARN(1, "eDP powered off while attempting aux channel communication.\n"); + DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n", + I915_READ(_pp_stat_reg(intel_dp)), + I915_READ(_pp_ctrl_reg(intel_dp))); + } +} + +static u32 +intel_dp_aux_wait_done(struct intel_dp *intel_dp) +{ + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp); + u32 status; + bool done; + +#define C (((status = intel_uncore_read_notrace(&i915->uncore, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0) + done = wait_event_timeout(i915->gmbus_wait_queue, C, + msecs_to_jiffies_timeout(10)); + + /* just trace the final value */ + trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true); + + if (!done) + DRM_ERROR("dp aux hw did not signal timeout!\n"); +#undef C + + return status; +} + +static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + + if (index) + return 0; + + /* + * The clock divider is based off the hrawclk, and would like to run at + * 2MHz. So, take the hrawclk value and divide by 2000 and use that + */ + return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000); +} + +static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + + if (index) + return 0; + + /* + * The clock divider is based off the cdclk or PCH rawclk, and would + * like to run at 2MHz. So, take the cdclk or PCH rawclk value and + * divide by 2000 and use that + */ + if (dig_port->aux_ch == AUX_CH_A) + return DIV_ROUND_CLOSEST(dev_priv->cdclk.hw.cdclk, 2000); + else + return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000); +} + +static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + + if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) { + /* Workaround for non-ULT HSW */ + switch (index) { + case 0: return 63; + case 1: return 72; + default: return 0; + } + } + + return ilk_get_aux_clock_divider(intel_dp, index); +} + +static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index) +{ + /* + * SKL doesn't need us to program the AUX clock divider (Hardware will + * derive the clock from CDCLK automatically). We still implement the + * get_aux_clock_divider vfunc to plug-in into the existing code. + */ + return index ? 
0 : 1; +} + +static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp, + int send_bytes, + u32 aux_clock_divider) +{ + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct drm_i915_private *dev_priv = + to_i915(intel_dig_port->base.base.dev); + u32 precharge, timeout; + + if (IS_GEN(dev_priv, 6)) + precharge = 3; + else + precharge = 5; + + if (IS_BROADWELL(dev_priv)) + timeout = DP_AUX_CH_CTL_TIME_OUT_600us; + else + timeout = DP_AUX_CH_CTL_TIME_OUT_400us; + + return DP_AUX_CH_CTL_SEND_BUSY | + DP_AUX_CH_CTL_DONE | + DP_AUX_CH_CTL_INTERRUPT | + DP_AUX_CH_CTL_TIME_OUT_ERROR | + timeout | + DP_AUX_CH_CTL_RECEIVE_ERROR | + (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | + (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | + (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT); +} + +static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp, + int send_bytes, + u32 unused) +{ + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + u32 ret; + + ret = DP_AUX_CH_CTL_SEND_BUSY | + DP_AUX_CH_CTL_DONE | + DP_AUX_CH_CTL_INTERRUPT | + DP_AUX_CH_CTL_TIME_OUT_ERROR | + DP_AUX_CH_CTL_TIME_OUT_MAX | + DP_AUX_CH_CTL_RECEIVE_ERROR | + (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | + DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) | + DP_AUX_CH_CTL_SYNC_PULSE_SKL(32); + + if (intel_dig_port->tc_type == TC_PORT_TBT) + ret |= DP_AUX_CH_CTL_TBT_IO; + + return ret; +} + +static int +intel_dp_aux_xfer(struct intel_dp *intel_dp, + const u8 *send, int send_bytes, + u8 *recv, int recv_size, + u32 aux_send_ctl_flags) +{ + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct drm_i915_private *i915 = + to_i915(intel_dig_port->base.base.dev); + struct intel_uncore *uncore = &i915->uncore; + i915_reg_t ch_ctl, ch_data[5]; + u32 aux_clock_divider; + enum intel_display_power_domain aux_domain = + intel_aux_power_domain(intel_dig_port); + intel_wakeref_t aux_wakeref; + intel_wakeref_t pps_wakeref; + int i, ret, recv_bytes; + int try, clock = 0; + u32 status; + bool vdd; + + ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp); + for (i = 0; i < ARRAY_SIZE(ch_data); i++) + ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i); + + aux_wakeref = intel_display_power_get(i915, aux_domain); + pps_wakeref = pps_lock(intel_dp); + + /* + * We will be called with VDD already enabled for dpcd/edid/oui reads. + * In such cases we want to leave VDD enabled and it's up to upper layers + * to turn it off. But for eg. i2c-dev access we need to turn it on/off + * ourselves. + */ + vdd = edp_panel_vdd_on(intel_dp); + + /* dp aux is extremely sensitive to irq latency, hence request the + * lowest possible wakeup latency and so prevent the cpu from going into + * deep sleep states. + */ + pm_qos_update_request(&i915->pm_qos, 0); + + intel_dp_check_edp(intel_dp); + + /* Try to wait for any previous AUX channel activity */ + for (try = 0; try < 3; try++) { + status = intel_uncore_read_notrace(uncore, ch_ctl); + if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0) + break; + msleep(1); + } + /* just trace the final value */ + trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true); + + if (try == 3) { + static u32 last_status = -1; + const u32 status = intel_uncore_read(uncore, ch_ctl); + + if (status != last_status) { + WARN(1, "dp_aux_ch not started status 0x%08x\n", + status); + last_status = status; + } + + ret = -EBUSY; + goto out; + } + + /* Only 5 data registers! 
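+	 * Each register carries 4 bytes, hence the 20 byte cap below.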
*/ + if (WARN_ON(send_bytes > 20 || recv_size > 20)) { + ret = -E2BIG; + goto out; + } + + while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) { + u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp, + send_bytes, + aux_clock_divider); + + send_ctl |= aux_send_ctl_flags; + + /* Must try at least 3 times according to DP spec */ + for (try = 0; try < 5; try++) { + /* Load the send data into the aux channel data registers */ + for (i = 0; i < send_bytes; i += 4) + intel_uncore_write(uncore, + ch_data[i >> 2], + intel_dp_pack_aux(send + i, + send_bytes - i)); + + /* Send the command and wait for it to complete */ + intel_uncore_write(uncore, ch_ctl, send_ctl); + + status = intel_dp_aux_wait_done(intel_dp); + + /* Clear done status and any errors */ + intel_uncore_write(uncore, + ch_ctl, + status | + DP_AUX_CH_CTL_DONE | + DP_AUX_CH_CTL_TIME_OUT_ERROR | + DP_AUX_CH_CTL_RECEIVE_ERROR); + + /* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2 + * 400us delay required for errors and timeouts + * Timeout errors from the HW already meet this + * requirement so skip to next iteration + */ + if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) + continue; + + if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) { + usleep_range(400, 500); + continue; + } + if (status & DP_AUX_CH_CTL_DONE) + goto done; + } + } + + if ((status & DP_AUX_CH_CTL_DONE) == 0) { + DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status); + ret = -EBUSY; + goto out; + } + +done: + /* Check for timeout or receive error. + * Timeouts occur when the sink is not connected + */ + if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) { + DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status); + ret = -EIO; + goto out; + } + + /* Timeouts occur when the device isn't connected, so they're + * "normal" -- don't fill the kernel log with these */ + if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) { + DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status); + ret = -ETIMEDOUT; + goto out; + } + + /* Unload any bytes sent back from the other side */ + recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >> + DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT); + + /* + * By BSpec: "Message sizes of 0 or >20 are not allowed." + * We have no idea of what happened so we return -EBUSY so + * drm layer takes care for the necessary retries. 
+ */ + if (recv_bytes == 0 || recv_bytes > 20) { + DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n", + recv_bytes); + ret = -EBUSY; + goto out; + } + + if (recv_bytes > recv_size) + recv_bytes = recv_size; + + for (i = 0; i < recv_bytes; i += 4) + intel_dp_unpack_aux(intel_uncore_read(uncore, ch_data[i >> 2]), + recv + i, recv_bytes - i); + + ret = recv_bytes; +out: + pm_qos_update_request(&i915->pm_qos, PM_QOS_DEFAULT_VALUE); + + if (vdd) + edp_panel_vdd_off(intel_dp, false); + + pps_unlock(intel_dp, pps_wakeref); + intel_display_power_put_async(i915, aux_domain, aux_wakeref); + + return ret; +} + +#define BARE_ADDRESS_SIZE 3 +#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1) + +static void +intel_dp_aux_header(u8 txbuf[HEADER_SIZE], + const struct drm_dp_aux_msg *msg) +{ + txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf); + txbuf[1] = (msg->address >> 8) & 0xff; + txbuf[2] = msg->address & 0xff; + txbuf[3] = msg->size - 1; +} + +static ssize_t +intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) +{ + struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux); + u8 txbuf[20], rxbuf[20]; + size_t txsize, rxsize; + int ret; + + intel_dp_aux_header(txbuf, msg); + + switch (msg->request & ~DP_AUX_I2C_MOT) { + case DP_AUX_NATIVE_WRITE: + case DP_AUX_I2C_WRITE: + case DP_AUX_I2C_WRITE_STATUS_UPDATE: + txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE; + rxsize = 2; /* 0 or 1 data bytes */ + + if (WARN_ON(txsize > 20)) + return -E2BIG; + + WARN_ON(!msg->buffer != !msg->size); + + if (msg->buffer) + memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size); + + ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize, + rxbuf, rxsize, 0); + if (ret > 0) { + msg->reply = rxbuf[0] >> 4; + + if (ret > 1) { + /* Number of bytes written in a short write. */ + ret = clamp_t(int, rxbuf[1], 0, msg->size); + } else { + /* Return payload size. */ + ret = msg->size; + } + } + break; + + case DP_AUX_NATIVE_READ: + case DP_AUX_I2C_READ: + txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE; + rxsize = msg->size + 1; + + if (WARN_ON(rxsize > 20)) + return -E2BIG; + + ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize, + rxbuf, rxsize, 0); + if (ret > 0) { + msg->reply = rxbuf[0] >> 4; + /* + * Assume happy day, and copy the data. The caller is + * expected to check msg->reply before touching it. + * + * Return payload size. 
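+			 * (the first byte of rxbuf holds the AUX reply
+			 * header, hence the ret-- and the copy from
+			 * rxbuf + 1.)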
+ */ + ret--; + memcpy(msg->buffer, rxbuf + 1, ret); + } + break; + + default: + ret = -EINVAL; + break; + } + + return ret; +} + + +static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + enum aux_ch aux_ch = dig_port->aux_ch; + + switch (aux_ch) { + case AUX_CH_B: + case AUX_CH_C: + case AUX_CH_D: + return DP_AUX_CH_CTL(aux_ch); + default: + MISSING_CASE(aux_ch); + return DP_AUX_CH_CTL(AUX_CH_B); + } +} + +static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + enum aux_ch aux_ch = dig_port->aux_ch; + + switch (aux_ch) { + case AUX_CH_B: + case AUX_CH_C: + case AUX_CH_D: + return DP_AUX_CH_DATA(aux_ch, index); + default: + MISSING_CASE(aux_ch); + return DP_AUX_CH_DATA(AUX_CH_B, index); + } +} + +static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + enum aux_ch aux_ch = dig_port->aux_ch; + + switch (aux_ch) { + case AUX_CH_A: + return DP_AUX_CH_CTL(aux_ch); + case AUX_CH_B: + case AUX_CH_C: + case AUX_CH_D: + return PCH_DP_AUX_CH_CTL(aux_ch); + default: + MISSING_CASE(aux_ch); + return DP_AUX_CH_CTL(AUX_CH_A); + } +} + +static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + enum aux_ch aux_ch = dig_port->aux_ch; + + switch (aux_ch) { + case AUX_CH_A: + return DP_AUX_CH_DATA(aux_ch, index); + case AUX_CH_B: + case AUX_CH_C: + case AUX_CH_D: + return PCH_DP_AUX_CH_DATA(aux_ch, index); + default: + MISSING_CASE(aux_ch); + return DP_AUX_CH_DATA(AUX_CH_A, index); + } +} + +static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + enum aux_ch aux_ch = dig_port->aux_ch; + + switch (aux_ch) { + case AUX_CH_A: + case AUX_CH_B: + case AUX_CH_C: + case AUX_CH_D: + case AUX_CH_E: + case AUX_CH_F: + return DP_AUX_CH_CTL(aux_ch); + default: + MISSING_CASE(aux_ch); + return DP_AUX_CH_CTL(AUX_CH_A); + } +} + +static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + enum aux_ch aux_ch = dig_port->aux_ch; + + switch (aux_ch) { + case AUX_CH_A: + case AUX_CH_B: + case AUX_CH_C: + case AUX_CH_D: + case AUX_CH_E: + case AUX_CH_F: + return DP_AUX_CH_DATA(aux_ch, index); + default: + MISSING_CASE(aux_ch); + return DP_AUX_CH_DATA(AUX_CH_A, index); + } +} + +static void +intel_dp_aux_fini(struct intel_dp *intel_dp) +{ + kfree(intel_dp->aux.name); +} + +static void +intel_dp_aux_init(struct intel_dp *intel_dp) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct intel_encoder *encoder = &dig_port->base; + + if (INTEL_GEN(dev_priv) >= 9) { + intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg; + intel_dp->aux_ch_data_reg = skl_aux_data_reg; + } else if (HAS_PCH_SPLIT(dev_priv)) { + intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg; + intel_dp->aux_ch_data_reg = ilk_aux_data_reg; + } else { + 
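+		/* everything else: g4x-style AUX CH registers */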
intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg; + intel_dp->aux_ch_data_reg = g4x_aux_data_reg; + } + + if (INTEL_GEN(dev_priv) >= 9) + intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider; + else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) + intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider; + else if (HAS_PCH_SPLIT(dev_priv)) + intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider; + else + intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider; + + if (INTEL_GEN(dev_priv) >= 9) + intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl; + else + intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl; + + drm_dp_aux_init(&intel_dp->aux); + + /* Failure to allocate our preferred name is not critical */ + intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c", + port_name(encoder->port)); + intel_dp->aux.transfer = intel_dp_aux_transfer; +} + +bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp) +{ + int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1]; + + return max_rate >= 540000; +} + +bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp) +{ + int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1]; + + return max_rate >= 810000; +} + +static void +intel_dp_set_clock(struct intel_encoder *encoder, + struct intel_crtc_state *pipe_config) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + const struct dp_link_dpll *divisor = NULL; + int i, count = 0; + + if (IS_G4X(dev_priv)) { + divisor = g4x_dpll; + count = ARRAY_SIZE(g4x_dpll); + } else if (HAS_PCH_SPLIT(dev_priv)) { + divisor = pch_dpll; + count = ARRAY_SIZE(pch_dpll); + } else if (IS_CHERRYVIEW(dev_priv)) { + divisor = chv_dpll; + count = ARRAY_SIZE(chv_dpll); + } else if (IS_VALLEYVIEW(dev_priv)) { + divisor = vlv_dpll; + count = ARRAY_SIZE(vlv_dpll); + } + + if (divisor && count) { + for (i = 0; i < count; i++) { + if (pipe_config->port_clock == divisor[i].clock) { + pipe_config->dpll = divisor[i].dpll; + pipe_config->clock_set = true; + break; + } + } + } +} + +static void snprintf_int_array(char *str, size_t len, + const int *array, int nelem) +{ + int i; + + str[0] = '\0'; + + for (i = 0; i < nelem; i++) { + int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]); + if (r >= len) + return; + str += r; + len -= r; + } +} + +static void intel_dp_print_rates(struct intel_dp *intel_dp) +{ + char str[128]; /* FIXME: too big for stack? */ + + if ((drm_debug & DRM_UT_KMS) == 0) + return; + + snprintf_int_array(str, sizeof(str), + intel_dp->source_rates, intel_dp->num_source_rates); + DRM_DEBUG_KMS("source rates: %s\n", str); + + snprintf_int_array(str, sizeof(str), + intel_dp->sink_rates, intel_dp->num_sink_rates); + DRM_DEBUG_KMS("sink rates: %s\n", str); + + snprintf_int_array(str, sizeof(str), + intel_dp->common_rates, intel_dp->num_common_rates); + DRM_DEBUG_KMS("common rates: %s\n", str); +} + +int +intel_dp_max_link_rate(struct intel_dp *intel_dp) +{ + int len; + + len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate); + if (WARN_ON(len <= 0)) + return 162000; + + return intel_dp->common_rates[len - 1]; +} + +int intel_dp_rate_select(struct intel_dp *intel_dp, int rate) +{ + int i = intel_dp_rate_index(intel_dp->sink_rates, + intel_dp->num_sink_rates, rate); + + if (WARN_ON(i < 0)) + i = 0; + + return i; +} + +void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock, + u8 *link_bw, u8 *rate_select) +{ + /* eDP 1.4 rate select method. 
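+	 * The link_bw code is left at zero and rate_select carries an
+	 * index into the sink's supported link rate table instead of a
+	 * fixed bandwidth code.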
*/ + if (intel_dp->use_rate_select) { + *link_bw = 0; + *rate_select = + intel_dp_rate_select(intel_dp, port_clock); + } else { + *link_bw = drm_dp_link_rate_to_bw_code(port_clock); + *rate_select = 0; + } +} + +static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp, + const struct intel_crtc_state *pipe_config) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + + return INTEL_GEN(dev_priv) >= 11 && + pipe_config->cpu_transcoder != TRANSCODER_A; +} + +static bool intel_dp_supports_fec(struct intel_dp *intel_dp, + const struct intel_crtc_state *pipe_config) +{ + return intel_dp_source_supports_fec(intel_dp, pipe_config) && + drm_dp_sink_supports_fec(intel_dp->fec_capable); +} + +static bool intel_dp_source_supports_dsc(struct intel_dp *intel_dp, + const struct intel_crtc_state *pipe_config) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + + return INTEL_GEN(dev_priv) >= 10 && + pipe_config->cpu_transcoder != TRANSCODER_A; +} + +static bool intel_dp_supports_dsc(struct intel_dp *intel_dp, + const struct intel_crtc_state *pipe_config) +{ + if (!intel_dp_is_edp(intel_dp) && !pipe_config->fec_enable) + return false; + + return intel_dp_source_supports_dsc(intel_dp, pipe_config) && + drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd); +} + +static int intel_dp_compute_bpp(struct intel_dp *intel_dp, + struct intel_crtc_state *pipe_config) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_connector *intel_connector = intel_dp->attached_connector; + int bpp, bpc; + + bpp = pipe_config->pipe_bpp; + bpc = drm_dp_downstream_max_bpc(intel_dp->dpcd, intel_dp->downstream_ports); + + if (bpc > 0) + bpp = min(bpp, 3*bpc); + + if (intel_dp_is_edp(intel_dp)) { + /* Get bpp from vbt only for panels that dont have bpp in edid */ + if (intel_connector->base.display_info.bpc == 0 && + dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp) { + DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n", + dev_priv->vbt.edp.bpp); + bpp = dev_priv->vbt.edp.bpp; + } + } + + return bpp; +} + +/* Adjust link config limits based on compliance test requests. */ +void +intel_dp_adjust_compliance_config(struct intel_dp *intel_dp, + struct intel_crtc_state *pipe_config, + struct link_config_limits *limits) +{ + /* For DP Compliance we override the computed bpp for the pipe */ + if (intel_dp->compliance.test_data.bpc != 0) { + int bpp = 3 * intel_dp->compliance.test_data.bpc; + + limits->min_bpp = limits->max_bpp = bpp; + pipe_config->dither_force_disable = bpp == 6 * 3; + + DRM_DEBUG_KMS("Setting pipe_bpp to %d\n", bpp); + } + + /* Use values requested by Compliance Test Request */ + if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) { + int index; + + /* Validate the compliance test data since max values + * might have changed due to link train fallback. + */ + if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate, + intel_dp->compliance.test_lane_count)) { + index = intel_dp_rate_index(intel_dp->common_rates, + intel_dp->num_common_rates, + intel_dp->compliance.test_link_rate); + if (index >= 0) + limits->min_clock = limits->max_clock = index; + limits->min_lane_count = limits->max_lane_count = + intel_dp->compliance.test_lane_count; + } + } +} + +static int intel_dp_output_bpp(const struct intel_crtc_state *crtc_state, int bpp) +{ + /* + * bpp value was assumed to RGB format. And YCbCr 4:2:0 output + * format of the number of bytes per pixel will be half the number + * of bytes of RGB pixel. 
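+	 * E.g. 8 bpc RGB needs 24 bpp on the link, while 8 bpc
+	 * YCbCr 4:2:0 averages out to 12 bpp.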
+ */ + if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) + bpp /= 2; + + return bpp; +} + +/* Optimize link config in order: max bpp, min clock, min lanes */ +static int +intel_dp_compute_link_config_wide(struct intel_dp *intel_dp, + struct intel_crtc_state *pipe_config, + const struct link_config_limits *limits) +{ + struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; + int bpp, clock, lane_count; + int mode_rate, link_clock, link_avail; + + for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) { + mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock, + bpp); + + for (clock = limits->min_clock; clock <= limits->max_clock; clock++) { + for (lane_count = limits->min_lane_count; + lane_count <= limits->max_lane_count; + lane_count <<= 1) { + link_clock = intel_dp->common_rates[clock]; + link_avail = intel_dp_max_data_rate(link_clock, + lane_count); + + if (mode_rate <= link_avail) { + pipe_config->lane_count = lane_count; + pipe_config->pipe_bpp = bpp; + pipe_config->port_clock = link_clock; + + return 0; + } + } + } + } + + return -EINVAL; +} + +static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc) +{ + int i, num_bpc; + u8 dsc_bpc[3] = {0}; + + num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd, + dsc_bpc); + for (i = 0; i < num_bpc; i++) { + if (dsc_max_bpc >= dsc_bpc[i]) + return dsc_bpc[i] * 3; + } + + return 0; +} + +static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp, + struct intel_crtc_state *pipe_config, + struct drm_connector_state *conn_state, + struct link_config_limits *limits) +{ + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); + struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; + u8 dsc_max_bpc; + int pipe_bpp; + int ret; + + pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) && + intel_dp_supports_fec(intel_dp, pipe_config); + + if (!intel_dp_supports_dsc(intel_dp, pipe_config)) + return -EINVAL; + + dsc_max_bpc = min_t(u8, DP_DSC_MAX_SUPPORTED_BPC, + conn_state->max_requested_bpc); + + pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc); + if (pipe_bpp < DP_DSC_MIN_SUPPORTED_BPC * 3) { + DRM_DEBUG_KMS("No DSC support for less than 8bpc\n"); + return -EINVAL; + } + + /* + * For now enable DSC for max bpp, max link rate, max lane count. + * Optimize this later for the minimum possible link rate/lane count + * with DSC enabled for the requested mode. 
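+	 * i.e. limits->max_clock and limits->max_lane_count are used
+	 * directly below rather than searching for a smaller working
+	 * configuration.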
+ */ + pipe_config->pipe_bpp = pipe_bpp; + pipe_config->port_clock = intel_dp->common_rates[limits->max_clock]; + pipe_config->lane_count = limits->max_lane_count; + + if (intel_dp_is_edp(intel_dp)) { + pipe_config->dsc_params.compressed_bpp = + min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4, + pipe_config->pipe_bpp); + pipe_config->dsc_params.slice_count = + drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd, + true); + } else { + u16 dsc_max_output_bpp; + u8 dsc_dp_slice_count; + + dsc_max_output_bpp = + intel_dp_dsc_get_output_bpp(pipe_config->port_clock, + pipe_config->lane_count, + adjusted_mode->crtc_clock, + adjusted_mode->crtc_hdisplay); + dsc_dp_slice_count = + intel_dp_dsc_get_slice_count(intel_dp, + adjusted_mode->crtc_clock, + adjusted_mode->crtc_hdisplay); + if (!dsc_max_output_bpp || !dsc_dp_slice_count) { + DRM_DEBUG_KMS("Compressed BPP/Slice Count not supported\n"); + return -EINVAL; + } + pipe_config->dsc_params.compressed_bpp = min_t(u16, + dsc_max_output_bpp >> 4, + pipe_config->pipe_bpp); + pipe_config->dsc_params.slice_count = dsc_dp_slice_count; + } + /* + * VDSC engine operates at 1 Pixel per clock, so if peak pixel rate + * is greater than the maximum Cdclock and if slice count is even + * then we need to use 2 VDSC instances. + */ + if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq) { + if (pipe_config->dsc_params.slice_count > 1) { + pipe_config->dsc_params.dsc_split = true; + } else { + DRM_DEBUG_KMS("Cannot split stream to use 2 VDSC instances\n"); + return -EINVAL; + } + } + + ret = intel_dp_compute_dsc_params(intel_dp, pipe_config); + if (ret < 0) { + DRM_DEBUG_KMS("Cannot compute valid DSC parameters for Input Bpp = %d " + "Compressed BPP = %d\n", + pipe_config->pipe_bpp, + pipe_config->dsc_params.compressed_bpp); + return ret; + } + + pipe_config->dsc_params.compression_enable = true; + DRM_DEBUG_KMS("DP DSC computed with Input Bpp = %d " + "Compressed Bpp = %d Slice Count = %d\n", + pipe_config->pipe_bpp, + pipe_config->dsc_params.compressed_bpp, + pipe_config->dsc_params.slice_count); + + return 0; +} + +int intel_dp_min_bpp(const struct intel_crtc_state *crtc_state) +{ + if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB) + return 6 * 3; + else + return 8 * 3; +} + +static int +intel_dp_compute_link_config(struct intel_encoder *encoder, + struct intel_crtc_state *pipe_config, + struct drm_connector_state *conn_state) +{ + struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct link_config_limits limits; + int common_len; + int ret; + + common_len = intel_dp_common_len_rate_limit(intel_dp, + intel_dp->max_link_rate); + + /* No common link rates between source and sink */ + WARN_ON(common_len <= 0); + + limits.min_clock = 0; + limits.max_clock = common_len - 1; + + limits.min_lane_count = 1; + limits.max_lane_count = intel_dp_max_lane_count(intel_dp); + + limits.min_bpp = intel_dp_min_bpp(pipe_config); + limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config); + + if (intel_dp_is_edp(intel_dp)) { + /* + * Use the maximum clock and number of lanes the eDP panel + * advertizes being capable of. The panels are generally + * designed to support only a single clock and lane + * configuration, and typically these values correspond to the + * native resolution of the panel. 
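+		 * Pinning the minimums to the maximums below limits the
+		 * link config search to that single configuration.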
+ */ + limits.min_lane_count = limits.max_lane_count; + limits.min_clock = limits.max_clock; + } + + intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits); + + DRM_DEBUG_KMS("DP link computation with max lane count %i " + "max rate %d max bpp %d pixel clock %iKHz\n", + limits.max_lane_count, + intel_dp->common_rates[limits.max_clock], + limits.max_bpp, adjusted_mode->crtc_clock); + + /* + * Optimize for slow and wide. This is the place to add alternative + * optimization policy. + */ + ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits); + + /* enable compression if the mode doesn't fit available BW */ + DRM_DEBUG_KMS("Force DSC en = %d\n", intel_dp->force_dsc_en); + if (ret || intel_dp->force_dsc_en) { + ret = intel_dp_dsc_compute_config(intel_dp, pipe_config, + conn_state, &limits); + if (ret < 0) + return ret; + } + + if (pipe_config->dsc_params.compression_enable) { + DRM_DEBUG_KMS("DP lane count %d clock %d Input bpp %d Compressed bpp %d\n", + pipe_config->lane_count, pipe_config->port_clock, + pipe_config->pipe_bpp, + pipe_config->dsc_params.compressed_bpp); + + DRM_DEBUG_KMS("DP link rate required %i available %i\n", + intel_dp_link_required(adjusted_mode->crtc_clock, + pipe_config->dsc_params.compressed_bpp), + intel_dp_max_data_rate(pipe_config->port_clock, + pipe_config->lane_count)); + } else { + DRM_DEBUG_KMS("DP lane count %d clock %d bpp %d\n", + pipe_config->lane_count, pipe_config->port_clock, + pipe_config->pipe_bpp); + + DRM_DEBUG_KMS("DP link rate required %i available %i\n", + intel_dp_link_required(adjusted_mode->crtc_clock, + pipe_config->pipe_bpp), + intel_dp_max_data_rate(pipe_config->port_clock, + pipe_config->lane_count)); + } + return 0; +} + +static int +intel_dp_ycbcr420_config(struct intel_dp *intel_dp, + struct drm_connector *connector, + struct intel_crtc_state *crtc_state) +{ + const struct drm_display_info *info = &connector->display_info; + const struct drm_display_mode *adjusted_mode = + &crtc_state->base.adjusted_mode; + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + int ret; + + if (!drm_mode_is_420_only(info, adjusted_mode) || + !intel_dp_get_colorimetry_status(intel_dp) || + !connector->ycbcr_420_allowed) + return 0; + + crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR420; + + /* YCBCR 420 output conversion needs a scaler */ + ret = skl_update_scaler_crtc(crtc_state); + if (ret) { + DRM_DEBUG_KMS("Scaler allocation for output failed\n"); + return ret; + } + + intel_pch_panel_fitting(crtc, crtc_state, DRM_MODE_SCALE_FULLSCREEN); + + return 0; +} + +bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + const struct intel_digital_connector_state *intel_conn_state = + to_intel_digital_connector_state(conn_state); + const struct drm_display_mode *adjusted_mode = + &crtc_state->base.adjusted_mode; + + if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) { + /* + * See: + * CEA-861-E - 5.1 Default Encoding Parameters + * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry + */ + return crtc_state->pipe_bpp != 18 && + drm_default_rgb_quant_range(adjusted_mode) == + HDMI_QUANTIZATION_RANGE_LIMITED; + } else { + return intel_conn_state->broadcast_rgb == + INTEL_BROADCAST_RGB_LIMITED; + } +} + +int +intel_dp_compute_config(struct intel_encoder *encoder, + struct intel_crtc_state *pipe_config, + struct drm_connector_state *conn_state) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct 
drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_lspcon *lspcon = enc_to_intel_lspcon(&encoder->base); + enum port port = encoder->port; + struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc); + struct intel_connector *intel_connector = intel_dp->attached_connector; + struct intel_digital_connector_state *intel_conn_state = + to_intel_digital_connector_state(conn_state); + bool constant_n = drm_dp_has_quirk(&intel_dp->desc, + DP_DPCD_QUIRK_CONSTANT_N); + int ret = 0, output_bpp; + + if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A) + pipe_config->has_pch_encoder = true; + + pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; + if (lspcon->active) + lspcon_ycbcr420_config(&intel_connector->base, pipe_config); + else + ret = intel_dp_ycbcr420_config(intel_dp, &intel_connector->base, + pipe_config); + + if (ret) + return ret; + + pipe_config->has_drrs = false; + if (IS_G4X(dev_priv) || port == PORT_A) + pipe_config->has_audio = false; + else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO) + pipe_config->has_audio = intel_dp->has_audio; + else + pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON; + + if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) { + intel_fixed_panel_mode(intel_connector->panel.fixed_mode, + adjusted_mode); + + if (INTEL_GEN(dev_priv) >= 9) { + ret = skl_update_scaler_crtc(pipe_config); + if (ret) + return ret; + } + + if (HAS_GMCH(dev_priv)) + intel_gmch_panel_fitting(intel_crtc, pipe_config, + conn_state->scaling_mode); + else + intel_pch_panel_fitting(intel_crtc, pipe_config, + conn_state->scaling_mode); + } + + if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) + return -EINVAL; + + if (HAS_GMCH(dev_priv) && + adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) + return -EINVAL; + + if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) + return -EINVAL; + + ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state); + if (ret < 0) + return ret; + + pipe_config->limited_color_range = + intel_dp_limited_color_range(pipe_config, conn_state); + + if (pipe_config->dsc_params.compression_enable) + output_bpp = pipe_config->dsc_params.compressed_bpp; + else + output_bpp = intel_dp_output_bpp(pipe_config, pipe_config->pipe_bpp); + + intel_link_compute_m_n(output_bpp, + pipe_config->lane_count, + adjusted_mode->crtc_clock, + pipe_config->port_clock, + &pipe_config->dp_m_n, + constant_n); + + if (intel_connector->panel.downclock_mode != NULL && + dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) { + pipe_config->has_drrs = true; + intel_link_compute_m_n(output_bpp, + pipe_config->lane_count, + intel_connector->panel.downclock_mode->clock, + pipe_config->port_clock, + &pipe_config->dp_m2_n2, + constant_n); + } + + if (!HAS_DDI(dev_priv)) + intel_dp_set_clock(encoder, pipe_config); + + intel_psr_compute_config(intel_dp, pipe_config); + + return 0; +} + +void intel_dp_set_link_params(struct intel_dp *intel_dp, + int link_rate, u8 lane_count, + bool link_mst) +{ + intel_dp->link_trained = false; + intel_dp->link_rate = link_rate; + intel_dp->lane_count = lane_count; + intel_dp->link_mst = link_mst; +} + +static void intel_dp_prepare(struct intel_encoder *encoder, + const struct intel_crtc_state *pipe_config) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + enum port port = encoder->port; + struct intel_crtc *crtc = 
to_intel_crtc(pipe_config->base.crtc); + const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; + + intel_dp_set_link_params(intel_dp, pipe_config->port_clock, + pipe_config->lane_count, + intel_crtc_has_type(pipe_config, + INTEL_OUTPUT_DP_MST)); + + /* + * There are four kinds of DP registers: + * + * IBX PCH + * SNB CPU + * IVB CPU + * CPT PCH + * + * IBX PCH and CPU are the same for almost everything, + * except that the CPU DP PLL is configured in this + * register + * + * CPT PCH is quite different, having many bits moved + * to the TRANS_DP_CTL register instead. That + * configuration happens (oddly) in ironlake_pch_enable + */ + + /* Preserve the BIOS-computed detected bit. This is + * supposed to be read-only. + */ + intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED; + + /* Handle DP bits in common between all three register formats */ + intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; + intel_dp->DP |= DP_PORT_WIDTH(pipe_config->lane_count); + + /* Split out the IBX/CPU vs CPT settings */ + + if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) { + if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) + intel_dp->DP |= DP_SYNC_HS_HIGH; + if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) + intel_dp->DP |= DP_SYNC_VS_HIGH; + intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; + + if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) + intel_dp->DP |= DP_ENHANCED_FRAMING; + + intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe); + } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) { + u32 trans_dp; + + intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; + + trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe)); + if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) + trans_dp |= TRANS_DP_ENH_FRAMING; + else + trans_dp &= ~TRANS_DP_ENH_FRAMING; + I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp); + } else { + if (IS_G4X(dev_priv) && pipe_config->limited_color_range) + intel_dp->DP |= DP_COLOR_RANGE_16_235; + + if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) + intel_dp->DP |= DP_SYNC_HS_HIGH; + if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) + intel_dp->DP |= DP_SYNC_VS_HIGH; + intel_dp->DP |= DP_LINK_TRAIN_OFF; + + if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) + intel_dp->DP |= DP_ENHANCED_FRAMING; + + if (IS_CHERRYVIEW(dev_priv)) + intel_dp->DP |= DP_PIPE_SEL_CHV(crtc->pipe); + else + intel_dp->DP |= DP_PIPE_SEL(crtc->pipe); + } +} + +#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK) +#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE) + +#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0) +#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0) + +#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK) +#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE) + +static void intel_pps_verify_state(struct intel_dp *intel_dp); + +static void wait_panel_status(struct intel_dp *intel_dp, + u32 mask, + u32 value) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + i915_reg_t pp_stat_reg, pp_ctrl_reg; + + lockdep_assert_held(&dev_priv->pps_mutex); + + intel_pps_verify_state(intel_dp); + + pp_stat_reg = _pp_stat_reg(intel_dp); + pp_ctrl_reg = _pp_ctrl_reg(intel_dp); + + DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n", + mask, value, + I915_READ(pp_stat_reg), + I915_READ(pp_ctrl_reg)); + + if (intel_wait_for_register(&dev_priv->uncore, + pp_stat_reg, mask, value, + 5000)) + DRM_ERROR("Panel status timeout: status %08x control %08x\n", + I915_READ(pp_stat_reg), + 
I915_READ(pp_ctrl_reg)); + + DRM_DEBUG_KMS("Wait complete\n"); +} + +static void wait_panel_on(struct intel_dp *intel_dp) +{ + DRM_DEBUG_KMS("Wait for panel power on\n"); + wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE); +} + +static void wait_panel_off(struct intel_dp *intel_dp) +{ + DRM_DEBUG_KMS("Wait for panel power off time\n"); + wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE); +} + +static void wait_panel_power_cycle(struct intel_dp *intel_dp) +{ + ktime_t panel_power_on_time; + s64 panel_power_off_duration; + + DRM_DEBUG_KMS("Wait for panel power cycle\n"); + + /* take the difference of currrent time and panel power off time + * and then make panel wait for t11_t12 if needed. */ + panel_power_on_time = ktime_get_boottime(); + panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time); + + /* When we disable the VDD override bit last we have to do the manual + * wait. */ + if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay) + wait_remaining_ms_from_jiffies(jiffies, + intel_dp->panel_power_cycle_delay - panel_power_off_duration); + + wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE); +} + +static void wait_backlight_on(struct intel_dp *intel_dp) +{ + wait_remaining_ms_from_jiffies(intel_dp->last_power_on, + intel_dp->backlight_on_delay); +} + +static void edp_wait_backlight_off(struct intel_dp *intel_dp) +{ + wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off, + intel_dp->backlight_off_delay); +} + +/* Read the current pp_control value, unlocking the register if it + * is locked + */ + +static u32 ironlake_get_pp_control(struct intel_dp *intel_dp) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + u32 control; + + lockdep_assert_held(&dev_priv->pps_mutex); + + control = I915_READ(_pp_ctrl_reg(intel_dp)); + if (WARN_ON(!HAS_DDI(dev_priv) && + (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) { + control &= ~PANEL_UNLOCK_MASK; + control |= PANEL_UNLOCK_REGS; + } + return control; +} + +/* + * Must be paired with edp_panel_vdd_off(). + * Must hold pps_mutex around the whole on/off sequence. + * Can be nested with intel_edp_panel_vdd_{on,off}() calls. 
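+ *
+ * intel_dp_aux_xfer() shows the expected usage under pps_lock():
+ *
+ *	vdd = edp_panel_vdd_on(intel_dp);
+ *	... AUX/DPCD access ...
+ *	if (vdd)
+ *		edp_panel_vdd_off(intel_dp, false);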
+ */ +static bool edp_panel_vdd_on(struct intel_dp *intel_dp) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + u32 pp; + i915_reg_t pp_stat_reg, pp_ctrl_reg; + bool need_to_disable = !intel_dp->want_panel_vdd; + + lockdep_assert_held(&dev_priv->pps_mutex); + + if (!intel_dp_is_edp(intel_dp)) + return false; + + cancel_delayed_work(&intel_dp->panel_vdd_work); + intel_dp->want_panel_vdd = true; + + if (edp_have_panel_vdd(intel_dp)) + return need_to_disable; + + intel_display_power_get(dev_priv, + intel_aux_power_domain(intel_dig_port)); + + DRM_DEBUG_KMS("Turning eDP port %c VDD on\n", + port_name(intel_dig_port->base.port)); + + if (!edp_have_panel_power(intel_dp)) + wait_panel_power_cycle(intel_dp); + + pp = ironlake_get_pp_control(intel_dp); + pp |= EDP_FORCE_VDD; + + pp_stat_reg = _pp_stat_reg(intel_dp); + pp_ctrl_reg = _pp_ctrl_reg(intel_dp); + + I915_WRITE(pp_ctrl_reg, pp); + POSTING_READ(pp_ctrl_reg); + DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n", + I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg)); + /* + * If the panel wasn't on, delay before accessing aux channel + */ + if (!edp_have_panel_power(intel_dp)) { + DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n", + port_name(intel_dig_port->base.port)); + msleep(intel_dp->panel_power_up_delay); + } + + return need_to_disable; +} + +/* + * Must be paired with intel_edp_panel_vdd_off() or + * intel_edp_panel_off(). + * Nested calls to these functions are not allowed since + * we drop the lock. Caller must use some higher level + * locking to prevent nested calls from other threads. + */ +void intel_edp_panel_vdd_on(struct intel_dp *intel_dp) +{ + intel_wakeref_t wakeref; + bool vdd; + + if (!intel_dp_is_edp(intel_dp)) + return; + + vdd = false; + with_pps_lock(intel_dp, wakeref) + vdd = edp_panel_vdd_on(intel_dp); + I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n", + port_name(dp_to_dig_port(intel_dp)->base.port)); +} + +static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_digital_port *intel_dig_port = + dp_to_dig_port(intel_dp); + u32 pp; + i915_reg_t pp_stat_reg, pp_ctrl_reg; + + lockdep_assert_held(&dev_priv->pps_mutex); + + WARN_ON(intel_dp->want_panel_vdd); + + if (!edp_have_panel_vdd(intel_dp)) + return; + + DRM_DEBUG_KMS("Turning eDP port %c VDD off\n", + port_name(intel_dig_port->base.port)); + + pp = ironlake_get_pp_control(intel_dp); + pp &= ~EDP_FORCE_VDD; + + pp_ctrl_reg = _pp_ctrl_reg(intel_dp); + pp_stat_reg = _pp_stat_reg(intel_dp); + + I915_WRITE(pp_ctrl_reg, pp); + POSTING_READ(pp_ctrl_reg); + + /* Make sure sequencer is idle before allowing subsequent activity */ + DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n", + I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg)); + + if ((pp & PANEL_POWER_ON) == 0) + intel_dp->panel_power_off_time = ktime_get_boottime(); + + intel_display_power_put_unchecked(dev_priv, + intel_aux_power_domain(intel_dig_port)); +} + +static void edp_panel_vdd_work(struct work_struct *__work) +{ + struct intel_dp *intel_dp = + container_of(to_delayed_work(__work), + struct intel_dp, panel_vdd_work); + intel_wakeref_t wakeref; + + with_pps_lock(intel_dp, wakeref) { + if (!intel_dp->want_panel_vdd) + edp_panel_vdd_off_sync(intel_dp); + } +} + +static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp) +{ + unsigned long delay; + + /* + * Queue the timer to fire a long time from now 
(relative to the power + * down delay) to keep the panel power up across a sequence of + * operations. + */ + delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5); + schedule_delayed_work(&intel_dp->panel_vdd_work, delay); +} + +/* + * Must be paired with edp_panel_vdd_on(). + * Must hold pps_mutex around the whole on/off sequence. + * Can be nested with intel_edp_panel_vdd_{on,off}() calls. + */ +static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + + lockdep_assert_held(&dev_priv->pps_mutex); + + if (!intel_dp_is_edp(intel_dp)) + return; + + I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on", + port_name(dp_to_dig_port(intel_dp)->base.port)); + + intel_dp->want_panel_vdd = false; + + if (sync) + edp_panel_vdd_off_sync(intel_dp); + else + edp_panel_vdd_schedule_off(intel_dp); +} + +static void edp_panel_on(struct intel_dp *intel_dp) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + u32 pp; + i915_reg_t pp_ctrl_reg; + + lockdep_assert_held(&dev_priv->pps_mutex); + + if (!intel_dp_is_edp(intel_dp)) + return; + + DRM_DEBUG_KMS("Turn eDP port %c panel power on\n", + port_name(dp_to_dig_port(intel_dp)->base.port)); + + if (WARN(edp_have_panel_power(intel_dp), + "eDP port %c panel power already on\n", + port_name(dp_to_dig_port(intel_dp)->base.port))) + return; + + wait_panel_power_cycle(intel_dp); + + pp_ctrl_reg = _pp_ctrl_reg(intel_dp); + pp = ironlake_get_pp_control(intel_dp); + if (IS_GEN(dev_priv, 5)) { + /* ILK workaround: disable reset around power sequence */ + pp &= ~PANEL_POWER_RESET; + I915_WRITE(pp_ctrl_reg, pp); + POSTING_READ(pp_ctrl_reg); + } + + pp |= PANEL_POWER_ON; + if (!IS_GEN(dev_priv, 5)) + pp |= PANEL_POWER_RESET; + + I915_WRITE(pp_ctrl_reg, pp); + POSTING_READ(pp_ctrl_reg); + + wait_panel_on(intel_dp); + intel_dp->last_power_on = jiffies; + + if (IS_GEN(dev_priv, 5)) { + pp |= PANEL_POWER_RESET; /* restore panel reset bit */ + I915_WRITE(pp_ctrl_reg, pp); + POSTING_READ(pp_ctrl_reg); + } +} + +void intel_edp_panel_on(struct intel_dp *intel_dp) +{ + intel_wakeref_t wakeref; + + if (!intel_dp_is_edp(intel_dp)) + return; + + with_pps_lock(intel_dp, wakeref) + edp_panel_on(intel_dp); +} + + +static void edp_panel_off(struct intel_dp *intel_dp) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + u32 pp; + i915_reg_t pp_ctrl_reg; + + lockdep_assert_held(&dev_priv->pps_mutex); + + if (!intel_dp_is_edp(intel_dp)) + return; + + DRM_DEBUG_KMS("Turn eDP port %c panel power off\n", + port_name(dig_port->base.port)); + + WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n", + port_name(dig_port->base.port)); + + pp = ironlake_get_pp_control(intel_dp); + /* We need to switch off panel power _and_ force vdd, for otherwise some + * panels get very unhappy and cease to work. */ + pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD | + EDP_BLC_ENABLE); + + pp_ctrl_reg = _pp_ctrl_reg(intel_dp); + + intel_dp->want_panel_vdd = false; + + I915_WRITE(pp_ctrl_reg, pp); + POSTING_READ(pp_ctrl_reg); + + wait_panel_off(intel_dp); + intel_dp->panel_power_off_time = ktime_get_boottime(); + + /* We got a reference when we enabled the VDD. 
*/ + intel_display_power_put_unchecked(dev_priv, intel_aux_power_domain(dig_port)); +} + +void intel_edp_panel_off(struct intel_dp *intel_dp) +{ + intel_wakeref_t wakeref; + + if (!intel_dp_is_edp(intel_dp)) + return; + + with_pps_lock(intel_dp, wakeref) + edp_panel_off(intel_dp); +} + +/* Enable backlight in the panel power control. */ +static void _intel_edp_backlight_on(struct intel_dp *intel_dp) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + intel_wakeref_t wakeref; + + /* + * If we enable the backlight right away following a panel power + * on, we may see slight flicker as the panel syncs with the eDP + * link. So delay a bit to make sure the image is solid before + * allowing it to appear. + */ + wait_backlight_on(intel_dp); + + with_pps_lock(intel_dp, wakeref) { + i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp); + u32 pp; + + pp = ironlake_get_pp_control(intel_dp); + pp |= EDP_BLC_ENABLE; + + I915_WRITE(pp_ctrl_reg, pp); + POSTING_READ(pp_ctrl_reg); + } +} + +/* Enable backlight PWM and backlight PP control. */ +void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(conn_state->best_encoder); + + if (!intel_dp_is_edp(intel_dp)) + return; + + DRM_DEBUG_KMS("\n"); + + intel_panel_enable_backlight(crtc_state, conn_state); + _intel_edp_backlight_on(intel_dp); +} + +/* Disable backlight in the panel power control. */ +static void _intel_edp_backlight_off(struct intel_dp *intel_dp) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + intel_wakeref_t wakeref; + + if (!intel_dp_is_edp(intel_dp)) + return; + + with_pps_lock(intel_dp, wakeref) { + i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp); + u32 pp; + + pp = ironlake_get_pp_control(intel_dp); + pp &= ~EDP_BLC_ENABLE; + + I915_WRITE(pp_ctrl_reg, pp); + POSTING_READ(pp_ctrl_reg); + } + + intel_dp->last_backlight_off = jiffies; + edp_wait_backlight_off(intel_dp); +} + +/* Disable backlight PP control and backlight PWM. */ +void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(old_conn_state->best_encoder); + + if (!intel_dp_is_edp(intel_dp)) + return; + + DRM_DEBUG_KMS("\n"); + + _intel_edp_backlight_off(intel_dp); + intel_panel_disable_backlight(old_conn_state); +} + +/* + * Hook for controlling the panel power control backlight through the bl_power + * sysfs attribute. Take care to handle multiple calls. + */ +static void intel_edp_backlight_power(struct intel_connector *connector, + bool enable) +{ + struct intel_dp *intel_dp = intel_attached_dp(&connector->base); + intel_wakeref_t wakeref; + bool is_enabled; + + is_enabled = false; + with_pps_lock(intel_dp, wakeref) + is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE; + if (is_enabled == enable) + return; + + DRM_DEBUG_KMS("panel power control backlight %s\n", + enable ? 
"enable" : "disable"); + + if (enable) + _intel_edp_backlight_on(intel_dp); + else + _intel_edp_backlight_off(intel_dp); +} + +static void assert_dp_port(struct intel_dp *intel_dp, bool state) +{ + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); + bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN; + + I915_STATE_WARN(cur_state != state, + "DP port %c state assertion failure (expected %s, current %s)\n", + port_name(dig_port->base.port), + onoff(state), onoff(cur_state)); +} +#define assert_dp_port_disabled(d) assert_dp_port((d), false) + +static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state) +{ + bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE; + + I915_STATE_WARN(cur_state != state, + "eDP PLL state assertion failure (expected %s, current %s)\n", + onoff(state), onoff(cur_state)); +} +#define assert_edp_pll_enabled(d) assert_edp_pll((d), true) +#define assert_edp_pll_disabled(d) assert_edp_pll((d), false) + +static void ironlake_edp_pll_on(struct intel_dp *intel_dp, + const struct intel_crtc_state *pipe_config) +{ + struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + + assert_pipe_disabled(dev_priv, crtc->pipe); + assert_dp_port_disabled(intel_dp); + assert_edp_pll_disabled(dev_priv); + + DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n", + pipe_config->port_clock); + + intel_dp->DP &= ~DP_PLL_FREQ_MASK; + + if (pipe_config->port_clock == 162000) + intel_dp->DP |= DP_PLL_FREQ_162MHZ; + else + intel_dp->DP |= DP_PLL_FREQ_270MHZ; + + I915_WRITE(DP_A, intel_dp->DP); + POSTING_READ(DP_A); + udelay(500); + + /* + * [DevILK] Work around required when enabling DP PLL + * while a pipe is enabled going to FDI: + * 1. Wait for the start of vertical blank on the enabled pipe going to FDI + * 2. Program DP PLL enable + */ + if (IS_GEN(dev_priv, 5)) + intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe); + + intel_dp->DP |= DP_PLL_ENABLE; + + I915_WRITE(DP_A, intel_dp->DP); + POSTING_READ(DP_A); + udelay(200); +} + +static void ironlake_edp_pll_off(struct intel_dp *intel_dp, + const struct intel_crtc_state *old_crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + + assert_pipe_disabled(dev_priv, crtc->pipe); + assert_dp_port_disabled(intel_dp); + assert_edp_pll_enabled(dev_priv); + + DRM_DEBUG_KMS("disabling eDP PLL\n"); + + intel_dp->DP &= ~DP_PLL_ENABLE; + + I915_WRITE(DP_A, intel_dp->DP); + POSTING_READ(DP_A); + udelay(200); +} + +static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp) +{ + /* + * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus + * be capable of signalling downstream hpd with a long pulse. + * Whether or not that means D3 is safe to use is not clear, + * but let's assume so until proven otherwise. + * + * FIXME should really check all downstream ports... + */ + return intel_dp->dpcd[DP_DPCD_REV] == 0x11 && + intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT && + intel_dp->downstream_ports[0] & DP_DS_PORT_HPD; +} + +void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state, + bool enable) +{ + int ret; + + if (!crtc_state->dsc_params.compression_enable) + return; + + ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE, + enable ? 
DP_DECOMPRESSION_EN : 0); + if (ret < 0) + DRM_DEBUG_KMS("Failed to %s sink decompression state\n", + enable ? "enable" : "disable"); +} + +/* If the sink supports it, try to set the power state appropriately */ +void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode) +{ + int ret, i; + + /* Should have a valid DPCD by this point */ + if (intel_dp->dpcd[DP_DPCD_REV] < 0x11) + return; + + if (mode != DRM_MODE_DPMS_ON) { + if (downstream_hpd_needs_d0(intel_dp)) + return; + + ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, + DP_SET_POWER_D3); + } else { + struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp); + + /* + * When turning on, we need to retry for 1ms to give the sink + * time to wake up. + */ + for (i = 0; i < 3; i++) { + ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, + DP_SET_POWER_D0); + if (ret == 1) + break; + msleep(1); + } + + if (ret == 1 && lspcon->active) + lspcon_wait_pcon_mode(lspcon); + } + + if (ret != 1) + DRM_DEBUG_KMS("failed to %s sink power state\n", + mode == DRM_MODE_DPMS_ON ? "enable" : "disable"); +} + +static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv, + enum port port, enum pipe *pipe) +{ + enum pipe p; + + for_each_pipe(dev_priv, p) { + u32 val = I915_READ(TRANS_DP_CTL(p)); + + if ((val & TRANS_DP_PORT_SEL_MASK) == TRANS_DP_PORT_SEL(port)) { + *pipe = p; + return true; + } + } + + DRM_DEBUG_KMS("No pipe for DP port %c found\n", port_name(port)); + + /* must initialize pipe to something for the asserts */ + *pipe = PIPE_A; + + return false; +} + +bool intel_dp_port_enabled(struct drm_i915_private *dev_priv, + i915_reg_t dp_reg, enum port port, + enum pipe *pipe) +{ + bool ret; + u32 val; + + val = I915_READ(dp_reg); + + ret = val & DP_PORT_EN; + + /* asserts want to know the pipe even if the port is disabled */ + if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) + *pipe = (val & DP_PIPE_SEL_MASK_IVB) >> DP_PIPE_SEL_SHIFT_IVB; + else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) + ret &= cpt_dp_port_selected(dev_priv, port, pipe); + else if (IS_CHERRYVIEW(dev_priv)) + *pipe = (val & DP_PIPE_SEL_MASK_CHV) >> DP_PIPE_SEL_SHIFT_CHV; + else + *pipe = (val & DP_PIPE_SEL_MASK) >> DP_PIPE_SEL_SHIFT; + + return ret; +} + +static bool intel_dp_get_hw_state(struct intel_encoder *encoder, + enum pipe *pipe) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + intel_wakeref_t wakeref; + bool ret; + + wakeref = intel_display_power_get_if_enabled(dev_priv, + encoder->power_domain); + if (!wakeref) + return false; + + ret = intel_dp_port_enabled(dev_priv, intel_dp->output_reg, + encoder->port, pipe); + + intel_display_power_put(dev_priv, encoder->power_domain, wakeref); + + return ret; +} + +static void intel_dp_get_config(struct intel_encoder *encoder, + struct intel_crtc_state *pipe_config) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + u32 tmp, flags = 0; + enum port port = encoder->port; + struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc); + + if (encoder->type == INTEL_OUTPUT_EDP) + pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP); + else + pipe_config->output_types |= BIT(INTEL_OUTPUT_DP); + + tmp = I915_READ(intel_dp->output_reg); + + pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A; + + if (HAS_PCH_CPT(dev_priv) && port != PORT_A) { + u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe)); + + if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH) 
+ flags |= DRM_MODE_FLAG_PHSYNC; + else + flags |= DRM_MODE_FLAG_NHSYNC; + + if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH) + flags |= DRM_MODE_FLAG_PVSYNC; + else + flags |= DRM_MODE_FLAG_NVSYNC; + } else { + if (tmp & DP_SYNC_HS_HIGH) + flags |= DRM_MODE_FLAG_PHSYNC; + else + flags |= DRM_MODE_FLAG_NHSYNC; + + if (tmp & DP_SYNC_VS_HIGH) + flags |= DRM_MODE_FLAG_PVSYNC; + else + flags |= DRM_MODE_FLAG_NVSYNC; + } + + pipe_config->base.adjusted_mode.flags |= flags; + + if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235) + pipe_config->limited_color_range = true; + + pipe_config->lane_count = + ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1; + + intel_dp_get_m_n(crtc, pipe_config); + + if (port == PORT_A) { + if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ) + pipe_config->port_clock = 162000; + else + pipe_config->port_clock = 270000; + } + + pipe_config->base.adjusted_mode.crtc_clock = + intel_dotclock_calculate(pipe_config->port_clock, + &pipe_config->dp_m_n); + + if (intel_dp_is_edp(intel_dp) && dev_priv->vbt.edp.bpp && + pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) { + /* + * This is a big fat ugly hack. + * + * Some machines in UEFI boot mode provide us a VBT that has 18 + * bpp and 1.62 GHz link bandwidth for eDP, which for reasons + * unknown we fail to light up. Yet the same BIOS boots up with + * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as + * max, not what it tells us to use. + * + * Note: This will still be broken if the eDP panel is not lit + * up by the BIOS, and thus we can't get the mode at module + * load. + */ + DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n", + pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp); + dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp; + } +} + +static void intel_disable_dp(struct intel_encoder *encoder, + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + + intel_dp->link_trained = false; + + if (old_crtc_state->has_audio) + intel_audio_codec_disable(encoder, + old_crtc_state, old_conn_state); + + /* Make sure the panel is off before trying to change the mode. But also + * ensure that we have vdd while we switch off the panel. */ + intel_edp_panel_vdd_on(intel_dp); + intel_edp_backlight_off(old_conn_state); + intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); + intel_edp_panel_off(intel_dp); +} + +static void g4x_disable_dp(struct intel_encoder *encoder, + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) +{ + intel_disable_dp(encoder, old_crtc_state, old_conn_state); +} + +static void vlv_disable_dp(struct intel_encoder *encoder, + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) +{ + intel_disable_dp(encoder, old_crtc_state, old_conn_state); +} + +static void g4x_post_disable_dp(struct intel_encoder *encoder, + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + enum port port = encoder->port; + + /* + * Bspec does not list a specific disable sequence for g4x DP. + * Follow the ilk+ sequence (disable pipe before the port) for + * g4x DP as it does not suffer from underruns like the normal + * g4x modeset sequence (disable pipe after the port). 
+ */ + intel_dp_link_down(encoder, old_crtc_state); + + /* Only ilk+ has port A */ + if (port == PORT_A) + ironlake_edp_pll_off(intel_dp, old_crtc_state); +} + +static void vlv_post_disable_dp(struct intel_encoder *encoder, + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) +{ + intel_dp_link_down(encoder, old_crtc_state); +} + +static void chv_post_disable_dp(struct intel_encoder *encoder, + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + + intel_dp_link_down(encoder, old_crtc_state); + + vlv_dpio_get(dev_priv); + + /* Assert data lane reset */ + chv_data_lane_soft_reset(encoder, old_crtc_state, true); + + vlv_dpio_put(dev_priv); +} + +static void +_intel_dp_set_link_train(struct intel_dp *intel_dp, + u32 *DP, + u8 dp_train_pat) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + enum port port = intel_dig_port->base.port; + u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd); + + if (dp_train_pat & train_pat_mask) + DRM_DEBUG_KMS("Using DP training pattern TPS%d\n", + dp_train_pat & train_pat_mask); + + if (HAS_DDI(dev_priv)) { + u32 temp = I915_READ(DP_TP_CTL(port)); + + if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE) + temp |= DP_TP_CTL_SCRAMBLE_DISABLE; + else + temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE; + + temp &= ~DP_TP_CTL_LINK_TRAIN_MASK; + switch (dp_train_pat & train_pat_mask) { + case DP_TRAINING_PATTERN_DISABLE: + temp |= DP_TP_CTL_LINK_TRAIN_NORMAL; + + break; + case DP_TRAINING_PATTERN_1: + temp |= DP_TP_CTL_LINK_TRAIN_PAT1; + break; + case DP_TRAINING_PATTERN_2: + temp |= DP_TP_CTL_LINK_TRAIN_PAT2; + break; + case DP_TRAINING_PATTERN_3: + temp |= DP_TP_CTL_LINK_TRAIN_PAT3; + break; + case DP_TRAINING_PATTERN_4: + temp |= DP_TP_CTL_LINK_TRAIN_PAT4; + break; + } + I915_WRITE(DP_TP_CTL(port), temp); + + } else if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) || + (HAS_PCH_CPT(dev_priv) && port != PORT_A)) { + *DP &= ~DP_LINK_TRAIN_MASK_CPT; + + switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { + case DP_TRAINING_PATTERN_DISABLE: + *DP |= DP_LINK_TRAIN_OFF_CPT; + break; + case DP_TRAINING_PATTERN_1: + *DP |= DP_LINK_TRAIN_PAT_1_CPT; + break; + case DP_TRAINING_PATTERN_2: + *DP |= DP_LINK_TRAIN_PAT_2_CPT; + break; + case DP_TRAINING_PATTERN_3: + DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n"); + *DP |= DP_LINK_TRAIN_PAT_2_CPT; + break; + } + + } else { + *DP &= ~DP_LINK_TRAIN_MASK; + + switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { + case DP_TRAINING_PATTERN_DISABLE: + *DP |= DP_LINK_TRAIN_OFF; + break; + case DP_TRAINING_PATTERN_1: + *DP |= DP_LINK_TRAIN_PAT_1; + break; + case DP_TRAINING_PATTERN_2: + *DP |= DP_LINK_TRAIN_PAT_2; + break; + case DP_TRAINING_PATTERN_3: + DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n"); + *DP |= DP_LINK_TRAIN_PAT_2; + break; + } + } +} + +static void intel_dp_enable_port(struct intel_dp *intel_dp, + const struct intel_crtc_state *old_crtc_state) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + + /* enable with pattern 1 (as per spec) */ + + intel_dp_program_link_training_pattern(intel_dp, DP_TRAINING_PATTERN_1); + + /* + * Magic for VLV/CHV. We _must_ first set up the register + * without actually enabling the port, and then do another + * write to enable the port. 
Otherwise link training will + * fail when the power sequencer is freshly used for this port. + */ + intel_dp->DP |= DP_PORT_EN; + if (old_crtc_state->has_audio) + intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; + + I915_WRITE(intel_dp->output_reg, intel_dp->DP); + POSTING_READ(intel_dp->output_reg); +} + +static void intel_enable_dp(struct intel_encoder *encoder, + const struct intel_crtc_state *pipe_config, + const struct drm_connector_state *conn_state) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc); + u32 dp_reg = I915_READ(intel_dp->output_reg); + enum pipe pipe = crtc->pipe; + intel_wakeref_t wakeref; + + if (WARN_ON(dp_reg & DP_PORT_EN)) + return; + + with_pps_lock(intel_dp, wakeref) { + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + vlv_init_panel_power_sequencer(encoder, pipe_config); + + intel_dp_enable_port(intel_dp, pipe_config); + + edp_panel_vdd_on(intel_dp); + edp_panel_on(intel_dp); + edp_panel_vdd_off(intel_dp, true); + } + + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { + unsigned int lane_mask = 0x0; + + if (IS_CHERRYVIEW(dev_priv)) + lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count); + + vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp), + lane_mask); + } + + intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); + intel_dp_start_link_train(intel_dp); + intel_dp_stop_link_train(intel_dp); + + if (pipe_config->has_audio) { + DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n", + pipe_name(pipe)); + intel_audio_codec_enable(encoder, pipe_config, conn_state); + } +} + +static void g4x_enable_dp(struct intel_encoder *encoder, + const struct intel_crtc_state *pipe_config, + const struct drm_connector_state *conn_state) +{ + intel_enable_dp(encoder, pipe_config, conn_state); + intel_edp_backlight_on(pipe_config, conn_state); +} + +static void vlv_enable_dp(struct intel_encoder *encoder, + const struct intel_crtc_state *pipe_config, + const struct drm_connector_state *conn_state) +{ + intel_edp_backlight_on(pipe_config, conn_state); +} + +static void g4x_pre_enable_dp(struct intel_encoder *encoder, + const struct intel_crtc_state *pipe_config, + const struct drm_connector_state *conn_state) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + enum port port = encoder->port; + + intel_dp_prepare(encoder, pipe_config); + + /* Only ilk+ has port A */ + if (port == PORT_A) + ironlake_edp_pll_on(intel_dp, pipe_config); +} + +static void vlv_detach_power_sequencer(struct intel_dp *intel_dp) +{ + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); + enum pipe pipe = intel_dp->pps_pipe; + i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe); + + WARN_ON(intel_dp->active_pipe != INVALID_PIPE); + + if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B)) + return; + + edp_panel_vdd_off_sync(intel_dp); + + /* + * VLV seems to get confused when multiple power sequencers + * have the same port selected (even if only one has power/vdd + * enabled). The failure manifests as vlv_wait_port_ready() failing + * CHV on the other hand doesn't seem to mind having the same port + * selected in multiple power sequencers, but let's clear the + * port select always when logically disconnecting a power sequencer + * from a port. 
+ */ + DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n", + pipe_name(pipe), port_name(intel_dig_port->base.port)); + I915_WRITE(pp_on_reg, 0); + POSTING_READ(pp_on_reg); + + intel_dp->pps_pipe = INVALID_PIPE; +} + +static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv, + enum pipe pipe) +{ + struct intel_encoder *encoder; + + lockdep_assert_held(&dev_priv->pps_mutex); + + for_each_intel_dp(&dev_priv->drm, encoder) { + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + enum port port = encoder->port; + + WARN(intel_dp->active_pipe == pipe, + "stealing pipe %c power sequencer from active (e)DP port %c\n", + pipe_name(pipe), port_name(port)); + + if (intel_dp->pps_pipe != pipe) + continue; + + DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n", + pipe_name(pipe), port_name(port)); + + /* make sure vdd is off before we steal it */ + vlv_detach_power_sequencer(intel_dp); + } +} + +static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + + lockdep_assert_held(&dev_priv->pps_mutex); + + WARN_ON(intel_dp->active_pipe != INVALID_PIPE); + + if (intel_dp->pps_pipe != INVALID_PIPE && + intel_dp->pps_pipe != crtc->pipe) { + /* + * If another power sequencer was being used on this + * port previously make sure to turn off vdd there while + * we still have control of it. + */ + vlv_detach_power_sequencer(intel_dp); + } + + /* + * We may be stealing the power + * sequencer from another port. + */ + vlv_steal_power_sequencer(dev_priv, crtc->pipe); + + intel_dp->active_pipe = crtc->pipe; + + if (!intel_dp_is_edp(intel_dp)) + return; + + /* now it's all ours */ + intel_dp->pps_pipe = crtc->pipe; + + DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n", + pipe_name(intel_dp->pps_pipe), port_name(encoder->port)); + + /* init power sequencer on this pipe and port */ + intel_dp_init_panel_power_sequencer(intel_dp); + intel_dp_init_panel_power_sequencer_registers(intel_dp, true); +} + +static void vlv_pre_enable_dp(struct intel_encoder *encoder, + const struct intel_crtc_state *pipe_config, + const struct drm_connector_state *conn_state) +{ + vlv_phy_pre_encoder_enable(encoder, pipe_config); + + intel_enable_dp(encoder, pipe_config, conn_state); +} + +static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder, + const struct intel_crtc_state *pipe_config, + const struct drm_connector_state *conn_state) +{ + intel_dp_prepare(encoder, pipe_config); + + vlv_phy_pre_pll_enable(encoder, pipe_config); +} + +static void chv_pre_enable_dp(struct intel_encoder *encoder, + const struct intel_crtc_state *pipe_config, + const struct drm_connector_state *conn_state) +{ + chv_phy_pre_encoder_enable(encoder, pipe_config); + + intel_enable_dp(encoder, pipe_config, conn_state); + + /* Second common lane will stay alive on its own now */ + chv_phy_release_cl2_override(encoder); +} + +static void chv_dp_pre_pll_enable(struct intel_encoder *encoder, + const struct intel_crtc_state *pipe_config, + const struct drm_connector_state *conn_state) +{ + intel_dp_prepare(encoder, pipe_config); + + chv_phy_pre_pll_enable(encoder, pipe_config); +} + +static void chv_dp_post_pll_disable(struct intel_encoder *encoder, + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state 
*old_conn_state) +{ + chv_phy_post_pll_disable(encoder, old_crtc_state); +} + +/* + * Fetch AUX CH registers 0x202 - 0x207 which contain + * link status information + */ +bool +intel_dp_get_link_status(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATUS_SIZE]) +{ + return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status, + DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE; +} + +/* These are source-specific values. */ +u8 +intel_dp_voltage_max(struct intel_dp *intel_dp) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + enum port port = encoder->port; + + if (HAS_DDI(dev_priv)) + return intel_ddi_dp_voltage_max(encoder); + else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + return DP_TRAIN_VOLTAGE_SWING_LEVEL_3; + else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) + return DP_TRAIN_VOLTAGE_SWING_LEVEL_2; + else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) + return DP_TRAIN_VOLTAGE_SWING_LEVEL_3; + else + return DP_TRAIN_VOLTAGE_SWING_LEVEL_2; +} + +u8 +intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, u8 voltage_swing) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + enum port port = encoder->port; + + if (HAS_DDI(dev_priv)) { + return intel_ddi_dp_pre_emphasis_max(encoder, voltage_swing); + } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { + switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { + case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: + return DP_TRAIN_PRE_EMPH_LEVEL_3; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: + return DP_TRAIN_PRE_EMPH_LEVEL_2; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: + return DP_TRAIN_PRE_EMPH_LEVEL_1; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: + default: + return DP_TRAIN_PRE_EMPH_LEVEL_0; + } + } else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) { + switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { + case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: + return DP_TRAIN_PRE_EMPH_LEVEL_2; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: + case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: + return DP_TRAIN_PRE_EMPH_LEVEL_1; + default: + return DP_TRAIN_PRE_EMPH_LEVEL_0; + } + } else { + switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { + case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: + return DP_TRAIN_PRE_EMPH_LEVEL_2; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: + return DP_TRAIN_PRE_EMPH_LEVEL_2; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: + return DP_TRAIN_PRE_EMPH_LEVEL_1; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: + default: + return DP_TRAIN_PRE_EMPH_LEVEL_0; + } + } +} + +static u32 vlv_signal_levels(struct intel_dp *intel_dp) +{ + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + unsigned long demph_reg_value, preemph_reg_value, + uniqtranscale_reg_value; + u8 train_set = intel_dp->train_set[0]; + + switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { + case DP_TRAIN_PRE_EMPH_LEVEL_0: + preemph_reg_value = 0x0004000; + switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { + case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: + demph_reg_value = 0x2B405555; + uniqtranscale_reg_value = 0x552AB83A; + break; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: + demph_reg_value = 0x2B404040; + uniqtranscale_reg_value = 0x5548B83A; + break; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: + demph_reg_value = 0x2B245555; + uniqtranscale_reg_value = 0x5560B83A; + break; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: + demph_reg_value = 0x2B405555; + uniqtranscale_reg_value = 0x5598DA3A; + break; + default: + return 0; + } + break; + case 
DP_TRAIN_PRE_EMPH_LEVEL_1: + preemph_reg_value = 0x0002000; + switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { + case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: + demph_reg_value = 0x2B404040; + uniqtranscale_reg_value = 0x5552B83A; + break; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: + demph_reg_value = 0x2B404848; + uniqtranscale_reg_value = 0x5580B83A; + break; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: + demph_reg_value = 0x2B404040; + uniqtranscale_reg_value = 0x55ADDA3A; + break; + default: + return 0; + } + break; + case DP_TRAIN_PRE_EMPH_LEVEL_2: + preemph_reg_value = 0x0000000; + switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { + case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: + demph_reg_value = 0x2B305555; + uniqtranscale_reg_value = 0x5570B83A; + break; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: + demph_reg_value = 0x2B2B4040; + uniqtranscale_reg_value = 0x55ADDA3A; + break; + default: + return 0; + } + break; + case DP_TRAIN_PRE_EMPH_LEVEL_3: + preemph_reg_value = 0x0006000; + switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { + case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: + demph_reg_value = 0x1B405555; + uniqtranscale_reg_value = 0x55ADDA3A; + break; + default: + return 0; + } + break; + default: + return 0; + } + + vlv_set_phy_signal_level(encoder, demph_reg_value, preemph_reg_value, + uniqtranscale_reg_value, 0); + + return 0; +} + +static u32 chv_signal_levels(struct intel_dp *intel_dp) +{ + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + u32 deemph_reg_value, margin_reg_value; + bool uniq_trans_scale = false; + u8 train_set = intel_dp->train_set[0]; + + switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { + case DP_TRAIN_PRE_EMPH_LEVEL_0: + switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { + case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: + deemph_reg_value = 128; + margin_reg_value = 52; + break; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: + deemph_reg_value = 128; + margin_reg_value = 77; + break; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: + deemph_reg_value = 128; + margin_reg_value = 102; + break; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: + deemph_reg_value = 128; + margin_reg_value = 154; + uniq_trans_scale = true; + break; + default: + return 0; + } + break; + case DP_TRAIN_PRE_EMPH_LEVEL_1: + switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { + case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: + deemph_reg_value = 85; + margin_reg_value = 78; + break; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: + deemph_reg_value = 85; + margin_reg_value = 116; + break; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: + deemph_reg_value = 85; + margin_reg_value = 154; + break; + default: + return 0; + } + break; + case DP_TRAIN_PRE_EMPH_LEVEL_2: + switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { + case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: + deemph_reg_value = 64; + margin_reg_value = 104; + break; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: + deemph_reg_value = 64; + margin_reg_value = 154; + break; + default: + return 0; + } + break; + case DP_TRAIN_PRE_EMPH_LEVEL_3: + switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { + case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: + deemph_reg_value = 43; + margin_reg_value = 154; + break; + default: + return 0; + } + break; + default: + return 0; + } + + chv_set_phy_signal_level(encoder, deemph_reg_value, + margin_reg_value, uniq_trans_scale); + + return 0; +} + +static u32 +g4x_signal_levels(u8 train_set) +{ + u32 signal_levels = 0; + + switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { + case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: + default: + signal_levels |= DP_VOLTAGE_0_4; + break; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: + 
signal_levels |= DP_VOLTAGE_0_6; + break; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: + signal_levels |= DP_VOLTAGE_0_8; + break; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: + signal_levels |= DP_VOLTAGE_1_2; + break; + } + switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { + case DP_TRAIN_PRE_EMPH_LEVEL_0: + default: + signal_levels |= DP_PRE_EMPHASIS_0; + break; + case DP_TRAIN_PRE_EMPH_LEVEL_1: + signal_levels |= DP_PRE_EMPHASIS_3_5; + break; + case DP_TRAIN_PRE_EMPH_LEVEL_2: + signal_levels |= DP_PRE_EMPHASIS_6; + break; + case DP_TRAIN_PRE_EMPH_LEVEL_3: + signal_levels |= DP_PRE_EMPHASIS_9_5; + break; + } + return signal_levels; +} + +/* SNB CPU eDP voltage swing and pre-emphasis control */ +static u32 +snb_cpu_edp_signal_levels(u8 train_set) +{ + int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | + DP_TRAIN_PRE_EMPHASIS_MASK); + switch (signal_levels) { + case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0: + case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0: + return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1: + return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2: + case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2: + return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1: + case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1: + return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0: + case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0: + return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B; + default: + DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" + "0x%x\n", signal_levels); + return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B; + } +} + +/* IVB CPU eDP voltage swing and pre-emphasis control */ +static u32 +ivb_cpu_edp_signal_levels(u8 train_set) +{ + int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | + DP_TRAIN_PRE_EMPHASIS_MASK); + switch (signal_levels) { + case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0: + return EDP_LINK_TRAIN_400MV_0DB_IVB; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1: + return EDP_LINK_TRAIN_400MV_3_5DB_IVB; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2: + return EDP_LINK_TRAIN_400MV_6DB_IVB; + + case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0: + return EDP_LINK_TRAIN_600MV_0DB_IVB; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1: + return EDP_LINK_TRAIN_600MV_3_5DB_IVB; + + case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0: + return EDP_LINK_TRAIN_800MV_0DB_IVB; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1: + return EDP_LINK_TRAIN_800MV_3_5DB_IVB; + + default: + DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" + "0x%x\n", signal_levels); + return EDP_LINK_TRAIN_500MV_0DB_IVB; + } +} + +void +intel_dp_set_signal_levels(struct intel_dp *intel_dp) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + enum port port = intel_dig_port->base.port; + u32 signal_levels, mask = 0; + u8 train_set = intel_dp->train_set[0]; + + if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10) { + signal_levels = bxt_signal_levels(intel_dp); + } else if (HAS_DDI(dev_priv)) { + signal_levels = ddi_signal_levels(intel_dp); + mask = DDI_BUF_EMP_MASK; + } else if 
(IS_CHERRYVIEW(dev_priv)) { + signal_levels = chv_signal_levels(intel_dp); + } else if (IS_VALLEYVIEW(dev_priv)) { + signal_levels = vlv_signal_levels(intel_dp); + } else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) { + signal_levels = ivb_cpu_edp_signal_levels(train_set); + mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB; + } else if (IS_GEN(dev_priv, 6) && port == PORT_A) { + signal_levels = snb_cpu_edp_signal_levels(train_set); + mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB; + } else { + signal_levels = g4x_signal_levels(train_set); + mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK; + } + + if (mask) + DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels); + + DRM_DEBUG_KMS("Using vswing level %d\n", + train_set & DP_TRAIN_VOLTAGE_SWING_MASK); + DRM_DEBUG_KMS("Using pre-emphasis level %d\n", + (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >> + DP_TRAIN_PRE_EMPHASIS_SHIFT); + + intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels; + + I915_WRITE(intel_dp->output_reg, intel_dp->DP); + POSTING_READ(intel_dp->output_reg); +} + +void +intel_dp_program_link_training_pattern(struct intel_dp *intel_dp, + u8 dp_train_pat) +{ + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct drm_i915_private *dev_priv = + to_i915(intel_dig_port->base.base.dev); + + _intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat); + + I915_WRITE(intel_dp->output_reg, intel_dp->DP); + POSTING_READ(intel_dp->output_reg); +} + +void intel_dp_set_idle_link_train(struct intel_dp *intel_dp) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + enum port port = intel_dig_port->base.port; + u32 val; + + if (!HAS_DDI(dev_priv)) + return; + + val = I915_READ(DP_TP_CTL(port)); + val &= ~DP_TP_CTL_LINK_TRAIN_MASK; + val |= DP_TP_CTL_LINK_TRAIN_IDLE; + I915_WRITE(DP_TP_CTL(port), val); + + /* + * On PORT_A we can have only eDP in SST mode. There the only reason + * we need to set idle transmission mode is to work around a HW issue + * where we enable the pipe while not in idle link-training mode. + * In this case there is requirement to wait for a minimum number of + * idle patterns to be sent. + */ + if (port == PORT_A) + return; + + if (intel_wait_for_register(&dev_priv->uncore, DP_TP_STATUS(port), + DP_TP_STATUS_IDLE_DONE, + DP_TP_STATUS_IDLE_DONE, + 1)) + DRM_ERROR("Timed out waiting for DP idle patterns\n"); +} + +static void +intel_dp_link_down(struct intel_encoder *encoder, + const struct intel_crtc_state *old_crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); + enum port port = encoder->port; + u32 DP = intel_dp->DP; + + if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0)) + return; + + DRM_DEBUG_KMS("\n"); + + if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) || + (HAS_PCH_CPT(dev_priv) && port != PORT_A)) { + DP &= ~DP_LINK_TRAIN_MASK_CPT; + DP |= DP_LINK_TRAIN_PAT_IDLE_CPT; + } else { + DP &= ~DP_LINK_TRAIN_MASK; + DP |= DP_LINK_TRAIN_PAT_IDLE; + } + I915_WRITE(intel_dp->output_reg, DP); + POSTING_READ(intel_dp->output_reg); + + DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE); + I915_WRITE(intel_dp->output_reg, DP); + POSTING_READ(intel_dp->output_reg); + + /* + * HW workaround for IBX, we need to move the port + * to transcoder A after disabling it to allow the + * matching HDMI port to be enabled on transcoder A. 
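The *_signal_levels() helpers earlier in this hunk all decode the same request byte: intel_dp->train_set[0] packs the requested voltage-swing level in DP_TRAIN_VOLTAGE_SWING_MASK (bits 1:0) and the pre-emphasis level in DP_TRAIN_PRE_EMPHASIS_MASK (bits 4:3). A minimal sketch of that decoding, using only the standard drm_dp_helper.h masks; the helper name is illustrative and not part of the patch:

#include <linux/types.h>
#include <drm/drm_dp_helper.h>

/* Illustrative only: split a link-training request byte into its two fields. */
static void decode_train_set(u8 train_set)
{
	u8 vswing  = train_set & DP_TRAIN_VOLTAGE_SWING_MASK;
	u8 preemph = (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
		     DP_TRAIN_PRE_EMPHASIS_SHIFT;

	/*
	 * e.g. DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1
	 * decodes to swing level 2, pre-emphasis level 1, which
	 * g4x_signal_levels() maps to DP_VOLTAGE_0_8 | DP_PRE_EMPHASIS_3_5.
	 */
	(void)vswing;
	(void)preemph;
}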
+ */ + if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) { + /* + * We get CPU/PCH FIFO underruns on the other pipe when + * doing the workaround. Sweep them under the rug. + */ + intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false); + intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false); + + /* always enable with pattern 1 (as per spec) */ + DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK); + DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) | + DP_LINK_TRAIN_PAT_1; + I915_WRITE(intel_dp->output_reg, DP); + POSTING_READ(intel_dp->output_reg); + + DP &= ~DP_PORT_EN; + I915_WRITE(intel_dp->output_reg, DP); + POSTING_READ(intel_dp->output_reg); + + intel_wait_for_vblank_if_active(dev_priv, PIPE_A); + intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true); + intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true); + } + + msleep(intel_dp->panel_power_down_delay); + + intel_dp->DP = DP; + + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { + intel_wakeref_t wakeref; + + with_pps_lock(intel_dp, wakeref) + intel_dp->active_pipe = INVALID_PIPE; + } +} + +static void +intel_dp_extended_receiver_capabilities(struct intel_dp *intel_dp) +{ + u8 dpcd_ext[6]; + + /* + * Prior to DP1.3 the bit represented by + * DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT was reserved. + * if it is set DP_DPCD_REV at 0000h could be at a value less than + * the true capability of the panel. The only way to check is to + * then compare 0000h and 2200h. + */ + if (!(intel_dp->dpcd[DP_TRAINING_AUX_RD_INTERVAL] & + DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT)) + return; + + if (drm_dp_dpcd_read(&intel_dp->aux, DP_DP13_DPCD_REV, + &dpcd_ext, sizeof(dpcd_ext)) != sizeof(dpcd_ext)) { + DRM_ERROR("DPCD failed read at extended capabilities\n"); + return; + } + + if (intel_dp->dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) { + DRM_DEBUG_KMS("DPCD extended DPCD rev less than base DPCD rev\n"); + return; + } + + if (!memcmp(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext))) + return; + + DRM_DEBUG_KMS("Base DPCD: %*ph\n", + (int)sizeof(intel_dp->dpcd), intel_dp->dpcd); + + memcpy(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext)); +} + +bool +intel_dp_read_dpcd(struct intel_dp *intel_dp) +{ + if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd, + sizeof(intel_dp->dpcd)) < 0) + return false; /* aux transfer failed */ + + intel_dp_extended_receiver_capabilities(intel_dp); + + DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd); + + return intel_dp->dpcd[DP_DPCD_REV] != 0; +} + +bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp) +{ + u8 dprx = 0; + + if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST, + &dprx) != 1) + return false; + return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED; +} + +static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp) +{ + /* + * Clear the cached register set to avoid using stale values + * for the sinks that do not support DSC. 
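The many DP_DPCD_REV comparisons in this code read more easily with the encoding in mind: the revision byte keeps the major version in the high nibble and the minor in the low nibble, so 0x11 is DP 1.1, 0x12 is DP 1.2 and 0x14 is DP 1.4. A tiny sketch of such a check (hypothetical helper, not part of the patch):

#include <linux/types.h>
#include <drm/drm_dp_helper.h>

/* Hypothetical helper: does the sink report at least the given DPCD revision? */
static inline bool dpcd_rev_at_least(const u8 dpcd[DP_RECEIVER_CAP_SIZE], u8 rev)
{
	/* e.g. dpcd[DP_DPCD_REV] == 0x12 (DP 1.2) satisfies rev == 0x11 (DP 1.1) */
	return dpcd[DP_DPCD_REV] >= rev;
}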
+ */ + memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd)); + + /* Clear fec_capable to avoid using stale values */ + intel_dp->fec_capable = 0; + + /* Cache the DSC DPCD if eDP or DP rev >= 1.4 */ + if (intel_dp->dpcd[DP_DPCD_REV] >= 0x14 || + intel_dp->edp_dpcd[0] >= DP_EDP_14) { + if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT, + intel_dp->dsc_dpcd, + sizeof(intel_dp->dsc_dpcd)) < 0) + DRM_ERROR("Failed to read DPCD register 0x%x\n", + DP_DSC_SUPPORT); + + DRM_DEBUG_KMS("DSC DPCD: %*ph\n", + (int)sizeof(intel_dp->dsc_dpcd), + intel_dp->dsc_dpcd); + + /* FEC is supported only on DP 1.4 */ + if (!intel_dp_is_edp(intel_dp) && + drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY, + &intel_dp->fec_capable) < 0) + DRM_ERROR("Failed to read FEC DPCD register\n"); + + DRM_DEBUG_KMS("FEC CAPABILITY: %x\n", intel_dp->fec_capable); + } +} + +static bool +intel_edp_init_dpcd(struct intel_dp *intel_dp) +{ + struct drm_i915_private *dev_priv = + to_i915(dp_to_dig_port(intel_dp)->base.base.dev); + + /* this function is meant to be called only once */ + WARN_ON(intel_dp->dpcd[DP_DPCD_REV] != 0); + + if (!intel_dp_read_dpcd(intel_dp)) + return false; + + drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc, + drm_dp_is_branch(intel_dp->dpcd)); + + if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) + dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] & + DP_NO_AUX_HANDSHAKE_LINK_TRAINING; + + /* + * Read the eDP display control registers. + * + * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in + * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it + * set, but require eDP 1.4+ detection (e.g. for supported link rates + * method). The display control registers should read zero if they're + * not supported anyway. + */ + if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV, + intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) == + sizeof(intel_dp->edp_dpcd)) + DRM_DEBUG_KMS("eDP DPCD: %*ph\n", (int) sizeof(intel_dp->edp_dpcd), + intel_dp->edp_dpcd); + + /* + * This has to be called after intel_dp->edp_dpcd is filled, PSR checks + * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1] + */ + intel_psr_init_dpcd(intel_dp); + + /* Read the eDP 1.4+ supported link rates. */ + if (intel_dp->edp_dpcd[0] >= DP_EDP_14) { + __le16 sink_rates[DP_MAX_SUPPORTED_RATES]; + int i; + + drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES, + sink_rates, sizeof(sink_rates)); + + for (i = 0; i < ARRAY_SIZE(sink_rates); i++) { + int val = le16_to_cpu(sink_rates[i]); + + if (val == 0) + break; + + /* Value read multiplied by 200kHz gives the per-lane + * link rate in kHz. The source rates are, however, + * stored in terms of LS_Clk kHz. The full conversion + * back to symbols is + * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte) + */ + intel_dp->sink_rates[i] = (val * 200) / 10; + } + intel_dp->num_sink_rates = i; + } + + /* + * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available, + * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise. + */ + if (intel_dp->num_sink_rates) + intel_dp->use_rate_select = true; + else + intel_dp_set_sink_rates(intel_dp); + + intel_dp_set_common_rates(intel_dp); + + /* Read the eDP DSC DPCD registers */ + if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) + intel_dp_get_dsc_sink_cap(intel_dp); + + return true; +} + + +static bool +intel_dp_get_dpcd(struct intel_dp *intel_dp) +{ + if (!intel_dp_read_dpcd(intel_dp)) + return false; + + /* Don't clobber cached eDP rates. 
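A worked example for the DP_SUPPORTED_LINK_RATES parsing in intel_edp_init_dpcd() above: the table entries are in 200 kHz units of the per-lane bit rate, so an entry of 13500 means 13500 * 200 kHz = 2.7 Gbps on the wire, and dividing by 10 for the 10-bit symbols of the 8b/10b channel coding gives the 270000 (LS_Clk kHz) value the driver stores. The same conversion as a standalone sketch (hypothetical helper name):

#include <linux/types.h>

/* Hypothetical helper: one DP_SUPPORTED_LINK_RATES entry -> LS_Clk in kHz. */
static int edp_rate_entry_to_ls_clk_khz(u16 val)
{
	return (val * 200) / 10;	/* 13500 -> 270000 (2.7 Gbps, HBR) */
}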
*/ + if (!intel_dp_is_edp(intel_dp)) { + intel_dp_set_sink_rates(intel_dp); + intel_dp_set_common_rates(intel_dp); + } + + /* + * Some eDP panels do not set a valid value for sink count, that is why + * it don't care about read it here and in intel_edp_init_dpcd(). + */ + if (!intel_dp_is_edp(intel_dp)) { + u8 count; + ssize_t r; + + r = drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_COUNT, &count); + if (r < 1) + return false; + + /* + * Sink count can change between short pulse hpd hence + * a member variable in intel_dp will track any changes + * between short pulse interrupts. + */ + intel_dp->sink_count = DP_GET_SINK_COUNT(count); + + /* + * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that + * a dongle is present but no display. Unless we require to know + * if a dongle is present or not, we don't need to update + * downstream port information. So, an early return here saves + * time from performing other operations which are not required. + */ + if (!intel_dp->sink_count) + return false; + } + + if (!drm_dp_is_branch(intel_dp->dpcd)) + return true; /* native DP sink */ + + if (intel_dp->dpcd[DP_DPCD_REV] == 0x10) + return true; /* no per-port downstream info */ + + if (drm_dp_dpcd_read(&intel_dp->aux, DP_DOWNSTREAM_PORT_0, + intel_dp->downstream_ports, + DP_MAX_DOWNSTREAM_PORTS) < 0) + return false; /* downstream port status fetch failed */ + + return true; +} + +static bool +intel_dp_sink_can_mst(struct intel_dp *intel_dp) +{ + u8 mstm_cap; + + if (intel_dp->dpcd[DP_DPCD_REV] < 0x12) + return false; + + if (drm_dp_dpcd_readb(&intel_dp->aux, DP_MSTM_CAP, &mstm_cap) != 1) + return false; + + return mstm_cap & DP_MST_CAP; +} + +static bool +intel_dp_can_mst(struct intel_dp *intel_dp) +{ + return i915_modparams.enable_dp_mst && + intel_dp->can_mst && + intel_dp_sink_can_mst(intel_dp); +} + +static void +intel_dp_configure_mst(struct intel_dp *intel_dp) +{ + struct intel_encoder *encoder = + &dp_to_dig_port(intel_dp)->base; + bool sink_can_mst = intel_dp_sink_can_mst(intel_dp); + + DRM_DEBUG_KMS("MST support? port %c: %s, sink: %s, modparam: %s\n", + port_name(encoder->port), yesno(intel_dp->can_mst), + yesno(sink_can_mst), yesno(i915_modparams.enable_dp_mst)); + + if (!intel_dp->can_mst) + return; + + intel_dp->is_mst = sink_can_mst && + i915_modparams.enable_dp_mst; + + drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, + intel_dp->is_mst); +} + +static bool +intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector) +{ + return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI, + sink_irq_vector, DP_DPRX_ESI_LEN) == + DP_DPRX_ESI_LEN; +} + +u16 intel_dp_dsc_get_output_bpp(int link_clock, u8 lane_count, + int mode_clock, int mode_hdisplay) +{ + u16 bits_per_pixel, max_bpp_small_joiner_ram; + int i; + + /* + * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)* + * (LinkSymbolClock)* 8 * ((100-FECOverhead)/100)*(TimeSlotsPerMTP) + * FECOverhead = 2.4%, for SST -> TimeSlotsPerMTP is 1, + * for MST -> TimeSlotsPerMTP has to be calculated + */ + bits_per_pixel = (link_clock * lane_count * 8 * + DP_DSC_FEC_OVERHEAD_FACTOR) / + mode_clock; + + /* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. 
width */ + max_bpp_small_joiner_ram = DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER / + mode_hdisplay; + + /* + * Greatest allowed DSC BPP = MIN (output BPP from avaialble Link BW + * check, output bpp from small joiner RAM check) + */ + bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram); + + /* Error out if the max bpp is less than smallest allowed valid bpp */ + if (bits_per_pixel < valid_dsc_bpp[0]) { + DRM_DEBUG_KMS("Unsupported BPP %d\n", bits_per_pixel); + return 0; + } + + /* Find the nearest match in the array of known BPPs from VESA */ + for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) { + if (bits_per_pixel < valid_dsc_bpp[i + 1]) + break; + } + bits_per_pixel = valid_dsc_bpp[i]; + + /* + * Compressed BPP in U6.4 format so multiply by 16, for Gen 11, + * fractional part is 0 + */ + return bits_per_pixel << 4; +} + +u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, + int mode_clock, + int mode_hdisplay) +{ + u8 min_slice_count, i; + int max_slice_width; + + if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE) + min_slice_count = DIV_ROUND_UP(mode_clock, + DP_DSC_MAX_ENC_THROUGHPUT_0); + else + min_slice_count = DIV_ROUND_UP(mode_clock, + DP_DSC_MAX_ENC_THROUGHPUT_1); + + max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd); + if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) { + DRM_DEBUG_KMS("Unsupported slice width %d by DP DSC Sink device\n", + max_slice_width); + return 0; + } + /* Also take into account max slice width */ + min_slice_count = min_t(u8, min_slice_count, + DIV_ROUND_UP(mode_hdisplay, + max_slice_width)); + + /* Find the closest match to the valid slice count values */ + for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) { + if (valid_dsc_slicecount[i] > + drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd, + false)) + break; + if (min_slice_count <= valid_dsc_slicecount[i]) + return valid_dsc_slicecount[i]; + } + + DRM_DEBUG_KMS("Unsupported Slice Count %d\n", min_slice_count); + return 0; +} + +static void +intel_pixel_encoding_setup_vsc(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) +{ + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct dp_sdp vsc_sdp = {}; + + /* Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119 */ + vsc_sdp.sdp_header.HB0 = 0; + vsc_sdp.sdp_header.HB1 = 0x7; + + /* + * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/ + * Colorimetry Format indication. + */ + vsc_sdp.sdp_header.HB2 = 0x5; + + /* + * VSC SDP supporting 3D stereo, + PSR2, + Pixel Encoding/ + * Colorimetry Format indication (HB2 = 05h). + */ + vsc_sdp.sdp_header.HB3 = 0x13; + + /* + * YCbCr 420 = 3h DB16[7:4] ITU-R BT.601 = 0h, ITU-R BT.709 = 1h + * DB16[3:0] DP 1.4a spec, Table 2-120 + */ + vsc_sdp.db[16] = 0x3 << 4; /* 0x3 << 4 , YCbCr 420*/ + /* RGB->YCBCR color conversion uses the BT.709 color space. */ + vsc_sdp.db[16] |= 0x1; /* 0x1, ITU-R BT.709 */ + + /* + * For pixel encoding formats YCbCr444, YCbCr422, YCbCr420, and Y Only, + * the following Component Bit Depth values are defined: + * 001b = 8bpc. + * 010b = 10bpc. + * 011b = 12bpc. + * 100b = 16bpc. + */ + switch (crtc_state->pipe_bpp) { + case 24: /* 8bpc */ + vsc_sdp.db[17] = 0x1; + break; + case 30: /* 10bpc */ + vsc_sdp.db[17] = 0x2; + break; + case 36: /* 12bpc */ + vsc_sdp.db[17] = 0x3; + break; + case 48: /* 16bpc */ + vsc_sdp.db[17] = 0x4; + break; + default: + MISSING_CASE(crtc_state->pipe_bpp); + break; + } + + /* + * Dynamic Range (Bit 7) + * 0 = VESA range, 1 = CTA range. 
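A rough worked example for intel_dp_dsc_get_output_bpp() earlier in this hunk, leaving out the FEC overhead factor (which only scales the result down slightly) and the small-joiner clamp: a 4-lane HBR2 link has link_clock = 540000, so the available bandwidth is 4 * 540000 kHz * 8 bits, about 17.3 Gbit/s, and a 3840x2160@60 mode with mode_clock = 594000 then allows roughly 29 bits per pixel before snapping down to the nearest valid DSC bpp. The same arithmetic as a standalone sketch (illustrative only):

#include <linux/types.h>

/* Illustrative only: the bandwidth-limited bpp bound, without the FEC factor
 * and the small-joiner RAM clamp that the real helper also applies.
 */
static u16 dsc_bw_limited_bpp(int link_clock_khz, u8 lane_count, int mode_clock_khz)
{
	/* e.g. (540000 * 4 * 8) / 594000 ~= 29 bpp */
	return (link_clock_khz * lane_count * 8) / mode_clock_khz;
}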
+ * all YCbCr are always limited range + */ + vsc_sdp.db[17] |= 0x80; + + /* + * Content Type (Bits 2:0) + * 000b = Not defined. + * 001b = Graphics. + * 010b = Photo. + * 011b = Video. + * 100b = Game + * All other values are RESERVED. + * Note: See CTA-861-G for the definition and expected + * processing by a stream sink for the above contect types. + */ + vsc_sdp.db[18] = 0; + + intel_dig_port->write_infoframe(&intel_dig_port->base, + crtc_state, DP_SDP_VSC, &vsc_sdp, sizeof(vsc_sdp)); +} + +void intel_dp_ycbcr_420_enable(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) +{ + if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_YCBCR420) + return; + + intel_pixel_encoding_setup_vsc(intel_dp, crtc_state); +} + +static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp) +{ + int status = 0; + int test_link_rate; + u8 test_lane_count, test_link_bw; + /* (DP CTS 1.2) + * 4.3.1.11 + */ + /* Read the TEST_LANE_COUNT and TEST_LINK_RTAE fields (DP CTS 3.1.4) */ + status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT, + &test_lane_count); + + if (status <= 0) { + DRM_DEBUG_KMS("Lane count read failed\n"); + return DP_TEST_NAK; + } + test_lane_count &= DP_MAX_LANE_COUNT_MASK; + + status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE, + &test_link_bw); + if (status <= 0) { + DRM_DEBUG_KMS("Link Rate read failed\n"); + return DP_TEST_NAK; + } + test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw); + + /* Validate the requested link rate and lane count */ + if (!intel_dp_link_params_valid(intel_dp, test_link_rate, + test_lane_count)) + return DP_TEST_NAK; + + intel_dp->compliance.test_lane_count = test_lane_count; + intel_dp->compliance.test_link_rate = test_link_rate; + + return DP_TEST_ACK; +} + +static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp) +{ + u8 test_pattern; + u8 test_misc; + __be16 h_width, v_height; + int status = 0; + + /* Read the TEST_PATTERN (DP CTS 3.1.5) */ + status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN, + &test_pattern); + if (status <= 0) { + DRM_DEBUG_KMS("Test pattern read failed\n"); + return DP_TEST_NAK; + } + if (test_pattern != DP_COLOR_RAMP) + return DP_TEST_NAK; + + status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI, + &h_width, 2); + if (status <= 0) { + DRM_DEBUG_KMS("H Width read failed\n"); + return DP_TEST_NAK; + } + + status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI, + &v_height, 2); + if (status <= 0) { + DRM_DEBUG_KMS("V Height read failed\n"); + return DP_TEST_NAK; + } + + status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0, + &test_misc); + if (status <= 0) { + DRM_DEBUG_KMS("TEST MISC read failed\n"); + return DP_TEST_NAK; + } + if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB) + return DP_TEST_NAK; + if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA) + return DP_TEST_NAK; + switch (test_misc & DP_TEST_BIT_DEPTH_MASK) { + case DP_TEST_BIT_DEPTH_6: + intel_dp->compliance.test_data.bpc = 6; + break; + case DP_TEST_BIT_DEPTH_8: + intel_dp->compliance.test_data.bpc = 8; + break; + default: + return DP_TEST_NAK; + } + + intel_dp->compliance.test_data.video_pattern = test_pattern; + intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width); + intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height); + /* Set test active flag here so userspace doesn't interrupt things */ + intel_dp->compliance.test_active = 1; + + return DP_TEST_ACK; +} + +static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp) +{ + u8 test_result = DP_TEST_ACK; 
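In intel_dp_autotest_link_training() above, the TEST_LINK_RATE value read from the sink is a DP bandwidth code, and drm_dp_bw_code_to_link_rate() expands it by multiplying by 27000 kHz. A minimal sketch of that mapping (hypothetical function name, not part of the patch):

#include <linux/types.h>

/* Bandwidth code -> link rate in kHz:
 *   0x06 -> 162000 (RBR),  0x0a -> 270000 (HBR),
 *   0x14 -> 540000 (HBR2), 0x1e -> 810000 (HBR3)
 */
static int test_link_bw_to_rate_khz(u8 test_link_bw)
{
	return test_link_bw * 27000;
}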
+ struct intel_connector *intel_connector = intel_dp->attached_connector; + struct drm_connector *connector = &intel_connector->base; + + if (intel_connector->detect_edid == NULL || + connector->edid_corrupt || + intel_dp->aux.i2c_defer_count > 6) { + /* Check EDID read for NACKs, DEFERs and corruption + * (DP CTS 1.2 Core r1.1) + * 4.2.2.4 : Failed EDID read, I2C_NAK + * 4.2.2.5 : Failed EDID read, I2C_DEFER + * 4.2.2.6 : EDID corruption detected + * Use failsafe mode for all cases + */ + if (intel_dp->aux.i2c_nack_count > 0 || + intel_dp->aux.i2c_defer_count > 0) + DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n", + intel_dp->aux.i2c_nack_count, + intel_dp->aux.i2c_defer_count); + intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE; + } else { + struct edid *block = intel_connector->detect_edid; + + /* We have to write the checksum + * of the last block read + */ + block += intel_connector->detect_edid->extensions; + + if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM, + block->checksum) <= 0) + DRM_DEBUG_KMS("Failed to write EDID checksum\n"); + + test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE; + intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED; + } + + /* Set test active flag here so userspace doesn't interrupt things */ + intel_dp->compliance.test_active = 1; + + return test_result; +} + +static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp) +{ + u8 test_result = DP_TEST_NAK; + return test_result; +} + +static void intel_dp_handle_test_request(struct intel_dp *intel_dp) +{ + u8 response = DP_TEST_NAK; + u8 request = 0; + int status; + + status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request); + if (status <= 0) { + DRM_DEBUG_KMS("Could not read test request from sink\n"); + goto update_status; + } + + switch (request) { + case DP_TEST_LINK_TRAINING: + DRM_DEBUG_KMS("LINK_TRAINING test requested\n"); + response = intel_dp_autotest_link_training(intel_dp); + break; + case DP_TEST_LINK_VIDEO_PATTERN: + DRM_DEBUG_KMS("TEST_PATTERN test requested\n"); + response = intel_dp_autotest_video_pattern(intel_dp); + break; + case DP_TEST_LINK_EDID_READ: + DRM_DEBUG_KMS("EDID test requested\n"); + response = intel_dp_autotest_edid(intel_dp); + break; + case DP_TEST_LINK_PHY_TEST_PATTERN: + DRM_DEBUG_KMS("PHY_PATTERN test requested\n"); + response = intel_dp_autotest_phy_pattern(intel_dp); + break; + default: + DRM_DEBUG_KMS("Invalid test request '%02x'\n", request); + break; + } + + if (response & DP_TEST_ACK) + intel_dp->compliance.test_type = request; + +update_status: + status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response); + if (status <= 0) + DRM_DEBUG_KMS("Could not write test response to sink\n"); +} + +static int +intel_dp_check_mst_status(struct intel_dp *intel_dp) +{ + bool bret; + + if (intel_dp->is_mst) { + u8 esi[DP_DPRX_ESI_LEN] = { 0 }; + int ret = 0; + int retry; + bool handled; + + WARN_ON_ONCE(intel_dp->active_mst_links < 0); + bret = intel_dp_get_sink_irq_esi(intel_dp, esi); +go_again: + if (bret == true) { + + /* check link status - esi[10] = 0x200c */ + if (intel_dp->active_mst_links > 0 && + !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) { + DRM_DEBUG_KMS("channel EQ not ok, retraining\n"); + intel_dp_start_link_train(intel_dp); + intel_dp_stop_link_train(intel_dp); + } + + DRM_DEBUG_KMS("got esi %3ph\n", esi); + ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled); + + if (handled) { + for (retry = 0; retry < 3; retry++) { + int wret; + wret = 
drm_dp_dpcd_write(&intel_dp->aux, + DP_SINK_COUNT_ESI+1, + &esi[1], 3); + if (wret == 3) { + break; + } + } + + bret = intel_dp_get_sink_irq_esi(intel_dp, esi); + if (bret == true) { + DRM_DEBUG_KMS("got esi2 %3ph\n", esi); + goto go_again; + } + } else + ret = 0; + + return ret; + } else { + DRM_DEBUG_KMS("failed to get ESI - device may have failed\n"); + intel_dp->is_mst = false; + drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, + intel_dp->is_mst); + } + } + return -EINVAL; +} + +static bool +intel_dp_needs_link_retrain(struct intel_dp *intel_dp) +{ + u8 link_status[DP_LINK_STATUS_SIZE]; + + if (!intel_dp->link_trained) + return false; + + /* + * While PSR source HW is enabled, it will control main-link sending + * frames, enabling and disabling it so trying to do a retrain will fail + * as the link would or not be on or it could mix training patterns + * and frame data at the same time causing retrain to fail. + * Also when exiting PSR, HW will retrain the link anyways fixing + * any link status error. + */ + if (intel_psr_enabled(intel_dp)) + return false; + + if (!intel_dp_get_link_status(intel_dp, link_status)) + return false; + + /* + * Validate the cached values of intel_dp->link_rate and + * intel_dp->lane_count before attempting to retrain. + */ + if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate, + intel_dp->lane_count)) + return false; + + /* Retrain if Channel EQ or CR not ok */ + return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count); +} + +int intel_dp_retrain_link(struct intel_encoder *encoder, + struct drm_modeset_acquire_ctx *ctx) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_connector *connector = intel_dp->attached_connector; + struct drm_connector_state *conn_state; + struct intel_crtc_state *crtc_state; + struct intel_crtc *crtc; + int ret; + + /* FIXME handle the MST connectors as well */ + + if (!connector || connector->base.status != connector_status_connected) + return 0; + + ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, + ctx); + if (ret) + return ret; + + conn_state = connector->base.state; + + crtc = to_intel_crtc(conn_state->crtc); + if (!crtc) + return 0; + + ret = drm_modeset_lock(&crtc->base.mutex, ctx); + if (ret) + return ret; + + crtc_state = to_intel_crtc_state(crtc->base.state); + + WARN_ON(!intel_crtc_has_dp_encoder(crtc_state)); + + if (!crtc_state->base.active) + return 0; + + if (conn_state->commit && + !try_wait_for_completion(&conn_state->commit->hw_done)) + return 0; + + if (!intel_dp_needs_link_retrain(intel_dp)) + return 0; + + /* Suppress underruns caused by re-training */ + intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false); + if (crtc_state->has_pch_encoder) + intel_set_pch_fifo_underrun_reporting(dev_priv, + intel_crtc_pch_transcoder(crtc), false); + + intel_dp_start_link_train(intel_dp); + intel_dp_stop_link_train(intel_dp); + + /* Keep underrun reporting disabled until things are stable */ + intel_wait_for_vblank(dev_priv, crtc->pipe); + + intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true); + if (crtc_state->has_pch_encoder) + intel_set_pch_fifo_underrun_reporting(dev_priv, + intel_crtc_pch_transcoder(crtc), true); + + return 0; +} + +/* + * If display is now connected check links status, + * there has been known issues of link loss triggering + * long pulse. + * + * Some sinks (eg. ASUS PB287Q) seem to perform some + * weird HPD ping pong during modesets. 
So we can apparently + * end up with HPD going low during a modeset, and then + * going back up soon after. And once that happens we must + * retrain the link to get a picture. That's in case no + * userspace component reacted to intermittent HPD dip. + */ +static bool intel_dp_hotplug(struct intel_encoder *encoder, + struct intel_connector *connector) +{ + struct drm_modeset_acquire_ctx ctx; + bool changed; + int ret; + + changed = intel_encoder_hotplug(encoder, connector); + + drm_modeset_acquire_init(&ctx, 0); + + for (;;) { + ret = intel_dp_retrain_link(encoder, &ctx); + + if (ret == -EDEADLK) { + drm_modeset_backoff(&ctx); + continue; + } + + break; + } + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); + WARN(ret, "Acquiring modeset locks failed with %i\n", ret); + + return changed; +} + +static void intel_dp_check_service_irq(struct intel_dp *intel_dp) +{ + u8 val; + + if (intel_dp->dpcd[DP_DPCD_REV] < 0x11) + return; + + if (drm_dp_dpcd_readb(&intel_dp->aux, + DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val) + return; + + drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val); + + if (val & DP_AUTOMATED_TEST_REQUEST) + intel_dp_handle_test_request(intel_dp); + + if (val & DP_CP_IRQ) + intel_hdcp_handle_cp_irq(intel_dp->attached_connector); + + if (val & DP_SINK_SPECIFIC_IRQ) + DRM_DEBUG_DRIVER("Sink specific irq unhandled\n"); +} + +/* + * According to DP spec + * 5.1.2: + * 1. Read DPCD + * 2. Configure link according to Receiver Capabilities + * 3. Use Link Training from 2.5.3.3 and 3.5.1.3 + * 4. Check link status on receipt of hot-plug interrupt + * + * intel_dp_short_pulse - handles short pulse interrupts + * when full detection is not required. + * Returns %true if short pulse is handled and full detection + * is NOT required and %false otherwise. + */ +static bool +intel_dp_short_pulse(struct intel_dp *intel_dp) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + u8 old_sink_count = intel_dp->sink_count; + bool ret; + + /* + * Clearing compliance test variables to allow capturing + * of values for next automated test request. 
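intel_dp_check_service_irq() above follows the usual DPCD service-interrupt pattern: read DP_DEVICE_SERVICE_IRQ_VECTOR (DPCD 0x201), write the same value back to acknowledge the asserted bits, then dispatch on DP_AUTOMATED_TEST_REQUEST, DP_CP_IRQ and DP_SINK_SPECIFIC_IRQ. A minimal standalone sketch of the read/ack step (hypothetical function name, not part of the patch):

#include <linux/types.h>
#include <drm/drm_dp_helper.h>

/* Hypothetical sketch: read and acknowledge the sink service IRQ vector,
 * returning the asserted bits for the caller to dispatch on.
 */
static u8 ack_service_irq(struct drm_dp_aux *aux)
{
	u8 val = 0;

	if (drm_dp_dpcd_readb(aux, DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1)
		return 0;
	if (val)
		drm_dp_dpcd_writeb(aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val);

	return val;
}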
+ */ + memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance)); + + /* + * Now read the DPCD to see if it's actually running + * If the current value of sink count doesn't match with + * the value that was stored earlier or dpcd read failed + * we need to do full detection + */ + ret = intel_dp_get_dpcd(intel_dp); + + if ((old_sink_count != intel_dp->sink_count) || !ret) { + /* No need to proceed if we are going to do full detect */ + return false; + } + + intel_dp_check_service_irq(intel_dp); + + /* Handle CEC interrupts, if any */ + drm_dp_cec_irq(&intel_dp->aux); + + /* defer to the hotplug work for link retraining if needed */ + if (intel_dp_needs_link_retrain(intel_dp)) + return false; + + intel_psr_short_pulse(intel_dp); + + if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) { + DRM_DEBUG_KMS("Link Training Compliance Test requested\n"); + /* Send a Hotplug Uevent to userspace to start modeset */ + drm_kms_helper_hotplug_event(&dev_priv->drm); + } + + return true; +} + +/* XXX this is probably wrong for multiple downstream ports */ +static enum drm_connector_status +intel_dp_detect_dpcd(struct intel_dp *intel_dp) +{ + struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp); + u8 *dpcd = intel_dp->dpcd; + u8 type; + + if (WARN_ON(intel_dp_is_edp(intel_dp))) + return connector_status_connected; + + if (lspcon->active) + lspcon_resume(lspcon); + + if (!intel_dp_get_dpcd(intel_dp)) + return connector_status_disconnected; + + /* if there's no downstream port, we're done */ + if (!drm_dp_is_branch(dpcd)) + return connector_status_connected; + + /* If we're HPD-aware, SINK_COUNT changes dynamically */ + if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && + intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) { + + return intel_dp->sink_count ? + connector_status_connected : connector_status_disconnected; + } + + if (intel_dp_can_mst(intel_dp)) + return connector_status_connected; + + /* If no HPD, poke DDC gently */ + if (drm_probe_ddc(&intel_dp->aux.ddc)) + return connector_status_connected; + + /* Well we tried, say unknown for unreliable port types */ + if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) { + type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK; + if (type == DP_DS_PORT_TYPE_VGA || + type == DP_DS_PORT_TYPE_NON_EDID) + return connector_status_unknown; + } else { + type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & + DP_DWN_STRM_PORT_TYPE_MASK; + if (type == DP_DWN_STRM_PORT_TYPE_ANALOG || + type == DP_DWN_STRM_PORT_TYPE_OTHER) + return connector_status_unknown; + } + + /* Anything else is out of spec, warn and ignore */ + DRM_DEBUG_KMS("Broken DP branch device, ignoring\n"); + return connector_status_disconnected; +} + +static enum drm_connector_status +edp_detect(struct intel_dp *intel_dp) +{ + return connector_status_connected; +} + +static bool ibx_digital_port_connected(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + u32 bit; + + switch (encoder->hpd_pin) { + case HPD_PORT_B: + bit = SDE_PORTB_HOTPLUG; + break; + case HPD_PORT_C: + bit = SDE_PORTC_HOTPLUG; + break; + case HPD_PORT_D: + bit = SDE_PORTD_HOTPLUG; + break; + default: + MISSING_CASE(encoder->hpd_pin); + return false; + } + + return I915_READ(SDEISR) & bit; +} + +static bool cpt_digital_port_connected(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + u32 bit; + + switch (encoder->hpd_pin) { + case HPD_PORT_B: + bit = SDE_PORTB_HOTPLUG_CPT; + break; + case HPD_PORT_C: + bit = SDE_PORTC_HOTPLUG_CPT; + break; 
+ case HPD_PORT_D: + bit = SDE_PORTD_HOTPLUG_CPT; + break; + default: + MISSING_CASE(encoder->hpd_pin); + return false; + } + + return I915_READ(SDEISR) & bit; +} + +static bool spt_digital_port_connected(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + u32 bit; + + switch (encoder->hpd_pin) { + case HPD_PORT_A: + bit = SDE_PORTA_HOTPLUG_SPT; + break; + case HPD_PORT_E: + bit = SDE_PORTE_HOTPLUG_SPT; + break; + default: + return cpt_digital_port_connected(encoder); + } + + return I915_READ(SDEISR) & bit; +} + +static bool g4x_digital_port_connected(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + u32 bit; + + switch (encoder->hpd_pin) { + case HPD_PORT_B: + bit = PORTB_HOTPLUG_LIVE_STATUS_G4X; + break; + case HPD_PORT_C: + bit = PORTC_HOTPLUG_LIVE_STATUS_G4X; + break; + case HPD_PORT_D: + bit = PORTD_HOTPLUG_LIVE_STATUS_G4X; + break; + default: + MISSING_CASE(encoder->hpd_pin); + return false; + } + + return I915_READ(PORT_HOTPLUG_STAT) & bit; +} + +static bool gm45_digital_port_connected(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + u32 bit; + + switch (encoder->hpd_pin) { + case HPD_PORT_B: + bit = PORTB_HOTPLUG_LIVE_STATUS_GM45; + break; + case HPD_PORT_C: + bit = PORTC_HOTPLUG_LIVE_STATUS_GM45; + break; + case HPD_PORT_D: + bit = PORTD_HOTPLUG_LIVE_STATUS_GM45; + break; + default: + MISSING_CASE(encoder->hpd_pin); + return false; + } + + return I915_READ(PORT_HOTPLUG_STAT) & bit; +} + +static bool ilk_digital_port_connected(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + + if (encoder->hpd_pin == HPD_PORT_A) + return I915_READ(DEISR) & DE_DP_A_HOTPLUG; + else + return ibx_digital_port_connected(encoder); +} + +static bool snb_digital_port_connected(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + + if (encoder->hpd_pin == HPD_PORT_A) + return I915_READ(DEISR) & DE_DP_A_HOTPLUG; + else + return cpt_digital_port_connected(encoder); +} + +static bool ivb_digital_port_connected(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + + if (encoder->hpd_pin == HPD_PORT_A) + return I915_READ(DEISR) & DE_DP_A_HOTPLUG_IVB; + else + return cpt_digital_port_connected(encoder); +} + +static bool bdw_digital_port_connected(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + + if (encoder->hpd_pin == HPD_PORT_A) + return I915_READ(GEN8_DE_PORT_ISR) & GEN8_PORT_DP_A_HOTPLUG; + else + return cpt_digital_port_connected(encoder); +} + +static bool bxt_digital_port_connected(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + u32 bit; + + switch (encoder->hpd_pin) { + case HPD_PORT_A: + bit = BXT_DE_PORT_HP_DDIA; + break; + case HPD_PORT_B: + bit = BXT_DE_PORT_HP_DDIB; + break; + case HPD_PORT_C: + bit = BXT_DE_PORT_HP_DDIC; + break; + default: + MISSING_CASE(encoder->hpd_pin); + return false; + } + + return I915_READ(GEN8_DE_PORT_ISR) & bit; +} + +static bool icl_combo_port_connected(struct drm_i915_private *dev_priv, + struct intel_digital_port *intel_dig_port) +{ + enum port port = intel_dig_port->base.port; + + return I915_READ(SDEISR) & SDE_DDI_HOTPLUG_ICP(port); +} + +static const char *tc_type_name(enum tc_port_type type) +{ + static const char * const names[] = { + [TC_PORT_UNKNOWN] = 
"unknown", + [TC_PORT_LEGACY] = "legacy", + [TC_PORT_TYPEC] = "typec", + [TC_PORT_TBT] = "tbt", + }; + + if (WARN_ON(type >= ARRAY_SIZE(names))) + type = TC_PORT_UNKNOWN; + + return names[type]; +} + +static void icl_update_tc_port_type(struct drm_i915_private *dev_priv, + struct intel_digital_port *intel_dig_port, + bool is_legacy, bool is_typec, bool is_tbt) +{ + enum port port = intel_dig_port->base.port; + enum tc_port_type old_type = intel_dig_port->tc_type; + + WARN_ON(is_legacy + is_typec + is_tbt != 1); + + if (is_legacy) + intel_dig_port->tc_type = TC_PORT_LEGACY; + else if (is_typec) + intel_dig_port->tc_type = TC_PORT_TYPEC; + else if (is_tbt) + intel_dig_port->tc_type = TC_PORT_TBT; + else + return; + + /* Types are not supposed to be changed at runtime. */ + WARN_ON(old_type != TC_PORT_UNKNOWN && + old_type != intel_dig_port->tc_type); + + if (old_type != intel_dig_port->tc_type) + DRM_DEBUG_KMS("Port %c has TC type %s\n", port_name(port), + tc_type_name(intel_dig_port->tc_type)); +} + +/* + * This function implements the first part of the Connect Flow described by our + * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading + * lanes, EDID, etc) is done as needed in the typical places. + * + * Unlike the other ports, type-C ports are not available to use as soon as we + * get a hotplug. The type-C PHYs can be shared between multiple controllers: + * display, USB, etc. As a result, handshaking through FIA is required around + * connect and disconnect to cleanly transfer ownership with the controller and + * set the type-C power state. + * + * We could opt to only do the connect flow when we actually try to use the AUX + * channels or do a modeset, then immediately run the disconnect flow after + * usage, but there are some implications on this for a dynamic environment: + * things may go away or change behind our backs. So for now our driver is + * always trying to acquire ownership of the controller as soon as it gets an + * interrupt (or polls state and sees a port is connected) and only gives it + * back when it sees a disconnect. Implementation of a more fine-grained model + * will require a lot of coordination with user space and thorough testing for + * the extra possible cases. + */ +static bool icl_tc_phy_connect(struct drm_i915_private *dev_priv, + struct intel_digital_port *dig_port) +{ + enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port); + u32 val; + + if (dig_port->tc_type != TC_PORT_LEGACY && + dig_port->tc_type != TC_PORT_TYPEC) + return true; + + val = I915_READ(PORT_TX_DFLEXDPPMS); + if (!(val & DP_PHY_MODE_STATUS_COMPLETED(tc_port))) { + DRM_DEBUG_KMS("DP PHY for TC port %d not ready\n", tc_port); + WARN_ON(dig_port->tc_legacy_port); + return false; + } + + /* + * This function may be called many times in a row without an HPD event + * in between, so try to avoid the write when we can. + */ + val = I915_READ(PORT_TX_DFLEXDPCSSS); + if (!(val & DP_PHY_MODE_STATUS_NOT_SAFE(tc_port))) { + val |= DP_PHY_MODE_STATUS_NOT_SAFE(tc_port); + I915_WRITE(PORT_TX_DFLEXDPCSSS, val); + } + + /* + * Now we have to re-check the live state, in case the port recently + * became disconnected. Not necessary for legacy mode. + */ + if (dig_port->tc_type == TC_PORT_TYPEC && + !(I915_READ(PORT_TX_DFLEXDPSP) & TC_LIVE_STATE_TC(tc_port))) { + DRM_DEBUG_KMS("TC PHY %d sudden disconnect.\n", tc_port); + icl_tc_phy_disconnect(dev_priv, dig_port); + return false; + } + + return true; +} + +/* + * See the comment at the connect function. 
This implements the Disconnect
+ * Flow.
+ */
+void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *dig_port)
+{
+ enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
+
+ if (dig_port->tc_type == TC_PORT_UNKNOWN)
+ return;
+
+ /*
+ * For TBT the disconnection flow is just reading the live status, which
+ * the caller has already done.
+ */
+ if (dig_port->tc_type == TC_PORT_TYPEC ||
+ dig_port->tc_type == TC_PORT_LEGACY) {
+ u32 val;
+
+ val = I915_READ(PORT_TX_DFLEXDPCSSS);
+ val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
+ I915_WRITE(PORT_TX_DFLEXDPCSSS, val);
+ }
+
+ DRM_DEBUG_KMS("Port %c TC type %s disconnected\n",
+ port_name(dig_port->base.port),
+ tc_type_name(dig_port->tc_type));
+
+ dig_port->tc_type = TC_PORT_UNKNOWN;
+}
+
+/*
+ * The type-C ports are different because even when they are connected, they may
+ * not be available/usable by the graphics driver: see the comment on
+ * icl_tc_phy_connect(). So in our driver, instead of adding the additional
+ * concept of "usable" and making everything check for "connected and usable", we
+ * define a port as "connected" when it is not only connected, but also when it
+ * is usable by the rest of the driver. That maintains the old assumption that
+ * connected ports are usable, and avoids exposing to the users objects they
+ * can't really use.
+ */
+static bool icl_tc_port_connected(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *intel_dig_port)
+{
+ enum port port = intel_dig_port->base.port;
+ enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
+ bool is_legacy, is_typec, is_tbt;
+ u32 dpsp;
+
+ /*
+ * Complain if we got a legacy port HPD, but VBT didn't mark the port as
+ * legacy. Treat the port as legacy from now on.
+ */
+ if (!intel_dig_port->tc_legacy_port &&
+ I915_READ(SDEISR) & SDE_TC_HOTPLUG_ICP(tc_port)) {
+ DRM_ERROR("VBT incorrectly claims port %c is not TypeC legacy\n",
+ port_name(port));
+ intel_dig_port->tc_legacy_port = true;
+ }
+ is_legacy = intel_dig_port->tc_legacy_port;
+
+ /*
+ * The spec says we shouldn't be using the ISR bits for detecting
+ * between TC and TBT. We should use DFLEXDPSP.
+ */
+ dpsp = I915_READ(PORT_TX_DFLEXDPSP);
+ is_typec = dpsp & TC_LIVE_STATE_TC(tc_port);
+ is_tbt = dpsp & TC_LIVE_STATE_TBT(tc_port);
+
+ if (!is_legacy && !is_typec && !is_tbt) {
+ icl_tc_phy_disconnect(dev_priv, intel_dig_port);
+
+ return false;
+ }
+
+ icl_update_tc_port_type(dev_priv, intel_dig_port, is_legacy, is_typec,
+ is_tbt);
+
+ if (!icl_tc_phy_connect(dev_priv, intel_dig_port))
+ return false;
+
+ return true;
+}
+
+static bool icl_digital_port_connected(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+
+ if (intel_port_is_combophy(dev_priv, encoder->port))
+ return icl_combo_port_connected(dev_priv, dig_port);
+ else if (intel_port_is_tc(dev_priv, encoder->port))
+ return icl_tc_port_connected(dev_priv, dig_port);
+ else
+ MISSING_CASE(encoder->hpd_pin);
+
+ return false;
+}
+
+/*
+ * intel_digital_port_connected - is the specified port connected?
+ * @encoder: intel_encoder
+ *
+ * In cases where there's a connector physically connected but it can't be used
+ * by our hardware we also return false, since the rest of the driver should
+ * pretty much treat the port as disconnected. This is relevant for type-C
+ * (starting on ICL) where there's ownership involved.
+ *
+ * Return %true if port is connected, %false otherwise.
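+ *
+ * As an illustration of the type-C behaviour described above: a sink plugged
+ * into an ICL type-C port keeps reading as disconnected here until
+ * icl_tc_phy_connect() has completed the FIA ownership handshake, even if the
+ * DFLEXDPSP live state already reports a device.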
+ */ +static bool __intel_digital_port_connected(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + + if (HAS_GMCH(dev_priv)) { + if (IS_GM45(dev_priv)) + return gm45_digital_port_connected(encoder); + else + return g4x_digital_port_connected(encoder); + } + + if (INTEL_GEN(dev_priv) >= 11) + return icl_digital_port_connected(encoder); + else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv)) + return spt_digital_port_connected(encoder); + else if (IS_GEN9_LP(dev_priv)) + return bxt_digital_port_connected(encoder); + else if (IS_GEN(dev_priv, 8)) + return bdw_digital_port_connected(encoder); + else if (IS_GEN(dev_priv, 7)) + return ivb_digital_port_connected(encoder); + else if (IS_GEN(dev_priv, 6)) + return snb_digital_port_connected(encoder); + else if (IS_GEN(dev_priv, 5)) + return ilk_digital_port_connected(encoder); + + MISSING_CASE(INTEL_GEN(dev_priv)); + return false; +} + +bool intel_digital_port_connected(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + bool is_connected = false; + intel_wakeref_t wakeref; + + with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref) + is_connected = __intel_digital_port_connected(encoder); + + return is_connected; +} + +static struct edid * +intel_dp_get_edid(struct intel_dp *intel_dp) +{ + struct intel_connector *intel_connector = intel_dp->attached_connector; + + /* use cached edid if we have one */ + if (intel_connector->edid) { + /* invalid edid */ + if (IS_ERR(intel_connector->edid)) + return NULL; + + return drm_edid_duplicate(intel_connector->edid); + } else + return drm_get_edid(&intel_connector->base, + &intel_dp->aux.ddc); +} + +static void +intel_dp_set_edid(struct intel_dp *intel_dp) +{ + struct intel_connector *intel_connector = intel_dp->attached_connector; + struct edid *edid; + + intel_dp_unset_edid(intel_dp); + edid = intel_dp_get_edid(intel_dp); + intel_connector->detect_edid = edid; + + intel_dp->has_audio = drm_detect_monitor_audio(edid); + drm_dp_cec_set_edid(&intel_dp->aux, edid); +} + +static void +intel_dp_unset_edid(struct intel_dp *intel_dp) +{ + struct intel_connector *intel_connector = intel_dp->attached_connector; + + drm_dp_cec_unset_edid(&intel_dp->aux); + kfree(intel_connector->detect_edid); + intel_connector->detect_edid = NULL; + + intel_dp->has_audio = false; +} + +static int +intel_dp_detect(struct drm_connector *connector, + struct drm_modeset_acquire_ctx *ctx, + bool force) +{ + struct drm_i915_private *dev_priv = to_i915(connector->dev); + struct intel_dp *intel_dp = intel_attached_dp(connector); + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct intel_encoder *encoder = &dig_port->base; + enum drm_connector_status status; + + DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", + connector->base.id, connector->name); + WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex)); + + /* Can't disconnect eDP */ + if (intel_dp_is_edp(intel_dp)) + status = edp_detect(intel_dp); + else if (intel_digital_port_connected(encoder)) + status = intel_dp_detect_dpcd(intel_dp); + else + status = connector_status_disconnected; + + if (status == connector_status_disconnected) { + memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance)); + memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd)); + + if (intel_dp->is_mst) { + DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", + intel_dp->is_mst, + intel_dp->mst_mgr.mst_state); + intel_dp->is_mst = false; + 
drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, + intel_dp->is_mst); + } + + goto out; + } + + if (intel_dp->reset_link_params) { + /* Initial max link lane count */ + intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp); + + /* Initial max link rate */ + intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp); + + intel_dp->reset_link_params = false; + } + + intel_dp_print_rates(intel_dp); + + /* Read DP Sink DSC Cap DPCD regs for DP v1.4 */ + if (INTEL_GEN(dev_priv) >= 11) + intel_dp_get_dsc_sink_cap(intel_dp); + + drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc, + drm_dp_is_branch(intel_dp->dpcd)); + + intel_dp_configure_mst(intel_dp); + + if (intel_dp->is_mst) { + /* + * If we are in MST mode then this connector + * won't appear connected or have anything + * with EDID on it + */ + status = connector_status_disconnected; + goto out; + } + + /* + * Some external monitors do not signal loss of link synchronization + * with an IRQ_HPD, so force a link status check. + */ + if (!intel_dp_is_edp(intel_dp)) { + int ret; + + ret = intel_dp_retrain_link(encoder, ctx); + if (ret) + return ret; + } + + /* + * Clearing NACK and defer counts to get their exact values + * while reading EDID which are required by Compliance tests + * 4.2.2.4 and 4.2.2.5 + */ + intel_dp->aux.i2c_nack_count = 0; + intel_dp->aux.i2c_defer_count = 0; + + intel_dp_set_edid(intel_dp); + if (intel_dp_is_edp(intel_dp) || + to_intel_connector(connector)->detect_edid) + status = connector_status_connected; + + intel_dp_check_service_irq(intel_dp); + +out: + if (status != connector_status_connected && !intel_dp->is_mst) + intel_dp_unset_edid(intel_dp); + + return status; +} + +static void +intel_dp_force(struct drm_connector *connector) +{ + struct intel_dp *intel_dp = intel_attached_dp(connector); + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct intel_encoder *intel_encoder = &dig_port->base; + struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev); + enum intel_display_power_domain aux_domain = + intel_aux_power_domain(dig_port); + intel_wakeref_t wakeref; + + DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", + connector->base.id, connector->name); + intel_dp_unset_edid(intel_dp); + + if (connector->status != connector_status_connected) + return; + + wakeref = intel_display_power_get(dev_priv, aux_domain); + + intel_dp_set_edid(intel_dp); + + intel_display_power_put(dev_priv, aux_domain, wakeref); +} + +static int intel_dp_get_modes(struct drm_connector *connector) +{ + struct intel_connector *intel_connector = to_intel_connector(connector); + struct edid *edid; + + edid = intel_connector->detect_edid; + if (edid) { + int ret = intel_connector_update_modes(connector, edid); + if (ret) + return ret; + } + + /* if eDP has no EDID, fall back to fixed mode */ + if (intel_dp_is_edp(intel_attached_dp(connector)) && + intel_connector->panel.fixed_mode) { + struct drm_display_mode *mode; + + mode = drm_mode_duplicate(connector->dev, + intel_connector->panel.fixed_mode); + if (mode) { + drm_mode_probed_add(connector, mode); + return 1; + } + } + + return 0; +} + +static int +intel_dp_connector_register(struct drm_connector *connector) +{ + struct intel_dp *intel_dp = intel_attached_dp(connector); + struct drm_device *dev = connector->dev; + int ret; + + ret = intel_connector_register(connector); + if (ret) + return ret; + + i915_debugfs_connector_add(connector); + + DRM_DEBUG_KMS("registering %s bus for %s\n", + intel_dp->aux.name, connector->kdev->kobj.name); + + 
intel_dp->aux.dev = connector->kdev;
+ ret = drm_dp_aux_register(&intel_dp->aux);
+ if (!ret)
+ drm_dp_cec_register_connector(&intel_dp->aux,
+ connector->name, dev->dev);
+ return ret;
+}
+
+static void
+intel_dp_connector_unregister(struct drm_connector *connector)
+{
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+
+ drm_dp_cec_unregister_connector(&intel_dp->aux);
+ drm_dp_aux_unregister(&intel_dp->aux);
+ intel_connector_unregister(connector);
+}
+
+void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
+{
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+
+ intel_dp_mst_encoder_cleanup(intel_dig_port);
+ if (intel_dp_is_edp(intel_dp)) {
+ intel_wakeref_t wakeref;
+
+ cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
+ /*
+ * vdd might still be enabled due to the delayed vdd off.
+ * Make sure vdd is actually turned off here.
+ */
+ with_pps_lock(intel_dp, wakeref)
+ edp_panel_vdd_off_sync(intel_dp);
+
+ if (intel_dp->edp_notifier.notifier_call) {
+ unregister_reboot_notifier(&intel_dp->edp_notifier);
+ intel_dp->edp_notifier.notifier_call = NULL;
+ }
+ }
+
+ intel_dp_aux_fini(intel_dp);
+}
+
+static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
+{
+ intel_dp_encoder_flush_work(encoder);
+
+ drm_encoder_cleanup(encoder);
+ kfree(enc_to_dig_port(encoder));
+}
+
+void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+ intel_wakeref_t wakeref;
+
+ if (!intel_dp_is_edp(intel_dp))
+ return;
+
+ /*
+ * vdd might still be enabled due to the delayed vdd off.
+ * Make sure vdd is actually turned off here.
+ */
+ cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
+ with_pps_lock(intel_dp, wakeref)
+ edp_panel_vdd_off_sync(intel_dp);
+}
+
+static void intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp *hdcp, int timeout)
+{
+ long ret;
+
+#define C (hdcp->cp_irq_count_cached != atomic_read(&hdcp->cp_irq_count))
+ ret = wait_event_interruptible_timeout(hdcp->cp_irq_queue, C,
+ msecs_to_jiffies(timeout));
+
+ if (!ret)
+ DRM_DEBUG_KMS("Timedout at waiting for CP_IRQ\n");
+}
+
+static
+int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
+ u8 *an)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&intel_dig_port->base.base);
+ static const struct drm_dp_aux_msg msg = {
+ .request = DP_AUX_NATIVE_WRITE,
+ .address = DP_AUX_HDCP_AKSV,
+ .size = DRM_HDCP_KSV_LEN,
+ };
+ u8 txbuf[HEADER_SIZE + DRM_HDCP_KSV_LEN] = {}, rxbuf[2], reply = 0;
+ ssize_t dpcd_ret;
+ int ret;
+
+ /* Output An first, that's easy */
+ dpcd_ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux, DP_AUX_HDCP_AN,
+ an, DRM_HDCP_AN_LEN);
+ if (dpcd_ret != DRM_HDCP_AN_LEN) {
+ DRM_DEBUG_KMS("Failed to write An over DP/AUX (%zd)\n",
+ dpcd_ret);
+ return dpcd_ret >= 0 ? -EIO : dpcd_ret;
+ }
+
+ /*
+ * Since Aksv is Oh-So-Secret, we can't access it in software. So in
+ * order to get it on the wire, we need to create the AUX header as if
+ * we were writing the data, and then tickle the hardware to output the
+ * data once the header is sent out.
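+ *
+ * Concretely (an illustrative summary of the code below, not extra
+ * behaviour): txbuf carries only the AUX header that intel_dp_aux_header()
+ * builds for a DRM_HDCP_KSV_LEN byte write to DP_AUX_HDCP_AKSV; the payload
+ * bytes stay zero, and passing DP_AUX_CH_CTL_AUX_AKSV_SELECT to
+ * intel_dp_aux_xfer() is what makes the hardware supply the real Aksv.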
+ */ + intel_dp_aux_header(txbuf, &msg); + + ret = intel_dp_aux_xfer(intel_dp, txbuf, HEADER_SIZE + msg.size, + rxbuf, sizeof(rxbuf), + DP_AUX_CH_CTL_AUX_AKSV_SELECT); + if (ret < 0) { + DRM_DEBUG_KMS("Write Aksv over DP/AUX failed (%d)\n", ret); + return ret; + } else if (ret == 0) { + DRM_DEBUG_KMS("Aksv write over DP/AUX was empty\n"); + return -EIO; + } + + reply = (rxbuf[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK; + if (reply != DP_AUX_NATIVE_REPLY_ACK) { + DRM_DEBUG_KMS("Aksv write: no DP_AUX_NATIVE_REPLY_ACK %x\n", + reply); + return -EIO; + } + return 0; +} + +static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port, + u8 *bksv) +{ + ssize_t ret; + ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv, + DRM_HDCP_KSV_LEN); + if (ret != DRM_HDCP_KSV_LEN) { + DRM_DEBUG_KMS("Read Bksv from DP/AUX failed (%zd)\n", ret); + return ret >= 0 ? -EIO : ret; + } + return 0; +} + +static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port, + u8 *bstatus) +{ + ssize_t ret; + /* + * For some reason the HDMI and DP HDCP specs call this register + * definition by different names. In the HDMI spec, it's called BSTATUS, + * but in DP it's called BINFO. + */ + ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BINFO, + bstatus, DRM_HDCP_BSTATUS_LEN); + if (ret != DRM_HDCP_BSTATUS_LEN) { + DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret); + return ret >= 0 ? -EIO : ret; + } + return 0; +} + +static +int intel_dp_hdcp_read_bcaps(struct intel_digital_port *intel_dig_port, + u8 *bcaps) +{ + ssize_t ret; + + ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BCAPS, + bcaps, 1); + if (ret != 1) { + DRM_DEBUG_KMS("Read bcaps from DP/AUX failed (%zd)\n", ret); + return ret >= 0 ? -EIO : ret; + } + + return 0; +} + +static +int intel_dp_hdcp_repeater_present(struct intel_digital_port *intel_dig_port, + bool *repeater_present) +{ + ssize_t ret; + u8 bcaps; + + ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps); + if (ret) + return ret; + + *repeater_present = bcaps & DP_BCAPS_REPEATER_PRESENT; + return 0; +} + +static +int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port, + u8 *ri_prime) +{ + ssize_t ret; + ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME, + ri_prime, DRM_HDCP_RI_LEN); + if (ret != DRM_HDCP_RI_LEN) { + DRM_DEBUG_KMS("Read Ri' from DP/AUX failed (%zd)\n", ret); + return ret >= 0 ? -EIO : ret; + } + return 0; +} + +static +int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port, + bool *ksv_ready) +{ + ssize_t ret; + u8 bstatus; + ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS, + &bstatus, 1); + if (ret != 1) { + DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret); + return ret >= 0 ? -EIO : ret; + } + *ksv_ready = bstatus & DP_BSTATUS_READY; + return 0; +} + +static +int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port, + int num_downstream, u8 *ksv_fifo) +{ + ssize_t ret; + int i; + + /* KSV list is read via 15 byte window (3 entries @ 5 bytes each) */ + for (i = 0; i < num_downstream; i += 3) { + size_t len = min(num_downstream - i, 3) * DRM_HDCP_KSV_LEN; + ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, + DP_AUX_HDCP_KSV_FIFO, + ksv_fifo + i * DRM_HDCP_KSV_LEN, + len); + if (ret != len) { + DRM_DEBUG_KMS("Read ksv[%d] from DP/AUX failed (%zd)\n", + i, ret); + return ret >= 0 ? 
-EIO : ret; + } + } + return 0; +} + +static +int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port, + int i, u32 *part) +{ + ssize_t ret; + + if (i >= DRM_HDCP_V_PRIME_NUM_PARTS) + return -EINVAL; + + ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, + DP_AUX_HDCP_V_PRIME(i), part, + DRM_HDCP_V_PRIME_PART_LEN); + if (ret != DRM_HDCP_V_PRIME_PART_LEN) { + DRM_DEBUG_KMS("Read v'[%d] from DP/AUX failed (%zd)\n", i, ret); + return ret >= 0 ? -EIO : ret; + } + return 0; +} + +static +int intel_dp_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port, + bool enable) +{ + /* Not used for single stream DisplayPort setups */ + return 0; +} + +static +bool intel_dp_hdcp_check_link(struct intel_digital_port *intel_dig_port) +{ + ssize_t ret; + u8 bstatus; + + ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS, + &bstatus, 1); + if (ret != 1) { + DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret); + return false; + } + + return !(bstatus & (DP_BSTATUS_LINK_FAILURE | DP_BSTATUS_REAUTH_REQ)); +} + +static +int intel_dp_hdcp_capable(struct intel_digital_port *intel_dig_port, + bool *hdcp_capable) +{ + ssize_t ret; + u8 bcaps; + + ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps); + if (ret) + return ret; + + *hdcp_capable = bcaps & DP_BCAPS_HDCP_CAPABLE; + return 0; +} + +struct hdcp2_dp_errata_stream_type { + u8 msg_id; + u8 stream_type; +} __packed; + +static struct hdcp2_dp_msg_data { + u8 msg_id; + u32 offset; + bool msg_detectable; + u32 timeout; + u32 timeout2; /* Added for non_paired situation */ + } hdcp2_msg_data[] = { + {HDCP_2_2_AKE_INIT, DP_HDCP_2_2_AKE_INIT_OFFSET, false, 0, 0}, + {HDCP_2_2_AKE_SEND_CERT, DP_HDCP_2_2_AKE_SEND_CERT_OFFSET, + false, HDCP_2_2_CERT_TIMEOUT_MS, 0}, + {HDCP_2_2_AKE_NO_STORED_KM, DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET, + false, 0, 0}, + {HDCP_2_2_AKE_STORED_KM, DP_HDCP_2_2_AKE_STORED_KM_OFFSET, + false, 0, 0}, + {HDCP_2_2_AKE_SEND_HPRIME, DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET, + true, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS, + HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS}, + {HDCP_2_2_AKE_SEND_PAIRING_INFO, + DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET, true, + HDCP_2_2_PAIRING_TIMEOUT_MS, 0}, + {HDCP_2_2_LC_INIT, DP_HDCP_2_2_LC_INIT_OFFSET, false, 0, 0}, + {HDCP_2_2_LC_SEND_LPRIME, DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET, + false, HDCP_2_2_DP_LPRIME_TIMEOUT_MS, 0}, + {HDCP_2_2_SKE_SEND_EKS, DP_HDCP_2_2_SKE_SEND_EKS_OFFSET, false, + 0, 0}, + {HDCP_2_2_REP_SEND_RECVID_LIST, + DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET, true, + HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0}, + {HDCP_2_2_REP_SEND_ACK, DP_HDCP_2_2_REP_SEND_ACK_OFFSET, false, + 0, 0}, + {HDCP_2_2_REP_STREAM_MANAGE, + DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET, false, + 0, 0}, + {HDCP_2_2_REP_STREAM_READY, DP_HDCP_2_2_REP_STREAM_READY_OFFSET, + false, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0}, +/* local define to shovel this through the write_2_2 interface */ +#define HDCP_2_2_ERRATA_DP_STREAM_TYPE 50 + {HDCP_2_2_ERRATA_DP_STREAM_TYPE, + DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET, false, + 0, 0}, + }; + +static inline +int intel_dp_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port, + u8 *rx_status) +{ + ssize_t ret; + + ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, + DP_HDCP_2_2_REG_RXSTATUS_OFFSET, rx_status, + HDCP_2_2_DP_RXSTATUS_LEN); + if (ret != HDCP_2_2_DP_RXSTATUS_LEN) { + DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret); + return ret >= 0 ? 
-EIO : ret; + } + + return 0; +} + +static +int hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port, + u8 msg_id, bool *msg_ready) +{ + u8 rx_status; + int ret; + + *msg_ready = false; + ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status); + if (ret < 0) + return ret; + + switch (msg_id) { + case HDCP_2_2_AKE_SEND_HPRIME: + if (HDCP_2_2_DP_RXSTATUS_H_PRIME(rx_status)) + *msg_ready = true; + break; + case HDCP_2_2_AKE_SEND_PAIRING_INFO: + if (HDCP_2_2_DP_RXSTATUS_PAIRING(rx_status)) + *msg_ready = true; + break; + case HDCP_2_2_REP_SEND_RECVID_LIST: + if (HDCP_2_2_DP_RXSTATUS_READY(rx_status)) + *msg_ready = true; + break; + default: + DRM_ERROR("Unidentified msg_id: %d\n", msg_id); + return -EINVAL; + } + + return 0; +} + +static ssize_t +intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port, + struct hdcp2_dp_msg_data *hdcp2_msg_data) +{ + struct intel_dp *dp = &intel_dig_port->dp; + struct intel_hdcp *hdcp = &dp->attached_connector->hdcp; + u8 msg_id = hdcp2_msg_data->msg_id; + int ret, timeout; + bool msg_ready = false; + + if (msg_id == HDCP_2_2_AKE_SEND_HPRIME && !hdcp->is_paired) + timeout = hdcp2_msg_data->timeout2; + else + timeout = hdcp2_msg_data->timeout; + + /* + * There is no way to detect the CERT, LPRIME and STREAM_READY + * availability. So Wait for timeout and read the msg. + */ + if (!hdcp2_msg_data->msg_detectable) { + mdelay(timeout); + ret = 0; + } else { + /* + * As we want to check the msg availability at timeout, Ignoring + * the timeout at wait for CP_IRQ. + */ + intel_dp_hdcp_wait_for_cp_irq(hdcp, timeout); + ret = hdcp2_detect_msg_availability(intel_dig_port, + msg_id, &msg_ready); + if (!msg_ready) + ret = -ETIMEDOUT; + } + + if (ret) + DRM_DEBUG_KMS("msg_id %d, ret %d, timeout(mSec): %d\n", + hdcp2_msg_data->msg_id, ret, timeout); + + return ret; +} + +static struct hdcp2_dp_msg_data *get_hdcp2_dp_msg_data(u8 msg_id) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(hdcp2_msg_data); i++) + if (hdcp2_msg_data[i].msg_id == msg_id) + return &hdcp2_msg_data[i]; + + return NULL; +} + +static +int intel_dp_hdcp2_write_msg(struct intel_digital_port *intel_dig_port, + void *buf, size_t size) +{ + struct intel_dp *dp = &intel_dig_port->dp; + struct intel_hdcp *hdcp = &dp->attached_connector->hdcp; + unsigned int offset; + u8 *byte = buf; + ssize_t ret, bytes_to_write, len; + struct hdcp2_dp_msg_data *hdcp2_msg_data; + + hdcp2_msg_data = get_hdcp2_dp_msg_data(*byte); + if (!hdcp2_msg_data) + return -EINVAL; + + offset = hdcp2_msg_data->offset; + + /* No msg_id in DP HDCP2.2 msgs */ + bytes_to_write = size - 1; + byte++; + + hdcp->cp_irq_count_cached = atomic_read(&hdcp->cp_irq_count); + + while (bytes_to_write) { + len = bytes_to_write > DP_AUX_MAX_PAYLOAD_BYTES ? + DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_write; + + ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux, + offset, (void *)byte, len); + if (ret < 0) + return ret; + + bytes_to_write -= ret; + byte += ret; + offset += ret; + } + + return size; +} + +static +ssize_t get_receiver_id_list_size(struct intel_digital_port *intel_dig_port) +{ + u8 rx_info[HDCP_2_2_RXINFO_LEN]; + u32 dev_cnt; + ssize_t ret; + + ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, + DP_HDCP_2_2_REG_RXINFO_OFFSET, + (void *)rx_info, HDCP_2_2_RXINFO_LEN); + if (ret != HDCP_2_2_RXINFO_LEN) + return ret >= 0 ? 
-EIO : ret; + + dev_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 | + HDCP_2_2_DEV_COUNT_LO(rx_info[1])); + + if (dev_cnt > HDCP_2_2_MAX_DEVICE_COUNT) + dev_cnt = HDCP_2_2_MAX_DEVICE_COUNT; + + ret = sizeof(struct hdcp2_rep_send_receiverid_list) - + HDCP_2_2_RECEIVER_IDS_MAX_LEN + + (dev_cnt * HDCP_2_2_RECEIVER_ID_LEN); + + return ret; +} + +static +int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port, + u8 msg_id, void *buf, size_t size) +{ + unsigned int offset; + u8 *byte = buf; + ssize_t ret, bytes_to_recv, len; + struct hdcp2_dp_msg_data *hdcp2_msg_data; + + hdcp2_msg_data = get_hdcp2_dp_msg_data(msg_id); + if (!hdcp2_msg_data) + return -EINVAL; + offset = hdcp2_msg_data->offset; + + ret = intel_dp_hdcp2_wait_for_msg(intel_dig_port, hdcp2_msg_data); + if (ret < 0) + return ret; + + if (msg_id == HDCP_2_2_REP_SEND_RECVID_LIST) { + ret = get_receiver_id_list_size(intel_dig_port); + if (ret < 0) + return ret; + + size = ret; + } + bytes_to_recv = size - 1; + + /* DP adaptation msgs has no msg_id */ + byte++; + + while (bytes_to_recv) { + len = bytes_to_recv > DP_AUX_MAX_PAYLOAD_BYTES ? + DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_recv; + + ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, offset, + (void *)byte, len); + if (ret < 0) { + DRM_DEBUG_KMS("msg_id %d, ret %zd\n", msg_id, ret); + return ret; + } + + bytes_to_recv -= ret; + byte += ret; + offset += ret; + } + byte = buf; + *byte = msg_id; + + return size; +} + +static +int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *intel_dig_port, + bool is_repeater, u8 content_type) +{ + struct hdcp2_dp_errata_stream_type stream_type_msg; + + if (is_repeater) + return 0; + + /* + * Errata for DP: As Stream type is used for encryption, Receiver + * should be communicated with stream type for the decryption of the + * content. + * Repeater will be communicated with stream type as a part of it's + * auth later in time. + */ + stream_type_msg.msg_id = HDCP_2_2_ERRATA_DP_STREAM_TYPE; + stream_type_msg.stream_type = content_type; + + return intel_dp_hdcp2_write_msg(intel_dig_port, &stream_type_msg, + sizeof(stream_type_msg)); +} + +static +int intel_dp_hdcp2_check_link(struct intel_digital_port *intel_dig_port) +{ + u8 rx_status; + int ret; + + ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status); + if (ret) + return ret; + + if (HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(rx_status)) + ret = HDCP_REAUTH_REQUEST; + else if (HDCP_2_2_DP_RXSTATUS_LINK_FAILED(rx_status)) + ret = HDCP_LINK_INTEGRITY_FAILURE; + else if (HDCP_2_2_DP_RXSTATUS_READY(rx_status)) + ret = HDCP_TOPOLOGY_CHANGE; + + return ret; +} + +static +int intel_dp_hdcp2_capable(struct intel_digital_port *intel_dig_port, + bool *capable) +{ + u8 rx_caps[3]; + int ret; + + *capable = false; + ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, + DP_HDCP_2_2_REG_RX_CAPS_OFFSET, + rx_caps, HDCP_2_2_RXCAPS_LEN); + if (ret != HDCP_2_2_RXCAPS_LEN) + return ret >= 0 ? 
-EIO : ret; + + if (rx_caps[0] == HDCP_2_2_RX_CAPS_VERSION_VAL && + HDCP_2_2_DP_HDCP_CAPABLE(rx_caps[2])) + *capable = true; + + return 0; +} + +static const struct intel_hdcp_shim intel_dp_hdcp_shim = { + .write_an_aksv = intel_dp_hdcp_write_an_aksv, + .read_bksv = intel_dp_hdcp_read_bksv, + .read_bstatus = intel_dp_hdcp_read_bstatus, + .repeater_present = intel_dp_hdcp_repeater_present, + .read_ri_prime = intel_dp_hdcp_read_ri_prime, + .read_ksv_ready = intel_dp_hdcp_read_ksv_ready, + .read_ksv_fifo = intel_dp_hdcp_read_ksv_fifo, + .read_v_prime_part = intel_dp_hdcp_read_v_prime_part, + .toggle_signalling = intel_dp_hdcp_toggle_signalling, + .check_link = intel_dp_hdcp_check_link, + .hdcp_capable = intel_dp_hdcp_capable, + .write_2_2_msg = intel_dp_hdcp2_write_msg, + .read_2_2_msg = intel_dp_hdcp2_read_msg, + .config_stream_type = intel_dp_hdcp2_config_stream_type, + .check_2_2_link = intel_dp_hdcp2_check_link, + .hdcp_2_2_capable = intel_dp_hdcp2_capable, + .protocol = HDCP_PROTOCOL_DP, +}; + +static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + + lockdep_assert_held(&dev_priv->pps_mutex); + + if (!edp_have_panel_vdd(intel_dp)) + return; + + /* + * The VDD bit needs a power domain reference, so if the bit is + * already enabled when we boot or resume, grab this reference and + * schedule a vdd off, so we don't hold on to the reference + * indefinitely. + */ + DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n"); + intel_display_power_get(dev_priv, intel_aux_power_domain(dig_port)); + + edp_panel_vdd_schedule_off(intel_dp); +} + +static enum pipe vlv_active_pipe(struct intel_dp *intel_dp) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + enum pipe pipe; + + if (intel_dp_port_enabled(dev_priv, intel_dp->output_reg, + encoder->port, &pipe)) + return pipe; + + return INVALID_PIPE; +} + +void intel_dp_encoder_reset(struct drm_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->dev); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp); + intel_wakeref_t wakeref; + + if (!HAS_DDI(dev_priv)) + intel_dp->DP = I915_READ(intel_dp->output_reg); + + if (lspcon->active) + lspcon_resume(lspcon); + + intel_dp->reset_link_params = true; + + if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) && + !intel_dp_is_edp(intel_dp)) + return; + + with_pps_lock(intel_dp, wakeref) { + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + intel_dp->active_pipe = vlv_active_pipe(intel_dp); + + if (intel_dp_is_edp(intel_dp)) { + /* + * Reinit the power sequencer, in case BIOS did + * something nasty with it. 
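+ * A typical case (see intel_edp_panel_vdd_sanitize()) is VDD having
+ * been left force-enabled across boot or resume, which would otherwise
+ * confuse the power domain tracking.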
+ */ + intel_dp_pps_init(intel_dp); + intel_edp_panel_vdd_sanitize(intel_dp); + } + } +} + +static const struct drm_connector_funcs intel_dp_connector_funcs = { + .force = intel_dp_force, + .fill_modes = drm_helper_probe_single_connector_modes, + .atomic_get_property = intel_digital_connector_atomic_get_property, + .atomic_set_property = intel_digital_connector_atomic_set_property, + .late_register = intel_dp_connector_register, + .early_unregister = intel_dp_connector_unregister, + .destroy = intel_connector_destroy, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, + .atomic_duplicate_state = intel_digital_connector_duplicate_state, +}; + +static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = { + .detect_ctx = intel_dp_detect, + .get_modes = intel_dp_get_modes, + .mode_valid = intel_dp_mode_valid, + .atomic_check = intel_digital_connector_atomic_check, +}; + +static const struct drm_encoder_funcs intel_dp_enc_funcs = { + .reset = intel_dp_encoder_reset, + .destroy = intel_dp_encoder_destroy, +}; + +enum irqreturn +intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) +{ + struct intel_dp *intel_dp = &intel_dig_port->dp; + + if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) { + /* + * vdd off can generate a long pulse on eDP which + * would require vdd on to handle it, and thus we + * would end up in an endless cycle of + * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..." + */ + DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n", + port_name(intel_dig_port->base.port)); + return IRQ_HANDLED; + } + + DRM_DEBUG_KMS("got hpd irq on port %c - %s\n", + port_name(intel_dig_port->base.port), + long_hpd ? "long" : "short"); + + if (long_hpd) { + intel_dp->reset_link_params = true; + return IRQ_NONE; + } + + if (intel_dp->is_mst) { + if (intel_dp_check_mst_status(intel_dp) == -EINVAL) { + /* + * If we were in MST mode, and device is not + * there, get out of MST mode + */ + DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", + intel_dp->is_mst, intel_dp->mst_mgr.mst_state); + intel_dp->is_mst = false; + drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, + intel_dp->is_mst); + + return IRQ_NONE; + } + } + + if (!intel_dp->is_mst) { + bool handled; + + handled = intel_dp_short_pulse(intel_dp); + + if (!handled) + return IRQ_NONE; + } + + return IRQ_HANDLED; +} + +/* check the VBT to see whether the eDP is on another port */ +bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port) +{ + /* + * eDP not supported on g4x. so bail out early just + * for a bit extra safety in case the VBT is bonkers. 
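+ *
+ * Reading the checks below: anything older than gen5 never has eDP, and
+ * before gen9 port A is assumed to always be eDP, so only the remaining
+ * combinations need to consult the VBT.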
+ */ + if (INTEL_GEN(dev_priv) < 5) + return false; + + if (INTEL_GEN(dev_priv) < 9 && port == PORT_A) + return true; + + return intel_bios_is_port_edp(dev_priv, port); +} + +static void +intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector) +{ + struct drm_i915_private *dev_priv = to_i915(connector->dev); + enum port port = dp_to_dig_port(intel_dp)->base.port; + + if (!IS_G4X(dev_priv) && port != PORT_A) + intel_attach_force_audio_property(connector); + + intel_attach_broadcast_rgb_property(connector); + if (HAS_GMCH(dev_priv)) + drm_connector_attach_max_bpc_property(connector, 6, 10); + else if (INTEL_GEN(dev_priv) >= 5) + drm_connector_attach_max_bpc_property(connector, 6, 12); + + if (intel_dp_is_edp(intel_dp)) { + u32 allowed_scalers; + + allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN); + if (!HAS_GMCH(dev_priv)) + allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER); + + drm_connector_attach_scaling_mode_property(connector, allowed_scalers); + + connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT; + + } +} + +static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp) +{ + intel_dp->panel_power_off_time = ktime_get_boottime(); + intel_dp->last_power_on = jiffies; + intel_dp->last_backlight_off = jiffies; +} + +static void +intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + u32 pp_on, pp_off, pp_ctl; + struct pps_registers regs; + + intel_pps_get_registers(intel_dp, ®s); + + pp_ctl = ironlake_get_pp_control(intel_dp); + + /* Ensure PPS is unlocked */ + if (!HAS_DDI(dev_priv)) + I915_WRITE(regs.pp_ctrl, pp_ctl); + + pp_on = I915_READ(regs.pp_on); + pp_off = I915_READ(regs.pp_off); + + /* Pull timing values out of registers */ + seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on); + seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on); + seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off); + seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off); + + if (i915_mmio_reg_valid(regs.pp_div)) { + u32 pp_div; + + pp_div = I915_READ(regs.pp_div); + + seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000; + } else { + seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000; + } +} + +static void +intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq) +{ + DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", + state_name, + seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12); +} + +static void +intel_pps_verify_state(struct intel_dp *intel_dp) +{ + struct edp_power_seq hw; + struct edp_power_seq *sw = &intel_dp->pps_delays; + + intel_pps_readout_hw_state(intel_dp, &hw); + + if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 || + hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) { + DRM_ERROR("PPS state mismatch\n"); + intel_pps_dump_state("sw", sw); + intel_pps_dump_state("hw", &hw); + } +} + +static void +intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct edp_power_seq cur, vbt, spec, + *final = &intel_dp->pps_delays; + + lockdep_assert_held(&dev_priv->pps_mutex); + + /* already initialized? 
*/
+ if (final->t11_t12 != 0)
+ return;
+
+ intel_pps_readout_hw_state(intel_dp, &cur);
+
+ intel_pps_dump_state("cur", &cur);
+
+ vbt = dev_priv->vbt.edp.pps;
+ /* On Toshiba Satellite P50-C-18C system the VBT T12 delay
+ * of 500ms appears to be too short. Occasionally the panel
+ * just fails to power back on. Increasing the delay to 800ms
+ * seems sufficient to avoid this problem.
+ */
+ if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
+ vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
+ DRM_DEBUG_KMS("Increasing T12 panel delay as per the quirk to %d\n",
+ vbt.t11_t12);
+ }
+ /* T11_T12 delay is special and actually in units of 100ms, but zero
+ * based in the hw (so we need to add 100 ms). But the sw vbt
+ * table multiplies it with 1000 to make it in units of 100usec,
+ * too. */
+ vbt.t11_t12 += 100 * 10;
+
+ /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
+ * our hw here, which are all in 100usec. */
+ spec.t1_t3 = 210 * 10;
+ spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
+ spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
+ spec.t10 = 500 * 10;
+ /* This one is special and actually in units of 100ms, but zero
+ * based in the hw (so we need to add 100 ms). But the sw vbt
+ * table multiplies it with 1000 to make it in units of 100usec,
+ * too. */
+ spec.t11_t12 = (510 + 100) * 10;
+
+ intel_pps_dump_state("vbt", &vbt);
+
+ /* Use the max of the register settings and vbt. If both are
+ * unset, fall back to the spec limits. */
+#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
+ spec.field : \
+ max(cur.field, vbt.field))
+ assign_final(t1_t3);
+ assign_final(t8);
+ assign_final(t9);
+ assign_final(t10);
+ assign_final(t11_t12);
+#undef assign_final
+
+#define get_delay(field) (DIV_ROUND_UP(final->field, 10))
+ intel_dp->panel_power_up_delay = get_delay(t1_t3);
+ intel_dp->backlight_on_delay = get_delay(t8);
+ intel_dp->backlight_off_delay = get_delay(t9);
+ intel_dp->panel_power_down_delay = get_delay(t10);
+ intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
+#undef get_delay
+
+ DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
+ intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
+ intel_dp->panel_power_cycle_delay);
+
+ DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
+ intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
+
+ /*
+ * We override the HW backlight delays to 1 because we do manual waits
+ * on them. For T8, even BSpec recommends doing it. For T9, if we
+ * don't do this, we'll end up waiting for the backlight off delay
+ * twice: once when we do the manual sleep, and once when we disable
+ * the panel and wait for the PP_STATUS bit to become zero.
+ */
+ final->t8 = 1;
+ final->t9 = 1;
+
+ /*
+ * HW has only a 100msec granularity for t11_t12 so round it up
+ * accordingly.
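+ *
+ * Worked example of the units involved: the spec limit above is stored as
+ * (510 + 100) * 10 = 6100 in 100usec units (i.e. 610ms including the
+ * hardware's zero-based 100ms offset), and the roundup(..., 100 * 10) below
+ * snaps the final value to a multiple of 1000, i.e. to the hardware's 100ms
+ * granularity.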
+ */ + final->t11_t12 = roundup(final->t11_t12, 100 * 10); +} + +static void +intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp, + bool force_disable_vdd) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + u32 pp_on, pp_off, port_sel = 0; + int div = dev_priv->rawclk_freq / 1000; + struct pps_registers regs; + enum port port = dp_to_dig_port(intel_dp)->base.port; + const struct edp_power_seq *seq = &intel_dp->pps_delays; + + lockdep_assert_held(&dev_priv->pps_mutex); + + intel_pps_get_registers(intel_dp, ®s); + + /* + * On some VLV machines the BIOS can leave the VDD + * enabled even on power sequencers which aren't + * hooked up to any port. This would mess up the + * power domain tracking the first time we pick + * one of these power sequencers for use since + * edp_panel_vdd_on() would notice that the VDD was + * already on and therefore wouldn't grab the power + * domain reference. Disable VDD first to avoid this. + * This also avoids spuriously turning the VDD on as + * soon as the new power sequencer gets initialized. + */ + if (force_disable_vdd) { + u32 pp = ironlake_get_pp_control(intel_dp); + + WARN(pp & PANEL_POWER_ON, "Panel power already on\n"); + + if (pp & EDP_FORCE_VDD) + DRM_DEBUG_KMS("VDD already on, disabling first\n"); + + pp &= ~EDP_FORCE_VDD; + + I915_WRITE(regs.pp_ctrl, pp); + } + + pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) | + REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8); + pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) | + REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10); + + /* Haswell doesn't have any port selection bits for the panel + * power sequencer any more. */ + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { + port_sel = PANEL_PORT_SELECT_VLV(port); + } else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) { + switch (port) { + case PORT_A: + port_sel = PANEL_PORT_SELECT_DPA; + break; + case PORT_C: + port_sel = PANEL_PORT_SELECT_DPC; + break; + case PORT_D: + port_sel = PANEL_PORT_SELECT_DPD; + break; + default: + MISSING_CASE(port); + break; + } + } + + pp_on |= port_sel; + + I915_WRITE(regs.pp_on, pp_on); + I915_WRITE(regs.pp_off, pp_off); + + /* + * Compute the divisor for the pp clock, simply match the Bspec formula. + */ + if (i915_mmio_reg_valid(regs.pp_div)) { + I915_WRITE(regs.pp_div, + REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) | + REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000))); + } else { + u32 pp_ctl; + + pp_ctl = I915_READ(regs.pp_ctrl); + pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK; + pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)); + I915_WRITE(regs.pp_ctrl, pp_ctl); + } + + DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n", + I915_READ(regs.pp_on), + I915_READ(regs.pp_off), + i915_mmio_reg_valid(regs.pp_div) ? 
+ I915_READ(regs.pp_div) : + (I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK)); +} + +static void intel_dp_pps_init(struct intel_dp *intel_dp) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { + vlv_initial_power_sequencer_setup(intel_dp); + } else { + intel_dp_init_panel_power_sequencer(intel_dp); + intel_dp_init_panel_power_sequencer_registers(intel_dp, false); + } +} + +/** + * intel_dp_set_drrs_state - program registers for RR switch to take effect + * @dev_priv: i915 device + * @crtc_state: a pointer to the active intel_crtc_state + * @refresh_rate: RR to be programmed + * + * This function gets called when refresh rate (RR) has to be changed from + * one frequency to another. Switches can be between high and low RR + * supported by the panel or to any other RR based on media playback (in + * this case, RR value needs to be passed from user space). + * + * The caller of this function needs to take a lock on dev_priv->drrs. + */ +static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv, + const struct intel_crtc_state *crtc_state, + int refresh_rate) +{ + struct intel_encoder *encoder; + struct intel_digital_port *dig_port = NULL; + struct intel_dp *intel_dp = dev_priv->drrs.dp; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); + enum drrs_refresh_rate_type index = DRRS_HIGH_RR; + + if (refresh_rate <= 0) { + DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n"); + return; + } + + if (intel_dp == NULL) { + DRM_DEBUG_KMS("DRRS not supported.\n"); + return; + } + + dig_port = dp_to_dig_port(intel_dp); + encoder = &dig_port->base; + + if (!intel_crtc) { + DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n"); + return; + } + + if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) { + DRM_DEBUG_KMS("Only Seamless DRRS supported.\n"); + return; + } + + if (intel_dp->attached_connector->panel.downclock_mode->vrefresh == + refresh_rate) + index = DRRS_LOW_RR; + + if (index == dev_priv->drrs.refresh_rate_type) { + DRM_DEBUG_KMS( + "DRRS requested for previously set RR...ignoring\n"); + return; + } + + if (!crtc_state->base.active) { + DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n"); + return; + } + + if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) { + switch (index) { + case DRRS_HIGH_RR: + intel_dp_set_m_n(crtc_state, M1_N1); + break; + case DRRS_LOW_RR: + intel_dp_set_m_n(crtc_state, M2_N2); + break; + case DRRS_MAX_RR: + default: + DRM_ERROR("Unsupported refreshrate type\n"); + } + } else if (INTEL_GEN(dev_priv) > 6) { + i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder); + u32 val; + + val = I915_READ(reg); + if (index > DRRS_HIGH_RR) { + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV; + else + val |= PIPECONF_EDP_RR_MODE_SWITCH; + } else { + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV; + else + val &= ~PIPECONF_EDP_RR_MODE_SWITCH; + } + I915_WRITE(reg, val); + } + + dev_priv->drrs.refresh_rate_type = index; + + DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate); +} + +/** + * intel_edp_drrs_enable - init drrs struct if supported + * @intel_dp: DP struct + * @crtc_state: A pointer to the active crtc state. 
+ *
+ * Initializes frontbuffer_bits and drrs.dp
+ */
+void intel_edp_drrs_enable(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ if (!crtc_state->has_drrs) {
+ DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
+ return;
+ }
+
+ if (dev_priv->psr.enabled) {
+ DRM_DEBUG_KMS("PSR enabled. Not enabling DRRS.\n");
+ return;
+ }
+
+ mutex_lock(&dev_priv->drrs.mutex);
+ if (dev_priv->drrs.dp) {
+ DRM_DEBUG_KMS("DRRS already enabled\n");
+ goto unlock;
+ }
+
+ dev_priv->drrs.busy_frontbuffer_bits = 0;
+
+ dev_priv->drrs.dp = intel_dp;
+
+unlock:
+ mutex_unlock(&dev_priv->drrs.mutex);
+}
+
+/**
+ * intel_edp_drrs_disable - Disable DRRS
+ * @intel_dp: DP struct
+ * @old_crtc_state: Pointer to old crtc_state.
+ *
+ */
+void intel_edp_drrs_disable(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *old_crtc_state)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ if (!old_crtc_state->has_drrs)
+ return;
+
+ mutex_lock(&dev_priv->drrs.mutex);
+ if (!dev_priv->drrs.dp) {
+ mutex_unlock(&dev_priv->drrs.mutex);
+ return;
+ }
+
+ if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
+ intel_dp_set_drrs_state(dev_priv, old_crtc_state,
+ intel_dp->attached_connector->panel.fixed_mode->vrefresh);
+
+ dev_priv->drrs.dp = NULL;
+ mutex_unlock(&dev_priv->drrs.mutex);
+
+ cancel_delayed_work_sync(&dev_priv->drrs.work);
+}
+
+static void intel_edp_drrs_downclock_work(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, typeof(*dev_priv), drrs.work.work);
+ struct intel_dp *intel_dp;
+
+ mutex_lock(&dev_priv->drrs.mutex);
+
+ intel_dp = dev_priv->drrs.dp;
+
+ if (!intel_dp)
+ goto unlock;
+
+ /*
+ * The delayed work can race with an invalidate hence we need to
+ * recheck.
+ */
+
+ if (dev_priv->drrs.busy_frontbuffer_bits)
+ goto unlock;
+
+ if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) {
+ struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
+
+ intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
+ intel_dp->attached_connector->panel.downclock_mode->vrefresh);
+ }
+
+unlock:
+ mutex_unlock(&dev_priv->drrs.mutex);
+}
+
+/**
+ * intel_edp_drrs_invalidate - Disable Idleness DRRS
+ * @dev_priv: i915 device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called every time rendering on the given planes starts.
+ * Hence DRRS needs to be upclocked, i.e. (LOW_RR -> HIGH_RR).
+ *
+ * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
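+ *
+ * Only the bits belonging to the pipe driving the DRRS panel matter here:
+ * for example, rendering that only dirties frontbuffers on another pipe is
+ * masked out below and neither marks DRRS busy nor forces an upclock.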
+ */ +void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv, + unsigned int frontbuffer_bits) +{ + struct drm_crtc *crtc; + enum pipe pipe; + + if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED) + return; + + cancel_delayed_work(&dev_priv->drrs.work); + + mutex_lock(&dev_priv->drrs.mutex); + if (!dev_priv->drrs.dp) { + mutex_unlock(&dev_priv->drrs.mutex); + return; + } + + crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc; + pipe = to_intel_crtc(crtc)->pipe; + + frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe); + dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits; + + /* invalidate means busy screen hence upclock */ + if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) + intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config, + dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh); + + mutex_unlock(&dev_priv->drrs.mutex); +} + +/** + * intel_edp_drrs_flush - Restart Idleness DRRS + * @dev_priv: i915 device + * @frontbuffer_bits: frontbuffer plane tracking bits + * + * This function gets called every time rendering on the given planes has + * completed or flip on a crtc is completed. So DRRS should be upclocked + * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again, + * if no other planes are dirty. + * + * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits. + */ +void intel_edp_drrs_flush(struct drm_i915_private *dev_priv, + unsigned int frontbuffer_bits) +{ + struct drm_crtc *crtc; + enum pipe pipe; + + if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED) + return; + + cancel_delayed_work(&dev_priv->drrs.work); + + mutex_lock(&dev_priv->drrs.mutex); + if (!dev_priv->drrs.dp) { + mutex_unlock(&dev_priv->drrs.mutex); + return; + } + + crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc; + pipe = to_intel_crtc(crtc)->pipe; + + frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe); + dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits; + + /* flush means busy screen hence upclock */ + if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) + intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config, + dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh); + + /* + * flush also means no more activity hence schedule downclock, if all + * other fbs are quiescent too + */ + if (!dev_priv->drrs.busy_frontbuffer_bits) + schedule_delayed_work(&dev_priv->drrs.work, + msecs_to_jiffies(1000)); + mutex_unlock(&dev_priv->drrs.mutex); +} + +/** + * DOC: Display Refresh Rate Switching (DRRS) + * + * Display Refresh Rate Switching (DRRS) is a power conservation feature + * which enables swtching between low and high refresh rates, + * dynamically, based on the usage scenario. This feature is applicable + * for internal panels. + * + * Indication that the panel supports DRRS is given by the panel EDID, which + * would list multiple refresh rates for one resolution. + * + * DRRS is of 2 types - static and seamless. + * Static DRRS involves changing refresh rate (RR) by doing a full modeset + * (may appear as a blink on screen) and is used in dock-undock scenario. + * Seamless DRRS involves changing RR without any visual effect to the user + * and can be used during normal system usage. This is done by programming + * certain registers. + * + * Support for static/seamless DRRS may be indicated in the VBT based on + * inputs from the panel spec. + * + * DRRS saves power by switching to low RR based on usage scenarios. 
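+ *
+ * As an illustrative example (timings made up for this note), a panel
+ * whose EDID lists both 60Hz and 40Hz variants of its native mode is
+ * driven at 40Hz while the screen is idle and returns to 60Hz as soon
+ * as a frontbuffer invalidate arrives.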
+ * + * The implementation is based on frontbuffer tracking implementation. When + * there is a disturbance on the screen triggered by user activity or a periodic + * system activity, DRRS is disabled (RR is changed to high RR). When there is + * no movement on screen, after a timeout of 1 second, a switch to low RR is + * made. + * + * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate() + * and intel_edp_drrs_flush() are called. + * + * DRRS can be further extended to support other internal panels and also + * the scenario of video playback wherein RR is set based on the rate + * requested by userspace. + */ + +/** + * intel_dp_drrs_init - Init basic DRRS work and mutex. + * @connector: eDP connector + * @fixed_mode: preferred mode of panel + * + * This function is called only once at driver load to initialize basic + * DRRS stuff. + * + * Returns: + * Downclock mode if panel supports it, else return NULL. + * DRRS support is determined by the presence of downclock mode (apart + * from VBT setting). + */ +static struct drm_display_mode * +intel_dp_drrs_init(struct intel_connector *connector, + struct drm_display_mode *fixed_mode) +{ + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct drm_display_mode *downclock_mode = NULL; + + INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work); + mutex_init(&dev_priv->drrs.mutex); + + if (INTEL_GEN(dev_priv) <= 6) { + DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n"); + return NULL; + } + + if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) { + DRM_DEBUG_KMS("VBT doesn't support DRRS\n"); + return NULL; + } + + downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode); + if (!downclock_mode) { + DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n"); + return NULL; + } + + dev_priv->drrs.type = dev_priv->vbt.drrs_type; + + dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR; + DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n"); + return downclock_mode; +} + +static bool intel_edp_init_connector(struct intel_dp *intel_dp, + struct intel_connector *intel_connector) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct drm_device *dev = &dev_priv->drm; + struct drm_connector *connector = &intel_connector->base; + struct drm_display_mode *fixed_mode = NULL; + struct drm_display_mode *downclock_mode = NULL; + bool has_dpcd; + enum pipe pipe = INVALID_PIPE; + intel_wakeref_t wakeref; + struct edid *edid; + + if (!intel_dp_is_edp(intel_dp)) + return true; + + INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, edp_panel_vdd_work); + + /* + * On IBX/CPT we may get here with LVDS already registered. Since the + * driver uses the only internal power sequencer available for both + * eDP and LVDS bail out early in this case to prevent interfering + * with an already powered-on LVDS power sequencer. + */ + if (intel_get_lvds_encoder(dev_priv)) { + WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))); + DRM_INFO("LVDS was detected, not registering eDP\n"); + + return false; + } + + with_pps_lock(intel_dp, wakeref) { + intel_dp_init_panel_power_timestamps(intel_dp); + intel_dp_pps_init(intel_dp); + intel_edp_panel_vdd_sanitize(intel_dp); + } + + /* Cache DPCD and EDID for edp. 
*/ + has_dpcd = intel_edp_init_dpcd(intel_dp); + + if (!has_dpcd) { + /* if this fails, presume the device is a ghost */ + DRM_INFO("failed to retrieve link info, disabling eDP\n"); + goto out_vdd_off; + } + + mutex_lock(&dev->mode_config.mutex); + edid = drm_get_edid(connector, &intel_dp->aux.ddc); + if (edid) { + if (drm_add_edid_modes(connector, edid)) { + drm_connector_update_edid_property(connector, + edid); + } else { + kfree(edid); + edid = ERR_PTR(-EINVAL); + } + } else { + edid = ERR_PTR(-ENOENT); + } + intel_connector->edid = edid; + + fixed_mode = intel_panel_edid_fixed_mode(intel_connector); + if (fixed_mode) + downclock_mode = intel_dp_drrs_init(intel_connector, fixed_mode); + + /* fallback to VBT if available for eDP */ + if (!fixed_mode) + fixed_mode = intel_panel_vbt_fixed_mode(intel_connector); + mutex_unlock(&dev->mode_config.mutex); + + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { + intel_dp->edp_notifier.notifier_call = edp_notify_handler; + register_reboot_notifier(&intel_dp->edp_notifier); + + /* + * Figure out the current pipe for the initial backlight setup. + * If the current pipe isn't valid, try the PPS pipe, and if that + * fails just assume pipe A. + */ + pipe = vlv_active_pipe(intel_dp); + + if (pipe != PIPE_A && pipe != PIPE_B) + pipe = intel_dp->pps_pipe; + + if (pipe != PIPE_A && pipe != PIPE_B) + pipe = PIPE_A; + + DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n", + pipe_name(pipe)); + } + + intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode); + intel_connector->panel.backlight.power = intel_edp_backlight_power; + intel_panel_setup_backlight(connector, pipe); + + if (fixed_mode) + drm_connector_init_panel_orientation_property( + connector, fixed_mode->hdisplay, fixed_mode->vdisplay); + + return true; + +out_vdd_off: + cancel_delayed_work_sync(&intel_dp->panel_vdd_work); + /* + * vdd might still be enabled do to the delayed vdd off. + * Make sure vdd is actually turned off here. + */ + with_pps_lock(intel_dp, wakeref) + edp_panel_vdd_off_sync(intel_dp); + + return false; +} + +static void intel_dp_modeset_retry_work_fn(struct work_struct *work) +{ + struct intel_connector *intel_connector; + struct drm_connector *connector; + + intel_connector = container_of(work, typeof(*intel_connector), + modeset_retry_work); + connector = &intel_connector->base; + DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, + connector->name); + + /* Grab the locks before changing connector property*/ + mutex_lock(&connector->dev->mode_config.mutex); + /* Set connector link status to BAD and send a Uevent to notify + * userspace to do a modeset. 
+ */ + drm_connector_set_link_status_property(connector, + DRM_MODE_LINK_STATUS_BAD); + mutex_unlock(&connector->dev->mode_config.mutex); + /* Send Hotplug uevent so userspace can reprobe */ + drm_kms_helper_hotplug_event(connector->dev); +} + +bool +intel_dp_init_connector(struct intel_digital_port *intel_dig_port, + struct intel_connector *intel_connector) +{ + struct drm_connector *connector = &intel_connector->base; + struct intel_dp *intel_dp = &intel_dig_port->dp; + struct intel_encoder *intel_encoder = &intel_dig_port->base; + struct drm_device *dev = intel_encoder->base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); + enum port port = intel_encoder->port; + int type; + + /* Initialize the work for modeset in case of link train failure */ + INIT_WORK(&intel_connector->modeset_retry_work, + intel_dp_modeset_retry_work_fn); + + if (WARN(intel_dig_port->max_lanes < 1, + "Not enough lanes (%d) for DP on port %c\n", + intel_dig_port->max_lanes, port_name(port))) + return false; + + intel_dp_set_source_rates(intel_dp); + + intel_dp->reset_link_params = true; + intel_dp->pps_pipe = INVALID_PIPE; + intel_dp->active_pipe = INVALID_PIPE; + + /* Preserve the current hw state. */ + intel_dp->DP = I915_READ(intel_dp->output_reg); + intel_dp->attached_connector = intel_connector; + + if (intel_dp_is_port_edp(dev_priv, port)) { + /* + * Currently we don't support eDP on TypeC ports, although in + * theory it could work on TypeC legacy ports. + */ + WARN_ON(intel_port_is_tc(dev_priv, port)); + type = DRM_MODE_CONNECTOR_eDP; + } else { + type = DRM_MODE_CONNECTOR_DisplayPort; + } + + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + intel_dp->active_pipe = vlv_active_pipe(intel_dp); + + /* + * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but + * for DP the encoder type can be set by the caller to + * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it. + */ + if (type == DRM_MODE_CONNECTOR_eDP) + intel_encoder->type = INTEL_OUTPUT_EDP; + + /* eDP only on port B and/or C on vlv/chv */ + if (WARN_ON((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && + intel_dp_is_edp(intel_dp) && + port != PORT_B && port != PORT_C)) + return false; + + DRM_DEBUG_KMS("Adding %s connector on port %c\n", + type == DRM_MODE_CONNECTOR_eDP ? 
"eDP" : "DP", + port_name(port)); + + drm_connector_init(dev, connector, &intel_dp_connector_funcs, type); + drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); + + if (!HAS_GMCH(dev_priv)) + connector->interlace_allowed = true; + connector->doublescan_allowed = 0; + + if (INTEL_GEN(dev_priv) >= 11) + connector->ycbcr_420_allowed = true; + + intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port); + + intel_dp_aux_init(intel_dp); + + intel_connector_attach_encoder(intel_connector, intel_encoder); + + if (HAS_DDI(dev_priv)) + intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; + else + intel_connector->get_hw_state = intel_connector_get_hw_state; + + /* init MST on ports that can support it */ + if (HAS_DP_MST(dev_priv) && !intel_dp_is_edp(intel_dp) && + (port == PORT_B || port == PORT_C || + port == PORT_D || port == PORT_F)) + intel_dp_mst_encoder_init(intel_dig_port, + intel_connector->base.base.id); + + if (!intel_edp_init_connector(intel_dp, intel_connector)) { + intel_dp_aux_fini(intel_dp); + intel_dp_mst_encoder_cleanup(intel_dig_port); + goto fail; + } + + intel_dp_add_properties(intel_dp, connector); + + if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) { + int ret = intel_hdcp_init(intel_connector, &intel_dp_hdcp_shim); + if (ret) + DRM_DEBUG_KMS("HDCP init failed, skipping.\n"); + } + + /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written + * 0xd. Failure to do so will result in spurious interrupts being + * generated on the port when a cable is not attached. + */ + if (IS_G45(dev_priv)) { + u32 temp = I915_READ(PEG_BAND_GAP_DATA); + I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd); + } + + return true; + +fail: + drm_connector_cleanup(connector); + + return false; +} + +bool intel_dp_init(struct drm_i915_private *dev_priv, + i915_reg_t output_reg, + enum port port) +{ + struct intel_digital_port *intel_dig_port; + struct intel_encoder *intel_encoder; + struct drm_encoder *encoder; + struct intel_connector *intel_connector; + + intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL); + if (!intel_dig_port) + return false; + + intel_connector = intel_connector_alloc(); + if (!intel_connector) + goto err_connector_alloc; + + intel_encoder = &intel_dig_port->base; + encoder = &intel_encoder->base; + + if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base, + &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS, + "DP %c", port_name(port))) + goto err_encoder_init; + + intel_encoder->hotplug = intel_dp_hotplug; + intel_encoder->compute_config = intel_dp_compute_config; + intel_encoder->get_hw_state = intel_dp_get_hw_state; + intel_encoder->get_config = intel_dp_get_config; + intel_encoder->update_pipe = intel_panel_update_backlight; + intel_encoder->suspend = intel_dp_encoder_suspend; + if (IS_CHERRYVIEW(dev_priv)) { + intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable; + intel_encoder->pre_enable = chv_pre_enable_dp; + intel_encoder->enable = vlv_enable_dp; + intel_encoder->disable = vlv_disable_dp; + intel_encoder->post_disable = chv_post_disable_dp; + intel_encoder->post_pll_disable = chv_dp_post_pll_disable; + } else if (IS_VALLEYVIEW(dev_priv)) { + intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable; + intel_encoder->pre_enable = vlv_pre_enable_dp; + intel_encoder->enable = vlv_enable_dp; + intel_encoder->disable = vlv_disable_dp; + intel_encoder->post_disable = vlv_post_disable_dp; + } else { + intel_encoder->pre_enable = g4x_pre_enable_dp; + intel_encoder->enable = g4x_enable_dp; + 
intel_encoder->disable = g4x_disable_dp; + intel_encoder->post_disable = g4x_post_disable_dp; + } + + intel_dig_port->dp.output_reg = output_reg; + intel_dig_port->max_lanes = 4; + + intel_encoder->type = INTEL_OUTPUT_DP; + intel_encoder->power_domain = intel_port_to_power_domain(port); + if (IS_CHERRYVIEW(dev_priv)) { + if (port == PORT_D) + intel_encoder->crtc_mask = 1 << 2; + else + intel_encoder->crtc_mask = (1 << 0) | (1 << 1); + } else { + intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); + } + intel_encoder->cloneable = 0; + intel_encoder->port = port; + + intel_dig_port->hpd_pulse = intel_dp_hpd_pulse; + + if (port != PORT_A) + intel_infoframe_init(intel_dig_port); + + intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port); + if (!intel_dp_init_connector(intel_dig_port, intel_connector)) + goto err_init_connector; + + return true; + +err_init_connector: + drm_encoder_cleanup(encoder); +err_encoder_init: + kfree(intel_connector); +err_connector_alloc: + kfree(intel_dig_port); + return false; +} + +void intel_dp_mst_suspend(struct drm_i915_private *dev_priv) +{ + struct intel_encoder *encoder; + + for_each_intel_encoder(&dev_priv->drm, encoder) { + struct intel_dp *intel_dp; + + if (encoder->type != INTEL_OUTPUT_DDI) + continue; + + intel_dp = enc_to_intel_dp(&encoder->base); + + if (!intel_dp->can_mst) + continue; + + if (intel_dp->is_mst) + drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr); + } +} + +void intel_dp_mst_resume(struct drm_i915_private *dev_priv) +{ + struct intel_encoder *encoder; + + for_each_intel_encoder(&dev_priv->drm, encoder) { + struct intel_dp *intel_dp; + int ret; + + if (encoder->type != INTEL_OUTPUT_DDI) + continue; + + intel_dp = enc_to_intel_dp(&encoder->base); + + if (!intel_dp->can_mst) + continue; + + ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr); + if (ret) { + intel_dp->is_mst = false; + drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, + false); + } + } +} diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h new file mode 100644 index 000000000000..da70b1a41c83 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_dp.h @@ -0,0 +1,123 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2019 Intel Corporation + */ + +#ifndef __INTEL_DP_H__ +#define __INTEL_DP_H__ + +#include + +#include + +#include "i915_reg.h" + +enum pipe; +struct drm_connector_state; +struct drm_encoder; +struct drm_i915_private; +struct drm_modeset_acquire_ctx; +struct intel_connector; +struct intel_crtc_state; +struct intel_digital_port; +struct intel_dp; +struct intel_encoder; + +struct link_config_limits { + int min_clock, max_clock; + int min_lane_count, max_lane_count; + int min_bpp, max_bpp; +}; + +void intel_dp_adjust_compliance_config(struct intel_dp *intel_dp, + struct intel_crtc_state *pipe_config, + struct link_config_limits *limits); +bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state); +int intel_dp_min_bpp(const struct intel_crtc_state *crtc_state); +bool intel_dp_port_enabled(struct drm_i915_private *dev_priv, + i915_reg_t dp_reg, enum port port, + enum pipe *pipe); +bool intel_dp_init(struct drm_i915_private *dev_priv, i915_reg_t output_reg, + enum port port); +bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port, + struct intel_connector *intel_connector); +void intel_dp_set_link_params(struct intel_dp *intel_dp, + int link_rate, u8 lane_count, + bool link_mst); +int 
intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp, + int link_rate, u8 lane_count); +int intel_dp_retrain_link(struct intel_encoder *encoder, + struct drm_modeset_acquire_ctx *ctx); +void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode); +void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state, + bool enable); +void intel_dp_encoder_reset(struct drm_encoder *encoder); +void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder); +void intel_dp_encoder_flush_work(struct drm_encoder *encoder); +int intel_dp_compute_config(struct intel_encoder *encoder, + struct intel_crtc_state *pipe_config, + struct drm_connector_state *conn_state); +bool intel_dp_is_edp(struct intel_dp *intel_dp); +bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port); +enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, + bool long_hpd); +void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state); +void intel_edp_backlight_off(const struct drm_connector_state *conn_state); +void intel_edp_panel_vdd_on(struct intel_dp *intel_dp); +void intel_edp_panel_on(struct intel_dp *intel_dp); +void intel_edp_panel_off(struct intel_dp *intel_dp); +void intel_dp_mst_suspend(struct drm_i915_private *dev_priv); +void intel_dp_mst_resume(struct drm_i915_private *dev_priv); +int intel_dp_max_link_rate(struct intel_dp *intel_dp); +int intel_dp_max_lane_count(struct intel_dp *intel_dp); +int intel_dp_rate_select(struct intel_dp *intel_dp, int rate); +void intel_power_sequencer_reset(struct drm_i915_private *dev_priv); +u32 intel_dp_pack_aux(const u8 *src, int src_bytes); + +void intel_edp_drrs_enable(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state); +void intel_edp_drrs_disable(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state); +void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv, + unsigned int frontbuffer_bits); +void intel_edp_drrs_flush(struct drm_i915_private *dev_priv, + unsigned int frontbuffer_bits); + +void +intel_dp_program_link_training_pattern(struct intel_dp *intel_dp, + u8 dp_train_pat); +void +intel_dp_set_signal_levels(struct intel_dp *intel_dp); +void intel_dp_set_idle_link_train(struct intel_dp *intel_dp); +u8 +intel_dp_voltage_max(struct intel_dp *intel_dp); +u8 +intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, u8 voltage_swing); +void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock, + u8 *link_bw, u8 *rate_select); +bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp); +bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp); +bool +intel_dp_get_link_status(struct intel_dp *intel_dp, u8 *link_status); +u16 intel_dp_dsc_get_output_bpp(int link_clock, u8 lane_count, + int mode_clock, int mode_hdisplay); +u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, int mode_clock, + int mode_hdisplay); + +bool intel_dp_read_dpcd(struct intel_dp *intel_dp); +bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp); +int intel_dp_link_required(int pixel_clock, int bpp); +int intel_dp_max_data_rate(int max_link_clock, int max_lanes); +bool intel_digital_port_connected(struct intel_encoder *encoder); +void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv, + struct intel_digital_port *dig_port); + +static inline unsigned int intel_dp_unused_lane_mask(int lane_count) +{ + return ~((1 << lane_count) - 1) & 0xf; +} + +#endif /* 
__INTEL_DP_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c new file mode 100644 index 000000000000..7ded95a334db --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c @@ -0,0 +1,281 @@ +/* + * Copyright © 2015 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#include "intel_dp_aux_backlight.h" +#include "intel_drv.h" + +static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable) +{ + u8 reg_val = 0; + + /* Early return when display use other mechanism to enable backlight. */ + if (!(intel_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_AUX_ENABLE_CAP)) + return; + + if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_DISPLAY_CONTROL_REGISTER, + ®_val) < 0) { + DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n", + DP_EDP_DISPLAY_CONTROL_REGISTER); + return; + } + if (enable) + reg_val |= DP_EDP_BACKLIGHT_ENABLE; + else + reg_val &= ~(DP_EDP_BACKLIGHT_ENABLE); + + if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_EDP_DISPLAY_CONTROL_REGISTER, + reg_val) != 1) { + DRM_DEBUG_KMS("Failed to %s aux backlight\n", + enable ? 
"enable" : "disable"); + } +} + +/* + * Read the current backlight value from DPCD register(s) based + * on if 8-bit(MSB) or 16-bit(MSB and LSB) values are supported + */ +static u32 intel_dp_aux_get_backlight(struct intel_connector *connector) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base); + u8 read_val[2] = { 0x0 }; + u16 level = 0; + + if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, + &read_val, sizeof(read_val)) < 0) { + DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n", + DP_EDP_BACKLIGHT_BRIGHTNESS_MSB); + return 0; + } + level = read_val[0]; + if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT) + level = (read_val[0] << 8 | read_val[1]); + + return level; +} + +/* + * Sends the current backlight level over the aux channel, checking if its using + * 8-bit or 16 bit value (MSB and LSB) + */ +static void +intel_dp_aux_set_backlight(const struct drm_connector_state *conn_state, u32 level) +{ + struct intel_connector *connector = to_intel_connector(conn_state->connector); + struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base); + u8 vals[2] = { 0x0 }; + + vals[0] = level; + + /* Write the MSB and/or LSB */ + if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT) { + vals[0] = (level & 0xFF00) >> 8; + vals[1] = (level & 0xFF); + } + if (drm_dp_dpcd_write(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, + vals, sizeof(vals)) < 0) { + DRM_DEBUG_KMS("Failed to write aux backlight level\n"); + return; + } +} + +/* + * Set PWM Frequency divider to match desired frequency in vbt. + * The PWM Frequency is calculated as 27Mhz / (F x P). + * - Where F = PWM Frequency Pre-Divider value programmed by field 7:0 of the + * EDP_BACKLIGHT_FREQ_SET register (DPCD Address 00728h) + * - Where P = 2^Pn, where Pn is the value programmed by field 4:0 of the + * EDP_PWMGEN_BIT_COUNT register (DPCD Address 00724h) + */ +static bool intel_dp_aux_set_pwm_freq(struct intel_connector *connector) +{ + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base); + int freq, fxp, fxp_min, fxp_max, fxp_actual, f = 1; + u8 pn, pn_min, pn_max; + + /* Find desired value of (F x P) + * Note that, if F x P is out of supported range, the maximum value or + * minimum value will applied automatically. So no need to check that. + */ + freq = dev_priv->vbt.backlight.pwm_freq_hz; + DRM_DEBUG_KMS("VBT defined backlight frequency %u Hz\n", freq); + if (!freq) { + DRM_DEBUG_KMS("Use panel default backlight frequency\n"); + return false; + } + + fxp = DIV_ROUND_CLOSEST(KHz(DP_EDP_BACKLIGHT_FREQ_BASE_KHZ), freq); + + /* Use highest possible value of Pn for more granularity of brightness + * adjustment while satifying the conditions below. + * - Pn is in the range of Pn_min and Pn_max + * - F is in the range of 1 and 255 + * - FxP is within 25% of desired value. + * Note: 25% is arbitrary value and may need some tweak. 
+ */ + if (drm_dp_dpcd_readb(&intel_dp->aux, + DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN, &pn_min) != 1) { + DRM_DEBUG_KMS("Failed to read pwmgen bit count cap min\n"); + return false; + } + if (drm_dp_dpcd_readb(&intel_dp->aux, + DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX, &pn_max) != 1) { + DRM_DEBUG_KMS("Failed to read pwmgen bit count cap max\n"); + return false; + } + pn_min &= DP_EDP_PWMGEN_BIT_COUNT_MASK; + pn_max &= DP_EDP_PWMGEN_BIT_COUNT_MASK; + + fxp_min = DIV_ROUND_CLOSEST(fxp * 3, 4); + fxp_max = DIV_ROUND_CLOSEST(fxp * 5, 4); + if (fxp_min < (1 << pn_min) || (255 << pn_max) < fxp_max) { + DRM_DEBUG_KMS("VBT defined backlight frequency out of range\n"); + return false; + } + + for (pn = pn_max; pn >= pn_min; pn--) { + f = clamp(DIV_ROUND_CLOSEST(fxp, 1 << pn), 1, 255); + fxp_actual = f << pn; + if (fxp_min <= fxp_actual && fxp_actual <= fxp_max) + break; + } + + if (drm_dp_dpcd_writeb(&intel_dp->aux, + DP_EDP_PWMGEN_BIT_COUNT, pn) < 0) { + DRM_DEBUG_KMS("Failed to write aux pwmgen bit count\n"); + return false; + } + if (drm_dp_dpcd_writeb(&intel_dp->aux, + DP_EDP_BACKLIGHT_FREQ_SET, (u8) f) < 0) { + DRM_DEBUG_KMS("Failed to write aux backlight freq\n"); + return false; + } + return true; +} + +static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + struct intel_connector *connector = to_intel_connector(conn_state->connector); + struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base); + u8 dpcd_buf, new_dpcd_buf, edp_backlight_mode; + + if (drm_dp_dpcd_readb(&intel_dp->aux, + DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &dpcd_buf) != 1) { + DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n", + DP_EDP_BACKLIGHT_MODE_SET_REGISTER); + return; + } + + new_dpcd_buf = dpcd_buf; + edp_backlight_mode = dpcd_buf & DP_EDP_BACKLIGHT_CONTROL_MODE_MASK; + + switch (edp_backlight_mode) { + case DP_EDP_BACKLIGHT_CONTROL_MODE_PWM: + case DP_EDP_BACKLIGHT_CONTROL_MODE_PRESET: + case DP_EDP_BACKLIGHT_CONTROL_MODE_PRODUCT: + new_dpcd_buf &= ~DP_EDP_BACKLIGHT_CONTROL_MODE_MASK; + new_dpcd_buf |= DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD; + break; + + /* Do nothing when it is already DPCD mode */ + case DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD: + default: + break; + } + + if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_FREQ_AUX_SET_CAP) + if (intel_dp_aux_set_pwm_freq(connector)) + new_dpcd_buf |= DP_EDP_BACKLIGHT_FREQ_AUX_SET_ENABLE; + + if (new_dpcd_buf != dpcd_buf) { + if (drm_dp_dpcd_writeb(&intel_dp->aux, + DP_EDP_BACKLIGHT_MODE_SET_REGISTER, new_dpcd_buf) < 0) { + DRM_DEBUG_KMS("Failed to write aux backlight mode\n"); + } + } + + set_aux_backlight_enable(intel_dp, true); + intel_dp_aux_set_backlight(conn_state, connector->panel.backlight.level); +} + +static void intel_dp_aux_disable_backlight(const struct drm_connector_state *old_conn_state) +{ + set_aux_backlight_enable(enc_to_intel_dp(old_conn_state->best_encoder), false); +} + +static int intel_dp_aux_setup_backlight(struct intel_connector *connector, + enum pipe pipe) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base); + struct intel_panel *panel = &connector->panel; + + if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT) + panel->backlight.max = 0xFFFF; + else + panel->backlight.max = 0xFF; + + panel->backlight.min = 0; + panel->backlight.level = intel_dp_aux_get_backlight(connector); + + panel->backlight.enabled = panel->backlight.level != 0; + + return 0; +} + +static bool +intel_dp_aux_display_control_capable(struct intel_connector 
*connector) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base); + + /* Check the eDP Display control capabilities registers to determine if + * the panel can support backlight control over the aux channel + */ + if (intel_dp->edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP && + (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_AUX_SET_CAP) && + !(intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP)) { + DRM_DEBUG_KMS("AUX Backlight Control Supported!\n"); + return true; + } + return false; +} + +int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector) +{ + struct intel_panel *panel = &intel_connector->panel; + + if (!i915_modparams.enable_dpcd_backlight) + return -ENODEV; + + if (!intel_dp_aux_display_control_capable(intel_connector)) + return -ENODEV; + + panel->backlight.setup = intel_dp_aux_setup_backlight; + panel->backlight.enable = intel_dp_aux_enable_backlight; + panel->backlight.disable = intel_dp_aux_disable_backlight; + panel->backlight.set = intel_dp_aux_set_backlight; + panel->backlight.get = intel_dp_aux_get_backlight; + + return 0; +} diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.h b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.h new file mode 100644 index 000000000000..ed60c2858967 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2019 Intel Corporation + */ + +#ifndef __INTEL_DP_AUX_BACKLIGHT_H__ +#define __INTEL_DP_AUX_BACKLIGHT_H__ + +struct intel_connector; + +int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector); + +#endif /* __INTEL_DP_AUX_BACKLIGHT_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c new file mode 100644 index 000000000000..9b1fccea966b --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c @@ -0,0 +1,382 @@ +/* + * Copyright © 2008-2015 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ */ + +#include "intel_dp.h" +#include "intel_dp_link_training.h" +#include "intel_drv.h" + +static void +intel_dp_dump_link_status(const u8 link_status[DP_LINK_STATUS_SIZE]) +{ + + DRM_DEBUG_KMS("ln0_1:0x%x ln2_3:0x%x align:0x%x sink:0x%x adj_req0_1:0x%x adj_req2_3:0x%x", + link_status[0], link_status[1], link_status[2], + link_status[3], link_status[4], link_status[5]); +} + +static void +intel_get_adjust_train(struct intel_dp *intel_dp, + const u8 link_status[DP_LINK_STATUS_SIZE]) +{ + u8 v = 0; + u8 p = 0; + int lane; + u8 voltage_max; + u8 preemph_max; + + for (lane = 0; lane < intel_dp->lane_count; lane++) { + u8 this_v = drm_dp_get_adjust_request_voltage(link_status, lane); + u8 this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane); + + if (this_v > v) + v = this_v; + if (this_p > p) + p = this_p; + } + + voltage_max = intel_dp_voltage_max(intel_dp); + if (v >= voltage_max) + v = voltage_max | DP_TRAIN_MAX_SWING_REACHED; + + preemph_max = intel_dp_pre_emphasis_max(intel_dp, v); + if (p >= preemph_max) + p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; + + for (lane = 0; lane < 4; lane++) + intel_dp->train_set[lane] = v | p; +} + +static bool +intel_dp_set_link_train(struct intel_dp *intel_dp, + u8 dp_train_pat) +{ + u8 buf[sizeof(intel_dp->train_set) + 1]; + int ret, len; + + intel_dp_program_link_training_pattern(intel_dp, dp_train_pat); + + buf[0] = dp_train_pat; + if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) == + DP_TRAINING_PATTERN_DISABLE) { + /* don't write DP_TRAINING_LANEx_SET on disable */ + len = 1; + } else { + /* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */ + memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count); + len = intel_dp->lane_count + 1; + } + + ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET, + buf, len); + + return ret == len; +} + +static bool +intel_dp_reset_link_train(struct intel_dp *intel_dp, + u8 dp_train_pat) +{ + memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set)); + intel_dp_set_signal_levels(intel_dp); + return intel_dp_set_link_train(intel_dp, dp_train_pat); +} + +static bool +intel_dp_update_link_train(struct intel_dp *intel_dp) +{ + int ret; + + intel_dp_set_signal_levels(intel_dp); + + ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET, + intel_dp->train_set, intel_dp->lane_count); + + return ret == intel_dp->lane_count; +} + +static bool intel_dp_link_max_vswing_reached(struct intel_dp *intel_dp) +{ + int lane; + + for (lane = 0; lane < intel_dp->lane_count; lane++) + if ((intel_dp->train_set[lane] & + DP_TRAIN_MAX_SWING_REACHED) == 0) + return false; + + return true; +} + +/* Enable corresponding port and start training pattern 1 */ +static bool +intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp) +{ + u8 voltage; + int voltage_tries, cr_tries, max_cr_tries; + bool max_vswing_reached = false; + u8 link_config[2]; + u8 link_bw, rate_select; + + if (intel_dp->prepare_link_retrain) + intel_dp->prepare_link_retrain(intel_dp); + + intel_dp_compute_rate(intel_dp, intel_dp->link_rate, + &link_bw, &rate_select); + + if (link_bw) + DRM_DEBUG_KMS("Using LINK_BW_SET value %02x\n", link_bw); + else + DRM_DEBUG_KMS("Using LINK_RATE_SET value %02x\n", rate_select); + + /* Write the link configuration data */ + link_config[0] = link_bw; + link_config[1] = intel_dp->lane_count; + if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) + link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; + drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2); + + /* eDP 1.4 rate select 
method. */ + if (!link_bw) + drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET, + &rate_select, 1); + + link_config[0] = 0; + link_config[1] = DP_SET_ANSI_8B10B; + drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2); + + intel_dp->DP |= DP_PORT_EN; + + /* clock recovery */ + if (!intel_dp_reset_link_train(intel_dp, + DP_TRAINING_PATTERN_1 | + DP_LINK_SCRAMBLING_DISABLE)) { + DRM_ERROR("failed to enable link training\n"); + return false; + } + + /* + * The DP 1.4 spec defines the max clock recovery retries value + * as 10 but for pre-DP 1.4 devices we set a very tolerant + * retry limit of 80 (4 voltage levels x 4 preemphasis levels x + * x 5 identical voltage retries). Since the previous specs didn't + * define a limit and created the possibility of an infinite loop + * we want to prevent any sync from triggering that corner case. + */ + if (intel_dp->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14) + max_cr_tries = 10; + else + max_cr_tries = 80; + + voltage_tries = 1; + for (cr_tries = 0; cr_tries < max_cr_tries; ++cr_tries) { + u8 link_status[DP_LINK_STATUS_SIZE]; + + drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd); + + if (!intel_dp_get_link_status(intel_dp, link_status)) { + DRM_ERROR("failed to get link status\n"); + return false; + } + + if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) { + DRM_DEBUG_KMS("clock recovery OK\n"); + return true; + } + + if (voltage_tries == 5) { + DRM_DEBUG_KMS("Same voltage tried 5 times\n"); + return false; + } + + if (max_vswing_reached) { + DRM_DEBUG_KMS("Max Voltage Swing reached\n"); + return false; + } + + voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; + + /* Update training set as requested by target */ + intel_get_adjust_train(intel_dp, link_status); + if (!intel_dp_update_link_train(intel_dp)) { + DRM_ERROR("failed to update link training\n"); + return false; + } + + if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == + voltage) + ++voltage_tries; + else + voltage_tries = 1; + + if (intel_dp_link_max_vswing_reached(intel_dp)) + max_vswing_reached = true; + + } + DRM_ERROR("Failed clock recovery %d times, giving up!\n", max_cr_tries); + return false; +} + +/* + * Pick training pattern for channel equalization. Training pattern 4 for HBR3 + * or for 1.4 devices that support it, training Pattern 3 for HBR2 + * or 1.2 devices that support it, Training Pattern 2 otherwise. + */ +static u32 intel_dp_training_pattern(struct intel_dp *intel_dp) +{ + bool source_tps3, sink_tps3, source_tps4, sink_tps4; + + /* + * Intel platforms that support HBR3 also support TPS4. It is mandatory + * for all downstream devices that support HBR3. There are no known eDP + * panels that support TPS4 as of Feb 2018 as per VESA eDP_v1.4b_E1 + * specification. + */ + source_tps4 = intel_dp_source_supports_hbr3(intel_dp); + sink_tps4 = drm_dp_tps4_supported(intel_dp->dpcd); + if (source_tps4 && sink_tps4) { + return DP_TRAINING_PATTERN_4; + } else if (intel_dp->link_rate == 810000) { + if (!source_tps4) + DRM_DEBUG_KMS("8.1 Gbps link rate without source HBR3/TPS4 support\n"); + if (!sink_tps4) + DRM_DEBUG_KMS("8.1 Gbps link rate without sink TPS4 support\n"); + } + /* + * Intel platforms that support HBR2 also support TPS3. TPS3 support is + * also mandatory for downstream devices that support HBR2. However, not + * all sinks follow the spec. 
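+	 * A sink that advertises HBR2 rates but not TPS3 therefore ends up
+	 * being trained with TPS2 below, with only a debug message noting
+	 * the mismatch.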
+ */ + source_tps3 = intel_dp_source_supports_hbr2(intel_dp); + sink_tps3 = drm_dp_tps3_supported(intel_dp->dpcd); + if (source_tps3 && sink_tps3) { + return DP_TRAINING_PATTERN_3; + } else if (intel_dp->link_rate >= 540000) { + if (!source_tps3) + DRM_DEBUG_KMS(">=5.4/6.48 Gbps link rate without source HBR2/TPS3 support\n"); + if (!sink_tps3) + DRM_DEBUG_KMS(">=5.4/6.48 Gbps link rate without sink TPS3 support\n"); + } + + return DP_TRAINING_PATTERN_2; +} + +static bool +intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp) +{ + int tries; + u32 training_pattern; + u8 link_status[DP_LINK_STATUS_SIZE]; + bool channel_eq = false; + + training_pattern = intel_dp_training_pattern(intel_dp); + /* Scrambling is disabled for TPS2/3 and enabled for TPS4 */ + if (training_pattern != DP_TRAINING_PATTERN_4) + training_pattern |= DP_LINK_SCRAMBLING_DISABLE; + + /* channel equalization */ + if (!intel_dp_set_link_train(intel_dp, + training_pattern)) { + DRM_ERROR("failed to start channel equalization\n"); + return false; + } + + for (tries = 0; tries < 5; tries++) { + + drm_dp_link_train_channel_eq_delay(intel_dp->dpcd); + if (!intel_dp_get_link_status(intel_dp, link_status)) { + DRM_ERROR("failed to get link status\n"); + break; + } + + /* Make sure clock is still ok */ + if (!drm_dp_clock_recovery_ok(link_status, + intel_dp->lane_count)) { + intel_dp_dump_link_status(link_status); + DRM_DEBUG_KMS("Clock recovery check failed, cannot " + "continue channel equalization\n"); + break; + } + + if (drm_dp_channel_eq_ok(link_status, + intel_dp->lane_count)) { + channel_eq = true; + DRM_DEBUG_KMS("Channel EQ done. DP Training " + "successful\n"); + break; + } + + /* Update training set as requested by target */ + intel_get_adjust_train(intel_dp, link_status); + if (!intel_dp_update_link_train(intel_dp)) { + DRM_ERROR("failed to update link training\n"); + break; + } + } + + /* Try 5 times, else fail and try at lower BW */ + if (tries == 5) { + intel_dp_dump_link_status(link_status); + DRM_DEBUG_KMS("Channel equalization failed 5 times\n"); + } + + intel_dp_set_idle_link_train(intel_dp); + + return channel_eq; + +} + +void intel_dp_stop_link_train(struct intel_dp *intel_dp) +{ + intel_dp->link_trained = true; + + intel_dp_set_link_train(intel_dp, + DP_TRAINING_PATTERN_DISABLE); +} + +void +intel_dp_start_link_train(struct intel_dp *intel_dp) +{ + struct intel_connector *intel_connector = intel_dp->attached_connector; + + if (!intel_dp_link_training_clock_recovery(intel_dp)) + goto failure_handling; + if (!intel_dp_link_training_channel_equalization(intel_dp)) + goto failure_handling; + + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Link Training Passed at Link Rate = %d, Lane count = %d", + intel_connector->base.base.id, + intel_connector->base.name, + intel_dp->link_rate, intel_dp->lane_count); + return; + + failure_handling: + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Link Training failed at link rate = %d, lane count = %d", + intel_connector->base.base.id, + intel_connector->base.name, + intel_dp->link_rate, intel_dp->lane_count); + if (!intel_dp_get_link_train_fallback_values(intel_dp, + intel_dp->link_rate, + intel_dp->lane_count)) + /* Schedule a Hotplug Uevent to userspace to start modeset */ + schedule_work(&intel_connector->modeset_retry_work); + return; +} diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.h b/drivers/gpu/drm/i915/display/intel_dp_link_training.h new file mode 100644 index 000000000000..174566adcc92 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.h 
@@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2019 Intel Corporation + */ + +#ifndef __INTEL_DP_LINK_TRAINING_H__ +#define __INTEL_DP_LINK_TRAINING_H__ + +struct intel_dp; + +void intel_dp_start_link_train(struct intel_dp *intel_dp); +void intel_dp_stop_link_train(struct intel_dp *intel_dp); + +#endif /* __INTEL_DP_LINK_TRAINING_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c new file mode 100644 index 000000000000..0caf645fbbb8 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -0,0 +1,664 @@ +/* + * Copyright © 2008 Intel Corporation + * 2014 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + */ + +#include +#include +#include + +#include "i915_drv.h" +#include "intel_atomic.h" +#include "intel_audio.h" +#include "intel_connector.h" +#include "intel_ddi.h" +#include "intel_dp.h" +#include "intel_dp_mst.h" +#include "intel_dpio_phy.h" +#include "intel_drv.h" + +static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state, + struct drm_connector_state *conn_state, + struct link_config_limits *limits) +{ + struct drm_atomic_state *state = crtc_state->base.state; + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); + struct intel_dp *intel_dp = &intel_mst->primary->dp; + struct intel_connector *connector = + to_intel_connector(conn_state->connector); + const struct drm_display_mode *adjusted_mode = + &crtc_state->base.adjusted_mode; + void *port = connector->port; + bool constant_n = drm_dp_has_quirk(&intel_dp->desc, + DP_DPCD_QUIRK_CONSTANT_N); + int bpp, slots = -EINVAL; + + crtc_state->lane_count = limits->max_lane_count; + crtc_state->port_clock = limits->max_clock; + + for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) { + crtc_state->pipe_bpp = bpp; + + crtc_state->pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, + crtc_state->pipe_bpp); + + slots = drm_dp_atomic_find_vcpi_slots(state, &intel_dp->mst_mgr, + port, crtc_state->pbn); + if (slots == -EDEADLK) + return slots; + if (slots >= 0) + break; + } + + if (slots < 0) { + DRM_DEBUG_KMS("failed finding vcpi slots:%d\n", slots); + return slots; + } + + intel_link_compute_m_n(crtc_state->pipe_bpp, + crtc_state->lane_count, + adjusted_mode->crtc_clock, + crtc_state->port_clock, + &crtc_state->dp_m_n, + constant_n); + crtc_state->dp_m_n.tu = slots; + + return 0; +} + +static int intel_dp_mst_compute_config(struct intel_encoder *encoder, + struct intel_crtc_state *pipe_config, + struct drm_connector_state *conn_state) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); + struct intel_dp *intel_dp = &intel_mst->primary->dp; + struct intel_connector *connector = + to_intel_connector(conn_state->connector); + struct intel_digital_connector_state *intel_conn_state = + to_intel_digital_connector_state(conn_state); + const struct drm_display_mode *adjusted_mode = + &pipe_config->base.adjusted_mode; + void *port = connector->port; + struct link_config_limits limits; + int ret; + + if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) + return -EINVAL; + + pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; + pipe_config->has_pch_encoder = false; + + if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO) + pipe_config->has_audio = + drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, port); + else + pipe_config->has_audio = + intel_conn_state->force_audio == HDMI_AUDIO_ON; + + /* + * for MST we always configure max link bw - the spec doesn't + * seem to suggest we should do otherwise. 
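+	 * In practice this means the min and max clock/lane count limits
+	 * collapse to the same value below, and only the pipe bpp is stepped
+	 * down in intel_dp_mst_compute_link_config() while searching for a
+	 * VCPI slot allocation that fits.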
+ */ + limits.min_clock = + limits.max_clock = intel_dp_max_link_rate(intel_dp); + + limits.min_lane_count = + limits.max_lane_count = intel_dp_max_lane_count(intel_dp); + + limits.min_bpp = intel_dp_min_bpp(pipe_config); + limits.max_bpp = pipe_config->pipe_bpp; + + intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits); + + ret = intel_dp_mst_compute_link_config(encoder, pipe_config, + conn_state, &limits); + if (ret) + return ret; + + pipe_config->limited_color_range = + intel_dp_limited_color_range(pipe_config, conn_state); + + if (IS_GEN9_LP(dev_priv)) + pipe_config->lane_lat_optim_mask = + bxt_ddi_phy_calc_lane_lat_optim_mask(pipe_config->lane_count); + + intel_ddi_compute_min_voltage_level(dev_priv, pipe_config); + + return 0; +} + +static int +intel_dp_mst_atomic_check(struct drm_connector *connector, + struct drm_connector_state *new_conn_state) +{ + struct drm_atomic_state *state = new_conn_state->state; + struct drm_connector_state *old_conn_state = + drm_atomic_get_old_connector_state(state, connector); + struct intel_connector *intel_connector = + to_intel_connector(connector); + struct drm_crtc *new_crtc = new_conn_state->crtc; + struct drm_crtc_state *crtc_state; + struct drm_dp_mst_topology_mgr *mgr; + int ret; + + ret = intel_digital_connector_atomic_check(connector, new_conn_state); + if (ret) + return ret; + + if (!old_conn_state->crtc) + return 0; + + /* We only want to free VCPI if this state disables the CRTC on this + * connector + */ + if (new_crtc) { + crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc); + + if (!crtc_state || + !drm_atomic_crtc_needs_modeset(crtc_state) || + crtc_state->enable) + return 0; + } + + mgr = &enc_to_mst(old_conn_state->best_encoder)->primary->dp.mst_mgr; + ret = drm_dp_atomic_release_vcpi_slots(state, mgr, + intel_connector->port); + + return ret; +} + +static void intel_mst_disable_dp(struct intel_encoder *encoder, + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) +{ + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); + struct intel_digital_port *intel_dig_port = intel_mst->primary; + struct intel_dp *intel_dp = &intel_dig_port->dp; + struct intel_connector *connector = + to_intel_connector(old_conn_state->connector); + int ret; + + DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links); + + drm_dp_mst_reset_vcpi_slots(&intel_dp->mst_mgr, connector->port); + + ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr); + if (ret) { + DRM_ERROR("failed to update payload %d\n", ret); + } + if (old_crtc_state->has_audio) + intel_audio_codec_disable(encoder, + old_crtc_state, old_conn_state); +} + +static void intel_mst_post_disable_dp(struct intel_encoder *encoder, + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) +{ + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); + struct intel_digital_port *intel_dig_port = intel_mst->primary; + struct intel_dp *intel_dp = &intel_dig_port->dp; + struct intel_connector *connector = + to_intel_connector(old_conn_state->connector); + + intel_ddi_disable_pipe_clock(old_crtc_state); + + /* this can fail */ + drm_dp_check_act_status(&intel_dp->mst_mgr); + /* and this can also fail */ + drm_dp_update_payload_part2(&intel_dp->mst_mgr); + + drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, connector->port); + + /* + * Power down mst path before disabling the port, otherwise we end + * up getting interrupts from the sink upon detecting link loss. 
+ */ + drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port, + false); + + intel_dp->active_mst_links--; + + intel_mst->connector = NULL; + if (intel_dp->active_mst_links == 0) { + intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); + intel_dig_port->base.post_disable(&intel_dig_port->base, + old_crtc_state, NULL); + } + + DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links); +} + +static void intel_mst_pre_pll_enable_dp(struct intel_encoder *encoder, + const struct intel_crtc_state *pipe_config, + const struct drm_connector_state *conn_state) +{ + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); + struct intel_digital_port *intel_dig_port = intel_mst->primary; + struct intel_dp *intel_dp = &intel_dig_port->dp; + + if (intel_dp->active_mst_links == 0) + intel_dig_port->base.pre_pll_enable(&intel_dig_port->base, + pipe_config, NULL); +} + +static void intel_mst_post_pll_disable_dp(struct intel_encoder *encoder, + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) +{ + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); + struct intel_digital_port *intel_dig_port = intel_mst->primary; + struct intel_dp *intel_dp = &intel_dig_port->dp; + + if (intel_dp->active_mst_links == 0) + intel_dig_port->base.post_pll_disable(&intel_dig_port->base, + old_crtc_state, + old_conn_state); +} + +static void intel_mst_pre_enable_dp(struct intel_encoder *encoder, + const struct intel_crtc_state *pipe_config, + const struct drm_connector_state *conn_state) +{ + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); + struct intel_digital_port *intel_dig_port = intel_mst->primary; + struct intel_dp *intel_dp = &intel_dig_port->dp; + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + enum port port = intel_dig_port->base.port; + struct intel_connector *connector = + to_intel_connector(conn_state->connector); + int ret; + u32 temp; + + /* MST encoders are bound to a crtc, not to a connector, + * force the mapping here for get_hw_state. 
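+	 * intel_dp_mst_enc_get_hw_state() reports this fake encoder as
+	 * active only while intel_mst->connector is non-NULL.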
+ */ + connector->encoder = encoder; + intel_mst->connector = connector; + + DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links); + + if (intel_dp->active_mst_links == 0) + intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); + + drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port, true); + + if (intel_dp->active_mst_links == 0) + intel_dig_port->base.pre_enable(&intel_dig_port->base, + pipe_config, NULL); + + ret = drm_dp_mst_allocate_vcpi(&intel_dp->mst_mgr, + connector->port, + pipe_config->pbn, + pipe_config->dp_m_n.tu); + if (!ret) + DRM_ERROR("failed to allocate vcpi\n"); + + intel_dp->active_mst_links++; + temp = I915_READ(DP_TP_STATUS(port)); + I915_WRITE(DP_TP_STATUS(port), temp); + + ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr); + + intel_ddi_enable_pipe_clock(pipe_config); +} + +static void intel_mst_enable_dp(struct intel_encoder *encoder, + const struct intel_crtc_state *pipe_config, + const struct drm_connector_state *conn_state) +{ + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); + struct intel_digital_port *intel_dig_port = intel_mst->primary; + struct intel_dp *intel_dp = &intel_dig_port->dp; + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + enum port port = intel_dig_port->base.port; + + DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links); + + if (intel_wait_for_register(&dev_priv->uncore, + DP_TP_STATUS(port), + DP_TP_STATUS_ACT_SENT, + DP_TP_STATUS_ACT_SENT, + 1)) + DRM_ERROR("Timed out waiting for ACT sent\n"); + + drm_dp_check_act_status(&intel_dp->mst_mgr); + + drm_dp_update_payload_part2(&intel_dp->mst_mgr); + if (pipe_config->has_audio) + intel_audio_codec_enable(encoder, pipe_config, conn_state); +} + +static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder, + enum pipe *pipe) +{ + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); + *pipe = intel_mst->pipe; + if (intel_mst->connector) + return true; + return false; +} + +static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder, + struct intel_crtc_state *pipe_config) +{ + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); + struct intel_digital_port *intel_dig_port = intel_mst->primary; + + intel_ddi_get_config(&intel_dig_port->base, pipe_config); +} + +static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector) +{ + struct intel_connector *intel_connector = to_intel_connector(connector); + struct intel_dp *intel_dp = intel_connector->mst_port; + struct edid *edid; + int ret; + + if (drm_connector_is_unregistered(connector)) + return intel_connector_update_modes(connector, NULL); + + edid = drm_dp_mst_get_edid(connector, &intel_dp->mst_mgr, intel_connector->port); + ret = intel_connector_update_modes(connector, edid); + kfree(edid); + + return ret; +} + +static enum drm_connector_status +intel_dp_mst_detect(struct drm_connector *connector, bool force) +{ + struct intel_connector *intel_connector = to_intel_connector(connector); + struct intel_dp *intel_dp = intel_connector->mst_port; + + if (drm_connector_is_unregistered(connector)) + return connector_status_disconnected; + return drm_dp_mst_detect_port(connector, &intel_dp->mst_mgr, + intel_connector->port); +} + +static const struct drm_connector_funcs intel_dp_mst_connector_funcs = { + .detect = intel_dp_mst_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .atomic_get_property = intel_digital_connector_atomic_get_property, + .atomic_set_property = 
intel_digital_connector_atomic_set_property, + .late_register = intel_connector_register, + .early_unregister = intel_connector_unregister, + .destroy = intel_connector_destroy, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, + .atomic_duplicate_state = intel_digital_connector_duplicate_state, +}; + +static int intel_dp_mst_get_modes(struct drm_connector *connector) +{ + return intel_dp_mst_get_ddc_modes(connector); +} + +static enum drm_mode_status +intel_dp_mst_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + struct intel_connector *intel_connector = to_intel_connector(connector); + struct intel_dp *intel_dp = intel_connector->mst_port; + int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; + int max_rate, mode_rate, max_lanes, max_link_clock; + + if (drm_connector_is_unregistered(connector)) + return MODE_ERROR; + + if (mode->flags & DRM_MODE_FLAG_DBLSCAN) + return MODE_NO_DBLESCAN; + + max_link_clock = intel_dp_max_link_rate(intel_dp); + max_lanes = intel_dp_max_lane_count(intel_dp); + + max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes); + mode_rate = intel_dp_link_required(mode->clock, 18); + + /* TODO - validate mode against available PBN for link */ + if (mode->clock < 10000) + return MODE_CLOCK_LOW; + + if (mode->flags & DRM_MODE_FLAG_DBLCLK) + return MODE_H_ILLEGAL; + + if (mode_rate > max_rate || mode->clock > max_dotclk) + return MODE_CLOCK_HIGH; + + return MODE_OK; +} + +static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *connector, + struct drm_connector_state *state) +{ + struct intel_connector *intel_connector = to_intel_connector(connector); + struct intel_dp *intel_dp = intel_connector->mst_port; + struct intel_crtc *crtc = to_intel_crtc(state->crtc); + + return &intel_dp->mst_encoders[crtc->pipe]->base.base; +} + +static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_funcs = { + .get_modes = intel_dp_mst_get_modes, + .mode_valid = intel_dp_mst_mode_valid, + .atomic_best_encoder = intel_mst_atomic_best_encoder, + .atomic_check = intel_dp_mst_atomic_check, +}; + +static void intel_dp_mst_encoder_destroy(struct drm_encoder *encoder) +{ + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); + + drm_encoder_cleanup(encoder); + kfree(intel_mst); +} + +static const struct drm_encoder_funcs intel_dp_mst_enc_funcs = { + .destroy = intel_dp_mst_encoder_destroy, +}; + +static bool intel_dp_mst_get_hw_state(struct intel_connector *connector) +{ + if (connector->encoder && connector->base.state->crtc) { + enum pipe pipe; + if (!connector->encoder->get_hw_state(connector->encoder, &pipe)) + return false; + return true; + } + return false; +} + +static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *pathprop) +{ + struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr); + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct drm_device *dev = intel_dig_port->base.base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_connector *intel_connector; + struct drm_connector *connector; + enum pipe pipe; + int ret; + + intel_connector = intel_connector_alloc(); + if (!intel_connector) + return NULL; + + intel_connector->get_hw_state = intel_dp_mst_get_hw_state; + intel_connector->mst_port = intel_dp; + intel_connector->port = port; + drm_dp_mst_get_port_malloc(port); + + connector = &intel_connector->base; + ret = 
drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs, + DRM_MODE_CONNECTOR_DisplayPort); + if (ret) { + intel_connector_free(intel_connector); + return NULL; + } + + drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs); + + for_each_pipe(dev_priv, pipe) { + struct drm_encoder *enc = + &intel_dp->mst_encoders[pipe]->base.base; + + ret = drm_connector_attach_encoder(&intel_connector->base, enc); + if (ret) + goto err; + } + + drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0); + drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0); + + ret = drm_connector_set_path_property(connector, pathprop); + if (ret) + goto err; + + intel_attach_force_audio_property(connector); + intel_attach_broadcast_rgb_property(connector); + drm_connector_attach_max_bpc_property(connector, 6, 12); + + return connector; + +err: + drm_connector_cleanup(connector); + return NULL; +} + +static void intel_dp_register_mst_connector(struct drm_connector *connector) +{ + struct drm_i915_private *dev_priv = to_i915(connector->dev); + + if (dev_priv->fbdev) + drm_fb_helper_add_one_connector(&dev_priv->fbdev->helper, + connector); + + drm_connector_register(connector); +} + +static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, + struct drm_connector *connector) +{ + struct drm_i915_private *dev_priv = to_i915(connector->dev); + + DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); + drm_connector_unregister(connector); + + if (dev_priv->fbdev) + drm_fb_helper_remove_one_connector(&dev_priv->fbdev->helper, + connector); + + drm_connector_put(connector); +} + +static const struct drm_dp_mst_topology_cbs mst_cbs = { + .add_connector = intel_dp_add_mst_connector, + .register_connector = intel_dp_register_mst_connector, + .destroy_connector = intel_dp_destroy_mst_connector, +}; + +static struct intel_dp_mst_encoder * +intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum pipe pipe) +{ + struct intel_dp_mst_encoder *intel_mst; + struct intel_encoder *intel_encoder; + struct drm_device *dev = intel_dig_port->base.base.dev; + + intel_mst = kzalloc(sizeof(*intel_mst), GFP_KERNEL); + + if (!intel_mst) + return NULL; + + intel_mst->pipe = pipe; + intel_encoder = &intel_mst->base; + intel_mst->primary = intel_dig_port; + + drm_encoder_init(dev, &intel_encoder->base, &intel_dp_mst_enc_funcs, + DRM_MODE_ENCODER_DPMST, "DP-MST %c", pipe_name(pipe)); + + intel_encoder->type = INTEL_OUTPUT_DP_MST; + intel_encoder->power_domain = intel_dig_port->base.power_domain; + intel_encoder->port = intel_dig_port->base.port; + intel_encoder->crtc_mask = 0x7; + intel_encoder->cloneable = 0; + + intel_encoder->compute_config = intel_dp_mst_compute_config; + intel_encoder->disable = intel_mst_disable_dp; + intel_encoder->post_disable = intel_mst_post_disable_dp; + intel_encoder->pre_pll_enable = intel_mst_pre_pll_enable_dp; + intel_encoder->post_pll_disable = intel_mst_post_pll_disable_dp; + intel_encoder->pre_enable = intel_mst_pre_enable_dp; + intel_encoder->enable = intel_mst_enable_dp; + intel_encoder->get_hw_state = intel_dp_mst_enc_get_hw_state; + intel_encoder->get_config = intel_dp_mst_enc_get_config; + + return intel_mst; + +} + +static bool +intel_dp_create_fake_mst_encoders(struct intel_digital_port *intel_dig_port) +{ + struct intel_dp *intel_dp = &intel_dig_port->dp; + struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); + enum pipe pipe; + + 
for_each_pipe(dev_priv, pipe) + intel_dp->mst_encoders[pipe] = intel_dp_create_fake_mst_encoder(intel_dig_port, pipe); + return true; +} + +int +intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_base_id) +{ + struct intel_dp *intel_dp = &intel_dig_port->dp; + struct drm_device *dev = intel_dig_port->base.base.dev; + int ret; + + intel_dp->can_mst = true; + intel_dp->mst_mgr.cbs = &mst_cbs; + + /* create encoders */ + intel_dp_create_fake_mst_encoders(intel_dig_port); + ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, dev, + &intel_dp->aux, 16, 3, conn_base_id); + if (ret) { + intel_dp->can_mst = false; + return ret; + } + return 0; +} + +void +intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port) +{ + struct intel_dp *intel_dp = &intel_dig_port->dp; + + if (!intel_dp->can_mst) + return; + + drm_dp_mst_topology_mgr_destroy(&intel_dp->mst_mgr); + /* encoders will get killed by normal cleanup */ +} diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.h b/drivers/gpu/drm/i915/display/intel_dp_mst.h new file mode 100644 index 000000000000..1470c6e0514b --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2019 Intel Corporation + */ + +#ifndef __INTEL_DP_MST_H__ +#define __INTEL_DP_MST_H__ + +struct intel_digital_port; + +int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id); +void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port); + +#endif /* __INTEL_DP_MST_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_dsi.c b/drivers/gpu/drm/i915/display/intel_dsi.c new file mode 100644 index 000000000000..5fec02aceaed --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_dsi.c @@ -0,0 +1,128 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2018 Intel Corporation + */ + +#include <drm/drm_mipi_dsi.h> +#include "intel_dsi.h" + +int intel_dsi_bitrate(const struct intel_dsi *intel_dsi) +{ + int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); + + if (WARN_ON(bpp < 0)) + bpp = 16; + + return intel_dsi->pclk * bpp / intel_dsi->lane_count; +} + +int intel_dsi_tlpx_ns(const struct intel_dsi *intel_dsi) +{ + switch (intel_dsi->escape_clk_div) { + default: + case 0: + return 50; + case 1: + return 100; + case 2: + return 200; + } +} + +int intel_dsi_get_modes(struct drm_connector *connector) +{ + struct intel_connector *intel_connector = to_intel_connector(connector); + struct drm_display_mode *mode; + + DRM_DEBUG_KMS("\n"); + + if (!intel_connector->panel.fixed_mode) { + DRM_DEBUG_KMS("no fixed mode\n"); + return 0; + } + + mode = drm_mode_duplicate(connector->dev, + intel_connector->panel.fixed_mode); + if (!mode) { + DRM_DEBUG_KMS("drm_mode_duplicate failed\n"); + return 0; + } + + drm_mode_probed_add(connector, mode); + return 1; +} + +enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + struct intel_connector *intel_connector = to_intel_connector(connector); + const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; + int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; + + DRM_DEBUG_KMS("\n"); + + if (mode->flags & DRM_MODE_FLAG_DBLSCAN) + return MODE_NO_DBLESCAN; + + if (fixed_mode) { + if (mode->hdisplay > fixed_mode->hdisplay) + return MODE_PANEL; + if (mode->vdisplay > fixed_mode->vdisplay) + return MODE_PANEL; + if (fixed_mode->clock > max_dotclk) + return MODE_CLOCK_HIGH; + } + + return MODE_OK; +} + +struct intel_dsi_host 
*intel_dsi_host_init(struct intel_dsi *intel_dsi, + const struct mipi_dsi_host_ops *funcs, + enum port port) +{ + struct intel_dsi_host *host; + struct mipi_dsi_device *device; + + host = kzalloc(sizeof(*host), GFP_KERNEL); + if (!host) + return NULL; + + host->base.ops = funcs; + host->intel_dsi = intel_dsi; + host->port = port; + + /* + * We should call mipi_dsi_host_register(&host->base) here, but we don't + * have a host->dev, and we don't have OF stuff either. So just use the + * dsi framework as a library and hope for the best. Create the dsi + * devices by ourselves here too. Need to be careful though, because we + * don't initialize any of the driver model devices here. + */ + device = kzalloc(sizeof(*device), GFP_KERNEL); + if (!device) { + kfree(host); + return NULL; + } + + device->host = &host->base; + host->device = device; + + return host; +} + +enum drm_panel_orientation +intel_dsi_get_panel_orientation(struct intel_connector *connector) +{ + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + enum drm_panel_orientation orientation; + + orientation = dev_priv->vbt.dsi.orientation; + if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN) + return orientation; + + orientation = dev_priv->vbt.orientation; + if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN) + return orientation; + + return DRM_MODE_PANEL_ORIENTATION_NORMAL; +} diff --git a/drivers/gpu/drm/i915/display/intel_dsi.h b/drivers/gpu/drm/i915/display/intel_dsi.h new file mode 100644 index 000000000000..6d20434636cd --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_dsi.h @@ -0,0 +1,204 @@ +/* + * Copyright © 2013 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _INTEL_DSI_H +#define _INTEL_DSI_H + +#include <drm/drm_crtc.h> +#include <drm/drm_mipi_dsi.h> +#include "intel_drv.h" + +#define INTEL_DSI_VIDEO_MODE 0 +#define INTEL_DSI_COMMAND_MODE 1 + +/* Dual Link support */ +#define DSI_DUAL_LINK_NONE 0 +#define DSI_DUAL_LINK_FRONT_BACK 1 +#define DSI_DUAL_LINK_PIXEL_ALT 2 + +struct intel_dsi_host; + +struct intel_dsi { + struct intel_encoder base; + + struct intel_dsi_host *dsi_hosts[I915_MAX_PORTS]; + intel_wakeref_t io_wakeref[I915_MAX_PORTS]; + + /* GPIO Desc for CRC based Panel control */ + struct gpio_desc *gpio_panel; + + struct intel_connector *attached_connector; + + /* bit mask of ports being driven */ + u16 ports; + + /* if true, use HS mode, otherwise LP */ + bool hs; + + /* virtual channel */ + int channel; + + /* Video mode or command mode */ + u16 operation_mode; + + /* number of DSI lanes */ + unsigned int lane_count; + + /* + * video mode pixel format + * + * XXX: consolidate on .format in struct mipi_dsi_device. + */ + enum mipi_dsi_pixel_format pixel_format; + + /* video mode format for MIPI_VIDEO_MODE_FORMAT register */ + u32 video_mode_format; + + /* eot for MIPI_EOT_DISABLE register */ + u8 eotp_pkt; + u8 clock_stop; + + u8 escape_clk_div; + u8 dual_link; + + u16 dcs_backlight_ports; + u16 dcs_cabc_ports; + + /* RGB or BGR */ + bool bgr_enabled; + + u8 pixel_overlap; + u32 port_bits; + u32 bw_timer; + u32 dphy_reg; + + /* data lanes dphy timing */ + u32 dphy_data_lane_reg; + u32 video_frmt_cfg_bits; + u16 lp_byte_clk; + + /* timeouts in byte clocks */ + u16 hs_tx_timeout; + u16 lp_rx_timeout; + u16 turn_arnd_val; + u16 rst_timer_val; + u16 hs_to_lp_count; + u16 clk_lp_to_hs_count; + u16 clk_hs_to_lp_count; + + u16 init_count; + u32 pclk; + u16 burst_mode_ratio; + + /* all delays in ms */ + u16 backlight_off_delay; + u16 backlight_on_delay; + u16 panel_on_delay; + u16 panel_off_delay; + u16 panel_pwr_cycle_delay; +}; + +struct intel_dsi_host { + struct mipi_dsi_host base; + struct intel_dsi *intel_dsi; + enum port port; + + /* our little hack */ + struct mipi_dsi_device *device; +}; + +static inline struct intel_dsi_host *to_intel_dsi_host(struct mipi_dsi_host *h) +{ + return container_of(h, struct intel_dsi_host, base); +} + +#define for_each_dsi_port(__port, __ports_mask) for_each_port_masked(__port, __ports_mask) + +static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder) +{ + return container_of(encoder, struct intel_dsi, base.base); +} + +static inline bool is_vid_mode(struct intel_dsi *intel_dsi) +{ + return intel_dsi->operation_mode == INTEL_DSI_VIDEO_MODE; +} + +static inline bool is_cmd_mode(struct intel_dsi *intel_dsi) +{ + return intel_dsi->operation_mode == INTEL_DSI_COMMAND_MODE; +} + +static inline u16 intel_dsi_encoder_ports(struct intel_encoder *encoder) +{ + return enc_to_intel_dsi(&encoder->base)->ports; +} + +/* icl_dsi.c */ +void icl_dsi_init(struct drm_i915_private *dev_priv); + +/* intel_dsi.c */ +int intel_dsi_bitrate(const struct intel_dsi *intel_dsi); +int intel_dsi_tlpx_ns(const struct intel_dsi *intel_dsi); +enum drm_panel_orientation +intel_dsi_get_panel_orientation(struct intel_connector *connector); + +/* vlv_dsi.c */ +void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port); +enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt); +int intel_dsi_get_modes(struct drm_connector *connector); +enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode); +struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi 
*intel_dsi, + const struct mipi_dsi_host_ops *funcs, + enum port port); +void vlv_dsi_init(struct drm_i915_private *dev_priv); + +/* vlv_dsi_pll.c */ +int vlv_dsi_pll_compute(struct intel_encoder *encoder, + struct intel_crtc_state *config); +void vlv_dsi_pll_enable(struct intel_encoder *encoder, + const struct intel_crtc_state *config); +void vlv_dsi_pll_disable(struct intel_encoder *encoder); +u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, + struct intel_crtc_state *config); +void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port); + +bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv); +int bxt_dsi_pll_compute(struct intel_encoder *encoder, + struct intel_crtc_state *config); +void bxt_dsi_pll_enable(struct intel_encoder *encoder, + const struct intel_crtc_state *config); +void bxt_dsi_pll_disable(struct intel_encoder *encoder); +u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, + struct intel_crtc_state *config); +void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port); + +/* intel_dsi_vbt.c */ +bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id); +void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi, + enum mipi_seq seq_id); +void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec); +void intel_dsi_log_params(struct intel_dsi *intel_dsi); + +#endif /* _INTEL_DSI_H */ diff --git a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c new file mode 100644 index 000000000000..8c33262cb0b2 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c @@ -0,0 +1,179 @@ +/* + * Copyright © 2016 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Author: Deepak M + */ + +#include +#include