author     Dave Airlie <airlied@redhat.com>   2020-04-10 06:42:52 +1000
committer  Dave Airlie <airlied@redhat.com>   2020-04-10 06:42:53 +1000
commit     74bd4f0c921cc9cf5f99ba4129dafe35496de6f3 (patch)
tree       45f17b3da19fca4f6fdc2d12f702da6c4a0e6268 /drivers/gpu
parent     1287c880ba45924ab41166cdf02766b2e3ac9d56 (diff)
parent     7e7ea24f0b46cd3078bc9af29d1c1aced89d1c8e (diff)
download   linux-74bd4f0c921cc9cf5f99ba4129dafe35496de6f3.tar.bz2
Merge tag 'amd-drm-fixes-5.7-2020-04-08' of git://people.freedesktop.org/~agd5f/linux into drm-next
amd-drm-fixes-5.7-2020-04-08:

amdgpu:
- Various Renoir fixes
- Fix gfx clockgating sequence on gfx10
- RAS fixes
- Avoid MST property creation after registration
- Various cursor/viewport fixes
- Fix a confusing log message about optional firmwares

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200408222240.3942-1-alexander.deucher@amd.com
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 25
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c | 13
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 17
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c | 20
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_hw_types.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 17
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 40
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/include/dal_asic_id.h | 6
-rw-r--r--  drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 3
-rw-r--r--  drivers/gpu/drm/amd/powerplay/arcturus_ppt.c | 47
-rw-r--r--  drivers/gpu/drm/amd/powerplay/renoir_ppt.c | 18
-rw-r--r--  drivers/gpu/drm/amd/powerplay/renoir_ppt.h | 2
22 files changed, 217 insertions, 48 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index faa3e7102156..559dc24ef436 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2340,8 +2340,6 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
{
int i, r;
- amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
- amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_blocks[i].status.valid)
@@ -3356,6 +3354,9 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
}
}
+ amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
+ amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
+
amdgpu_amdkfd_suspend(adev, !fbcon);
amdgpu_ras_suspend(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index f197f1be0969..abe94a55ecad 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -89,7 +89,8 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
adev->pm.ac_power = true;
else
adev->pm.ac_power = false;
- if (adev->powerplay.pp_funcs->enable_bapm)
+ if (adev->powerplay.pp_funcs &&
+ adev->powerplay.pp_funcs->enable_bapm)
amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);
mutex_unlock(&adev->pm.mutex);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index be50867ea644..deaa26808841 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -818,7 +818,7 @@ static int psp_ras_initialize(struct psp_context *psp)
if (!psp->adev->psp.ta_ras_ucode_size ||
!psp->adev->psp.ta_ras_start_addr) {
- dev_warn(psp->adev->dev, "RAS: ras ta ucode is not available\n");
+ dev_info(psp->adev->dev, "RAS: optional ras ta ucode is not available\n");
return 0;
}
@@ -902,7 +902,7 @@ static int psp_hdcp_initialize(struct psp_context *psp)
if (!psp->adev->psp.ta_hdcp_ucode_size ||
!psp->adev->psp.ta_hdcp_start_addr) {
- dev_warn(psp->adev->dev, "HDCP: hdcp ta ucode is not available\n");
+ dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n");
return 0;
}
@@ -1048,7 +1048,7 @@ static int psp_dtm_initialize(struct psp_context *psp)
if (!psp->adev->psp.ta_dtm_ucode_size ||
!psp->adev->psp.ta_dtm_start_addr) {
- dev_warn(psp->adev->dev, "DTM: dtm ta ucode is not available\n");
+ dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n");
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 3c32a94d2424..ab379b44679c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -1424,12 +1424,22 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
{
struct amdgpu_ras *ras =
container_of(work, struct amdgpu_ras, recovery_work);
+ struct amdgpu_device *remote_adev = NULL;
+ struct amdgpu_device *adev = ras->adev;
+ struct list_head device_list, *device_list_handle = NULL;
+ struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, false);
+
+ /* Build list of devices to query RAS related errors */
+ if (hive && adev->gmc.xgmi.num_physical_nodes > 1) {
+ device_list_handle = &hive->device_list;
+ } else {
+ list_add_tail(&adev->gmc.xgmi.head, &device_list);
+ device_list_handle = &device_list;
+ }
- /*
- * Query and print non zero error counter per IP block for
- * awareness before recovering GPU.
- */
- amdgpu_ras_log_on_err_counter(ras->adev);
+ list_for_each_entry(remote_adev, device_list_handle, gmc.xgmi.head) {
+ amdgpu_ras_log_on_err_counter(remote_adev);
+ }
if (amdgpu_device_should_recover_gpu(ras->adev))
amdgpu_device_gpu_recover(ras->adev, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index f6e3f59efa2f..d78059fd2c72 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -279,7 +279,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_2_nv12[] =
#define DEFAULT_SH_MEM_CONFIG \
((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
- (SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
+ (SH_MEM_ALIGNMENT_MODE_DWORD << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
(SH_MEM_RETRY_MODE_ALL << SH_MEM_CONFIG__RETRY_MODE__SHIFT) | \
(3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT))
@@ -4104,6 +4104,12 @@ static void gfx_v10_0_update_medium_grain_clock_gating(struct amdgpu_device *ade
/* It is disabled by HW by default */
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
+ /* 0 - Disable some blocks' MGCG */
+ WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000);
+ WREG32_SOC15(GC, 0, mmCGTT_WD_CLK_CTRL, 0xff000000);
+ WREG32_SOC15(GC, 0, mmCGTT_VGT_CLK_CTRL, 0xff000000);
+ WREG32_SOC15(GC, 0, mmCGTT_IA_CLK_CTRL, 0xff000000);
+
/* 1 - RLC_CGTT_MGCG_OVERRIDE */
def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
@@ -4143,19 +4149,20 @@ static void gfx_v10_0_update_medium_grain_clock_gating(struct amdgpu_device *ade
if (def != data)
WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
- /* 2 - disable MGLS in RLC */
+ /* 2 - disable MGLS in CP */
+ data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
+ if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
+ data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
+ WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
+ }
+
+ /* 3 - disable MGLS in RLC */
data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
}
- /* 3 - disable MGLS in CP */
- data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
- if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
- data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
- WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
- }
}
}
@@ -4266,7 +4273,7 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev,
/* === CGCG /CGLS for GFX 3D Only === */
gfx_v10_0_update_3d_clock_gating(adev, enable);
/* === MGCG + MGLS === */
- gfx_v10_0_update_medium_grain_clock_gating(adev, enable);
+ /* gfx_v10_0_update_medium_grain_clock_gating(adev, enable); */
}
if (adev->cg_flags &
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 608ffe3b684e..e6b113ed2f40 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1217,6 +1217,8 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
adev->gfx.mec_fw_write_wait = true;
break;
default:
+ adev->gfx.me_fw_write_wait = true;
+ adev->gfx.mec_fw_write_wait = true;
break;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
index cceb46faf212..dce945ef21a5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
@@ -710,14 +710,16 @@ static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev,
sec_count = REG_GET_FIELD(data, VML2_MEM_ECC_CNTL, SEC_COUNT);
if (sec_count) {
- DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+ dev_info(adev->dev,
+ "Instance[%d]: SubBlock %s, SEC %d\n", i,
vml2_mems[i], sec_count);
err_data->ce_count += sec_count;
}
ded_count = REG_GET_FIELD(data, VML2_MEM_ECC_CNTL, DED_COUNT);
if (ded_count) {
- DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+ dev_info(adev->dev,
+ "Instance[%d]: SubBlock %s, DED %d\n", i,
vml2_mems[i], ded_count);
err_data->ue_count += ded_count;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
index 0d413fabd015..c0e3efcb09bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
@@ -1539,8 +1539,11 @@ static const struct soc15_reg_entry mmhub_v9_4_edc_cnt_regs[] = {
{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3), 0, 0, 0 },
};
-static int mmhub_v9_4_get_ras_error_count(const struct soc15_reg_entry *reg,
- uint32_t value, uint32_t *sec_count, uint32_t *ded_count)
+static int mmhub_v9_4_get_ras_error_count(struct amdgpu_device *adev,
+ const struct soc15_reg_entry *reg,
+ uint32_t value,
+ uint32_t *sec_count,
+ uint32_t *ded_count)
{
uint32_t i;
uint32_t sec_cnt, ded_cnt;
@@ -1553,7 +1556,7 @@ static int mmhub_v9_4_get_ras_error_count(const struct soc15_reg_entry *reg,
mmhub_v9_4_ras_fields[i].sec_count_mask) >>
mmhub_v9_4_ras_fields[i].sec_count_shift;
if (sec_cnt) {
- DRM_INFO("MMHUB SubBlock %s, SEC %d\n",
+ dev_info(adev->dev, "MMHUB SubBlock %s, SEC %d\n",
mmhub_v9_4_ras_fields[i].name,
sec_cnt);
*sec_count += sec_cnt;
@@ -1563,7 +1566,7 @@ static int mmhub_v9_4_get_ras_error_count(const struct soc15_reg_entry *reg,
mmhub_v9_4_ras_fields[i].ded_count_mask) >>
mmhub_v9_4_ras_fields[i].ded_count_shift;
if (ded_cnt) {
- DRM_INFO("MMHUB SubBlock %s, DED %d\n",
+ dev_info(adev->dev, "MMHUB SubBlock %s, DED %d\n",
mmhub_v9_4_ras_fields[i].name,
ded_cnt);
*ded_count += ded_cnt;
@@ -1588,7 +1591,7 @@ static void mmhub_v9_4_query_ras_error_count(struct amdgpu_device *adev,
reg_value =
RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v9_4_edc_cnt_regs[i]));
if (reg_value)
- mmhub_v9_4_get_ras_error_count(&mmhub_v9_4_edc_cnt_regs[i],
+ mmhub_v9_4_get_ras_error_count(adev, &mmhub_v9_4_edc_cnt_regs[i],
reg_value, &sec_count, &ded_count);
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index bab587ab6e8d..f7c5cdc10a70 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -4723,10 +4723,10 @@ amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
static int
amdgpu_dm_connector_late_register(struct drm_connector *connector)
{
+#if defined(CONFIG_DEBUG_FS)
struct amdgpu_dm_connector *amdgpu_dm_connector =
to_amdgpu_dm_connector(connector);
-#if defined(CONFIG_DEBUG_FS)
connector_debugfs_init(amdgpu_dm_connector);
#endif
@@ -5929,7 +5929,8 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
adev->mode_info.underscan_vborder_property,
0);
- drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
+ if (!aconnector->mst_port)
+ drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
/* This defaults to the max in the range, but we want 8bpc for non-edp. */
aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
@@ -5948,8 +5949,9 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
&aconnector->base.base,
dm->ddev->mode_config.hdr_output_metadata_property, 0);
- drm_connector_attach_vrr_capable_property(
- &aconnector->base);
+ if (!aconnector->mst_port)
+ drm_connector_attach_vrr_capable_property(&aconnector->base);
+
#ifdef CONFIG_DRM_AMD_DC_HDCP
if (adev->dm.hdcp_workqueue)
drm_connector_attach_content_protection_property(&aconnector->base, true);
@@ -6272,12 +6274,6 @@ static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
y <= -amdgpu_crtc->max_cursor_height)
return 0;
- if (crtc->primary->state) {
- /* avivo cursor are offset into the total surface */
- x += crtc->primary->state->src_x >> 16;
- y += crtc->primary->state->src_y >> 16;
- }
-
if (x < 0) {
xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
x = 0;
@@ -6287,6 +6283,7 @@ static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
y = 0;
}
position->enable = true;
+ position->translate_by_source = true;
position->x = x;
position->y = y;
position->x_hotspot = xorigin;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index e8208df420d9..fabbe78d5aef 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -410,6 +410,14 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
drm_connector_attach_encoder(&aconnector->base,
&aconnector->mst_encoder->base);
+ connector->max_bpc_property = master->base.max_bpc_property;
+ if (connector->max_bpc_property)
+ drm_connector_attach_max_bpc_property(connector, 8, 16);
+
+ connector->vrr_capable_property = master->base.vrr_capable_property;
+ if (connector->vrr_capable_property)
+ drm_connector_attach_vrr_capable_property(connector);
+
drm_object_attach_property(
&connector->base,
dev->mode_config.path_property,
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index ab267ddd4abe..24c5765890fa 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -643,7 +643,7 @@ static void rn_clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params
/* Find lowest DPM, FCLK is filled in reverse order*/
for (i = PP_SMU_NUM_FCLK_DPM_LEVELS - 1; i >= 0; i--) {
- if (clock_table->FClocks[i].Freq != 0) {
+ if (clock_table->FClocks[i].Freq != 0 && clock_table->FClocks[i].Vol != 0) {
j = i;
break;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index f21bbb295ad3..8489f1e56892 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -283,6 +283,8 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
int i = 0;
bool ret = false;
+ stream->adjust = *adjust;
+
for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
@@ -1859,8 +1861,9 @@ enum surface_update_type dc_check_update_surfaces_for_stream(
// Else we fallback to mem compare.
} else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
dc->optimized_required = true;
- } else if (dc->wm_optimized_required)
- dc->optimized_required = true;
+ }
+
+ dc->optimized_required |= dc->wm_optimized_required;
}
return type;
@@ -2462,7 +2465,7 @@ void dc_commit_updates_for_stream(struct dc *dc,
enum surface_update_type update_type;
struct dc_state *context;
struct dc_context *dc_ctx = dc->ctx;
- int i;
+ int i, j;
stream_status = dc_stream_get_status(stream);
context = dc->current_state;
@@ -2500,6 +2503,17 @@ void dc_commit_updates_for_stream(struct dc *dc,
copy_surface_update_to_plane(surface, &srf_updates[i]);
+ if (update_type >= UPDATE_TYPE_MED) {
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx =
+ &context->res_ctx.pipe_ctx[j];
+
+ if (pipe_ctx->plane_state != surface)
+ continue;
+
+ resource_build_scaling_params(pipe_ctx);
+ }
+ }
}
copy_stream_update_to_stream(dc, context, stream, stream_update);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index 25c50bcab9e9..a8dc3082e3e1 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -385,6 +385,8 @@ struct dc_cursor_position {
*/
bool enable;
+ /* Translate cursor x/y by the source rectangle for each plane. */
+ bool translate_by_source;
};
struct dc_cursor_mi_param {
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 0976e378659f..c279982947e1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -2685,6 +2685,23 @@ void dce110_set_cursor_position(struct pipe_ctx *pipe_ctx)
.mirror = pipe_ctx->plane_state->horizontal_mirror
};
+ /**
+ * If the cursor's source viewport is clipped then we need to
+ * translate the cursor to appear in the correct position on
+ * the screen.
+ *
+ * This translation isn't affected by scaling so it needs to be
+ * done *after* we adjust the position for the scale factor.
+ *
+ * This is only done by opt-in for now since there are still
+ * some usecases like tiled display that might enable the
+ * cursor on both streams while expecting dc to clip it.
+ */
+ if (pos_cpy.translate_by_source) {
+ pos_cpy.x += pipe_ctx->plane_state->src_rect.x;
+ pos_cpy.y += pipe_ctx->plane_state->src_rect.y;
+ }
+
if (pipe_ctx->plane_state->address.type
== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
pos_cpy.enable = false;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 0be010085575..b0357546471b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -3021,12 +3021,50 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
int x_pos = pos_cpy.x;
int y_pos = pos_cpy.y;
- // translate cursor from stream space to plane space
+ /**
+ * DC cursor is stream space, HW cursor is plane space and drawn
+ * as part of the framebuffer.
+ *
+ * Cursor position can't be negative, but hotspot can be used to
+ * shift cursor out of the plane bounds. Hotspot must be smaller
+ * than the cursor size.
+ */
+
+ /**
+ * Translate cursor from stream space to plane space.
+ *
+ * If the cursor is scaled then we need to scale the position
+ * to be in the approximately correct place. We can't do anything
+ * about the actual size being incorrect, that's a limitation of
+ * the hardware.
+ */
x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.width /
pipe_ctx->plane_state->dst_rect.width;
y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height /
pipe_ctx->plane_state->dst_rect.height;
+ /**
+ * If the cursor's source viewport is clipped then we need to
+ * translate the cursor to appear in the correct position on
+ * the screen.
+ *
+ * This translation isn't affected by scaling so it needs to be
+ * done *after* we adjust the position for the scale factor.
+ *
+ * This is only done by opt-in for now since there are still
+ * some usecases like tiled display that might enable the
+ * cursor on both streams while expecting dc to clip it.
+ */
+ if (pos_cpy.translate_by_source) {
+ x_pos += pipe_ctx->plane_state->src_rect.x;
+ y_pos += pipe_ctx->plane_state->src_rect.y;
+ }
+
+ /**
+ * If the position is negative then we need to add to the hotspot
+ * to shift the cursor outside the plane.
+ */
+
if (x_pos < 0) {
pos_cpy.x_hotspot -= x_pos;
x_pos = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 8b7122249ddc..07265ca7d28c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -585,7 +585,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_pplib_clock_request = false,
.disable_pplib_wm_range = false,
.pplib_wm_report_mode = WM_REPORT_DEFAULT,
- .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
+ .pipe_split_policy = MPC_SPLIT_DYNAMIC,
.force_single_disp_pipe_split = true,
.disable_dcc = DCC_ENABLE,
.voltage_align_fclk = true,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index 233318260da4..22f421e82733 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -1373,6 +1373,7 @@ static void dcn20_update_dchubp_dpp(
}
if (pipe_ctx->update_flags.bits.viewport ||
+ (context == dc->current_state && plane_state->update_flags.bits.position_change) ||
(context == dc->current_state && plane_state->update_flags.bits.scaling_change) ||
(context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling)) {
diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
index 8a87d0ed90ae..2359e88d6029 100644
--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
@@ -136,6 +136,7 @@
#define RAVEN2_A0 0x81
#define RAVEN1_F0 0xF0
#define RAVEN_UNKNOWN 0xFF
+#define RENOIR_A0 0x91
#ifndef ASICREV_IS_RAVEN
#define ASICREV_IS_RAVEN(eChipRev) ((eChipRev >= RAVEN_A0) && eChipRev < RAVEN_UNKNOWN)
#endif
@@ -171,8 +172,6 @@ enum {
#define ASICREV_IS_NAVI10_P(eChipRev) (eChipRev < NV_NAVI12_P_A0)
#define ASICREV_IS_NAVI12_P(eChipRev) ((eChipRev >= NV_NAVI12_P_A0) && (eChipRev < NV_NAVI14_M_A0))
#define ASICREV_IS_NAVI14_M(eChipRev) ((eChipRev >= NV_NAVI14_M_A0) && (eChipRev < NV_UNKNOWN))
-#define RENOIR_A0 0x91
-#define DEVICE_ID_RENOIR_1636 0x1636 // Renoir
#define ASICREV_IS_RENOIR(eChipRev) ((eChipRev >= RENOIR_A0) && (eChipRev < RAVEN1_F0))
/*
@@ -183,6 +182,9 @@ enum {
#define DEVICE_ID_TEMASH_9839 0x9839
#define DEVICE_ID_TEMASH_983D 0x983D
+/* RENOIR */
+#define DEVICE_ID_RENOIR_1636 0x1636
+
/* Asic Family IDs for different asic family. */
#define FAMILY_CI 120 /* Sea Islands: Hawaii (P), Bonaire (M) */
#define FAMILY_KV 125 /* Fusion => Kaveri: Spectre, Spooky; Kabini: Kalindi */
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index c195575366a3..2a12614a12c2 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -1452,7 +1452,8 @@ static int pp_get_asic_baco_state(void *handle, int *state)
if (!hwmgr)
return -EINVAL;
- if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_state)
+ if (!(hwmgr->not_vf && amdgpu_dpm) ||
+ !hwmgr->hwmgr_func->get_asic_baco_state)
return 0;
mutex_lock(&hwmgr->smu_lock);
diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
index 5db8c56066ee..1ef0923f7190 100644
--- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
@@ -794,8 +794,21 @@ static int arcturus_force_clk_levels(struct smu_context *smu,
struct arcturus_dpm_table *dpm_table;
struct arcturus_single_dpm_table *single_dpm_table;
uint32_t soft_min_level, soft_max_level;
+ uint32_t smu_version;
int ret = 0;
+ ret = smu_get_smc_version(smu, NULL, &smu_version);
+ if (ret) {
+ pr_err("Failed to get smu version!\n");
+ return ret;
+ }
+
+ if (smu_version >= 0x361200) {
+ pr_err("Forcing clock level is not supported with "
+ "54.18 and onwards SMU firmwares\n");
+ return -EOPNOTSUPP;
+ }
+
soft_min_level = mask ? (ffs(mask) - 1) : 0;
soft_max_level = mask ? (fls(mask) - 1) : 0;
@@ -1512,6 +1525,38 @@ static int arcturus_set_power_profile_mode(struct smu_context *smu,
return 0;
}
+static int arcturus_set_performance_level(struct smu_context *smu,
+ enum amd_dpm_forced_level level)
+{
+ uint32_t smu_version;
+ int ret;
+
+ ret = smu_get_smc_version(smu, NULL, &smu_version);
+ if (ret) {
+ pr_err("Failed to get smu version!\n");
+ return ret;
+ }
+
+ switch (level) {
+ case AMD_DPM_FORCED_LEVEL_HIGH:
+ case AMD_DPM_FORCED_LEVEL_LOW:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+ if (smu_version >= 0x361200) {
+ pr_err("Forcing clock level is not supported with "
+ "54.18 and onwards SMU firmwares\n");
+ return -EOPNOTSUPP;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return smu_v11_0_set_performance_level(smu, level);
+}
+
static void arcturus_dump_pptable(struct smu_context *smu)
{
struct smu_table_context *table_context = &smu->smu_table;
@@ -2285,7 +2330,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.get_profiling_clk_mask = arcturus_get_profiling_clk_mask,
.get_power_profile_mode = arcturus_get_power_profile_mode,
.set_power_profile_mode = arcturus_set_power_profile_mode,
- .set_performance_level = smu_v11_0_set_performance_level,
+ .set_performance_level = arcturus_set_performance_level,
/* debug (internal used) */
.dump_pptable = arcturus_dump_pptable,
.get_power_limit = arcturus_get_power_limit,
diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
index 7bf52ecba01d..ff73a735b888 100644
--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
@@ -239,6 +239,7 @@ static int renoir_print_clk_levels(struct smu_context *smu,
uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0;
DpmClocks_t *clk_table = smu->smu_table.clocks_table;
SmuMetrics_t metrics;
+ bool cur_value_match_level = false;
if (!clk_table || clk_type >= SMU_CLK_COUNT)
return -EINVAL;
@@ -297,8 +298,13 @@ static int renoir_print_clk_levels(struct smu_context *smu,
GET_DPM_CUR_FREQ(clk_table, clk_type, i, value);
size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
cur_value == value ? "*" : "");
+ if (cur_value == value)
+ cur_value_match_level = true;
}
+ if (!cur_value_match_level)
+ size += sprintf(buf + size, " %uMhz *\n", cur_value);
+
return size;
}
@@ -887,6 +893,17 @@ static int renoir_read_sensor(struct smu_context *smu,
return ret;
}
+static bool renoir_is_dpm_running(struct smu_context *smu)
+{
+ /*
+ * Until now, the pmfw hasn't exported the interface of SMU
+ * feature mask to APU SKU so just force on all the features
+ * at the early initial stage.
+ */
+ return true;
+
+}
+
static const struct pptable_funcs renoir_ppt_funcs = {
.get_smu_msg_index = renoir_get_smu_msg_index,
.get_smu_clk_index = renoir_get_smu_clk_index,
@@ -927,6 +944,7 @@ static const struct pptable_funcs renoir_ppt_funcs = {
.mode2_reset = smu_v12_0_mode2_reset,
.set_soft_freq_limited_range = smu_v12_0_set_soft_freq_limited_range,
.set_driver_table_location = smu_v12_0_set_driver_table_location,
+ .is_dpm_running = renoir_is_dpm_running,
};
void renoir_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.h b/drivers/gpu/drm/amd/powerplay/renoir_ppt.h
index 2a390ddd37dd..89cd6da118a3 100644
--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.h
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.h
@@ -37,7 +37,7 @@ extern void renoir_set_ppt_funcs(struct smu_context *smu);
freq = table->SocClocks[dpm_level].Freq; \
break; \
case SMU_MCLK: \
- freq = table->MemClocks[dpm_level].Freq; \
+ freq = table->FClocks[dpm_level].Freq; \
break; \
case SMU_DCEFCLK: \
freq = table->DcfClocks[dpm_level].Freq; \