author      Jun Lei <jun.lei@amd.com>                  2022-06-24 16:28:50 -0400
committer   Alex Deucher <alexander.deucher@amd.com>   2022-07-25 09:31:03 -0400
commit      f4b4e41a2e05270cd90c5817ab514ace95555874 (patch)
tree        db9887c62c65c5ef073a7821e83bae4831e75217 /drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
parent      37edc99979b717312e60cf3463ab756f5a3d6de6 (diff)
download    linux-f4b4e41a2e05270cd90c5817ab514ace95555874.tar.bz2
drm/amd/display: Update DML logic for unbounded req handling
[why]
Unbounded request logic in resource/DML has some issues where unbounded request is being enabled incorrectly. SW today enables unbounded request unconditionally in hardware, on the assumption that HW can always support it in single-pipe scenarios. This worked until now because the same assumption is made in DML. A new DML update is needed to fix a bug: there are single-pipe scenarios where unbounded request cannot be enabled, so this DML change needs to be ported in and the dcn32 resource logic fixed.

[how]
First, dcn32_resource should program unbounded request in HW according to the unbounded request enablement output from DML, as opposed to the DML input. Second, port in the DML update which disables unbounded request in some scenarios, fixing an issue with poor stutter performance.

Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
Reviewed-by: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
Signed-off-by: Jun Lei <jun.lei@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
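For illustration, a minimal standalone sketch of the pattern the patch moves to. The stub types and the dml_get_unbounded_request_enabled() helper below are hypothetical stand-ins for the real DML/DC structures; the point is that the unbounded-request decision is read back from DML output rather than from the DML input, and is only honoured in single-pipe configurations.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the real DML context and pipe context. */
struct dml_ctx {
	bool unbounded_req_output;	/* what DML actually decided */
};

struct pipe_ctx {
	bool unbounded_req;
};

/* Stand-in for get_unbounded_request_enabled(): query the DML result
 * instead of trusting the unbounded_req_mode that was fed in as input. */
static bool dml_get_unbounded_request_enabled(const struct dml_ctx *dml)
{
	return dml->unbounded_req_output;
}

static void program_unbounded_req(struct pipe_ctx *pipes, int pipe_cnt,
				  const struct dml_ctx *dml)
{
	bool unbounded_req_enabled = dml_get_unbounded_request_enabled(dml);

	/* Unbounded requesting is only valid with a single pipe; in the
	 * driver this case is a soft ASSERT() and the flag is dropped. */
	if (unbounded_req_enabled && pipe_cnt > 1)
		unbounded_req_enabled = false;

	for (int i = 0; i < pipe_cnt; i++)
		pipes[i].unbounded_req = unbounded_req_enabled;
}

int main(void)
{
	struct dml_ctx dml = { .unbounded_req_output = true };
	struct pipe_ctx pipes[2] = { {false}, {false} };

	program_unbounded_req(pipes, 1, &dml);	/* single pipe: honoured */
	printf("single pipe: unbounded_req = %d\n", pipes[0].unbounded_req);

	program_unbounded_req(pipes, 2, &dml);	/* two pipes: forced off */
	printf("two pipes:   unbounded_req = %d\n", pipes[0].unbounded_req);

	return 0;
}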
Diffstat (limited to 'drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c')
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c  11
1 file changed, 10 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
index 32da47e24839..39214a0dcdf2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
@@ -3322,6 +3322,7 @@ void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context, display
 {
 	int i, pipe_idx;
 	bool usr_retraining_support = false;
+	bool unbounded_req_enabled = false;
 
 	/* Writeback MCIF_WB arbitration parameters */
 	dc->res_pool->funcs->set_mcif_arb_params(dc, context, pipes, pipe_cnt);
@@ -3357,6 +3358,14 @@ void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context, display
 	if (context->bw_ctx.bw.dcn.clk.dispclk_khz < dc->debug.min_disp_clk_khz)
 		context->bw_ctx.bw.dcn.clk.dispclk_khz = dc->debug.min_disp_clk_khz;
 
+	unbounded_req_enabled = get_unbounded_request_enabled(&context->bw_ctx.dml, pipes, pipe_cnt);
+
+	if (unbounded_req_enabled && pipe_cnt > 1) {
+		// Unbounded requesting should not ever be used when more than 1 pipe is enabled.
+		ASSERT(false);
+		unbounded_req_enabled = false;
+	}
+
 	for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
 		if (!context->res_ctx.pipe_ctx[i].stream)
 			continue;
@@ -3375,7 +3384,7 @@ void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context, display
 		} else {
 			context->res_ctx.pipe_ctx[i].det_buffer_size_kb = get_det_buffer_size_kbytes(&context->bw_ctx.dml, pipes, pipe_cnt,
 					pipe_idx);
-			context->res_ctx.pipe_ctx[i].unbounded_req = pipes[pipe_idx].pipe.src.unbounded_req_mode;
+			context->res_ctx.pipe_ctx[i].unbounded_req = unbounded_req_enabled;
 		}
 		if (context->bw_ctx.bw.dcn.clk.dppclk_khz < pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
 			context->bw_ctx.bw.dcn.clk.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;