Diffstat (limited to 'drivers/gpu/drm/amd/display/dc')
80 files changed, 3345 insertions, 774 deletions
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile index f33847299bca..505158a5fcc1 100644 --- a/drivers/gpu/drm/amd/display/dc/Makefile +++ b/drivers/gpu/drm/amd/display/dc/Makefile @@ -33,6 +33,7 @@ DC_LIBS += dcn21 DC_LIBS += dcn30 DC_LIBS += dcn301 DC_LIBS += dcn302 +DC_LIBS += dcn303 endif DC_LIBS += dce120 @@ -54,7 +55,7 @@ AMD_DC = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/dc/,$(DC_LI include $(AMD_DC) -DISPLAY_CORE = dc.o dc_link.o dc_resource.o dc_hw_sequencer.o dc_sink.o \ +DISPLAY_CORE = dc.o dc_stat.o dc_link.o dc_resource.o dc_hw_sequencer.o dc_sink.o \ dc_surface.o dc_link_hwss.o dc_link_dp.o dc_link_ddc.o dc_debug.o dc_stream.o \ dc_link_enc_cfg.o diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index d79f4fe06c47..49126a0f66af 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -836,8 +836,10 @@ static enum bp_result bios_parser_get_spread_spectrum_info( return get_ss_info_v4_1(bp, signal, index, ss_info); case 2: case 3: + case 4: return get_ss_info_v4_2(bp, signal, index, ss_info); default: + ASSERT(0); break; } break; diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c index 455ee2be15a3..00706b072b5f 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c @@ -73,6 +73,7 @@ bool dal_bios_parser_init_cmd_tbl_helper2( case DCN_VERSION_3_0: case DCN_VERSION_3_01: case DCN_VERSION_3_02: + case DCN_VERSION_3_03: *h = dal_cmd_tbl_helper_dce112_get_table2(); return true; #endif diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c index 7d6c68c5dea9..dd52ebf56d62 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c @@ -241,6 +241,10 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p dcn3_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg); return &clk_mgr->base; } + if (ASICREV_IS_BEIGE_GOBY_P(asic_id.hw_internal_rev)) { + dcn3_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg); + return &clk_mgr->base; + } dcn20_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg); return &clk_mgr->base; } @@ -278,6 +282,9 @@ void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr_base) if (ASICREV_IS_DIMGREY_CAVEFISH_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) { dcn3_clk_mgr_destroy(clk_mgr); } + if (ASICREV_IS_BEIGE_GOBY_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) { + dcn3_clk_mgr_destroy(clk_mgr); + } break; case FAMILY_VGH: diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c index a06e86853bb9..416a24db17a9 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c @@ -106,10 +106,10 @@ static void rn_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr, for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) { int dpp_inst, dppclk_khz, prev_dppclk_khz; - /* Loop index will match dpp->inst if resource exists, - * and we want to avoid dependency on dpp object + /* Loop index may not match dpp->inst if some pipes disabled, + * so select correct inst from res_pool */ - dpp_inst = i; + dpp_inst = 
clk_mgr->base.ctx->dc->res_pool->dpps[i]->inst; dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz; prev_dppclk_khz = clk_mgr->dccg->pipe_dppclk_khz[i]; @@ -128,7 +128,7 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base, struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk; struct dc *dc = clk_mgr_base->ctx->dc; - int display_count, i; + int display_count; bool update_dppclk = false; bool update_dispclk = false; bool dpp_clock_lowered = false; @@ -210,14 +210,6 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base, clk_mgr_base->clks.dppclk_khz, safe_to_lower); - for (i = 0; i < context->stream_count; i++) { - if (context->streams[i]->signal == SIGNAL_TYPE_EDP && - context->streams[i]->apply_seamless_boot_optimization) { - dc_wait_for_vblank(dc, context->streams[i]); - break; - } - } - clk_mgr_base->clks.actual_dppclk_khz = rn_vbios_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz); @@ -842,46 +834,67 @@ static struct wm_table lpddr4_wm_table_rn = { }, } }; -static unsigned int find_socclk_for_voltage(struct dpm_clocks *clock_table, unsigned int voltage) + +static unsigned int find_max_fclk_for_voltage(struct dpm_clocks *clock_table, + unsigned int voltage) { int i; + uint32_t max_clk = 0; - for (i = 0; i < PP_SMU_NUM_SOCCLK_DPM_LEVELS; i++) { - if (clock_table->SocClocks[i].Vol == voltage) - return clock_table->SocClocks[i].Freq; + for (i = 0; i < PP_SMU_NUM_FCLK_DPM_LEVELS; i++) { + if (clock_table->FClocks[i].Vol <= voltage) { + max_clk = clock_table->FClocks[i].Freq > max_clk ? + clock_table->FClocks[i].Freq : max_clk; + } + } + + return max_clk; +} + +static unsigned int find_max_memclk_for_voltage(struct dpm_clocks *clock_table, + unsigned int voltage) +{ + int i; + uint32_t max_clk = 0; + + for (i = 0; i < PP_SMU_NUM_MEMCLK_DPM_LEVELS; i++) { + if (clock_table->MemClocks[i].Vol <= voltage) { + max_clk = clock_table->MemClocks[i].Freq > max_clk ? + clock_table->MemClocks[i].Freq : max_clk; + } } - ASSERT(0); - return 0; + return max_clk; } -static unsigned int find_dcfclk_for_voltage(struct dpm_clocks *clock_table, unsigned int voltage) + +static unsigned int find_max_socclk_for_voltage(struct dpm_clocks *clock_table, + unsigned int voltage) { int i; + uint32_t max_clk = 0; - for (i = 0; i < PP_SMU_NUM_DCFCLK_DPM_LEVELS; i++) { - if (clock_table->DcfClocks[i].Vol == voltage) - return clock_table->DcfClocks[i].Freq; + for (i = 0; i < PP_SMU_NUM_SOCCLK_DPM_LEVELS; i++) { + if (clock_table->SocClocks[i].Vol <= voltage) { + max_clk = clock_table->SocClocks[i].Freq > max_clk ? 
+ clock_table->SocClocks[i].Freq : max_clk; + } } - ASSERT(0); - return 0; + return max_clk; } static void rn_clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params, struct dpm_clocks *clock_table, struct integrated_info *bios_info) { int i, j = 0; + unsigned int volt; j = -1; - ASSERT(PP_SMU_NUM_FCLK_DPM_LEVELS <= MAX_NUM_DPM_LVL); - - /* Find lowest DPM, FCLK is filled in reverse order*/ - - for (i = PP_SMU_NUM_FCLK_DPM_LEVELS - 1; i >= 0; i--) { - if (clock_table->FClocks[i].Freq != 0 && clock_table->FClocks[i].Vol != 0) { + /* Find max DPM */ + for (i = 0; i < PP_SMU_NUM_DCFCLK_DPM_LEVELS; ++i) { + if (clock_table->DcfClocks[i].Freq != 0 && + clock_table->DcfClocks[i].Vol != 0) j = i; - break; - } } if (j == -1) { @@ -892,13 +905,18 @@ static void rn_clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params bw_params->clk_table.num_entries = j + 1; - for (i = 0; i < bw_params->clk_table.num_entries; i++, j--) { - bw_params->clk_table.entries[i].fclk_mhz = clock_table->FClocks[j].Freq; - bw_params->clk_table.entries[i].memclk_mhz = clock_table->MemClocks[j].Freq; - bw_params->clk_table.entries[i].voltage = clock_table->FClocks[j].Vol; - bw_params->clk_table.entries[i].dcfclk_mhz = find_dcfclk_for_voltage(clock_table, clock_table->FClocks[j].Vol); - bw_params->clk_table.entries[i].socclk_mhz = find_socclk_for_voltage(clock_table, - bw_params->clk_table.entries[i].voltage); + for (i = 0; i < bw_params->clk_table.num_entries; i++) { + volt = clock_table->DcfClocks[i].Vol; + + bw_params->clk_table.entries[i].voltage = volt; + bw_params->clk_table.entries[i].dcfclk_mhz = + clock_table->DcfClocks[i].Freq; + bw_params->clk_table.entries[i].fclk_mhz = + find_max_fclk_for_voltage(clock_table, volt); + bw_params->clk_table.entries[i].memclk_mhz = + find_max_memclk_for_voltage(clock_table, volt); + bw_params->clk_table.entries[i].socclk_mhz = + find_max_socclk_for_voltage(clock_table, volt); } bw_params->vram_type = bios_info->memory_type; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 4713f09bcbf1..ef157b83bacd 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -59,7 +59,6 @@ #include "dc_link_ddc.h" #include "dm_helpers.h" #include "mem_input.h" -#include "hubp.h" #include "dc_link_dp.h" #include "dc_dmub_srv.h" @@ -2664,7 +2663,6 @@ static void commit_planes_for_stream(struct dc *dc, dc->hwss.interdependent_update_lock(dc, context, false); else dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false); - dc->hwss.post_unlock_program_front_end(dc, context); return; } @@ -2765,6 +2763,7 @@ static void commit_planes_for_stream(struct dc *dc, plane_state->flip_immediate); } } + /* Perform requested Updates */ for (i = 0; i < surface_count; i++) { struct dc_plane_state *plane_state = srf_updates[i].surface; @@ -2787,6 +2786,7 @@ static void commit_planes_for_stream(struct dc *dc, dc->hwss.update_plane_addr(dc, pipe_ctx); } } + } if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock) @@ -3219,19 +3219,6 @@ void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink) } } -void dc_wait_for_vblank(struct dc *dc, struct dc_stream_state *stream) -{ - int i; - - for (i = 0; i < dc->res_pool->pipe_count; i++) - if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) { - struct timing_generator *tg = - dc->current_state->res_ctx.pipe_ctx[i].stream_res.tg; - tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK); - break; - } -} - void 
get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info) { info->displayClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz; @@ -3287,7 +3274,7 @@ void dc_allow_idle_optimizations(struct dc *dc, bool allow) if (dc->debug.disable_idle_power_optimizations) return; - if (dc->clk_mgr->funcs->is_smu_present) + if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->is_smu_present) if (!dc->clk_mgr->funcs->is_smu_present(dc->clk_mgr)) return; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index f4374d83662a..c07b45c021d5 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -48,6 +48,7 @@ #include "dce/dmub_psr.h" #include "dmub/dmub_srv.h" #include "inc/hw/panel_cntl.h" +#include "inc/link_enc_cfg.h" #define DC_LOGGER_INIT(logger) @@ -247,6 +248,16 @@ bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type) link->dc->hwss.edp_wait_for_hpd_ready(link, true); } + /* Link may not have physical HPD pin. */ + if (link->ep_type != DISPLAY_ENDPOINT_PHY) { + if (link->hpd_status) + *type = dc_connection_single; + else + *type = dc_connection_none; + + return true; + } + /* todo: may need to lock gpio access */ hpd_pin = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service); @@ -432,8 +443,18 @@ bool dc_link_is_dp_sink_present(struct dc_link *link) static enum signal_type link_detect_sink(struct dc_link *link, enum dc_detect_reason reason) { - enum signal_type result = get_basic_signal_type(link->link_enc->id, - link->link_id); + enum signal_type result; + struct graphics_object_id enc_id; + + if (link->is_dig_mapping_flexible) + enc_id = (struct graphics_object_id){.id = ENCODER_ID_UNKNOWN}; + else + enc_id = link->link_enc->id; + result = get_basic_signal_type(enc_id, link->link_id); + + /* Use basic signal type for link without physical connector. 
*/ + if (link->ep_type != DISPLAY_ENDPOINT_PHY) + return result; /* Internal digital encoder will detect only dongles * that require digital signal @@ -762,19 +783,20 @@ static bool detect_dp(struct dc_link *link, } if (link->type != dc_connection_mst_branch && - is_dp_active_dongle(link)) { - /* DP active dongles */ - link->type = dc_connection_active_dongle; + is_dp_branch_device(link)) { + /* DP SST branch */ + link->type = dc_connection_sst_branch; if (!link->dpcd_caps.sink_count.bits.SINK_COUNT) { /* - * active dongle unplug processing for short irq + * SST branch unplug processing for short irq */ link_disconnect_sink(link); return true; } - if (link->dpcd_caps.dongle_type != - DISPLAY_DONGLE_DP_HDMI_CONVERTER) + if (is_dp_active_dongle(link) && + (link->dpcd_caps.dongle_type != + DISPLAY_DONGLE_DP_HDMI_CONVERTER)) *converter_disable_audio = true; } } else { @@ -954,7 +976,8 @@ static bool dc_link_detect_helper(struct dc_link *link, case SIGNAL_TYPE_DISPLAY_PORT: { /* wa HPD high coming too early*/ - if (link->link_enc->features.flags.bits.DP_IS_USB_C == 1) { + if (link->ep_type == DISPLAY_ENDPOINT_PHY && + link->link_enc->features.flags.bits.DP_IS_USB_C == 1) { /* if alt mode times out, return false */ if (!wait_for_entering_dp_alt_mode(link)) return false; @@ -974,8 +997,8 @@ static bool dc_link_detect_helper(struct dc_link *link, sizeof(struct dpcd_caps))) same_dpcd = false; } - /* Active dongle downstream unplug*/ - if (link->type == dc_connection_active_dongle && + /* Active SST downstream branch device unplug*/ + if (link->type == dc_connection_sst_branch && link->dpcd_caps.sink_count.bits.SINK_COUNT == 0) { if (prev_sink) /* Downstream unplug */ @@ -1076,6 +1099,24 @@ static bool dc_link_detect_helper(struct dc_link *link, dc_is_dvi_signal(link->connector_signal)) { if (prev_sink) dc_sink_release(prev_sink); + link_disconnect_sink(link); + + return false; + } + /* + * Abort detection for DP connectors if we have + * no EDID and connector is active converter + * as there are no display downstream + * + */ + if (dc_is_dp_sst_signal(link->connector_signal) && + (link->dpcd_caps.dongle_type == + DISPLAY_DONGLE_DP_VGA_CONVERTER || + link->dpcd_caps.dongle_type == + DISPLAY_DONGLE_DP_DVI_CONVERTER)) { + if (prev_sink) + dc_sink_release(prev_sink); + link_disconnect_sink(link); return false; } @@ -1206,14 +1247,25 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) { const struct dc *dc = link->dc; bool ret; + bool can_apply_seamless_boot = false; + int i; + + for (i = 0; i < dc->current_state->stream_count; i++) { + if (dc->current_state->streams[i]->apply_seamless_boot_optimization) { + can_apply_seamless_boot = true; + break; + } + } /* get out of low power state */ - clk_mgr_exit_optimized_pwr_state(dc, dc->clk_mgr); + if (!can_apply_seamless_boot && reason != DETECT_REASON_BOOT) + clk_mgr_exit_optimized_pwr_state(dc, dc->clk_mgr); ret = dc_link_detect_helper(link, reason); /* Go back to power optimized state */ - clk_mgr_optimize_pwr_state(dc, dc->clk_mgr); + if (!can_apply_seamless_boot && reason != DETECT_REASON_BOOT) + clk_mgr_optimize_pwr_state(dc, dc->clk_mgr); return ret; } @@ -1716,6 +1768,8 @@ static enum dc_status enable_link_dp(struct dc_state *state, bool apply_seamless_boot_optimization = false; uint32_t bl_oled_enable_delay = 50; // in ms const uint32_t post_oui_delay = 30; // 30ms + /* Reduce link bandwidth between failed link training attempts. 
*/ + bool do_fallback = false; // check for seamless boot for (i = 0; i < state->stream_count; i++) { @@ -1754,7 +1808,8 @@ static enum dc_status enable_link_dp(struct dc_state *state, skip_video_pattern, LINK_TRAINING_ATTEMPTS, pipe_ctx, - pipe_ctx->stream->signal)) { + pipe_ctx->stream->signal, + do_fallback)) { link->cur_link_settings = link_settings; status = DC_OK; } else { @@ -3475,9 +3530,11 @@ uint32_t dc_bandwidth_in_kbps_from_timing( uint32_t kbps; #if defined(CONFIG_DRM_AMD_DC_DCN) - if (timing->flags.DSC) { - return dc_dsc_stream_bandwidth_in_kbps(timing->pix_clk_100hz, timing->dsc_cfg.bits_per_pixel); - } + if (timing->flags.DSC) + return dc_dsc_stream_bandwidth_in_kbps(timing, + timing->dsc_cfg.bits_per_pixel, + timing->dsc_cfg.num_slices_h, + timing->dsc_cfg.is_dp); #endif switch (timing->display_color_depth) { @@ -3539,19 +3596,6 @@ void dc_link_set_drive_settings(struct dc *dc, dc_link_dp_set_drive_settings(dc->links[i], lt_settings); } -void dc_link_perform_link_training(struct dc *dc, - struct dc_link_settings *link_setting, - bool skip_video_pattern) -{ - int i; - - for (i = 0; i < dc->link_count; i++) - dc_link_dp_perform_link_training( - dc->links[i], - link_setting, - skip_video_pattern); -} - void dc_link_set_preferred_link_settings(struct dc *dc, struct dc_link_settings *link_setting, struct dc_link *link) @@ -3702,8 +3746,22 @@ void dc_link_overwrite_extended_receiver_cap( bool dc_link_is_fec_supported(const struct dc_link *link) { + struct link_encoder *link_enc = NULL; + + /* Links supporting dynamically assigned link encoder will be assigned next + * available encoder if one not already assigned. + */ + if (link->is_dig_mapping_flexible && + link->dc->res_pool->funcs->link_encs_assign) { + link_enc = link_enc_cfg_get_link_enc_used_by_link(link->dc->current_state, link); + if (link_enc == NULL) + link_enc = link_enc_cfg_get_next_avail_link_enc(link->dc, link->dc->current_state); + } else + link_enc = link->link_enc; + ASSERT(link_enc); + return (dc_is_dp_signal(link->connector_signal) && - link->link_enc->features.fec_supported && + link_enc->features.fec_supported && link->dpcd_caps.fec_cap.bits.FEC_CAPABLE && !IS_FPGA_MAXIMUS_DC(link->ctx->dce_environment)); } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c index 64414c51312d..ba6b56f20269 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c @@ -658,7 +658,10 @@ int dc_link_aux_transfer_raw(struct ddc_service *ddc, struct aux_payload *payload, enum aux_return_code_type *operation_result) { - return dce_aux_transfer_raw(ddc, payload, operation_result); + if (dc_enable_dmub_notifications(ddc->ctx->dc)) + return dce_aux_transfer_dmub_raw(ddc, payload, operation_result); + else + return dce_aux_transfer_raw(ddc, payload, operation_result); } /* dc_link_aux_transfer_with_retries() - Attempt to submit an @@ -682,6 +685,10 @@ bool dc_link_aux_try_to_configure_timeout(struct ddc_service *ddc, bool result = false; struct ddc *ddc_pin = ddc->ddc_pin; + /* Do not try to access nonexistent DDC pin. 
*/ + if (ddc->link->ep_type != DISPLAY_ENDPOINT_PHY) + return true; + if (ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]->funcs->configure_timeout) { ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]->funcs->configure_timeout(ddc, timeout); result = true; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 3ff3d9e90983..9e08410bfdfd 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -14,6 +14,7 @@ #include "dpcd_defs.h" #include "dc_dmub_srv.h" #include "dce/dmub_hw_lock_mgr.h" +#include "inc/link_enc_cfg.h" /*Travis*/ static const uint8_t DP_VGA_LVDS_CONVERTER_ID_2[] = "sivarT"; @@ -107,10 +108,50 @@ static void wait_for_training_aux_rd_interval( wait_in_micro_secs); } +static enum dpcd_training_patterns + dc_dp_training_pattern_to_dpcd_training_pattern( + struct dc_link *link, + enum dc_dp_training_pattern pattern) +{ + enum dpcd_training_patterns dpcd_tr_pattern = + DPCD_TRAINING_PATTERN_VIDEOIDLE; + + switch (pattern) { + case DP_TRAINING_PATTERN_SEQUENCE_1: + dpcd_tr_pattern = DPCD_TRAINING_PATTERN_1; + break; + case DP_TRAINING_PATTERN_SEQUENCE_2: + dpcd_tr_pattern = DPCD_TRAINING_PATTERN_2; + break; + case DP_TRAINING_PATTERN_SEQUENCE_3: + dpcd_tr_pattern = DPCD_TRAINING_PATTERN_3; + break; + case DP_TRAINING_PATTERN_SEQUENCE_4: + dpcd_tr_pattern = DPCD_TRAINING_PATTERN_4; + break; + case DP_TRAINING_PATTERN_VIDEOIDLE: + dpcd_tr_pattern = DPCD_TRAINING_PATTERN_VIDEOIDLE; + break; + default: + ASSERT(0); + DC_LOG_HW_LINK_TRAINING("%s: Invalid HW Training pattern: %d\n", + __func__, pattern); + break; + } + + return dpcd_tr_pattern; +} + static void dpcd_set_training_pattern( struct dc_link *link, - union dpcd_training_pattern dpcd_pattern) + enum dc_dp_training_pattern training_pattern) { + union dpcd_training_pattern dpcd_pattern = { {0} }; + + dpcd_pattern.v1_4.TRAINING_PATTERN_SET = + dc_dp_training_pattern_to_dpcd_training_pattern( + link, training_pattern); + core_link_write_dpcd( link, DP_TRAINING_PATTERN_SET, @@ -132,10 +173,22 @@ static enum dc_dp_training_pattern decide_cr_training_pattern( static enum dc_dp_training_pattern decide_eq_training_pattern(struct dc_link *link, const struct dc_link_settings *link_settings) { + struct link_encoder *link_enc; enum dc_dp_training_pattern highest_tp = DP_TRAINING_PATTERN_SEQUENCE_2; - struct encoder_feature_support *features = &link->link_enc->features; + struct encoder_feature_support *features; struct dpcd_caps *dpcd_caps = &link->dpcd_caps; + /* Access link encoder capability based on whether it is statically + * or dynamically assigned to a link. 
+ */ + if (link->is_dig_mapping_flexible && + link->dc->res_pool->funcs->link_encs_assign) + link_enc = link_enc_cfg_get_link_enc_used_by_link(link->dc->current_state, link); + else + link_enc = link->link_enc; + ASSERT(link_enc); + features = &link_enc->features; + if (features->flags.bits.IS_TPS3_CAPABLE) highest_tp = DP_TRAINING_PATTERN_SEQUENCE_3; @@ -227,37 +280,6 @@ static void dpcd_set_link_settings( } } -static enum dpcd_training_patterns - dc_dp_training_pattern_to_dpcd_training_pattern( - struct dc_link *link, - enum dc_dp_training_pattern pattern) -{ - enum dpcd_training_patterns dpcd_tr_pattern = - DPCD_TRAINING_PATTERN_VIDEOIDLE; - - switch (pattern) { - case DP_TRAINING_PATTERN_SEQUENCE_1: - dpcd_tr_pattern = DPCD_TRAINING_PATTERN_1; - break; - case DP_TRAINING_PATTERN_SEQUENCE_2: - dpcd_tr_pattern = DPCD_TRAINING_PATTERN_2; - break; - case DP_TRAINING_PATTERN_SEQUENCE_3: - dpcd_tr_pattern = DPCD_TRAINING_PATTERN_3; - break; - case DP_TRAINING_PATTERN_SEQUENCE_4: - dpcd_tr_pattern = DPCD_TRAINING_PATTERN_4; - break; - default: - ASSERT(0); - DC_LOG_HW_LINK_TRAINING("%s: Invalid HW Training pattern: %d\n", - __func__, pattern); - break; - } - - return dpcd_tr_pattern; -} - static uint8_t dc_dp_initialize_scrambling_data_symbols( struct dc_link *link, enum dc_dp_training_pattern pattern) @@ -420,20 +442,30 @@ static bool is_cr_done(enum dc_lane_count ln_count, } static bool is_ch_eq_done(enum dc_lane_count ln_count, - union lane_status *dpcd_lane_status, - union lane_align_status_updated *lane_status_updated) + union lane_status *dpcd_lane_status) { + bool done = true; uint32_t lane; - if (!lane_status_updated->bits.INTERLANE_ALIGN_DONE) - return false; - else { - for (lane = 0; lane < (uint32_t)(ln_count); lane++) { - if (!dpcd_lane_status[lane].bits.SYMBOL_LOCKED_0 || - !dpcd_lane_status[lane].bits.CHANNEL_EQ_DONE_0) - return false; - } - } - return true; + for (lane = 0; lane < (uint32_t)(ln_count); lane++) + if (!dpcd_lane_status[lane].bits.CHANNEL_EQ_DONE_0) + done = false; + return done; +} + +static bool is_symbol_locked(enum dc_lane_count ln_count, + union lane_status *dpcd_lane_status) +{ + bool locked = true; + uint32_t lane; + for (lane = 0; lane < (uint32_t)(ln_count); lane++) + if (!dpcd_lane_status[lane].bits.SYMBOL_LOCKED_0) + locked = false; + return locked; +} + +static inline bool is_interlane_aligned(union lane_align_status_updated align_status) +{ + return align_status.bits.INTERLANE_ALIGN_DONE == 1; } static void update_drive_settings( @@ -835,10 +867,9 @@ static bool perform_post_lt_adj_req_sequence( if (!is_cr_done(lane_count, dpcd_lane_status)) return false; - if (!is_ch_eq_done( - lane_count, - dpcd_lane_status, - &dpcd_lane_status_updated)) + if (!is_ch_eq_done(lane_count, dpcd_lane_status) || + !is_symbol_locked(lane_count, dpcd_lane_status) || + !is_interlane_aligned(dpcd_lane_status_updated)) return false; for (lane = 0; lane < (uint32_t)(lane_count); lane++) { @@ -992,9 +1023,9 @@ static enum link_training_result perform_channel_equalization_sequence( return LINK_TRAINING_EQ_FAIL_CR; /* 6. check CHEQ done*/ - if (is_ch_eq_done(lane_count, - dpcd_lane_status, - &dpcd_lane_status_updated)) + if (is_ch_eq_done(lane_count, dpcd_lane_status) && + is_symbol_locked(lane_count, dpcd_lane_status) && + is_interlane_aligned(dpcd_lane_status_updated)) return LINK_TRAINING_SUCCESS; /* 7. 
update VS/PE/PC2 in lt_settings*/ @@ -1126,7 +1157,7 @@ static enum link_training_result perform_clock_recovery_sequence( return get_cr_failure(lane_count, dpcd_lane_status); } -static inline enum link_training_result perform_link_training_int( +static inline enum link_training_result dp_transition_to_video_idle( struct dc_link *link, struct link_training_settings *lt_settings, enum link_training_result status) @@ -1162,7 +1193,7 @@ static inline enum link_training_result perform_link_training_int( return status; } -static enum link_training_result check_link_loss_status( +enum link_training_result dp_check_link_loss_status( struct dc_link *link, const struct link_training_settings *link_training_setting) { @@ -1200,7 +1231,7 @@ static enum link_training_result check_link_loss_status( return status; } -static void initialize_training_settings( +static inline void decide_8b_10b_training_settings( struct dc_link *link, const struct dc_link_settings *link_setting, const struct dc_link_training_overrides *overrides, @@ -1244,6 +1275,8 @@ static void initialize_training_settings( else lt_settings->link_settings.link_spread = LINK_SPREAD_05_DOWNSPREAD_30KHZ; + lt_settings->lttpr_mode = link->lttpr_mode; + /* Initialize lane settings overrides */ if (overrides->voltage_swing != NULL) lt_settings->voltage_swing = overrides->voltage_swing; @@ -1296,7 +1329,18 @@ static void initialize_training_settings( lt_settings->enhanced_framing = 1; } -static uint8_t convert_to_count(uint8_t lttpr_repeater_count) +static void decide_training_settings( + struct dc_link *link, + const struct dc_link_settings *link_settings, + const struct dc_link_training_overrides *overrides, + struct link_training_settings *lt_settings) +{ + if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) + decide_8b_10b_training_settings(link, link_settings, overrides, lt_settings); +} + + +uint8_t dp_convert_to_count(uint8_t lttpr_repeater_count) { switch (lttpr_repeater_count) { case 0x80: // 1 lttpr repeater @@ -1325,13 +1369,16 @@ static void configure_lttpr_mode_transparent(struct dc_link *link) { uint8_t repeater_mode = DP_PHY_REPEATER_MODE_TRANSPARENT; + DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Transparent Mode\n", __func__); core_link_write_dpcd(link, DP_PHY_REPEATER_MODE, (uint8_t *)&repeater_mode, sizeof(repeater_mode)); } -static void configure_lttpr_mode_non_transparent(struct dc_link *link) +static void configure_lttpr_mode_non_transparent( + struct dc_link *link, + const struct link_training_settings *lt_settings) { /* aux timeout is already set to extended */ /* RESET/SET lttpr mode to enable non transparent mode */ @@ -1341,11 +1388,16 @@ static void configure_lttpr_mode_non_transparent(struct dc_link *link) enum dc_status result = DC_ERROR_UNEXPECTED; uint8_t repeater_mode = DP_PHY_REPEATER_MODE_TRANSPARENT; - DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Transparent Mode\n", __func__); - result = core_link_write_dpcd(link, - DP_PHY_REPEATER_MODE, - (uint8_t *)&repeater_mode, - sizeof(repeater_mode)); + enum dp_link_encoding encoding = dp_get_link_encoding_format(<_settings->link_settings); + + if (encoding == DP_8b_10b_ENCODING) { + DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Transparent Mode\n", __func__); + result = core_link_write_dpcd(link, + DP_PHY_REPEATER_MODE, + (uint8_t *)&repeater_mode, + sizeof(repeater_mode)); + + } if (result == DC_OK) { link->dpcd_caps.lttpr_caps.mode = repeater_mode; @@ -1365,16 +1417,18 @@ static void configure_lttpr_mode_non_transparent(struct dc_link *link) 
link->dpcd_caps.lttpr_caps.mode = repeater_mode; } - repeater_cnt = convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); - for (repeater_id = repeater_cnt; repeater_id > 0; repeater_id--) { - aux_interval_address = DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1 + - ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (repeater_id - 1)); - core_link_read_dpcd( - link, - aux_interval_address, - (uint8_t *)&link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1], - sizeof(link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1])); - link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1] &= 0x7F; + if (encoding == DP_8b_10b_ENCODING) { + repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); + for (repeater_id = repeater_cnt; repeater_id > 0; repeater_id--) { + aux_interval_address = DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1 + + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (repeater_id - 1)); + core_link_read_dpcd( + link, + aux_interval_address, + (uint8_t *)&link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1], + sizeof(link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1])); + link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1] &= 0x7F; + } } } } @@ -1510,7 +1564,7 @@ bool dc_link_dp_perform_link_training_skip_aux( { struct link_training_settings lt_settings; - initialize_training_settings( + decide_training_settings( link, link_setting, &link->preferred_training_settings, @@ -1555,13 +1609,12 @@ enum link_training_result dc_link_dp_perform_link_training( { enum link_training_result status = LINK_TRAINING_SUCCESS; struct link_training_settings lt_settings; - union dpcd_training_pattern dpcd_pattern = { { 0 } }; bool fec_enable; uint8_t repeater_cnt; uint8_t repeater_id; - initialize_training_settings( + decide_training_settings( link, link_setting, &link->preferred_training_settings, @@ -1569,7 +1622,7 @@ enum link_training_result dc_link_dp_perform_link_training( /* Configure lttpr mode */ if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) - configure_lttpr_mode_non_transparent(link); + configure_lttpr_mode_non_transparent(link, <_settings); else if (link->lttpr_mode == LTTPR_MODE_TRANSPARENT) configure_lttpr_mode_transparent(link); @@ -1591,7 +1644,7 @@ enum link_training_result dc_link_dp_perform_link_training( /* 2. perform link training (set link training done * to false is done as well) */ - repeater_cnt = convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); + repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); for (repeater_id = repeater_cnt; (repeater_id > 0 && status == LINK_TRAINING_SUCCESS); repeater_id--) { @@ -1621,10 +1674,9 @@ enum link_training_result dc_link_dp_perform_link_training( } /* 3. set training not in progress*/ - dpcd_pattern.v1_4.TRAINING_PATTERN_SET = DPCD_TRAINING_PATTERN_VIDEOIDLE; - dpcd_set_training_pattern(link, dpcd_pattern); + dpcd_set_training_pattern(link, DP_TRAINING_PATTERN_VIDEOIDLE); if ((status == LINK_TRAINING_SUCCESS) || !skip_video_pattern) { - status = perform_link_training_int(link, + status = dp_transition_to_video_idle(link, <_settings, status); } @@ -1634,7 +1686,7 @@ enum link_training_result dc_link_dp_perform_link_training( */ if (link->connector_signal != SIGNAL_TYPE_EDP && status == LINK_TRAINING_SUCCESS) { msleep(5); - status = check_link_loss_status(link, <_settings); + status = dp_check_link_loss_status(link, <_settings); } /* 6. 
print status message*/ @@ -1687,18 +1739,31 @@ bool perform_link_training_with_retries( bool skip_video_pattern, int attempts, struct pipe_ctx *pipe_ctx, - enum signal_type signal) + enum signal_type signal, + bool do_fallback) { uint8_t j; uint8_t delay_between_attempts = LINK_TRAINING_RETRY_DELAY; struct dc_stream_state *stream = pipe_ctx->stream; struct dc_link *link = stream->link; enum dp_panel_mode panel_mode; + struct link_encoder *link_enc; + enum link_training_result status = LINK_TRAINING_CR_FAIL_LANE0; + struct dc_link_settings currnet_setting = *link_setting; + + /* Dynamically assigned link encoders associated with stream rather than + * link. + */ + if (link->dc->res_pool->funcs->link_encs_assign) + link_enc = stream->link_enc; + else + link_enc = link->link_enc; + ASSERT(link_enc); /* We need to do this before the link training to ensure the idle pattern in SST * mode will be sent right after the link training */ - link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc, + link_enc->funcs->connect_dig_be_to_fe(link_enc, pipe_ctx->stream_res.stream_enc->id, true); for (j = 0; j < attempts; ++j) { @@ -1710,7 +1775,7 @@ bool perform_link_training_with_retries( link, signal, pipe_ctx->clock_source->id, - link_setting); + &currnet_setting); if (stream->sink_patches.dppowerup_delay > 0) { int delay_dp_power_up_in_ms = stream->sink_patches.dppowerup_delay; @@ -1725,14 +1790,12 @@ bool perform_link_training_with_retries( panel_mode != DP_PANEL_MODE_DEFAULT); if (link->aux_access_disabled) { - dc_link_dp_perform_link_training_skip_aux(link, link_setting); + dc_link_dp_perform_link_training_skip_aux(link, &currnet_setting); return true; } else { - enum link_training_result status = LINK_TRAINING_CR_FAIL_LANE0; - status = dc_link_dp_perform_link_training( link, - link_setting, + &currnet_setting, skip_video_pattern); if (status == LINK_TRAINING_SUCCESS) return true; @@ -1740,7 +1803,7 @@ bool perform_link_training_with_retries( /* latest link training still fail, skip delay and keep PHY on */ - if (j == (attempts - 1)) + if (j == (attempts - 1) && link->ep_type == DISPLAY_ENDPOINT_PHY) break; DC_LOG_WARNING("%s: Link training attempt %u of %d failed\n", @@ -1748,6 +1811,19 @@ bool perform_link_training_with_retries( dp_disable_link_phy(link, signal); + /* Abort link training if failure due to sink being unplugged. */ + if (status == LINK_TRAINING_ABORT) + break; + else if (do_fallback) { + decide_fallback_link_setting(*link_setting, &currnet_setting, status); + /* Fail link training if reduced link bandwidth no longer meets + * stream requirements. 
+ */ + if (dc_bandwidth_in_kbps_from_timing(&stream->timing) < + dc_link_bandwidth_kbps(link, &currnet_setting)) + break; + } + msleep(delay_between_attempts); delay_between_attempts += LINK_TRAINING_RETRY_DELAY; @@ -1823,7 +1899,7 @@ enum link_training_result dc_link_dp_sync_lt_attempt( enum clock_source_id dp_cs_id = CLOCK_SOURCE_ID_EXTERNAL; bool fec_enable = false; - initialize_training_settings( + decide_training_settings( link, link_settings, lt_overrides, @@ -1893,6 +1969,24 @@ bool dc_link_dp_sync_lt_end(struct dc_link *link, bool link_down) return true; } +bool dc_link_dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_settings *max_link_enc_cap) +{ + if (!max_link_enc_cap) { + DC_LOG_ERROR("%s: Could not return max link encoder caps", __func__); + return false; + } + + if (link->link_enc->funcs->get_max_link_cap) { + link->link_enc->funcs->get_max_link_cap(link->link_enc, max_link_enc_cap); + return true; + } + + DC_LOG_ERROR("%s: Max link encoder caps unknown", __func__); + max_link_enc_cap->lane_count = 1; + max_link_enc_cap->link_rate = 6; + return false; +} + static struct dc_link_settings get_max_link_cap(struct dc_link *link) { struct dc_link_settings max_link_cap = {0}; @@ -2411,6 +2505,12 @@ bool dp_validate_mode_timing( const struct dc_link_settings *link_setting; + /* According to spec, VSC SDP should be used if pixel format is YCbCr420 */ + if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420 && + !link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED && + dal_graphics_object_id_get_connector_id(link->link_id) != CONNECTOR_ID_VIRTUAL) + return false; + /*always DP fail safe mode*/ if ((timing->pix_clk_100hz / 10) == (uint32_t) 25175 && timing->h_addressable == (uint32_t) 640 && @@ -2495,7 +2595,11 @@ bool decide_edp_link_settings(struct dc_link *link, struct dc_link_settings *lin struct dc_link_settings current_link_setting; uint32_t link_bw; - if (link->dpcd_caps.dpcd_rev.raw < DPCD_REV_14 || + /* + * edp_supported_link_rates_count is only valid for eDP v1.4 or higher. 
+ * Per VESA eDP spec, "The DPCD revision for eDP v1.4 is 13h" + */ + if (link->dpcd_caps.dpcd_rev.raw < DPCD_REV_13 || link->dpcd_caps.edp_supported_link_rates_count == 0) { *link_setting = link->verified_link_cap; return true; @@ -2593,13 +2697,11 @@ static bool allow_hpd_rx_irq(const struct dc_link *link) /* * Don't handle RX IRQ unless one of following is met: * 1) The link is established (cur_link_settings != unknown) - * 2) We kicked off MST detection - * 3) We know we're dealing with an active dongle + * 2) We know we're dealing with a branch device, SST or MST */ if ((link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) || - (link->type == dc_connection_mst_branch) || - is_dp_active_dongle(link)) + is_dp_branch_device(link)) return true; return false; @@ -2697,9 +2799,10 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link) union phy_test_pattern dpcd_test_pattern; union lane_adjust dpcd_lane_adjustment[2]; unsigned char dpcd_post_cursor_2_adjustment = 0; - unsigned char test_80_bit_pattern[ + unsigned char test_pattern_buffer[ (DP_TEST_80BIT_CUSTOM_PATTERN_79_72 - DP_TEST_80BIT_CUSTOM_PATTERN_7_0)+1] = {0}; + unsigned int test_pattern_size = 0; enum dp_test_pattern test_pattern; struct dc_link_training_settings link_settings; union lane_adjust dpcd_lane_adjust; @@ -2769,12 +2872,15 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link) break; } - if (test_pattern == DP_TEST_PATTERN_80BIT_CUSTOM) + if (test_pattern == DP_TEST_PATTERN_80BIT_CUSTOM) { + test_pattern_size = (DP_TEST_80BIT_CUSTOM_PATTERN_79_72 - + DP_TEST_80BIT_CUSTOM_PATTERN_7_0) + 1; core_link_read_dpcd( link, DP_TEST_80BIT_CUSTOM_PATTERN_7_0, - test_80_bit_pattern, - sizeof(test_80_bit_pattern)); + test_pattern_buffer, + test_pattern_size); + } /* prepare link training settings */ link_settings.link = link->cur_link_settings; @@ -2812,9 +2918,8 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link) test_pattern, DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED, &link_training_settings, - test_80_bit_pattern, - (DP_TEST_80BIT_CUSTOM_PATTERN_79_72 - - DP_TEST_80BIT_CUSTOM_PATTERN_7_0)+1); + test_pattern_buffer, + test_pattern_size); } static void dp_test_send_link_test_pattern(struct dc_link *link) @@ -2899,6 +3004,22 @@ static void dp_test_send_link_test_pattern(struct dc_link *link) break; } + switch (dpcd_test_params.bits.CLR_FORMAT) { + case 0: + pipe_ctx->stream->timing.pixel_encoding = PIXEL_ENCODING_RGB; + break; + case 1: + pipe_ctx->stream->timing.pixel_encoding = PIXEL_ENCODING_YCBCR422; + break; + case 2: + pipe_ctx->stream->timing.pixel_encoding = PIXEL_ENCODING_YCBCR444; + break; + default: + pipe_ctx->stream->timing.pixel_encoding = PIXEL_ENCODING_RGB; + break; + } + + if (requestColorDepth != COLOR_DEPTH_UNDEFINED && pipe_ctx->stream->timing.display_color_depth != requestColorDepth) { DC_LOG_DEBUG("%s: original bpc %d, changing to %d\n", @@ -2906,9 +3027,10 @@ static void dp_test_send_link_test_pattern(struct dc_link *link) pipe_ctx->stream->timing.display_color_depth, requestColorDepth); pipe_ctx->stream->timing.display_color_depth = requestColorDepth; - dp_update_dsc_config(pipe_ctx); } + dp_update_dsc_config(pipe_ctx); + dc_link_dp_set_test_pattern( link, test_pattern, @@ -3164,7 +3286,7 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd *out_link_loss = true; } - if (link->type == dc_connection_active_dongle && + if (link->type == dc_connection_sst_branch && hpd_irq_dpcd_data.bytes.sink_cnt.bits.SINK_COUNT != link->dpcd_sink_count) 
status = true; @@ -3215,6 +3337,12 @@ bool is_mst_supported(struct dc_link *link) bool is_dp_active_dongle(const struct dc_link *link) { + return (link->dpcd_caps.dongle_type >= DISPLAY_DONGLE_DP_VGA_CONVERTER) && + (link->dpcd_caps.dongle_type <= DISPLAY_DONGLE_DP_HDMI_CONVERTER); +} + +bool is_dp_branch_device(const struct dc_link *link) +{ return link->dpcd_caps.is_branch_dev; } @@ -3575,7 +3703,9 @@ static bool retrieve_link_cap(struct dc_link *link) lttpr_dpcd_data[DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT - DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; + /* Attempt to train in LTTPR transparent mode if repeater count exceeds 8. */ is_lttpr_present = (link->dpcd_caps.lttpr_caps.phy_repeater_cnt > 0 && + link->dpcd_caps.lttpr_caps.phy_repeater_cnt < 0xff && link->dpcd_caps.lttpr_caps.max_lane_count > 0 && link->dpcd_caps.lttpr_caps.max_lane_count <= 4 && link->dpcd_caps.lttpr_caps.revision.raw >= 0x14); @@ -3892,7 +4022,11 @@ void detect_edp_sink_caps(struct dc_link *link) link->dpcd_caps.edp_supported_link_rates_count = 0; memset(supported_link_rates, 0, sizeof(supported_link_rates)); - if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14 && + /* + * edp_supported_link_rates_count is only valid for eDP v1.4 or higher. + * Per VESA eDP spec, "The DPCD revision for eDP v1.4 is 13h" + */ + if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_13 && (link->dc->debug.optimize_edp_link_rate || link->reported_link_cap.link_rate == LINK_RATE_UNKNOWN)) { // Read DPCD 00010h - 0001Fh 16 bytes at one shot @@ -4766,4 +4900,11 @@ bool is_edp_ilr_optimization_required(struct dc_link *link, struct dc_crtc_timin return false; } +enum dp_link_encoding dp_get_link_encoding_format(const struct dc_link_settings *link_settings) +{ + if ((link_settings->link_rate >= LINK_RATE_LOW) && + (link_settings->link_rate <= LINK_RATE_HIGH3)) + return DP_8b_10b_ENCODING; + return DP_UNKNOWN_ENCODING; +} diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c index 1361b87d86d7..1a89d565c92e 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c @@ -112,8 +112,8 @@ static void update_link_enc_assignment( /* Return first available DIG link encoder. 
*/ static enum engine_id find_first_avail_link_enc( - struct dc_context *ctx, - struct dc_state *state) + const struct dc_context *ctx, + const struct dc_state *state) { enum engine_id eng_id = ENGINE_ID_UNKNOWN; int i; @@ -270,7 +270,7 @@ struct dc_link *link_enc_cfg_get_link_using_link_enc( struct link_encoder *link_enc_cfg_get_link_enc_used_by_link( struct dc_state *state, - struct dc_link *link) + const struct dc_link *link) { struct link_encoder *link_enc = NULL; struct display_endpoint_id ep_id; @@ -296,8 +296,20 @@ struct link_encoder *link_enc_cfg_get_link_enc_used_by_link( if (stream_idx != -1) link_enc = state->streams[stream_idx]->link_enc; - else - dm_output_to_console("%s: No link encoder used by link(%d).\n", __func__, link->link_index); + + return link_enc; +} + +struct link_encoder *link_enc_cfg_get_next_avail_link_enc( + const struct dc *dc, + const struct dc_state *state) +{ + struct link_encoder *link_enc = NULL; + enum engine_id eng_id = ENGINE_ID_UNKNOWN; + + eng_id = find_first_avail_link_enc(dc->ctx, state); + if (eng_id != ENGINE_ID_UNKNOWN) + link_enc = dc->res_pool->link_encoders[eng_id - ENGINE_ID_DIGA]; return link_enc; } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c index b426f878fb99..13c5c4a34a58 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c @@ -384,7 +384,8 @@ void dp_retrain_link_dp_test(struct dc_link *link, skip_video_pattern, LINK_TRAINING_ATTEMPTS, &pipes[i], - SIGNAL_TYPE_DISPLAY_PORT); + SIGNAL_TYPE_DISPLAY_PORT, + false); link->dc->hwss.enable_stream(&pipes[i]); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 8cb937c046aa..8a158a192ec0 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -57,6 +57,7 @@ #include "dcn30/dcn30_resource.h" #include "dcn301/dcn301_resource.h" #include "dcn302/dcn302_resource.h" +#include "dcn303/dcn303_resource.h" #endif #define DC_LOGGER_INIT(logger) @@ -130,6 +131,8 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id) dc_version = DCN_VERSION_3_0; if (ASICREV_IS_DIMGREY_CAVEFISH_P(asic_id.hw_internal_rev)) dc_version = DCN_VERSION_3_02; + if (ASICREV_IS_BEIGE_GOBY_P(asic_id.hw_internal_rev)) + dc_version = DCN_VERSION_3_03; break; case FAMILY_VGH: @@ -216,6 +219,9 @@ struct resource_pool *dc_create_resource_pool(struct dc *dc, case DCN_VERSION_3_02: res_pool = dcn302_create_resource_pool(init_data, dc); break; + case DCN_VERSION_3_03: + res_pool = dcn303_create_resource_pool(init_data, dc); + break; #endif default: break; @@ -1706,12 +1712,6 @@ static bool is_timing_changed(struct dc_stream_state *cur_stream, if (cur_stream == NULL) return true; - /* If sink pointer changed, it means this is a hotplug, we should do - * full hw setting. 
- */ - if (cur_stream->sink != new_stream->sink) - return true; - /* If output color space is changed, need to reprogram info frames */ if (cur_stream->output_color_space != new_stream->output_color_space) return true; @@ -2679,6 +2679,7 @@ void dc_resource_state_destruct(struct dc_state *context) dc_stream_release(context->streams[i]); context->streams[i] = NULL; } + context->stream_count = 0; } void dc_resource_state_copy_construct( diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 100d434f7a03..7da5e7a2e88d 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -45,7 +45,7 @@ /* forward declaration */ struct aux_payload; -#define DC_VER "3.2.132" +#define DC_VER "3.2.136" #define MAX_SURFACES 3 #define MAX_PLANES 6 @@ -600,7 +600,6 @@ struct dc_bounding_box_overrides { int min_dcfclk_mhz; }; -struct dc_state; struct resource_pool; struct dce_hwseq; struct gpu_info_soc_bounding_box_v1_0; @@ -719,7 +718,6 @@ void dc_init_callbacks(struct dc *dc, void dc_deinit_callbacks(struct dc *dc); void dc_destroy(struct dc **dc); -void dc_wait_for_vblank(struct dc *dc, struct dc_stream_state *stream); /******************************************************************************* * Surface Interfaces ******************************************************************************/ diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c index 6b72af2b3f4c..c5dc3a947020 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -180,5 +180,5 @@ bool dc_dmub_srv_get_dmub_outbox0_msg(const struct dc *dc, struct dmcub_trace_bu void dc_dmub_trace_event_control(struct dc *dc, bool enable) { - dm_helpers_dmub_outbox0_interrupt_control(dc->ctx, enable); + dm_helpers_dmub_outbox_interrupt_control(dc->ctx, enable); } diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h index cc6fb838420e..1948cd9427d7 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h @@ -95,6 +95,12 @@ enum dc_dp_training_pattern { DP_TRAINING_PATTERN_SEQUENCE_2, DP_TRAINING_PATTERN_SEQUENCE_3, DP_TRAINING_PATTERN_SEQUENCE_4, + DP_TRAINING_PATTERN_VIDEOIDLE, +}; + +enum dp_link_encoding { + DP_UNKNOWN_ENCODING = 0, + DP_8b_10b_ENCODING = 1, }; struct dc_link_settings { diff --git a/drivers/gpu/drm/amd/display/dc/dc_dsc.h b/drivers/gpu/drm/amd/display/dc/dc_dsc.h index c51d2d961b7a..16cc76ce3739 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dsc.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dsc.h @@ -78,7 +78,8 @@ bool dc_dsc_compute_config( const struct dc_crtc_timing *timing, struct dc_dsc_config *dsc_cfg); -uint32_t dc_dsc_stream_bandwidth_in_kbps(uint32_t pix_clk_100hz, uint32_t bpp_x16); +uint32_t dc_dsc_stream_bandwidth_in_kbps(const struct dc_crtc_timing *timing, + uint32_t bpp_x16, uint32_t num_slices_h, bool is_dp); void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing, uint32_t max_target_bpp_limit_override_x16, @@ -88,6 +89,6 @@ void dc_dsc_policy_set_max_target_bpp_limit(uint32_t limit); void dc_dsc_policy_set_enable_dsc_when_not_needed(bool enable); -uint32_t dc_dsc_stream_bandwidth_in_kbps(uint32_t pix_clk_100hz, uint32_t bpp_x16); +void dc_dsc_policy_set_disable_dsc_stream_overhead(bool disable); #endif diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h index 
bcec019efa6f..04957a9efab2 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h @@ -739,6 +739,7 @@ struct dc_dsc_config { uint32_t version_minor; /* DSC minor version. Full version is formed as 1.version_minor. */ bool ycbcr422_simple; /* Tell DSC engine to convert YCbCr 4:2:2 to 'YCbCr 4:2:2 simple'. */ int32_t rc_buffer_size; /* DSC RC buffer block size in bytes */ + bool is_dp; /* indicate if DSC is applied based on DP's capability */ }; struct dc_crtc_timing { uint32_t h_total; diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h index 054bab45ee17..c871923e7db0 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_link.h +++ b/drivers/gpu/drm/amd/display/dc/dc_link.h @@ -36,12 +36,6 @@ enum dc_link_fec_state { dc_link_fec_enabled }; -enum lttpr_mode { - LTTPR_MODE_NON_LTTPR, - LTTPR_MODE_TRANSPARENT, - LTTPR_MODE_NON_TRANSPARENT, -}; - struct dc_link_status { bool link_active; struct dpcd_caps *dpcd_caps; @@ -113,6 +107,7 @@ struct dc_link { /* TODO: Rename. Flag an endpoint as having a programmable mapping to a * DIG encoder. */ bool is_dig_mapping_flexible; + bool hpd_status; /* HPD status of link without physical HPD pin. */ bool edp_sink_present; @@ -345,6 +340,8 @@ bool dc_link_dp_set_test_pattern( const unsigned char *p_custom_pattern, unsigned int cust_pattern_size); +bool dc_link_dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_settings *max_link_enc_cap); + void dc_link_enable_hpd_filter(struct dc_link *link, bool enable); bool dc_link_is_dp_sink_present(struct dc_link *link); @@ -361,9 +358,6 @@ bool dc_link_is_hdcp22(struct dc_link *link, enum signal_type signal); void dc_link_set_drive_settings(struct dc *dc, struct link_training_settings *lt_settings, const struct dc_link *link); -void dc_link_perform_link_training(struct dc *dc, - struct dc_link_settings *link_setting, - bool skip_video_pattern); void dc_link_set_preferred_link_settings(struct dc *dc, struct dc_link_settings *link_setting, struct dc_link *link); diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index 432754eaf10b..535da8db70b6 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -404,7 +404,7 @@ enum dc_connection_type { dc_connection_none, dc_connection_single, dc_connection_mst_branch, - dc_connection_active_dongle + dc_connection_sst_branch }; struct dc_csc_adjustments { @@ -909,6 +909,7 @@ struct dsc_dec_dpcd_caps { uint32_t branch_overall_throughput_0_mps; /* In MPs */ uint32_t branch_overall_throughput_1_mps; /* In MPs */ uint32_t branch_max_line_width; + bool is_dp; }; struct dc_golden_table { diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c index 87d57e81de12..83d97dfe328f 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c @@ -595,6 +595,25 @@ int dce_aux_transfer_raw(struct ddc_service *ddc, return res; } +int dce_aux_transfer_dmub_raw(struct ddc_service *ddc, + struct aux_payload *payload, + enum aux_return_code_type *operation_result) +{ + struct ddc *ddc_pin = ddc->ddc_pin; + + if (ddc_pin != NULL) { + struct dce_aux *aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]; + /* XXX: Workaround to configure ddc channels for aux transactions */ + if (!acquire(aux_engine, ddc_pin)) { + *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE; + return -1; + } + 
release_engine(aux_engine); + } + + return dm_helper_dmub_aux_transfer_sync(ddc->ctx, ddc->link, payload, operation_result); +} + #define AUX_MAX_RETRIES 7 #define AUX_MAX_DEFER_RETRIES 7 #define AUX_MAX_I2C_DEFER_RETRIES 7 diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h index 566b1bddd8cc..e69f1899fbf0 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h @@ -304,6 +304,9 @@ int dce_aux_transfer_raw(struct ddc_service *ddc, struct aux_payload *cmd, enum aux_return_code_type *operation_result); +int dce_aux_transfer_dmub_raw(struct ddc_service *ddc, + struct aux_payload *payload, + enum aux_return_code_type *operation_result); bool dce_aux_transfer_with_retries(struct ddc_service *ddc, struct aux_payload *cmd); diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h index 7fe5a07e2233..3139285bd403 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h @@ -142,6 +142,15 @@ SRII(PIXEL_RATE_CNTL, OTG, 3),\ SRII(PIXEL_RATE_CNTL, OTG, 4) +#define CS_COMMON_REG_LIST_DCN3_03(index, pllid) \ + SRI(PIXCLK_RESYNC_CNTL, PHYPLL, pllid),\ + SRII(PHASE, DP_DTO, 0),\ + SRII(PHASE, DP_DTO, 1),\ + SRII(MODULO, DP_DTO, 0),\ + SRII(MODULO, DP_DTO, 1),\ + SRII(PIXEL_RATE_CNTL, OTG, 0),\ + SRII(PIXEL_RATE_CNTL, OTG, 1) + #endif #define CS_COMMON_MASK_SH_LIST_DCN2_0(mask_sh)\ CS_SF(DP_DTO0_PHASE, DP_DTO0_PHASE, mask_sh),\ diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h index fe31abfa6c85..0db1bad7a93c 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h @@ -480,6 +480,35 @@ SR(AZALIA_AUDIO_DTO), \ SR(AZALIA_CONTROLLER_CLOCK_GATING) +#define HWSEQ_DCN303_REG_LIST() \ + HWSEQ_DCN_REG_LIST(), \ + HSWEQ_DCN_PIXEL_RATE_REG_LIST(OTG, 0), \ + HSWEQ_DCN_PIXEL_RATE_REG_LIST(OTG, 1), \ + SR(MICROSECOND_TIME_BASE_DIV), \ + SR(MILLISECOND_TIME_BASE_DIV), \ + SR(DISPCLK_FREQ_CHANGE_CNTL), \ + SR(RBBMIF_TIMEOUT_DIS), \ + SR(RBBMIF_TIMEOUT_DIS_2), \ + SR(DCHUBBUB_CRC_CTRL), \ + SR(DPP_TOP0_DPP_CRC_CTRL), \ + SR(DPP_TOP0_DPP_CRC_VAL_B_A), \ + SR(DPP_TOP0_DPP_CRC_VAL_R_G), \ + SR(MPC_CRC_CTRL), \ + SR(MPC_CRC_RESULT_GB), \ + SR(MPC_CRC_RESULT_C), \ + SR(MPC_CRC_RESULT_AR), \ + SR(D1VGA_CONTROL), \ + SR(D2VGA_CONTROL), \ + SR(D3VGA_CONTROL), \ + SR(D4VGA_CONTROL), \ + SR(D5VGA_CONTROL), \ + SR(D6VGA_CONTROL), \ + HWSEQ_PIXEL_RATE_REG_LIST_303(OTG), \ + HWSEQ_PHYPLL_REG_LIST_303(OTG), \ + SR(AZALIA_AUDIO_DTO), \ + SR(AZALIA_CONTROLLER_CLOCK_GATING), \ + SR(HPO_TOP_CLOCK_CONTROL) + #define HWSEQ_PIXEL_RATE_REG_LIST_302(blk) \ SRII(PIXEL_RATE_CNTL, blk, 0), \ SRII(PIXEL_RATE_CNTL, blk, 1),\ @@ -494,6 +523,14 @@ SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 3), \ SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 4) +#define HWSEQ_PIXEL_RATE_REG_LIST_303(blk) \ + SRII(PIXEL_RATE_CNTL, blk, 0), \ + SRII(PIXEL_RATE_CNTL, blk, 1) + +#define HWSEQ_PHYPLL_REG_LIST_303(blk) \ + SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 0), \ + SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 1) + struct dce_hwseq_registers { uint32_t DCFE_CLOCK_CONTROL[6]; uint32_t DCFEV_CLOCK_CONTROL; @@ -934,6 +971,12 @@ struct dce_hwseq_registers { HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \ HWS_SF(, AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, mask_sh) +#define HWSEQ_DCN303_MASK_SH_LIST(mask_sh) \ + HWSEQ_DCN_MASK_SH_LIST(mask_sh), \ + 
HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \ + HWS_SF(, AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, mask_sh), \ + HWS_SF(, HPO_TOP_CLOCK_CONTROL, HPO_HDMISTREAMCLK_GATE_DIS, mask_sh) + #define HWSEQ_REG_FIELD_LIST(type) \ type DCFE_CLOCK_ENABLE; \ type DCFEV_CLOCK_ENABLE; \ diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c index eb1698d54a48..6939ca2e8212 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c @@ -56,11 +56,19 @@ static void dmub_abm_enable_fractional_pwm(struct dc_context *dc) { union dmub_rb_cmd cmd; uint32_t fractional_pwm = (dc->dc->config.disable_fractional_pwm == false) ? 1 : 0; + uint32_t edp_id_count = dc->dc_edp_id_count; + int i; + uint8_t panel_mask = 0; + + for (i = 0; i < edp_id_count; i++) + panel_mask |= 0x01 << i; memset(&cmd, 0, sizeof(cmd)); cmd.abm_set_pwm_frac.header.type = DMUB_CMD__ABM; cmd.abm_set_pwm_frac.header.sub_type = DMUB_CMD__ABM_SET_PWM_FRAC; cmd.abm_set_pwm_frac.abm_set_pwm_frac_data.fractional_pwm = fractional_pwm; + cmd.abm_set_pwm_frac.abm_set_pwm_frac_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1; + cmd.abm_set_pwm_frac.abm_set_pwm_frac_data.panel_mask = panel_mask; cmd.abm_set_pwm_frac.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_pwm_frac_data); dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); @@ -135,11 +143,24 @@ static bool dmub_abm_set_level(struct abm *abm, uint32_t level) { union dmub_rb_cmd cmd; struct dc_context *dc = abm->ctx; + struct dc_link *edp_links[MAX_NUM_EDP]; + int i; + int edp_num; + uint8_t panel_mask = 0; + + get_edp_links(dc->dc, edp_links, &edp_num); + + for (i = 0; i < edp_num; i++) { + if (edp_links[i]->link_status.link_active) + panel_mask |= (0x01 << i); + } memset(&cmd, 0, sizeof(cmd)); cmd.abm_set_level.header.type = DMUB_CMD__ABM; cmd.abm_set_level.header.sub_type = DMUB_CMD__ABM_SET_LEVEL; cmd.abm_set_level.abm_set_level_data.level = level; + cmd.abm_set_level.abm_set_level_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1; + cmd.abm_set_level.abm_set_level_data.panel_mask = panel_mask; cmd.abm_set_level.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_level_data); dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); @@ -155,6 +176,12 @@ static bool dmub_abm_init_config(struct abm *abm, { union dmub_rb_cmd cmd; struct dc_context *dc = abm->ctx; + uint32_t edp_id_count = dc->dc_edp_id_count; + int i; + uint8_t panel_mask = 0; + + for (i = 0; i < edp_id_count; i++) + panel_mask |= 0x01 << i; // TODO: Optimize by only reading back final 4 bytes dmub_flush_buffer_mem(&dc->dmub_srv->dmub->scratch_mem_fb); @@ -168,6 +195,9 @@ static bool dmub_abm_init_config(struct abm *abm, cmd.abm_init_config.header.sub_type = DMUB_CMD__ABM_INIT_CONFIG; cmd.abm_init_config.abm_init_config_data.src.quad_part = dc->dmub_srv->dmub->scratch_mem_fb.gpu_addr; cmd.abm_init_config.abm_init_config_data.bytes = bytes; + cmd.abm_init_config.abm_init_config_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1; + cmd.abm_init_config.abm_init_config_data.panel_mask = panel_mask; + cmd.abm_init_config.header.payload_bytes = sizeof(struct dmub_cmd_abm_init_config_data); dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c index efa86d5c6847..0bd8de4c73a9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c @@ -496,10 +496,13 @@ 
static enum lb_memory_config dpp1_dscl_find_lb_memory_config(struct dcn10_dpp *d int vtaps_c = scl_data->taps.v_taps_c; int ceil_vratio = dc_fixpt_ceil(scl_data->ratios.vert); int ceil_vratio_c = dc_fixpt_ceil(scl_data->ratios.vert_c); - enum lb_memory_config mem_cfg = LB_MEMORY_CONFIG_0; - if (dpp->base.ctx->dc->debug.use_max_lb) - return mem_cfg; + if (dpp->base.ctx->dc->debug.use_max_lb) { + if (scl_data->format == PIXEL_FORMAT_420BPP8 + || scl_data->format == PIXEL_FORMAT_420BPP10) + return LB_MEMORY_CONFIG_3; + return LB_MEMORY_CONFIG_0; + } dpp->base.caps->dscl_calc_lb_num_partitions( scl_data, LB_MEMORY_CONFIG_1, &num_part_y, &num_part_c); @@ -650,33 +653,50 @@ static void dpp1_dscl_set_manual_ratio_init( } } - - -static void dpp1_dscl_set_recout( - struct dcn10_dpp *dpp, const struct rect *recout) +/** + * dpp1_dscl_set_recout - Set the first pixel of RECOUT in the OTG active area + * + * @dpp: DPP data struct + * @recout: Rectangle information + * + * This function sets the MPC RECOUT_START and RECOUT_SIZE registers based on + * the values specified in the recout parameter. + * + * Note: This function only has an effect if AutoCal is disabled. + */ +static void dpp1_dscl_set_recout(struct dcn10_dpp *dpp, + const struct rect *recout) { int visual_confirm_on = 0; if (dpp->base.ctx->dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE) visual_confirm_on = 1; REG_SET_2(RECOUT_START, 0, - /* First pixel of RECOUT */ - RECOUT_START_X, recout->x, - /* First line of RECOUT */ - RECOUT_START_Y, recout->y); + /* First pixel of RECOUT in the active OTG area */ + RECOUT_START_X, recout->x, + /* First line of RECOUT in the active OTG area */ + RECOUT_START_Y, recout->y); REG_SET_2(RECOUT_SIZE, 0, - /* Number of RECOUT horizontal pixels */ - RECOUT_WIDTH, recout->width, - /* Number of RECOUT vertical lines */ - RECOUT_HEIGHT, recout->height + /* Number of RECOUT horizontal pixels */ + RECOUT_WIDTH, recout->width, + /* Number of RECOUT vertical lines */ + RECOUT_HEIGHT, recout->height - visual_confirm_on * 2 * (dpp->base.inst + 1)); } -/* Main function to program scaler and line buffer in manual scaling mode */ -void dpp1_dscl_set_scaler_manual_scale( - struct dpp *dpp_base, - const struct scaler_data *scl_data) +/** + * dpp1_dscl_set_scaler_manual_scale - Manually program scaler and line buffer + * + * @dpp_base: High level DPP struct + * @scl_data: scaler_data info + * + * This is the primary function to program the scaler and line buffer in manual + * scaling mode. To execute the required operations for manual scale, we need + * to disable AutoCal first. + */ +void dpp1_dscl_set_scaler_manual_scale(struct dpp *dpp_base, + const struct scaler_data *scl_data) { enum lb_memory_config lb_config; struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c index 714c71a5fbde..e39e8a2f715d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c @@ -1226,6 +1226,14 @@ void hubp1_cursor_set_position( /* TODO Handle surface pixel formats other than 4:4:4 */ } +/** + * hubp1_clk_cntl - Disable or enable clocks for DCHUBP + * + * @hubp: hubp struct reference. + * @enable: Set true to enable the clock, false to gate it. + * + * When enabling/disabling the DCHUBP clock, we affect dcfclk/dppclk. 
+ */ void hubp1_clk_cntl(struct hubp *hubp, bool enable) { struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp); @@ -1257,6 +1265,11 @@ void hubp1_soft_reset(struct hubp *hubp, bool reset) REG_UPDATE(DCHUBP_CNTL, HUBP_DISABLE, reset ? 1 : 0); } +/** + * hubp1_set_flip_int - Enable surface flip interrupt + * + * @hubp: hubp struct reference. + */ void hubp1_set_flip_int(struct hubp *hubp) { struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 7c939c0a977b..81803463ca9b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -72,6 +72,9 @@ #define GAMMA_HW_POINTS_NUM 256 +#define PGFSM_POWER_ON 0 +#define PGFSM_POWER_OFF 2 + void print_microsec(struct dc_context *dc_ctx, struct dc_log_buffer_ctx *log_ctx, uint32_t ref_cycle) @@ -536,13 +539,22 @@ void dcn10_disable_vga( REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1); } +/** + * dcn10_dpp_pg_control - DPP power gate control. + * + * @hws: dce_hwseq reference. + * @dpp_inst: DPP instance reference. + * @power_on: true to power on the DPP (power gate disabled), false to power it down. + * + * Enable or disable power gating for the specific DPP instance. + */ void dcn10_dpp_pg_control( struct dce_hwseq *hws, unsigned int dpp_inst, bool power_on) { uint32_t power_gate = power_on ? 0 : 1; - uint32_t pwr_status = power_on ? 0 : 2; + uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF; if (hws->ctx->dc->debug.disable_dpp_power_gate) return; @@ -588,13 +600,22 @@ void dcn10_dpp_pg_control( } } +/** + * dcn10_hubp_pg_control - HUBP power gate control. + * + * @hws: dce_hwseq reference. + * @hubp_inst: HUBP instance reference. + * @power_on: true to power on the HUBP (power gate disabled), false to power it down. + * + * Enable or disable power gating for the specific HUBP instance. + */ void dcn10_hubp_pg_control( struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on) { uint32_t power_gate = power_on ? 0 : 1; - uint32_t pwr_status = power_on ? 0 : 2; + uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF; if (hws->ctx->dc->debug.disable_hubp_power_gate) return; @@ -1078,6 +1099,19 @@ void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx) hws->funcs.verify_allow_pstate_change_high(dc); } +/** + * dcn10_plane_atomic_power_down - Power down plane components. + * + * @dc: dc struct reference, used to grab hwseq. + * @dpp: dpp struct reference. + * @hubp: hubp struct reference. + * + * Keep in mind that this operation requires a power gate configuration; + * however, requests to switch the power gate are precisely controlled to avoid + * problems. For this reason, the power gate request is usually disabled. This + * function first enables the power gate request, then disables DPP + * and HUBP, and finally disables the power gate request again. 
+ */ void dcn10_plane_atomic_power_down(struct dc *dc, struct dpp *dpp, struct hubp *hubp) @@ -2165,129 +2199,6 @@ void dcn10_enable_per_frame_crtc_position_reset( DC_SYNC_INFO("Multi-display sync is complete\n"); } -/*static void print_rq_dlg_ttu( - struct dc *dc, - struct pipe_ctx *pipe_ctx) -{ - DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger, - "\n============== DML TTU Output parameters [%d] ==============\n" - "qos_level_low_wm: %d, \n" - "qos_level_high_wm: %d, \n" - "min_ttu_vblank: %d, \n" - "qos_level_flip: %d, \n" - "refcyc_per_req_delivery_l: %d, \n" - "qos_level_fixed_l: %d, \n" - "qos_ramp_disable_l: %d, \n" - "refcyc_per_req_delivery_pre_l: %d, \n" - "refcyc_per_req_delivery_c: %d, \n" - "qos_level_fixed_c: %d, \n" - "qos_ramp_disable_c: %d, \n" - "refcyc_per_req_delivery_pre_c: %d\n" - "=============================================================\n", - pipe_ctx->pipe_idx, - pipe_ctx->ttu_regs.qos_level_low_wm, - pipe_ctx->ttu_regs.qos_level_high_wm, - pipe_ctx->ttu_regs.min_ttu_vblank, - pipe_ctx->ttu_regs.qos_level_flip, - pipe_ctx->ttu_regs.refcyc_per_req_delivery_l, - pipe_ctx->ttu_regs.qos_level_fixed_l, - pipe_ctx->ttu_regs.qos_ramp_disable_l, - pipe_ctx->ttu_regs.refcyc_per_req_delivery_pre_l, - pipe_ctx->ttu_regs.refcyc_per_req_delivery_c, - pipe_ctx->ttu_regs.qos_level_fixed_c, - pipe_ctx->ttu_regs.qos_ramp_disable_c, - pipe_ctx->ttu_regs.refcyc_per_req_delivery_pre_c - ); - - DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger, - "\n============== DML DLG Output parameters [%d] ==============\n" - "refcyc_h_blank_end: %d, \n" - "dlg_vblank_end: %d, \n" - "min_dst_y_next_start: %d, \n" - "refcyc_per_htotal: %d, \n" - "refcyc_x_after_scaler: %d, \n" - "dst_y_after_scaler: %d, \n" - "dst_y_prefetch: %d, \n" - "dst_y_per_vm_vblank: %d, \n" - "dst_y_per_row_vblank: %d, \n" - "ref_freq_to_pix_freq: %d, \n" - "vratio_prefetch: %d, \n" - "refcyc_per_pte_group_vblank_l: %d, \n" - "refcyc_per_meta_chunk_vblank_l: %d, \n" - "dst_y_per_pte_row_nom_l: %d, \n" - "refcyc_per_pte_group_nom_l: %d, \n", - pipe_ctx->pipe_idx, - pipe_ctx->dlg_regs.refcyc_h_blank_end, - pipe_ctx->dlg_regs.dlg_vblank_end, - pipe_ctx->dlg_regs.min_dst_y_next_start, - pipe_ctx->dlg_regs.refcyc_per_htotal, - pipe_ctx->dlg_regs.refcyc_x_after_scaler, - pipe_ctx->dlg_regs.dst_y_after_scaler, - pipe_ctx->dlg_regs.dst_y_prefetch, - pipe_ctx->dlg_regs.dst_y_per_vm_vblank, - pipe_ctx->dlg_regs.dst_y_per_row_vblank, - pipe_ctx->dlg_regs.ref_freq_to_pix_freq, - pipe_ctx->dlg_regs.vratio_prefetch, - pipe_ctx->dlg_regs.refcyc_per_pte_group_vblank_l, - pipe_ctx->dlg_regs.refcyc_per_meta_chunk_vblank_l, - pipe_ctx->dlg_regs.dst_y_per_pte_row_nom_l, - pipe_ctx->dlg_regs.refcyc_per_pte_group_nom_l - ); - - DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger, - "\ndst_y_per_meta_row_nom_l: %d, \n" - "refcyc_per_meta_chunk_nom_l: %d, \n" - "refcyc_per_line_delivery_pre_l: %d, \n" - "refcyc_per_line_delivery_l: %d, \n" - "vratio_prefetch_c: %d, \n" - "refcyc_per_pte_group_vblank_c: %d, \n" - "refcyc_per_meta_chunk_vblank_c: %d, \n" - "dst_y_per_pte_row_nom_c: %d, \n" - "refcyc_per_pte_group_nom_c: %d, \n" - "dst_y_per_meta_row_nom_c: %d, \n" - "refcyc_per_meta_chunk_nom_c: %d, \n" - "refcyc_per_line_delivery_pre_c: %d, \n" - "refcyc_per_line_delivery_c: %d \n" - "========================================================\n", - pipe_ctx->dlg_regs.dst_y_per_meta_row_nom_l, - pipe_ctx->dlg_regs.refcyc_per_meta_chunk_nom_l, - pipe_ctx->dlg_regs.refcyc_per_line_delivery_pre_l, - pipe_ctx->dlg_regs.refcyc_per_line_delivery_l, - 
pipe_ctx->dlg_regs.vratio_prefetch_c, - pipe_ctx->dlg_regs.refcyc_per_pte_group_vblank_c, - pipe_ctx->dlg_regs.refcyc_per_meta_chunk_vblank_c, - pipe_ctx->dlg_regs.dst_y_per_pte_row_nom_c, - pipe_ctx->dlg_regs.refcyc_per_pte_group_nom_c, - pipe_ctx->dlg_regs.dst_y_per_meta_row_nom_c, - pipe_ctx->dlg_regs.refcyc_per_meta_chunk_nom_c, - pipe_ctx->dlg_regs.refcyc_per_line_delivery_pre_c, - pipe_ctx->dlg_regs.refcyc_per_line_delivery_c - ); - - DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger, - "\n============== DML RQ Output parameters [%d] ==============\n" - "chunk_size: %d \n" - "min_chunk_size: %d \n" - "meta_chunk_size: %d \n" - "min_meta_chunk_size: %d \n" - "dpte_group_size: %d \n" - "mpte_group_size: %d \n" - "swath_height: %d \n" - "pte_row_height_linear: %d \n" - "========================================================\n", - pipe_ctx->pipe_idx, - pipe_ctx->rq_regs.rq_regs_l.chunk_size, - pipe_ctx->rq_regs.rq_regs_l.min_chunk_size, - pipe_ctx->rq_regs.rq_regs_l.meta_chunk_size, - pipe_ctx->rq_regs.rq_regs_l.min_meta_chunk_size, - pipe_ctx->rq_regs.rq_regs_l.dpte_group_size, - pipe_ctx->rq_regs.rq_regs_l.mpte_group_size, - pipe_ctx->rq_regs.rq_regs_l.swath_height, - pipe_ctx->rq_regs.rq_regs_l.pte_row_height_linear - ); -} -*/ - static void mmhub_read_vm_system_aperture_settings(struct dcn10_hubp *hubp1, struct vm_system_aperture_param *apt, struct dce_hwseq *hws) @@ -2395,43 +2306,6 @@ static void dcn10_enable_plane( pipe_ctx->stream_res.opp, true); -/* TODO: enable/disable in dm as per update type. - if (plane_state) { - DC_LOG_DC(dc->ctx->logger, - "Pipe:%d 0x%x: addr hi:0x%x, " - "addr low:0x%x, " - "src: %d, %d, %d," - " %d; dst: %d, %d, %d, %d;\n", - pipe_ctx->pipe_idx, - plane_state, - plane_state->address.grph.addr.high_part, - plane_state->address.grph.addr.low_part, - plane_state->src_rect.x, - plane_state->src_rect.y, - plane_state->src_rect.width, - plane_state->src_rect.height, - plane_state->dst_rect.x, - plane_state->dst_rect.y, - plane_state->dst_rect.width, - plane_state->dst_rect.height); - - DC_LOG_DC(dc->ctx->logger, - "Pipe %d: width, height, x, y format:%d\n" - "viewport:%d, %d, %d, %d\n" - "recout: %d, %d, %d, %d\n", - pipe_ctx->pipe_idx, - plane_state->format, - pipe_ctx->plane_res.scl_data.viewport.width, - pipe_ctx->plane_res.scl_data.viewport.height, - pipe_ctx->plane_res.scl_data.viewport.x, - pipe_ctx->plane_res.scl_data.viewport.y, - pipe_ctx->plane_res.scl_data.recout.width, - pipe_ctx->plane_res.scl_data.recout.height, - pipe_ctx->plane_res.scl_data.recout.x, - pipe_ctx->plane_res.scl_data.recout.y); - print_rq_dlg_ttu(dc, pipe_ctx); - } -*/ if (dc->config.gpu_vm_support) dcn10_program_pte_vm(hws, pipe_ctx->plane_res.hubp); @@ -2628,9 +2502,25 @@ static void dcn10_update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params); } -void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) +void dcn10_update_visual_confirm_color(struct dc *dc, struct pipe_ctx *pipe_ctx, struct tg_color *color, int mpcc_id) { struct dce_hwseq *hws = dc->hwseq; + struct mpc *mpc = dc->res_pool->mpc; + + if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) + hws->funcs.get_hdr_visual_confirm_color(pipe_ctx, color); + else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) + hws->funcs.get_surface_visual_confirm_color(pipe_ctx, color); + else + color_space_to_black_color( + dc, pipe_ctx->stream->output_color_space, color); + + if (mpc->funcs->set_bg_color) + mpc->funcs->set_bg_color(mpc, color, mpcc_id); +} + +void 
dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) +{ struct hubp *hubp = pipe_ctx->plane_res.hubp; struct mpcc_blnd_cfg blnd_cfg = {{0}}; bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe; @@ -2639,18 +2529,6 @@ void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) struct mpc *mpc = dc->res_pool->mpc; struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params); - if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) { - hws->funcs.get_hdr_visual_confirm_color( - pipe_ctx, &blnd_cfg.black_color); - } else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) { - hws->funcs.get_surface_visual_confirm_color( - pipe_ctx, &blnd_cfg.black_color); - } else { - color_space_to_black_color( - dc, pipe_ctx->stream->output_color_space, - &blnd_cfg.black_color); - } - if (per_pixel_alpha) blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA; else @@ -2682,6 +2560,8 @@ void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) */ mpcc_id = hubp->inst; + dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id); + /* If there is no full update, don't need to touch MPC tree*/ if (!pipe_ctx->plane_state->update_flags.bits.full_update) { mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id); @@ -2956,35 +2836,6 @@ void dcn10_program_pipe( { struct dce_hwseq *hws = dc->hwseq; - if (pipe_ctx->plane_state->update_flags.bits.full_update) - dcn10_enable_plane(dc, pipe_ctx, context); - - dcn10_update_dchubp_dpp(dc, pipe_ctx, context); - - hws->funcs.set_hdr_multiplier(pipe_ctx); - - if (pipe_ctx->plane_state->update_flags.bits.full_update || - pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change || - pipe_ctx->plane_state->update_flags.bits.gamma_change) - hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state); - - /* dcn10_translate_regamma_to_hw_format takes 750us to finish - * only do gamma programming for full update. 
- * TODO: This can be further optimized/cleaned up - * Always call this for now since it does memcmp inside before - * doing heavy calculation and programming - */ - if (pipe_ctx->plane_state->update_flags.bits.full_update) - hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream); -} - -static void dcn10_program_all_pipe_in_tree( - struct dc *dc, - struct pipe_ctx *pipe_ctx, - struct dc_state *context) -{ - struct dce_hwseq *hws = dc->hwseq; - if (pipe_ctx->top_pipe == NULL) { bool blank = !is_pipe_tree_visible(pipe_ctx); @@ -3004,35 +2855,26 @@ static void dcn10_program_all_pipe_in_tree( hws->funcs.blank_pixel_data(dc, pipe_ctx, blank); } - if (pipe_ctx->plane_state != NULL) - hws->funcs.program_pipe(dc, pipe_ctx, context); - - if (pipe_ctx->bottom_pipe != NULL && pipe_ctx->bottom_pipe != pipe_ctx) - dcn10_program_all_pipe_in_tree(dc, pipe_ctx->bottom_pipe, context); -} - -static struct pipe_ctx *dcn10_find_top_pipe_for_stream( - struct dc *dc, - struct dc_state *context, - const struct dc_stream_state *stream) -{ - int i; + if (pipe_ctx->plane_state->update_flags.bits.full_update) + dcn10_enable_plane(dc, pipe_ctx, context); - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; - struct pipe_ctx *old_pipe_ctx = - &dc->current_state->res_ctx.pipe_ctx[i]; + dcn10_update_dchubp_dpp(dc, pipe_ctx, context); - if (!pipe_ctx->plane_state && !old_pipe_ctx->plane_state) - continue; + hws->funcs.set_hdr_multiplier(pipe_ctx); - if (pipe_ctx->stream != stream) - continue; + if (pipe_ctx->plane_state->update_flags.bits.full_update || + pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change || + pipe_ctx->plane_state->update_flags.bits.gamma_change) + hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state); - if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe) - return pipe_ctx; - } - return NULL; + /* dcn10_translate_regamma_to_hw_format takes 750us to finish + * only do gamma programming for full update. 
+ * TODO: This can be further optimized/cleaned up + * Always call this for now since it does memcmp inside before + * doing heavy calculation and programming + */ + if (pipe_ctx->plane_state->update_flags.bits.full_update) + hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream); } void dcn10_wait_for_pending_cleared(struct dc *dc, @@ -3067,91 +2909,6 @@ void dcn10_wait_for_pending_cleared(struct dc *dc, } } -void dcn10_apply_ctx_for_surface( - struct dc *dc, - const struct dc_stream_state *stream, - int num_planes, - struct dc_state *context) -{ - struct dce_hwseq *hws = dc->hwseq; - int i; - struct timing_generator *tg; - uint32_t underflow_check_delay_us; - bool interdependent_update = false; - struct pipe_ctx *top_pipe_to_program = - dcn10_find_top_pipe_for_stream(dc, context, stream); - DC_LOGGER_INIT(dc->ctx->logger); - - // Clear pipe_ctx flag - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; - pipe_ctx->update_flags.raw = 0; - } - - if (!top_pipe_to_program) - return; - - tg = top_pipe_to_program->stream_res.tg; - - interdependent_update = top_pipe_to_program->plane_state && - top_pipe_to_program->plane_state->update_flags.bits.full_update; - - underflow_check_delay_us = dc->debug.underflow_assert_delay_us; - - if (underflow_check_delay_us != 0xFFFFFFFF && hws->funcs.did_underflow_occur) - ASSERT(hws->funcs.did_underflow_occur(dc, top_pipe_to_program)); - - if (underflow_check_delay_us != 0xFFFFFFFF) - udelay(underflow_check_delay_us); - - if (underflow_check_delay_us != 0xFFFFFFFF && hws->funcs.did_underflow_occur) - ASSERT(hws->funcs.did_underflow_occur(dc, top_pipe_to_program)); - - if (num_planes == 0) { - /* OTG blank before remove all front end */ - hws->funcs.blank_pixel_data(dc, top_pipe_to_program, true); - } - - /* Disconnect unused mpcc */ - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; - struct pipe_ctx *old_pipe_ctx = - &dc->current_state->res_ctx.pipe_ctx[i]; - - if ((!pipe_ctx->plane_state || - pipe_ctx->stream_res.tg != old_pipe_ctx->stream_res.tg) && - old_pipe_ctx->plane_state && - old_pipe_ctx->stream_res.tg == tg) { - - hws->funcs.plane_atomic_disconnect(dc, old_pipe_ctx); - pipe_ctx->update_flags.bits.disable = 1; - - DC_LOG_DC("Reset mpcc for pipe %d\n", - old_pipe_ctx->pipe_idx); - } - } - - if (num_planes > 0) - dcn10_program_all_pipe_in_tree(dc, top_pipe_to_program, context); - - /* Program secondary blending tree and writeback pipes */ - if ((stream->num_wb_info > 0) && (hws->funcs.program_all_writeback_pipes_in_tree)) - hws->funcs.program_all_writeback_pipes_in_tree(dc, stream, context); - if (interdependent_update) - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; - /* Skip inactive pipes and ones already updated */ - if (!pipe_ctx->stream || pipe_ctx->stream == stream || - !pipe_ctx->plane_state || !tg->funcs->is_tg_enabled(tg)) - continue; - - pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent( - pipe_ctx->plane_res.hubp, - &pipe_ctx->dlg_regs, - &pipe_ctx->ttu_regs); - } -} - void dcn10_post_unlock_program_front_end( struct dc *dc, struct dc_state *context) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h index 37bec421fde8..478180b96d8d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h +++ 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h @@ -80,11 +80,6 @@ void dcn10_lock_all_pipes( struct dc *dc, struct dc_state *context, bool lock); -void dcn10_apply_ctx_for_surface( - struct dc *dc, - const struct dc_stream_state *stream, - int num_planes, - struct dc_state *context); void dcn10_post_unlock_program_front_end( struct dc *dc, struct dc_state *context); @@ -211,4 +206,10 @@ void dcn10_verify_allow_pstate_change_high(struct dc *dc); void dcn10_get_dcc_en_bits(struct dc *dc, int *dcc_en_bits); +void dcn10_update_visual_confirm_color( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct tg_color *color, + int mpcc_id); + #endif /* __DC_HWSS_DCN10_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c index d532c78ee764..4ff3ebc25438 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c @@ -26,13 +26,15 @@ #include "hw_sequencer_private.h" #include "dce110/dce110_hw_sequencer.h" #include "dcn10_hw_sequencer.h" +#include "dcn20/dcn20_hwseq.h" static const struct hw_sequencer_funcs dcn10_funcs = { .program_gamut_remap = dcn10_program_gamut_remap, .init_hw = dcn10_init_hw, .power_down_on_boot = dcn10_power_down_on_boot, .apply_ctx_to_hw = dce110_apply_ctx_to_hw, - .apply_ctx_for_surface = dcn10_apply_ctx_for_surface, + .apply_ctx_for_surface = NULL, + .program_front_end_for_ctx = dcn20_program_front_end_for_ctx, .post_unlock_program_front_end = dcn10_post_unlock_program_front_end, .wait_for_pending_cleared = dcn10_wait_for_pending_cleared, .update_plane_addr = dcn10_update_plane_addr, @@ -80,6 +82,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = { .set_abm_immediate_disable = dce110_set_abm_immediate_disable, .set_pipe = dce110_set_pipe, .get_dcc_en_bits = dcn10_get_dcc_en_bits, + .update_visual_confirm_color = dcn10_update_visual_confirm_color, }; static const struct hwseq_private_funcs dcn10_private_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c index b096011acb49..da74269feb75 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c @@ -64,6 +64,8 @@ void mpc1_set_bg_color(struct mpc *mpc, MPCC_BG_G_Y, bg_g_y); REG_SET(MPCC_BG_B_CB[bottommost_mpcc->mpcc_id], 0, MPCC_BG_B_CB, bg_b_cb); + + bottommost_mpcc->blnd_cfg.black_color = *bg_color; } static void mpc1_update_blending( @@ -246,6 +248,8 @@ struct mpcc *mpc1_insert_plane( } } + mpc->funcs->set_bg_color(mpc, &blnd_cfg->black_color, mpcc_id); + /* update the blending configuration */ mpc->funcs->update_blending(mpc, blnd_cfg, mpcc_id); @@ -495,6 +499,7 @@ static const struct mpc_funcs dcn10_mpc_funcs = { .set_output_csc = NULL, .set_output_gamma = NULL, .get_mpc_out_mux = mpc1_get_mpc_out_mux, + .set_bg_color = mpc1_set_bg_color, }; void dcn10_mpc_construct(struct dcn10_mpc *mpc10, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c index 8dc3d1f73984..2feb051a2002 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c @@ -482,7 +482,7 @@ bool dpp20_program_blnd_lut( next_mode = LUT_RAM_A; dpp20_power_on_blnd_lut(dpp_base, true); - dpp20_configure_blnd_lut(dpp_base, next_mode == LUT_RAM_A ? 
true:false); + dpp20_configure_blnd_lut(dpp_base, next_mode == LUT_RAM_A); if (next_mode == LUT_RAM_A) dpp20_program_blnd_luta_settings(dpp_base, params); @@ -893,7 +893,7 @@ bool dpp20_program_shaper( else next_mode = LUT_RAM_A; - dpp20_configure_shaper_lut(dpp_base, next_mode == LUT_RAM_A ? true:false); + dpp20_configure_shaper_lut(dpp_base, next_mode == LUT_RAM_A); if (next_mode == LUT_RAM_A) dpp20_program_shaper_luta_settings(dpp_base, params); diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 6a10daec15cc..25de15158801 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -1700,7 +1700,11 @@ void dcn20_program_front_end_for_ctx( if (pipe->plane_state && !pipe->top_pipe) { while (pipe) { - dcn20_program_pipe(dc, pipe, context); + if (hws->funcs.program_pipe) + hws->funcs.program_pipe(dc, pipe, context); + else + dcn20_program_pipe(dc, pipe, context); + pipe = pipe->bottom_pipe; } /* Program secondary blending tree and writeback pipes */ @@ -2263,9 +2267,25 @@ void dcn20_get_mpctree_visual_confirm_color( *color = pipe_colors[top_pipe->pipe_idx]; } -void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) +void dcn20_update_visual_confirm_color(struct dc *dc, struct pipe_ctx *pipe_ctx, struct tg_color *color, int mpcc_id) { struct dce_hwseq *hws = dc->hwseq; + struct mpc *mpc = dc->res_pool->mpc; + + /* input to MPCC is always RGB, by default leave black_color at 0 */ + if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) + hws->funcs.get_hdr_visual_confirm_color(pipe_ctx, color); + else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) + hws->funcs.get_surface_visual_confirm_color(pipe_ctx, color); + else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE) + dcn20_get_mpctree_visual_confirm_color(pipe_ctx, color); + + if (mpc->funcs->set_bg_color) + mpc->funcs->set_bg_color(mpc, color, mpcc_id); +} + +void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) +{ struct hubp *hubp = pipe_ctx->plane_res.hubp; struct mpcc_blnd_cfg blnd_cfg = { {0} }; bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha; @@ -2274,15 +2294,6 @@ void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) struct mpc *mpc = dc->res_pool->mpc; struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params); - // input to MPCC is always RGB, by default leave black_color at 0 - if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) { - hws->funcs.get_hdr_visual_confirm_color(pipe_ctx, &blnd_cfg.black_color); - } else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) { - hws->funcs.get_surface_visual_confirm_color(pipe_ctx, &blnd_cfg.black_color); - } else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE) { - dcn20_get_mpctree_visual_confirm_color(pipe_ctx, &blnd_cfg.black_color); - } - if (per_pixel_alpha) blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA; else @@ -2316,6 +2327,8 @@ void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) */ mpcc_id = hubp->inst; + dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id); + /* If there is no full update, don't need to touch MPC tree*/ if (!pipe_ctx->plane_state->update_flags.bits.full_update && !pipe_ctx->update_flags.bits.mpcc) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h index c69f766a40ce..6bba191cd33e 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h @@ -146,5 +146,10 @@ void dcn20_set_disp_pattern_generator(const struct dc *dc, const struct tg_color *solid_color, int width, int height, int offset); +void dcn20_update_visual_confirm_color(struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct tg_color *color, + int mpcc_id); + #endif /* __DC_HWSS_DCN20_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c index b5bb613eed4d..2f59f10e5f09 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c @@ -96,6 +96,7 @@ static const struct hw_sequencer_funcs dcn20_funcs = { #endif .set_disp_pattern_generator = dcn20_set_disp_pattern_generator, .get_dcc_en_bits = dcn10_get_dcc_en_bits, + .update_visual_confirm_color = dcn20_update_visual_confirm_color, }; static const struct hwseq_private_funcs dcn20_private_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c index 6a99fdd55e8c..947eb0df3f12 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c @@ -67,7 +67,6 @@ void mpc2_update_blending( REG_SET(MPCC_BOT_GAIN_INSIDE[mpcc_id], 0, MPCC_BOT_GAIN_INSIDE, blnd_cfg->bottom_inside_gain); REG_SET(MPCC_BOT_GAIN_OUTSIDE[mpcc_id], 0, MPCC_BOT_GAIN_OUTSIDE, blnd_cfg->bottom_outside_gain); - mpc1_set_bg_color(mpc, &blnd_cfg->black_color, mpcc_id); mpcc->blnd_cfg = *blnd_cfg; } @@ -557,6 +556,7 @@ const struct mpc_funcs dcn20_mpc_funcs = { .set_output_gamma = mpc2_set_output_gamma, .power_on_mpc_mem_pwr = mpc20_power_on_ogam_lut, .get_mpc_out_mux = mpc1_get_mpc_out_mux, + .set_bg_color = mpc1_set_bg_color, }; void dcn20_mpc_construct(struct dcn20_mpc *mpc20, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index 527e56c353cb..6a56a03cfba3 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -3667,7 +3667,7 @@ static bool dcn20_resource_construct( int i; struct dc_context *ctx = dc->ctx; struct irq_service_init_data init_data; - struct ddc_service_init_data ddc_init_data; + struct ddc_service_init_data ddc_init_data = {0}; struct _vcs_dpi_soc_bounding_box_st *loaded_bb = get_asic_rev_soc_bb(ctx->asic_id.hw_internal_rev); struct _vcs_dpi_ip_params_st *loaded_ip = diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c index 8fccee5a3036..69cc192a7e71 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c @@ -218,7 +218,7 @@ bool dcn21_set_backlight_level(struct pipe_ctx *pipe_ctx, cmd.abm_set_backlight.header.sub_type = DMUB_CMD__ABM_SET_BACKLIGHT; cmd.abm_set_backlight.abm_set_backlight_data.frame_ramp = frame_ramp; cmd.abm_set_backlight.abm_set_backlight_data.backlight_user_level = backlight_pwm_u16_16; - cmd.abm_set_backlight.abm_set_backlight_data.version = DMUB_CMD_ABM_SET_BACKLIGHT_VERSION_1; + cmd.abm_set_backlight.abm_set_backlight_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1; cmd.abm_set_backlight.abm_set_backlight_data.panel_mask = (0x01 << panel_cntl->inst); cmd.abm_set_backlight.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_backlight_data); diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c 
b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c index 4f20a85ff396..523e25f7e410 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c @@ -100,6 +100,7 @@ static const struct hw_sequencer_funcs dcn21_funcs = { .is_abm_supported = dcn21_is_abm_supported, .set_disp_pattern_generator = dcn20_set_disp_pattern_generator, .get_dcc_en_bits = dcn10_get_dcc_en_bits, + .update_visual_confirm_color = dcn20_update_visual_confirm_color, }; static const struct hwseq_private_funcs dcn21_private_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index 8e3f1d0b4cc3..38a2aa87f5f5 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -1575,10 +1575,12 @@ static struct _vcs_dpi_voltage_scaling_st construct_low_pstate_lvl(struct clk_li low_pstate_lvl.phyclk_d18_mhz = dcn2_1_soc.clock_limits[high_voltage_lvl].phyclk_d18_mhz; low_pstate_lvl.phyclk_mhz = dcn2_1_soc.clock_limits[high_voltage_lvl].phyclk_mhz; - for (i = clk_table->num_entries; i > 1; i--) - clk_table->entries[i] = clk_table->entries[i-1]; - clk_table->entries[1] = clk_table->entries[0]; - clk_table->num_entries++; + if (clk_table->num_entries < MAX_NUM_DPM_LVL) { + for (i = clk_table->num_entries; i > 1; i--) + clk_table->entries[i] = clk_table->entries[i-1]; + clk_table->entries[1] = clk_table->entries[0]; + clk_table->num_entries++; + } return low_pstate_lvl; } @@ -1610,10 +1612,6 @@ static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param } } - /* clk_table[1] is reserved for min DF PState. skip here to fill in later. */ - if (i == 1) - k++; - clock_limits[k].state = k; clock_limits[k].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz; clock_limits[k].fabricclk_mhz = clk_table->entries[i].fclk_mhz; @@ -1630,14 +1628,25 @@ static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param k++; } - for (i = 0; i < clk_table->num_entries + 1; i++) - dcn2_1_soc.clock_limits[i] = clock_limits[i]; + + if (clk_table->num_entries >= MAX_NUM_DPM_LVL) { + for (i = 0; i < clk_table->num_entries + 1; i++) + dcn2_1_soc.clock_limits[i] = clock_limits[i]; + } else { + dcn2_1_soc.clock_limits[0] = clock_limits[0]; + for (i = 2; i < clk_table->num_entries + 1; i++) { + dcn2_1_soc.clock_limits[i] = clock_limits[i - 1]; + dcn2_1_soc.clock_limits[i].state = i; + } + } + if (clk_table->num_entries) { - dcn2_1_soc.num_states = clk_table->num_entries + 1; /* fill in min DF PState */ dcn2_1_soc.clock_limits[1] = construct_low_pstate_lvl(clk_table, closest_clk_lvl); + dcn2_1_soc.num_states = clk_table->num_entries; /* duplicate last level */ - dcn2_1_soc.clock_limits[dcn2_1_soc.num_states] = dcn2_1_soc.clock_limits[dcn2_1_soc.num_states - 1]; + dcn2_1_soc.clock_limits[dcn2_1_soc.num_states] = + dcn2_1_soc.clock_limits[dcn2_1_soc.num_states - 1]; dcn2_1_soc.clock_limits[dcn2_1_soc.num_states].state = dcn2_1_soc.num_states; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.h index 0b1755f1dea8..9566b9037458 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.h @@ -85,7 +85,9 @@ SRI(DP_MSE_RATE_UPDATE, DP, id), \ SRI(DP_PIXEL_FORMAT, DP, id), \ SRI(DP_SEC_CNTL, DP, id), \ + SRI(DP_SEC_CNTL1, DP, id), \ SRI(DP_SEC_CNTL2, DP, id), \ + 
SRI(DP_SEC_CNTL5, DP, id), \ SRI(DP_SEC_CNTL6, DP, id), \ SRI(DP_STEER_FIFO, DP, id), \ SRI(DP_VID_M, DP, id), \ @@ -169,7 +171,9 @@ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP5_ENABLE, mask_sh),\ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP6_ENABLE, mask_sh),\ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP7_ENABLE, mask_sh),\ + SE_SF(DP0_DP_SEC_CNTL1, DP_SEC_GSP5_LINE_REFERENCE, mask_sh),\ SE_SF(DP0_DP_SEC_CNTL2, DP_SEC_GSP7_SEND, mask_sh),\ + SE_SF(DP0_DP_SEC_CNTL5, DP_SEC_GSP5_LINE_NUM, mask_sh),\ SE_SF(DP0_DP_SEC_CNTL6, DP_SEC_GSP7_LINE_NUM, mask_sh),\ SE_SF(DP0_DP_SEC_CNTL2, DP_SEC_GSP11_PPS, mask_sh),\ SE_SF(DP0_DP_GSP11_CNTL, DP_SEC_GSP11_ENABLE, mask_sh),\ diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c index d53f8b39699b..83685310a391 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c @@ -813,6 +813,15 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable) (100LL + dc->debug.mall_additional_timer_percent) + denom - 1), denom) - 64LL; + /* In some cases the stutter period is really big (tiny modes); in these + * cases MALL can't be enabled, so skip them to avoid an ASSERT() + * + * We check whether stutter_period is more than 1/10th of the frame time + * to decide if we can actually meet the hysteresis timer range + */ + if (stutter_period > 100000/refresh_hz) + return false; + /* scale should be increased until it fits into 6 bits */ while (tmr_delay & ~0x3F) { tmr_scale++; diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c index bf7fa98b39eb..a978d848d370 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c @@ -99,6 +99,7 @@ static const struct hw_sequencer_funcs dcn30_funcs = { .set_pipe = dcn21_set_pipe, .set_disp_pattern_generator = dcn30_set_disp_pattern_generator, .get_dcc_en_bits = dcn10_get_dcc_en_bits, + .update_visual_confirm_color = dcn20_update_visual_confirm_color, }; static const struct hwseq_private_funcs dcn30_private_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c index 910c17fd4278..a82319f4d081 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c @@ -874,7 +874,7 @@ bool mpc3_program_shaper( else next_mode = LUT_RAM_A; - mpc3_configure_shaper_lut(mpc, next_mode == LUT_RAM_A ?
true:false, rmu_idx); + mpc3_configure_shaper_lut(mpc, next_mode == LUT_RAM_A, rmu_idx); if (next_mode == LUT_RAM_A) mpc3_program_shaper_luta_settings(mpc, params, rmu_idx); @@ -1431,7 +1431,7 @@ const struct mpc_funcs dcn30_mpc_funcs = { .release_rmu = mpcc3_release_rmu, .power_on_mpc_mem_pwr = mpc3_power_on_ogam_lut, .get_mpc_out_mux = mpc1_get_mpc_out_mux, - + .set_bg_color = mpc1_set_bg_color, }; void dcn30_mpc_construct(struct dcn30_mpc *mpc30, diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h index d1fd0b9aa0f9..b7dc78624963 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h @@ -585,6 +585,181 @@ type MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS;\ type MPC_RMU_SHAPER_MODE_CURRENT +#define MPC_COMMON_MASK_SH_LIST_DCN303(mask_sh) \ + MPC_COMMON_MASK_SH_LIST_DCN1_0(mask_sh),\ + SF(MPCC0_MPCC_CONTROL, MPCC_BG_BPC, mask_sh),\ + SF(MPCC0_MPCC_CONTROL, MPCC_BOT_GAIN_MODE, mask_sh),\ + SF(MPCC0_MPCC_TOP_GAIN, MPCC_TOP_GAIN, mask_sh),\ + SF(MPCC0_MPCC_BOT_GAIN_INSIDE, MPCC_BOT_GAIN_INSIDE, mask_sh),\ + SF(MPCC0_MPCC_BOT_GAIN_OUTSIDE, MPCC_BOT_GAIN_OUTSIDE, mask_sh),\ + SF(MPC_OUT0_CSC_MODE, MPC_OCSC_MODE, mask_sh),\ + SF(MPC_OUT0_CSC_C11_C12_A, MPC_OCSC_C11_A, mask_sh),\ + SF(MPC_OUT0_CSC_C11_C12_A, MPC_OCSC_C12_A, mask_sh),\ + SF(MPCC0_MPCC_STATUS, MPCC_DISABLED, mask_sh),\ + SF(MPCC0_MPCC_MEM_PWR_CTRL, MPCC_OGAM_MEM_PWR_FORCE, mask_sh),\ + SF(MPCC0_MPCC_MEM_PWR_CTRL, MPCC_OGAM_MEM_PWR_DIS, mask_sh),\ + SF(MPCC0_MPCC_MEM_PWR_CTRL, MPCC_OGAM_MEM_LOW_PWR_MODE, mask_sh),\ + SF(MPCC0_MPCC_MEM_PWR_CTRL, MPCC_OGAM_MEM_PWR_STATE, mask_sh),\ + SF(MPC_OUT0_DENORM_CONTROL, MPC_OUT_DENORM_MODE, mask_sh),\ + SF(MPC_OUT0_DENORM_CONTROL, MPC_OUT_DENORM_CLAMP_MAX_R_CR, mask_sh),\ + SF(MPC_OUT0_DENORM_CONTROL, MPC_OUT_DENORM_CLAMP_MIN_R_CR, mask_sh),\ + SF(MPC_OUT0_DENORM_CLAMP_G_Y, MPC_OUT_DENORM_CLAMP_MAX_G_Y, mask_sh),\ + SF(MPC_OUT0_DENORM_CLAMP_G_Y, MPC_OUT_DENORM_CLAMP_MIN_G_Y, mask_sh),\ + SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MAX_B_CB, mask_sh),\ + SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MIN_B_CB, mask_sh),\ + SF(MPCC_OGAM0_MPCC_GAMUT_REMAP_MODE, MPCC_GAMUT_REMAP_MODE, mask_sh),\ + SF(MPCC_OGAM0_MPCC_GAMUT_REMAP_MODE, MPCC_GAMUT_REMAP_MODE_CURRENT, mask_sh),\ + SF(MPCC_OGAM0_MPCC_GAMUT_REMAP_COEF_FORMAT, MPCC_GAMUT_REMAP_COEF_FORMAT, mask_sh),\ + SF(MPCC_OGAM0_MPC_GAMUT_REMAP_C11_C12_A, MPCC_GAMUT_REMAP_C11_A, mask_sh),\ + SF(MPCC_OGAM0_MPC_GAMUT_REMAP_C11_C12_A, MPCC_GAMUT_REMAP_C12_A, mask_sh),\ + SF(MPC_DWB0_MUX, MPC_DWB0_MUX, mask_sh),\ + SF(MPC_DWB0_MUX, MPC_DWB0_MUX_STATUS, mask_sh),\ + SF(MPC_OUT0_MUX, MPC_OUT_RATE_CONTROL, mask_sh),\ + SF(MPC_OUT0_MUX, MPC_OUT_RATE_CONTROL_DISABLE, mask_sh),\ + SF(MPC_OUT0_MUX, MPC_OUT_FLOW_CONTROL_MODE, mask_sh),\ + SF(MPC_OUT0_MUX, MPC_OUT_FLOW_CONTROL_COUNT, mask_sh), \ + SF(MPC_RMU_CONTROL, MPC_RMU0_MUX, mask_sh), \ + SF(MPC_RMU_CONTROL, MPC_RMU0_MUX_STATUS, mask_sh), \ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1, MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1, MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1, MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1, MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B, MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B, 
MPCC_OGAM_RAMA_EXP_REGION_END_B, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_B, MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_START_SLOPE_CNTL_B, MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_B, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_START_BASE_CNTL_B, MPCC_OGAM_RAMA_EXP_REGION_START_BASE_B, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B, MPCC_OGAM_RAMA_EXP_REGION_START_B, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B, MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_B, MPCC_OGAM_RAMA_OFFSET_B, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_G, MPCC_OGAM_RAMA_OFFSET_G, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_RAMA_OFFSET_R, MPCC_OGAM_RAMA_OFFSET_R, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_LUT_INDEX, MPCC_OGAM_LUT_INDEX, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_CONTROL, MPCC_OGAM_MODE, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_CONTROL, MPCC_OGAM_SELECT, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_CONTROL, MPCC_OGAM_PWL_DISABLE, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_CONTROL, MPCC_OGAM_MODE_CURRENT, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_CONTROL, MPCC_OGAM_SELECT_CURRENT, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL, MPCC_OGAM_LUT_WRITE_COLOR_MASK, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL, MPCC_OGAM_LUT_READ_COLOR_SEL, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL, MPCC_OGAM_LUT_READ_DBG, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL, MPCC_OGAM_LUT_HOST_SEL, mask_sh),\ + SF(MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL, MPCC_OGAM_LUT_CONFIG_MODE, mask_sh),\ + /*SF(MPCC_OGAM0_MPCC_OGAM_LUT_CONTROL, MPCC_OGAM_LUT_STATUS, mask_sh),*/\ + SF(MPCC_OGAM0_MPCC_OGAM_LUT_DATA, MPCC_OGAM_LUT_DATA, mask_sh),\ + SF(MPC_RMU0_3DLUT_MODE, MPC_RMU_3DLUT_MODE, mask_sh),\ + SF(MPC_RMU0_3DLUT_MODE, MPC_RMU_3DLUT_SIZE, mask_sh),\ + /*SF(MPC_RMU0_3DLUT_MODE, MPC_RMU_3DLUT_MODE_CURRENT, mask_sh),*/\ + SF(MPC_RMU0_3DLUT_READ_WRITE_CONTROL, MPC_RMU_3DLUT_WRITE_EN_MASK, mask_sh),\ + SF(MPC_RMU0_3DLUT_READ_WRITE_CONTROL, MPC_RMU_3DLUT_RAM_SEL, mask_sh),\ + SF(MPC_RMU0_3DLUT_READ_WRITE_CONTROL, MPC_RMU_3DLUT_30BIT_EN, mask_sh),\ + /*SF(MPC_RMU0_3DLUT_READ_WRITE_CONTROL, MPC_RMU_3DLUT_CONFIG_STATUS, mask_sh),*/\ + SF(MPC_RMU0_3DLUT_READ_WRITE_CONTROL, MPC_RMU_3DLUT_READ_SEL, mask_sh),\ + SF(MPC_RMU0_3DLUT_INDEX, MPC_RMU_3DLUT_INDEX, mask_sh),\ + SF(MPC_RMU0_3DLUT_DATA, MPC_RMU_3DLUT_DATA0, mask_sh),\ + SF(MPC_RMU0_3DLUT_DATA, MPC_RMU_3DLUT_DATA1, mask_sh),\ + SF(MPC_RMU0_3DLUT_DATA_30BIT, MPC_RMU_3DLUT_DATA_30BIT, mask_sh),\ + SF(MPC_RMU0_SHAPER_CONTROL, MPC_RMU_SHAPER_LUT_MODE, mask_sh),\ + /*SF(MPC_RMU0_SHAPER_CONTROL, MPC_RMU_SHAPER_LUT_MODE_CURRENT, mask_sh),*/\ + SF(MPC_RMU0_SHAPER_OFFSET_R, MPC_RMU_SHAPER_OFFSET_R, mask_sh),\ + SF(MPC_RMU0_SHAPER_OFFSET_G, MPC_RMU_SHAPER_OFFSET_G, mask_sh),\ + SF(MPC_RMU0_SHAPER_OFFSET_B, MPC_RMU_SHAPER_OFFSET_B, mask_sh),\ + SF(MPC_RMU0_SHAPER_SCALE_R, MPC_RMU_SHAPER_SCALE_R, mask_sh),\ + SF(MPC_RMU0_SHAPER_SCALE_G_B, MPC_RMU_SHAPER_SCALE_G, mask_sh),\ + SF(MPC_RMU0_SHAPER_SCALE_G_B, MPC_RMU_SHAPER_SCALE_B, mask_sh),\ + SF(MPC_RMU0_SHAPER_LUT_INDEX, MPC_RMU_SHAPER_LUT_INDEX, mask_sh),\ + SF(MPC_RMU0_SHAPER_LUT_DATA, MPC_RMU_SHAPER_LUT_DATA, mask_sh),\ + SF(MPC_RMU0_SHAPER_LUT_WRITE_EN_MASK, MPC_RMU_SHAPER_LUT_WRITE_EN_MASK, mask_sh),\ + SF(MPC_RMU0_SHAPER_LUT_WRITE_EN_MASK, MPC_RMU_SHAPER_LUT_WRITE_SEL, mask_sh),\ + /*SF(MPC_RMU0_SHAPER_LUT_WRITE_EN_MASK, MPC_RMU_SHAPER_CONFIG_STATUS, mask_sh),*/\ + SF(MPC_RMU0_SHAPER_RAMA_START_CNTL_B, MPC_RMU_SHAPER_RAMA_EXP_REGION_START_B, mask_sh),\ + 
SF(MPC_RMU0_SHAPER_RAMA_START_CNTL_B, MPC_RMU_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, mask_sh),\ + SF(MPC_RMU0_SHAPER_RAMA_END_CNTL_B, MPC_RMU_SHAPER_RAMA_EXP_REGION_END_B, mask_sh),\ + SF(MPC_RMU0_SHAPER_RAMA_END_CNTL_B, MPC_RMU_SHAPER_RAMA_EXP_REGION_END_BASE_B, mask_sh),\ + SF(MPC_RMU0_SHAPER_RAMA_REGION_0_1, MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh),\ + SF(MPC_RMU0_SHAPER_RAMA_REGION_0_1, MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh),\ + SF(MPC_RMU0_SHAPER_RAMA_REGION_0_1, MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh),\ + SF(MPC_RMU0_SHAPER_RAMA_REGION_0_1, MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, mask_sh),\ + SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_MEM_PWR_FORCE, mask_sh),\ + SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_MEM_PWR_DIS, mask_sh),\ + SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_SHAPER_MEM_PWR_STATE, mask_sh),\ + SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_3DLUT_MEM_PWR_STATE, mask_sh),\ + SF(MPC_RMU_MEM_PWR_CTRL, MPC_RMU0_MEM_LOW_PWR_MODE, mask_sh),\ + SF(MPC_RMU0_SHAPER_CONTROL, MPC_RMU_SHAPER_MODE_CURRENT, mask_sh),\ + SF(CUR_VUPDATE_LOCK_SET0, CUR_VUPDATE_LOCK_SET, mask_sh) + +#define MPC_REG_FIELD_LIST_DCN3_03(type) \ + MPC_REG_FIELD_LIST_DCN2_0(type) \ + type MPC_DWB0_MUX;\ + type MPC_DWB0_MUX_STATUS;\ + type MPC_OUT_RATE_CONTROL;\ + type MPC_OUT_RATE_CONTROL_DISABLE;\ + type MPC_OUT_FLOW_CONTROL_MODE;\ + type MPC_OUT_FLOW_CONTROL_COUNT; \ + type MPCC_GAMUT_REMAP_MODE; \ + type MPCC_GAMUT_REMAP_MODE_CURRENT;\ + type MPCC_GAMUT_REMAP_COEF_FORMAT; \ + type MPCC_GAMUT_REMAP_C11_A; \ + type MPCC_GAMUT_REMAP_C12_A; \ + type MPC_RMU0_MUX; \ + type MPC_RMU0_MUX_STATUS; \ + type MPC_RMU0_MEM_PWR_FORCE;\ + type MPC_RMU0_MEM_PWR_DIS;\ + type MPC_RMU0_MEM_LOW_PWR_MODE;\ + type MPC_RMU0_SHAPER_MEM_PWR_STATE;\ + type MPC_RMU0_3DLUT_MEM_PWR_STATE;\ + type MPCC_OGAM_RAMA_EXP_REGION_START_SLOPE_B; \ + type MPCC_OGAM_RAMA_EXP_REGION_START_BASE_B;\ + type MPCC_OGAM_RAMA_OFFSET_B;\ + type MPCC_OGAM_RAMA_OFFSET_G;\ + type MPCC_OGAM_RAMA_OFFSET_R;\ + type MPCC_OGAM_SELECT; \ + type MPCC_OGAM_PWL_DISABLE; \ + type MPCC_OGAM_MODE_CURRENT; \ + type MPCC_OGAM_SELECT_CURRENT; \ + type MPCC_OGAM_LUT_WRITE_COLOR_MASK; \ + type MPCC_OGAM_LUT_READ_COLOR_SEL; \ + type MPCC_OGAM_LUT_READ_DBG; \ + type MPCC_OGAM_LUT_HOST_SEL; \ + type MPCC_OGAM_LUT_CONFIG_MODE; \ + type MPCC_OGAM_LUT_STATUS; \ + type MPCC_OGAM_RAMA_START_BASE_CNTL_B;\ + type MPCC_OGAM_MEM_LOW_PWR_MODE;\ + type MPCC_OGAM_MEM_PWR_STATE;\ + type MPC_RMU_3DLUT_MODE; \ + type MPC_RMU_3DLUT_SIZE; \ + type MPC_RMU_3DLUT_MODE_CURRENT; \ + type MPC_RMU_3DLUT_WRITE_EN_MASK;\ + type MPC_RMU_3DLUT_RAM_SEL;\ + type MPC_RMU_3DLUT_30BIT_EN;\ + type MPC_RMU_3DLUT_CONFIG_STATUS;\ + type MPC_RMU_3DLUT_READ_SEL;\ + type MPC_RMU_3DLUT_INDEX;\ + type MPC_RMU_3DLUT_DATA0;\ + type MPC_RMU_3DLUT_DATA1;\ + type MPC_RMU_3DLUT_DATA_30BIT;\ + type MPC_RMU_SHAPER_LUT_MODE;\ + type MPC_RMU_SHAPER_LUT_MODE_CURRENT;\ + type MPC_RMU_SHAPER_OFFSET_R;\ + type MPC_RMU_SHAPER_OFFSET_G;\ + type MPC_RMU_SHAPER_OFFSET_B;\ + type MPC_RMU_SHAPER_SCALE_R;\ + type MPC_RMU_SHAPER_SCALE_G;\ + type MPC_RMU_SHAPER_SCALE_B;\ + type MPC_RMU_SHAPER_LUT_INDEX;\ + type MPC_RMU_SHAPER_LUT_DATA;\ + type MPC_RMU_SHAPER_LUT_WRITE_EN_MASK;\ + type MPC_RMU_SHAPER_LUT_WRITE_SEL;\ + type MPC_RMU_SHAPER_CONFIG_STATUS;\ + type MPC_RMU_SHAPER_RAMA_EXP_REGION_START_B;\ + type MPC_RMU_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B;\ + type MPC_RMU_SHAPER_RAMA_EXP_REGION_END_B;\ + type MPC_RMU_SHAPER_RAMA_EXP_REGION_END_BASE_B;\ + type MPC_RMU_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET;\ + type 
MPC_RMU_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS;\ + type MPC_RMU_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET;\ + type MPC_RMU_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS;\ + type MPC_RMU_SHAPER_MODE_CURRENT struct dcn30_mpc_registers { MPC_REG_VARIABLE_LIST_DCN3_0; diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c index 8980c90b2277..ac478bdcfb2a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c @@ -97,7 +97,7 @@ void optc3_lock_doublebuffer_disable(struct timing_generator *optc) MASTER_UPDATE_LOCK_DB_END_Y, 0); REG_UPDATE(OTG_GLOBAL_CONTROL2, GLOBAL_UPDATE_LOCK_EN, 0); - REG_UPDATE(OTG_GLOBAL_CONTROL0, MASTER_UPDATE_LOCK_DB_EN, 1); + REG_UPDATE(OTG_GLOBAL_CONTROL0, MASTER_UPDATE_LOCK_DB_EN, 0); } void optc3_lock(struct timing_generator *optc) diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c index 4a5fa23d8e7b..39920422409d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c @@ -826,10 +826,11 @@ static const struct dc_plane_cap plane_cap = { .fp16 = 16000 }, + /* 6:1 downscaling ratio: 1000/6 = 166.666 */ .max_downscale_factor = { - .argb8888 = 600, - .nv12 = 600, - .fp16 = 600 + .argb8888 = 167, + .nv12 = 167, + .fp16 = 167 } }; @@ -1724,7 +1725,7 @@ static bool init_soc_bounding_box(struct dc *dc, DC_LOGGER_INIT(dc->ctx->logger); if (!is_soc_bounding_box_valid(dc)) { - DC_LOG_ERROR("%s: not valid soc bounding box/n", __func__); + DC_LOG_ERROR("%s: not valid soc bounding box\n", __func__); return false; } @@ -2538,7 +2539,7 @@ static bool dcn30_resource_construct( int i; struct dc_context *ctx = dc->ctx; struct irq_service_init_data init_data; - struct ddc_service_init_data ddc_init_data; + struct ddc_service_init_data ddc_init_data = {0}; uint32_t pipe_fuses = read_pipe_fuses(ctx); uint32_t num_pipes = 0; diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c index 70b053d9ba40..181f2175ac95 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c @@ -101,6 +101,7 @@ static const struct hw_sequencer_funcs dcn301_funcs = { .get_dcc_en_bits = dcn10_get_dcc_en_bits, .optimize_pwr_state = dcn21_optimize_pwr_state, .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state, + .update_visual_confirm_color = dcn20_update_visual_confirm_color, }; static const struct hwseq_private_funcs dcn301_private_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c index 5b54b7fc5105..9776d1737818 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c @@ -843,10 +843,11 @@ static const struct dc_plane_cap plane_cap = { .fp16 = 16000 }, + /* 6:1 downscaling ratio: 1000/6 = 166.666 */ .max_downscale_factor = { - .argb8888 = 600, - .nv12 = 600, - .fp16 = 600 + .argb8888 = 167, + .nv12 = 167, + .fp16 = 167 }, 64, 64 @@ -1497,7 +1498,7 @@ static bool init_soc_bounding_box(struct dc *dc, DC_LOGGER_INIT(dc->ctx->logger); if (!is_soc_bounding_box_valid(dc)) { - DC_LOG_ERROR("%s: not valid soc bounding box/n", __func__); + DC_LOG_ERROR("%s: not valid soc bounding box\n", __func__); return false; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c 
b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c index fc2dea243d1b..00cb6d11ed0d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c @@ -284,10 +284,11 @@ static const struct dc_plane_cap plane_cap = { .nv12 = 16000, .fp16 = 16000 }, + /* 6:1 downscaling ratio: 1000/6 = 166.666 */ .max_downscale_factor = { - .argb8888 = 600, - .nv12 = 600, - .fp16 = 600 + .argb8888 = 167, + .nv12 = 167, + .fp16 = 167 }, 16, 16 @@ -1093,7 +1094,7 @@ static bool init_soc_bounding_box(struct dc *dc, struct resource_pool *pool) DC_LOGGER_INIT(dc->ctx->logger); if (!is_soc_bounding_box_valid(dc)) { - DC_LOG_ERROR("%s: not valid soc bounding box/n", __func__); + DC_LOG_ERROR("%s: not valid soc bounding box\n", __func__); return false; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/Makefile b/drivers/gpu/drm/amd/display/dc/dcn303/Makefile new file mode 100644 index 000000000000..6f7a1f2b49f0 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn303/Makefile @@ -0,0 +1,39 @@ +# SPDX-License-Identifier: MIT +# +# Copyright (C) 2021 Advanced Micro Devices, Inc. All rights reserved +# +# Authors: AMD +# +# Makefile for dcn303. + +DCN3_03 = dcn303_init.o dcn303_hwseq.o dcn303_resource.o + +ifdef CONFIG_X86 +CFLAGS_$(AMDDALPATH)/dc/dcn303/dcn303_resource.o := -msse +endif + +ifdef CONFIG_PPC64 +CFLAGS_$(AMDDALPATH)/dc/dcn303/dcn303_resource.o := -mhard-float -maltivec +endif + +ifdef CONFIG_CC_IS_GCC +ifeq ($(call cc-ifversion, -lt, 0701, y), y) +IS_OLD_GCC = 1 +endif +CFLAGS_$(AMDDALPATH)/dc/dcn303/dcn303_resource.o += -mhard-float +endif + +ifdef CONFIG_X86 +ifdef IS_OLD_GCC +# Stack alignment mismatch, proceed with caution. +# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3 +# (8B stack alignment). +CFLAGS_$(AMDDALPATH)/dc/dcn303/dcn303_resource.o += -mpreferred-stack-boundary=4 +else +CFLAGS_$(AMDDALPATH)/dc/dcn303/dcn303_resource.o += -msse2 +endif +endif + +AMD_DAL_DCN3_03 = $(addprefix $(AMDDALPATH)/dc/dcn303/,$(DCN3_03)) + +AMD_DISPLAY_FILES += $(AMD_DAL_DCN3_03) diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_dccg.h new file mode 100644 index 000000000000..a79c54bbc899 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_dccg.h @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright (C) 2021 Advanced Micro Devices, Inc. 
+ * + * Authors: AMD + */ + +#ifndef __DCN303_DCCG_H__ +#define __DCN303_DCCG_H__ + +#include "dcn30/dcn30_dccg.h" + + +#define DCCG_REG_LIST_DCN3_03() \ + SR(DPPCLK_DTO_CTRL),\ + DCCG_SRII(DTO_PARAM, DPPCLK, 0),\ + DCCG_SRII(DTO_PARAM, DPPCLK, 1),\ + SR(REFCLK_CNTL) + +#define DCCG_MASK_SH_LIST_DCN3_03(mask_sh) \ + DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 0, mask_sh),\ + DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 0, mask_sh),\ + DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 1, mask_sh),\ + DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 1, mask_sh),\ + DCCG_SF(DPPCLK0_DTO_PARAM, DPPCLK0_DTO_PHASE, mask_sh),\ + DCCG_SF(DPPCLK0_DTO_PARAM, DPPCLK0_DTO_MODULO, mask_sh),\ + DCCG_SF(REFCLK_CNTL, REFCLK_CLOCK_EN, mask_sh),\ + DCCG_SF(REFCLK_CNTL, REFCLK_SRC_SEL, mask_sh) + +#endif //__DCN303_DCCG_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_hwseq.c new file mode 100644 index 000000000000..dc33ec8b7bdb --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_hwseq.c @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright (C) 2021 Advanced Micro Devices, Inc. + * + * Authors: AMD + */ + +#include "dcn303_hwseq.h" + +#include "dce/dce_hwseq.h" + +#include "reg_helper.h" +#include "dc.h" + +#define DC_LOGGER_INIT(logger) + +#define CTX \ + hws->ctx +#define REG(reg)\ + hws->regs->reg + +#undef FN +#define FN(reg_name, field_name) \ + hws->shifts->field_name, hws->masks->field_name + + +void dcn303_dpp_pg_control(struct dce_hwseq *hws, unsigned int dpp_inst, bool power_on) +{ + /*DCN303 removes PG registers*/ +} + +void dcn303_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on) +{ + /*DCN303 removes PG registers*/ +} + +void dcn303_dsc_pg_control(struct dce_hwseq *hws, unsigned int dsc_inst, bool power_on) +{ + /*DCN303 removes PG registers*/ +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_hwseq.h new file mode 100644 index 000000000000..fc6cab720b6d --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_hwseq.h @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright (C) 2021 Advanced Micro Devices, Inc. + * + * Authors: AMD + */ + +#ifndef __DC_HWSS_DCN303_H__ +#define __DC_HWSS_DCN303_H__ + +#include "hw_sequencer_private.h" + +void dcn303_dpp_pg_control(struct dce_hwseq *hws, unsigned int dpp_inst, bool power_on); +void dcn303_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on); +void dcn303_dsc_pg_control(struct dce_hwseq *hws, unsigned int dsc_inst, bool power_on); + +#endif /* __DC_HWSS_DCN303_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.c new file mode 100644 index 000000000000..86d4b303d02f --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.c @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright (C) 2021 Advanced Micro Devices, Inc. 
+ * + * Authors: AMD + */ + +#include "dcn303_hwseq.h" +#include "dcn30/dcn30_init.h" +#include "dc.h" + +void dcn303_hw_sequencer_construct(struct dc *dc) +{ + dcn30_hw_sequencer_construct(dc); + + dc->hwseq->funcs.dpp_pg_control = dcn303_dpp_pg_control; + dc->hwseq->funcs.hubp_pg_control = dcn303_hubp_pg_control; + dc->hwseq->funcs.dsc_pg_control = dcn303_dsc_pg_control; +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.h b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.h new file mode 100644 index 000000000000..66b1e3604f07 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.h @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright (C) 2021 Advanced Micro Devices, Inc. + * + * Authors: AMD + */ + +#ifndef __DC_DCN303_INIT_H__ +#define __DC_DCN303_INIT_H__ + +struct dc; + +void dcn303_hw_sequencer_construct(struct dc *dc); + +#endif /* __DC_DCN303_INIT_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c new file mode 100644 index 000000000000..aff0230c9193 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c @@ -0,0 +1,1675 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright (C) 2021 Advanced Micro Devices, Inc. + * + * Authors: AMD + */ + +#include "dcn303_init.h" +#include "dcn303_resource.h" +#include "dcn303_dccg.h" +#include "irq/dcn303/irq_service_dcn303.h" + +#include "dcn30/dcn30_dio_link_encoder.h" +#include "dcn30/dcn30_dio_stream_encoder.h" +#include "dcn30/dcn30_dpp.h" +#include "dcn30/dcn30_dwb.h" +#include "dcn30/dcn30_hubbub.h" +#include "dcn30/dcn30_hubp.h" +#include "dcn30/dcn30_mmhubbub.h" +#include "dcn30/dcn30_mpc.h" +#include "dcn30/dcn30_opp.h" +#include "dcn30/dcn30_optc.h" +#include "dcn30/dcn30_resource.h" + +#include "dcn20/dcn20_dsc.h" +#include "dcn20/dcn20_resource.h" + +#include "dcn10/dcn10_resource.h" + +#include "dc_link_ddc.h" + +#include "dce/dce_abm.h" +#include "dce/dce_audio.h" +#include "dce/dce_aux.h" +#include "dce/dce_clock_source.h" +#include "dce/dce_hwseq.h" +#include "dce/dce_i2c_hw.h" +#include "dce/dce_panel_cntl.h" +#include "dce/dmub_abm.h" +#include "dce/dmub_psr.h" +#include "clk_mgr.h" + +#include "hw_sequencer_private.h" +#include "reg_helper.h" +#include "resource.h" +#include "vm_helper.h" + +#include "sienna_cichlid_ip_offset.h" +#include "dcn/dcn_3_0_3_offset.h" +#include "dcn/dcn_3_0_3_sh_mask.h" +#include "dcn/dpcs_3_0_3_offset.h" +#include "dcn/dpcs_3_0_3_sh_mask.h" +#include "nbio/nbio_2_3_offset.h" + +#define DC_LOGGER_INIT(logger) + +struct _vcs_dpi_ip_params_st dcn3_03_ip = { + .use_min_dcfclk = 0, + .clamp_min_dcfclk = 0, + .odm_capable = 1, + .gpuvm_enable = 1, + .hostvm_enable = 0, + .gpuvm_max_page_table_levels = 4, + .hostvm_max_page_table_levels = 4, + .hostvm_cached_page_table_levels = 0, + .pte_group_size_bytes = 2048, + .num_dsc = 2, + .rob_buffer_size_kbytes = 184, + .det_buffer_size_kbytes = 184, + .dpte_buffer_size_in_pte_reqs_luma = 64, + .dpte_buffer_size_in_pte_reqs_chroma = 34, + .pde_proc_buffer_size_64k_reqs = 48, + .dpp_output_buffer_pixels = 2560, + .opp_output_buffer_lines = 1, + .pixel_chunk_size_kbytes = 8, + .pte_enable = 1, + .max_page_table_levels = 2, + .pte_chunk_size_kbytes = 2, // ? + .meta_chunk_size_kbytes = 2, + .writeback_chunk_size_kbytes = 8, + .line_buffer_size_bits = 789504, + .is_line_buffer_bpp_fixed = 0, // ? + .line_buffer_fixed_bpp = 0, // ? 
+ .dcc_supported = true, + .writeback_interface_buffer_size_kbytes = 90, + .writeback_line_buffer_buffer_size = 0, + .max_line_buffer_lines = 12, + .writeback_luma_buffer_size_kbytes = 12, // writeback_line_buffer_buffer_size = 656640 + .writeback_chroma_buffer_size_kbytes = 8, + .writeback_chroma_line_buffer_width_pixels = 4, + .writeback_max_hscl_ratio = 1, + .writeback_max_vscl_ratio = 1, + .writeback_min_hscl_ratio = 1, + .writeback_min_vscl_ratio = 1, + .writeback_max_hscl_taps = 1, + .writeback_max_vscl_taps = 1, + .writeback_line_buffer_luma_buffer_size = 0, + .writeback_line_buffer_chroma_buffer_size = 14643, + .cursor_buffer_size = 8, + .cursor_chunk_size = 2, + .max_num_otg = 2, + .max_num_dpp = 2, + .max_num_wb = 1, + .max_dchub_pscl_bw_pix_per_clk = 4, + .max_pscl_lb_bw_pix_per_clk = 2, + .max_lb_vscl_bw_pix_per_clk = 4, + .max_vscl_hscl_bw_pix_per_clk = 4, + .max_hscl_ratio = 6, + .max_vscl_ratio = 6, + .hscl_mults = 4, + .vscl_mults = 4, + .max_hscl_taps = 8, + .max_vscl_taps = 8, + .dispclk_ramp_margin_percent = 1, + .underscan_factor = 1.11, + .min_vblank_lines = 32, + .dppclk_delay_subtotal = 46, + .dynamic_metadata_vm_enabled = true, + .dppclk_delay_scl_lb_only = 16, + .dppclk_delay_scl = 50, + .dppclk_delay_cnvc_formatter = 27, + .dppclk_delay_cnvc_cursor = 6, + .dispclk_delay_subtotal = 119, + .dcfclk_cstate_latency = 5.2, // SRExitTime + .max_inter_dcn_tile_repeaters = 8, + .max_num_hdmi_frl_outputs = 1, + .odm_combine_4to1_supported = false, + .xfc_supported = false, + .xfc_fill_bw_overhead_percent = 10.0, + .xfc_fill_constant_bytes = 0, + .gfx7_compat_tiling_supported = 0, + .number_of_cursors = 1, +}; + +struct _vcs_dpi_soc_bounding_box_st dcn3_03_soc = { + .clock_limits = { + { + .state = 0, + .dispclk_mhz = 1217.0, + .dppclk_mhz = 1217.0, + .phyclk_mhz = 810.0, + .phyclk_d18_mhz = 667.0, + .dscclk_mhz = 405.6, + }, + }, + + .min_dcfclk = 500.0, /* TODO: set this to actual min DCFCLK */ + .num_states = 1, + .sr_exit_time_us = 15.5, + .sr_enter_plus_exit_time_us = 20, + .urgent_latency_us = 4.0, + .urgent_latency_pixel_data_only_us = 4.0, + .urgent_latency_pixel_mixed_with_vm_data_us = 4.0, + .urgent_latency_vm_data_only_us = 4.0, + .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096, + .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096, + .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096, + .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 80.0, + .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0, + .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0, + .max_avg_sdp_bw_use_normal_percent = 60.0, + .max_avg_dram_bw_use_normal_percent = 40.0, + .writeback_latency_us = 12.0, + .max_request_size_bytes = 256, + .fabric_datapath_to_dcn_data_return_bytes = 64, + .dcn_downspread_percent = 0.5, + .downspread_percent = 0.38, + .dram_page_open_time_ns = 50.0, + .dram_rw_turnaround_time_ns = 17.5, + .dram_return_buffer_per_channel_bytes = 8192, + .round_trip_ping_latency_dcfclk_cycles = 156, + .urgent_out_of_order_return_per_channel_bytes = 4096, + .channel_interleave_bytes = 256, + .num_banks = 8, + .gpuvm_min_page_size_bytes = 4096, + .hostvm_min_page_size_bytes = 4096, + .dram_clock_change_latency_us = 404, + .dummy_pstate_latency_us = 5, + .writeback_dram_clock_change_latency_us = 23.0, + .return_bus_width_bytes = 64, + .dispclk_dppclk_vco_speed_mhz = 3650, + .xfc_bus_transport_time_us = 20, // ? + .xfc_xbuf_latency_tolerance_us = 4, // ? + .use_urgent_burst_bw = 1, // ? 
+ .do_urgent_latency_adjustment = true, + .urgent_latency_adjustment_fabric_clock_component_us = 1.0, + .urgent_latency_adjustment_fabric_clock_reference_mhz = 1000, +}; + +static const struct dc_debug_options debug_defaults_drv = { + .disable_dmcu = true, + .force_abm_enable = false, + .timing_trace = false, + .clock_trace = true, + .disable_pplib_clock_request = true, + .pipe_split_policy = MPC_SPLIT_DYNAMIC, + .force_single_disp_pipe_split = false, + .disable_dcc = DCC_ENABLE, + .vsr_support = true, + .performance_trace = false, + .max_downscale_src_width = 7680,/*upto 8K*/ + .disable_pplib_wm_range = false, + .scl_reset_length10 = true, + .sanity_checks = false, + .underflow_assert_delay_us = 0xFFFFFFFF, + .dwb_fi_phase = -1, // -1 = disable, + .dmub_command_table = true, + .disable_idle_power_optimizations = false, +}; + +static const struct dc_debug_options debug_defaults_diags = { + .disable_dmcu = true, + .force_abm_enable = false, + .timing_trace = true, + .clock_trace = true, + .disable_dpp_power_gate = true, + .disable_hubp_power_gate = true, + .disable_clock_gate = true, + .disable_pplib_clock_request = true, + .disable_pplib_wm_range = true, + .disable_stutter = false, + .scl_reset_length10 = true, + .dwb_fi_phase = -1, // -1 = disable + .dmub_command_table = true, + .enable_tri_buf = true, + .disable_psr = true, +}; + +enum dcn303_clk_src_array_id { + DCN303_CLK_SRC_PLL0, + DCN303_CLK_SRC_PLL1, + DCN303_CLK_SRC_TOTAL +}; + +static const struct resource_caps res_cap_dcn303 = { + .num_timing_generator = 2, + .num_opp = 2, + .num_video_plane = 2, + .num_audio = 2, + .num_stream_encoder = 2, + .num_dwb = 1, + .num_ddc = 2, + .num_vmid = 16, + .num_mpc_3dlut = 1, + .num_dsc = 2, +}; + +static const struct dc_plane_cap plane_cap = { + .type = DC_PLANE_TYPE_DCN_UNIVERSAL, + .blends_with_above = true, + .blends_with_below = true, + .per_pixel_alpha = true, + .pixel_format_support = { + .argb8888 = true, + .nv12 = true, + .fp16 = true, + .p010 = false, + .ayuv = false, + }, + .max_upscale_factor = { + .argb8888 = 16000, + .nv12 = 16000, + .fp16 = 16000 + }, + .max_downscale_factor = { + .argb8888 = 600, + .nv12 = 600, + .fp16 = 600 + }, + 16, + 16 +}; + +/* NBIO */ +#define NBIO_BASE_INNER(seg) \ + NBIO_BASE__INST0_SEG ## seg + +#define NBIO_BASE(seg) \ + NBIO_BASE_INNER(seg) + +#define NBIO_SR(reg_name)\ + .reg_name = NBIO_BASE(mm ## reg_name ## _BASE_IDX) + \ + mm ## reg_name + +/* DCN */ +#undef BASE_INNER +#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg + +#define BASE(seg) BASE_INNER(seg) + +#define SR(reg_name)\ + .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name + +#define SF(reg_name, field_name, post_fix)\ + .field_name = reg_name ## __ ## field_name ## post_fix + +#define SRI(reg_name, block, id)\ + .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + mm ## block ## id ## _ ## reg_name + +#define SRI2(reg_name, block, id)\ + .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name + +#define SRII(reg_name, block, id)\ + .reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + mm ## block ## id ## _ ## reg_name + +#define DCCG_SRII(reg_name, block, id)\ + .block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + mm ## block ## id ## _ ## reg_name + +#define VUPDATE_SRII(reg_name, block, id)\ + .reg_name[id] = BASE(mm ## reg_name ## _ ## block ## id ## _BASE_IDX) + \ + mm ## reg_name ## _ ## block ## id + +#define SRII_DWB(reg_name, temp_name, block, id)\ + .reg_name[id] = BASE(mm ## block 
## id ## _ ## temp_name ## _BASE_IDX) + \ + mm ## block ## id ## _ ## temp_name + +#define SRII_MPC_RMU(reg_name, block, id)\ + .RMU##_##reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + mm ## block ## id ## _ ## reg_name + +static const struct dcn_hubbub_registers hubbub_reg = { + HUBBUB_REG_LIST_DCN30(0) +}; + +static const struct dcn_hubbub_shift hubbub_shift = { + HUBBUB_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dcn_hubbub_mask hubbub_mask = { + HUBBUB_MASK_SH_LIST_DCN30(_MASK) +}; + +#define vmid_regs(id)\ + [id] = { DCN20_VMID_REG_LIST(id) } + +static const struct dcn_vmid_registers vmid_regs[] = { + vmid_regs(0), + vmid_regs(1), + vmid_regs(2), + vmid_regs(3), + vmid_regs(4), + vmid_regs(5), + vmid_regs(6), + vmid_regs(7), + vmid_regs(8), + vmid_regs(9), + vmid_regs(10), + vmid_regs(11), + vmid_regs(12), + vmid_regs(13), + vmid_regs(14), + vmid_regs(15) +}; + +static const struct dcn20_vmid_shift vmid_shifts = { + DCN20_VMID_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn20_vmid_mask vmid_masks = { + DCN20_VMID_MASK_SH_LIST(_MASK) +}; + +static struct hubbub *dcn303_hubbub_create(struct dc_context *ctx) +{ + int i; + + struct dcn20_hubbub *hubbub3 = kzalloc(sizeof(struct dcn20_hubbub), GFP_KERNEL); + + if (!hubbub3) + return NULL; + + hubbub3_construct(hubbub3, ctx, &hubbub_reg, &hubbub_shift, &hubbub_mask); + + for (i = 0; i < res_cap_dcn303.num_vmid; i++) { + struct dcn20_vmid *vmid = &hubbub3->vmid[i]; + + vmid->ctx = ctx; + + vmid->regs = &vmid_regs[i]; + vmid->shifts = &vmid_shifts; + vmid->masks = &vmid_masks; + } + + return &hubbub3->base; +} + +#define vpg_regs(id)\ + [id] = { VPG_DCN3_REG_LIST(id) } + +static const struct dcn30_vpg_registers vpg_regs[] = { + vpg_regs(0), + vpg_regs(1), + vpg_regs(2) +}; + +static const struct dcn30_vpg_shift vpg_shift = { + DCN3_VPG_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn30_vpg_mask vpg_mask = { + DCN3_VPG_MASK_SH_LIST(_MASK) +}; + +static struct vpg *dcn303_vpg_create(struct dc_context *ctx, uint32_t inst) +{ + struct dcn30_vpg *vpg3 = kzalloc(sizeof(struct dcn30_vpg), GFP_KERNEL); + + if (!vpg3) + return NULL; + + vpg3_construct(vpg3, ctx, inst, &vpg_regs[inst], &vpg_shift, &vpg_mask); + + return &vpg3->base; +} + +#define afmt_regs(id)\ + [id] = { AFMT_DCN3_REG_LIST(id) } + +static const struct dcn30_afmt_registers afmt_regs[] = { + afmt_regs(0), + afmt_regs(1), + afmt_regs(2) +}; + +static const struct dcn30_afmt_shift afmt_shift = { + DCN3_AFMT_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn30_afmt_mask afmt_mask = { + DCN3_AFMT_MASK_SH_LIST(_MASK) +}; + +static struct afmt *dcn303_afmt_create(struct dc_context *ctx, uint32_t inst) +{ + struct dcn30_afmt *afmt3 = kzalloc(sizeof(struct dcn30_afmt), GFP_KERNEL); + + if (!afmt3) + return NULL; + + afmt3_construct(afmt3, ctx, inst, &afmt_regs[inst], &afmt_shift, &afmt_mask); + + return &afmt3->base; +} + +#define audio_regs(id)\ + [id] = { AUD_COMMON_REG_LIST(id) } + +static const struct dce_audio_registers audio_regs[] = { + audio_regs(0), + audio_regs(1), + audio_regs(2), + audio_regs(3), + audio_regs(4), + audio_regs(5), + audio_regs(6) +}; + +#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\ + SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\ + SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\ + AUD_COMMON_MASK_SH_LIST_BASE(mask_sh) + +static const struct dce_audio_shift audio_shift = { + DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT) +}; + +static const struct 
dce_audio_mask audio_mask = { + DCE120_AUD_COMMON_MASK_SH_LIST(_MASK) +}; + +static struct audio *dcn303_create_audio(struct dc_context *ctx, unsigned int inst) +{ + return dce_audio_create(ctx, inst, &audio_regs[inst], &audio_shift, &audio_mask); +} + +#define stream_enc_regs(id)\ + [id] = { SE_DCN3_REG_LIST(id) } + +static const struct dcn10_stream_enc_registers stream_enc_regs[] = { + stream_enc_regs(0), + stream_enc_regs(1) +}; + +static const struct dcn10_stream_encoder_shift se_shift = { + SE_COMMON_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dcn10_stream_encoder_mask se_mask = { + SE_COMMON_MASK_SH_LIST_DCN30(_MASK) +}; + +static struct stream_encoder *dcn303_stream_encoder_create(enum engine_id eng_id, struct dc_context *ctx) +{ + struct dcn10_stream_encoder *enc1; + struct vpg *vpg; + struct afmt *afmt; + int vpg_inst; + int afmt_inst; + + /* Mapping of VPG, AFMT, DME register blocks to DIO block instance */ + if (eng_id <= ENGINE_ID_DIGE) { + vpg_inst = eng_id; + afmt_inst = eng_id; + } else + return NULL; + + enc1 = kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); + vpg = dcn303_vpg_create(ctx, vpg_inst); + afmt = dcn303_afmt_create(ctx, afmt_inst); + + if (!enc1 || !vpg || !afmt) + return NULL; + + dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id, vpg, afmt, &stream_enc_regs[eng_id], + &se_shift, &se_mask); + + return &enc1->base; +} + +#define clk_src_regs(index, pllid)\ + [index] = { CS_COMMON_REG_LIST_DCN3_03(index, pllid) } + +static const struct dce110_clk_src_regs clk_src_regs[] = { + clk_src_regs(0, A), + clk_src_regs(1, B) +}; + +static const struct dce110_clk_src_shift cs_shift = { + CS_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT) +}; + +static const struct dce110_clk_src_mask cs_mask = { + CS_COMMON_MASK_SH_LIST_DCN2_0(_MASK) +}; + +static struct clock_source *dcn303_clock_source_create(struct dc_context *ctx, struct dc_bios *bios, + enum clock_source_id id, const struct dce110_clk_src_regs *regs, bool dp_clk_src) +{ + struct dce110_clk_src *clk_src = kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); + + if (!clk_src) + return NULL; + + if (dcn3_clk_src_construct(clk_src, ctx, bios, id, regs, &cs_shift, &cs_mask)) { + clk_src->base.dp_clk_src = dp_clk_src; + return &clk_src->base; + } + + BREAK_TO_DEBUGGER(); + return NULL; +} + +static const struct dce_hwseq_registers hwseq_reg = { + HWSEQ_DCN303_REG_LIST() +}; + +static const struct dce_hwseq_shift hwseq_shift = { + HWSEQ_DCN303_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_hwseq_mask hwseq_mask = { + HWSEQ_DCN303_MASK_SH_LIST(_MASK) +}; + +static struct dce_hwseq *dcn303_hwseq_create(struct dc_context *ctx) +{ + struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); + + if (hws) { + hws->ctx = ctx; + hws->regs = &hwseq_reg; + hws->shifts = &hwseq_shift; + hws->masks = &hwseq_mask; + } + return hws; +} + +#define hubp_regs(id)\ + [id] = { HUBP_REG_LIST_DCN30(id) } + +static const struct dcn_hubp2_registers hubp_regs[] = { + hubp_regs(0), + hubp_regs(1) +}; + +static const struct dcn_hubp2_shift hubp_shift = { + HUBP_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dcn_hubp2_mask hubp_mask = { + HUBP_MASK_SH_LIST_DCN30(_MASK) +}; + +static struct hubp *dcn303_hubp_create(struct dc_context *ctx, uint32_t inst) +{ + struct dcn20_hubp *hubp2 = kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL); + + if (!hubp2) + return NULL; + + if (hubp3_construct(hubp2, ctx, inst, &hubp_regs[inst], &hubp_shift, &hubp_mask)) + return &hubp2->base; + + BREAK_TO_DEBUGGER(); + 
kfree(hubp2); + return NULL; +} + +#define dpp_regs(id)\ + [id] = { DPP_REG_LIST_DCN30(id) } + +static const struct dcn3_dpp_registers dpp_regs[] = { + dpp_regs(0), + dpp_regs(1) +}; + +static const struct dcn3_dpp_shift tf_shift = { + DPP_REG_LIST_SH_MASK_DCN30(__SHIFT) +}; + +static const struct dcn3_dpp_mask tf_mask = { + DPP_REG_LIST_SH_MASK_DCN30(_MASK) +}; + +static struct dpp *dcn303_dpp_create(struct dc_context *ctx, uint32_t inst) +{ + struct dcn3_dpp *dpp = kzalloc(sizeof(struct dcn3_dpp), GFP_KERNEL); + + if (!dpp) + return NULL; + + if (dpp3_construct(dpp, ctx, inst, &dpp_regs[inst], &tf_shift, &tf_mask)) + return &dpp->base; + + BREAK_TO_DEBUGGER(); + kfree(dpp); + return NULL; +} + +#define opp_regs(id)\ + [id] = { OPP_REG_LIST_DCN30(id) } + +static const struct dcn20_opp_registers opp_regs[] = { + opp_regs(0), + opp_regs(1) +}; + +static const struct dcn20_opp_shift opp_shift = { + OPP_MASK_SH_LIST_DCN20(__SHIFT) +}; + +static const struct dcn20_opp_mask opp_mask = { + OPP_MASK_SH_LIST_DCN20(_MASK) +}; + +static struct output_pixel_processor *dcn303_opp_create(struct dc_context *ctx, uint32_t inst) +{ + struct dcn20_opp *opp = kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL); + + if (!opp) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + dcn20_opp_construct(opp, ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask); + return &opp->base; +} + +#define optc_regs(id)\ + [id] = { OPTC_COMMON_REG_LIST_DCN3_0(id) } + +static const struct dcn_optc_registers optc_regs[] = { + optc_regs(0), + optc_regs(1) +}; + +static const struct dcn_optc_shift optc_shift = { + OPTC_COMMON_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dcn_optc_mask optc_mask = { + OPTC_COMMON_MASK_SH_LIST_DCN30(_MASK) +}; + +static struct timing_generator *dcn303_timing_generator_create(struct dc_context *ctx, uint32_t instance) +{ + struct optc *tgn10 = kzalloc(sizeof(struct optc), GFP_KERNEL); + + if (!tgn10) + return NULL; + + tgn10->base.inst = instance; + tgn10->base.ctx = ctx; + + tgn10->tg_regs = &optc_regs[instance]; + tgn10->tg_shift = &optc_shift; + tgn10->tg_mask = &optc_mask; + + dcn30_timing_generator_init(tgn10); + + return &tgn10->base; +} + +static const struct dcn30_mpc_registers mpc_regs = { + MPC_REG_LIST_DCN3_0(0), + MPC_REG_LIST_DCN3_0(1), + MPC_OUT_MUX_REG_LIST_DCN3_0(0), + MPC_OUT_MUX_REG_LIST_DCN3_0(1), + MPC_RMU_GLOBAL_REG_LIST_DCN3AG, + MPC_RMU_REG_LIST_DCN3AG(0), + MPC_DWB_MUX_REG_LIST_DCN3_0(0), +}; + +static const struct dcn30_mpc_shift mpc_shift = { + MPC_COMMON_MASK_SH_LIST_DCN303(__SHIFT) +}; + +static const struct dcn30_mpc_mask mpc_mask = { + MPC_COMMON_MASK_SH_LIST_DCN303(_MASK) +}; + +static struct mpc *dcn303_mpc_create(struct dc_context *ctx, int num_mpcc, int num_rmu) +{ + struct dcn30_mpc *mpc30 = kzalloc(sizeof(struct dcn30_mpc), GFP_KERNEL); + + if (!mpc30) + return NULL; + + dcn30_mpc_construct(mpc30, ctx, &mpc_regs, &mpc_shift, &mpc_mask, num_mpcc, num_rmu); + + return &mpc30->base; +} + +#define dsc_regsDCN20(id)\ +[id] = { DSC_REG_LIST_DCN20(id) } + +static const struct dcn20_dsc_registers dsc_regs[] = { + dsc_regsDCN20(0), + dsc_regsDCN20(1) +}; + +static const struct dcn20_dsc_shift dsc_shift = { + DSC_REG_LIST_SH_MASK_DCN20(__SHIFT) +}; + +static const struct dcn20_dsc_mask dsc_mask = { + DSC_REG_LIST_SH_MASK_DCN20(_MASK) +}; + +static struct display_stream_compressor *dcn303_dsc_create(struct dc_context *ctx, uint32_t inst) +{ + struct dcn20_dsc *dsc = kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL); + + if (!dsc) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + 
dsc2_construct(dsc, ctx, inst, &dsc_regs[inst], &dsc_shift, &dsc_mask); + return &dsc->base; +} + +#define dwbc_regs_dcn3(id)\ +[id] = { DWBC_COMMON_REG_LIST_DCN30(id) } + +static const struct dcn30_dwbc_registers dwbc30_regs[] = { + dwbc_regs_dcn3(0) +}; + +static const struct dcn30_dwbc_shift dwbc30_shift = { + DWBC_COMMON_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dcn30_dwbc_mask dwbc30_mask = { + DWBC_COMMON_MASK_SH_LIST_DCN30(_MASK) +}; + +static bool dcn303_dwbc_create(struct dc_context *ctx, struct resource_pool *pool) +{ + int i; + uint32_t pipe_count = pool->res_cap->num_dwb; + + for (i = 0; i < pipe_count; i++) { + struct dcn30_dwbc *dwbc30 = kzalloc(sizeof(struct dcn30_dwbc), GFP_KERNEL); + + if (!dwbc30) { + dm_error("DC: failed to create dwbc30!\n"); + return false; + } + + dcn30_dwbc_construct(dwbc30, ctx, &dwbc30_regs[i], &dwbc30_shift, &dwbc30_mask, i); + + pool->dwbc[i] = &dwbc30->base; + } + return true; +} + +#define mcif_wb_regs_dcn3(id)\ +[id] = { MCIF_WB_COMMON_REG_LIST_DCN30(id) } + +static const struct dcn30_mmhubbub_registers mcif_wb30_regs[] = { + mcif_wb_regs_dcn3(0) +}; + +static const struct dcn30_mmhubbub_shift mcif_wb30_shift = { + MCIF_WB_COMMON_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dcn30_mmhubbub_mask mcif_wb30_mask = { + MCIF_WB_COMMON_MASK_SH_LIST_DCN30(_MASK) +}; + +static bool dcn303_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool) +{ + int i; + uint32_t pipe_count = pool->res_cap->num_dwb; + + for (i = 0; i < pipe_count; i++) { + struct dcn30_mmhubbub *mcif_wb30 = kzalloc(sizeof(struct dcn30_mmhubbub), GFP_KERNEL); + + if (!mcif_wb30) { + dm_error("DC: failed to create mcif_wb30!\n"); + return false; + } + + dcn30_mmhubbub_construct(mcif_wb30, ctx, &mcif_wb30_regs[i], &mcif_wb30_shift, &mcif_wb30_mask, i); + + pool->mcif_wb[i] = &mcif_wb30->base; + } + return true; +} + +#define aux_engine_regs(id)\ +[id] = {\ + AUX_COMMON_REG_LIST0(id), \ + .AUXN_IMPCAL = 0, \ + .AUXP_IMPCAL = 0, \ + .AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK, \ +} + +static const struct dce110_aux_registers aux_engine_regs[] = { + aux_engine_regs(0), + aux_engine_regs(1) +}; + +static const struct dce110_aux_registers_shift aux_shift = { + DCN_AUX_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce110_aux_registers_mask aux_mask = { + DCN_AUX_MASK_SH_LIST(_MASK) +}; + +static struct dce_aux *dcn303_aux_engine_create(struct dc_context *ctx, uint32_t inst) +{ + struct aux_engine_dce110 *aux_engine = kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); + + if (!aux_engine) + return NULL; + + dce110_aux_engine_construct(aux_engine, ctx, inst, SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, + &aux_engine_regs[inst], &aux_mask, &aux_shift, ctx->dc->caps.extended_aux_timeout_support); + + return &aux_engine->base; +} + +#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) } + +static const struct dce_i2c_registers i2c_hw_regs[] = { + i2c_inst_regs(1), + i2c_inst_regs(2) +}; + +static const struct dce_i2c_shift i2c_shifts = { + I2C_COMMON_MASK_SH_LIST_DCN2(__SHIFT) +}; + +static const struct dce_i2c_mask i2c_masks = { + I2C_COMMON_MASK_SH_LIST_DCN2(_MASK) +}; + +static struct dce_i2c_hw *dcn303_i2c_hw_create(struct dc_context *ctx, uint32_t inst) +{ + struct dce_i2c_hw *dce_i2c_hw = kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); + + if (!dce_i2c_hw) + return NULL; + + dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst, &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks); + + return dce_i2c_hw; +} + +static const struct 
encoder_feature_support link_enc_feature = { + .max_hdmi_deep_color = COLOR_DEPTH_121212, + .max_hdmi_pixel_clock = 600000, + .hdmi_ycbcr420_supported = true, + .dp_ycbcr420_supported = true, + .fec_supported = true, + .flags.bits.IS_HBR2_CAPABLE = true, + .flags.bits.IS_HBR3_CAPABLE = true, + .flags.bits.IS_TPS3_CAPABLE = true, + .flags.bits.IS_TPS4_CAPABLE = true +}; + +#define link_regs(id, phyid)\ + [id] = {\ + LE_DCN3_REG_LIST(id), \ + UNIPHY_DCN2_REG_LIST(phyid), \ + SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \ + } + +static const struct dcn10_link_enc_registers link_enc_regs[] = { + link_regs(0, A), + link_regs(1, B) +}; + +static const struct dcn10_link_enc_shift le_shift = { + LINK_ENCODER_MASK_SH_LIST_DCN30(__SHIFT), + DPCS_DCN2_MASK_SH_LIST(__SHIFT) +}; + +static const struct dcn10_link_enc_mask le_mask = { + LINK_ENCODER_MASK_SH_LIST_DCN30(_MASK), + DPCS_DCN2_MASK_SH_LIST(_MASK) +}; + +#define aux_regs(id)\ + [id] = { DCN2_AUX_REG_LIST(id) } + +static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = { + aux_regs(0), + aux_regs(1) +}; + +#define hpd_regs(id)\ + [id] = { HPD_REG_LIST(id) } + +static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = { + hpd_regs(0), + hpd_regs(1) +}; + +static struct link_encoder *dcn303_link_encoder_create(const struct encoder_init_data *enc_init_data) +{ + struct dcn20_link_encoder *enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); + + if (!enc20) + return NULL; + + dcn30_link_encoder_construct(enc20, enc_init_data, &link_enc_feature, + &link_enc_regs[enc_init_data->transmitter], &link_enc_aux_regs[enc_init_data->channel - 1], + &link_enc_hpd_regs[enc_init_data->hpd_source], &le_shift, &le_mask); + + return &enc20->enc10.base; +} + +static const struct dce_panel_cntl_registers panel_cntl_regs[] = { + { DCN_PANEL_CNTL_REG_LIST() } +}; + +static const struct dce_panel_cntl_shift panel_cntl_shift = { + DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_panel_cntl_mask panel_cntl_mask = { + DCE_PANEL_CNTL_MASK_SH_LIST(_MASK) +}; + +static struct panel_cntl *dcn303_panel_cntl_create(const struct panel_cntl_init_data *init_data) +{ + struct dce_panel_cntl *panel_cntl = kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL); + + if (!panel_cntl) + return NULL; + + dce_panel_cntl_construct(panel_cntl, init_data, &panel_cntl_regs[init_data->inst], + &panel_cntl_shift, &panel_cntl_mask); + + return &panel_cntl->base; +} + +static void read_dce_straps(struct dc_context *ctx, struct resource_straps *straps) +{ + generic_reg_get(ctx, mmDC_PINSTRAPS + BASE(mmDC_PINSTRAPS_BASE_IDX), + FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio); +} + +static const struct resource_create_funcs res_create_funcs = { + .read_dce_straps = read_dce_straps, + .create_audio = dcn303_create_audio, + .create_stream_encoder = dcn303_stream_encoder_create, + .create_hwseq = dcn303_hwseq_create, +}; + +static const struct resource_create_funcs res_create_maximus_funcs = { + .read_dce_straps = NULL, + .create_audio = NULL, + .create_stream_encoder = NULL, + .create_hwseq = dcn303_hwseq_create, +}; + +static bool is_soc_bounding_box_valid(struct dc *dc) +{ + uint32_t hw_internal_rev = dc->ctx->asic_id.hw_internal_rev; + + if (ASICREV_IS_BEIGE_GOBY_P(hw_internal_rev)) + return true; + + return false; +} + +static bool init_soc_bounding_box(struct dc *dc, struct resource_pool *pool) +{ + struct _vcs_dpi_soc_bounding_box_st *loaded_bb = &dcn3_03_soc; + struct _vcs_dpi_ip_params_st *loaded_ip = &dcn3_03_ip; + + 
DC_LOGGER_INIT(dc->ctx->logger); + + if (!is_soc_bounding_box_valid(dc)) { + DC_LOG_ERROR("%s: not valid soc bounding box/n", __func__); + return false; + } + + loaded_ip->max_num_otg = pool->pipe_count; + loaded_ip->max_num_dpp = pool->pipe_count; + loaded_ip->clamp_min_dcfclk = dc->config.clamp_min_dcfclk; + dcn20_patch_bounding_box(dc, loaded_bb); + return true; +} + +static void dcn303_resource_destruct(struct resource_pool *pool) +{ + unsigned int i; + + for (i = 0; i < pool->stream_enc_count; i++) { + if (pool->stream_enc[i] != NULL) { + if (pool->stream_enc[i]->vpg != NULL) { + kfree(DCN30_VPG_FROM_VPG(pool->stream_enc[i]->vpg)); + pool->stream_enc[i]->vpg = NULL; + } + if (pool->stream_enc[i]->afmt != NULL) { + kfree(DCN30_AFMT_FROM_AFMT(pool->stream_enc[i]->afmt)); + pool->stream_enc[i]->afmt = NULL; + } + kfree(DCN10STRENC_FROM_STRENC(pool->stream_enc[i])); + pool->stream_enc[i] = NULL; + } + } + + for (i = 0; i < pool->res_cap->num_dsc; i++) { + if (pool->dscs[i] != NULL) + dcn20_dsc_destroy(&pool->dscs[i]); + } + + if (pool->mpc != NULL) { + kfree(TO_DCN20_MPC(pool->mpc)); + pool->mpc = NULL; + } + + if (pool->hubbub != NULL) { + kfree(pool->hubbub); + pool->hubbub = NULL; + } + + for (i = 0; i < pool->pipe_count; i++) { + if (pool->dpps[i] != NULL) { + kfree(TO_DCN20_DPP(pool->dpps[i])); + pool->dpps[i] = NULL; + } + + if (pool->hubps[i] != NULL) { + kfree(TO_DCN20_HUBP(pool->hubps[i])); + pool->hubps[i] = NULL; + } + + if (pool->irqs != NULL) + dal_irq_service_destroy(&pool->irqs); + } + + for (i = 0; i < pool->res_cap->num_ddc; i++) { + if (pool->engines[i] != NULL) + dce110_engine_destroy(&pool->engines[i]); + if (pool->hw_i2cs[i] != NULL) { + kfree(pool->hw_i2cs[i]); + pool->hw_i2cs[i] = NULL; + } + if (pool->sw_i2cs[i] != NULL) { + kfree(pool->sw_i2cs[i]); + pool->sw_i2cs[i] = NULL; + } + } + + for (i = 0; i < pool->res_cap->num_opp; i++) { + if (pool->opps[i] != NULL) + pool->opps[i]->funcs->opp_destroy(&pool->opps[i]); + } + + for (i = 0; i < pool->res_cap->num_timing_generator; i++) { + if (pool->timing_generators[i] != NULL) { + kfree(DCN10TG_FROM_TG(pool->timing_generators[i])); + pool->timing_generators[i] = NULL; + } + } + + for (i = 0; i < pool->res_cap->num_dwb; i++) { + if (pool->dwbc[i] != NULL) { + kfree(TO_DCN30_DWBC(pool->dwbc[i])); + pool->dwbc[i] = NULL; + } + if (pool->mcif_wb[i] != NULL) { + kfree(TO_DCN30_MMHUBBUB(pool->mcif_wb[i])); + pool->mcif_wb[i] = NULL; + } + } + + for (i = 0; i < pool->audio_count; i++) { + if (pool->audios[i]) + dce_aud_destroy(&pool->audios[i]); + } + + for (i = 0; i < pool->clk_src_count; i++) { + if (pool->clock_sources[i] != NULL) + dcn20_clock_source_destroy(&pool->clock_sources[i]); + } + + if (pool->dp_clock_source != NULL) + dcn20_clock_source_destroy(&pool->dp_clock_source); + + for (i = 0; i < pool->res_cap->num_mpc_3dlut; i++) { + if (pool->mpc_lut[i] != NULL) { + dc_3dlut_func_release(pool->mpc_lut[i]); + pool->mpc_lut[i] = NULL; + } + if (pool->mpc_shaper[i] != NULL) { + dc_transfer_func_release(pool->mpc_shaper[i]); + pool->mpc_shaper[i] = NULL; + } + } + + for (i = 0; i < pool->pipe_count; i++) { + if (pool->multiple_abms[i] != NULL) + dce_abm_destroy(&pool->multiple_abms[i]); + } + + if (pool->psr != NULL) + dmub_psr_destroy(&pool->psr); + + if (pool->dccg != NULL) + dcn_dccg_destroy(&pool->dccg); + + if (pool->oem_device != NULL) + dal_ddc_service_destroy(&pool->oem_device); +} + +static void dcn303_destroy_resource_pool(struct resource_pool **pool) +{ + dcn303_resource_destruct(*pool); + kfree(*pool); + *pool 
= NULL; +} + +static void dcn303_get_optimal_dcfclk_fclk_for_uclk(unsigned int uclk_mts, + unsigned int *optimal_dcfclk, + unsigned int *optimal_fclk) +{ + double bw_from_dram, bw_from_dram1, bw_from_dram2; + + bw_from_dram1 = uclk_mts * dcn3_03_soc.num_chans * + dcn3_03_soc.dram_channel_width_bytes * (dcn3_03_soc.max_avg_dram_bw_use_normal_percent / 100); + bw_from_dram2 = uclk_mts * dcn3_03_soc.num_chans * + dcn3_03_soc.dram_channel_width_bytes * (dcn3_03_soc.max_avg_sdp_bw_use_normal_percent / 100); + + bw_from_dram = (bw_from_dram1 < bw_from_dram2) ? bw_from_dram1 : bw_from_dram2; + + if (optimal_fclk) + *optimal_fclk = bw_from_dram / + (dcn3_03_soc.fabric_datapath_to_dcn_data_return_bytes * + (dcn3_03_soc.max_avg_sdp_bw_use_normal_percent / 100)); + + if (optimal_dcfclk) + *optimal_dcfclk = bw_from_dram / + (dcn3_03_soc.return_bus_width_bytes * (dcn3_03_soc.max_avg_sdp_bw_use_normal_percent / 100)); +} + +void dcn303_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) +{ + unsigned int i, j; + unsigned int num_states = 0; + + unsigned int dcfclk_mhz[DC__VOLTAGE_STATES] = {0}; + unsigned int dram_speed_mts[DC__VOLTAGE_STATES] = {0}; + unsigned int optimal_uclk_for_dcfclk_sta_targets[DC__VOLTAGE_STATES] = {0}; + unsigned int optimal_dcfclk_for_uclk[DC__VOLTAGE_STATES] = {0}; + + unsigned int dcfclk_sta_targets[DC__VOLTAGE_STATES] = {694, 875, 1000, 1200}; + unsigned int num_dcfclk_sta_targets = 4; + unsigned int num_uclk_states; + + + if (dc->ctx->dc_bios->vram_info.num_chans) + dcn3_03_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans; + + if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes) + dcn3_03_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes; + + dcn3_03_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0; + dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0; + + if (bw_params->clk_table.entries[0].memclk_mhz) { + int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0, max_phyclk_mhz = 0; + + for (i = 0; i < MAX_NUM_DPM_LVL; i++) { + if (bw_params->clk_table.entries[i].dcfclk_mhz > max_dcfclk_mhz) + max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz; + if (bw_params->clk_table.entries[i].dispclk_mhz > max_dispclk_mhz) + max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz; + if (bw_params->clk_table.entries[i].dppclk_mhz > max_dppclk_mhz) + max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz; + if (bw_params->clk_table.entries[i].phyclk_mhz > max_phyclk_mhz) + max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz; + } + if (!max_dcfclk_mhz) + max_dcfclk_mhz = dcn3_03_soc.clock_limits[0].dcfclk_mhz; + if (!max_dispclk_mhz) + max_dispclk_mhz = dcn3_03_soc.clock_limits[0].dispclk_mhz; + if (!max_dppclk_mhz) + max_dppclk_mhz = dcn3_03_soc.clock_limits[0].dppclk_mhz; + if (!max_phyclk_mhz) + max_phyclk_mhz = dcn3_03_soc.clock_limits[0].phyclk_mhz; + + if (max_dcfclk_mhz > dcfclk_sta_targets[num_dcfclk_sta_targets-1]) { + dcfclk_sta_targets[num_dcfclk_sta_targets] = max_dcfclk_mhz; + num_dcfclk_sta_targets++; + } else if (max_dcfclk_mhz < dcfclk_sta_targets[num_dcfclk_sta_targets-1]) { + for (i = 0; i < num_dcfclk_sta_targets; i++) { + if (dcfclk_sta_targets[i] > max_dcfclk_mhz) { + dcfclk_sta_targets[i] = max_dcfclk_mhz; + break; + } + } + /* Update size of array since we "removed" duplicates */ + num_dcfclk_sta_targets = i + 1; + } + + num_uclk_states = bw_params->clk_table.num_entries; + + /* Calculate optimal dcfclk 
for each uclk */ + for (i = 0; i < num_uclk_states; i++) { + dcn303_get_optimal_dcfclk_fclk_for_uclk(bw_params->clk_table.entries[i].memclk_mhz * 16, + &optimal_dcfclk_for_uclk[i], NULL); + if (optimal_dcfclk_for_uclk[i] < bw_params->clk_table.entries[0].dcfclk_mhz) + optimal_dcfclk_for_uclk[i] = bw_params->clk_table.entries[0].dcfclk_mhz; + } + + /* Calculate optimal uclk for each dcfclk sta target */ + for (i = 0; i < num_dcfclk_sta_targets; i++) { + for (j = 0; j < num_uclk_states; j++) { + if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) { + optimal_uclk_for_dcfclk_sta_targets[i] = + bw_params->clk_table.entries[j].memclk_mhz * 16; + break; + } + } + } + + i = 0; + j = 0; + /* create the final dcfclk and uclk table */ + while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) { + if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) { + dcfclk_mhz[num_states] = dcfclk_sta_targets[i]; + dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++]; + } else { + if (j < num_uclk_states && optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) { + dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j]; + dram_speed_mts[num_states++] = + bw_params->clk_table.entries[j++].memclk_mhz * 16; + } else { + j = num_uclk_states; + } + } + } + + while (i < num_dcfclk_sta_targets && num_states < DC__VOLTAGE_STATES) { + dcfclk_mhz[num_states] = dcfclk_sta_targets[i]; + dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++]; + } + + while (j < num_uclk_states && num_states < DC__VOLTAGE_STATES && + optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) { + dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j]; + dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16; + } + + dcn3_03_soc.num_states = num_states; + for (i = 0; i < dcn3_03_soc.num_states; i++) { + dcn3_03_soc.clock_limits[i].state = i; + dcn3_03_soc.clock_limits[i].dcfclk_mhz = dcfclk_mhz[i]; + dcn3_03_soc.clock_limits[i].fabricclk_mhz = dcfclk_mhz[i]; + dcn3_03_soc.clock_limits[i].dram_speed_mts = dram_speed_mts[i]; + + /* Fill all states with max values of all other clocks */ + dcn3_03_soc.clock_limits[i].dispclk_mhz = max_dispclk_mhz; + dcn3_03_soc.clock_limits[i].dppclk_mhz = max_dppclk_mhz; + dcn3_03_soc.clock_limits[i].phyclk_mhz = max_phyclk_mhz; + dcn3_03_soc.clock_limits[i].dtbclk_mhz = dcn3_03_soc.clock_limits[0].dtbclk_mhz; + /* These clocks cannot come from bw_params, always fill from dcn3_03_soc[1] */ + /* FCLK, PHYCLK_D18, SOCCLK, DSCCLK */ + dcn3_03_soc.clock_limits[i].phyclk_d18_mhz = dcn3_03_soc.clock_limits[0].phyclk_d18_mhz; + dcn3_03_soc.clock_limits[i].socclk_mhz = dcn3_03_soc.clock_limits[0].socclk_mhz; + dcn3_03_soc.clock_limits[i].dscclk_mhz = dcn3_03_soc.clock_limits[0].dscclk_mhz; + } + /* re-init DML with updated bb */ + dml_init_instance(&dc->dml, &dcn3_03_soc, &dcn3_03_ip, DML_PROJECT_DCN30); + if (dc->current_state) + dml_init_instance(&dc->current_state->bw_ctx.dml, &dcn3_03_soc, &dcn3_03_ip, DML_PROJECT_DCN30); + } +} + +static struct resource_funcs dcn303_res_pool_funcs = { + .destroy = dcn303_destroy_resource_pool, + .link_enc_create = dcn303_link_encoder_create, + .panel_cntl_create = dcn303_panel_cntl_create, + .validate_bandwidth = dcn30_validate_bandwidth, + .calculate_wm_and_dlg = dcn30_calculate_wm_and_dlg, + .update_soc_for_wm_a = dcn30_update_soc_for_wm_a, + .populate_dml_pipes = dcn30_populate_dml_pipes_from_context, + .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer, + 
.add_stream_to_ctx = dcn30_add_stream_to_ctx, + .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource, + .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, + .populate_dml_writeback_from_context = dcn30_populate_dml_writeback_from_context, + .set_mcif_arb_params = dcn30_set_mcif_arb_params, + .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link, + .acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut, + .release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut, + .update_bw_bounding_box = dcn303_update_bw_bounding_box, + .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, +}; + +static struct dc_cap_funcs cap_funcs = { + .get_dcc_compression_cap = dcn20_get_dcc_compression_cap +}; + +static const struct bios_registers bios_regs = { + NBIO_SR(BIOS_SCRATCH_3), + NBIO_SR(BIOS_SCRATCH_6) +}; + +static const struct dccg_registers dccg_regs = { + DCCG_REG_LIST_DCN3_03() +}; + +static const struct dccg_shift dccg_shift = { + DCCG_MASK_SH_LIST_DCN3_03(__SHIFT) +}; + +static const struct dccg_mask dccg_mask = { + DCCG_MASK_SH_LIST_DCN3_03(_MASK) +}; + +#define abm_regs(id)\ + [id] = { ABM_DCN301_REG_LIST(id) } + +static const struct dce_abm_registers abm_regs[] = { + abm_regs(0), + abm_regs(1) +}; + +static const struct dce_abm_shift abm_shift = { + ABM_MASK_SH_LIST_DCN30(__SHIFT) +}; + +static const struct dce_abm_mask abm_mask = { + ABM_MASK_SH_LIST_DCN30(_MASK) +}; + +static bool dcn303_resource_construct( + uint8_t num_virtual_links, + struct dc *dc, + struct resource_pool *pool) +{ + int i; + struct dc_context *ctx = dc->ctx; + struct irq_service_init_data init_data; + struct ddc_service_init_data ddc_init_data; + + ctx->dc_bios->regs = &bios_regs; + + pool->res_cap = &res_cap_dcn303; + + pool->funcs = &dcn303_res_pool_funcs; + + /************************************************* + * Resource + asic cap harcoding * + *************************************************/ + pool->underlay_pipe_index = NO_UNDERLAY_PIPE; + pool->pipe_count = pool->res_cap->num_timing_generator; + pool->mpcc_count = pool->res_cap->num_timing_generator; + dc->caps.max_downscale_ratio = 600; + dc->caps.i2c_speed_in_khz = 100; + dc->caps.i2c_speed_in_khz_hdcp = 5; /*1.4 w/a applied by derfault*/ + dc->caps.max_cursor_size = 256; + dc->caps.min_horizontal_blanking_period = 80; + dc->caps.dmdata_alloc_size = 2048; +#if defined(CONFIG_DRM_AMD_DC_DCN) + dc->caps.mall_size_per_mem_channel = 4; + /* total size = mall per channel * num channels * 1024 * 1024 */ + dc->caps.mall_size_total = dc->caps.mall_size_per_mem_channel * + dc->ctx->dc_bios->vram_info.num_chans * + 1024 * 1024; + dc->caps.cursor_cache_size = + dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8; +#endif + dc->caps.max_slave_planes = 1; + dc->caps.post_blend_color_processing = true; + dc->caps.force_dp_tps4_for_cp2520 = true; + dc->caps.extended_aux_timeout_support = true; + dc->caps.dmcub_support = true; + + /* Color pipeline capabilities */ + dc->caps.color.dpp.dcn_arch = 1; + dc->caps.color.dpp.input_lut_shared = 0; + dc->caps.color.dpp.icsc = 1; + dc->caps.color.dpp.dgam_ram = 0; // must use gamma_corr + dc->caps.color.dpp.dgam_rom_caps.srgb = 1; + dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1; + dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 1; + dc->caps.color.dpp.dgam_rom_caps.pq = 1; + dc->caps.color.dpp.dgam_rom_caps.hlg = 1; + dc->caps.color.dpp.post_csc = 1; + dc->caps.color.dpp.gamma_corr = 1; + dc->caps.color.dpp.dgam_rom_for_yuv = 0; + + dc->caps.color.dpp.hw_3d_lut = 1; + 
dc->caps.color.dpp.ogam_ram = 1; + // no OGAM ROM on DCN3 + dc->caps.color.dpp.ogam_rom_caps.srgb = 0; + dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0; + dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0; + dc->caps.color.dpp.ogam_rom_caps.pq = 0; + dc->caps.color.dpp.ogam_rom_caps.hlg = 0; + dc->caps.color.dpp.ocsc = 0; + + dc->caps.color.mpc.gamut_remap = 1; + dc->caps.color.mpc.num_3dluts = pool->res_cap->num_mpc_3dlut; //3 + dc->caps.color.mpc.ogam_ram = 1; + dc->caps.color.mpc.ogam_rom_caps.srgb = 0; + dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0; + dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0; + dc->caps.color.mpc.ogam_rom_caps.pq = 0; + dc->caps.color.mpc.ogam_rom_caps.hlg = 0; + dc->caps.color.mpc.ocsc = 1; + + if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) + dc->debug = debug_defaults_drv; + else + dc->debug = debug_defaults_diags; + + // Init the vm_helper + if (dc->vm_helper) + vm_helper_init(dc->vm_helper, 16); + + /************************************************* + * Create resources * + *************************************************/ + + /* Clock Sources for Pixel Clock*/ + pool->clock_sources[DCN303_CLK_SRC_PLL0] = + dcn303_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL0, + &clk_src_regs[0], false); + pool->clock_sources[DCN303_CLK_SRC_PLL1] = + dcn303_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL1, + &clk_src_regs[1], false); + + pool->clk_src_count = DCN303_CLK_SRC_TOTAL; + + /* todo: not reuse phy_pll registers */ + pool->dp_clock_source = + dcn303_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_ID_DP_DTO, + &clk_src_regs[0], true); + + for (i = 0; i < pool->clk_src_count; i++) { + if (pool->clock_sources[i] == NULL) { + dm_error("DC: failed to create clock sources!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + } + + /* DCCG */ + pool->dccg = dccg30_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask); + if (pool->dccg == NULL) { + dm_error("DC: failed to create dccg!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + + /* PP Lib and SMU interfaces */ + init_soc_bounding_box(dc, pool); + + /* DML */ + dml_init_instance(&dc->dml, &dcn3_03_soc, &dcn3_03_ip, DML_PROJECT_DCN30); + + /* IRQ */ + init_data.ctx = dc->ctx; + pool->irqs = dal_irq_service_dcn303_create(&init_data); + if (!pool->irqs) + goto create_fail; + + /* HUBBUB */ + pool->hubbub = dcn303_hubbub_create(ctx); + if (pool->hubbub == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create hubbub!\n"); + goto create_fail; + } + + /* HUBPs, DPPs, OPPs and TGs */ + for (i = 0; i < pool->pipe_count; i++) { + pool->hubps[i] = dcn303_hubp_create(ctx, i); + if (pool->hubps[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create hubps!\n"); + goto create_fail; + } + + pool->dpps[i] = dcn303_dpp_create(ctx, i); + if (pool->dpps[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create dpps!\n"); + goto create_fail; + } + } + + for (i = 0; i < pool->res_cap->num_opp; i++) { + pool->opps[i] = dcn303_opp_create(ctx, i); + if (pool->opps[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create output pixel processor!\n"); + goto create_fail; + } + } + + for (i = 0; i < pool->res_cap->num_timing_generator; i++) { + pool->timing_generators[i] = dcn303_timing_generator_create(ctx, i); + if (pool->timing_generators[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create tg!\n"); + goto create_fail; + } + } + pool->timing_generator_count = i; + + /* PSR */ + pool->psr = dmub_psr_create(ctx); + if (pool->psr == 
NULL) { + dm_error("DC: failed to create psr!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + + /* ABM */ + for (i = 0; i < pool->res_cap->num_timing_generator; i++) { + pool->multiple_abms[i] = dmub_abm_create(ctx, &abm_regs[i], &abm_shift, &abm_mask); + if (pool->multiple_abms[i] == NULL) { + dm_error("DC: failed to create abm for pipe %d!\n", i); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + } + + /* MPC and DSC */ + pool->mpc = dcn303_mpc_create(ctx, pool->mpcc_count, pool->res_cap->num_mpc_3dlut); + if (pool->mpc == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create mpc!\n"); + goto create_fail; + } + + for (i = 0; i < pool->res_cap->num_dsc; i++) { + pool->dscs[i] = dcn303_dsc_create(ctx, i); + if (pool->dscs[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create display stream compressor %d!\n", i); + goto create_fail; + } + } + + /* DWB and MMHUBBUB */ + if (!dcn303_dwbc_create(ctx, pool)) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create dwbc!\n"); + goto create_fail; + } + + if (!dcn303_mmhubbub_create(ctx, pool)) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create mcif_wb!\n"); + goto create_fail; + } + + /* AUX and I2C */ + for (i = 0; i < pool->res_cap->num_ddc; i++) { + pool->engines[i] = dcn303_aux_engine_create(ctx, i); + if (pool->engines[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC:failed to create aux engine!!\n"); + goto create_fail; + } + pool->hw_i2cs[i] = dcn303_i2c_hw_create(ctx, i); + if (pool->hw_i2cs[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC:failed to create hw i2c!!\n"); + goto create_fail; + } + pool->sw_i2cs[i] = NULL; + } + + /* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */ + if (!resource_construct(num_virtual_links, dc, pool, + (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ? + &res_create_funcs : &res_create_maximus_funcs))) + goto create_fail; + + /* HW Sequencer and Plane caps */ + dcn303_hw_sequencer_construct(dc); + + dc->caps.max_planes = pool->pipe_count; + + for (i = 0; i < dc->caps.max_planes; ++i) + dc->caps.planes[i] = plane_cap; + + dc->cap_funcs = cap_funcs; + + if (dc->ctx->dc_bios->fw_info.oem_i2c_present) { + ddc_init_data.ctx = dc->ctx; + ddc_init_data.link = NULL; + ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id; + ddc_init_data.id.enum_id = 0; + ddc_init_data.id.type = OBJECT_TYPE_GENERIC; + pool->oem_device = dal_ddc_service_create(&ddc_init_data); + } else { + pool->oem_device = NULL; + } + + return true; + +create_fail: + + dcn303_resource_destruct(pool); + + return false; +} + +struct resource_pool *dcn303_create_resource_pool(const struct dc_init_data *init_data, struct dc *dc) +{ + struct resource_pool *pool = kzalloc(sizeof(struct resource_pool), GFP_KERNEL); + + if (!pool) + return NULL; + + if (dcn303_resource_construct(init_data->num_virtual_links, dc, pool)) + return pool; + + BREAK_TO_DEBUGGER(); + kfree(pool); + return NULL; +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.h b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.h new file mode 100644 index 000000000000..5b590c169763 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.h @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright (C) 2021 Advanced Micro Devices, Inc. 
+ * + * Authors: AMD + */ + +#ifndef _DCN303_RESOURCE_H_ +#define _DCN303_RESOURCE_H_ + +#include "core_types.h" + +struct resource_pool *dcn303_create_resource_pool(const struct dc_init_data *init_data, struct dc *dc); + +void dcn303_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params); + +#endif /* _DCN303_RESOURCE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h index 7617fab9e1f9..304d50d16d01 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h +++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h @@ -34,6 +34,8 @@ #include "dc.h" struct dp_mst_stream_allocation_table; +struct aux_payload; +enum aux_return_code_type; /* * Allocate memory accessible by the GPU @@ -158,6 +160,11 @@ void dm_set_dcn_clocks( struct dc_context *ctx, struct dc_clocks *clks); -bool dm_helpers_dmub_outbox0_interrupt_control(struct dc_context *ctx, bool enable); +bool dm_helpers_dmub_outbox_interrupt_control(struct dc_context *ctx, bool enable); +int dm_helper_dmub_aux_transfer_sync( + struct dc_context *ctx, + const struct dc_link *link, + struct aux_payload *payload, + enum aux_return_code_type *operation_result); #endif /* __DM_HELPERS__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c index 9729cf292e84..d3b5b6fedf04 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c @@ -2895,7 +2895,7 @@ static void dml20_DisplayPipeConfiguration(struct display_mode_lib *mode_lib) RoundedUpMaxSwathSizeBytesC = 0.0; if (RoundedUpMaxSwathSizeBytesY + RoundedUpMaxSwathSizeBytesC - <= mode_lib->vba.DETBufferSizeInKByte * 1024.0 / 2.0) { + <= mode_lib->vba.DETBufferSizeInKByte[0] * 1024.0 / 2.0) { mode_lib->vba.SwathHeightY[k] = MaximumSwathHeightY; mode_lib->vba.SwathHeightC[k] = MaximumSwathHeightC; } else { @@ -2904,17 +2904,17 @@ static void dml20_DisplayPipeConfiguration(struct display_mode_lib *mode_lib) } if (mode_lib->vba.SwathHeightC[k] == 0) { - mode_lib->vba.DETBufferSizeY[k] = mode_lib->vba.DETBufferSizeInKByte * 1024; + mode_lib->vba.DETBufferSizeY[k] = mode_lib->vba.DETBufferSizeInKByte[0] * 1024; mode_lib->vba.DETBufferSizeC[k] = 0; } else if (mode_lib->vba.SwathHeightY[k] <= mode_lib->vba.SwathHeightC[k]) { - mode_lib->vba.DETBufferSizeY[k] = mode_lib->vba.DETBufferSizeInKByte + mode_lib->vba.DETBufferSizeY[k] = mode_lib->vba.DETBufferSizeInKByte[0] * 1024.0 / 2; - mode_lib->vba.DETBufferSizeC[k] = mode_lib->vba.DETBufferSizeInKByte + mode_lib->vba.DETBufferSizeC[k] = mode_lib->vba.DETBufferSizeInKByte[0] * 1024.0 / 2; } else { - mode_lib->vba.DETBufferSizeY[k] = mode_lib->vba.DETBufferSizeInKByte + mode_lib->vba.DETBufferSizeY[k] = mode_lib->vba.DETBufferSizeInKByte[0] * 1024.0 * 2 / 3; - mode_lib->vba.DETBufferSizeC[k] = mode_lib->vba.DETBufferSizeInKByte + mode_lib->vba.DETBufferSizeC[k] = mode_lib->vba.DETBufferSizeInKByte[0] * 1024.0 / 3; } } @@ -3819,7 +3819,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.MaximumSwathWidthInDETBuffer = dml_min( mode_lib->vba.MaximumSwathWidthSupport, - mode_lib->vba.DETBufferSizeInKByte * 1024.0 / 2.0 + mode_lib->vba.DETBufferSizeInKByte[0] * 1024.0 / 2.0 / (locals->BytePerPixelInDETY[k] * locals->MinSwathHeightY[k] + locals->BytePerPixelInDETC[k] @@ -4322,7 +4322,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l 
locals->RoundedUpMaxSwathSizeBytesC = 0; } - if (locals->RoundedUpMaxSwathSizeBytesY + locals->RoundedUpMaxSwathSizeBytesC <= locals->DETBufferSizeInKByte * 1024 / 2) { + if (locals->RoundedUpMaxSwathSizeBytesY + locals->RoundedUpMaxSwathSizeBytesC <= locals->DETBufferSizeInKByte[0] * 1024 / 2) { locals->SwathHeightYPerState[i][j][k] = locals->MaxSwathHeightY[k]; locals->SwathHeightCPerState[i][j][k] = locals->MaxSwathHeightC[k]; } else { @@ -4331,15 +4331,15 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l } if (locals->BytePerPixelInDETC[k] == 0) { - locals->LinesInDETLuma = locals->DETBufferSizeInKByte * 1024 / locals->BytePerPixelInDETY[k] / locals->SwathWidthYPerState[i][j][k]; + locals->LinesInDETLuma = locals->DETBufferSizeInKByte[0] * 1024 / locals->BytePerPixelInDETY[k] / locals->SwathWidthYPerState[i][j][k]; locals->LinesInDETChroma = 0; } else if (locals->SwathHeightYPerState[i][j][k] <= locals->SwathHeightCPerState[i][j][k]) { - locals->LinesInDETLuma = locals->DETBufferSizeInKByte * 1024 / 2 / locals->BytePerPixelInDETY[k] / + locals->LinesInDETLuma = locals->DETBufferSizeInKByte[0] * 1024 / 2 / locals->BytePerPixelInDETY[k] / locals->SwathWidthYPerState[i][j][k]; - locals->LinesInDETChroma = locals->DETBufferSizeInKByte * 1024 / 2 / locals->BytePerPixelInDETC[k] / (locals->SwathWidthYPerState[i][j][k] / 2); + locals->LinesInDETChroma = locals->DETBufferSizeInKByte[0] * 1024 / 2 / locals->BytePerPixelInDETC[k] / (locals->SwathWidthYPerState[i][j][k] / 2); } else { - locals->LinesInDETLuma = locals->DETBufferSizeInKByte * 1024 * 2 / 3 / locals->BytePerPixelInDETY[k] / locals->SwathWidthYPerState[i][j][k]; - locals->LinesInDETChroma = locals->DETBufferSizeInKByte * 1024 / 3 / locals->BytePerPixelInDETY[k] / (locals->SwathWidthYPerState[i][j][k] / 2); + locals->LinesInDETLuma = locals->DETBufferSizeInKByte[0] * 1024 * 2 / 3 / locals->BytePerPixelInDETY[k] / locals->SwathWidthYPerState[i][j][k]; + locals->LinesInDETChroma = locals->DETBufferSizeInKByte[0] * 1024 / 3 / locals->BytePerPixelInDETY[k] / (locals->SwathWidthYPerState[i][j][k] / 2); } locals->EffectiveLBLatencyHidingSourceLinesLuma = dml_min(locals->MaxLineBufferLines, diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c index 51098c2c9854..fbed5304692d 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c @@ -2968,7 +2968,7 @@ static void dml20v2_DisplayPipeConfiguration(struct display_mode_lib *mode_lib) RoundedUpMaxSwathSizeBytesC = 0.0; if (RoundedUpMaxSwathSizeBytesY + RoundedUpMaxSwathSizeBytesC - <= mode_lib->vba.DETBufferSizeInKByte * 1024.0 / 2.0) { + <= mode_lib->vba.DETBufferSizeInKByte[0] * 1024.0 / 2.0) { mode_lib->vba.SwathHeightY[k] = MaximumSwathHeightY; mode_lib->vba.SwathHeightC[k] = MaximumSwathHeightC; } else { @@ -2977,17 +2977,17 @@ static void dml20v2_DisplayPipeConfiguration(struct display_mode_lib *mode_lib) } if (mode_lib->vba.SwathHeightC[k] == 0) { - mode_lib->vba.DETBufferSizeY[k] = mode_lib->vba.DETBufferSizeInKByte * 1024; + mode_lib->vba.DETBufferSizeY[k] = mode_lib->vba.DETBufferSizeInKByte[0] * 1024; mode_lib->vba.DETBufferSizeC[k] = 0; } else if (mode_lib->vba.SwathHeightY[k] <= mode_lib->vba.SwathHeightC[k]) { - mode_lib->vba.DETBufferSizeY[k] = mode_lib->vba.DETBufferSizeInKByte + mode_lib->vba.DETBufferSizeY[k] = mode_lib->vba.DETBufferSizeInKByte[0] * 1024.0 / 2; - 
mode_lib->vba.DETBufferSizeC[k] = mode_lib->vba.DETBufferSizeInKByte + mode_lib->vba.DETBufferSizeC[k] = mode_lib->vba.DETBufferSizeInKByte[0] * 1024.0 / 2; } else { - mode_lib->vba.DETBufferSizeY[k] = mode_lib->vba.DETBufferSizeInKByte + mode_lib->vba.DETBufferSizeY[k] = mode_lib->vba.DETBufferSizeInKByte[0] * 1024.0 * 2 / 3; - mode_lib->vba.DETBufferSizeC[k] = mode_lib->vba.DETBufferSizeInKByte + mode_lib->vba.DETBufferSizeC[k] = mode_lib->vba.DETBufferSizeInKByte[0] * 1024.0 / 3; } } @@ -3926,7 +3926,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode mode_lib->vba.MaximumSwathWidthInDETBuffer = dml_min( mode_lib->vba.MaximumSwathWidthSupport, - mode_lib->vba.DETBufferSizeInKByte * 1024.0 / 2.0 + mode_lib->vba.DETBufferSizeInKByte[0] * 1024.0 / 2.0 / (locals->BytePerPixelInDETY[k] * locals->MinSwathHeightY[k] + locals->BytePerPixelInDETC[k] @@ -4443,7 +4443,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode locals->RoundedUpMaxSwathSizeBytesC = 0; } - if (locals->RoundedUpMaxSwathSizeBytesY + locals->RoundedUpMaxSwathSizeBytesC <= locals->DETBufferSizeInKByte * 1024 / 2) { + if (locals->RoundedUpMaxSwathSizeBytesY + locals->RoundedUpMaxSwathSizeBytesC <= locals->DETBufferSizeInKByte[0] * 1024 / 2) { locals->SwathHeightYPerState[i][j][k] = locals->MaxSwathHeightY[k]; locals->SwathHeightCPerState[i][j][k] = locals->MaxSwathHeightC[k]; } else { @@ -4452,15 +4452,15 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode } if (locals->BytePerPixelInDETC[k] == 0) { - locals->LinesInDETLuma = locals->DETBufferSizeInKByte * 1024 / locals->BytePerPixelInDETY[k] / locals->SwathWidthYPerState[i][j][k]; + locals->LinesInDETLuma = locals->DETBufferSizeInKByte[0] * 1024 / locals->BytePerPixelInDETY[k] / locals->SwathWidthYPerState[i][j][k]; locals->LinesInDETChroma = 0; } else if (locals->SwathHeightYPerState[i][j][k] <= locals->SwathHeightCPerState[i][j][k]) { - locals->LinesInDETLuma = locals->DETBufferSizeInKByte * 1024 / 2 / locals->BytePerPixelInDETY[k] / + locals->LinesInDETLuma = locals->DETBufferSizeInKByte[0] * 1024 / 2 / locals->BytePerPixelInDETY[k] / locals->SwathWidthYPerState[i][j][k]; - locals->LinesInDETChroma = locals->DETBufferSizeInKByte * 1024 / 2 / locals->BytePerPixelInDETC[k] / (locals->SwathWidthYPerState[i][j][k] / 2); + locals->LinesInDETChroma = locals->DETBufferSizeInKByte[0] * 1024 / 2 / locals->BytePerPixelInDETC[k] / (locals->SwathWidthYPerState[i][j][k] / 2); } else { - locals->LinesInDETLuma = locals->DETBufferSizeInKByte * 1024 * 2 / 3 / locals->BytePerPixelInDETY[k] / locals->SwathWidthYPerState[i][j][k]; - locals->LinesInDETChroma = locals->DETBufferSizeInKByte * 1024 / 3 / locals->BytePerPixelInDETY[k] / (locals->SwathWidthYPerState[i][j][k] / 2); + locals->LinesInDETLuma = locals->DETBufferSizeInKByte[0] * 1024 * 2 / 3 / locals->BytePerPixelInDETY[k] / locals->SwathWidthYPerState[i][j][k]; + locals->LinesInDETChroma = locals->DETBufferSizeInKByte[0] * 1024 / 3 / locals->BytePerPixelInDETY[k] / (locals->SwathWidthYPerState[i][j][k] / 2); } locals->EffectiveLBLatencyHidingSourceLinesLuma = dml_min(locals->MaxLineBufferLines, diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c index 398210d1af34..c26e742e8137 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c @@ -148,7 +148,7 @@ static 
double CalculateDCCConfiguration( bool DCCProgrammingAssumesScanDirectionUnknown, unsigned int ViewportWidth, unsigned int ViewportHeight, - double DETBufferSize, + unsigned int DETBufferSize, unsigned int RequestHeight256Byte, unsigned int SwathHeight, enum dm_swizzle_mode TilingFormat, @@ -289,7 +289,7 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport( unsigned int MaxLineBufferLines, unsigned int LineBufferSize, unsigned int DPPOutputBufferPixels, - double DETBufferSizeInKByte, + unsigned int DETBufferSizeInKByte, unsigned int WritebackInterfaceLumaBufferSize, unsigned int WritebackInterfaceChromaBufferSize, double DCFCLK, @@ -354,11 +354,11 @@ static void CalculateDCFCLKDeepSleep( double DPPCLK[], double *DCFCLKDeepSleep); static void CalculateDETBufferSize( - double DETBufferSizeInKByte, + unsigned int DETBufferSizeInKByte, unsigned int SwathHeightY, unsigned int SwathHeightC, - double *DETBufferSizeY, - double *DETBufferSizeC); + unsigned int *DETBufferSizeY, + unsigned int *DETBufferSizeC); static void CalculateUrgentBurstFactor( unsigned int DETBufferSizeInKByte, unsigned int SwathHeightY, @@ -1074,7 +1074,7 @@ static double CalculateDCCConfiguration( bool DCCProgrammingAssumesScanDirectionUnknown, unsigned int ViewportWidth, unsigned int ViewportHeight, - double DETBufferSize, + unsigned int DETBufferSize, unsigned int RequestHeight256Byte, unsigned int SwathHeight, enum dm_swizzle_mode TilingFormat, @@ -2246,7 +2246,7 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman } CalculateUrgentBurstFactor( - mode_lib->vba.DETBufferSizeInKByte, + mode_lib->vba.DETBufferSizeInKByte[0], mode_lib->vba.SwathHeightY[k], mode_lib->vba.SwathHeightC[k], locals->SwathWidthY[k], @@ -2415,7 +2415,7 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman mode_lib->vba.MaxLineBufferLines, mode_lib->vba.LineBufferSize, mode_lib->vba.DPPOutputBufferPixels, - mode_lib->vba.DETBufferSizeInKByte, + mode_lib->vba.DETBufferSizeInKByte[0], mode_lib->vba.WritebackInterfaceLumaBufferSize, mode_lib->vba.WritebackInterfaceChromaBufferSize, mode_lib->vba.DCFCLK, @@ -2588,7 +2588,7 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman false, // We should always know the direction DCCProgrammingAssumesScanDirectionUnknown, mode_lib->vba.ViewportWidth[k], mode_lib->vba.ViewportHeight[k], - mode_lib->vba.DETBufferSizeInKByte * 1024, + mode_lib->vba.DETBufferSizeInKByte[0] * 1024, locals->BlockHeight256BytesY[k], mode_lib->vba.SwathHeightY[k], mode_lib->vba.SurfaceTiling[k], @@ -2689,13 +2689,13 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman // Stutter Efficiency for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { CalculateDETBufferSize( - mode_lib->vba.DETBufferSizeInKByte, + mode_lib->vba.DETBufferSizeInKByte[0], mode_lib->vba.SwathHeightY[k], mode_lib->vba.SwathHeightC[k], &locals->DETBufferSizeY[k], &locals->DETBufferSizeC[k]); - locals->LinesInDETY[k] = locals->DETBufferSizeY[k] + locals->LinesInDETY[k] = (double)locals->DETBufferSizeY[k] / locals->BytePerPixelDETY[k] / locals->SwathWidthY[k]; locals->LinesInDETYRoundedDownToSwath[k] = dml_floor( locals->LinesInDETY[k], @@ -2984,7 +2984,7 @@ static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib) RoundedUpMaxSwathSizeBytesC = 0.0; if (RoundedUpMaxSwathSizeBytesY + RoundedUpMaxSwathSizeBytesC - <= mode_lib->vba.DETBufferSizeInKByte * 1024.0 / 2.0) { + <= mode_lib->vba.DETBufferSizeInKByte[0] * 1024.0 / 2.0) 
{ mode_lib->vba.SwathHeightY[k] = MaximumSwathHeightY; mode_lib->vba.SwathHeightC[k] = MaximumSwathHeightC; } else { @@ -2993,7 +2993,7 @@ static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib) } CalculateDETBufferSize( - mode_lib->vba.DETBufferSizeInKByte, + mode_lib->vba.DETBufferSizeInKByte[0], mode_lib->vba.SwathHeightY[k], mode_lib->vba.SwathHeightC[k], &mode_lib->vba.DETBufferSizeY[k], @@ -3888,7 +3888,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.MaximumSwathWidthInDETBuffer = dml_min( mode_lib->vba.MaximumSwathWidthSupport, - mode_lib->vba.DETBufferSizeInKByte * 1024.0 / 2.0 + mode_lib->vba.DETBufferSizeInKByte[0] * 1024.0 / 2.0 / (locals->BytePerPixelInDETY[k] * locals->MinSwathHeightY[k] + locals->BytePerPixelInDETC[k] @@ -4437,7 +4437,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.RoundedUpMaxSwathSizeBytesC = 0.0; } if (mode_lib->vba.RoundedUpMaxSwathSizeBytesY + mode_lib->vba.RoundedUpMaxSwathSizeBytesC - <= mode_lib->vba.DETBufferSizeInKByte * 1024.0 / 2.0) { + <= mode_lib->vba.DETBufferSizeInKByte[0] * 1024.0 / 2.0) { locals->SwathHeightYThisState[k] = locals->MaxSwathHeightY[k]; locals->SwathHeightCThisState[k] = locals->MaxSwathHeightC[k]; } else { @@ -4801,7 +4801,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l } CalculateUrgentBurstFactor( - mode_lib->vba.DETBufferSizeInKByte, + mode_lib->vba.DETBufferSizeInKByte[0], locals->SwathHeightYThisState[k], locals->SwathHeightCThisState[k], locals->SwathWidthYThisState[k], @@ -4975,7 +4975,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.MaxLineBufferLines, mode_lib->vba.LineBufferSize, mode_lib->vba.DPPOutputBufferPixels, - mode_lib->vba.DETBufferSizeInKByte, + mode_lib->vba.DETBufferSizeInKByte[0], mode_lib->vba.WritebackInterfaceLumaBufferSize, mode_lib->vba.WritebackInterfaceChromaBufferSize, mode_lib->vba.DCFCLKPerState[i], @@ -5230,7 +5230,7 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport( unsigned int MaxLineBufferLines, unsigned int LineBufferSize, unsigned int DPPOutputBufferPixels, - double DETBufferSizeInKByte, + unsigned int DETBufferSizeInKByte, unsigned int WritebackInterfaceLumaBufferSize, unsigned int WritebackInterfaceChromaBufferSize, double DCFCLK, @@ -5285,8 +5285,8 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport( double EffectiveLBLatencyHidingC; double DPPOutputBufferLinesY; double DPPOutputBufferLinesC; - double DETBufferSizeY; - double DETBufferSizeC; + unsigned int DETBufferSizeY; + unsigned int DETBufferSizeC; double LinesInDETY[DC__NUM_DPP__MAX]; double LinesInDETC; unsigned int LinesInDETYRoundedDownToSwath[DC__NUM_DPP__MAX]; @@ -5382,12 +5382,12 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport( &DETBufferSizeY, &DETBufferSizeC); - LinesInDETY[k] = DETBufferSizeY / BytePerPixelDETY[k] / SwathWidthY[k]; + LinesInDETY[k] = (double)DETBufferSizeY / BytePerPixelDETY[k] / SwathWidthY[k]; LinesInDETYRoundedDownToSwath[k] = dml_floor(LinesInDETY[k], SwathHeightY[k]); FullDETBufferingTimeY[k] = LinesInDETYRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k]) / VRatio[k]; if (BytePerPixelDETC[k] > 0) { - LinesInDETC = DETBufferSizeC / BytePerPixelDETC[k] / (SwathWidthY[k] / 2.0); + LinesInDETC = (double)DETBufferSizeC / BytePerPixelDETC[k] / (SwathWidthY[k] / 2.0); LinesInDETCRoundedDownToSwath = dml_floor(LinesInDETC, SwathHeightC[k]); FullDETBufferingTimeC = 
LinesInDETCRoundedDownToSwath * (HTotal[k] / PixelClock[k]) / (VRatio[k] / 2); @@ -5574,11 +5574,11 @@ static void CalculateDCFCLKDeepSleep( } static void CalculateDETBufferSize( - double DETBufferSizeInKByte, + unsigned int DETBufferSizeInKByte, unsigned int SwathHeightY, unsigned int SwathHeightC, - double *DETBufferSizeY, - double *DETBufferSizeC) + unsigned int *DETBufferSizeY, + unsigned int *DETBufferSizeC) { if (SwathHeightC == 0) { *DETBufferSizeY = DETBufferSizeInKByte * 1024; @@ -5625,8 +5625,8 @@ static void CalculateUrgentBurstFactor( double DETBufferSizeInTimeLumaPre; double DETBufferSizeInTimeChroma; double DETBufferSizeInTimeChromaPre; - double DETBufferSizeY; - double DETBufferSizeC; + unsigned int DETBufferSizeY; + unsigned int DETBufferSizeC; *NotEnoughUrgentLatencyHiding = 0; *NotEnoughUrgentLatencyHidingPre = 0; @@ -5663,7 +5663,7 @@ static void CalculateUrgentBurstFactor( &DETBufferSizeY, &DETBufferSizeC); - LinesInDETLuma = DETBufferSizeY / BytePerPixelInDETY / SwathWidthY; + LinesInDETLuma = (double)DETBufferSizeY / BytePerPixelInDETY / SwathWidthY; DETBufferSizeInTimeLuma = dml_floor(LinesInDETLuma, SwathHeightY) * LineTime / VRatio; if (DETBufferSizeInTimeLuma - UrgentLatency <= 0) { *NotEnoughUrgentLatencyHiding = 1; @@ -5687,7 +5687,7 @@ static void CalculateUrgentBurstFactor( } if (BytePerPixelInDETC > 0) { - LinesInDETChroma = DETBufferSizeC / BytePerPixelInDETC / (SwathWidthY / 2); + LinesInDETChroma = (double)DETBufferSizeC / BytePerPixelInDETC / (SwathWidthY / 2); DETBufferSizeInTimeChroma = dml_floor(LinesInDETChroma, SwathHeightC) * LineTime / (VRatio / 2); if (DETBufferSizeInTimeChroma - UrgentLatency <= 0) { diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c index cb3f70a71b51..ec56210b6180 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c @@ -299,7 +299,7 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport( unsigned int MaxLineBufferLines, unsigned int LineBufferSize, unsigned int DPPOutputBufferPixels, - double DETBufferSizeInKByte, + unsigned int DETBufferSizeInKByte, unsigned int WritebackInterfaceBufferSize, double DCFCLK, double ReturnBW, @@ -318,8 +318,8 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport( unsigned int DPPPerPlane[], bool DCCEnable[], double DPPCLK[], - double DETBufferSizeY[], - double DETBufferSizeC[], + unsigned int DETBufferSizeY[], + unsigned int DETBufferSizeC[], unsigned int SwathHeightY[], unsigned int SwathHeightC[], unsigned int LBBitPerPixel[], @@ -570,7 +570,7 @@ static void CalculateStutterEfficiency( double SRExitTime, bool SynchronizedVBlank, int DPPPerPlane[], - double DETBufferSizeY[], + unsigned int DETBufferSizeY[], int BytePerPixelY[], double BytePerPixelDETY[], double SwathWidthY[], @@ -603,7 +603,7 @@ static void CalculateStutterEfficiency( static void CalculateSwathAndDETConfiguration( bool ForceSingleDPP, int NumberOfActivePlanes, - long DETBufferSizeInKByte, + unsigned int DETBufferSizeInKByte, double MaximumSwathWidthLuma[], double MaximumSwathWidthChroma[], enum scan_direction_class SourceScan[], @@ -635,8 +635,8 @@ static void CalculateSwathAndDETConfiguration( double SwathWidthChroma[], int SwathHeightY[], int SwathHeightC[], - double DETBufferSizeY[], - double DETBufferSizeC[], + unsigned int DETBufferSizeY[], + unsigned int DETBufferSizeC[], bool ViewportSizeSupportPerPlane[], bool 
*ViewportSizeSupport); static void CalculateSwathWidth( @@ -2613,7 +2613,7 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman CalculateUrgentBurstFactor( v->swath_width_luma_ub[k], v->swath_width_chroma_ub[k], - v->DETBufferSizeInKByte, + v->DETBufferSizeInKByte[0], v->SwathHeightY[k], v->SwathHeightC[k], v->HTotal[k] / v->PixelClock[k], @@ -2635,7 +2635,7 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman CalculateUrgentBurstFactor( v->swath_width_luma_ub[k], v->swath_width_chroma_ub[k], - v->DETBufferSizeInKByte, + v->DETBufferSizeInKByte[0], v->SwathHeightY[k], v->SwathHeightC[k], v->HTotal[k] / v->PixelClock[k], @@ -2808,7 +2808,7 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman v->MaxLineBufferLines, v->LineBufferSize, v->DPPOutputBufferPixels, - v->DETBufferSizeInKByte, + v->DETBufferSizeInKByte[0], v->WritebackInterfaceBufferSize, v->DCFCLK, v->ReturnBW, @@ -3027,7 +3027,7 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman v->SurfaceWidthC[k], v->SurfaceHeightY[k], v->SurfaceHeightC[k], - v->DETBufferSizeInKByte * 1024, + v->DETBufferSizeInKByte[0] * 1024, v->BlockHeight256BytesY[k], v->BlockHeight256BytesC[k], v->SurfaceTiling[k], @@ -3177,7 +3177,7 @@ static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib) CalculateSwathAndDETConfiguration( false, mode_lib->vba.NumberOfActivePlanes, - mode_lib->vba.DETBufferSizeInKByte, + mode_lib->vba.DETBufferSizeInKByte[0], dummy1, dummy2, mode_lib->vba.SourceScan, @@ -3911,7 +3911,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l CalculateSwathAndDETConfiguration( true, v->NumberOfActivePlanes, - v->DETBufferSizeInKByte, + v->DETBufferSizeInKByte[0], v->MaximumSwathWidthLuma, v->MaximumSwathWidthChroma, v->SourceScan, @@ -4399,7 +4399,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l CalculateSwathAndDETConfiguration( false, v->NumberOfActivePlanes, - v->DETBufferSizeInKByte, + v->DETBufferSizeInKByte[0], v->MaximumSwathWidthLuma, v->MaximumSwathWidthChroma, v->SourceScan, @@ -4622,7 +4622,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l CalculateUrgentBurstFactor( v->swath_width_luma_ub_this_state[k], v->swath_width_chroma_ub_this_state[k], - v->DETBufferSizeInKByte, + v->DETBufferSizeInKByte[0], v->SwathHeightYThisState[k], v->SwathHeightCThisState[k], v->HTotal[k] / v->PixelClock[k], @@ -5025,7 +5025,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l CalculateUrgentBurstFactor( v->swath_width_luma_ub_this_state[k], v->swath_width_chroma_ub_this_state[k], - v->DETBufferSizeInKByte, + v->DETBufferSizeInKByte[0], v->SwathHeightYThisState[k], v->SwathHeightCThisState[k], v->HTotal[k] / v->PixelClock[k], @@ -5197,7 +5197,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l v->MaxLineBufferLines, v->LineBufferSize, v->DPPOutputBufferPixels, - v->DETBufferSizeInKByte, + v->DETBufferSizeInKByte[0], v->WritebackInterfaceBufferSize, v->DCFCLKState[i][j], v->ReturnBWPerState[i][j], @@ -5369,7 +5369,7 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport( unsigned int MaxLineBufferLines, unsigned int LineBufferSize, unsigned int DPPOutputBufferPixels, - double DETBufferSizeInKByte, + unsigned int DETBufferSizeInKByte, unsigned int WritebackInterfaceBufferSize, double DCFCLK, double ReturnBW, @@ -5388,8 +5388,8 @@ static void 
CalculateWatermarksAndDRAMSpeedChangeSupport( unsigned int DPPPerPlane[], bool DCCEnable[], double DPPCLK[], - double DETBufferSizeY[], - double DETBufferSizeC[], + unsigned int DETBufferSizeY[], + unsigned int DETBufferSizeC[], unsigned int SwathHeightY[], unsigned int SwathHeightC[], unsigned int LBBitPerPixel[], @@ -6126,7 +6126,7 @@ static void CalculateStutterEfficiency( double SRExitTime, bool SynchronizedVBlank, int DPPPerPlane[], - double DETBufferSizeY[], + unsigned int DETBufferSizeY[], int BytePerPixelY[], double BytePerPixelDETY[], double SwathWidthY[], @@ -6273,7 +6273,7 @@ static void CalculateStutterEfficiency( static void CalculateSwathAndDETConfiguration( bool ForceSingleDPP, int NumberOfActivePlanes, - long DETBufferSizeInKByte, + unsigned int DETBufferSizeInKByte, double MaximumSwathWidthLuma[], double MaximumSwathWidthChroma[], enum scan_direction_class SourceScan[], @@ -6305,8 +6305,8 @@ static void CalculateSwathAndDETConfiguration( double SwathWidthChroma[], int SwathHeightY[], int SwathHeightC[], - double DETBufferSizeY[], - double DETBufferSizeC[], + unsigned int DETBufferSizeY[], + unsigned int DETBufferSizeC[], bool ViewportSizeSupportPerPlane[], bool *ViewportSizeSupport) { diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c index 2a967458065b..d764d784e279 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c @@ -299,7 +299,7 @@ static void fetch_ip_params(struct display_mode_lib *mode_lib) mode_lib->vba.MaxDCHUBToPSCLThroughput = ip->max_dchub_pscl_bw_pix_per_clk; mode_lib->vba.MaxPSCLToLBThroughput = ip->max_pscl_lb_bw_pix_per_clk; mode_lib->vba.ROBBufferSizeInKByte = ip->rob_buffer_size_kbytes; - mode_lib->vba.DETBufferSizeInKByte = ip->det_buffer_size_kbytes; + mode_lib->vba.DETBufferSizeInKByte[0] = ip->det_buffer_size_kbytes; mode_lib->vba.PixelChunkSizeInKByte = ip->pixel_chunk_size_kbytes; mode_lib->vba.MetaChunkSize = ip->meta_chunk_size_kbytes; diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h index 025aa5bd8ea0..86db86b7153e 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h @@ -233,7 +233,7 @@ struct vba_vars_st { // IP Parameters // unsigned int ROBBufferSizeInKByte; - double DETBufferSizeInKByte; + unsigned int DETBufferSizeInKByte[DC__NUM_DPP__MAX]; double DETBufferSizeInTime; unsigned int DPPOutputBufferPixels; unsigned int OPPOutputBufferLines; @@ -351,8 +351,8 @@ struct vba_vars_st { // Intermediates/Informational bool ImmediateFlipSupport; - double DETBufferSizeY[DC__NUM_DPP__MAX]; - double DETBufferSizeC[DC__NUM_DPP__MAX]; + unsigned int DETBufferSizeY[DC__NUM_DPP__MAX]; + unsigned int DETBufferSizeC[DC__NUM_DPP__MAX]; unsigned int SwathHeightY[DC__NUM_DPP__MAX]; unsigned int SwathHeightC[DC__NUM_DPP__MAX]; unsigned int LBBitPerPixel[DC__NUM_DPP__MAX]; @@ -631,8 +631,8 @@ struct vba_vars_st { enum odm_combine_mode odm_combine_dummy[DC__NUM_DPP__MAX]; double dummy1[DC__NUM_DPP__MAX]; double dummy2[DC__NUM_DPP__MAX]; - double dummy3[DC__NUM_DPP__MAX]; - double dummy4[DC__NUM_DPP__MAX]; + unsigned int dummy3[DC__NUM_DPP__MAX]; + unsigned int dummy4[DC__NUM_DPP__MAX]; double dummy5; double dummy6; double dummy7[DC__NUM_DPP__MAX]; @@ -872,8 +872,8 @@ struct vba_vars_st { int PercentMarginOverMinimumRequiredDCFCLK; bool 
DynamicMetadataSupported[DC__VOLTAGE_STATES][2]; enum immediate_flip_requirement ImmediateFlipRequirement; - double DETBufferSizeYThisState[DC__NUM_DPP__MAX]; - double DETBufferSizeCThisState[DC__NUM_DPP__MAX]; + unsigned int DETBufferSizeYThisState[DC__NUM_DPP__MAX]; + unsigned int DETBufferSizeCThisState[DC__NUM_DPP__MAX]; bool NoUrgentLatencyHiding[DC__NUM_DPP__MAX]; bool NoUrgentLatencyHidingPre[DC__NUM_DPP__MAX]; int swath_width_luma_ub_this_state[DC__NUM_DPP__MAX]; diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c index be57088d185d..f403d8e84a8c 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c @@ -37,6 +37,8 @@ static uint32_t dsc_policy_max_target_bpp_limit = 16; /* default DSC policy enables DSC only when needed */ static bool dsc_policy_enable_dsc_when_not_needed; +static bool dsc_policy_disable_dsc_stream_overhead; + static bool dsc_buff_block_size_from_dpcd(int dpcd_buff_block_size, int *buff_block_size) { @@ -250,6 +252,7 @@ static bool intersect_dsc_caps( if (pixel_encoding == PIXEL_ENCODING_YCBCR422 || pixel_encoding == PIXEL_ENCODING_YCBCR420) dsc_common_caps->bpp_increment_div = min(dsc_common_caps->bpp_increment_div, (uint32_t)8); + dsc_common_caps->is_dp = dsc_sink_caps->is_dp; return true; } @@ -258,12 +261,63 @@ static inline uint32_t dsc_div_by_10_round_up(uint32_t value) return (value + 9) / 10; } +static struct fixed31_32 compute_dsc_max_bandwidth_overhead( + const struct dc_crtc_timing *timing, + const int num_slices_h, + const bool is_dp) +{ + struct fixed31_32 max_dsc_overhead; + struct fixed31_32 refresh_rate; + + if (dsc_policy_disable_dsc_stream_overhead || !is_dp) + return dc_fixpt_from_int(0); + + /* use target bpp that can take entire target bandwidth */ + refresh_rate = dc_fixpt_from_int(timing->pix_clk_100hz); + refresh_rate = dc_fixpt_div_int(refresh_rate, timing->h_total); + refresh_rate = dc_fixpt_div_int(refresh_rate, timing->v_total); + refresh_rate = dc_fixpt_mul_int(refresh_rate, 100); + + max_dsc_overhead = dc_fixpt_from_int(num_slices_h); + max_dsc_overhead = dc_fixpt_mul_int(max_dsc_overhead, timing->v_total); + max_dsc_overhead = dc_fixpt_mul_int(max_dsc_overhead, 256); + max_dsc_overhead = dc_fixpt_div_int(max_dsc_overhead, 1000); + max_dsc_overhead = dc_fixpt_mul(max_dsc_overhead, refresh_rate); + + return max_dsc_overhead; +} + +static uint32_t compute_bpp_x16_from_target_bandwidth( + const uint32_t bandwidth_in_kbps, + const struct dc_crtc_timing *timing, + const uint32_t num_slices_h, + const uint32_t bpp_increment_div, + const bool is_dp) +{ + struct fixed31_32 overhead_in_kbps; + struct fixed31_32 effective_bandwidth_in_kbps; + struct fixed31_32 bpp_x16; + + overhead_in_kbps = compute_dsc_max_bandwidth_overhead( + timing, num_slices_h, is_dp); + effective_bandwidth_in_kbps = dc_fixpt_from_int(bandwidth_in_kbps); + effective_bandwidth_in_kbps = dc_fixpt_sub(effective_bandwidth_in_kbps, + overhead_in_kbps); + bpp_x16 = dc_fixpt_mul_int(effective_bandwidth_in_kbps, 10); + bpp_x16 = dc_fixpt_div_int(bpp_x16, timing->pix_clk_100hz); + bpp_x16 = dc_fixpt_from_int(dc_fixpt_floor(dc_fixpt_mul_int(bpp_x16, bpp_increment_div))); + bpp_x16 = dc_fixpt_div_int(bpp_x16, bpp_increment_div); + bpp_x16 = dc_fixpt_mul_int(bpp_x16, 16); + return dc_fixpt_floor(bpp_x16); +} + /* Get DSC bandwidth range based on [min_bpp, max_bpp] target bitrate range, and timing's pixel clock * and uncompressed bandwidth. 
*/ static void get_dsc_bandwidth_range( const uint32_t min_bpp_x16, const uint32_t max_bpp_x16, + const uint32_t num_slices_h, const struct dsc_enc_caps *dsc_caps, const struct dc_crtc_timing *timing, struct dc_dsc_bw_range *range) @@ -272,16 +326,21 @@ static void get_dsc_bandwidth_range( range->stream_kbps = dc_bandwidth_in_kbps_from_timing(timing); /* max dsc target bpp */ - range->max_kbps = dc_dsc_stream_bandwidth_in_kbps(timing->pix_clk_100hz, max_bpp_x16); + range->max_kbps = dc_dsc_stream_bandwidth_in_kbps(timing, + max_bpp_x16, num_slices_h, dsc_caps->is_dp); range->max_target_bpp_x16 = max_bpp_x16; if (range->max_kbps > range->stream_kbps) { /* max dsc target bpp is capped to native bandwidth */ range->max_kbps = range->stream_kbps; - range->max_target_bpp_x16 = calc_dsc_bpp_x16(range->stream_kbps, timing->pix_clk_100hz, dsc_caps->bpp_increment_div); + range->max_target_bpp_x16 = compute_bpp_x16_from_target_bandwidth( + range->max_kbps, timing, num_slices_h, + dsc_caps->bpp_increment_div, + dsc_caps->is_dp); } /* min dsc target bpp */ - range->min_kbps = dc_dsc_stream_bandwidth_in_kbps(timing->pix_clk_100hz, min_bpp_x16); + range->min_kbps = dc_dsc_stream_bandwidth_in_kbps(timing, + min_bpp_x16, num_slices_h, dsc_caps->is_dp); range->min_target_bpp_x16 = min_bpp_x16; if (range->min_kbps > range->max_kbps) { /* min dsc target bpp is capped to max dsc bandwidth*/ @@ -290,7 +349,6 @@ static void get_dsc_bandwidth_range( } } - /* Decides if DSC should be used and calculates target bpp if it should, applying DSC policy. * * Returns: @@ -303,6 +361,7 @@ static bool decide_dsc_target_bpp_x16( const struct dsc_enc_caps *dsc_common_caps, const int target_bandwidth_kbps, const struct dc_crtc_timing *timing, + const int num_slices_h, int *target_bpp_x16) { bool should_use_dsc = false; @@ -311,7 +370,7 @@ static bool decide_dsc_target_bpp_x16( memset(&range, 0, sizeof(range)); get_dsc_bandwidth_range(policy->min_target_bpp * 16, policy->max_target_bpp * 16, - dsc_common_caps, timing, &range); + num_slices_h, dsc_common_caps, timing, &range); if (!policy->enable_dsc_when_not_needed && target_bandwidth_kbps >= range.stream_kbps) { /* enough bandwidth without dsc */ *target_bpp_x16 = 0; @@ -327,7 +386,10 @@ static bool decide_dsc_target_bpp_x16( should_use_dsc = true; } else if (target_bandwidth_kbps >= range.min_kbps) { /* use target bpp that can take entire target bandwidth */ - *target_bpp_x16 = calc_dsc_bpp_x16(target_bandwidth_kbps, timing->pix_clk_100hz, dsc_common_caps->bpp_increment_div); + *target_bpp_x16 = compute_bpp_x16_from_target_bandwidth( + target_bandwidth_kbps, timing, num_slices_h, + dsc_common_caps->bpp_increment_div, + dsc_common_caps->is_dp); should_use_dsc = true; } else { /* not enough bandwidth to fulfill minimum requirement */ @@ -531,18 +593,6 @@ static bool setup_dsc_config( if (!is_dsc_possible) goto done; - if (target_bandwidth_kbps > 0) { - is_dsc_possible = decide_dsc_target_bpp_x16( - &policy, - &dsc_common_caps, - target_bandwidth_kbps, - timing, - &target_bpp); - dsc_cfg->bits_per_pixel = target_bpp; - } - if (!is_dsc_possible) - goto done; - sink_per_slice_throughput_mps = 0; // Validate available DSC settings against the mode timing @@ -690,12 +740,26 @@ static bool setup_dsc_config( dsc_cfg->num_slices_v = pic_height/slice_height; + if (target_bandwidth_kbps > 0) { + is_dsc_possible = decide_dsc_target_bpp_x16( + &policy, + &dsc_common_caps, + target_bandwidth_kbps, + timing, + num_slices_h, + &target_bpp); + dsc_cfg->bits_per_pixel = target_bpp; + } + if 
(!is_dsc_possible) + goto done; + // Final decission: can we do DSC or not? if (is_dsc_possible) { // Fill out the rest of DSC settings dsc_cfg->block_pred_enable = dsc_common_caps.is_block_pred_supported; dsc_cfg->linebuf_depth = dsc_common_caps.lb_bit_depth; dsc_cfg->version_minor = (dsc_common_caps.dsc_version & 0xf0) >> 4; + dsc_cfg->is_dp = dsc_sink_caps->is_dp; } done: @@ -806,6 +870,7 @@ bool dc_dsc_parse_dsc_dpcd(const struct dc *dc, const uint8_t *dpcd_dsc_basic_da dsc_sink_caps->branch_max_line_width = dpcd_dsc_branch_decoder_caps[DP_DSC_BRANCH_MAX_LINE_WIDTH - DP_DSC_BRANCH_OVERALL_THROUGHPUT_0] * 320; ASSERT(dsc_sink_caps->branch_max_line_width == 0 || dsc_sink_caps->branch_max_line_width >= 5120); + dsc_sink_caps->is_dp = true; return true; } @@ -838,7 +903,8 @@ bool dc_dsc_compute_bandwidth_range( dsc_min_slice_height_override, max_bpp_x16, &config); if (is_dsc_possible) - get_dsc_bandwidth_range(min_bpp_x16, max_bpp_x16, &dsc_common_caps, timing, range); + get_dsc_bandwidth_range(min_bpp_x16, max_bpp_x16, + config.num_slices_h, &dsc_common_caps, timing, range); return is_dsc_possible; } @@ -864,13 +930,20 @@ bool dc_dsc_compute_config( return is_dsc_possible; } -uint32_t dc_dsc_stream_bandwidth_in_kbps(uint32_t pix_clk_100hz, uint32_t bpp_x16) +uint32_t dc_dsc_stream_bandwidth_in_kbps(const struct dc_crtc_timing *timing, + uint32_t bpp_x16, uint32_t num_slices_h, bool is_dp) { - struct fixed31_32 link_bw_kbps; - link_bw_kbps = dc_fixpt_from_int(pix_clk_100hz); - link_bw_kbps = dc_fixpt_div_int(link_bw_kbps, 160); - link_bw_kbps = dc_fixpt_mul_int(link_bw_kbps, bpp_x16); - return dc_fixpt_ceil(link_bw_kbps); + struct fixed31_32 overhead_in_kbps; + struct fixed31_32 bpp; + struct fixed31_32 actual_bandwidth_in_kbps; + + overhead_in_kbps = compute_dsc_max_bandwidth_overhead( + timing, num_slices_h, is_dp); + bpp = dc_fixpt_from_fraction(bpp_x16, 16); + actual_bandwidth_in_kbps = dc_fixpt_from_fraction(timing->pix_clk_100hz, 10); + actual_bandwidth_in_kbps = dc_fixpt_mul(actual_bandwidth_in_kbps, bpp); + actual_bandwidth_in_kbps = dc_fixpt_add(actual_bandwidth_in_kbps, overhead_in_kbps); + return dc_fixpt_ceil(actual_bandwidth_in_kbps); } void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing, uint32_t max_target_bpp_limit_override_x16, struct dc_dsc_policy *policy) @@ -954,3 +1027,8 @@ void dc_dsc_policy_set_enable_dsc_when_not_needed(bool enable) { dsc_policy_enable_dsc_when_not_needed = enable; } + +void dc_dsc_policy_set_disable_dsc_stream_overhead(bool disable) +{ + dsc_policy_disable_dsc_stream_overhead = disable; +} diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c index c6a1cd80aeae..7b294f637881 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c @@ -284,26 +284,6 @@ static u32 _do_bytes_per_pixel_calc(int slice_width, u16 drm_bpp, return bytes_per_pixel; } -static u32 _do_calc_dsc_bpp_x16(u32 stream_bandwidth_kbps, u32 pix_clk_100hz, - u32 bpp_increment_div) -{ - u32 dsc_target_bpp_x16; - float f_dsc_target_bpp; - float f_stream_bandwidth_100bps; - // bpp_increment_div is actually precision - u32 precision = bpp_increment_div; - - f_stream_bandwidth_100bps = stream_bandwidth_kbps * 10.0f; - f_dsc_target_bpp = f_stream_bandwidth_100bps / pix_clk_100hz; - - // Round down to the nearest precision stop to bring it into DSC spec - // range - dsc_target_bpp_x16 = (u32)(f_dsc_target_bpp * precision); - dsc_target_bpp_x16 = (dsc_target_bpp_x16 * 16) / 
precision; - - return dsc_target_bpp_x16; -} - /** * calc_rc_params - reads the user's cmdline mode * @rc: DC internal DSC parameters @@ -367,26 +347,3 @@ u32 calc_dsc_bytes_per_pixel(const struct drm_dsc_config *pps) DC_FP_END(); return ret; } - -/** - * calc_dsc_bpp_x16 - retrieve the dsc bits per pixel - * @stream_bandwidth_kbps: - * @pix_clk_100hz: - * @bpp_increment_div: - * - * Calculate the total of bits per pixel for DSC configuration. - * - * @note This calculation requires float point operation, most of it executes - * under kernel_fpu_{begin,end}. - */ -u32 calc_dsc_bpp_x16(u32 stream_bandwidth_kbps, u32 pix_clk_100hz, - u32 bpp_increment_div) -{ - u32 dsc_bpp; - - DC_FP_START(); - dsc_bpp = _do_calc_dsc_bpp_x16(stream_bandwidth_kbps, pix_clk_100hz, - bpp_increment_div); - DC_FP_END(); - return dsc_bpp; -} diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h index 8123827840c5..262f06afcbf9 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h +++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h @@ -79,8 +79,6 @@ typedef struct qp_entry qp_table[]; void calc_rc_params(struct rc_params *rc, const struct drm_dsc_config *pps); u32 calc_dsc_bytes_per_pixel(const struct drm_dsc_config *pps); -u32 calc_dsc_bpp_x16(u32 stream_bandwidth_kbps, u32 pix_clk_100hz, - u32 bpp_increment_div); #endif diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c index 92c65d2fa7d7..5aa714e831dd 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c @@ -111,6 +111,7 @@ bool dal_hw_factory_init( case DCN_VERSION_3_0: case DCN_VERSION_3_01: case DCN_VERSION_3_02: + case DCN_VERSION_3_03: dal_hw_factory_dcn30_init(factory); return true; #endif diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c index da9499c09a11..199a9dd0e0e3 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c @@ -106,6 +106,7 @@ bool dal_hw_translate_init( case DCN_VERSION_3_0: case DCN_VERSION_3_01: case DCN_VERSION_3_02: + case DCN_VERSION_3_03: dal_hw_translate_dcn30_init(translate); return true; #endif diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h index 3ae05c96d557..ffc3f2c63db8 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h @@ -65,7 +65,8 @@ bool perform_link_training_with_retries( bool skip_video_pattern, int attempts, struct pipe_ctx *pipe_ctx, - enum signal_type signal); + enum signal_type signal, + bool do_fallback); bool is_mst_supported(struct dc_link *link); @@ -75,6 +76,8 @@ void detect_edp_sink_caps(struct dc_link *link); bool is_dp_active_dongle(const struct dc_link *link); +bool is_dp_branch_device(const struct dc_link *link); + bool is_edp_ilr_optimization_required(struct dc_link *link, struct dc_crtc_timing *crtc_timing); void dp_enable_mst_on_sink(struct dc_link *link, bool enable); @@ -94,5 +97,13 @@ void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable); bool dp_update_dsc_config(struct pipe_ctx *pipe_ctx); bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable); +/* Convert PHY repeater count read from DPCD uint8_t. */ +uint8_t dp_convert_to_count(uint8_t lttpr_repeater_count); + +/* Check DPCD training status registers to detect link loss. 
*/ +enum link_training_result dp_check_link_loss_status( + struct dc_link *link, + const struct link_training_settings *link_training_setting); +enum dp_link_encoding dp_get_link_encoding_format(const struct dc_link_settings *link_settings); #endif /* __DC_LINK_DP_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h index ddbe4bb52724..00fc81431b43 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h @@ -32,6 +32,12 @@ struct dpp { const struct dpp_funcs *funcs; struct dc_context *ctx; + /** + * @inst: + * + * inst stands for "instance," and it is an id number that references a + * specific DPP. + */ int inst; struct dpp_caps *caps; struct pwl_params regamma_params; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h index f520e13aee4c..f94135c6e3c2 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h @@ -88,6 +88,7 @@ struct dsc_enc_caps { int32_t max_total_throughput_mps; /* Maximum total throughput with all the slices combined */ int32_t max_slice_width; uint32_t bpp_increment_div; /* bpp increment divisor, e.g. if 16, it's 1/16th of a bit */ + bool is_dp; }; struct dsc_funcs { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h index 75c77ad9cbfe..640bb432bd6a 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h @@ -363,6 +363,9 @@ struct mpc_funcs { struct mpc *mpc, int opp_id); + void (*set_bg_color)(struct mpc *mpc, + struct tg_color *bg_color, + int mpcc_id); }; #endif diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h index 1d5853c95448..43284d410687 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h @@ -235,6 +235,10 @@ struct hw_sequencer_funcs { enum dc_color_depth color_depth, const struct tg_color *solid_color, int width, int height, int offset); + void (*update_visual_confirm_color)(struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct tg_color *color, + int mpcc_id); }; void color_space_to_black_color( diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h b/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h index 7d36e55f3097..883dd8733ea4 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h +++ b/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h @@ -81,6 +81,11 @@ struct dc_link *link_enc_cfg_get_link_using_link_enc( /* Return DIG link encoder used by link. NULL if unused. */ struct link_encoder *link_enc_cfg_get_link_enc_used_by_link( struct dc_state *state, - struct dc_link *link); + const struct dc_link *link); + +/* Return next available DIG link encoder. NULL if none available. 
*/ +struct link_encoder *link_enc_cfg_get_next_avail_link_enc( + const struct dc *dc, + const struct dc_state *state); #endif /* DC_INC_LINK_ENC_CFG_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/irq/Makefile b/drivers/gpu/drm/amd/display/dc/irq/Makefile index ca8168726324..e9fabb85741f 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/Makefile +++ b/drivers/gpu/drm/amd/display/dc/irq/Makefile @@ -109,4 +109,12 @@ IRQ_DCN3_02 = irq_service_dcn302.o AMD_DAL_IRQ_DCN3_02 = $(addprefix $(AMDDALPATH)/dc/irq/dcn302/,$(IRQ_DCN3_02)) AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCN3_02) +############################################################################### +# DCN 3_03 +############################################################################### +IRQ_DCN3_03 = irq_service_dcn303.o + +AMD_DAL_IRQ_DCN3_03 = $(addprefix $(AMDDALPATH)/dc/irq/dcn303/,$(IRQ_DCN3_03)) + +AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCN3_03) endif diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c index 1a5be2792055..ed54e1c819be 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c @@ -58,8 +58,8 @@ enum dc_irq_source to_dal_irq_source_dcn21( return DC_IRQ_SOURCE_VBLANK5; case DCN_1_0__SRCID__DC_D6_OTG_VSTARTUP: return DC_IRQ_SOURCE_VBLANK6; - case DCN_1_0__SRCID__DMCUB_OUTBOX_HIGH_PRIORITY_READY_INT: - return DC_IRQ_SOURCE_DMCUB_OUTBOX0; + case DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT: + return DC_IRQ_SOURCE_DMCUB_OUTBOX; case DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL: return DC_IRQ_SOURCE_DC1_VLINE0; case DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL: @@ -187,7 +187,7 @@ static const struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = { .ack = NULL }; -static const struct irq_source_info_funcs dmub_trace_irq_info_funcs = { +static const struct irq_source_info_funcs dmub_outbox_irq_info_funcs = { .set = NULL, .ack = NULL }; @@ -301,11 +301,11 @@ static const struct irq_source_info_funcs vline0_irq_info_funcs = { .funcs = &vline0_irq_info_funcs\ } -#define dmub_trace_int_entry()\ - [DC_IRQ_SOURCE_DMCUB_OUTBOX0] = {\ - IRQ_REG_ENTRY_DMUB(DMCUB_INTERRUPT_ENABLE, DMCUB_OUTBOX0_READY_INT_EN,\ - DMCUB_INTERRUPT_ACK, DMCUB_OUTBOX0_READY_INT_ACK),\ - .funcs = &dmub_trace_irq_info_funcs\ +#define dmub_outbox_int_entry()\ + [DC_IRQ_SOURCE_DMCUB_OUTBOX] = {\ + IRQ_REG_ENTRY_DMUB(DMCUB_INTERRUPT_ENABLE, DMCUB_OUTBOX1_READY_INT_EN,\ + DMCUB_INTERRUPT_ACK, DMCUB_OUTBOX1_READY_INT_ACK),\ + .funcs = &dmub_outbox_irq_info_funcs\ } #define dummy_irq_entry() \ @@ -426,7 +426,7 @@ irq_source_info_dcn21[DAL_IRQ_SOURCES_NUMBER] = { vline0_int_entry(3), vline0_int_entry(4), vline0_int_entry(5), - dmub_trace_int_entry(), + dmub_outbox_int_entry(), }; static const struct irq_service_funcs irq_service_funcs_dcn21 = { diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.c b/drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.c new file mode 100644 index 000000000000..66e60762388e --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.c @@ -0,0 +1,262 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright (C) 2021 Advanced Micro Devices, Inc. 
+ * + * Authors: AMD + */ + +#include "dm_services.h" +#include "irq_service_dcn303.h" +#include "../dce110/irq_service_dce110.h" + +#include "sienna_cichlid_ip_offset.h" +#include "dcn/dcn_3_0_3_offset.h" +#include "dcn/dcn_3_0_3_sh_mask.h" + +#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h" + +static enum dc_irq_source to_dal_irq_source_dcn303(struct irq_service *irq_service, + uint32_t src_id, + uint32_t ext_id) +{ + switch (src_id) { + case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP: + return DC_IRQ_SOURCE_VBLANK1; + case DCN_1_0__SRCID__DC_D2_OTG_VSTARTUP: + return DC_IRQ_SOURCE_VBLANK2; + case DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT: + return DC_IRQ_SOURCE_PFLIP1; + case DCN_1_0__SRCID__HUBP1_FLIP_INTERRUPT: + return DC_IRQ_SOURCE_PFLIP2; + case DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT: + return DC_IRQ_SOURCE_VUPDATE1; + case DCN_1_0__SRCID__OTG1_IHC_V_UPDATE_NO_LOCK_INTERRUPT: + return DC_IRQ_SOURCE_VUPDATE2; + + case DCN_1_0__SRCID__DC_HPD1_INT: + /* generic src_id for all HPD and HPDRX interrupts */ + switch (ext_id) { + case DCN_1_0__CTXID__DC_HPD1_INT: + return DC_IRQ_SOURCE_HPD1; + case DCN_1_0__CTXID__DC_HPD2_INT: + return DC_IRQ_SOURCE_HPD2; + case DCN_1_0__CTXID__DC_HPD1_RX_INT: + return DC_IRQ_SOURCE_HPD1RX; + case DCN_1_0__CTXID__DC_HPD2_RX_INT: + return DC_IRQ_SOURCE_HPD2RX; + default: + return DC_IRQ_SOURCE_INVALID; + } + break; + + default: + return DC_IRQ_SOURCE_INVALID; + } +} + +static bool hpd_ack(struct irq_service *irq_service, const struct irq_source_info *info) +{ + uint32_t addr = info->status_reg; + uint32_t value = dm_read_reg(irq_service->ctx, addr); + uint32_t current_status = get_reg_field_value(value, HPD0_DC_HPD_INT_STATUS, DC_HPD_SENSE_DELAYED); + + dal_irq_service_ack_generic(irq_service, info); + + value = dm_read_reg(irq_service->ctx, info->enable_reg); + + set_reg_field_value(value, current_status ? 0 : 1, HPD0_DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY); + + dm_write_reg(irq_service->ctx, info->enable_reg, value); + + return true; +} + +static const struct irq_source_info_funcs hpd_irq_info_funcs = { + .set = NULL, + .ack = hpd_ack +}; + +static const struct irq_source_info_funcs hpd_rx_irq_info_funcs = { + .set = NULL, + .ack = NULL +}; + +static const struct irq_source_info_funcs pflip_irq_info_funcs = { + .set = NULL, + .ack = NULL +}; + +static const struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = { + .set = NULL, + .ack = NULL +}; + +static const struct irq_source_info_funcs vblank_irq_info_funcs = { + .set = NULL, + .ack = NULL +}; + +#undef BASE_INNER +#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg + +/* compile time expand base address. 
*/ +#define BASE(seg) BASE_INNER(seg) + +#define SRI(reg_name, block, id)\ + BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ + mm ## block ## id ## _ ## reg_name + + +#define IRQ_REG_ENTRY(block, reg_num, reg1, mask1, reg2, mask2)\ + .enable_reg = SRI(reg1, block, reg_num),\ + .enable_mask = block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\ + .enable_value = {\ + block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\ + ~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \ + },\ + .ack_reg = SRI(reg2, block, reg_num),\ + .ack_mask = block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK,\ + .ack_value = block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK \ + + + +#define hpd_int_entry(reg_num)\ + [DC_IRQ_SOURCE_HPD1 + reg_num] = {\ + IRQ_REG_ENTRY(HPD, reg_num,\ + DC_HPD_INT_CONTROL, DC_HPD_INT_EN,\ + DC_HPD_INT_CONTROL, DC_HPD_INT_ACK),\ + .status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num),\ + .funcs = &hpd_irq_info_funcs\ +} + +#define hpd_rx_int_entry(reg_num)\ + [DC_IRQ_SOURCE_HPD1RX + reg_num] = {\ + IRQ_REG_ENTRY(HPD, reg_num,\ + DC_HPD_INT_CONTROL, DC_HPD_RX_INT_EN,\ + DC_HPD_INT_CONTROL, DC_HPD_RX_INT_ACK),\ + .status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num),\ + .funcs = &hpd_rx_irq_info_funcs\ +} +#define pflip_int_entry(reg_num)\ + [DC_IRQ_SOURCE_PFLIP1 + reg_num] = {\ + IRQ_REG_ENTRY(HUBPREQ, reg_num,\ + DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_INT_MASK,\ + DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_CLEAR),\ + .funcs = &pflip_irq_info_funcs\ +} + +/* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, to match semantic + * of DCE's DC_IRQ_SOURCE_VUPDATEx. + */ +#define vupdate_no_lock_int_entry(reg_num)\ + [DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\ + IRQ_REG_ENTRY(OTG, reg_num,\ + OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_INT_EN,\ + OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_EVENT_CLEAR),\ + .funcs = &vupdate_no_lock_irq_info_funcs\ + } + +#define vblank_int_entry(reg_num)\ + [DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\ + IRQ_REG_ENTRY(OTG, reg_num,\ + OTG_GLOBAL_SYNC_STATUS, VSTARTUP_INT_EN,\ + OTG_GLOBAL_SYNC_STATUS, VSTARTUP_EVENT_CLEAR),\ + .funcs = &vblank_irq_info_funcs\ + } + +#define dummy_irq_entry() { .funcs = &dummy_irq_info_funcs } + +#define i2c_int_entry(reg_num) \ + [DC_IRQ_SOURCE_I2C_DDC ## reg_num] = dummy_irq_entry() + +#define dp_sink_int_entry(reg_num) \ + [DC_IRQ_SOURCE_DPSINK ## reg_num] = dummy_irq_entry() + +#define gpio_pad_int_entry(reg_num) \ + [DC_IRQ_SOURCE_GPIOPAD ## reg_num] = dummy_irq_entry() + +#define dc_underflow_int_entry(reg_num) \ + [DC_IRQ_SOURCE_DC ## reg_num ## UNDERFLOW] = dummy_irq_entry() + +static const struct irq_source_info_funcs dummy_irq_info_funcs = { + .set = dal_irq_service_dummy_set, + .ack = dal_irq_service_dummy_ack +}; + +static const struct irq_source_info irq_source_info_dcn303[DAL_IRQ_SOURCES_NUMBER] = { + [DC_IRQ_SOURCE_INVALID] = dummy_irq_entry(), + hpd_int_entry(0), + hpd_int_entry(1), + hpd_rx_int_entry(0), + hpd_rx_int_entry(1), + i2c_int_entry(1), + i2c_int_entry(2), + dp_sink_int_entry(1), + dp_sink_int_entry(2), + [DC_IRQ_SOURCE_TIMER] = dummy_irq_entry(), + pflip_int_entry(0), + pflip_int_entry(1), + [DC_IRQ_SOURCE_PFLIP_UNDERLAY0] = dummy_irq_entry(), + gpio_pad_int_entry(0), + gpio_pad_int_entry(1), + gpio_pad_int_entry(2), + gpio_pad_int_entry(3), + gpio_pad_int_entry(4), + gpio_pad_int_entry(5), + gpio_pad_int_entry(6), + gpio_pad_int_entry(7), + gpio_pad_int_entry(8), + gpio_pad_int_entry(9), + gpio_pad_int_entry(10), + gpio_pad_int_entry(11), + gpio_pad_int_entry(12), + 
gpio_pad_int_entry(13), + gpio_pad_int_entry(14), + gpio_pad_int_entry(15), + gpio_pad_int_entry(16), + gpio_pad_int_entry(17), + gpio_pad_int_entry(18), + gpio_pad_int_entry(19), + gpio_pad_int_entry(20), + gpio_pad_int_entry(21), + gpio_pad_int_entry(22), + gpio_pad_int_entry(23), + gpio_pad_int_entry(24), + gpio_pad_int_entry(25), + gpio_pad_int_entry(26), + gpio_pad_int_entry(27), + gpio_pad_int_entry(28), + gpio_pad_int_entry(29), + gpio_pad_int_entry(30), + dc_underflow_int_entry(1), + dc_underflow_int_entry(2), + [DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(), + [DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(), + vupdate_no_lock_int_entry(0), + vupdate_no_lock_int_entry(1), + vblank_int_entry(0), + vblank_int_entry(1), +}; + +static const struct irq_service_funcs irq_service_funcs_dcn303 = { + .to_dal_irq_source = to_dal_irq_source_dcn303 +}; + +static void dcn303_irq_construct(struct irq_service *irq_service, struct irq_service_init_data *init_data) +{ + dal_irq_service_construct(irq_service, init_data); + + irq_service->info = irq_source_info_dcn303; + irq_service->funcs = &irq_service_funcs_dcn303; +} + +struct irq_service *dal_irq_service_dcn303_create(struct irq_service_init_data *init_data) +{ + struct irq_service *irq_service = kzalloc(sizeof(*irq_service), GFP_KERNEL); + + if (!irq_service) + return NULL; + + dcn303_irq_construct(irq_service, init_data); + return irq_service; +} diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.h b/drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.h new file mode 100644 index 000000000000..fd64e3848ff3 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.h @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright (C) 2021 Advanced Micro Devices, Inc. + * + * Authors: AMD + */ + +#ifndef __DAL_IRQ_SERVICE_DCN303_H__ +#define __DAL_IRQ_SERVICE_DCN303_H__ + +#include "../irq_service.h" + +struct irq_service *dal_irq_service_dcn303_create(struct irq_service_init_data *init_data); + +#endif /* __DAL_IRQ_SERVICE_DCN303_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/irq_types.h b/drivers/gpu/drm/amd/display/dc/irq_types.h index ae8f47ec0f8c..5f9346622301 100644 --- a/drivers/gpu/drm/amd/display/dc/irq_types.h +++ b/drivers/gpu/drm/amd/display/dc/irq_types.h @@ -150,7 +150,7 @@ enum dc_irq_source { DC_IRQ_SOURCE_DC4_VLINE1, DC_IRQ_SOURCE_DC5_VLINE1, DC_IRQ_SOURCE_DC6_VLINE1, - DC_IRQ_DMCUB_OUTBOX1, + DC_IRQ_SOURCE_DMCUB_OUTBOX, DC_IRQ_SOURCE_DMCUB_OUTBOX0, DAL_IRQ_SOURCES_NUMBER |
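For reference, the luma/chroma DET buffer split that the DML hunks above apply in several places, and that CalculateDETBufferSize now computes with unsigned int sizes, can be summarized by the sketch below. This is illustrative only (hypothetical names, plain C, not driver code); it assumes the per-pipe DETBufferSizeInKByte array introduced in vba_vars_st is read at index 0, which fetch_ip_params() fills from ip->det_buffer_size_kbytes.

/* Illustrative sketch, not driver code: the DET split ratios used in the hunks above. */
static void det_buffer_split_sketch(unsigned int det_size_kbytes,
				    unsigned int swath_height_y,
				    unsigned int swath_height_c,
				    unsigned int *det_y_bytes,
				    unsigned int *det_c_bytes)
{
	unsigned int det_bytes = det_size_kbytes * 1024;

	if (swath_height_c == 0) {
		/* luma-only surface: the whole DET goes to luma */
		*det_y_bytes = det_bytes;
		*det_c_bytes = 0;
	} else if (swath_height_y <= swath_height_c) {
		/* even split between luma and chroma */
		*det_y_bytes = det_bytes / 2;
		*det_c_bytes = det_bytes / 2;
	} else {
		/* luma takes 2/3 of the DET, chroma 1/3 */
		*det_y_bytes = det_bytes * 2 / 3;
		*det_c_bytes = det_bytes / 3;
	}
}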
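Similarly, the DSC change above replaces the float-based calc_dsc_bpp_x16() (which required DC_FP_START/DC_FP_END) with fixed-point dc_fixpt_* arithmetic that also accounts for a DP stream overhead per horizontal slice. A minimal sketch of the same arithmetic, written with plain doubles and hypothetical names purely for readability (the driver itself uses the fixed31_32 helpers), assuming pix_clk_100hz is the pixel clock in 100 Hz units as in dc_crtc_timing:

#include <math.h>

/* Illustrative sketch, not driver code: worst-case DP DSC stream overhead in kbps. */
static double dsc_overhead_kbps(double pix_clk_100hz, int h_total, int v_total,
				int num_slices_h)
{
	/* refresh rate in Hz */
	double refresh_hz = pix_clk_100hz * 100.0 / h_total / v_total;

	/* 256 (the patch's per-slice, per-line constant) / 1000 -> kbits, times refresh -> kbps */
	return (double)num_slices_h * v_total * 256.0 / 1000.0 * refresh_hz;
}

/* Largest DSC target bpp (in 1/16 bpp units) that fits the given bandwidth,
 * after subtracting the overhead and rounding down to the sink's granularity. */
static unsigned int bpp_x16_from_bandwidth(double bandwidth_kbps,
					   double pix_clk_100hz,
					   double overhead_kbps,
					   unsigned int bpp_increment_div)
{
	double bpp = (bandwidth_kbps - overhead_kbps) * 10.0 / pix_clk_100hz;

	bpp = floor(bpp * bpp_increment_div) / bpp_increment_div;
	return (unsigned int)floor(bpp * 16.0);
}

The reworked dc_dsc_stream_bandwidth_in_kbps() goes in the opposite direction with the same quantities: pix_clk_100hz / 10 multiplied by bpp_x16 / 16, plus the same overhead, rounded up to whole kbps.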