author | Dave Airlie <airlied@redhat.com> | 2021-09-28 17:08:21 +1000
---|---|---
committer | Dave Airlie <airlied@redhat.com> | 2021-09-28 17:08:26 +1000
commit | 1e3944578b749449bd7fa6bf0bae4c3d3f5f1733 (patch)
tree | d9f2f1573b23988a1e105905ecc5777a80810206 /drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
parent | f602a96e025272d237a61df455b12893aa782d33 (diff)
parent | 2485e2753ec896b169526e3ef7988589d1c458f5 (diff)
download | linux-1e3944578b749449bd7fa6bf0bae4c3d3f5f1733.tar.bz2
Merge tag 'amd-drm-next-5.16-2021-09-27' of https://gitlab.freedesktop.org/agd5f/linux into drm-next
amd-drm-next-5.16-2021-09-27:
amdgpu:
- RAS improvements
- BACO fixes
- Yellow Carp updates
- Misc code cleanups
- Initial DP 2.0 support
- VCN priority handling
- Cyan Skillfish updates
- Rework IB handling for multimedia engine tests
- Backlight fixes
- DCN 3.1 power saving improvements
- Runtime PM fixes
- Modifier support for DCC image stores for gfx 10.3
- Hotplug fixes
- Clean up stack related warnings in display code
- DP alt mode fixes
- Display rework for better handling FP code
- Debugfs fixes
amdkfd:
- SVM fixes
- DMA map fixes
radeon:
- AGP fix
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210927212653.4575-1-alexander.deucher@amd.com
Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c')
-rw-r--r-- | drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 459
1 file changed, 400 insertions, 59 deletions
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 66c799f5c7cf..07adac1a8c42 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -215,6 +215,8 @@ static void handle_cursor_update(struct drm_plane *plane,
 static const struct drm_format_info *
 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
 
+static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
+
 static bool
 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
 				 struct drm_crtc_state *new_crtc_state);
@@ -618,6 +620,116 @@ static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
 }
 #endif
 
+/**
+ * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
+ * @adev: amdgpu_device pointer
+ * @notify: dmub notification structure
+ *
+ * Dmub AUX or SET_CONFIG command completion processing callback.
+ * Copies dmub notification to DM which is to be read by the AUX command
+ * issuing thread and also signals the event to wake up the thread.
+ */
+void dmub_aux_setconfig_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
+{
+	if (adev->dm.dmub_notify)
+		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
+	if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
+		complete(&adev->dm.dmub_aux_transfer_done);
+}
+
+/**
+ * dmub_hpd_callback - DMUB HPD interrupt processing callback.
+ * @adev: amdgpu_device pointer
+ * @notify: dmub notification structure
+ *
+ * Dmub Hpd interrupt processing callback. Gets the display index through
+ * the link index and calls the helper to do the processing.
+ */
+void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
+{
+	struct amdgpu_dm_connector *aconnector;
+	struct drm_connector *connector;
+	struct drm_connector_list_iter iter;
+	struct dc_link *link;
+	uint8_t link_index = 0;
+	struct drm_device *dev = adev->dm.ddev;
+
+	if (adev == NULL)
+		return;
+
+	if (notify == NULL) {
+		DRM_ERROR("DMUB HPD callback notification was NULL");
+		return;
+	}
+
+	if (notify->link_index > adev->dm.dc->link_count) {
+		DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
+		return;
+	}
+
+	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+
+	link_index = notify->link_index;
+
+	link = adev->dm.dc->links[link_index];
+
+	drm_connector_list_iter_begin(dev, &iter);
+	drm_for_each_connector_iter(connector, &iter) {
+		aconnector = to_amdgpu_dm_connector(connector);
+		if (link && aconnector->dc_link == link) {
+			DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
+			handle_hpd_irq_helper(aconnector);
+			break;
+		}
+	}
+	drm_connector_list_iter_end(&iter);
+	drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
+}
+
+/**
+ * register_dmub_notify_callback - Sets callback for DMUB notify
+ * @adev: amdgpu_device pointer
+ * @type: Type of dmub notification
+ * @callback: Dmub interrupt callback function
+ * @dmub_int_thread_offload: offload indicator
+ *
+ * API to register a dmub callback handler for a dmub notification.
+ * Also sets an indicator whether callback processing is to be offloaded
+ * to the dmub interrupt handling thread.
+ * Return: true if successfully registered, false if there is an existing registration
+ */
+bool register_dmub_notify_callback(struct amdgpu_device *adev, enum dmub_notification_type type,
+				   dmub_notify_interrupt_callback_t callback, bool dmub_int_thread_offload)
+{
+	if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
+		adev->dm.dmub_callback[type] = callback;
+		adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
+	} else
+		return false;
+
+	return true;
+}
+
+static void dm_handle_hpd_work(struct work_struct *work)
+{
+	struct dmub_hpd_work *dmub_hpd_wrk;
+
+	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
+
+	if (!dmub_hpd_wrk->dmub_notify) {
+		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
+		return;
+	}
+
+	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
+		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
+										      dmub_hpd_wrk->dmub_notify);
+	}
+
+	kfree(dmub_hpd_wrk);
+
+}
+
 #define DMUB_TRACE_MAX_READ 64
 /**
  * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
@@ -634,18 +746,33 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
 	struct amdgpu_display_manager *dm = &adev->dm;
 	struct dmcub_trace_buf_entry entry = { 0 };
 	uint32_t count = 0;
+	struct dmub_hpd_work *dmub_hpd_wrk;
 
 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
+		dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
+		if (!dmub_hpd_wrk) {
+			DRM_ERROR("Failed to allocate dmub_hpd_wrk");
+			return;
+		}
+		INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
+
 		if (irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
 			do {
 				dc_stat_get_dmub_notification(adev->dm.dc, &notify);
-			} while (notify.pending_notification);
+				if (notify.type > ARRAY_SIZE(dm->dmub_thread_offload)) {
+					DRM_ERROR("DM: notify type %d larger than the array size %zu!", notify.type,
+						  ARRAY_SIZE(dm->dmub_thread_offload));
+					continue;
+				}
+				if (dm->dmub_thread_offload[notify.type] == true) {
+					dmub_hpd_wrk->dmub_notify = &notify;
+					dmub_hpd_wrk->adev = adev;
+					queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
+				} else {
+					dm->dmub_callback[notify.type](adev, &notify);
+				}
+			} while (notify.pending_notification);
 
-			if (adev->dm.dmub_notify)
-				memcpy(adev->dm.dmub_notify, &notify, sizeof(struct dmub_notification));
-			if (notify.type == DMUB_NOTIFICATION_AUX_REPLY)
-				complete(&adev->dm.dmub_aux_transfer_done);
-			// TODO : HPD Implementation
 		} else {
 			DRM_ERROR("DM: Failed to receive correct outbox IRQ !");
@@ -1083,6 +1210,83 @@ static void vblank_control_worker(struct work_struct *work)
 }
 #endif
 
+
+static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
+{
+	struct hpd_rx_irq_offload_work *offload_work;
+	struct amdgpu_dm_connector *aconnector;
+	struct dc_link *dc_link;
+	struct amdgpu_device *adev;
+	enum dc_connection_type new_connection_type = dc_connection_none;
+	unsigned long flags;
+
+	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
+	aconnector = offload_work->offload_wq->aconnector;
+
+	if (!aconnector) {
+		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
+		goto skip;
+	}
+
+	adev = drm_to_adev(aconnector->base.dev);
+	dc_link = aconnector->dc_link;
+
+	mutex_lock(&aconnector->hpd_lock);
+	if (!dc_link_detect_sink(dc_link, &new_connection_type))
+		DRM_ERROR("KMS: Failed to detect connector\n");
+	mutex_unlock(&aconnector->hpd_lock);
+
+	if (new_connection_type == dc_connection_none)
+		goto skip;
+
+	if (amdgpu_in_reset(adev))
+		goto skip;
+
+	mutex_lock(&adev->dm.dc_lock);
+	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
+		dc_link_dp_handle_automated_test(dc_link);
+	else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
+		 hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
+		 dc_link_dp_allow_hpd_rx_irq(dc_link)) {
+		dc_link_dp_handle_link_loss(dc_link);
+		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
+		offload_work->offload_wq->is_handling_link_loss = false;
+		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
+	}
+	mutex_unlock(&adev->dm.dc_lock);
+
+skip:
+	kfree(offload_work);
+
+}
+
+static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
+{
+	int max_caps = dc->caps.max_links;
+	int i = 0;
+	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
+
+	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
+
+	if (!hpd_rx_offload_wq)
+		return NULL;
+
+
+	for (i = 0; i < max_caps; i++) {
+		hpd_rx_offload_wq[i].wq =
+			create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
+
+		if (hpd_rx_offload_wq[i].wq == NULL) {
+			DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
+			return NULL;
+		}
+
+		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
+	}
+
+	return hpd_rx_offload_wq;
+}
+
 static int amdgpu_dm_init(struct amdgpu_device *adev)
 {
 	struct dc_init_data init_data;
@@ -1201,6 +1405,12 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 
 	dc_hardware_init(adev->dm.dc);
 
+	adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
+	if (!adev->dm.hpd_rx_offload_wq) {
+		DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
+		goto error;
+	}
+
 #if defined(CONFIG_DRM_AMD_DC_DCN)
 	if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
 		struct dc_phy_addr_space_config pa_config;
@@ -1253,7 +1463,25 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 			DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify");
 			goto error;
 		}
+
+		adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
+		if (!adev->dm.delayed_hpd_wq) {
+			DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
+			goto error;
+		}
+
 		amdgpu_dm_outbox_init(adev);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
+						   dmub_aux_setconfig_callback, false)) {
+			DRM_ERROR("amdgpu: fail to register dmub aux callback");
+			goto error;
+		}
+		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
+			DRM_ERROR("amdgpu: fail to register dmub hpd callback");
+			goto error;
+		}
+#endif
 	}
 
 	if (amdgpu_dm_initialize_drm_device(adev)) {
@@ -1335,6 +1563,8 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
 		kfree(adev->dm.dmub_notify);
 		adev->dm.dmub_notify = NULL;
+		destroy_workqueue(adev->dm.delayed_hpd_wq);
+		adev->dm.delayed_hpd_wq = NULL;
 	}
 
 	if (adev->dm.dmub_bo)
@@ -1342,6 +1572,18 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
 				      &adev->dm.dmub_bo_gpu_addr,
 				      &adev->dm.dmub_bo_cpu_addr);
 
+	if (adev->dm.hpd_rx_offload_wq) {
+		for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
+			if (adev->dm.hpd_rx_offload_wq[i].wq) {
+				destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
+				adev->dm.hpd_rx_offload_wq[i].wq = NULL;
+			}
+		}
+
+		kfree(adev->dm.hpd_rx_offload_wq);
+		adev->dm.hpd_rx_offload_wq = NULL;
+	}
+
 	/* DC Destroy TODO: Replace destroy DAL */
 	if (adev->dm.dc)
 		dc_destroy(&adev->dm.dc);
@@ -1978,6 +2220,16 @@ context_alloc_fail:
 	return res;
 }
 
+static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
+{
+	int i;
+
+	if (dm->hpd_rx_offload_wq) {
+		for (i = 0; i < dm->dc->caps.max_links; i++)
+			flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
+	}
+}
+
 static int dm_suspend(void *handle)
 {
 	struct amdgpu_device *adev = handle;
@@ -1999,6 +2251,8 @@ static int dm_suspend(void *handle)
 
 		amdgpu_dm_irq_suspend(adev);
 
+		hpd_rx_irq_work_suspend(dm);
+
 		return ret;
 	}
@@ -2009,6 +2263,8 @@ static int dm_suspend(void *handle)
 
 	amdgpu_dm_irq_suspend(adev);
 
+	hpd_rx_irq_work_suspend(dm);
+
 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
 
 	return 0;
 }
@@ -2155,7 +2411,7 @@ cleanup:
 	return;
 }
 
-static void dm_set_dpms_off(struct dc_link *link)
+static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
 {
 	struct dc_stream_state *stream_state;
 	struct amdgpu_dm_connector *aconnector = link->priv;
@@ -2176,6 +2432,7 @@ static void dm_set_dpms_off(struct dc_link *link)
 	}
 
 	stream_update.stream = stream_state;
+	acrtc_state->force_dpms_off = true;
 	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
 				     stream_state, &stream_update,
 				     stream_state->ctx->dc->current_state);
@@ -2613,20 +2870,22 @@ void amdgpu_dm_update_connector_after_detect(
 	dc_sink_release(sink);
 }
 
-static void handle_hpd_irq(void *param)
+static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
 {
-	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
 	struct drm_connector *connector = &aconnector->base;
 	struct drm_device *dev = connector->dev;
 	enum dc_connection_type new_connection_type = dc_connection_none;
 	struct amdgpu_device *adev = drm_to_adev(dev);
-#ifdef CONFIG_DRM_AMD_DC_HDCP
 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
-#endif
+	struct dm_crtc_state *dm_crtc_state = NULL;
 
 	if (adev->dm.disable_hpd_irq)
 		return;
 
+	if (dm_con_state->base.state && dm_con_state->base.crtc)
+		dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
+					dm_con_state->base.state,
+					dm_con_state->base.crtc));
 	/*
 	 * In case of failure or MST no need to update connector status or notify the OS
 	 * since (for MST case) MST does this in its own context.
@@ -2658,8 +2917,9 @@ static void handle_hpd_irq(void *param)
 
 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
 		if (new_connection_type == dc_connection_none &&
-		    aconnector->dc_link->type == dc_connection_none)
-			dm_set_dpms_off(aconnector->dc_link);
+		    aconnector->dc_link->type == dc_connection_none &&
+		    dm_crtc_state)
+			dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);
 
 		amdgpu_dm_update_connector_after_detect(aconnector);
 
@@ -2674,7 +2934,15 @@
 
 }
 
-static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
+static void handle_hpd_irq(void *param)
+{
+	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
+
+	handle_hpd_irq_helper(aconnector);
+
+}
+
+static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
 {
 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
 	uint8_t dret;
@@ -2752,6 +3020,25 @@ static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
 }
 
+static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
+					 union hpd_irq_data hpd_irq_data)
+{
+	struct hpd_rx_irq_offload_work *offload_work =
+				kzalloc(sizeof(*offload_work), GFP_KERNEL);
+
+	if (!offload_work) {
+		DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
+		return;
+	}
+
+	INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
+	offload_work->data = hpd_irq_data;
+	offload_work->offload_wq = offload_wq;
+
+	queue_work(offload_wq->wq, &offload_work->work);
+	DRM_DEBUG_KMS("queue work to handle hpd_rx offload work");
+}
+
 static void handle_hpd_rx_irq(void *param)
 {
 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
@@ -2763,14 +3050,16 @@ static void handle_hpd_rx_irq(void *param)
 	enum dc_connection_type new_connection_type = dc_connection_none;
 	struct amdgpu_device *adev = drm_to_adev(dev);
 	union hpd_irq_data hpd_irq_data;
-	bool lock_flag = 0;
+	bool link_loss = false;
+	bool has_left_work = false;
+	int idx = aconnector->base.index;
+	struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
 
 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
 
 	if (adev->dm.disable_hpd_irq)
 		return;
 
-
 	/*
 	 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
 	 * conflict, after implement i2c helper, this mutex should be
@@ -2778,43 +3067,41 @@ static void handle_hpd_rx_irq(void *param)
 	 * retired.
 	 */
 	mutex_lock(&aconnector->hpd_lock);
 
-	read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
+	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
+					   &link_loss, true, &has_left_work);
 
-	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
-	    (dc_link->type == dc_connection_mst_branch)) {
-		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
-			result = true;
-			dm_handle_hpd_rx_irq(aconnector);
-			goto out;
-		} else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
-			result = false;
-			dm_handle_hpd_rx_irq(aconnector);
+	if (!has_left_work)
+		goto out;
+
+	if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
+		schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
+		goto out;
+	}
+
+	if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
+		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
+		    hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
+			dm_handle_mst_sideband_msg(aconnector);
 			goto out;
 		}
-	}
 
-	/*
-	 * TODO: We need the lock to avoid touching DC state while it's being
-	 * modified during automated compliance testing, or when link loss
-	 * happens. While this should be split into subhandlers and proper
-	 * interfaces to avoid having to conditionally lock like this in the
-	 * outer layer, we need this workaround temporarily to allow MST
-	 * lightup in some scenarios to avoid timeout.
-	 */
-	if (!amdgpu_in_reset(adev) &&
-	    (hpd_rx_irq_check_link_loss_status(dc_link, &hpd_irq_data) ||
-	     hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST)) {
-		mutex_lock(&adev->dm.dc_lock);
-		lock_flag = 1;
-	}
+		if (link_loss) {
+			bool skip = false;
 
-#ifdef CONFIG_DRM_AMD_DC_HDCP
-	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
-#else
-	result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
-#endif
-	if (!amdgpu_in_reset(adev) && lock_flag)
-		mutex_unlock(&adev->dm.dc_lock);
+			spin_lock(&offload_wq->offload_lock);
+			skip = offload_wq->is_handling_link_loss;
+
+			if (!skip)
+				offload_wq->is_handling_link_loss = true;
+
+			spin_unlock(&offload_wq->offload_lock);
+
+			if (!skip)
+				schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
+
+			goto out;
+		}
+	}
 
 out:
 	if (result && !is_mst_root_connector) {
@@ -2899,6 +3186,10 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
 
 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
 					handle_hpd_rx_irq,
 					(void *) aconnector);
+
+			if (adev->dm.hpd_rx_offload_wq)
+				adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
+					aconnector;
 		}
 	}
 }
@@ -4670,6 +4961,16 @@ add_gfx10_3_modifiers(const struct amdgpu_device *adev,
 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
 		    AMD_FMT_MOD_SET(DCC, 1) |
+		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
+		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
+		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
+
+	add_modifier(mods, size, capacity, AMD_FMT_MOD |
+		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
+		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
+		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
+		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
+		    AMD_FMT_MOD_SET(DCC, 1) |
 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
@@ -4680,6 +4981,17 @@ add_gfx10_3_modifiers(const struct amdgpu_device *adev,
 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
+		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
+		    AMD_FMT_MOD_SET(DCC, 1) |
+		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
+		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
+		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
+		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
+
+	add_modifier(mods, size, capacity, AMD_FMT_MOD |
+		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
+		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
+		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
 
 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
@@ -4761,10 +5073,27 @@ fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
 
 	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
 		uint64_t dcc_address = afb->address + afb->base.offsets[1];
+		bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
+		bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
 
 		dcc->enable = 1;
 		dcc->meta_pitch = afb->base.pitches[1];
-		dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
+		dcc->independent_64b_blks = independent_64b_blks;
+		if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
+			if (independent_64b_blks && independent_128b_blks)
+				dcc->dcc_ind_blk = hubp_ind_block_64b;
+			else if (independent_128b_blks)
+				dcc->dcc_ind_blk = hubp_ind_block_128b;
+			else if (independent_64b_blks && !independent_128b_blks)
+				dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
+			else
+				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
+		} else {
+			if (independent_64b_blks)
+				dcc->dcc_ind_blk = hubp_ind_block_64b;
+			else
+				dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
+		}
 
 		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
 		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
@@ -5600,9 +5929,15 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
 {
 	struct drm_connector *drm_connector = &aconnector->base;
 	uint32_t link_bandwidth_kbps;
+	uint32_t max_dsc_target_bpp_limit_override = 0;
 
 	link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
 							dc_link_get_link_cap(aconnector->dc_link));
+
+	if (stream->link && stream->link->local_sink)
+		max_dsc_target_bpp_limit_override =
+			stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
+
 	/* Set DSC policy according to dsc_clock_en */
 	dc_dsc_policy_set_enable_dsc_when_not_needed(
 		aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
@@ -5612,7 +5947,7 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
 		if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
 					  dsc_caps,
 					  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
-					  0,
+					  max_dsc_target_bpp_limit_override,
 					  link_bandwidth_kbps,
 					  &stream->timing,
 					  &stream->timing.dsc_cfg)) {
@@ -5963,6 +6298,7 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc)
 	state->freesync_config = cur->freesync_config;
 	state->cm_has_degamma = cur->cm_has_degamma;
 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
+	state->force_dpms_off = cur->force_dpms_off;
 	/* TODO Duplicate dc_stream after objects are stream object is flattened */
 
 	return &state->base;
@@ -8679,7 +9015,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 	 * and rely on sending it from software.
 	 */
 	if (acrtc_attach->base.state->event &&
-	    acrtc_state->active_planes > 0) {
+	    acrtc_state->active_planes > 0 &&
+	    !acrtc_state->force_dpms_off) {
 		drm_crtc_vblank_get(pcrtc);
 
 		spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
@@ -10819,6 +11156,7 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
 	struct amdgpu_dm_connector *amdgpu_dm_connector =
 			to_amdgpu_dm_connector(connector);
 	struct dm_connector_state *dm_con_state = NULL;
+	struct dc_sink *sink;
 
 	struct drm_device *dev = connector->dev;
 	struct amdgpu_device *adev = drm_to_adev(dev);
@@ -10830,28 +11168,31 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
 		goto update;
 	}
 
-	if (!edid) {
+	sink = amdgpu_dm_connector->dc_sink ?
+		amdgpu_dm_connector->dc_sink :
+		amdgpu_dm_connector->dc_em_sink;
+
+	if (!edid || !sink) {
 		dm_con_state = to_dm_connector_state(connector->state);
 
 		amdgpu_dm_connector->min_vfreq = 0;
 		amdgpu_dm_connector->max_vfreq = 0;
 		amdgpu_dm_connector->pixel_clock_mhz = 0;
+		connector->display_info.monitor_range.min_vfreq = 0;
+		connector->display_info.monitor_range.max_vfreq = 0;
+		freesync_capable = false;
 
 		goto update;
 	}
 
 	dm_con_state = to_dm_connector_state(connector->state);
 
-	if (!amdgpu_dm_connector->dc_sink) {
-		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
-		goto update;
-	}
 	if (!adev->dm.freesync_module)
 		goto update;
 
-	if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
-		|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
+	if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
+		|| sink->sink_signal == SIGNAL_TYPE_EDP) {
 		bool edid_check_required = false;
 
 		if (edid) {
@@ -10898,7 +11239,7 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
 				freesync_capable = true;
 			}
 		}
-	} else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
+	} else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
 		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
 		if (i >= 0 && vsdb_info.freesync_supported) {
 			timing = &edid->detailed_timings[i];
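
The heart of the DMUB notification plumbing above is a small table-driven dispatch: register_dmub_notify_callback() records, per notification type, both a handler and a flag saying whether it must be deferred to a workqueue instead of running inside the outbox IRQ handler (dm_dmub_outbox1_low_irq). Below is a minimal standalone sketch of that pattern; the enum, struct, and harness names are invented for illustration and only the shape mirrors the patch:

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Notification types, loosely mirroring DMUB_NOTIFICATION_* in the patch. */
enum notify_type { NOTIFY_AUX_REPLY, NOTIFY_HPD, NOTIFY_MAX };

struct notification { enum notify_type type; int link_index; };

typedef void (*notify_cb_t)(const struct notification *n);

static notify_cb_t callbacks[NOTIFY_MAX];
static bool thread_offload[NOTIFY_MAX];

/* Like register_dmub_notify_callback(): reject bad input, then remember
 * both the handler and whether it must run in process context. */
static bool register_notify_callback(enum notify_type type, notify_cb_t cb, bool offload)
{
	if (cb == NULL || type >= NOTIFY_MAX)
		return false;
	callbacks[type] = cb;
	thread_offload[type] = offload;
	return true;
}

/* Stand-in for queue_work(): a real driver defers to a workqueue here. */
static void queue_to_worker(const struct notification *n)
{
	printf("deferred to worker thread: type %d\n", n->type);
	callbacks[n->type](n);
}

/* Mirrors the dispatch decision in dm_dmub_outbox1_low_irq(). */
static void dispatch(const struct notification *n)
{
	if (n->type >= NOTIFY_MAX || callbacks[n->type] == NULL)
		return; /* unknown or unregistered type: drop it */
	if (thread_offload[n->type])
		queue_to_worker(n);    /* HPD: too heavy for IRQ context */
	else
		callbacks[n->type](n); /* AUX reply: cheap, handle inline */
}

static void on_aux(const struct notification *n) { printf("aux reply, link %d\n", n->link_index); }
static void on_hpd(const struct notification *n) { printf("hpd event, link %d\n", n->link_index); }

int main(void)
{
	register_notify_callback(NOTIFY_AUX_REPLY, on_aux, false);
	register_notify_callback(NOTIFY_HPD, on_hpd, true);
	dispatch(&(struct notification){ .type = NOTIFY_HPD, .link_index = 0 });
	dispatch(&(struct notification){ .type = NOTIFY_AUX_REPLY, .link_index = 1 });
	return 0;
}
```

The split matches how the patch registers its two callbacks: the AUX reply handler only does a memcpy and a completion, so it stays inline; HPD processing ends up doing link detection, which cannot run in interrupt context, hence the offload flag.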
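Link-loss handling in handle_hpd_rx_irq() adds one more wrinkle: the per-link is_handling_link_loss flag is tested and set under the offload spinlock so that only the first interrupt queues recovery work, and dm_handle_hpd_rx_offload_work() clears the flag after dc_link_dp_handle_link_loss() finishes. Here is a compact sketch of that test-and-set dedup, using a pthread mutex in place of the kernel spinlock; the function names are illustrative, not the kernel API:

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Per-link offload queue state, as in hpd_rx_irq_offload_work_queue. */
struct offload_queue {
	pthread_mutex_t lock;        /* stands in for the kernel spinlock */
	bool is_handling_link_loss;
};

/* Mirrors the dedup in handle_hpd_rx_irq(): only the caller that flips
 * the flag from false to true gets to queue recovery work. */
static bool try_claim_link_loss(struct offload_queue *q)
{
	bool skip;

	pthread_mutex_lock(&q->lock);
	skip = q->is_handling_link_loss;
	if (!skip)
		q->is_handling_link_loss = true;
	pthread_mutex_unlock(&q->lock);

	return !skip; /* true = we own it, queue the work */
}

/* The worker clears the flag once link recovery is done. */
static void finish_link_loss(struct offload_queue *q)
{
	pthread_mutex_lock(&q->lock);
	q->is_handling_link_loss = false;
	pthread_mutex_unlock(&q->lock);
}

int main(void)
{
	struct offload_queue q = { PTHREAD_MUTEX_INITIALIZER, false };

	printf("first irq claims: %d\n", try_claim_link_loss(&q));  /* 1 */
	printf("second irq claims: %d\n", try_claim_link_loss(&q)); /* 0: deduped */
	finish_link_loss(&q);
	printf("after recovery: %d\n", try_claim_link_loss(&q));    /* 1 again */
	return 0;
}
```

Without the dedup, a storm of HPD RX interrupts during a link-loss event would queue redundant recovery work items on the single-threaded per-link workqueue.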
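Finally, on the DCC side: the new gfx 10.3 modifiers advertise DCC_INDEPENDENT_128B, and fill_gfx9_plane_attributes_from_modifiers() now maps the two independent-block bits to a HUBP independent-block setting, with a different mapping on RB+ parts. The decision table from the diff, extracted into a standalone function so it can be read (and tested) in isolation; the enum names are shortened stand-ins for the hubp_ind_block_* values in the patch:

```c
#include <stdbool.h>
#include <stdio.h>

enum ind_block {
	IND_BLOCK_UNCONSTRAINED,
	IND_BLOCK_64B,
	IND_BLOCK_128B,
	IND_BLOCK_64B_NO_128BCL,
};

/* Mirrors the dcc_ind_blk selection in fill_gfx9_plane_attributes_from_modifiers():
 * on gfx10.3 (RB+) parts both independent-block bits are meaningful, so all
 * four combinations map to a distinct setting; older parts only act on the
 * 64B bit. */
static enum ind_block pick_ind_block(bool rbplus, bool ind64, bool ind128)
{
	if (rbplus) {
		if (ind64 && ind128)
			return IND_BLOCK_64B;
		if (ind128)
			return IND_BLOCK_128B;
		if (ind64)
			return IND_BLOCK_64B_NO_128BCL;
		return IND_BLOCK_UNCONSTRAINED;
	}
	return ind64 ? IND_BLOCK_64B : IND_BLOCK_UNCONSTRAINED;
}

int main(void)
{
	/* The new gfx10.3 DCC modifiers set DCC_INDEPENDENT_128B only. */
	printf("%d\n", pick_ind_block(true, false, true));  /* IND_BLOCK_128B */
	printf("%d\n", pick_ind_block(false, true, false)); /* IND_BLOCK_64B */
	return 0;
}
```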