author     Dave Airlie <airlied@redhat.com>    2021-09-28 17:08:21 +1000
committer  Dave Airlie <airlied@redhat.com>    2021-09-28 17:08:26 +1000
commit     1e3944578b749449bd7fa6bf0bae4c3d3f5f1733 (patch)
tree       d9f2f1573b23988a1e105905ecc5777a80810206 /drivers/gpu/drm/amd/display/amdgpu_dm
parent     f602a96e025272d237a61df455b12893aa782d33 (diff)
parent     2485e2753ec896b169526e3ef7988589d1c458f5 (diff)
Merge tag 'amd-drm-next-5.16-2021-09-27' of https://gitlab.freedesktop.org/agd5f/linux into drm-next
amd-drm-next-5.16-2021-09-27:

amdgpu:
- RAS improvements
- BACO fixes
- Yellow Carp updates
- Misc code cleanups
- Initial DP 2.0 support
- VCN priority handling
- Cyan Skillfish updates
- Rework IB handling for multimedia engine tests
- Backlight fixes
- DCN 3.1 power saving improvements
- Runtime PM fixes
- Modifier support for DCC image stores for gfx 10.3
- Hotplug fixes
- Clean up stack related warnings in display code
- DP alt mode fixes
- Display rework for better handling FP code
- Debugfs fixes

amdkfd:
- SVM fixes
- DMA map fixes

radeon:
- AGP fix

From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210927212653.4575-1-alexander.deucher@amd.com
Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/amd/display/amdgpu_dm')
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c            | 459
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h            |  91
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c    |   3
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c       |  16
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c    |  49
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c  |   4
6 files changed, 555 insertions, 67 deletions
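The bulk of the amdgpu_dm.c changes below route DMUB firmware notifications (AUX/SET_CONFIG replies and HPD events) through per-type callbacks. A minimal, illustrative sketch of the registration pattern follows; the two register_dmub_notify_callback() calls are taken verbatim from amdgpu_dm_init() in this patch, while the wrapper function around them is hypothetical:

/*
 * Sketch only: how amdgpu_dm_init() wires DMUB notifications to handlers.
 * AUX replies are handled inline in the outbox IRQ; HPD notifications are
 * offloaded to the dm.delayed_hpd_wq worker thread.
 */
static int example_register_dmub_callbacks(struct amdgpu_device *adev)
{
	/* AUX/SET_CONFIG reply: dmub_aux_setconfig_callback() copies the
	 * notification into dm.dmub_notify and completes
	 * dm.dmub_aux_transfer_done, so no thread offload is needed. */
	if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
					   dmub_aux_setconfig_callback, false))
		return -EINVAL;

	/* HPD: dmub_hpd_callback() takes connector/modeset locks, so it is
	 * queued to a workqueue instead of running in interrupt context. */
	if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD,
					   dmub_hpd_callback, true))
		return -EINVAL;

	return 0;
}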
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 66c799f5c7cf..07adac1a8c42 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -215,6 +215,8 @@ static void handle_cursor_update(struct drm_plane *plane,
static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
+static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
+
static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
struct drm_crtc_state *new_crtc_state);
@@ -618,6 +620,116 @@ static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
}
#endif
+/**
+ * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
+ * @adev: amdgpu_device pointer
+ * @notify: dmub notification structure
+ *
+ * DMUB AUX or SET_CONFIG command completion processing callback.
+ * Copies the DMUB notification to DM so it can be read by the AUX command
+ * issuing thread, and signals the event to wake up that thread.
+ */
+void dmub_aux_setconfig_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
+{
+ if (adev->dm.dmub_notify)
+ memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
+ if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
+ complete(&adev->dm.dmub_aux_transfer_done);
+}
+
+/**
+ * dmub_hpd_callback - DMUB HPD interrupt processing callback.
+ * @adev: amdgpu_device pointer
+ * @notify: dmub notification structure
+ *
+ * DMUB HPD interrupt processing callback. Gets the display index from the
+ * link index and calls the helper to do the processing.
+ */
+void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
+{
+ struct amdgpu_dm_connector *aconnector;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
+ struct dc_link *link;
+ uint8_t link_index = 0;
+ struct drm_device *dev = adev->dm.ddev;
+
+ if (adev == NULL)
+ return;
+
+ if (notify == NULL) {
+ DRM_ERROR("DMUB HPD callback notification was NULL");
+ return;
+ }
+
+ if (notify->link_index > adev->dm.dc->link_count) {
+ DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
+ return;
+ }
+
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+
+ link_index = notify->link_index;
+
+ link = adev->dm.dc->links[link_index];
+
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
+ aconnector = to_amdgpu_dm_connector(connector);
+ if (link && aconnector->dc_link == link) {
+ DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
+ handle_hpd_irq_helper(aconnector);
+ break;
+ }
+ }
+ drm_connector_list_iter_end(&iter);
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
+}
+
+/**
+ * register_dmub_notify_callback - Sets callback for DMUB notify
+ * @adev: amdgpu_device pointer
+ * @type: Type of dmub notification
+ * @callback: Dmub interrupt callback function
+ * @dmub_int_thread_offload: offload indicator
+ *
+ * API to register a dmub callback handler for a dmub notification.
+ * Also sets an indicator for whether the callback processing is to be
+ * offloaded to the dmub interrupt handling thread.
+ * Return: true if successfully registered, false otherwise
+ */
+bool register_dmub_notify_callback(struct amdgpu_device *adev, enum dmub_notification_type type,
+dmub_notify_interrupt_callback_t callback, bool dmub_int_thread_offload)
+{
+ if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
+ adev->dm.dmub_callback[type] = callback;
+ adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
+ } else
+ return false;
+
+ return true;
+}
+
+static void dm_handle_hpd_work(struct work_struct *work)
+{
+ struct dmub_hpd_work *dmub_hpd_wrk;
+
+ dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
+
+ if (!dmub_hpd_wrk->dmub_notify) {
+ DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
+ return;
+ }
+
+ if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
+ dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
+ dmub_hpd_wrk->dmub_notify);
+ }
+ kfree(dmub_hpd_wrk);
+
+}
+
#define DMUB_TRACE_MAX_READ 64
/**
* dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
@@ -634,18 +746,33 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
struct amdgpu_display_manager *dm = &adev->dm;
struct dmcub_trace_buf_entry entry = { 0 };
uint32_t count = 0;
+ struct dmub_hpd_work *dmub_hpd_wrk;
if (dc_enable_dmub_notifications(adev->dm.dc)) {
+ dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
+ if (!dmub_hpd_wrk) {
+ DRM_ERROR("Failed to allocate dmub_hpd_wrk");
+ return;
+ }
+ INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
+
if (irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
do {
dc_stat_get_dmub_notification(adev->dm.dc, &notify);
- } while (notify.pending_notification);
+ if (notify.type > ARRAY_SIZE(dm->dmub_thread_offload)) {
+ DRM_ERROR("DM: notify type %d larger than the array size %zu!", notify.type,
+ ARRAY_SIZE(dm->dmub_thread_offload));
+ continue;
+ }
+ if (dm->dmub_thread_offload[notify.type] == true) {
+ dmub_hpd_wrk->dmub_notify = &notify;
+ dmub_hpd_wrk->adev = adev;
+ queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
+ } else {
+ dm->dmub_callback[notify.type](adev, &notify);
+ }
- if (adev->dm.dmub_notify)
- memcpy(adev->dm.dmub_notify, &notify, sizeof(struct dmub_notification));
- if (notify.type == DMUB_NOTIFICATION_AUX_REPLY)
- complete(&adev->dm.dmub_aux_transfer_done);
- // TODO : HPD Implementation
+ } while (notify.pending_notification);
} else {
DRM_ERROR("DM: Failed to receive correct outbox IRQ !");
@@ -1083,6 +1210,83 @@ static void vblank_control_worker(struct work_struct *work)
}
#endif
+
+static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
+{
+ struct hpd_rx_irq_offload_work *offload_work;
+ struct amdgpu_dm_connector *aconnector;
+ struct dc_link *dc_link;
+ struct amdgpu_device *adev;
+ enum dc_connection_type new_connection_type = dc_connection_none;
+ unsigned long flags;
+
+ offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
+ aconnector = offload_work->offload_wq->aconnector;
+
+ if (!aconnector) {
+ DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
+ goto skip;
+ }
+
+ adev = drm_to_adev(aconnector->base.dev);
+ dc_link = aconnector->dc_link;
+
+ mutex_lock(&aconnector->hpd_lock);
+ if (!dc_link_detect_sink(dc_link, &new_connection_type))
+ DRM_ERROR("KMS: Failed to detect connector\n");
+ mutex_unlock(&aconnector->hpd_lock);
+
+ if (new_connection_type == dc_connection_none)
+ goto skip;
+
+ if (amdgpu_in_reset(adev))
+ goto skip;
+
+ mutex_lock(&adev->dm.dc_lock);
+ if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
+ dc_link_dp_handle_automated_test(dc_link);
+ else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
+ hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
+ dc_link_dp_allow_hpd_rx_irq(dc_link)) {
+ dc_link_dp_handle_link_loss(dc_link);
+ spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
+ offload_work->offload_wq->is_handling_link_loss = false;
+ spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
+ }
+ mutex_unlock(&adev->dm.dc_lock);
+
+skip:
+ kfree(offload_work);
+
+}
+
+static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
+{
+ int max_caps = dc->caps.max_links;
+ int i = 0;
+ struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
+
+ hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
+
+ if (!hpd_rx_offload_wq)
+ return NULL;
+
+
+ for (i = 0; i < max_caps; i++) {
+ hpd_rx_offload_wq[i].wq =
+ create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
+
+ if (hpd_rx_offload_wq[i].wq == NULL) {
+ DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
+ return NULL;
+ }
+
+ spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
+ }
+
+ return hpd_rx_offload_wq;
+}
+
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
struct dc_init_data init_data;
@@ -1201,6 +1405,12 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
dc_hardware_init(adev->dm.dc);
+ adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
+ if (!adev->dm.hpd_rx_offload_wq) {
+ DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
+ goto error;
+ }
+
#if defined(CONFIG_DRM_AMD_DC_DCN)
if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
struct dc_phy_addr_space_config pa_config;
@@ -1253,7 +1463,25 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify");
goto error;
}
+
+ adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
+ if (!adev->dm.delayed_hpd_wq) {
+ DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
+ goto error;
+ }
+
amdgpu_dm_outbox_init(adev);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
+ dmub_aux_setconfig_callback, false)) {
+ DRM_ERROR("amdgpu: fail to register dmub aux callback");
+ goto error;
+ }
+ if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
+ DRM_ERROR("amdgpu: fail to register dmub hpd callback");
+ goto error;
+ }
+#endif
}
if (amdgpu_dm_initialize_drm_device(adev)) {
@@ -1335,6 +1563,8 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
if (dc_enable_dmub_notifications(adev->dm.dc)) {
kfree(adev->dm.dmub_notify);
adev->dm.dmub_notify = NULL;
+ destroy_workqueue(adev->dm.delayed_hpd_wq);
+ adev->dm.delayed_hpd_wq = NULL;
}
if (adev->dm.dmub_bo)
@@ -1342,6 +1572,18 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
&adev->dm.dmub_bo_gpu_addr,
&adev->dm.dmub_bo_cpu_addr);
+ if (adev->dm.hpd_rx_offload_wq) {
+ for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
+ if (adev->dm.hpd_rx_offload_wq[i].wq) {
+ destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
+ adev->dm.hpd_rx_offload_wq[i].wq = NULL;
+ }
+ }
+
+ kfree(adev->dm.hpd_rx_offload_wq);
+ adev->dm.hpd_rx_offload_wq = NULL;
+ }
+
/* DC Destroy TODO: Replace destroy DAL */
if (adev->dm.dc)
dc_destroy(&adev->dm.dc);
@@ -1978,6 +2220,16 @@ context_alloc_fail:
return res;
}
+static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
+{
+ int i;
+
+ if (dm->hpd_rx_offload_wq) {
+ for (i = 0; i < dm->dc->caps.max_links; i++)
+ flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
+ }
+}
+
static int dm_suspend(void *handle)
{
struct amdgpu_device *adev = handle;
@@ -1999,6 +2251,8 @@ static int dm_suspend(void *handle)
amdgpu_dm_irq_suspend(adev);
+ hpd_rx_irq_work_suspend(dm);
+
return ret;
}
@@ -2009,6 +2263,8 @@ static int dm_suspend(void *handle)
amdgpu_dm_irq_suspend(adev);
+ hpd_rx_irq_work_suspend(dm);
+
dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
return 0;
@@ -2155,7 +2411,7 @@ cleanup:
return;
}
-static void dm_set_dpms_off(struct dc_link *link)
+static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
{
struct dc_stream_state *stream_state;
struct amdgpu_dm_connector *aconnector = link->priv;
@@ -2176,6 +2432,7 @@ static void dm_set_dpms_off(struct dc_link *link)
}
stream_update.stream = stream_state;
+ acrtc_state->force_dpms_off = true;
dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
stream_state, &stream_update,
stream_state->ctx->dc->current_state);
@@ -2613,20 +2870,22 @@ void amdgpu_dm_update_connector_after_detect(
dc_sink_release(sink);
}
-static void handle_hpd_irq(void *param)
+static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
{
- struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
struct drm_connector *connector = &aconnector->base;
struct drm_device *dev = connector->dev;
enum dc_connection_type new_connection_type = dc_connection_none;
struct amdgpu_device *adev = drm_to_adev(dev);
-#ifdef CONFIG_DRM_AMD_DC_HDCP
struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
-#endif
+ struct dm_crtc_state *dm_crtc_state = NULL;
if (adev->dm.disable_hpd_irq)
return;
+ if (dm_con_state->base.state && dm_con_state->base.crtc)
+ dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
+ dm_con_state->base.state,
+ dm_con_state->base.crtc));
/*
* In case of failure or MST no need to update connector status or notify the OS
* since (for MST case) MST does this in its own context.
@@ -2658,8 +2917,9 @@ static void handle_hpd_irq(void *param)
} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
if (new_connection_type == dc_connection_none &&
- aconnector->dc_link->type == dc_connection_none)
- dm_set_dpms_off(aconnector->dc_link);
+ aconnector->dc_link->type == dc_connection_none &&
+ dm_crtc_state)
+ dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);
amdgpu_dm_update_connector_after_detect(aconnector);
@@ -2674,7 +2934,15 @@ static void handle_hpd_irq(void *param)
}
-static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
+static void handle_hpd_irq(void *param)
+{
+ struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
+
+ handle_hpd_irq_helper(aconnector);
+
+}
+
+static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
{
uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
uint8_t dret;
@@ -2752,6 +3020,25 @@ static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
}
+static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
+ union hpd_irq_data hpd_irq_data)
+{
+ struct hpd_rx_irq_offload_work *offload_work =
+ kzalloc(sizeof(*offload_work), GFP_KERNEL);
+
+ if (!offload_work) {
+ DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
+ return;
+ }
+
+ INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
+ offload_work->data = hpd_irq_data;
+ offload_work->offload_wq = offload_wq;
+
+ queue_work(offload_wq->wq, &offload_work->work);
+ DRM_DEBUG_KMS("queue work to handle hpd_rx offload work");
+}
+
static void handle_hpd_rx_irq(void *param)
{
struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
@@ -2763,14 +3050,16 @@ static void handle_hpd_rx_irq(void *param)
enum dc_connection_type new_connection_type = dc_connection_none;
struct amdgpu_device *adev = drm_to_adev(dev);
union hpd_irq_data hpd_irq_data;
- bool lock_flag = 0;
+ bool link_loss = false;
+ bool has_left_work = false;
+ int idx = aconnector->base.index;
+ struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
if (adev->dm.disable_hpd_irq)
return;
-
/*
* TODO:Temporary add mutex to protect hpd interrupt not have a gpio
* conflict, after implement i2c helper, this mutex should be
@@ -2778,43 +3067,41 @@ static void handle_hpd_rx_irq(void *param)
*/
mutex_lock(&aconnector->hpd_lock);
- read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
+ result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
+ &link_loss, true, &has_left_work);
- if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
- (dc_link->type == dc_connection_mst_branch)) {
- if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
- result = true;
- dm_handle_hpd_rx_irq(aconnector);
- goto out;
- } else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
- result = false;
- dm_handle_hpd_rx_irq(aconnector);
+ if (!has_left_work)
+ goto out;
+
+ if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
+ schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
+ goto out;
+ }
+
+ if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
+ if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
+ hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
+ dm_handle_mst_sideband_msg(aconnector);
goto out;
}
- }
- /*
- * TODO: We need the lock to avoid touching DC state while it's being
- * modified during automated compliance testing, or when link loss
- * happens. While this should be split into subhandlers and proper
- * interfaces to avoid having to conditionally lock like this in the
- * outer layer, we need this workaround temporarily to allow MST
- * lightup in some scenarios to avoid timeout.
- */
- if (!amdgpu_in_reset(adev) &&
- (hpd_rx_irq_check_link_loss_status(dc_link, &hpd_irq_data) ||
- hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST)) {
- mutex_lock(&adev->dm.dc_lock);
- lock_flag = 1;
- }
+ if (link_loss) {
+ bool skip = false;
-#ifdef CONFIG_DRM_AMD_DC_HDCP
- result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
-#else
- result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
-#endif
- if (!amdgpu_in_reset(adev) && lock_flag)
- mutex_unlock(&adev->dm.dc_lock);
+ spin_lock(&offload_wq->offload_lock);
+ skip = offload_wq->is_handling_link_loss;
+
+ if (!skip)
+ offload_wq->is_handling_link_loss = true;
+
+ spin_unlock(&offload_wq->offload_lock);
+
+ if (!skip)
+ schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
+
+ goto out;
+ }
+ }
out:
if (result && !is_mst_root_connector) {
@@ -2899,6 +3186,10 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
amdgpu_dm_irq_register_interrupt(adev, &int_params,
handle_hpd_rx_irq,
(void *) aconnector);
+
+ if (adev->dm.hpd_rx_offload_wq)
+ adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
+ aconnector;
}
}
}
@@ -4670,6 +4961,16 @@ add_gfx10_3_modifiers(const struct amdgpu_device *adev,
AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
AMD_FMT_MOD_SET(PACKERS, pkrs) |
AMD_FMT_MOD_SET(DCC, 1) |
+ AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
+ AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
+ AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
+
+ add_modifier(mods, size, capacity, AMD_FMT_MOD |
+ AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
+ AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
+ AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
+ AMD_FMT_MOD_SET(PACKERS, pkrs) |
+ AMD_FMT_MOD_SET(DCC, 1) |
AMD_FMT_MOD_SET(DCC_RETILE, 1) |
AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
@@ -4680,6 +4981,17 @@ add_gfx10_3_modifiers(const struct amdgpu_device *adev,
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
+ AMD_FMT_MOD_SET(PACKERS, pkrs) |
+ AMD_FMT_MOD_SET(DCC, 1) |
+ AMD_FMT_MOD_SET(DCC_RETILE, 1) |
+ AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
+ AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
+ AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
+
+ add_modifier(mods, size, capacity, AMD_FMT_MOD |
+ AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
+ AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
+ AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
AMD_FMT_MOD_SET(PACKERS, pkrs));
add_modifier(mods, size, capacity, AMD_FMT_MOD |
@@ -4761,10 +5073,27 @@ fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
if (modifier_has_dcc(modifier) && !force_disable_dcc) {
uint64_t dcc_address = afb->address + afb->base.offsets[1];
+ bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
+ bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
dcc->enable = 1;
dcc->meta_pitch = afb->base.pitches[1];
- dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
+ dcc->independent_64b_blks = independent_64b_blks;
+ if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
+ if (independent_64b_blks && independent_128b_blks)
+ dcc->dcc_ind_blk = hubp_ind_block_64b;
+ else if (independent_128b_blks)
+ dcc->dcc_ind_blk = hubp_ind_block_128b;
+ else if (independent_64b_blks && !independent_128b_blks)
+ dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
+ else
+ dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
+ } else {
+ if (independent_64b_blks)
+ dcc->dcc_ind_blk = hubp_ind_block_64b;
+ else
+ dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
+ }
address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
@@ -5600,9 +5929,15 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
{
struct drm_connector *drm_connector = &aconnector->base;
uint32_t link_bandwidth_kbps;
+ uint32_t max_dsc_target_bpp_limit_override = 0;
link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
dc_link_get_link_cap(aconnector->dc_link));
+
+ if (stream->link && stream->link->local_sink)
+ max_dsc_target_bpp_limit_override =
+ stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
+
/* Set DSC policy according to dsc_clock_en */
dc_dsc_policy_set_enable_dsc_when_not_needed(
aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
@@ -5612,7 +5947,7 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
dsc_caps,
aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
- 0,
+ max_dsc_target_bpp_limit_override,
link_bandwidth_kbps,
&stream->timing,
&stream->timing.dsc_cfg)) {
@@ -5963,6 +6298,7 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc)
state->freesync_config = cur->freesync_config;
state->cm_has_degamma = cur->cm_has_degamma;
state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
+ state->force_dpms_off = cur->force_dpms_off;
/* TODO Duplicate dc_stream after objects are stream object is flattened */
return &state->base;
@@ -8679,7 +9015,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
* and rely on sending it from software.
*/
if (acrtc_attach->base.state->event &&
- acrtc_state->active_planes > 0) {
+ acrtc_state->active_planes > 0 &&
+ !acrtc_state->force_dpms_off) {
drm_crtc_vblank_get(pcrtc);
spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
@@ -10819,6 +11156,7 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
struct amdgpu_dm_connector *amdgpu_dm_connector =
to_amdgpu_dm_connector(connector);
struct dm_connector_state *dm_con_state = NULL;
+ struct dc_sink *sink;
struct drm_device *dev = connector->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
@@ -10830,28 +11168,31 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
goto update;
}
- if (!edid) {
+ sink = amdgpu_dm_connector->dc_sink ?
+ amdgpu_dm_connector->dc_sink :
+ amdgpu_dm_connector->dc_em_sink;
+
+ if (!edid || !sink) {
dm_con_state = to_dm_connector_state(connector->state);
amdgpu_dm_connector->min_vfreq = 0;
amdgpu_dm_connector->max_vfreq = 0;
amdgpu_dm_connector->pixel_clock_mhz = 0;
+ connector->display_info.monitor_range.min_vfreq = 0;
+ connector->display_info.monitor_range.max_vfreq = 0;
+ freesync_capable = false;
goto update;
}
dm_con_state = to_dm_connector_state(connector->state);
- if (!amdgpu_dm_connector->dc_sink) {
- DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
- goto update;
- }
if (!adev->dm.freesync_module)
goto update;
- if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
- || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
+ if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
+ || sink->sink_signal == SIGNAL_TYPE_EDP) {
bool edid_check_required = false;
if (edid) {
@@ -10898,7 +11239,7 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
freesync_capable = true;
}
}
- } else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
+ } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
if (i >= 0 && vsdb_info.freesync_supported) {
timing = &edid->detailed_timings[i];
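The handle_hpd_rx_irq() rework in this file moves DP link-loss and automated-test handling out of the interrupt path and onto a per-link workqueue. A condensed, illustrative sketch of that hand-off (names taken from the hunks above; control flow simplified, not a literal copy of the patch):

/*
 * Sketch of the hpd_rx offload hand-off added in this file: IRQ context
 * claims the link-loss work, process context performs it.
 */
static void hpd_rx_offload_sketch(struct hpd_rx_irq_offload_work_queue *offload_wq,
				  union hpd_irq_data hpd_irq_data)
{
	bool skip;

	/* IRQ context (handle_hpd_rx_irq): take the work only if nobody is
	 * already handling link loss for this link. */
	spin_lock(&offload_wq->offload_lock);
	skip = offload_wq->is_handling_link_loss;
	if (!skip)
		offload_wq->is_handling_link_loss = true;
	spin_unlock(&offload_wq->offload_lock);

	if (!skip)
		/* Allocates a struct hpd_rx_irq_offload_work and queues
		 * dm_handle_hpd_rx_offload_work() on offload_wq->wq; that
		 * worker re-detects the sink, handles link loss under
		 * dm.dc_lock and clears is_handling_link_loss. */
		schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
}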
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index d1d353a7c77d..a85b09986aab 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -47,6 +47,8 @@
#define AMDGPU_DM_MAX_CRTC 6
#define AMDGPU_DM_MAX_NUM_EDP 2
+
+#define AMDGPU_DMUB_NOTIFICATION_MAX 5
/*
#include "include/amdgpu_dal_power_if.h"
#include "amdgpu_dm_irq.h"
@@ -86,6 +88,21 @@ struct dm_compressor_info {
uint64_t gpu_addr;
};
+typedef void (*dmub_notify_interrupt_callback_t)(struct amdgpu_device *adev, struct dmub_notification *notify);
+
+/**
+ * struct dmub_hpd_work - Handle time consuming work in low priority outbox IRQ
+ *
+ * @handle_hpd_work: Work to be executed in a separate thread to handle hpd_low_irq
+ * @dmub_notify: notification for callback function
+ * @adev: amdgpu_device pointer
+ */
+struct dmub_hpd_work {
+ struct work_struct handle_hpd_work;
+ struct dmub_notification *dmub_notify;
+ struct amdgpu_device *adev;
+};
+
/**
* struct vblank_control_work - Work data for vblank control
* @work: Kernel work data for the work event
@@ -155,6 +172,48 @@ struct dal_allocation {
};
/**
+ * struct hpd_rx_irq_offload_work_queue - Work queue to handle hpd_rx_irq
+ * offload work
+ */
+struct hpd_rx_irq_offload_work_queue {
+ /**
+ * @wq: workqueue structure to queue offload work.
+ */
+ struct workqueue_struct *wq;
+ /**
+ * @offload_lock: To protect fields of offload work queue.
+ */
+ spinlock_t offload_lock;
+ /**
+ * @is_handling_link_loss: Used to prevent inserting link loss event when
+ * we're handling link loss
+ */
+ bool is_handling_link_loss;
+ /**
+ * @aconnector: The aconnector that this work queue is attached to
+ */
+ struct amdgpu_dm_connector *aconnector;
+};
+
+/**
+ * struct hpd_rx_irq_offload_work - hpd_rx_irq offload work structure
+ */
+struct hpd_rx_irq_offload_work {
+ /**
+ * @work: offload work
+ */
+ struct work_struct work;
+ /**
+ * @data: reference irq data which is used while handling offload work
+ */
+ union hpd_irq_data data;
+ /**
+ * @offload_wq: offload work queue that this work is queued to
+ */
+ struct hpd_rx_irq_offload_work_queue *offload_wq;
+};
+
+/**
* struct amdgpu_display_manager - Central amdgpu display manager device
*
* @dc: Display Core control structure
@@ -190,9 +249,31 @@ struct amdgpu_display_manager {
*/
struct dmub_srv *dmub_srv;
+ /**
+ * @dmub_notify:
+ *
+ * Notification from DMUB.
+ */
+
struct dmub_notification *dmub_notify;
/**
+ * @dmub_callback:
+ *
+ * Callback functions to handle notification from DMUB.
+ */
+
+ dmub_notify_interrupt_callback_t dmub_callback[AMDGPU_DMUB_NOTIFICATION_MAX];
+
+ /**
+ * @dmub_thread_offload:
+ *
+ * Flag to indicate whether the callback should be offloaded to a worker thread.
+ */
+
+ bool dmub_thread_offload[AMDGPU_DMUB_NOTIFICATION_MAX];
+
+ /**
* @dmub_fb_info:
*
* Framebuffer regions for the DMUB.
@@ -422,7 +503,12 @@ struct amdgpu_display_manager {
*/
struct crc_rd_work *crc_rd_wrk;
#endif
-
+ /**
+ * @hpd_rx_offload_wq:
+ *
+ * Work queues, one per link, used to offload hpd_rx_irq handling work
+ */
+ struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq;
/**
* @mst_encoders:
*
@@ -439,6 +525,7 @@ struct amdgpu_display_manager {
*/
struct list_head da_list;
struct completion dmub_aux_transfer_done;
+ struct workqueue_struct *delayed_hpd_wq;
/**
* @brightness:
@@ -542,6 +629,8 @@ struct dm_crtc_state {
bool dsc_force_changed;
bool vrr_supported;
+
+ bool force_dpms_off;
struct mod_freesync_config freesync_config;
struct dc_info_packet vrr_infopacket;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 87daa78a32b8..f3ada9b6be5a 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -247,6 +247,7 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
{
struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
struct dc_link *link = connector->dc_link;
+ struct dc *dc = (struct dc *)link->dc;
struct dc_link_settings prefer_link_settings;
char *wr_buf = NULL;
const uint32_t wr_buf_size = 40;
@@ -313,7 +314,7 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
prefer_link_settings.lane_count = param[0];
prefer_link_settings.link_rate = param[1];
- dp_retrain_link_dp_test(link, &prefer_link_settings, false);
+ dc_link_set_preferred_training_settings(dc, &prefer_link_settings, NULL, link, true);
kfree(wr_buf);
return size;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index c5f1dc3b5961..5bfdc66b5867 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -448,6 +448,8 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
struct mod_hdcp_display *display = &hdcp_work[link_index].display;
struct mod_hdcp_link *link = &hdcp_work[link_index].link;
struct drm_connector_state *conn_state;
+ struct dc_sink *sink = NULL;
+ bool link_is_hdcp14 = false;
if (config->dpms_off) {
hdcp_remove_display(hdcp_work, link_index, aconnector);
@@ -460,8 +462,13 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
display->index = aconnector->base.index;
display->state = MOD_HDCP_DISPLAY_ACTIVE;
- if (aconnector->dc_sink != NULL)
- link->mode = mod_hdcp_signal_type_to_operation_mode(aconnector->dc_sink->sink_signal);
+ if (aconnector->dc_sink)
+ sink = aconnector->dc_sink;
+ else if (aconnector->dc_em_sink)
+ sink = aconnector->dc_em_sink;
+
+ if (sink != NULL)
+ link->mode = mod_hdcp_signal_type_to_operation_mode(sink->sink_signal);
display->controller = CONTROLLER_ID_D0 + config->otg_inst;
display->dig_fe = config->dig_fe;
@@ -470,8 +477,9 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
display->stream_enc_idx = config->stream_enc_idx;
link->link_enc_idx = config->link_enc_idx;
link->phy_idx = config->phy_idx;
- link->hdcp_supported_informational = dc_link_is_hdcp14(aconnector->dc_link,
- aconnector->dc_sink->sink_signal) ? 1 : 0;
+ if (sink)
+ link_is_hdcp14 = dc_link_is_hdcp14(aconnector->dc_link, sink->sink_signal);
+ link->hdcp_supported_informational = link_is_hdcp14;
link->dp.rev = aconnector->dc_link->dpcd_caps.dpcd_rev.raw;
link->dp.assr_enabled = config->assr_enabled;
link->dp.mst_enabled = config->mst_enabled;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index 6fee12c91ef5..1aa69dd8e02f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -40,6 +40,39 @@
#include "dm_helpers.h"
+struct monitor_patch_info {
+ unsigned int manufacturer_id;
+ unsigned int product_id;
+ void (*patch_func)(struct dc_edid_caps *edid_caps, unsigned int param);
+ unsigned int patch_param;
+};
+static void set_max_dsc_bpp_limit(struct dc_edid_caps *edid_caps, unsigned int param);
+
+static const struct monitor_patch_info monitor_patch_table[] = {
+{0x6D1E, 0x5BBF, set_max_dsc_bpp_limit, 15},
+{0x6D1E, 0x5B9A, set_max_dsc_bpp_limit, 15},
+};
+
+static void set_max_dsc_bpp_limit(struct dc_edid_caps *edid_caps, unsigned int param)
+{
+ if (edid_caps)
+ edid_caps->panel_patch.max_dsc_target_bpp_limit = param;
+}
+
+static int amdgpu_dm_patch_edid_caps(struct dc_edid_caps *edid_caps)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < ARRAY_SIZE(monitor_patch_table); i++)
+ if ((edid_caps->manufacturer_id == monitor_patch_table[i].manufacturer_id)
+ && (edid_caps->product_id == monitor_patch_table[i].product_id)) {
+ monitor_patch_table[i].patch_func(edid_caps, monitor_patch_table[i].patch_param);
+ ret++;
+ }
+
+ return ret;
+}
+
/* dm_helpers_parse_edid_caps
*
* Parse edid caps
@@ -125,6 +158,8 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
kfree(sads);
kfree(sadb);
+ amdgpu_dm_patch_edid_caps(edid_caps);
+
return result;
}
@@ -751,3 +786,17 @@ void dm_helpers_mst_enable_stream_features(const struct dc_stream_state *stream)
&new_downspread.raw,
sizeof(new_downspread));
}
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+void dm_set_phyd32clk(struct dc_context *ctx, int freq_khz)
+{
+ // FPGA programming for this clock in the diags framework needs to go
+ // through the dm layer, therefore leave a dummy interface here
+}
+
+
+void dm_helpers_enable_periodic_detection(struct dc_context *ctx, bool enable)
+{
+ /* TODO: add periodic detection implementation */
+}
+#endif
\ No newline at end of file
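The monitor_patch_table added above applies per-panel EDID quirks during dm_helpers_parse_edid_caps(); the max_dsc_target_bpp_limit it records is what apply_dsc_policy_for_stream() and the MST DSC code in the next file now pass to dc_dsc_compute_config() instead of 0. As a purely illustrative example, adding another panel quirk is one more table entry (the IDs in the last entry are made up):

static const struct monitor_patch_info monitor_patch_table[] = {
	{0x6D1E, 0x5BBF, set_max_dsc_bpp_limit, 15},
	{0x6D1E, 0x5B9A, set_max_dsc_bpp_limit, 15},
	/* hypothetical: cap DSC target bpp at 16 for EDID manufacturer 0x1234,
	 * product 0xABCD */
	{0x1234, 0xABCD, set_max_dsc_bpp_limit, 16},
};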
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 7af0d58c231b..1a99fcc27078 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -542,7 +542,7 @@ static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *p
params[i].sink->ctx->dc->res_pool->dscs[0],
&params[i].sink->dsc_caps.dsc_dec_caps,
params[i].sink->ctx->dc->debug.dsc_min_slice_height_override,
- 0,
+ params[i].sink->edid_caps.panel_patch.max_dsc_target_bpp_limit,
0,
params[i].timing,
&params[i].timing->dsc_cfg)) {
@@ -574,7 +574,7 @@ static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn)
param.sink->ctx->dc->res_pool->dscs[0],
&param.sink->dsc_caps.dsc_dec_caps,
param.sink->ctx->dc->debug.dsc_min_slice_height_override,
- 0,
+ param.sink->edid_caps.panel_patch.max_dsc_target_bpp_limit,
(int) kbps, param.timing, &dsc_config);
return dsc_config.bits_per_pixel;