From 3b675b15571c5342499dec6a0b77d730f1101b62 Mon Sep 17 00:00:00 2001
From: Gerd Hoffmann
Date: Fri, 10 Jan 2020 10:45:35 +0100
Subject: drm/virtio: add missing virtio_gpu_array_lock_resv call

When submitting a fenced command we must lock the object reservations,
because virtio_gpu_queue_fenced_ctrl_buffer() unlocks after adding the
fence.

Reported-by: Jann Horn
Signed-off-by: Gerd Hoffmann
Reviewed-by: Chia-I Wu
Tested-by: Jann Horn
Link: http://patchwork.freedesktop.org/patch/msgid/20200110094535.23472-1-kraxel@redhat.com
---
 drivers/gpu/drm/virtio/virtgpu_plane.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index 390524143139..1635a9ff4794 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -232,6 +232,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
 		if (!objs)
 			return;
 		virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
+		virtio_gpu_array_lock_resv(objs);
 		virtio_gpu_cmd_transfer_to_host_2d
 			(vgdev, 0,
 			 plane->state->crtc_w,
--
cgit v1.2.3
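
For context, a minimal standalone sketch of the locking rule this patch
restores is below. The struct and helper names (obj_array,
queue_fenced_ctrl_buffer, and so on) are simplified stand-ins for
illustration, not the real virtio-gpu API; only the lock/submit/unlock
pairing mirrors the driver.

#include <stdio.h>

/* Stand-in for virtio_gpu_object_array; the real code takes per-object
 * dma_resv locks rather than setting a single flag. */
struct obj_array { int resv_locked; };

static void array_lock_resv(struct obj_array *objs)   { objs->resv_locked = 1; }
static void array_unlock_resv(struct obj_array *objs) { objs->resv_locked = 0; }

static void array_add_fence(struct obj_array *objs)
{
	/* dma_resv requires the reservation lock to be held here */
	if (!objs->resv_locked)
		printf("BUG: adding fence without reservation lock\n");
}

/* Models virtio_gpu_queue_fenced_ctrl_buffer(): it attaches the fence
 * and then UNLOCKS the reservations, so every fenced submission path
 * must have locked them beforehand. */
static void queue_fenced_ctrl_buffer(struct obj_array *objs)
{
	array_add_fence(objs);
	array_unlock_resv(objs);
}

int main(void)
{
	struct obj_array objs = { 0 };

	array_lock_resv(&objs);          /* the call the patch adds */
	queue_fenced_ctrl_buffer(&objs); /* unlocks on our behalf */
	return 0;
}

Without the added lock call, the submit path would unlock reservations
it never held, which is exactly the imbalance the one-line fix removes.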
From c3b040b5c58f9a2de8f672f9e7cb1a8d411b9f23 Mon Sep 17 00:00:00 2001
From: Tobias Schramm
Date: Thu, 9 Jan 2020 08:31:29 +0100
Subject: drm/rockchip: fix integer type used for storing dp data rate

commit 2589c4025f13 ("drm/rockchip: Avoid drm_dp_link helpers") changes
the type of the variables used to store the display port data rate and
number of lanes to u8. However, u8 is not sufficient to store the link
data rate of the display port. This commit reverts the type of the data
rate to unsigned int.

Fixes: 2589c4025f13 ("drm/rockchip: Avoid drm_dp_link helpers")
Signed-off-by: Tobias Schramm
Signed-off-by: Heiko Stuebner
Link: https://patchwork.freedesktop.org/patch/msgid/20200109073129.378507-2-t.schramm@manjaro.org
---
 drivers/gpu/drm/rockchip/cdn-dp-core.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.h b/drivers/gpu/drm/rockchip/cdn-dp-core.h
index 83c4586665b4..81ac9b658a70 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.h
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.h
@@ -95,7 +95,7 @@ struct cdn_dp_device {
 	struct cdn_dp_port *port[MAX_PHY];
 	u8 ports;
 	u8 max_lanes;
-	u8 max_rate;
+	unsigned int max_rate;
 	u8 lanes;
 	int active_port;
 
--
cgit v1.2.3
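
To see why u8 cannot hold a DP data rate: the DRM helpers report link
rates in kHz (for example, 270000 kHz for HBR), far beyond the 0-255
range of a u8, so storing one silently truncates it. A small standalone
demonstration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* A DisplayPort HBR link rate as the DRM helpers express it, in kHz */
	unsigned int rate_khz = 270000;

	uint8_t truncated = (uint8_t)rate_khz; /* what a u8 field would store */

	printf("%u kHz stored in a u8 becomes %u\n", rate_khz, truncated);
	/* prints: 270000 kHz stored in a u8 becomes 176 */
	return 0;
}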
From 7617e9621bf2ca3bb58d5f216cc35c86b029105d Mon Sep 17 00:00:00 2001
From: Wayne Lin
Date: Mon, 6 Jan 2020 18:21:58 +0800
Subject: drm/dp_mst: clear time slots for invalid ports

[Why]
When the connection status changes in an MST topology, the mst device
which detects the event sends out a CONNECTION_STATUS_NOTIFY message.

e.g. src-mst-mst-sst => src-mst (unplug) mst-sst

Currently, in the unplug case above, ports which have been allocated
payloads and are no longer in the topology still occupy time slots and
remain recorded in the proposed_vcpi[] of the topology manager. If we
don't clean up the proposed_vcpi[], then when the code flow goes on to
update the payload table by calling drm_dp_update_payload_part1(), we
will fail the port validation check, since there are ports with
proposed time slots which are no longer in the mst topology. As a
result, we also stop updating the DPCD payload table of the downstream
port.

[How]
While handling the CONNECTION_STATUS_NOTIFY message, add a check to see
whether the event indicates that a device was unplugged from an output
port. If so, iterate over all proposed_vcpi[] entries to see whether
the port of each proposed_vcpi[] is still in the topology. If the port
is invalid, set its num_slots to 0. Thereafter, when we try to update
the payload table by calling drm_dp_update_payload_part1(), we can
successfully update the DPCD payload table of the downstream port and
clear the proposed_vcpi[] to NULL.

Changes since v1: (https://patchwork.kernel.org/patch/11275801/)
* Invert the conditional to reduce the indenting

Reviewed-by: Lyude Paul
Signed-off-by: Wayne Lin
Signed-off-by: Lyude Paul
[removed cc for stable - there are too many patches this depends on for
this to backport cleanly]
Link: https://patchwork.freedesktop.org/patch/msgid/20200106102158.28261-1-Wayne.Lin@amd.com
---
 drivers/gpu/drm/drm_dp_mst_topology.c | 25 ++++++++++++++++++++++++-
 1 file changed, 24 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 644c72f9c594..03874ee2a033 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -2318,7 +2318,7 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
 {
 	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
 	struct drm_dp_mst_port *port;
-	int old_ddps, ret;
+	int old_ddps, old_input, ret, i;
 	u8 new_pdt;
 	bool dowork = false, create_connector = false;
 
@@ -2349,6 +2349,7 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
 	}
 
 	old_ddps = port->ddps;
+	old_input = port->input;
 	port->input = conn_stat->input_port;
 	port->mcs = conn_stat->message_capability_status;
 	port->ldps = conn_stat->legacy_device_plug_status;
@@ -2373,6 +2374,28 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
 			dowork = false;
 	}
 
+	if (!old_input && old_ddps != port->ddps && !port->ddps) {
+		for (i = 0; i < mgr->max_payloads; i++) {
+			struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
+			struct drm_dp_mst_port *port_validated;
+
+			if (!vcpi)
+				continue;
+
+			port_validated =
+				container_of(vcpi, struct drm_dp_mst_port, vcpi);
+			port_validated =
+				drm_dp_mst_topology_get_port_validated(mgr, port_validated);
+			if (!port_validated) {
+				mutex_lock(&mgr->payload_lock);
+				vcpi->num_slots = 0;
+				mutex_unlock(&mgr->payload_lock);
+			} else {
+				drm_dp_mst_topology_put_port(port_validated);
+			}
+		}
+	}
+
 	if (port->connector)
 		drm_modeset_unlock(&mgr->base.lock);
 	else if (create_connector)
--
cgit v1.2.3
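
The gist of the new loop can be shown with a small standalone model.
The types and the port_in_topology flag below are simplified stand-ins
(the real code validates each port with
drm_dp_mst_topology_get_port_validated() under payload_lock), but the
clean-up logic is the same: any proposed VCPI whose port has left the
topology gets its time slots zeroed.

#include <stddef.h>
#include <stdio.h>

#define MAX_PAYLOADS 4

/* Simplified stand-in for struct drm_dp_vcpi plus the result of
 * validating its port against the live topology. */
struct vcpi {
	int num_slots;
	int port_in_topology; /* models get_port_validated() succeeding */
};

static void clear_stale_vcpis(struct vcpi *proposed[], size_t n)
{
	for (size_t i = 0; i < n; i++) {
		if (!proposed[i])
			continue;
		if (!proposed[i]->port_in_topology)
			proposed[i]->num_slots = 0; /* free its time slots */
	}
}

int main(void)
{
	struct vcpi live = { .num_slots = 8, .port_in_topology = 1 };
	struct vcpi gone = { .num_slots = 4, .port_in_topology = 0 };
	struct vcpi *proposed[MAX_PAYLOADS] = { &live, &gone, NULL, NULL };

	clear_stale_vcpis(proposed, MAX_PAYLOADS);
	printf("live: %d slots, gone: %d slots\n",
	       live.num_slots, gone.num_slots); /* live: 8, gone: 0 */
	return 0;
}

With the stale entries zeroed, drm_dp_update_payload_part1() no longer
trips over ports that fail validation, and the DPCD payload table of
the downstream port can be updated.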
From 5a64967a2f3bbc01cc708ee43c7b0893089c61c4 Mon Sep 17 00:00:00 2001
From: Wayne Lin
Date: Mon, 13 Jan 2020 17:36:49 +0800
Subject: drm/dp_mst: Have DP_Tx send one msg at a time

[Why]
Noticed this while testing MST with the 4-port MST hub from
StarTech.com. Sometimes the monitors can't be lit up normally and we
get the error message 'sideband msg build failed'. Looking into the aux
transactions, it turns out that the source sometimes sends out another
down request before receiving the down reply to the previous down
request. On the other hand, the current code in drm_dp_get_one_sb_msg()
doesn't handle the interleaved replies case. Hence, the source can't
build up the message completely and can't light up the monitors.

[How]
For good compatibility, force the source to send out one down request
at a time. Add a flag, is_waiting_for_dwn_reply, to determine whether
the source may send out a down request immediately:
- Check the flag before calling process_single_down_tx_qlock to send
  out a msg
- Set the flag when a down request is successfully sent out
- Clear the flag when a down reply is successfully built up
- Clear the flag when errors occur while sending out a down request
- Clear the flag when errors occur while building up a down reply
- Clear the flag when a timeout occurs while waiting for a down reply
- Use drm_dp_mst_kick_tx() to try to send another down request in the
  queue at the end of drm_dp_mst_wait_tx_reply() (attempt to send out
  messages in the queue when errors occur)

Cc: Lyude Paul
Signed-off-by: Wayne Lin
Signed-off-by: Lyude Paul
Link: https://patchwork.freedesktop.org/patch/msgid/20200113093649.11755-1-Wayne.Lin@amd.com
---
 drivers/gpu/drm/drm_dp_mst_topology.c | 14 ++++++++++++--
 include/drm/drm_dp_mst_helper.h       |  6 ++++++
 2 files changed, 18 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 03874ee2a033..5a61a5596912 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1190,6 +1190,8 @@ static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
 		    txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
 			mstb->tx_slots[txmsg->seqno] = NULL;
 		}
+		mgr->is_waiting_for_dwn_reply = false;
+
 	}
 out:
 	if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) {
@@ -1199,6 +1201,7 @@ out:
 	}
 	mutex_unlock(&mgr->qlock);
 
+	drm_dp_mst_kick_tx(mgr);
 	return ret;
 }
 
@@ -2741,9 +2744,11 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
 	ret = process_single_tx_qlock(mgr, txmsg, false);
 	if (ret == 1) {
 		/* txmsg is sent it should be in the slots now */
+		mgr->is_waiting_for_dwn_reply = true;
 		list_del(&txmsg->next);
 	} else if (ret) {
 		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
+		mgr->is_waiting_for_dwn_reply = false;
 		list_del(&txmsg->next);
 		if (txmsg->seqno != -1)
 			txmsg->dst->tx_slots[txmsg->seqno] = NULL;
@@ -2783,7 +2788,8 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
 		drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
 	}
 
-	if (list_is_singular(&mgr->tx_msg_downq))
+	if (list_is_singular(&mgr->tx_msg_downq) &&
+	    !mgr->is_waiting_for_dwn_reply)
 		process_single_down_tx_qlock(mgr);
 	mutex_unlock(&mgr->qlock);
 }
@@ -3701,6 +3707,7 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
 	mutex_lock(&mgr->qlock);
 	txmsg->state = DRM_DP_SIDEBAND_TX_RX;
 	mstb->tx_slots[slot] = NULL;
+	mgr->is_waiting_for_dwn_reply = false;
 	mutex_unlock(&mgr->qlock);
 
 	wake_up_all(&mgr->tx_waitq);
@@ -3710,6 +3717,9 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
 no_msg:
 	drm_dp_mst_topology_put_mstb(mstb);
 clear_down_rep_recv:
+	mutex_lock(&mgr->qlock);
+	mgr->is_waiting_for_dwn_reply = false;
+	mutex_unlock(&mgr->qlock);
 	memset(&mgr->down_rep_recv, 0,
 	       sizeof(struct drm_dp_sideband_msg_rx));
 	return 0;
@@ -4520,7 +4530,7 @@ static void drm_dp_tx_work(struct work_struct *work)
 	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
 
 	mutex_lock(&mgr->qlock);
-	if (!list_empty(&mgr->tx_msg_downq))
+	if (!list_empty(&mgr->tx_msg_downq) && !mgr->is_waiting_for_dwn_reply)
 		process_single_down_tx_qlock(mgr);
 	mutex_unlock(&mgr->qlock);
 }
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index d5fc90b30487..c1bda7030e2d 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -605,6 +605,12 @@ struct drm_dp_mst_topology_mgr {
 	 * &drm_dp_sideband_msg_tx.state once they are queued
 	 */
 	struct mutex qlock;
+
+	/**
+	 * @is_waiting_for_dwn_reply: indicate whether is waiting for down reply
+	 */
+	bool is_waiting_for_dwn_reply;
+
 	/**
 	 * @tx_msg_downq: List of pending down replies.
 	 */
--
cgit v1.2.3
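
The serialization the flag provides can be modeled with a short
standalone sketch. Locking and the real sideband plumbing are elided;
the function names echo the driver, but the bodies are toy stand-ins,
not the actual implementation.

#include <stdbool.h>
#include <stdio.h>

static bool is_waiting_for_dwn_reply;
static int queued_msgs;

/* Models process_single_down_tx_qlock(): send one queued request and
 * mark a reply as outstanding. */
static void process_single_down_tx(void)
{
	if (queued_msgs == 0)
		return;
	queued_msgs--;
	is_waiting_for_dwn_reply = true;
	printf("sent one down request, %d still queued\n", queued_msgs);
}

/* Models drm_dp_queue_down_tx(): only kick the queue when no down
 * reply is outstanding -- the check this patch adds. */
static void queue_down_tx(void)
{
	queued_msgs++;
	if (!is_waiting_for_dwn_reply)
		process_single_down_tx();
}

/* Models a down reply being built up successfully: clear the flag and
 * try to send the next queued request. */
static void handle_down_rep(void)
{
	is_waiting_for_dwn_reply = false;
	process_single_down_tx();
}

int main(void)
{
	queue_down_tx();   /* goes out immediately */
	queue_down_tx();   /* held back: a reply is still outstanding */
	handle_down_rep(); /* reply arrives, second request goes out */
	return 0;
}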