author    Mika Westerberg <mika.westerberg@linux.intel.com>  2022-02-13 16:44:45 +0200
committer Mika Westerberg <mika.westerberg@linux.intel.com>  2022-05-05 09:25:23 +0300
commit    8e1de7042596abb7cb277ea751fc13a4c2b65aea
tree      62cc3e0ba5c2875011a2e95c69e1ab3372dfc40c
parent    0a2e1667a73fe0c4374ddace925d85a4072d509c
thunderbolt: Add support for XDomain lane bonding
The USB4 Inter-Domain Service specification defines a protocol that can
be used to establish lane bonding between two USB4 domains (hosts). So
far we have not implemented it because the host controller DMA was not
fast enough to go over 20 Gbit/s even when the lanes were bonded.
However, starting from Intel Alder Lake CPUs the DMA can exceed
20 Gbit/s, so it now makes sense to add this support to the driver.

Because both ends need to negotiate the bonding, we add a simple state
machine that tracks the connection state and performs the steps
described by the USB4 Inter-Domain Service specification. We only
establish lane bonding when both sides of the link support it;
otherwise we default to a single lane.

Lane bonding is also done only when the software connection manager is
used. On systems with a firmware-based connection manager, the firmware
handles the high-speed tunneling, so lane bonding is up to that
implementation (the Intel firmware-based connection manager does not
support lane bonding).

Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
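The negotiation described above can be pictured as a small request/response state machine. The following is a minimal illustrative sketch only: every identifier in it (bonding_state, bonding_ctx, the query/request helpers) is hypothetical and the helpers are stubs standing in for the real XDP message exchange; the driver's actual state machine lives in drivers/thunderbolt/xdomain.c.

/*
 * Illustrative sketch of a lane bonding negotiation state machine.
 * All identifiers are hypothetical; the driver's real state machine
 * is implemented in drivers/thunderbolt/xdomain.c.
 */
#include <stdbool.h>
#include <stdio.h>

enum bonding_state {
	BONDING_INIT,		/* nothing negotiated yet */
	BONDING_STATUS_SENT,	/* LINK_STATE_STATUS_REQUEST sent */
	BONDING_CHANGE_SENT,	/* LINK_STATE_CHANGE_REQUEST sent */
	BONDING_BONDED,		/* both lanes bonded */
	BONDING_SINGLE_LANE,	/* fell back to a single lane */
};

struct bonding_ctx {
	enum bonding_state state;
	bool peer_supports_bonding;	/* learned from the status response */
	bool change_accepted;		/* learned from the change response */
};

/* Stand-in for sending LINK_STATE_STATUS_REQUEST and waiting for the
 * matching LINK_STATE_STATUS_RESPONSE. */
static bool query_link_state_status(struct bonding_ctx *ctx)
{
	ctx->peer_supports_bonding = true;	/* pretend the peer can bond */
	return true;
}

/* Stand-in for sending LINK_STATE_CHANGE_REQUEST and waiting for the
 * matching LINK_STATE_CHANGE_RESPONSE. */
static bool request_link_state_change(struct bonding_ctx *ctx)
{
	ctx->change_accepted = true;		/* pretend the peer agreed */
	return true;
}

/* Both domains run this logic; the link ends up bonded only if every
 * step succeeds, otherwise it stays on a single lane. */
static void negotiate_bonding(struct bonding_ctx *ctx)
{
	ctx->state = BONDING_STATUS_SENT;
	if (!query_link_state_status(ctx) || !ctx->peer_supports_bonding) {
		ctx->state = BONDING_SINGLE_LANE;
		return;
	}

	ctx->state = BONDING_CHANGE_SENT;
	if (!request_link_state_change(ctx) || !ctx->change_accepted) {
		ctx->state = BONDING_SINGLE_LANE;
		return;
	}

	ctx->state = BONDING_BONDED;
}

int main(void)
{
	struct bonding_ctx ctx = { .state = BONDING_INIT };

	negotiate_bonding(&ctx);
	printf("link %s\n",
	       ctx.state == BONDING_BONDED ? "bonded" : "single lane");
	return 0;
}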
Diffstat (limited to 'drivers/thunderbolt/tb_msgs.h')
-rw-r--r--  drivers/thunderbolt/tb_msgs.h | 39
1 file changed, 39 insertions(+), 0 deletions(-)
diff --git a/drivers/thunderbolt/tb_msgs.h b/drivers/thunderbolt/tb_msgs.h
index fe1afa44c56d..33c4c7aed56d 100644
--- a/drivers/thunderbolt/tb_msgs.h
+++ b/drivers/thunderbolt/tb_msgs.h
@@ -527,6 +527,10 @@ enum tb_xdp_type {
PROPERTIES_CHANGED_RESPONSE,
ERROR_RESPONSE,
UUID_REQUEST = 12,
+ LINK_STATE_STATUS_REQUEST = 15,
+ LINK_STATE_STATUS_RESPONSE,
+ LINK_STATE_CHANGE_REQUEST,
+ LINK_STATE_CHANGE_RESPONSE,
};

struct tb_xdp_header {
@@ -540,6 +544,41 @@ struct tb_xdp_error_response {
u32 error;
};

+struct tb_xdp_link_state_status {
+ struct tb_xdp_header hdr;
+};
+
+struct tb_xdp_link_state_status_response {
+ union {
+ struct tb_xdp_error_response err;
+ struct {
+ struct tb_xdp_header hdr;
+ u32 status;
+ u8 slw;
+ u8 tlw;
+ u8 sls;
+ u8 tls;
+ };
+ };
+};
+
+struct tb_xdp_link_state_change {
+ struct tb_xdp_header hdr;
+ u8 tlw;
+ u8 tls;
+ u16 reserved;
+};
+
+struct tb_xdp_link_state_change_response {
+ union {
+ struct tb_xdp_error_response err;
+ struct {
+ struct tb_xdp_header hdr;
+ u32 status;
+ };
+ };
+};
+
struct tb_xdp_uuid {
struct tb_xdp_header hdr;
};
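
Both new response structures wrap their success layout in a union with struct tb_xdp_error_response, so a receiver can parse the same buffer either as an error or as a result once it has looked at the header. The decode sketch below builds on the structures added above and is only a guess at usage: it assumes struct tb_xdp_header carries an enum tb_xdp_type value in a "type" member, that a status of 0 means success, and it reads slw/tlw/sls/tls as supported/target link width and supported/target link speed purely on the strength of the field names. The authoritative handling is in drivers/thunderbolt/xdomain.c.

/*
 * Hypothetical decode of a link state status response, using the
 * structures from tb_msgs.h above. The header "type" member, the
 * meaning of status == 0, and the width/speed field interpretation
 * are all assumptions; see drivers/thunderbolt/xdomain.c for the
 * real handling.
 */
#include <linux/errno.h>
#include <linux/printk.h>

static int parse_link_state_status(const struct tb_xdp_link_state_status_response *res)
{
	/* Both union arms are assumed to start with the XDP header, so
	 * the type can be checked before committing to either layout. */
	if (res->hdr.type == ERROR_RESPONSE) {
		pr_err("lane bonding: remote replied with error %u\n",
		       res->err.error);
		return -EPROTO;
	}
	if (res->hdr.type != LINK_STATE_STATUS_RESPONSE)
		return -EPROTO;		/* unexpected message type */
	if (res->status)
		return -EIO;		/* request failed on the remote end */

	pr_info("lane bonding: supported width %u speed %u, target width %u speed %u\n",
		res->slw, res->sls, res->tlw, res->tls);
	return 0;
}

Overlaying the error response this way means callers handle one struct per exchange rather than two, and it makes explicit that both layouts share the same leading header.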
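On the request side, struct tb_xdp_link_state_change carries only the target width and speed. The sketch below fills one in; it is hypothetical throughout, the width/speed encodings are deliberately left to the caller (the USB4 Inter-Domain Service specification defines them), and the header fill (route, UUID, sequence, type) is omitted since the driver has its own helper for that.

/*
 * Hypothetical construction of a link state change request asking the
 * remote domain for a new link configuration. Encodings of the width
 * and speed values follow the USB4 Inter-Domain Service specification
 * and are not reproduced here.
 */
#include <linux/string.h>

static void fill_link_state_change(struct tb_xdp_link_state_change *req,
				   u8 target_width, u8 target_speed)
{
	memset(req, 0, sizeof(*req));
	/* Header fill (route, UUID, sequence, type) omitted. */
	req->tlw = target_width;	/* target link width, per spec encoding */
	req->tls = target_speed;	/* target link speed, per spec encoding */
	/* req->reserved stays zero */
}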