author     Ben Ben-Ishay <benishay@nvidia.com>  2021-06-09 12:28:57 +0300
committer  Saeed Mahameed <saeedm@nvidia.com>   2021-10-26 19:30:40 -0700
commit     e5ca8fb08ab2c4b2c9ea31a41a02ae2a0236ded4 (patch)
tree       4613ade5ca0ed5a726cee3f9070c70117dc0b831 /drivers/net/ethernet/mellanox/mlx5/core/en/tir.c
parent     d7b896acbdcb3ef5dab1fd2f33ba5a8da6ba1dda (diff)
download   linux-e5ca8fb08ab2c4b2c9ea31a41a02ae2a0236ded4.tar.bz2
net/mlx5e: Add control path for SHAMPO feature
This commit introduces the control path infrastructure for the SHAMPO feature.

SHAMPO enables packet stitching by splitting each packet into a header and a payload: the header is placed in a dedicated buffer and the payload on the RX ring, which allows the data part of a flow to be stitched together contiguously in the receive buffer. SHAMPO is implemented as a linked-list striding RQ feature.

To support packet splitting and payload stitching:
- Enlarge the ICOSQ and its corresponding CQ to support the header buffer memory regions.
- Add support for creating a linked-list striding RQ with the SHAMPO feature set in the open_rq function.
- Add a deallocation function, and the corresponding calls, for the SHAMPO header buffer.
- Add mlx5e_create_umr_klm_mkey to support a KLM mkey for the header buffer.
- Rename mlx5e_create_umr_mkey to mlx5e_create_umr_mtt_mkey.

Signed-off-by: Ben Ben-Ishay <benishay@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
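The change to mlx5e_tir_builder_build_packet_merge() in the diff below is a switch on the packet-merge type, adding a SHAMPO case next to the existing LRO handling. Below is a minimal standalone sketch of that dispatch shape; the names pm_type, pm_param, the MASK_* constants and select_merge_mask() are simplified stand-ins for illustration only, not the driver's real definitions.

/* Standalone model of the packet-merge dispatch added in en/tir.c.
 * All names here are simplified stand-ins, not the driver's real types. */
#include <stdint.h>
#include <stdio.h>

enum pm_type {               /* mirrors MLX5E_PACKET_MERGE_{NONE,LRO,SHAMPO} */
	PM_NONE,
	PM_LRO,
	PM_SHAMPO,
};

struct pm_param {
	enum pm_type type;
	uint32_t timeout;    /* LRO timeout, in usecs */
};

#define MASK_IPV4_LRO (1u << 0) /* stand-in for MLX5_TIRC_PACKET_MERGE_MASK_IPV4_LRO */
#define MASK_IPV6_LRO (1u << 1) /* stand-in for MLX5_TIRC_PACKET_MERGE_MASK_IPV6_LRO */
#define MASK_SHAMPO   (1u << 2) /* stand-in for MLX5_TIRC_PACKET_MERGE_MASK_SHAMPO */

/* Pick the packet_merge_mask bits the TIR context would be programmed with. */
static uint32_t select_merge_mask(const struct pm_param *p)
{
	switch (p->type) {
	case PM_LRO:
		return MASK_IPV4_LRO | MASK_IPV6_LRO;
	case PM_SHAMPO:
		return MASK_SHAMPO;
	default:             /* PM_NONE: mask stays clear, merging disabled */
		return 0;
	}
}

int main(void)
{
	struct pm_param p = { .type = PM_SHAMPO };

	printf("packet_merge_mask = 0x%x\n", select_merge_mask(&p));
	return 0;
}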
Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core/en/tir.c')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tir.c | 24 +++++++++++++++---------
1 file changed, 15 insertions(+), 9 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c
index a1afb8585e37..da169b816665 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c
@@ -79,15 +79,21 @@ void mlx5e_tir_builder_build_packet_merge(struct mlx5e_tir_builder *builder,
if (builder->modify)
MLX5_SET(modify_tir_in, builder->in, bitmask.packet_merge, 1);
- if (pkt_merge_param->type == MLX5E_PACKET_MERGE_NONE)
- return;
-
- MLX5_SET(tirc, tirc, packet_merge_mask,
- MLX5_TIRC_PACKET_MERGE_MASK_IPV4_LRO |
- MLX5_TIRC_PACKET_MERGE_MASK_IPV6_LRO);
- MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
- (MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ - rough_max_l2_l3_hdr_sz) >> 8);
- MLX5_SET(tirc, tirc, lro_timeout_period_usecs, pkt_merge_param->timeout);
+ switch (pkt_merge_param->type) {
+ case MLX5E_PACKET_MERGE_LRO:
+ MLX5_SET(tirc, tirc, packet_merge_mask,
+ MLX5_TIRC_PACKET_MERGE_MASK_IPV4_LRO |
+ MLX5_TIRC_PACKET_MERGE_MASK_IPV6_LRO);
+ MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
+ (MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ - rough_max_l2_l3_hdr_sz) >> 8);
+ MLX5_SET(tirc, tirc, lro_timeout_period_usecs, pkt_merge_param->timeout);
+ break;
+ case MLX5E_PACKET_MERGE_SHAMPO:
+ MLX5_SET(tirc, tirc, packet_merge_mask, MLX5_TIRC_PACKET_MERGE_MASK_SHAMPO);
+ break;
+ default:
+ break;
+ }
}
static int mlx5e_hfunc_to_hw(u8 hfunc)
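A note on the LRO branch retained above: lro_max_ip_payload_size is programmed in units of 256 bytes, which is what the >> 8 accomplishes. The sketch below works that computation through, assuming MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ is 64 KiB and rough_max_l2_l3_hdr_sz is 256 as in the driver sources of this period; both values are assumptions and do not appear in this hunk.

#include <stdio.h>

/* Assumed values, taken from the driver headers of this period; they are not
 * part of the diff above. */
#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024)
static const unsigned int rough_max_l2_l3_hdr_sz = 256;

int main(void)
{
	/* The TIR field is expressed in 256-byte units, hence the shift by 8. */
	unsigned int max_payload_256b =
		(MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ - rough_max_l2_l3_hdr_sz) >> 8;

	printf("lro_max_ip_payload_size = %u (256-byte units => %u bytes)\n",
	       max_payload_256b, max_payload_256b * 256);
	return 0;
}

With these assumed constants the field comes out to 255, i.e. roughly 64 KiB of IP payload minus the rough L2/L3 header allowance.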