From 115d9f95ea7ab780ef315dc356bebba2e07cb731 Mon Sep 17 00:00:00 2001
From: Gal Pressman
Date: Wed, 13 Jul 2022 13:57:03 +0300
Subject: net/mlx5e: Remove WARN_ON when trying to offload an unsupported TLS cipher/version

The driver reports whether TX/RX TLS device offloads are supported, but not
which ciphers/versions; these should be handled by returning -EOPNOTSUPP when
.tls_dev_add() is called.

Remove the WARN_ON kernel trace when the driver gets a request to offload a
cipher/version that is not supported, since such requests are expected.

Fixes: d2ead1f360e8 ("net/mlx5e: Add kTLS TX HW offload support")
Signed-off-by: Gal Pressman
Reviewed-by: Tariq Toukan
Reviewed-by: Maxim Mikityanskiy
Signed-off-by: Saeed Mahameed
---
 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/net/ethernet/mellanox/mlx5')

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
index 814f2a56f633..30a70d139046 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
@@ -54,7 +54,7 @@ static int mlx5e_ktls_add(struct net_device *netdev, struct sock *sk,
 	struct mlx5_core_dev *mdev = priv->mdev;
 	int err;
 
-	if (WARN_ON(!mlx5e_ktls_type_check(mdev, crypto_info)))
+	if (!mlx5e_ktls_type_check(mdev, crypto_info))
 		return -EOPNOTSUPP;
 
 	if (direction == TLS_OFFLOAD_CTX_DIR_TX)
--
cgit v1.2.3

From 903f2194f74bbd289f3170114035d472a36a8ab4 Mon Sep 17 00:00:00 2001
From: Maor Dickman
Date: Mon, 18 Jul 2022 09:44:13 +0300
Subject: net/mlx5e: TC, Fix post_act to not match on in_port metadata

The cited commit changed CT to use the multi table actions post act
infrastructure instead of its own post act infrastructure. This broke decap
during VF tunnel offload (stack devices) with CT, due to a wrong match on
in_port metadata in the post act table.

Only VF tunnel offload is broken because it modifies the packet in_port
metadata to be the VF metadata, and this change is not propagated to the
post act creation.

Fix this by modifying the post act rules to match only on fte_id and not on
in_port metadata, which isn't needed.

Fixes: a81283263bb0 ("net/mlx5e: Use multi table support for CT and sample actions")
Signed-off-by: Maor Dickman
Reviewed-by: Roi Dayan
Signed-off-by: Saeed Mahameed
---
 drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'drivers/net/ethernet/mellanox/mlx5')

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
index dea137dd744b..2b64dd557b5d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
@@ -128,6 +128,7 @@ mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *at
 	post_attr->inner_match_level = MLX5_MATCH_NONE;
 	post_attr->outer_match_level = MLX5_MATCH_NONE;
 	post_attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_DECAP;
+	post_attr->flags |= MLX5_ATTR_FLAG_NO_IN_PORT;
 
 	handle->ns_type = post_act->ns_type;
 	/* Splits were handled before post action */
--
cgit v1.2.3

From 562696c3c62c7c23dd896e9447252ce9268cb812 Mon Sep 17 00:00:00 2001
From: Maxim Mikityanskiy
Date: Thu, 26 May 2022 16:48:47 +0300
Subject: net/mlx5e: Fix the value of MLX5E_MAX_RQ_NUM_MTTS

MLX5E_MAX_RQ_NUM_MTTS should be the maximum value, so that
MLX5_MTT_OCTW(MLX5E_MAX_RQ_NUM_MTTS) fits into u16.
The current value of 1 << 17 results in MLX5_MTT_OCTW(1 << 17) = 1 << 16, which doesn't fit into u16. This commit replaces it with the maximum value that still fits u16. Fixes: 73281b78a37a ("net/mlx5e: Derive Striding RQ size from MTU") Signed-off-by: Maxim Mikityanskiy Reviewed-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet/mellanox/mlx5') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index b6c15efe92ad..f794ffaf1e04 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -109,7 +109,7 @@ struct page_pool; #define MLX5E_REQUIRED_WQE_MTTS (MLX5_ALIGN_MTTS(MLX5_MPWRQ_PAGES_PER_WQE + 1)) #define MLX5E_REQUIRED_MTTS(wqes) (wqes * MLX5E_REQUIRED_WQE_MTTS) #define MLX5E_MAX_RQ_NUM_MTTS \ - ((1 << 16) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */ + (ALIGN_DOWN(U16_MAX, 4) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */ #define MLX5E_ORDER2_MAX_PACKET_MTU (order_base_2(10 * 1024)) #define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW \ (ilog2(MLX5E_MAX_RQ_NUM_MTTS / MLX5E_REQUIRED_WQE_MTTS)) -- cgit v1.2.3 From 52586d2f56b3e4f528ca7268d65074e92c936681 Mon Sep 17 00:00:00 2001 From: Maxim Mikityanskiy Date: Mon, 30 May 2022 21:06:03 +0300 Subject: net/mlx5e: xsk: Account for XSK RQ UMRs when calculating ICOSQ size ICOSQ is used to post UMR WQEs for both regular RQ and XSK RQ. However, space in ICOSQ is reserved only for the regular RQ, which may cause ICOSQ overflows when using XSK (the most risk is on activating channels). This commit fixes the issue by reserving space for XSK UMR WQEs as well. As XSK may be enabled without restarting the channel and recreating the ICOSQ, this space is reserved unconditionally. Fixes: db05815b36cb ("net/mlx5e: Add XSK zero-copy support") Signed-off-by: Maxim Mikityanskiy Reviewed-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en/params.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'drivers/net/ethernet/mellanox/mlx5') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c index 3c1edfa33aa7..e025040350ba 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c @@ -790,8 +790,20 @@ static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5_core_dev *mdev, return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE; wqebbs = MLX5E_UMR_WQEBBS * BIT(mlx5e_get_rq_log_wq_sz(rqp->rqc)); + + /* If XDP program is attached, XSK may be turned on at any time without + * restarting the channel. ICOSQ must be big enough to fit UMR WQEs of + * both regular RQ and XSK RQ. + * Although mlx5e_mpwqe_get_log_rq_size accepts mlx5e_xsk_param, it + * doesn't affect its return value, as long as params->xdp_prog != NULL, + * so we can just multiply by 2. 
+ */ + if (params->xdp_prog) + wqebbs *= 2; + if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) wqebbs += mlx5e_shampo_icosq_sz(mdev, params, rqp); + return max_t(u8, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE, order_base_2(wqebbs)); } -- cgit v1.2.3 From 677e78c8d44f326a73a77d71acf3a49ea562c1d9 Mon Sep 17 00:00:00 2001 From: Maxim Mikityanskiy Date: Thu, 2 Jun 2022 15:14:08 +0300 Subject: net/mlx5e: Fix calculations related to max MPWQE size Before commit 76c31e5f7585 ("net/mlx5e: Use FW limitation for max MPW WQEBBs"), the maximum size of MPWQE in WQEBBs was hardcoded as a driver constant. That commit started using the firmware capability that can further limit the size, however, it unintentionally changed a few things: 1. The calculation of MLX5E_MAX_KLM_PER_WQE used the size in DS, which was replaced by the size in WQEBBs, making the resulting value 4 times smaller. 2. MLX5E_TX_MPW_MAX_WQEBBS used to be aligned to the cache line size (either 64 or 128 bytes, i.e. 1 or 2 WQEBBs), but it's no longer the case if the firmware capability is smaller than the driver maximum. Fix both issues by using the correct units for MLX5E_MAX_KLM_PER_WQE and by aligning mlx5e_get_sw_max_sq_mpw_wqebbs after taking the minimum. Besides fixing the arithmetics in calculation of MLX5E_MAX_KLM_PER_WQE, also use appropriate constants: `size of BSF * num of DS per WQEBB * number of WQEBBs` (the calculation before the blamed commit) doesn't make much sense to calculate the WQE size in bytes, so just use `size of WQEBB * number of WQEBBs`. While at it, replace the types that hold the number of WQEBBs by u8. These values don't exceed 16, and it allows to fill holes in two structs. Fixes: 76c31e5f7585 ("net/mlx5e: Use FW limitation for max MPW WQEBBs") Signed-off-by: Maxim Mikityanskiy Reviewed-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index f794ffaf1e04..29b10ef787b9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -174,8 +174,8 @@ struct page_pool; ALIGN_DOWN(MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size), MLX5_UMR_KLM_ALIGNMENT) #define MLX5E_MAX_KLM_PER_WQE(mdev) \ - MLX5E_KLM_ENTRIES_PER_WQE(mlx5e_get_sw_max_sq_mpw_wqebbs(mlx5e_get_max_sq_wqebbs(mdev)) \ - << MLX5_MKEY_BSF_OCTO_SIZE) + MLX5E_KLM_ENTRIES_PER_WQE(MLX5_SEND_WQE_BB * \ + mlx5e_get_sw_max_sq_mpw_wqebbs(mlx5e_get_max_sq_wqebbs(mdev))) #define MLX5E_MSG_LEVEL NETIF_MSG_LINK @@ -233,7 +233,7 @@ static inline u16 mlx5e_get_max_sq_wqebbs(struct mlx5_core_dev *mdev) MLX5_CAP_GEN(mdev, max_wqe_sz_sq) / MLX5_SEND_WQE_BB); } -static inline u16 mlx5e_get_sw_max_sq_mpw_wqebbs(u16 max_sq_wqebbs) +static inline u8 mlx5e_get_sw_max_sq_mpw_wqebbs(u8 max_sq_wqebbs) { /* The return value will be multiplied by MLX5_SEND_WQEBB_NUM_DS. * Since max_sq_wqebbs may be up to MLX5_SEND_WQE_MAX_WQEBBS == 16, @@ -242,11 +242,12 @@ static inline u16 mlx5e_get_sw_max_sq_mpw_wqebbs(u16 max_sq_wqebbs) * than MLX5_SEND_WQE_MAX_WQEBBS to let a full-session WQE be * cache-aligned. 
	 */
-#if L1_CACHE_BYTES < 128
-	return min_t(u16, max_sq_wqebbs, MLX5_SEND_WQE_MAX_WQEBBS - 1);
-#else
-	return min_t(u16, max_sq_wqebbs, MLX5_SEND_WQE_MAX_WQEBBS - 2);
+	u8 wqebbs = min_t(u8, max_sq_wqebbs, MLX5_SEND_WQE_MAX_WQEBBS - 1);
+
+#if L1_CACHE_BYTES >= 128
+	wqebbs = ALIGN_DOWN(wqebbs, 2);
 #endif
+	return wqebbs;
 }
 
 struct mlx5e_tx_wqe {
@@ -455,7 +456,7 @@ struct mlx5e_txqsq {
 	struct netdev_queue *txq;
 	u32 sqn;
 	u16 stop_room;
-	u16 max_sq_mpw_wqebbs;
+	u8 max_sq_mpw_wqebbs;
 	u8 min_inline_mode;
 	struct device *pdev;
 	__be32 mkey_be;
@@ -570,7 +571,7 @@ struct mlx5e_xdpsq {
 	struct device *pdev;
 	__be32 mkey_be;
 	u16 stop_room;
-	u16 max_sq_mpw_wqebbs;
+	u8 max_sq_mpw_wqebbs;
 	u8 min_inline_mode;
 	unsigned long state;
 	unsigned int hw_mtu;
--
cgit v1.2.3

From c0063a43700fa8c98cac2637aa1afcf40bb9e403 Mon Sep 17 00:00:00 2001
From: Vlad Buslov
Date: Thu, 7 Jul 2022 21:49:18 +0200
Subject: net/mlx5e: Modify slow path rules to go to slow fdb

While extending the available range of supported chains/prios, the referenced
commit also modified the slow path rules to go to the FT chain instead of the
actual slow FDB. However, neither of the existing users of the
MLX5_ATTR_FLAG_SLOW_PATH flag (tunnel encap entries with invalid encap and
flows with trap action) needs to match on the FT chain. After bridge offload
was implemented, packets of such flows can also be matched by the bridge
priority tables, which is undesirable. Restore the slow path flows
implementation to redirect packets to slow_fdb.

Fixes: 278d51f24330 ("net/mlx5: E-Switch, Increase number of chains and priorities")
Signed-off-by: Vlad Buslov
Reviewed-by: Roi Dayan
Reviewed-by: Paul Blakey
Signed-off-by: Saeed Mahameed
---
 .../ethernet/mellanox/mlx5/core/eswitch_offloads.c | 23 ++++++++++++++++------
 1 file changed, 17 insertions(+), 6 deletions(-)

(limited to 'drivers/net/ethernet/mellanox/mlx5')

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 2ce3728576d1..eb79810199d3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -230,10 +230,8 @@ esw_setup_ft_dest(struct mlx5_flow_destination *dest,
 }
 
 static void
-esw_setup_slow_path_dest(struct mlx5_flow_destination *dest,
-			 struct mlx5_flow_act *flow_act,
-			 struct mlx5_fs_chains *chains,
-			 int i)
+esw_setup_accept_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
+		      struct mlx5_fs_chains *chains, int i)
 {
 	if (mlx5_chains_ignore_flow_level_supported(chains))
 		flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
@@ -241,6 +239,16 @@ esw_setup_slow_path_dest(struct mlx5_flow_destination *dest,
 	dest[i].ft = mlx5_chains_get_tc_end_ft(chains);
 }
 
+static void
+esw_setup_slow_path_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
+			 struct mlx5_eswitch *esw, int i)
+{
+	if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level))
+		flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+	dest[i].ft = esw->fdb_table.offloads.slow_fdb;
+}
+
 static int
 esw_setup_chain_dest(struct mlx5_flow_destination *dest,
 		     struct mlx5_flow_act *flow_act,
@@ -475,8 +483,11 @@ esw_setup_dests(struct mlx5_flow_destination *dest,
 	} else if (attr->dest_ft) {
 		esw_setup_ft_dest(dest, flow_act, esw, attr, spec, *i);
 		(*i)++;
-	} else if (mlx5e_tc_attr_flags_skip(attr->flags)) {
-		esw_setup_slow_path_dest(dest, flow_act, chains, *i);
+	} else if (attr->flags & MLX5_ATTR_FLAG_SLOW_PATH) {
esw_setup_slow_path_dest(dest, flow_act, esw, *i); + (*i)++; + } else if (attr->flags & MLX5_ATTR_FLAG_ACCEPT) { + esw_setup_accept_dest(dest, flow_act, chains, *i); (*i)++; } else if (attr->dest_chain) { err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain, -- cgit v1.2.3 From a6e9085d791f8306084fd5bc44dd3fdd4e1ac27b Mon Sep 17 00:00:00 2001 From: Maher Sanalla Date: Sun, 24 Jul 2022 11:28:21 +0300 Subject: net/mlx5: Adjust log_max_qp to be 18 at most The cited commit limited log_max_qp to be 17 due to FW capabilities. Recently, it turned out that there are old FW versions that supported more than 17, so the cited commit caused a degradation. Thus, set the maximum log_max_qp back to 18 as it was before the cited commit. Fixes: 7f839965b2d7 ("net/mlx5: Update log_max_qp value to be 17 at most") Signed-off-by: Maher Sanalla Reviewed-by: Maor Gottlieb Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet/mellanox/mlx5') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index c9b4e50a593e..95f26624b57c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -524,7 +524,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx) /* Check log_max_qp from HCA caps to set in current profile */ if (prof->log_max_qp == LOG_MAX_SUPPORTED_QPS) { - prof->log_max_qp = min_t(u8, 17, MLX5_CAP_GEN_MAX(dev, log_max_qp)); + prof->log_max_qp = min_t(u8, 18, MLX5_CAP_GEN_MAX(dev, log_max_qp)); } else if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < prof->log_max_qp) { mlx5_core_warn(dev, "log_max_qp value in current profile is %d, changing it to HCA capability limit (%d)\n", prof->log_max_qp, -- cgit v1.2.3 From 62d2664351ef37da34f6f3a3fd8ab34257d6fe30 Mon Sep 17 00:00:00 2001 From: Yevgeny Kliteynik Date: Mon, 25 Jul 2022 00:06:12 +0300 Subject: net/mlx5: DR, Fix SMFS steering info dump format Fix several issues in SMFS steering info dump: - Fix outdated macro value for matcher mask in the SMFS debug dump format. The existing value denotes the old format of the matcher mask, as it was used during the early stages of development, and it results in wrong parsing by the steering dump parser - wrong fields are shown in the parsed output. - Add the missing destination table to the dumped action. The missing dest table handle breaks the ability to associate between the "go to table" action and the actual table in the steering info. 
Fixes: 9222f0b27da2 ("net/mlx5: DR, Add support for dumping steering info")
Signed-off-by: Yevgeny Kliteynik
Signed-off-by: Muhammad Sammar
Reviewed-by: Alex Vesker
Signed-off-by: Saeed Mahameed
---
 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

(limited to 'drivers/net/ethernet/mellanox/mlx5')

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
index d5998ef59be4..7adcf0eec13b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
@@ -21,10 +21,11 @@ enum dr_dump_rec_type {
 	DR_DUMP_REC_TYPE_TABLE_TX = 3102,
 
 	DR_DUMP_REC_TYPE_MATCHER = 3200,
-	DR_DUMP_REC_TYPE_MATCHER_MASK = 3201,
+	DR_DUMP_REC_TYPE_MATCHER_MASK_DEPRECATED = 3201,
 	DR_DUMP_REC_TYPE_MATCHER_RX = 3202,
 	DR_DUMP_REC_TYPE_MATCHER_TX = 3203,
 	DR_DUMP_REC_TYPE_MATCHER_BUILDER = 3204,
+	DR_DUMP_REC_TYPE_MATCHER_MASK = 3205,
 
 	DR_DUMP_REC_TYPE_RULE = 3300,
 	DR_DUMP_REC_TYPE_RULE_RX_ENTRY_V0 = 3301,
@@ -114,13 +115,15 @@ dr_dump_rule_action_mem(struct seq_file *file, const u64 rule_id,
 		break;
 	case DR_ACTION_TYP_FT:
 		if (action->dest_tbl->is_fw_tbl)
-			seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
+			seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%x\n",
 				   DR_DUMP_REC_TYPE_ACTION_FT, action_id,
-				   rule_id, action->dest_tbl->fw_tbl.id);
+				   rule_id, action->dest_tbl->fw_tbl.id,
+				   -1);
 		else
-			seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
+			seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%llx\n",
 				   DR_DUMP_REC_TYPE_ACTION_FT, action_id,
-				   rule_id, action->dest_tbl->tbl->table_id);
+				   rule_id, action->dest_tbl->tbl->table_id,
+				   DR_DBG_PTR_TO_ID(action->dest_tbl->tbl));
 		break;
 
 	case DR_ACTION_TYP_CTR:
--
cgit v1.2.3

From 42b4f7f66a43cdb9216e76e595c8a9af154806da Mon Sep 17 00:00:00 2001
From: Shay Drory
Date: Mon, 27 Jun 2022 16:05:31 +0300
Subject: net/mlx5: Fix driver use of uninitialized timeout

Currently, the driver sets default values for all timeouts during function
setup. The offending commit uses a timeout before function setup, meaning
the timeout is 0 (or garbage), since no value has been set.
This may result in failure to probe the driver: mlx5_function_setup:1034:(pid 69850): Firmware over 4294967296 MS in pre-initializing state, aborting probe_one:1591:(pid 69850): mlx5_init_one failed with error code -16 Hence, set default values to timeouts during tout_init() Fixes: 37ca95e62ee2 ("net/mlx5: Increase FW pre-init timeout for health recovery") Signed-off-by: Shay Drory Reviewed-by: Moshe Shemesh Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c | 11 ++++------- drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h | 1 - drivers/net/ethernet/mellanox/mlx5/core/main.c | 2 -- 3 files changed, 4 insertions(+), 10 deletions(-) (limited to 'drivers/net/ethernet/mellanox/mlx5') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c index d758848d34d0..696e45e2bd06 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c @@ -32,20 +32,17 @@ static void tout_set(struct mlx5_core_dev *dev, u64 val, enum mlx5_timeouts_type dev->timeouts->to[type] = val; } -void mlx5_tout_set_def_val(struct mlx5_core_dev *dev) +int mlx5_tout_init(struct mlx5_core_dev *dev) { int i; - for (i = 0; i < MAX_TIMEOUT_TYPES; i++) - tout_set(dev, tout_def_sw_val[i], i); -} - -int mlx5_tout_init(struct mlx5_core_dev *dev) -{ dev->timeouts = kmalloc(sizeof(*dev->timeouts), GFP_KERNEL); if (!dev->timeouts) return -ENOMEM; + for (i = 0; i < MAX_TIMEOUT_TYPES; i++) + tout_set(dev, tout_def_sw_val[i], i); + return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h index 257c03eeab36..bc9e9aeda847 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h @@ -35,7 +35,6 @@ int mlx5_tout_init(struct mlx5_core_dev *dev); void mlx5_tout_cleanup(struct mlx5_core_dev *dev); void mlx5_tout_query_iseg(struct mlx5_core_dev *dev); int mlx5_tout_query_dtor(struct mlx5_core_dev *dev); -void mlx5_tout_set_def_val(struct mlx5_core_dev *dev); u64 _mlx5_tout_ms(struct mlx5_core_dev *dev, enum mlx5_timeouts_types type); #define mlx5_tout_ms(dev, type) _mlx5_tout_ms(dev, MLX5_TO_##type##_MS) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 95f26624b57c..ba2e5232b90b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1023,8 +1023,6 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, u64 timeout) if (mlx5_core_is_pf(dev)) pcie_print_link_status(dev->pdev); - mlx5_tout_set_def_val(dev); - /* wait for firmware to accept initialization segments configurations */ err = wait_fw_init(dev, timeout, -- cgit v1.2.3 From 8eaa1d110800fac050bab44001732747a1c39894 Mon Sep 17 00:00:00 2001 From: Maxim Mikityanskiy Date: Fri, 29 Jul 2022 15:13:56 +0300 Subject: net/mlx5e: xsk: Discard unaligned XSK frames on striding RQ Striding RQ uses MTT page mapping, where each page corresponds to an XSK frame. MTT pages have alignment requirements, and XSK frames don't have any alignment guarantees in the unaligned mode. Frames with improper alignment must be discarded, otherwise the packet data will be written at a wrong address. 
Fixes: 282c0c798f8e ("net/mlx5e: Allow XSK frames smaller than a page") Signed-off-by: Maxim Mikityanskiy Reviewed-by: Tariq Toukan Reviewed-by: Saeed Mahameed Reviewed-by: Maciej Fijalkowski Link: https://lore.kernel.org/r/20220729121356.3990867-1-maximmi@nvidia.com Signed-off-by: Paolo Abeni --- drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h | 14 ++++++++++++++ include/net/xdp_sock_drv.h | 11 +++++++++++ 2 files changed, 25 insertions(+) (limited to 'drivers/net/ethernet/mellanox/mlx5') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h index a8cfab4a393c..cc18d97d8ee0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h @@ -7,6 +7,8 @@ #include "en.h" #include +#define MLX5E_MTT_PTAG_MASK 0xfffffffffffffff8ULL + /* RX data path */ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, @@ -21,6 +23,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq, static inline int mlx5e_xsk_page_alloc_pool(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info) { +retry: dma_info->xsk = xsk_buff_alloc(rq->xsk_pool); if (!dma_info->xsk) return -ENOMEM; @@ -32,6 +35,17 @@ static inline int mlx5e_xsk_page_alloc_pool(struct mlx5e_rq *rq, */ dma_info->addr = xsk_buff_xdp_get_frame_dma(dma_info->xsk); + /* MTT page mapping has alignment requirements. If they are not + * satisfied, leak the descriptor so that it won't come again, and try + * to allocate a new one. + */ + if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) { + if (unlikely(dma_info->addr & ~MLX5E_MTT_PTAG_MASK)) { + xsk_buff_discard(dma_info->xsk); + goto retry; + } + } + return 0; } diff --git a/include/net/xdp_sock_drv.h b/include/net/xdp_sock_drv.h index 4aa031849668..0774ce97c2f1 100644 --- a/include/net/xdp_sock_drv.h +++ b/include/net/xdp_sock_drv.h @@ -95,6 +95,13 @@ static inline void xsk_buff_free(struct xdp_buff *xdp) xp_free(xskb); } +static inline void xsk_buff_discard(struct xdp_buff *xdp) +{ + struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp); + + xp_release(xskb); +} + static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size) { xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM; @@ -238,6 +245,10 @@ static inline void xsk_buff_free(struct xdp_buff *xdp) { } +static inline void xsk_buff_discard(struct xdp_buff *xdp) +{ +} + static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size) { } -- cgit v1.2.3
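
A minimal, stand-alone sketch of the alignment rule that the last patch enforces, assuming (as MLX5E_MTT_PTAG_MASK suggests) that the only requirement is for the frame's DMA address to keep its low three bits clear; the helper name and the test addresses below are illustrative and not part of the driver:

  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  /* Same mask as in the patch above: all address bits except the low three. */
  #define MLX5E_MTT_PTAG_MASK 0xfffffffffffffff8ULL

  /* A frame address can be programmed into an MTT entry only if the bits
   * outside the page-tag mask are zero, i.e. the address is 8-byte aligned.
   */
  static bool mtt_addr_is_usable(uint64_t dma_addr)
  {
          return (dma_addr & ~MLX5E_MTT_PTAG_MASK) == 0;
  }

  int main(void)
  {
          uint64_t aligned = 0x2a000;   /* hypothetical aligned frame address */
          uint64_t unaligned = 0x2a00c; /* hypothetical unaligned frame address */

          printf("%#llx usable: %d\n", (unsigned long long)aligned,
                 mtt_addr_is_usable(aligned));
          printf("%#llx usable: %d\n", (unsigned long long)unaligned,
                 mtt_addr_is_usable(unaligned));
          return 0;
  }

As the comment in the patch notes, a frame that fails this check is intentionally leaked rather than recycled, so the retry in mlx5e_xsk_page_alloc_pool cannot be handed the same unaligned frame again.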