Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Makefile | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/devlink.c | 19
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/devlink.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/fs.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/params.c | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/params.h | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/accept.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/drop.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred_nic.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c | 86
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c | 40
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c | 402
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.h | 39
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c | 89
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c | 371
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h | 137
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c | 1069
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c | 303
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c | 22
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c | 52
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 65
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 29
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 148
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 171
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 18
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 19
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 445
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.h | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eq.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/debugfs.c | 22
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 43
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 270
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c | 32
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 20
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c | 79
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c | 22
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 22
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c | 300
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_buddy.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c | 84
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c | 29
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_definer.c | 151
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c | 96
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c | 174
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c | 119
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c | 141
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c | 22
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c | 69
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h | 59
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c | 53
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr_ste_v1.h | 35
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h | 22
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/uar.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/vport.c | 30
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/wq.h | 17
85 files changed, 4439 insertions, 1225 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index a22c32aabf11..cd4a1ab0ea78 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -111,6 +111,7 @@ mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o
steering/dr_ste_v2.o \
steering/dr_cmd.o steering/dr_fw.o \
steering/dr_action.o steering/fs_dr.o \
+ steering/dr_definer.o \
steering/dr_dbg.o lib/smfs.o
#
# SF device
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index e7a894ba5c3e..d3ca745d107d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -37,7 +37,6 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/random.h>
-#include <linux/io-mapping.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/eq.h>
#include <linux/debugfs.h>
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index 66c6a7017695..ddb197970c22 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -46,10 +46,6 @@ mlx5_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
u32 running_fw, stored_fw;
int err;
- err = devlink_info_driver_name_put(req, KBUILD_MODNAME);
- if (err)
- return err;
-
err = devlink_info_version_fixed_put(req, "fw.psid", dev->board_id);
if (err)
return err;
@@ -318,6 +314,10 @@ static const struct devlink_ops mlx5_devlink_ops = {
.rate_node_new = mlx5_esw_devlink_rate_node_new,
.rate_node_del = mlx5_esw_devlink_rate_node_del,
.rate_leaf_parent_set = mlx5_esw_devlink_rate_parent_set,
+ .port_fn_roce_get = mlx5_devlink_port_fn_roce_get,
+ .port_fn_roce_set = mlx5_devlink_port_fn_roce_set,
+ .port_fn_migratable_get = mlx5_devlink_port_fn_migratable_get,
+ .port_fn_migratable_set = mlx5_devlink_port_fn_migratable_set,
#endif
#ifdef CONFIG_MLX5_SF_MANAGER
.port_new = mlx5_devlink_sf_port_new,
@@ -840,7 +840,7 @@ static const struct devlink_trap_group mlx5_trap_groups_arr[] = {
DEVLINK_TRAP_GROUP_GENERIC(L2_DROPS, 0),
};
-static int mlx5_devlink_traps_register(struct devlink *devlink)
+int mlx5_devlink_traps_register(struct devlink *devlink)
{
struct mlx5_core_dev *core_dev = devlink_priv(devlink);
int err;
@@ -862,7 +862,7 @@ err_trap_group:
return err;
}
-static void mlx5_devlink_traps_unregister(struct devlink *devlink)
+void mlx5_devlink_traps_unregister(struct devlink *devlink)
{
devl_traps_unregister(devlink, mlx5_traps_arr, ARRAY_SIZE(mlx5_traps_arr));
devl_trap_groups_unregister(devlink, mlx5_trap_groups_arr,
@@ -889,17 +889,11 @@ int mlx5_devlink_register(struct devlink *devlink)
if (err)
goto max_uc_list_err;
- err = mlx5_devlink_traps_register(devlink);
- if (err)
- goto traps_reg_err;
-
if (!mlx5_core_is_mp_slave(dev))
devlink_set_features(devlink, DEVLINK_F_RELOAD);
return 0;
-traps_reg_err:
- mlx5_devlink_max_uc_list_param_unregister(devlink);
max_uc_list_err:
mlx5_devlink_auxdev_params_unregister(devlink);
auxdev_reg_err:
@@ -910,7 +904,6 @@ auxdev_reg_err:
void mlx5_devlink_unregister(struct devlink *devlink)
{
- mlx5_devlink_traps_unregister(devlink);
mlx5_devlink_max_uc_list_param_unregister(devlink);
mlx5_devlink_auxdev_params_unregister(devlink);
devlink_params_unregister(devlink, mlx5_devlink_params,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
index 30bf4882779b..fd033df24856 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
@@ -30,6 +30,8 @@ void mlx5_devlink_trap_report(struct mlx5_core_dev *dev, int trap_id, struct sk_
int mlx5_devlink_trap_get_num_active(struct mlx5_core_dev *dev);
int mlx5_devlink_traps_get_action(struct mlx5_core_dev *dev, int trap_id,
enum devlink_trap_action *action);
+int mlx5_devlink_traps_register(struct devlink *devlink);
+void mlx5_devlink_traps_unregister(struct devlink *devlink);
struct devlink *mlx5_devlink_alloc(struct device *dev);
void mlx5_devlink_free(struct devlink *devlink);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
index c5bb79a4fa57..2732128e7a6e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
@@ -228,6 +228,17 @@ const char *parse_fs_hdrs(struct trace_seq *p,
return ret;
}
+static const char
+*fs_dest_range_field_to_str(enum mlx5_flow_dest_range_field field)
+{
+ switch (field) {
+ case MLX5_FLOW_DEST_RANGE_FIELD_PKT_LEN:
+ return "packet len";
+ default:
+ return "unknown dest range field";
+ }
+}
+
const char *parse_fs_dst(struct trace_seq *p,
const struct mlx5_flow_destination *dst,
u32 counter_id)
@@ -259,6 +270,11 @@ const char *parse_fs_dst(struct trace_seq *p,
case MLX5_FLOW_DESTINATION_TYPE_PORT:
trace_seq_printf(p, "port\n");
break;
+ case MLX5_FLOW_DESTINATION_TYPE_RANGE:
+ trace_seq_printf(p, "field=%s min=%d max=%d\n",
+ fs_dest_range_field_to_str(dst->range.field),
+ dst->range.min, dst->range.max);
+ break;
case MLX5_FLOW_DESTINATION_TYPE_NONE:
trace_seq_printf(p, "none\n");
break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 26a23047f1f3..2d77fb8a8a01 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -103,11 +103,11 @@ struct page_pool;
* size actually used at runtime, but it's not a problem when calculating static
* array sizes.
*/
-#define MLX5_UMR_MAX_MTT_SPACE \
+#define MLX5_UMR_MAX_FLEX_SPACE \
(ALIGN_DOWN(MLX5_SEND_WQE_MAX_SIZE - sizeof(struct mlx5e_umr_wqe), \
- MLX5_UMR_MTT_ALIGNMENT))
+ MLX5_UMR_FLEX_ALIGNMENT))
#define MLX5_MPWRQ_MAX_PAGES_PER_WQE \
- rounddown_pow_of_two(MLX5_UMR_MAX_MTT_SPACE / sizeof(struct mlx5_mtt))
+ rounddown_pow_of_two(MLX5_UMR_MAX_FLEX_SPACE / sizeof(struct mlx5_mtt))
#define MLX5E_MAX_RQ_NUM_MTTS \
(ALIGN_DOWN(U16_MAX, 4) * 2) /* Fits into u16 and aligned by WQEBB. */
@@ -160,7 +160,7 @@ struct page_pool;
(((wqe_size) - sizeof(struct mlx5e_umr_wqe)) / sizeof(struct mlx5_klm))
#define MLX5E_KLM_ENTRIES_PER_WQE(wqe_size)\
- ALIGN_DOWN(MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size), MLX5_UMR_KLM_ALIGNMENT)
+ ALIGN_DOWN(MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size), MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT)
#define MLX5E_MAX_KLM_PER_WQE(mdev) \
MLX5E_KLM_ENTRIES_PER_WQE(MLX5_SEND_WQE_BB * mlx5e_get_max_sq_aligned_wqebbs(mdev))
@@ -344,6 +344,7 @@ enum {
MLX5E_RQ_STATE_CSUM_FULL, /* cqe_csum_full hw bit is set */
MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, /* set when mini_cqe_resp_stride_index cap is used */
MLX5E_RQ_STATE_SHAMPO, /* set when SHAMPO cap is used */
+ MLX5E_RQ_STATE_MINI_CQE_ENHANCED, /* set when enhanced mini_cqe_cap is used */
};
struct mlx5e_cq {
@@ -370,6 +371,7 @@ struct mlx5e_cq_decomp {
u8 mini_arr_idx;
u16 left;
u16 wqe_counter;
+ bool last_cqe_title;
} ____cacheline_aligned_in_smp;
enum mlx5e_dma_map_type {
@@ -1243,4 +1245,5 @@ int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, int max_t
int mlx5e_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivi);
int mlx5e_get_vf_stats(struct net_device *dev, int vf, struct ifla_vf_stats *vf_stats);
#endif
+int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, u32 *mkey);
#endif /* __MLX5_EN_H__ */
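Editor's note on the rename above: MLX5_UMR_MAX_FLEX_SPACE is the WQE tail available for flexible UMR entries, aligned down to MLX5_UMR_FLEX_ALIGNMENT, and the page count derived from it is rounded down to a power of two. A minimal standalone sketch of that arithmetic follows; every constant (64-byte WQEBB, 16-WQEBB max WQE, 48-byte UMR header, 8-byte MTT entry, 64-byte alignment) is an illustrative assumption, not a value taken from this patch.

#include <stdio.h>

/* Illustrative stand-ins; the real values come from the mlx5 headers. */
#define SEND_WQE_BB        64                   /* basic block size (assumed) */
#define SEND_WQE_MAX_SIZE  (16 * SEND_WQE_BB)   /* assumed max WQE size */
#define UMR_WQE_HDR        48                   /* assumed UMR WQE header size */
#define MTT_ENTRY          8                    /* assumed sizeof(struct mlx5_mtt) */
#define FLEX_ALIGNMENT     64                   /* assumed MLX5_UMR_FLEX_ALIGNMENT */

#define ALIGN_DOWN(x, a)   ((x) & ~((a) - 1))

static unsigned int rounddown_pow_of_two(unsigned int n)
{
	while (n & (n - 1))
		n &= n - 1;   /* clear the lowest set bit until a power of two remains */
	return n;
}

int main(void)
{
	unsigned int flex_space = ALIGN_DOWN(SEND_WQE_MAX_SIZE - UMR_WQE_HDR,
					     FLEX_ALIGNMENT);
	unsigned int pages = rounddown_pow_of_two(flex_space / MTT_ENTRY);

	/* With the assumed numbers: flex_space = 960 bytes, pages = 64. */
	printf("flex space %u bytes -> %u pages per MPWQE\n", flex_space, pages);
	return 0;
}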
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
index b69f9d10ccbd..83adaabf59f5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
@@ -51,13 +51,6 @@ int mlx5e_devlink_port_register(struct mlx5e_priv *priv)
return ret;
}
-void mlx5e_devlink_port_type_eth_set(struct mlx5e_priv *priv)
-{
- struct devlink_port *dl_port = mlx5e_devlink_get_dl_port(priv);
-
- devlink_port_type_eth_set(dl_port, priv->netdev);
-}
-
void mlx5e_devlink_port_unregister(struct mlx5e_priv *priv)
{
struct devlink_port *dl_port = mlx5e_devlink_get_dl_port(priv);
@@ -69,13 +62,3 @@ void mlx5e_devlink_port_unregister(struct mlx5e_priv *priv)
if (!(priv->mdev->priv.flags & MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW))
devl_unlock(devlink);
}
-
-struct devlink_port *mlx5e_get_devlink_port(struct net_device *dev)
-{
- struct mlx5e_priv *priv = netdev_priv(dev);
-
- if (!netif_device_present(dev))
- return NULL;
-
- return mlx5e_devlink_get_dl_port(priv);
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h
index 10b50feb9883..4f238d4fff55 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h
@@ -9,8 +9,6 @@
int mlx5e_devlink_port_register(struct mlx5e_priv *priv);
void mlx5e_devlink_port_unregister(struct mlx5e_priv *priv);
-void mlx5e_devlink_port_type_eth_set(struct mlx5e_priv *priv);
-struct devlink_port *mlx5e_get_devlink_port(struct net_device *dev);
static inline struct devlink_port *
mlx5e_devlink_get_dl_port(struct mlx5e_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index bf2741eb7f9b..379c6dc9a3be 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -84,7 +84,8 @@ enum {
MLX5E_ARFS_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
#endif
#ifdef CONFIG_MLX5_EN_IPSEC
- MLX5E_ACCEL_FS_ESP_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
+ MLX5E_ACCEL_FS_POL_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
+ MLX5E_ACCEL_FS_ESP_FT_LEVEL,
MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL,
#endif
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 29dd3a04c154..585bdc8383ee 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -107,7 +107,7 @@ u8 mlx5e_mpwrq_log_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift,
/* Keep in sync with MLX5_MPWRQ_MAX_PAGES_PER_WQE. */
max_wqe_size = mlx5e_get_max_sq_aligned_wqebbs(mdev) * MLX5_SEND_WQE_BB;
max_pages_per_wqe = ALIGN_DOWN(max_wqe_size - sizeof(struct mlx5e_umr_wqe),
- MLX5_UMR_MTT_ALIGNMENT) / umr_entry_size;
+ MLX5_UMR_FLEX_ALIGNMENT) / umr_entry_size;
max_log_mpwqe_size = ilog2(max_pages_per_wqe) + page_shift;
WARN_ON_ONCE(max_log_mpwqe_size < MLX5E_ORDER2_MAX_PACKET_MTU);
@@ -146,7 +146,7 @@ u16 mlx5e_mpwrq_umr_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift,
u16 umr_wqe_sz;
umr_wqe_sz = sizeof(struct mlx5e_umr_wqe) +
- ALIGN(pages_per_wqe * umr_entry_size, MLX5_UMR_MTT_ALIGNMENT);
+ ALIGN(pages_per_wqe * umr_entry_size, MLX5_UMR_FLEX_ALIGNMENT);
WARN_ON_ONCE(DIV_ROUND_UP(umr_wqe_sz, MLX5_SEND_WQE_DS) > MLX5_WQE_CTRL_DS_MASK);
@@ -607,14 +607,6 @@ void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
params->log_rq_mtu_frames = is_kdump_kernel() ?
MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
-
- mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
- params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
- params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ?
- BIT(mlx5e_mpwqe_get_log_rq_size(mdev, params, NULL)) :
- BIT(params->log_rq_mtu_frames),
- BIT(mlx5e_mpwqe_get_log_stride_size(mdev, params, NULL)),
- MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
}
void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
@@ -852,6 +844,10 @@ static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev,
if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
MLX5_SET(cqc, cqc, mini_cqe_res_format, hw_stridx ?
MLX5_CQE_FORMAT_CSUM_STRIDX : MLX5_CQE_FORMAT_CSUM);
+ MLX5_SET(cqc, cqc, cqe_compression_layout,
+ MLX5_CAP_GEN(mdev, enhanced_cqe_compression) ?
+ MLX5_CQE_COMPRESS_LAYOUT_ENHANCED :
+ MLX5_CQE_COMPRESS_LAYOUT_BASIC);
MLX5_SET(cqc, cqc, cqe_comp_en, 1);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
index 034debd140bc..c9be6eb88012 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
@@ -154,4 +154,18 @@ int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
int mlx5e_validate_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
+static inline void mlx5e_params_print_info(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params)
+{
+ mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d %s)\n",
+ params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
+ params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ?
+ BIT(mlx5e_mpwqe_get_log_rq_size(mdev, params, NULL)) :
+ BIT(params->log_rq_mtu_frames),
+ BIT(mlx5e_mpwqe_get_log_stride_size(mdev, params, NULL)),
+ MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS),
+ MLX5_CAP_GEN(mdev, enhanced_cqe_compression) ?
+ "enhanced" : "basic");
+};
+
#endif /* __MLX5_EN_PARAMS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
index fac7e3ff2674..b08339d986d5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
@@ -690,7 +690,6 @@ static bool mlx5e_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb,
err = mapping_find(uplink_priv->tunnel_mapping, tun_id, &key);
if (err) {
- WARN_ON_ONCE(true);
netdev_dbg(priv->netdev,
"Couldn't find tunnel for tun_id: %d, err: %d\n",
tun_id, err);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/accept.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/accept.c
index 21aab96357b5..a278f52d52b0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/accept.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/accept.c
@@ -28,4 +28,5 @@ tc_act_parse_accept(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5e_tc_act mlx5e_tc_act_accept = {
.can_offload = tc_act_can_offload_accept,
.parse_action = tc_act_parse_accept,
+ .is_terminating_action = true,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c
index 3337241cfd84..eba0c8698926 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c
@@ -11,7 +11,7 @@ static struct mlx5e_tc_act *tc_acts_fdb[NUM_FLOW_ACTIONS] = {
[FLOW_ACTION_DROP] = &mlx5e_tc_act_drop,
[FLOW_ACTION_TRAP] = &mlx5e_tc_act_trap,
[FLOW_ACTION_GOTO] = &mlx5e_tc_act_goto,
- [FLOW_ACTION_REDIRECT] = &mlx5e_tc_act_mirred,
+ [FLOW_ACTION_REDIRECT] = &mlx5e_tc_act_redirect,
[FLOW_ACTION_MIRRED] = &mlx5e_tc_act_mirred,
[FLOW_ACTION_REDIRECT_INGRESS] = &mlx5e_tc_act_redirect_ingress,
[FLOW_ACTION_VLAN_PUSH] = &mlx5e_tc_act_vlan,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
index e1570ff056ae..8346557eeaf6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
@@ -32,6 +32,11 @@ struct mlx5e_tc_act_parse_state {
struct mlx5_tc_ct_priv *ct_priv;
};
+struct mlx5e_tc_act_branch_ctrl {
+ enum flow_action_id act_id;
+ u32 extval;
+};
+
struct mlx5e_tc_act {
bool (*can_offload)(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
@@ -60,6 +65,12 @@ struct mlx5e_tc_act {
int (*stats_action)(struct mlx5e_priv *priv,
struct flow_offload_action *fl_act);
+
+ bool (*get_branch_ctrl)(const struct flow_action_entry *act,
+ struct mlx5e_tc_act_branch_ctrl *cond_true,
+ struct mlx5e_tc_act_branch_ctrl *cond_false);
+
+ bool is_terminating_action;
};
struct mlx5e_tc_flow_action {
@@ -81,6 +92,7 @@ extern struct mlx5e_tc_act mlx5e_tc_act_vlan_mangle;
extern struct mlx5e_tc_act mlx5e_tc_act_mpls_push;
extern struct mlx5e_tc_act mlx5e_tc_act_mpls_pop;
extern struct mlx5e_tc_act mlx5e_tc_act_mirred;
+extern struct mlx5e_tc_act mlx5e_tc_act_redirect;
extern struct mlx5e_tc_act mlx5e_tc_act_mirred_nic;
extern struct mlx5e_tc_act mlx5e_tc_act_ct;
extern struct mlx5e_tc_act mlx5e_tc_act_sample;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/drop.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/drop.c
index dd025a95c439..7d16aeabb119 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/drop.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/drop.c
@@ -27,4 +27,5 @@ tc_act_parse_drop(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5e_tc_act mlx5e_tc_act_drop = {
.can_offload = tc_act_can_offload_drop,
.parse_action = tc_act_parse_drop,
+ .is_terminating_action = true,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c
index 25174f68613e..0923e6db2d0a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c
@@ -121,4 +121,5 @@ struct mlx5e_tc_act mlx5e_tc_act_goto = {
.can_offload = tc_act_can_offload_goto,
.parse_action = tc_act_parse_goto,
.post_parse = tc_act_post_parse_goto,
+ .is_terminating_action = true,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
index 4ac7de3f6afa..78c427b38048 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
@@ -334,4 +334,11 @@ tc_act_parse_mirred(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5e_tc_act mlx5e_tc_act_mirred = {
.can_offload = tc_act_can_offload_mirred,
.parse_action = tc_act_parse_mirred,
+ .is_terminating_action = false,
+};
+
+struct mlx5e_tc_act mlx5e_tc_act_redirect = {
+ .can_offload = tc_act_can_offload_mirred,
+ .parse_action = tc_act_parse_mirred,
+ .is_terminating_action = true,
};
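Editor's note: the new is_terminating_action flag (true for accept, drop, goto, mirred_nic, and the new redirect descriptor; false for mirred) lets a parser reject actions placed after a terminating one. A hypothetical consumer sketch, assuming a validation pass over the parsed actions; only tc_acts_fdb, struct mlx5e_tc_act, and the flag itself come from this patch:

/* Hypothetical: ensure only the last action in the list terminates. */
static bool actions_terminate_correctly(struct flow_action *flow_action)
{
	struct flow_action_entry *act;
	int i;

	flow_action_for_each(i, act, flow_action) {
		struct mlx5e_tc_act *tc_act = tc_acts_fdb[act->id];

		if (!tc_act)
			return false;

		/* A terminating action anywhere but last is invalid. */
		if (tc_act->is_terminating_action &&
		    i != flow_action->num_entries - 1)
			return false;
	}

	return true;
}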
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred_nic.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred_nic.c
index 90b4c1b34776..7f409692b18f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred_nic.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred_nic.c
@@ -48,4 +48,5 @@ tc_act_parse_mirred_nic(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5e_tc_act mlx5e_tc_act_mirred_nic = {
.can_offload = tc_act_can_offload_mirred_nic,
.parse_action = tc_act_parse_mirred_nic,
+ .is_terminating_action = true,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c
index c8e5ca65bb6e..512d43148922 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c
@@ -3,6 +3,45 @@
#include "act.h"
#include "en/tc_priv.h"
+#include "fs_core.h"
+
+static bool police_act_validate_control(enum flow_action_id act_id,
+ struct netlink_ext_ack *extack)
+{
+ if (act_id != FLOW_ACTION_PIPE &&
+ act_id != FLOW_ACTION_ACCEPT &&
+ act_id != FLOW_ACTION_JUMP &&
+ act_id != FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform-exceed action is not pipe, ok, jump or drop");
+ return false;
+ }
+
+ return true;
+}
+
+static int police_act_validate(const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ if (!police_act_validate_control(act->police.exceed.act_id, extack) ||
+ !police_act_validate_control(act->police.notexceed.act_id, extack))
+ return -EOPNOTSUPP;
+
+ if (act->police.peakrate_bytes_ps ||
+ act->police.avrate || act->police.overhead) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when peakrate/avrate/overhead is configured");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.rate_pkt_ps) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "QoS offload not support packets per second");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
static bool
tc_act_can_offload_police(struct mlx5e_tc_act_parse_state *parse_state,
@@ -10,14 +49,10 @@ tc_act_can_offload_police(struct mlx5e_tc_act_parse_state *parse_state,
int act_index,
struct mlx5_flow_attr *attr)
{
- if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
- act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
- NL_SET_ERR_MSG_MOD(parse_state->extack,
- "Offload not supported when conform action is not pipe or ok");
- return false;
- }
- if (mlx5e_policer_validate(parse_state->flow_action, act,
- parse_state->extack))
+ int err;
+
+ err = police_act_validate(act, parse_state->extack);
+ if (err)
return false;
return !!mlx5e_get_flow_meters(parse_state->flow->priv->mdev);
@@ -37,6 +72,8 @@ fill_meter_params_from_act(const struct flow_action_entry *act,
params->mode = MLX5_RATE_LIMIT_PPS;
params->rate = act->police.rate_pkt_ps;
params->burst = act->police.burst_pkt;
+ } else if (act->police.mtu) {
+ params->mtu = act->police.mtu;
} else {
return -EOPNOTSUPP;
}
@@ -50,14 +87,25 @@ tc_act_parse_police(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
+ enum mlx5_flow_namespace_type ns = mlx5e_get_flow_namespace(parse_state->flow);
+ struct mlx5e_flow_meter_params *params = &attr->meter_attr.params;
int err;
- err = fill_meter_params_from_act(act, &attr->meter_attr.params);
+ err = fill_meter_params_from_act(act, params);
if (err)
return err;
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO;
- attr->exe_aso_type = MLX5_EXE_ASO_FLOW_METER;
+ if (params->mtu) {
+ if (!(mlx5_fs_get_capabilities(priv->mdev, ns) &
+ MLX5_FLOW_STEERING_CAP_MATCH_RANGES))
+ return -EOPNOTSUPP;
+
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ attr->flags |= MLX5_ATTR_FLAG_MTU;
+ } else {
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO;
+ attr->exe_aso_type = MLX5_EXE_ASO_FLOW_METER;
+ }
return 0;
}
@@ -79,7 +127,7 @@ tc_act_police_offload(struct mlx5e_priv *priv,
struct mlx5e_flow_meter_handle *meter;
int err = 0;
- err = mlx5e_policer_validate(&fl_act->action, act, fl_act->extack);
+ err = police_act_validate(act, fl_act->extack);
if (err)
return err;
@@ -147,6 +195,19 @@ tc_act_police_stats(struct mlx5e_priv *priv,
return 0;
}
+static bool
+tc_act_police_get_branch_ctrl(const struct flow_action_entry *act,
+ struct mlx5e_tc_act_branch_ctrl *cond_true,
+ struct mlx5e_tc_act_branch_ctrl *cond_false)
+{
+ cond_true->act_id = act->police.notexceed.act_id;
+ cond_true->extval = act->police.notexceed.extval;
+
+ cond_false->act_id = act->police.exceed.act_id;
+ cond_false->extval = act->police.exceed.extval;
+ return true;
+}
+
struct mlx5e_tc_act mlx5e_tc_act_police = {
.can_offload = tc_act_can_offload_police,
.parse_action = tc_act_parse_police,
@@ -154,4 +215,5 @@ struct mlx5e_tc_act mlx5e_tc_act_police = {
.offload_action = tc_act_police_offload,
.destroy_action = tc_act_police_destroy,
.stats_action = tc_act_police_stats,
+ .get_branch_ctrl = tc_act_police_get_branch_ctrl,
};
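Editor's note: get_branch_ctrl exposes the two control branches of a police action so the offload path can build conform/exceed destinations. A hypothetical caller sketch; only struct mlx5e_tc_act_branch_ctrl and the callback signature come from this patch, the surrounding helper is an assumption:

/* Hypothetical: resolve the true/false branches of a branching action. */
static int resolve_police_branches(struct mlx5e_tc_act *tc_act,
				   const struct flow_action_entry *act)
{
	struct mlx5e_tc_act_branch_ctrl cond_true, cond_false;

	if (!tc_act->get_branch_ctrl)
		return 0; /* unbranched action, nothing to split */

	if (!tc_act->get_branch_ctrl(act, &cond_true, &cond_false))
		return -EOPNOTSUPP;

	/* For police: cond_true carries the conform (notexceed) control,
	 * cond_false the exceed control, e.g. FLOW_ACTION_PIPE vs
	 * FLOW_ACTION_DROP, with extval carrying a jump target if any.
	 */
	return 0;
}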
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c
index 53b270f652b9..915ce201aeb2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c
@@ -3,6 +3,7 @@
#include "act.h"
#include "en/tc_priv.h"
+#include "eswitch.h"
static bool
tc_act_can_offload_trap(struct mlx5e_tc_act_parse_state *parse_state,
@@ -10,13 +11,6 @@ tc_act_can_offload_trap(struct mlx5e_tc_act_parse_state *parse_state,
int act_index,
struct mlx5_flow_attr *attr)
{
- struct netlink_ext_ack *extack = parse_state->extack;
-
- if (parse_state->flow_action->num_entries != 1) {
- NL_SET_ERR_MSG_MOD(extack, "action trap is supported as a sole action only");
- return false;
- }
-
return true;
}
@@ -27,7 +21,7 @@ tc_act_parse_trap(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5_flow_attr *attr)
{
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- attr->flags |= MLX5_ATTR_FLAG_SLOW_PATH;
+ attr->dest_ft = mlx5_eswitch_get_slow_fdb(priv->mdev->priv.eswitch);
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
index be74e1403328..78af8a3175bf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
@@ -162,7 +162,6 @@ mlx5e_tc_meter_modify(struct mlx5_core_dev *mdev,
MLX5_ACCESS_ASO_OPC_MOD_FLOW_METER);
aso_ctrl = &aso_wqe->aso_ctrl;
- memset(aso_ctrl, 0, sizeof(*aso_ctrl));
aso_ctrl->data_mask_mode = MLX5_ASO_DATA_MASK_MODE_BYTEWISE_64BYTE << 6;
aso_ctrl->condition_1_0_operand = MLX5_ASO_ALWAYS_TRUE |
MLX5_ASO_ALWAYS_TRUE << 4;
@@ -241,7 +240,7 @@ mlx5e_flow_meter_destroy_aso_obj(struct mlx5_core_dev *mdev, u32 obj_id)
}
static struct mlx5e_flow_meter_handle *
-__mlx5e_flow_meter_alloc(struct mlx5e_flow_meters *flow_meters)
+__mlx5e_flow_meter_alloc(struct mlx5e_flow_meters *flow_meters, bool alloc_aso)
{
struct mlx5_core_dev *mdev = flow_meters->mdev;
struct mlx5e_flow_meter_aso_obj *meters_obj;
@@ -257,16 +256,19 @@ __mlx5e_flow_meter_alloc(struct mlx5e_flow_meters *flow_meters)
counter = mlx5_fc_create(mdev, true);
if (IS_ERR(counter)) {
err = PTR_ERR(counter);
- goto err_red_counter;
+ goto err_drop_counter;
}
- meter->red_counter = counter;
+ meter->drop_counter = counter;
counter = mlx5_fc_create(mdev, true);
if (IS_ERR(counter)) {
err = PTR_ERR(counter);
- goto err_green_counter;
+ goto err_act_counter;
}
- meter->green_counter = counter;
+ meter->act_counter = counter;
+
+ if (!alloc_aso)
+ goto no_aso;
meters_obj = list_first_entry_or_null(&flow_meters->partial_list,
struct mlx5e_flow_meter_aso_obj,
@@ -300,11 +302,12 @@ __mlx5e_flow_meter_alloc(struct mlx5e_flow_meters *flow_meters)
}
bitmap_set(meters_obj->meters_map, pos, 1);
- meter->flow_meters = flow_meters;
meter->meters_obj = meters_obj;
meter->obj_id = meters_obj->base_id + pos / 2;
meter->idx = pos % 2;
+no_aso:
+ meter->flow_meters = flow_meters;
mlx5_core_dbg(mdev, "flow meter allocated, obj_id=0x%x, index=%d\n",
meter->obj_id, meter->idx);
@@ -313,10 +316,10 @@ __mlx5e_flow_meter_alloc(struct mlx5e_flow_meters *flow_meters)
err_mem:
mlx5e_flow_meter_destroy_aso_obj(mdev, id);
err_create:
- mlx5_fc_destroy(mdev, meter->green_counter);
-err_green_counter:
- mlx5_fc_destroy(mdev, meter->red_counter);
-err_red_counter:
+ mlx5_fc_destroy(mdev, meter->act_counter);
+err_act_counter:
+ mlx5_fc_destroy(mdev, meter->drop_counter);
+err_drop_counter:
kfree(meter);
return ERR_PTR(err);
}
@@ -329,8 +332,11 @@ __mlx5e_flow_meter_free(struct mlx5e_flow_meter_handle *meter)
struct mlx5e_flow_meter_aso_obj *meters_obj;
int n, pos;
- mlx5_fc_destroy(mdev, meter->green_counter);
- mlx5_fc_destroy(mdev, meter->red_counter);
+ mlx5_fc_destroy(mdev, meter->act_counter);
+ mlx5_fc_destroy(mdev, meter->drop_counter);
+
+ if (meter->params.mtu)
+ goto out_no_aso;
meters_obj = meter->meters_obj;
pos = (meter->obj_id - meters_obj->base_id) * 2 + meter->idx;
@@ -345,6 +351,7 @@ __mlx5e_flow_meter_free(struct mlx5e_flow_meter_handle *meter)
list_add(&meters_obj->entry, &flow_meters->partial_list);
}
+out_no_aso:
mlx5_core_dbg(mdev, "flow meter freed, obj_id=0x%x, index=%d\n",
meter->obj_id, meter->idx);
kfree(meter);
@@ -409,12 +416,13 @@ mlx5e_tc_meter_alloc(struct mlx5e_flow_meters *flow_meters,
{
struct mlx5e_flow_meter_handle *meter;
- meter = __mlx5e_flow_meter_alloc(flow_meters);
+ meter = __mlx5e_flow_meter_alloc(flow_meters, !params->mtu);
if (IS_ERR(meter))
return meter;
hash_add(flow_meters->hashtbl, &meter->hlist, params->index);
meter->params.index = params->index;
+ meter->params.mtu = params->mtu;
meter->refcnt++;
return meter;
@@ -575,8 +583,8 @@ mlx5e_tc_meter_get_stats(struct mlx5e_flow_meter_handle *meter,
u64 bytes1, packets1, lastuse1;
u64 bytes2, packets2, lastuse2;
- mlx5_fc_query_cached(meter->green_counter, &bytes1, &packets1, &lastuse1);
- mlx5_fc_query_cached(meter->red_counter, &bytes2, &packets2, &lastuse2);
+ mlx5_fc_query_cached(meter->act_counter, &bytes1, &packets1, &lastuse1);
+ mlx5_fc_query_cached(meter->drop_counter, &bytes2, &packets2, &lastuse2);
*bytes = bytes1 + bytes2;
*packets = packets1 + packets2;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h
index 6de6e8a16327..9b795cd106bb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h
@@ -20,6 +20,7 @@ struct mlx5e_flow_meter_params {
u32 index;
u64 rate;
u64 burst;
+ u32 mtu;
};
struct mlx5e_flow_meter_handle {
@@ -32,8 +33,8 @@ struct mlx5e_flow_meter_handle {
struct hlist_node hlist;
struct mlx5e_flow_meter_params params;
- struct mlx5_fc *green_counter;
- struct mlx5_fc *red_counter;
+ struct mlx5_fc *act_counter;
+ struct mlx5_fc *drop_counter;
};
struct mlx5e_meter_attr {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c
index 8b77e822810e..8d7d761482d2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c
@@ -8,23 +8,56 @@
#define MLX5_PACKET_COLOR_BITS MLX5_REG_MAPPING_MBITS(PACKET_COLOR_TO_REG)
#define MLX5_PACKET_COLOR_MASK MLX5_REG_MAPPING_MASK(PACKET_COLOR_TO_REG)
-struct mlx5e_post_meter_priv {
+struct mlx5e_post_meter_rate_table {
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_group *fg;
+ struct mlx5_flow_handle *green_rule;
+ struct mlx5_flow_attr *green_attr;
+ struct mlx5_flow_handle *red_rule;
+ struct mlx5_flow_attr *red_attr;
+};
+
+struct mlx5e_post_meter_mtu_table {
struct mlx5_flow_table *ft;
struct mlx5_flow_group *fg;
- struct mlx5_flow_handle *fwd_green_rule;
- struct mlx5_flow_handle *drop_red_rule;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_attr *attr;
+};
+
+struct mlx5e_post_meter_mtu_tables {
+ struct mlx5e_post_meter_mtu_table green_table;
+ struct mlx5e_post_meter_mtu_table red_table;
+};
+
+struct mlx5e_post_meter_priv {
+ enum mlx5e_post_meter_type type;
+ union {
+ struct mlx5e_post_meter_rate_table rate_steering_table;
+ struct mlx5e_post_meter_mtu_tables mtu_tables;
+ };
};
struct mlx5_flow_table *
mlx5e_post_meter_get_ft(struct mlx5e_post_meter_priv *post_meter)
{
- return post_meter->ft;
+ return post_meter->rate_steering_table.ft;
}
-static int
+struct mlx5_flow_table *
+mlx5e_post_meter_get_mtu_true_ft(struct mlx5e_post_meter_priv *post_meter)
+{
+ return post_meter->mtu_tables.green_table.ft;
+}
+
+struct mlx5_flow_table *
+mlx5e_post_meter_get_mtu_false_ft(struct mlx5e_post_meter_priv *post_meter)
+{
+ return post_meter->mtu_tables.red_table.ft;
+}
+
+static struct mlx5_flow_table *
mlx5e_post_meter_table_create(struct mlx5e_priv *priv,
- enum mlx5_flow_namespace_type ns_type,
- struct mlx5e_post_meter_priv *post_meter)
+ enum mlx5_flow_namespace_type ns_type)
{
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_namespace *root_ns;
@@ -32,7 +65,7 @@ mlx5e_post_meter_table_create(struct mlx5e_priv *priv,
root_ns = mlx5_get_flow_namespace(priv->mdev, ns_type);
if (!root_ns) {
mlx5_core_warn(priv->mdev, "Failed to get namespace for flow meter\n");
- return -EOPNOTSUPP;
+ return ERR_PTR(-EOPNOTSUPP);
}
ft_attr.flags = MLX5_FLOW_TABLE_UNMANAGED;
@@ -40,19 +73,14 @@ mlx5e_post_meter_table_create(struct mlx5e_priv *priv,
ft_attr.max_fte = 2;
ft_attr.level = 1;
- post_meter->ft = mlx5_create_flow_table(root_ns, &ft_attr);
- if (IS_ERR(post_meter->ft)) {
- mlx5_core_warn(priv->mdev, "Failed to create post_meter table\n");
- return PTR_ERR(post_meter->ft);
- }
-
- return 0;
+ return mlx5_create_flow_table(root_ns, &ft_attr);
}
static int
-mlx5e_post_meter_fg_create(struct mlx5e_priv *priv,
- struct mlx5e_post_meter_priv *post_meter)
+mlx5e_post_meter_rate_fg_create(struct mlx5e_priv *priv,
+ struct mlx5e_post_meter_priv *post_meter)
{
+ struct mlx5e_post_meter_rate_table *table = &post_meter->rate_steering_table;
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
void *misc2, *match_criteria;
u32 *flow_group_in;
@@ -71,25 +99,58 @@ mlx5e_post_meter_fg_create(struct mlx5e_priv *priv,
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
- post_meter->fg = mlx5_create_flow_group(post_meter->ft, flow_group_in);
- if (IS_ERR(post_meter->fg)) {
+ table->fg = mlx5_create_flow_group(table->ft, flow_group_in);
+ if (IS_ERR(table->fg)) {
mlx5_core_warn(priv->mdev, "Failed to create post_meter flow group\n");
- err = PTR_ERR(post_meter->fg);
+ err = PTR_ERR(table->fg);
}
kvfree(flow_group_in);
return err;
}
+static struct mlx5_flow_handle *
+mlx5e_post_meter_add_rule(struct mlx5e_priv *priv,
+ struct mlx5e_post_meter_priv *post_meter,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_flow_attr *attr,
+ struct mlx5_fc *act_counter,
+ struct mlx5_fc *drop_counter)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_flow_handle *ret;
+
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_DROP)
+ attr->counter = drop_counter;
+ else
+ attr->counter = act_counter;
+
+ attr->flags |= MLX5_ATTR_FLAG_NO_IN_PORT;
+ attr->outer_match_level = MLX5_MATCH_NONE;
+ attr->chain = 0;
+ attr->prio = 0;
+
+ ret = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
+
+ /* We did not create the counter, so we can't delete it.
+ * Avoid freeing the counter when the attr is deleted in free_branching_attr
+ */
+ attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
+
+ return ret;
+}
+
static int
-mlx5e_post_meter_rules_create(struct mlx5e_priv *priv,
- struct mlx5e_post_meter_priv *post_meter,
- struct mlx5e_post_act *post_act,
- struct mlx5_fc *green_counter,
- struct mlx5_fc *red_counter)
-{
- struct mlx5_flow_destination dest[2] = {};
- struct mlx5_flow_act flow_act = {};
+mlx5e_post_meter_rate_rules_create(struct mlx5e_priv *priv,
+ struct mlx5e_post_meter_priv *post_meter,
+ struct mlx5e_post_act *post_act,
+ struct mlx5_fc *act_counter,
+ struct mlx5_fc *drop_counter,
+ struct mlx5_flow_attr *green_attr,
+ struct mlx5_flow_attr *red_attr)
+{
+ struct mlx5e_post_meter_rate_table *table = &post_meter->rate_steering_table;
struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec;
int err;
@@ -100,72 +161,242 @@ mlx5e_post_meter_rules_create(struct mlx5e_priv *priv,
mlx5e_tc_match_to_reg_match(spec, PACKET_COLOR_TO_REG,
MLX5_FLOW_METER_COLOR_RED, MLX5_PACKET_COLOR_MASK);
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
- flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
- dest[0].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[0].counter_id = mlx5_fc_id(red_counter);
-
- rule = mlx5_add_flow_rules(post_meter->ft, spec, &flow_act, dest, 1);
+ red_attr->ft = post_meter->rate_steering_table.ft;
+ rule = mlx5e_post_meter_add_rule(priv, post_meter, spec, red_attr,
+ act_counter, drop_counter);
if (IS_ERR(rule)) {
- mlx5_core_warn(priv->mdev, "Failed to create post_meter flow drop rule\n");
+ mlx5_core_warn(priv->mdev, "Failed to create post_meter exceed rule\n");
err = PTR_ERR(rule);
goto err_red;
}
- post_meter->drop_red_rule = rule;
+ table->red_rule = rule;
+ table->red_attr = red_attr;
mlx5e_tc_match_to_reg_match(spec, PACKET_COLOR_TO_REG,
MLX5_FLOW_METER_COLOR_GREEN, MLX5_PACKET_COLOR_MASK);
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
- dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest[0].ft = mlx5e_tc_post_act_get_ft(post_act);
- dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[1].counter_id = mlx5_fc_id(green_counter);
-
- rule = mlx5_add_flow_rules(post_meter->ft, spec, &flow_act, dest, 2);
+ green_attr->ft = post_meter->rate_steering_table.ft;
+ rule = mlx5e_post_meter_add_rule(priv, post_meter, spec, green_attr,
+ act_counter, drop_counter);
if (IS_ERR(rule)) {
- mlx5_core_warn(priv->mdev, "Failed to create post_meter flow fwd rule\n");
+ mlx5_core_warn(priv->mdev, "Failed to create post_meter notexceed rule\n");
err = PTR_ERR(rule);
goto err_green;
}
- post_meter->fwd_green_rule = rule;
+ table->green_rule = rule;
+ table->green_attr = green_attr;
kvfree(spec);
return 0;
err_green:
- mlx5_del_flow_rules(post_meter->drop_red_rule);
+ mlx5_del_flow_rules(table->red_rule);
err_red:
kvfree(spec);
return err;
}
static void
-mlx5e_post_meter_rules_destroy(struct mlx5e_post_meter_priv *post_meter)
+mlx5e_post_meter_rate_rules_destroy(struct mlx5_eswitch *esw,
+ struct mlx5e_post_meter_priv *post_meter)
{
- mlx5_del_flow_rules(post_meter->drop_red_rule);
- mlx5_del_flow_rules(post_meter->fwd_green_rule);
+ struct mlx5e_post_meter_rate_table *rate_table = &post_meter->rate_steering_table;
+
+ mlx5_eswitch_del_offloaded_rule(esw, rate_table->red_rule, rate_table->red_attr);
+ mlx5_eswitch_del_offloaded_rule(esw, rate_table->green_rule, rate_table->green_attr);
}
static void
-mlx5e_post_meter_fg_destroy(struct mlx5e_post_meter_priv *post_meter)
+mlx5e_post_meter_rate_fg_destroy(struct mlx5e_post_meter_priv *post_meter)
{
- mlx5_destroy_flow_group(post_meter->fg);
+ mlx5_destroy_flow_group(post_meter->rate_steering_table.fg);
}
static void
-mlx5e_post_meter_table_destroy(struct mlx5e_post_meter_priv *post_meter)
+mlx5e_post_meter_rate_table_destroy(struct mlx5e_post_meter_priv *post_meter)
+{
+ mlx5_destroy_flow_table(post_meter->rate_steering_table.ft);
+}
+
+static void
+mlx5e_post_meter_mtu_rules_destroy(struct mlx5e_post_meter_priv *post_meter)
+{
+ struct mlx5e_post_meter_mtu_tables *mtu_tables = &post_meter->mtu_tables;
+
+ mlx5_del_flow_rules(mtu_tables->green_table.rule);
+ mlx5_del_flow_rules(mtu_tables->red_table.rule);
+}
+
+static void
+mlx5e_post_meter_mtu_fg_destroy(struct mlx5e_post_meter_priv *post_meter)
+{
+ struct mlx5e_post_meter_mtu_tables *mtu_tables = &post_meter->mtu_tables;
+
+ mlx5_destroy_flow_group(mtu_tables->green_table.fg);
+ mlx5_destroy_flow_group(mtu_tables->red_table.fg);
+}
+
+static void
+mlx5e_post_meter_mtu_table_destroy(struct mlx5e_post_meter_priv *post_meter)
+{
+ struct mlx5e_post_meter_mtu_tables *mtu_tables = &post_meter->mtu_tables;
+
+ mlx5_destroy_flow_table(mtu_tables->green_table.ft);
+ mlx5_destroy_flow_table(mtu_tables->red_table.ft);
+}
+
+static int
+mlx5e_post_meter_rate_create(struct mlx5e_priv *priv,
+ enum mlx5_flow_namespace_type ns_type,
+ struct mlx5e_post_act *post_act,
+ struct mlx5_fc *act_counter,
+ struct mlx5_fc *drop_counter,
+ struct mlx5e_post_meter_priv *post_meter,
+ struct mlx5_flow_attr *green_attr,
+ struct mlx5_flow_attr *red_attr)
+{
+ struct mlx5_flow_table *ft;
+ int err;
+
+ post_meter->type = MLX5E_POST_METER_RATE;
+
+ ft = mlx5e_post_meter_table_create(priv, ns_type);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ mlx5_core_warn(priv->mdev, "Failed to create post_meter table\n");
+ goto err_ft;
+ }
+
+ post_meter->rate_steering_table.ft = ft;
+
+ err = mlx5e_post_meter_rate_fg_create(priv, post_meter);
+ if (err)
+ goto err_fg;
+
+ err = mlx5e_post_meter_rate_rules_create(priv, post_meter, post_act,
+ act_counter, drop_counter,
+ green_attr, red_attr);
+ if (err)
+ goto err_rules;
+
+ return 0;
+
+err_rules:
+ mlx5e_post_meter_rate_fg_destroy(post_meter);
+err_fg:
+ mlx5e_post_meter_rate_table_destroy(post_meter);
+err_ft:
+ return err;
+}
+
+static int
+mlx5e_post_meter_create_mtu_table(struct mlx5e_priv *priv,
+ enum mlx5_flow_namespace_type ns_type,
+ struct mlx5e_post_meter_mtu_table *table)
{
- mlx5_destroy_flow_table(post_meter->ft);
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *fg;
+ u32 *flow_group_in;
+ int err;
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in)
+ return -ENOMEM;
+
+ table->ft = mlx5e_post_meter_table_create(priv, ns_type);
+ if (IS_ERR(table->ft)) {
+ err = PTR_ERR(table->ft);
+ goto err_ft;
+ }
+
+ /* create miss group */
+ memset(flow_group_in, 0, inlen);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
+ fg = mlx5_create_flow_group(table->ft, flow_group_in);
+ if (IS_ERR(fg)) {
+ err = PTR_ERR(fg);
+ goto err_miss_grp;
+ }
+ table->fg = fg;
+
+ kvfree(flow_group_in);
+ return 0;
+
+err_miss_grp:
+ mlx5_destroy_flow_table(table->ft);
+err_ft:
+ kvfree(flow_group_in);
+ return err;
+}
+
+static int
+mlx5e_post_meter_mtu_create(struct mlx5e_priv *priv,
+ enum mlx5_flow_namespace_type ns_type,
+ struct mlx5e_post_act *post_act,
+ struct mlx5_fc *act_counter,
+ struct mlx5_fc *drop_counter,
+ struct mlx5e_post_meter_priv *post_meter,
+ struct mlx5_flow_attr *green_attr,
+ struct mlx5_flow_attr *red_attr)
+{
+ struct mlx5e_post_meter_mtu_tables *mtu_tables = &post_meter->mtu_tables;
+ static struct mlx5_flow_spec zero_spec = {};
+ struct mlx5_flow_handle *rule;
+ int err;
+
+ post_meter->type = MLX5E_POST_METER_MTU;
+
+ err = mlx5e_post_meter_create_mtu_table(priv, ns_type, &mtu_tables->green_table);
+ if (err)
+ goto err_green_ft;
+
+ green_attr->ft = mtu_tables->green_table.ft;
+ rule = mlx5e_post_meter_add_rule(priv, post_meter, &zero_spec, green_attr,
+ act_counter, drop_counter);
+ if (IS_ERR(rule)) {
+ mlx5_core_warn(priv->mdev, "Failed to create post_meter conform rule\n");
+ err = PTR_ERR(rule);
+ goto err_green_rule;
+ }
+ mtu_tables->green_table.rule = rule;
+ mtu_tables->green_table.attr = green_attr;
+
+ err = mlx5e_post_meter_create_mtu_table(priv, ns_type, &mtu_tables->red_table);
+ if (err)
+ goto err_red_ft;
+
+ red_attr->ft = mtu_tables->red_table.ft;
+ rule = mlx5e_post_meter_add_rule(priv, post_meter, &zero_spec, red_attr,
+ act_counter, drop_counter);
+ if (IS_ERR(rule)) {
+ mlx5_core_warn(priv->mdev, "Failed to create post_meter exceed rule\n");
+ err = PTR_ERR(rule);
+ goto err_red_rule;
+ }
+ mtu_tables->red_table.rule = rule;
+ mtu_tables->red_table.attr = red_attr;
+
+ return 0;
+
+err_red_rule:
+ mlx5_destroy_flow_table(mtu_tables->red_table.ft);
+err_red_ft:
+ mlx5_del_flow_rules(mtu_tables->green_table.rule);
+err_green_rule:
+ mlx5_destroy_flow_table(mtu_tables->green_table.ft);
+err_green_ft:
+ return err;
}
struct mlx5e_post_meter_priv *
mlx5e_post_meter_init(struct mlx5e_priv *priv,
enum mlx5_flow_namespace_type ns_type,
struct mlx5e_post_act *post_act,
- struct mlx5_fc *green_counter,
- struct mlx5_fc *red_counter)
+ enum mlx5e_post_meter_type type,
+ struct mlx5_fc *act_counter,
+ struct mlx5_fc *drop_counter,
+ struct mlx5_flow_attr *branch_true,
+ struct mlx5_flow_attr *branch_false)
{
struct mlx5e_post_meter_priv *post_meter;
int err;
@@ -174,36 +405,55 @@ mlx5e_post_meter_init(struct mlx5e_priv *priv,
if (!post_meter)
return ERR_PTR(-ENOMEM);
- err = mlx5e_post_meter_table_create(priv, ns_type, post_meter);
- if (err)
- goto err_ft;
-
- err = mlx5e_post_meter_fg_create(priv, post_meter);
- if (err)
- goto err_fg;
+ switch (type) {
+ case MLX5E_POST_METER_MTU:
+ err = mlx5e_post_meter_mtu_create(priv, ns_type, post_act,
+ act_counter, drop_counter, post_meter,
+ branch_true, branch_false);
+ break;
+ case MLX5E_POST_METER_RATE:
+ err = mlx5e_post_meter_rate_create(priv, ns_type, post_act,
+ act_counter, drop_counter, post_meter,
+ branch_true, branch_false);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ }
- err = mlx5e_post_meter_rules_create(priv, post_meter, post_act, green_counter,
- red_counter);
if (err)
- goto err_rules;
+ goto err;
return post_meter;
-err_rules:
- mlx5e_post_meter_fg_destroy(post_meter);
-err_fg:
- mlx5e_post_meter_table_destroy(post_meter);
-err_ft:
+err:
kfree(post_meter);
return ERR_PTR(err);
}
+static void
+mlx5e_post_meter_rate_destroy(struct mlx5_eswitch *esw, struct mlx5e_post_meter_priv *post_meter)
+{
+ mlx5e_post_meter_rate_rules_destroy(esw, post_meter);
+ mlx5e_post_meter_rate_fg_destroy(post_meter);
+ mlx5e_post_meter_rate_table_destroy(post_meter);
+}
+
+static void
+mlx5e_post_meter_mtu_destroy(struct mlx5e_post_meter_priv *post_meter)
+{
+ mlx5e_post_meter_mtu_rules_destroy(post_meter);
+ mlx5e_post_meter_mtu_fg_destroy(post_meter);
+ mlx5e_post_meter_mtu_table_destroy(post_meter);
+}
+
void
-mlx5e_post_meter_cleanup(struct mlx5e_post_meter_priv *post_meter)
+mlx5e_post_meter_cleanup(struct mlx5_eswitch *esw, struct mlx5e_post_meter_priv *post_meter)
{
- mlx5e_post_meter_rules_destroy(post_meter);
- mlx5e_post_meter_fg_destroy(post_meter);
- mlx5e_post_meter_table_destroy(post_meter);
+ if (post_meter->type == MLX5E_POST_METER_RATE)
+ mlx5e_post_meter_rate_destroy(esw, post_meter);
+ else
+ mlx5e_post_meter_mtu_destroy(post_meter);
+
kfree(post_meter);
}
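Editor's note: mlx5e_post_meter_add_rule() above attaches one of two counters depending on the branch's resolved action. A short sketch of that selection rule; the helper name is illustrative, the field names mirror the patch:

/* Sketch: a branch that drops traffic counts on the drop counter,
 * any other branch counts on the activity counter.
 */
static struct mlx5_fc *post_meter_pick_counter(struct mlx5_flow_attr *attr,
					       struct mlx5_fc *act_counter,
					       struct mlx5_fc *drop_counter)
{
	return (attr->action & MLX5_FLOW_CONTEXT_ACTION_DROP) ?
	       drop_counter : act_counter;
}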
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.h
index 34d0e4b9fc7a..e013b77186b2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.h
@@ -14,16 +14,49 @@
struct mlx5e_post_meter_priv;
+enum mlx5e_post_meter_type {
+ MLX5E_POST_METER_RATE = 0,
+ MLX5E_POST_METER_MTU
+};
+
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
+
struct mlx5_flow_table *
mlx5e_post_meter_get_ft(struct mlx5e_post_meter_priv *post_meter);
+struct mlx5_flow_table *
+mlx5e_post_meter_get_mtu_true_ft(struct mlx5e_post_meter_priv *post_meter);
+
+struct mlx5_flow_table *
+mlx5e_post_meter_get_mtu_false_ft(struct mlx5e_post_meter_priv *post_meter);
+
struct mlx5e_post_meter_priv *
mlx5e_post_meter_init(struct mlx5e_priv *priv,
enum mlx5_flow_namespace_type ns_type,
struct mlx5e_post_act *post_act,
- struct mlx5_fc *green_counter,
- struct mlx5_fc *red_counter);
+ enum mlx5e_post_meter_type type,
+ struct mlx5_fc *act_counter,
+ struct mlx5_fc *drop_counter,
+ struct mlx5_flow_attr *branch_true,
+ struct mlx5_flow_attr *branch_false);
+
void
-mlx5e_post_meter_cleanup(struct mlx5e_post_meter_priv *post_meter);
+mlx5e_post_meter_cleanup(struct mlx5_eswitch *esw, struct mlx5e_post_meter_priv *post_meter);
+
+#else /* CONFIG_MLX5_CLS_ACT */
+
+static inline struct mlx5_flow_table *
+mlx5e_post_meter_get_mtu_true_ft(struct mlx5e_post_meter_priv *post_meter)
+{
+ return NULL;
+}
+
+static inline struct mlx5_flow_table *
+mlx5e_post_meter_get_mtu_false_ft(struct mlx5e_post_meter_priv *post_meter)
+{
+ return NULL;
+}
+
+#endif
#endif /* __MLX5_EN_POST_METER_H__ */
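Editor's note: with the new type parameter, callers pick the post-meter flavor from the parsed meter parameters (an MTU-only policer uses the dual-table MTU layout, everything else the color-match rate table). A hypothetical call-site sketch; mlx5e_post_meter_init(), the type enum, and the meter handle fields come from this patch, the wrapper is an assumption:

/* Hypothetical: choose MTU vs rate post-meter from the meter params. */
static struct mlx5e_post_meter_priv *
post_meter_setup(struct mlx5e_priv *priv, enum mlx5_flow_namespace_type ns,
		 struct mlx5e_post_act *post_act,
		 struct mlx5e_flow_meter_handle *meter,
		 struct mlx5_flow_attr *branch_true,
		 struct mlx5_flow_attr *branch_false)
{
	enum mlx5e_post_meter_type type =
		meter->params.mtu ? MLX5E_POST_METER_MTU : MLX5E_POST_METER_RATE;

	return mlx5e_post_meter_init(priv, ns, post_act, type,
				     meter->act_counter, meter->drop_counter,
				     branch_true, branch_false);
}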
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index 864ce0c393e6..a69849e0deed 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -1774,35 +1774,42 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
/* We translate the tc filter with CT action to the following HW model:
*
- * +---------------------+
- * + ft prio (tc chain) +
- * + original match +
- * +---------------------+
- * | set chain miss mapping
- * | set fte_id
- * | set tunnel_id
- * | do decap
- * v
- * +---------------------+
- * + pre_ct/pre_ct_nat + if matches +-------------------------+
- * + zone+nat match +---------------->+ post_act (see below) +
- * +---------------------+ set zone +-------------------------+
- * | set zone
- * v
- * +--------------------+
- * + CT (nat or no nat) +
- * + tuple + zone match +
- * +--------------------+
- * | set mark
- * | set labels_id
- * | set established
- * | set zone_restore
- * | do nat (if needed)
- * v
- * +--------------+
- * + post_act + original filter actions
- * + fte_id match +------------------------>
- * +--------------+
+ * +---------------------+
+ * + ft prio (tc chain) +
+ * + original match +
+ * +---------------------+
+ * | set chain miss mapping
+ * | set fte_id
+ * | set tunnel_id
+ * | do decap
+ * |
+ * +-------------+
+ * | Chain 0 |
+ * | optimization|
+ * | v
+ * | +---------------------+
+ * | + pre_ct/pre_ct_nat + if matches +----------------------+
+ * | + zone+nat match +---------------->+ post_act (see below) +
+ * | +---------------------+ set zone +----------------------+
+ * | |
+ * +-------------+ set zone
+ * |
+ * v
+ * +--------------------+
+ * + CT (nat or no nat) +
+ * + tuple + zone match +
+ * +--------------------+
+ * | set mark
+ * | set labels_id
+ * | set established
+ * | set zone_restore
+ * | do nat (if needed)
+ * v
+ * +--------------+
+ * + post_act + original filter actions
+ * + fte_id match +------------------------>
+ * +--------------+
+ *
*/
static struct mlx5_flow_handle *
__mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
@@ -1818,6 +1825,7 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
struct mlx5_ct_flow *ct_flow;
int chain_mapping = 0, err;
struct mlx5_ct_ft *ft;
+ u16 zone;
ct_flow = kzalloc(sizeof(*ct_flow), GFP_KERNEL);
if (!ct_flow) {
@@ -1884,6 +1892,25 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
}
}
+ /* Change the original rule to point to the ct table.
+ * Chain 0 sets the zone and jumps to the ct table;
+ * other chains jump to the pre_ct table to align with the act_ct cached logic.
+ */
+ pre_ct_attr->dest_chain = 0;
+ if (!attr->chain) {
+ zone = ft->zone & MLX5_CT_ZONE_MASK;
+ err = mlx5e_tc_match_to_reg_set(priv->mdev, pre_mod_acts, ct_priv->ns_type,
+ ZONE_TO_REG, zone);
+ if (err) {
+ ct_dbg("Failed to set zone register mapping");
+ goto err_mapping;
+ }
+
+ pre_ct_attr->dest_ft = nat ? ct_priv->ct_nat : ct_priv->ct;
+ } else {
+ pre_ct_attr->dest_ft = nat ? ft->pre_ct_nat.ft : ft->pre_ct.ft;
+ }
+
mod_hdr = mlx5_modify_header_alloc(priv->mdev, ct_priv->ns_type,
pre_mod_acts->num_actions,
pre_mod_acts->actions);
@@ -1893,10 +1920,6 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
goto err_mapping;
}
pre_ct_attr->modify_hdr = mod_hdr;
-
- /* Change original rule point to ct table */
- pre_ct_attr->dest_chain = 0;
- pre_ct_attr->dest_ft = nat ? ft->pre_ct_nat.ft : ft->pre_ct.ft;
ct_flow->pre_ct_rule = mlx5_tc_rule_insert(priv, orig_spec,
pre_ct_attr);
if (IS_ERR(ct_flow->pre_ct_rule)) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
index 2e42d7c5451e..2b7fd1c0e643 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
@@ -211,8 +211,4 @@ struct mlx5e_flow_meters *mlx5e_get_flow_meters(struct mlx5_core_dev *dev);
void *mlx5e_get_match_headers_value(u32 flags, struct mlx5_flow_spec *spec);
void *mlx5e_get_match_headers_criteria(u32 flags, struct mlx5_flow_spec *spec);
-int mlx5e_policer_validate(const struct flow_action *action,
- const struct flow_action_entry *act,
- struct netlink_ext_ack *extack);
-
#endif /* __MLX5_EN_TC_PRIV_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index a715601865d3..bb9023957f74 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -45,55 +45,9 @@ static struct mlx5e_ipsec_sa_entry *to_ipsec_sa_entry(struct xfrm_state *x)
return (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
}
-struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *ipsec,
- unsigned int handle)
+static struct mlx5e_ipsec_pol_entry *to_ipsec_pol_entry(struct xfrm_policy *x)
{
- struct mlx5e_ipsec_sa_entry *sa_entry;
- struct xfrm_state *ret = NULL;
-
- rcu_read_lock();
- hash_for_each_possible_rcu(ipsec->sadb_rx, sa_entry, hlist, handle)
- if (sa_entry->handle == handle) {
- ret = sa_entry->x;
- xfrm_state_hold(ret);
- break;
- }
- rcu_read_unlock();
-
- return ret;
-}
-
-static int mlx5e_ipsec_sadb_rx_add(struct mlx5e_ipsec_sa_entry *sa_entry)
-{
- unsigned int handle = sa_entry->ipsec_obj_id;
- struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
- struct mlx5e_ipsec_sa_entry *_sa_entry;
- unsigned long flags;
-
- rcu_read_lock();
- hash_for_each_possible_rcu(ipsec->sadb_rx, _sa_entry, hlist, handle)
- if (_sa_entry->handle == handle) {
- rcu_read_unlock();
- return -EEXIST;
- }
- rcu_read_unlock();
-
- spin_lock_irqsave(&ipsec->sadb_rx_lock, flags);
- sa_entry->handle = handle;
- hash_add_rcu(ipsec->sadb_rx, &sa_entry->hlist, sa_entry->handle);
- spin_unlock_irqrestore(&ipsec->sadb_rx_lock, flags);
-
- return 0;
-}
-
-static void mlx5e_ipsec_sadb_rx_del(struct mlx5e_ipsec_sa_entry *sa_entry)
-{
- struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
- unsigned long flags;
-
- spin_lock_irqsave(&ipsec->sadb_rx_lock, flags);
- hash_del_rcu(&sa_entry->hlist);
- spin_unlock_irqrestore(&ipsec->sadb_rx_lock, flags);
+ return (struct mlx5e_ipsec_pol_entry *)x->xdo.offload_handle;
}
static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
@@ -129,9 +83,33 @@ static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
return false;
}
-static void
-mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
- struct mlx5_accel_esp_xfrm_attrs *attrs)
+static void mlx5e_ipsec_init_limits(struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5_accel_esp_xfrm_attrs *attrs)
+{
+ struct xfrm_state *x = sa_entry->x;
+
+ attrs->hard_packet_limit = x->lft.hard_packet_limit;
+ if (x->lft.soft_packet_limit == XFRM_INF)
+ return;
+
+ /* Hardware decrements the hard_packet_limit counter during
+ * operation and fires an event when soft_packet_limit is
+ * reached. This means we need to substitute the numbers in
+ * order to count the soft limit properly.
+ *
+ * As an example:
+ * the XFRM user sets a soft limit of 2 and a hard limit of 9,
+ * expecting the soft event after 2 packets and the hard event
+ * after 9 packets. In our case, the hard limit is set to 9 and
+ * the soft-limit comparator to 7, so the user gets the soft
+ * event after 2 packets.
+ */
+ attrs->soft_packet_limit =
+ x->lft.hard_packet_limit - x->lft.soft_packet_limit;
+}
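
The substitution above is easiest to see with the numbers from the comment. A standalone sketch in plain user-space C (not driver code), with the soft=2/hard=9 values taken as assumptions from the example:

#include <assert.h>
#include <stdint.h>

/* Worked example: hardware counts down from the hard limit and fires
 * the soft event when the counter reaches the comparator, so the
 * comparator must be programmed as hard - soft.
 */
int main(void)
{
	uint64_t hard = 9, soft = 2;
	uint64_t comparator = hard - soft;	/* 7, written as soft limit */
	uint64_t counter = hard;		/* HW counts down from here */

	counter -= 2;				/* two packets pass */
	assert(counter == comparator);		/* soft event fires now */
	return 0;
}
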
+
+void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5_accel_esp_xfrm_attrs *attrs)
{
struct xfrm_state *x = sa_entry->x;
struct aes_gcm_keymat *aes_gcm = &attrs->aes_gcm;
@@ -157,33 +135,31 @@ mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
memcpy(&aes_gcm->salt, x->aead->alg_key + key_len,
sizeof(aes_gcm->salt));
+ attrs->authsize = crypto_aead_authsize(aead) / 4; /* in dwords */
+
/* iv len */
aes_gcm->icv_len = x->aead->alg_icv_len;
/* esn */
if (sa_entry->esn_state.trigger) {
- attrs->flags |= MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED;
+ attrs->esn_trigger = true;
attrs->esn = sa_entry->esn_state.esn;
- if (sa_entry->esn_state.overlap)
- attrs->flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
+ attrs->esn_overlap = sa_entry->esn_state.overlap;
+ attrs->replay_window = x->replay_esn->replay_window;
}
- /* action */
- attrs->action = (x->xso.dir == XFRM_DEV_OFFLOAD_OUT) ?
- MLX5_ACCEL_ESP_ACTION_ENCRYPT :
- MLX5_ACCEL_ESP_ACTION_DECRYPT;
- /* flags */
- attrs->flags |= (x->props.mode == XFRM_MODE_TRANSPORT) ?
- MLX5_ACCEL_ESP_FLAGS_TRANSPORT :
- MLX5_ACCEL_ESP_FLAGS_TUNNEL;
-
+ attrs->dir = x->xso.dir;
/* spi */
attrs->spi = be32_to_cpu(x->id.spi);
/* source , destination ips */
memcpy(&attrs->saddr, x->props.saddr.a6, sizeof(attrs->saddr));
memcpy(&attrs->daddr, x->id.daddr.a6, sizeof(attrs->daddr));
- attrs->is_ipv6 = (x->props.family != AF_INET);
+ attrs->family = x->props.family;
+ attrs->type = x->xso.type;
+ attrs->reqid = x->props.reqid;
+
+ mlx5e_ipsec_init_limits(sa_entry, attrs);
}
static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
@@ -215,11 +191,6 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
netdev_info(netdev, "Only IPv4/6 xfrm states may be offloaded\n");
return -EINVAL;
}
- if (x->props.mode != XFRM_MODE_TRANSPORT &&
- x->props.mode != XFRM_MODE_TUNNEL) {
- dev_info(&netdev->dev, "Only transport and tunnel xfrm states may be offloaded\n");
- return -EINVAL;
- }
if (x->id.proto != IPPROTO_ESP) {
netdev_info(netdev, "Only ESP xfrm state may be offloaded\n");
return -EINVAL;
@@ -253,6 +224,67 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
netdev_info(netdev, "Cannot offload xfrm states with geniv other than seqiv\n");
return -EINVAL;
}
+ switch (x->xso.type) {
+ case XFRM_DEV_OFFLOAD_CRYPTO:
+ if (!(mlx5_ipsec_device_caps(priv->mdev) &
+ MLX5_IPSEC_CAP_CRYPTO)) {
+ netdev_info(netdev, "Crypto offload is not supported\n");
+ return -EINVAL;
+ }
+
+ if (x->props.mode != XFRM_MODE_TRANSPORT &&
+ x->props.mode != XFRM_MODE_TUNNEL) {
+ netdev_info(netdev, "Only transport and tunnel xfrm states may be offloaded\n");
+ return -EINVAL;
+ }
+ break;
+ case XFRM_DEV_OFFLOAD_PACKET:
+ if (!(mlx5_ipsec_device_caps(priv->mdev) &
+ MLX5_IPSEC_CAP_PACKET_OFFLOAD)) {
+ netdev_info(netdev, "Packet offload is not supported\n");
+ return -EINVAL;
+ }
+
+ if (x->props.mode != XFRM_MODE_TRANSPORT) {
+ netdev_info(netdev, "Only transport xfrm states may be offloaded in packet mode\n");
+ return -EINVAL;
+ }
+
+ if (x->replay_esn && x->replay_esn->replay_window != 32 &&
+ x->replay_esn->replay_window != 64 &&
+ x->replay_esn->replay_window != 128 &&
+ x->replay_esn->replay_window != 256) {
+ netdev_info(netdev,
+ "Unsupported replay window size %u\n",
+ x->replay_esn->replay_window);
+ return -EINVAL;
+ }
+
+ if (!x->props.reqid) {
+ netdev_info(netdev, "Cannot offload without reqid\n");
+ return -EINVAL;
+ }
+
+ if (x->lft.hard_byte_limit != XFRM_INF ||
+ x->lft.soft_byte_limit != XFRM_INF) {
+ netdev_info(netdev,
+ "Device doesn't support limits in bytes\n");
+ return -EINVAL;
+ }
+
+ if (x->lft.soft_packet_limit >= x->lft.hard_packet_limit &&
+ x->lft.hard_packet_limit != XFRM_INF) {
+ /* XFRM stack doesn't prevent such configuration :(. */
+ netdev_info(netdev,
+ "Hard packet limit must be greater than soft one\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ netdev_info(netdev, "Unsupported xfrm offload type %d\n",
+ x->xso.type);
+ return -EINVAL;
+ }
return 0;
}
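
For reference, the XFRM_DEV_OFFLOAD_PACKET checks above distil into a single predicate. A self-contained sketch, illustration only — the authoritative code is mlx5e_xfrm_validate_state(), and XFRM_INF here mirrors the kernel's ~(u64)0:

#include <stdbool.h>
#include <stdint.h>

#define XFRM_INF (~(uint64_t)0)	/* mirrors the kernel definition */

static bool packet_offload_ok(bool has_replay_esn, uint32_t replay_window,
			      uint32_t reqid, uint64_t soft_pkts,
			      uint64_t hard_pkts, uint64_t soft_bytes,
			      uint64_t hard_bytes)
{
	if (has_replay_esn && replay_window != 32 && replay_window != 64 &&
	    replay_window != 128 && replay_window != 256)
		return false;			/* unsupported window size */
	if (!reqid)
		return false;			/* reqid is mandatory */
	if (hard_bytes != XFRM_INF || soft_bytes != XFRM_INF)
		return false;			/* no byte limits in HW */
	if (hard_pkts != XFRM_INF && soft_pkts >= hard_pkts)
		return false;			/* soft must be below hard */
	return true;
}
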
@@ -270,6 +302,7 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry = NULL;
struct net_device *netdev = x->xso.real_dev;
+ struct mlx5e_ipsec *ipsec;
struct mlx5e_priv *priv;
int err;
@@ -277,6 +310,7 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
if (!priv->ipsec)
return -EOPNOTSUPP;
+ ipsec = priv->ipsec;
err = mlx5e_xfrm_validate_state(x);
if (err)
return err;
@@ -288,7 +322,7 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
}
sa_entry->x = x;
- sa_entry->ipsec = priv->ipsec;
+ sa_entry->ipsec = ipsec;
/* check esn */
mlx5e_ipsec_update_esn_state(sa_entry);
@@ -299,25 +333,29 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
if (err)
goto err_xfrm;
- err = mlx5e_accel_ipsec_fs_add_rule(priv, sa_entry);
+ err = mlx5e_accel_ipsec_fs_add_rule(sa_entry);
if (err)
goto err_hw_ctx;
- if (x->xso.dir == XFRM_DEV_OFFLOAD_IN) {
- err = mlx5e_ipsec_sadb_rx_add(sa_entry);
- if (err)
- goto err_add_rule;
- } else {
+ /* We use the *_bh() variant because xfrm_timer_handler(), which
+ * runs in softirq context, can reach our state-delete logic, and
+ * we need xa_erase_bh() there.
+ */
+ err = xa_insert_bh(&ipsec->sadb, sa_entry->ipsec_obj_id, sa_entry,
+ GFP_KERNEL);
+ if (err)
+ goto err_add_rule;
+
+ if (x->xso.dir == XFRM_DEV_OFFLOAD_OUT)
sa_entry->set_iv_op = (x->props.flags & XFRM_STATE_ESN) ?
mlx5e_ipsec_set_iv_esn : mlx5e_ipsec_set_iv;
- }
INIT_WORK(&sa_entry->modify_work.work, _update_xfrm_state);
x->xso.offload_handle = (unsigned long)sa_entry;
- goto out;
+ return 0;
err_add_rule:
- mlx5e_accel_ipsec_fs_del_rule(priv, sa_entry);
+ mlx5e_accel_ipsec_fs_del_rule(sa_entry);
err_hw_ctx:
mlx5_ipsec_free_sa_ctx(sa_entry);
err_xfrm:
@@ -329,59 +367,69 @@ out:
static void mlx5e_xfrm_del_state(struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
+ struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+ struct mlx5e_ipsec_sa_entry *old;
- if (x->xso.dir == XFRM_DEV_OFFLOAD_IN)
- mlx5e_ipsec_sadb_rx_del(sa_entry);
+ old = xa_erase_bh(&ipsec->sadb, sa_entry->ipsec_obj_id);
+ WARN_ON(old != sa_entry);
}
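
A minimal sketch of the new SADB lifecycle, assuming only the xarray API (the example_* names are illustrative, not driver code): entries are keyed by the IPsec object id, and the _bh variants keep insert and erase safe against the softirq-context deletes mentioned above.

#include <linux/xarray.h>

static DEFINE_XARRAY_ALLOC(example_sadb);

static int example_sadb_add(u32 obj_id, void *entry)
{
	/* xa_insert_bh() takes the xa lock with softirqs disabled, so
	 * it can safely race with xa_erase_bh() from timer context.
	 */
	return xa_insert_bh(&example_sadb, obj_id, entry, GFP_KERNEL);
}

static void *example_sadb_del(u32 obj_id)
{
	return xa_erase_bh(&example_sadb, obj_id);
}
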
static void mlx5e_xfrm_free_state(struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
- struct mlx5e_priv *priv = netdev_priv(x->xso.dev);
cancel_work_sync(&sa_entry->modify_work.work);
- mlx5e_accel_ipsec_fs_del_rule(priv, sa_entry);
+ mlx5e_accel_ipsec_fs_del_rule(sa_entry);
mlx5_ipsec_free_sa_ctx(sa_entry);
kfree(sa_entry);
}
-int mlx5e_ipsec_init(struct mlx5e_priv *priv)
+void mlx5e_ipsec_init(struct mlx5e_priv *priv)
{
struct mlx5e_ipsec *ipsec;
- int ret;
+ int ret = -ENOMEM;
if (!mlx5_ipsec_device_caps(priv->mdev)) {
netdev_dbg(priv->netdev, "Not an IPSec offload device\n");
- return 0;
+ return;
}
ipsec = kzalloc(sizeof(*ipsec), GFP_KERNEL);
if (!ipsec)
- return -ENOMEM;
+ return;
- hash_init(ipsec->sadb_rx);
- spin_lock_init(&ipsec->sadb_rx_lock);
+ xa_init_flags(&ipsec->sadb, XA_FLAGS_ALLOC);
ipsec->mdev = priv->mdev;
ipsec->wq = alloc_ordered_workqueue("mlx5e_ipsec: %s", 0,
priv->netdev->name);
- if (!ipsec->wq) {
- ret = -ENOMEM;
+ if (!ipsec->wq)
goto err_wq;
+
+ if (mlx5_ipsec_device_caps(priv->mdev) &
+ MLX5_IPSEC_CAP_PACKET_OFFLOAD) {
+ ret = mlx5e_ipsec_aso_init(ipsec);
+ if (ret)
+ goto err_aso;
}
ret = mlx5e_accel_ipsec_fs_init(ipsec);
if (ret)
goto err_fs_init;
+ ipsec->fs = priv->fs;
priv->ipsec = ipsec;
netdev_dbg(priv->netdev, "IPSec attached to netdevice\n");
- return 0;
+ return;
err_fs_init:
+ if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
+ mlx5e_ipsec_aso_cleanup(ipsec);
+err_aso:
destroy_workqueue(ipsec->wq);
err_wq:
kfree(ipsec);
- return (ret != -EOPNOTSUPP) ? ret : 0;
+ mlx5_core_err(priv->mdev, "IPSec initialization failed, %d\n", ret);
+ return;
}
void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
@@ -392,6 +440,8 @@ void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
return;
mlx5e_accel_ipsec_fs_cleanup(ipsec);
+ if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
+ mlx5e_ipsec_aso_cleanup(ipsec);
destroy_workqueue(ipsec->wq);
kfree(ipsec);
priv->ipsec = NULL;
@@ -427,6 +477,122 @@ static void mlx5e_xfrm_advance_esn_state(struct xfrm_state *x)
queue_work(sa_entry->ipsec->wq, &modify_work->work);
}
+static void mlx5e_xfrm_update_curlft(struct xfrm_state *x)
+{
+ struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
+ int err;
+
+ lockdep_assert_held(&x->lock);
+
+ if (sa_entry->attrs.soft_packet_limit == XFRM_INF)
+ /* Limits are not configured, as the soft limit
+ * must be lower than the hard limit.
+ */
+ return;
+
+ err = mlx5e_ipsec_aso_query(sa_entry, NULL);
+ if (err)
+ return;
+
+ mlx5e_ipsec_aso_update_curlft(sa_entry, &x->curlft.packets);
+}
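
mlx5e_ipsec_aso_update_curlft() itself is not part of this hunk; a hypothetical sketch of the arithmetic it implies, assuming the ASO query returns the remaining value of the down-counting hard-limit counter:

/* Hypothetical helper, not driver code: packets already processed is
 * the distance the hardware counter has travelled from the hard limit.
 */
static void example_update_curlft(u64 hard_limit, u64 hw_remaining,
				  u64 *packets)
{
	*packets = hard_limit - hw_remaining;
}
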
+
+static int mlx5e_xfrm_validate_policy(struct xfrm_policy *x)
+{
+ struct net_device *netdev = x->xdo.real_dev;
+
+ if (x->type != XFRM_POLICY_TYPE_MAIN) {
+ netdev_info(netdev, "Cannot offload non-main policy types\n");
+ return -EINVAL;
+ }
+
+ /* Note that we support only one template */
+ if (x->xfrm_nr > 1) {
+ netdev_info(netdev, "Cannot offload more than one template\n");
+ return -EINVAL;
+ }
+
+ if (x->xdo.dir != XFRM_DEV_OFFLOAD_IN &&
+ x->xdo.dir != XFRM_DEV_OFFLOAD_OUT) {
+ netdev_info(netdev, "Cannot offload forward policy\n");
+ return -EINVAL;
+ }
+
+ if (!x->xfrm_vec[0].reqid) {
+ netdev_info(netdev, "Cannot offload policy without reqid\n");
+ return -EINVAL;
+ }
+
+ if (x->xdo.type != XFRM_DEV_OFFLOAD_PACKET) {
+ netdev_info(netdev, "Unsupported xfrm offload type\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void
+mlx5e_ipsec_build_accel_pol_attrs(struct mlx5e_ipsec_pol_entry *pol_entry,
+ struct mlx5_accel_pol_xfrm_attrs *attrs)
+{
+ struct xfrm_policy *x = pol_entry->x;
+ struct xfrm_selector *sel;
+
+ sel = &x->selector;
+ memset(attrs, 0, sizeof(*attrs));
+
+ memcpy(&attrs->saddr, sel->saddr.a6, sizeof(attrs->saddr));
+ memcpy(&attrs->daddr, sel->daddr.a6, sizeof(attrs->daddr));
+ attrs->family = sel->family;
+ attrs->dir = x->xdo.dir;
+ attrs->action = x->action;
+ attrs->type = XFRM_DEV_OFFLOAD_PACKET;
+ attrs->reqid = x->xfrm_vec[0].reqid;
+}
+
+static int mlx5e_xfrm_add_policy(struct xfrm_policy *x)
+{
+ struct net_device *netdev = x->xdo.real_dev;
+ struct mlx5e_ipsec_pol_entry *pol_entry;
+ struct mlx5e_priv *priv;
+ int err;
+
+ priv = netdev_priv(netdev);
+ if (!priv->ipsec)
+ return -EOPNOTSUPP;
+
+ err = mlx5e_xfrm_validate_policy(x);
+ if (err)
+ return err;
+
+ pol_entry = kzalloc(sizeof(*pol_entry), GFP_KERNEL);
+ if (!pol_entry)
+ return -ENOMEM;
+
+ pol_entry->x = x;
+ pol_entry->ipsec = priv->ipsec;
+
+ mlx5e_ipsec_build_accel_pol_attrs(pol_entry, &pol_entry->attrs);
+ err = mlx5e_accel_ipsec_fs_add_pol(pol_entry);
+ if (err)
+ goto err_fs;
+
+ x->xdo.offload_handle = (unsigned long)pol_entry;
+ return 0;
+
+err_fs:
+ kfree(pol_entry);
+ return err;
+}
+
+static void mlx5e_xfrm_free_policy(struct xfrm_policy *x)
+{
+ struct mlx5e_ipsec_pol_entry *pol_entry = to_ipsec_pol_entry(x);
+
+ mlx5e_accel_ipsec_fs_del_pol(pol_entry);
+ kfree(pol_entry);
+}
+
static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = {
.xdo_dev_state_add = mlx5e_xfrm_add_state,
.xdo_dev_state_delete = mlx5e_xfrm_del_state,
@@ -435,6 +601,18 @@ static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = {
.xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,
};
+static const struct xfrmdev_ops mlx5e_ipsec_packet_xfrmdev_ops = {
+ .xdo_dev_state_add = mlx5e_xfrm_add_state,
+ .xdo_dev_state_delete = mlx5e_xfrm_del_state,
+ .xdo_dev_state_free = mlx5e_xfrm_free_state,
+ .xdo_dev_offload_ok = mlx5e_ipsec_offload_ok,
+ .xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,
+
+ .xdo_dev_state_update_curlft = mlx5e_xfrm_update_curlft,
+ .xdo_dev_policy_add = mlx5e_xfrm_add_policy,
+ .xdo_dev_policy_free = mlx5e_xfrm_free_policy,
+};
+
void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
@@ -444,7 +622,12 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
return;
mlx5_core_info(mdev, "mlx5e: IPSec ESP acceleration enabled\n");
- netdev->xfrmdev_ops = &mlx5e_ipsec_xfrmdev_ops;
+
+ if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
+ netdev->xfrmdev_ops = &mlx5e_ipsec_packet_xfrmdev_ops;
+ else
+ netdev->xfrmdev_ops = &mlx5e_ipsec_xfrmdev_ops;
+
netdev->features |= NETIF_F_HW_ESP;
netdev->hw_enc_features |= NETIF_F_HW_ESP;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index 16bcceec16c4..a92e19c4c499 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -34,27 +34,14 @@
#ifndef __MLX5E_IPSEC_H__
#define __MLX5E_IPSEC_H__
-#ifdef CONFIG_MLX5_EN_IPSEC
-
#include <linux/mlx5/device.h>
#include <net/xfrm.h>
#include <linux/idr.h>
+#include "lib/aso.h"
#define MLX5E_IPSEC_SADB_RX_BITS 10
#define MLX5E_IPSEC_ESN_SCOPE_MID 0x80000000L
-enum mlx5_accel_esp_flags {
- MLX5_ACCEL_ESP_FLAGS_TUNNEL = 0, /* Default */
- MLX5_ACCEL_ESP_FLAGS_TRANSPORT = 1UL << 0,
- MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED = 1UL << 1,
- MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP = 1UL << 2,
-};
-
-enum mlx5_accel_esp_action {
- MLX5_ACCEL_ESP_ACTION_DECRYPT,
- MLX5_ACCEL_ESP_ACTION_ENCRYPT,
-};
-
struct aes_gcm_keymat {
u64 seq_iv;
@@ -66,7 +53,6 @@ struct aes_gcm_keymat {
};
struct mlx5_accel_esp_xfrm_attrs {
- enum mlx5_accel_esp_action action;
u32 esn;
u32 spi;
u32 flags;
@@ -82,16 +68,37 @@ struct mlx5_accel_esp_xfrm_attrs {
__be32 a6[4];
} daddr;
- u8 is_ipv6;
+ u8 dir : 2;
+ u8 esn_overlap : 1;
+ u8 esn_trigger : 1;
+ u8 type : 2;
+ u8 family;
+ u32 replay_window;
+ u32 authsize;
+ u32 reqid;
+ u64 hard_packet_limit;
+ u64 soft_packet_limit;
};
enum mlx5_ipsec_cap {
MLX5_IPSEC_CAP_CRYPTO = 1 << 0,
MLX5_IPSEC_CAP_ESN = 1 << 1,
+ MLX5_IPSEC_CAP_PACKET_OFFLOAD = 1 << 2,
};
struct mlx5e_priv;
+struct mlx5e_ipsec_hw_stats {
+ u64 ipsec_rx_pkts;
+ u64 ipsec_rx_bytes;
+ u64 ipsec_rx_drop_pkts;
+ u64 ipsec_rx_drop_bytes;
+ u64 ipsec_tx_pkts;
+ u64 ipsec_tx_bytes;
+ u64 ipsec_tx_drop_pkts;
+ u64 ipsec_tx_drop_bytes;
+};
+
struct mlx5e_ipsec_sw_stats {
atomic64_t ipsec_rx_drop_sp_alloc;
atomic64_t ipsec_rx_drop_sadb_miss;
@@ -102,17 +109,38 @@ struct mlx5e_ipsec_sw_stats {
atomic64_t ipsec_tx_drop_trailer;
};
-struct mlx5e_accel_fs_esp;
+struct mlx5e_ipsec_rx;
struct mlx5e_ipsec_tx;
+struct mlx5e_ipsec_work {
+ struct work_struct work;
+ struct mlx5e_ipsec *ipsec;
+ u32 id;
+};
+
+struct mlx5e_ipsec_aso {
+ u8 ctx[MLX5_ST_SZ_BYTES(ipsec_aso)];
+ dma_addr_t dma_addr;
+ struct mlx5_aso *aso;
+ /* The IPsec ASO caches data on every query call, so in
+ * nested calls this flag lets us skip redundant calls to
+ * mlx5e_ipsec_aso_query()
+ */
+ u8 use_cache : 1;
+};
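
A hypothetical illustration of the use_cache contract (field names follow the struct above; the query body is elided): the outer caller performs the real ASO query, and nested callers read the cached context instead of re-querying.

static int example_aso_query(struct mlx5e_ipsec_aso *aso)
{
	if (aso->use_cache)
		return 0;	/* data is already valid in aso->ctx */

	/* ... post the ASO WQE and wait for completion, filling
	 * aso->ctx; the caller sets use_cache before triggering any
	 * nested query so it takes the early return above ...
	 */
	return 0;
}
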
+
struct mlx5e_ipsec {
struct mlx5_core_dev *mdev;
- DECLARE_HASHTABLE(sadb_rx, MLX5E_IPSEC_SADB_RX_BITS);
- spinlock_t sadb_rx_lock; /* Protects sadb_rx */
+ struct xarray sadb;
struct mlx5e_ipsec_sw_stats sw_stats;
+ struct mlx5e_ipsec_hw_stats hw_stats;
struct workqueue_struct *wq;
- struct mlx5e_accel_fs_esp *rx_fs;
- struct mlx5e_ipsec_tx *tx_fs;
+ struct mlx5e_flow_steering *fs;
+ struct mlx5e_ipsec_rx *rx_ipv4;
+ struct mlx5e_ipsec_rx *rx_ipv6;
+ struct mlx5e_ipsec_tx *tx;
+ struct mlx5e_ipsec_aso *aso;
+ struct notifier_block nb;
};
struct mlx5e_ipsec_esn_state {
@@ -123,7 +151,8 @@ struct mlx5e_ipsec_esn_state {
struct mlx5e_ipsec_rule {
struct mlx5_flow_handle *rule;
- struct mlx5_modify_hdr *set_modify_hdr;
+ struct mlx5_modify_hdr *modify_hdr;
+ struct mlx5_pkt_reformat *pkt_reformat;
};
struct mlx5e_ipsec_modify_state_work {
@@ -132,9 +161,7 @@ struct mlx5e_ipsec_modify_state_work {
};
struct mlx5e_ipsec_sa_entry {
- struct hlist_node hlist; /* Item in SADB_RX hashtable */
struct mlx5e_ipsec_esn_state esn_state;
- unsigned int handle; /* Handle in SADB_RX */
struct xfrm_state *x;
struct mlx5e_ipsec *ipsec;
struct mlx5_accel_esp_xfrm_attrs attrs;
@@ -146,19 +173,43 @@ struct mlx5e_ipsec_sa_entry {
struct mlx5e_ipsec_modify_state_work modify_work;
};
-int mlx5e_ipsec_init(struct mlx5e_priv *priv);
+struct mlx5_accel_pol_xfrm_attrs {
+ union {
+ __be32 a4;
+ __be32 a6[4];
+ } saddr;
+
+ union {
+ __be32 a4;
+ __be32 a6[4];
+ } daddr;
+
+ u8 family;
+ u8 action;
+ u8 type : 2;
+ u8 dir : 2;
+ u32 reqid;
+};
+
+struct mlx5e_ipsec_pol_entry {
+ struct xfrm_policy *x;
+ struct mlx5e_ipsec *ipsec;
+ struct mlx5e_ipsec_rule ipsec_rule;
+ struct mlx5_accel_pol_xfrm_attrs attrs;
+};
+
+#ifdef CONFIG_MLX5_EN_IPSEC
+
+void mlx5e_ipsec_init(struct mlx5e_priv *priv);
void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv);
void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv);
-struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *dev,
- unsigned int handle);
-
void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec);
int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec);
-int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv,
- struct mlx5e_ipsec_sa_entry *sa_entry);
-void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_priv *priv,
- struct mlx5e_ipsec_sa_entry *sa_entry);
+int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry);
+void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry);
+int mlx5e_accel_ipsec_fs_add_pol(struct mlx5e_ipsec_pol_entry *pol_entry);
+void mlx5e_accel_ipsec_fs_del_pol(struct mlx5e_ipsec_pol_entry *pol_entry);
int mlx5_ipsec_create_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry);
void mlx5_ipsec_free_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry);
@@ -168,15 +219,33 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev);
void mlx5_accel_esp_modify_xfrm(struct mlx5e_ipsec_sa_entry *sa_entry,
const struct mlx5_accel_esp_xfrm_attrs *attrs);
+int mlx5e_ipsec_aso_init(struct mlx5e_ipsec *ipsec);
+void mlx5e_ipsec_aso_cleanup(struct mlx5e_ipsec *ipsec);
+
+int mlx5e_ipsec_aso_query(struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5_wqe_aso_ctrl_seg *data);
+void mlx5e_ipsec_aso_update_curlft(struct mlx5e_ipsec_sa_entry *sa_entry,
+ u64 *packets);
+
+void mlx5e_accel_ipsec_fs_read_stats(struct mlx5e_priv *priv,
+ void *ipsec_stats);
+
+void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5_accel_esp_xfrm_attrs *attrs);
static inline struct mlx5_core_dev *
mlx5e_ipsec_sa2dev(struct mlx5e_ipsec_sa_entry *sa_entry)
{
return sa_entry->ipsec->mdev;
}
+
+static inline struct mlx5_core_dev *
+mlx5e_ipsec_pol2dev(struct mlx5e_ipsec_pol_entry *pol_entry)
+{
+ return pol_entry->ipsec->mdev;
+}
#else
-static inline int mlx5e_ipsec_init(struct mlx5e_priv *priv)
+static inline void mlx5e_ipsec_init(struct mlx5e_priv *priv)
{
- return 0;
}
static inline void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index b859e4a4c744..9f19f4b59a70 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -9,53 +9,67 @@
#define NUM_IPSEC_FTE BIT(15)
-enum accel_fs_esp_type {
- ACCEL_FS_ESP4,
- ACCEL_FS_ESP6,
- ACCEL_FS_ESP_NUM_TYPES,
+struct mlx5e_ipsec_fc {
+ struct mlx5_fc *cnt;
+ struct mlx5_fc *drop;
};
-struct mlx5e_ipsec_rx_err {
- struct mlx5_flow_table *ft;
- struct mlx5_flow_handle *rule;
- struct mlx5_modify_hdr *copy_modify_hdr;
+struct mlx5e_ipsec_ft {
+ struct mutex mutex; /* Protect changes to this struct */
+ struct mlx5_flow_table *pol;
+ struct mlx5_flow_table *sa;
+ struct mlx5_flow_table *status;
+ u32 refcnt;
};
-struct mlx5e_accel_fs_esp_prot {
- struct mlx5_flow_table *ft;
- struct mlx5_flow_group *miss_group;
- struct mlx5_flow_handle *miss_rule;
- struct mlx5_flow_destination default_dest;
- struct mlx5e_ipsec_rx_err rx_err;
- u32 refcnt;
- struct mutex prot_mutex; /* protect ESP4/ESP6 protocol */
+struct mlx5e_ipsec_miss {
+ struct mlx5_flow_group *group;
+ struct mlx5_flow_handle *rule;
};
-struct mlx5e_accel_fs_esp {
- struct mlx5e_accel_fs_esp_prot fs_prot[ACCEL_FS_ESP_NUM_TYPES];
+struct mlx5e_ipsec_rx {
+ struct mlx5e_ipsec_ft ft;
+ struct mlx5e_ipsec_miss pol;
+ struct mlx5e_ipsec_miss sa;
+ struct mlx5e_ipsec_rule status;
+ struct mlx5e_ipsec_fc *fc;
};
struct mlx5e_ipsec_tx {
+ struct mlx5e_ipsec_ft ft;
+ struct mlx5e_ipsec_miss pol;
struct mlx5_flow_namespace *ns;
- struct mlx5_flow_table *ft;
- struct mutex mutex; /* Protect IPsec TX steering */
- u32 refcnt;
+ struct mlx5e_ipsec_fc *fc;
};
/* IPsec RX flow steering */
-static enum mlx5_traffic_types fs_esp2tt(enum accel_fs_esp_type i)
+static enum mlx5_traffic_types family2tt(u32 family)
{
- if (i == ACCEL_FS_ESP4)
+ if (family == AF_INET)
return MLX5_TT_IPV4_IPSEC_ESP;
return MLX5_TT_IPV6_IPSEC_ESP;
}
-static int rx_err_add_rule(struct mlx5e_priv *priv,
- struct mlx5e_accel_fs_esp_prot *fs_prot,
- struct mlx5e_ipsec_rx_err *rx_err)
+static struct mlx5_flow_table *ipsec_ft_create(struct mlx5_flow_namespace *ns,
+ int level, int prio,
+ int max_num_groups)
+{
+ struct mlx5_flow_table_attr ft_attr = {};
+
+ ft_attr.autogroup.num_reserved_entries = 1;
+ ft_attr.autogroup.max_num_groups = max_num_groups;
+ ft_attr.max_fte = NUM_IPSEC_FTE;
+ ft_attr.level = level;
+ ft_attr.prio = prio;
+
+ return mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
+}
+
+static int ipsec_status_rule(struct mlx5_core_dev *mdev,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5_flow_destination *dest)
{
u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
- struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5_flow_act flow_act = {};
struct mlx5_modify_hdr *modify_hdr;
struct mlx5_flow_handle *fte;
@@ -79,26 +93,26 @@ static int rx_err_add_rule(struct mlx5e_priv *priv,
if (IS_ERR(modify_hdr)) {
err = PTR_ERR(modify_hdr);
- netdev_err(priv->netdev,
- "fail to alloc ipsec copy modify_header_id err=%d\n", err);
+ mlx5_core_err(mdev,
+ "fail to alloc ipsec copy modify_header_id err=%d\n", err);
goto out_spec;
}
/* create fte */
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
flow_act.modify_hdr = modify_hdr;
- fte = mlx5_add_flow_rules(rx_err->ft, spec, &flow_act,
- &fs_prot->default_dest, 1);
+ fte = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2);
if (IS_ERR(fte)) {
err = PTR_ERR(fte);
- netdev_err(priv->netdev, "fail to add ipsec rx err copy rule err=%d\n", err);
+ mlx5_core_err(mdev, "fail to add ipsec rx err copy rule err=%d\n", err);
goto out;
}
kvfree(spec);
- rx_err->rule = fte;
- rx_err->copy_modify_hdr = modify_hdr;
+ rx->status.rule = fte;
+ rx->status.modify_hdr = modify_hdr;
return 0;
out:
@@ -108,13 +122,12 @@ out_spec:
return err;
}
-static int rx_fs_create(struct mlx5e_priv *priv,
- struct mlx5e_accel_fs_esp_prot *fs_prot)
+static int ipsec_miss_create(struct mlx5_core_dev *mdev,
+ struct mlx5_flow_table *ft,
+ struct mlx5e_ipsec_miss *miss,
+ struct mlx5_flow_destination *dest)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
- struct mlx5_flow_table *ft = fs_prot->ft;
- struct mlx5_flow_group *miss_group;
- struct mlx5_flow_handle *miss_rule;
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_spec *spec;
u32 *flow_group_in;
@@ -130,450 +143,888 @@ static int rx_fs_create(struct mlx5e_priv *priv,
/* Create miss_group */
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
- miss_group = mlx5_create_flow_group(ft, flow_group_in);
- if (IS_ERR(miss_group)) {
- err = PTR_ERR(miss_group);
- netdev_err(priv->netdev, "fail to create ipsec rx miss_group err=%d\n", err);
+ miss->group = mlx5_create_flow_group(ft, flow_group_in);
+ if (IS_ERR(miss->group)) {
+ err = PTR_ERR(miss->group);
+ mlx5_core_err(mdev, "fail to create IPsec miss_group err=%d\n",
+ err);
goto out;
}
- fs_prot->miss_group = miss_group;
/* Create miss rule */
- miss_rule = mlx5_add_flow_rules(ft, spec, &flow_act, &fs_prot->default_dest, 1);
- if (IS_ERR(miss_rule)) {
- mlx5_destroy_flow_group(fs_prot->miss_group);
- err = PTR_ERR(miss_rule);
- netdev_err(priv->netdev, "fail to create ipsec rx miss_rule err=%d\n", err);
+ miss->rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
+ if (IS_ERR(miss->rule)) {
+ mlx5_destroy_flow_group(miss->group);
+ err = PTR_ERR(miss->rule);
+ mlx5_core_err(mdev, "fail to create IPsec miss_rule err=%d\n",
+ err);
goto out;
}
- fs_prot->miss_rule = miss_rule;
out:
kvfree(flow_group_in);
kvfree(spec);
return err;
}
-static void rx_destroy(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
+static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_rx *rx)
{
- struct mlx5e_accel_fs_esp_prot *fs_prot;
- struct mlx5e_accel_fs_esp *accel_esp;
-
- accel_esp = priv->ipsec->rx_fs;
-
- /* The netdev unreg already happened, so all offloaded rule are already removed */
- fs_prot = &accel_esp->fs_prot[type];
+ mlx5_del_flow_rules(rx->pol.rule);
+ mlx5_destroy_flow_group(rx->pol.group);
+ mlx5_destroy_flow_table(rx->ft.pol);
- mlx5_del_flow_rules(fs_prot->miss_rule);
- mlx5_destroy_flow_group(fs_prot->miss_group);
- mlx5_destroy_flow_table(fs_prot->ft);
+ mlx5_del_flow_rules(rx->sa.rule);
+ mlx5_destroy_flow_group(rx->sa.group);
+ mlx5_destroy_flow_table(rx->ft.sa);
- mlx5_del_flow_rules(fs_prot->rx_err.rule);
- mlx5_modify_header_dealloc(priv->mdev, fs_prot->rx_err.copy_modify_hdr);
- mlx5_destroy_flow_table(fs_prot->rx_err.ft);
+ mlx5_del_flow_rules(rx->status.rule);
+ mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr);
+ mlx5_destroy_flow_table(rx->ft.status);
}
-static int rx_create(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
+static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx, u32 family)
{
- struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(priv->fs, false);
- struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(priv->fs, false);
- struct mlx5_flow_table_attr ft_attr = {};
- struct mlx5e_accel_fs_esp_prot *fs_prot;
- struct mlx5e_accel_fs_esp *accel_esp;
+ struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(ipsec->fs, false);
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
+ struct mlx5_flow_destination dest[2];
struct mlx5_flow_table *ft;
int err;
- accel_esp = priv->ipsec->rx_fs;
- fs_prot = &accel_esp->fs_prot[type];
- fs_prot->default_dest =
- mlx5_ttc_get_default_dest(ttc, fs_esp2tt(type));
-
- ft_attr.max_fte = 1;
- ft_attr.autogroup.max_num_groups = 1;
- ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL;
- ft_attr.prio = MLX5E_NIC_PRIO;
- ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
+ ft = ipsec_ft_create(ns, MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL,
+ MLX5E_NIC_PRIO, 1);
if (IS_ERR(ft))
return PTR_ERR(ft);
- fs_prot->rx_err.ft = ft;
- err = rx_err_add_rule(priv, fs_prot, &fs_prot->rx_err);
+ rx->ft.status = ft;
+
+ dest[0] = mlx5_ttc_get_default_dest(ttc, family2tt(family));
+ dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest[1].counter_id = mlx5_fc_id(rx->fc->cnt);
+ err = ipsec_status_rule(mdev, rx, dest);
if (err)
goto err_add;
/* Create FT */
- ft_attr.max_fte = NUM_IPSEC_FTE;
- ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_LEVEL;
- ft_attr.prio = MLX5E_NIC_PRIO;
- ft_attr.autogroup.num_reserved_entries = 1;
- ft_attr.autogroup.max_num_groups = 1;
- ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
+ ft = ipsec_ft_create(ns, MLX5E_ACCEL_FS_ESP_FT_LEVEL, MLX5E_NIC_PRIO,
+ 2);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
goto err_fs_ft;
}
- fs_prot->ft = ft;
+ rx->ft.sa = ft;
- err = rx_fs_create(priv, fs_prot);
+ err = ipsec_miss_create(mdev, rx->ft.sa, &rx->sa, dest);
if (err)
goto err_fs;
+ ft = ipsec_ft_create(ns, MLX5E_ACCEL_FS_POL_FT_LEVEL, MLX5E_NIC_PRIO,
+ 2);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ goto err_pol_ft;
+ }
+ rx->ft.pol = ft;
+ memset(dest, 0x00, 2 * sizeof(*dest));
+ dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[0].ft = rx->ft.sa;
+ err = ipsec_miss_create(mdev, rx->ft.pol, &rx->pol, dest);
+ if (err)
+ goto err_pol_miss;
+
return 0;
+err_pol_miss:
+ mlx5_destroy_flow_table(rx->ft.pol);
+err_pol_ft:
+ mlx5_del_flow_rules(rx->sa.rule);
+ mlx5_destroy_flow_group(rx->sa.group);
err_fs:
- mlx5_destroy_flow_table(fs_prot->ft);
+ mlx5_destroy_flow_table(rx->ft.sa);
err_fs_ft:
- mlx5_del_flow_rules(fs_prot->rx_err.rule);
- mlx5_modify_header_dealloc(priv->mdev, fs_prot->rx_err.copy_modify_hdr);
+ mlx5_del_flow_rules(rx->status.rule);
+ mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr);
err_add:
- mlx5_destroy_flow_table(fs_prot->rx_err.ft);
+ mlx5_destroy_flow_table(rx->ft.status);
return err;
}
-static int rx_ft_get(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
+static struct mlx5e_ipsec_rx *rx_ft_get(struct mlx5_core_dev *mdev,
+ struct mlx5e_ipsec *ipsec, u32 family)
{
- struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(priv->fs, false);
- struct mlx5e_accel_fs_esp_prot *fs_prot;
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
struct mlx5_flow_destination dest = {};
- struct mlx5e_accel_fs_esp *accel_esp;
+ struct mlx5e_ipsec_rx *rx;
int err = 0;
- accel_esp = priv->ipsec->rx_fs;
- fs_prot = &accel_esp->fs_prot[type];
- mutex_lock(&fs_prot->prot_mutex);
- if (fs_prot->refcnt)
+ if (family == AF_INET)
+ rx = ipsec->rx_ipv4;
+ else
+ rx = ipsec->rx_ipv6;
+
+ mutex_lock(&rx->ft.mutex);
+ if (rx->ft.refcnt)
goto skip;
/* create FT */
- err = rx_create(priv, type);
+ err = rx_create(mdev, ipsec, rx, family);
if (err)
goto out;
/* connect */
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest.ft = fs_prot->ft;
- mlx5_ttc_fwd_dest(ttc, fs_esp2tt(type), &dest);
+ dest.ft = rx->ft.pol;
+ mlx5_ttc_fwd_dest(ttc, family2tt(family), &dest);
skip:
- fs_prot->refcnt++;
+ rx->ft.refcnt++;
out:
- mutex_unlock(&fs_prot->prot_mutex);
- return err;
+ mutex_unlock(&rx->ft.mutex);
+ if (err)
+ return ERR_PTR(err);
+ return rx;
}
-static void rx_ft_put(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
+static void rx_ft_put(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
+ u32 family)
{
- struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(priv->fs, false);
- struct mlx5e_accel_fs_esp_prot *fs_prot;
- struct mlx5e_accel_fs_esp *accel_esp;
+ struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
+ struct mlx5e_ipsec_rx *rx;
+
+ if (family == AF_INET)
+ rx = ipsec->rx_ipv4;
+ else
+ rx = ipsec->rx_ipv6;
- accel_esp = priv->ipsec->rx_fs;
- fs_prot = &accel_esp->fs_prot[type];
- mutex_lock(&fs_prot->prot_mutex);
- fs_prot->refcnt--;
- if (fs_prot->refcnt)
+ mutex_lock(&rx->ft.mutex);
+ rx->ft.refcnt--;
+ if (rx->ft.refcnt)
goto out;
/* disconnect */
- mlx5_ttc_fwd_default_dest(ttc, fs_esp2tt(type));
+ mlx5_ttc_fwd_default_dest(ttc, family2tt(family));
/* remove FT */
- rx_destroy(priv, type);
+ rx_destroy(mdev, rx);
out:
- mutex_unlock(&fs_prot->prot_mutex);
+ mutex_unlock(&rx->ft.mutex);
}
/* IPsec TX flow steering */
-static int tx_create(struct mlx5e_priv *priv)
+static int tx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx)
{
- struct mlx5_flow_table_attr ft_attr = {};
- struct mlx5e_ipsec *ipsec = priv->ipsec;
+ struct mlx5_flow_destination dest = {};
struct mlx5_flow_table *ft;
int err;
- ft_attr.max_fte = NUM_IPSEC_FTE;
- ft_attr.autogroup.max_num_groups = 1;
- ft = mlx5_create_auto_grouped_flow_table(ipsec->tx_fs->ns, &ft_attr);
+ ft = ipsec_ft_create(tx->ns, 1, 0, 4);
+ if (IS_ERR(ft))
+ return PTR_ERR(ft);
+
+ tx->ft.sa = ft;
+
+ ft = ipsec_ft_create(tx->ns, 0, 0, 2);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
- netdev_err(priv->netdev, "fail to create ipsec tx ft err=%d\n", err);
- return err;
+ goto err_pol_ft;
}
- ipsec->tx_fs->ft = ft;
+ tx->ft.pol = ft;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = tx->ft.sa;
+ err = ipsec_miss_create(mdev, tx->ft.pol, &tx->pol, &dest);
+ if (err)
+ goto err_pol_miss;
return 0;
+
+err_pol_miss:
+ mlx5_destroy_flow_table(tx->ft.pol);
+err_pol_ft:
+ mlx5_destroy_flow_table(tx->ft.sa);
+ return err;
}
-static int tx_ft_get(struct mlx5e_priv *priv)
+static struct mlx5e_ipsec_tx *tx_ft_get(struct mlx5_core_dev *mdev,
+ struct mlx5e_ipsec *ipsec)
{
- struct mlx5e_ipsec_tx *tx_fs = priv->ipsec->tx_fs;
+ struct mlx5e_ipsec_tx *tx = ipsec->tx;
int err = 0;
- mutex_lock(&tx_fs->mutex);
- if (tx_fs->refcnt)
+ mutex_lock(&tx->ft.mutex);
+ if (tx->ft.refcnt)
goto skip;
- err = tx_create(priv);
+ err = tx_create(mdev, tx);
if (err)
goto out;
skip:
- tx_fs->refcnt++;
+ tx->ft.refcnt++;
out:
- mutex_unlock(&tx_fs->mutex);
- return err;
+ mutex_unlock(&tx->ft.mutex);
+ if (err)
+ return ERR_PTR(err);
+ return tx;
}
-static void tx_ft_put(struct mlx5e_priv *priv)
+static void tx_ft_put(struct mlx5e_ipsec *ipsec)
{
- struct mlx5e_ipsec_tx *tx_fs = priv->ipsec->tx_fs;
+ struct mlx5e_ipsec_tx *tx = ipsec->tx;
- mutex_lock(&tx_fs->mutex);
- tx_fs->refcnt--;
- if (tx_fs->refcnt)
+ mutex_lock(&tx->ft.mutex);
+ tx->ft.refcnt--;
+ if (tx->ft.refcnt)
goto out;
- mlx5_destroy_flow_table(tx_fs->ft);
+ mlx5_del_flow_rules(tx->pol.rule);
+ mlx5_destroy_flow_group(tx->pol.group);
+ mlx5_destroy_flow_table(tx->ft.pol);
+ mlx5_destroy_flow_table(tx->ft.sa);
out:
- mutex_unlock(&tx_fs->mutex);
+ mutex_unlock(&tx->ft.mutex);
}
-static void setup_fte_common(struct mlx5_accel_esp_xfrm_attrs *attrs,
- u32 ipsec_obj_id,
- struct mlx5_flow_spec *spec,
- struct mlx5_flow_act *flow_act)
+static void setup_fte_addr4(struct mlx5_flow_spec *spec, __be32 *saddr,
+ __be32 *daddr)
{
- u8 ip_version = attrs->is_ipv6 ? 6 : 4;
-
- spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS;
+ spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
- /* ip_version */
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
- MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, ip_version);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, 4);
+
+ memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4), saddr, 4);
+ memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4), daddr, 4);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+}
- /* Non fragmented */
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.frag);
- MLX5_SET(fte_match_param, spec->match_value, outer_headers.frag, 0);
+static void setup_fte_addr6(struct mlx5_flow_spec *spec, __be32 *saddr,
+ __be32 *daddr)
+{
+ spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, 6);
+
+ memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), saddr, 16);
+ memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), daddr, 16);
+ memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), 0xff, 16);
+ memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 0xff, 16);
+}
+
+static void setup_fte_esp(struct mlx5_flow_spec *spec)
+{
/* ESP header */
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
+
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_ESP);
+}
+static void setup_fte_spi(struct mlx5_flow_spec *spec, u32 spi)
+{
/* SPI number */
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
+
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters.outer_esp_spi);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters.outer_esp_spi, spi);
+}
+
+static void setup_fte_no_frags(struct mlx5_flow_spec *spec)
+{
+ /* Non fragmented */
+ spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.frag);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.frag, 0);
+}
+
+static void setup_fte_reg_a(struct mlx5_flow_spec *spec)
+{
+ /* Add IPsec indicator in metadata_reg_a */
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
+
+ MLX5_SET(fte_match_param, spec->match_criteria,
+ misc_parameters_2.metadata_reg_a, MLX5_ETH_WQE_FT_META_IPSEC);
MLX5_SET(fte_match_param, spec->match_value,
- misc_parameters.outer_esp_spi, attrs->spi);
-
- if (ip_version == 4) {
- memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
- outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4),
- &attrs->saddr.a4, 4);
- memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
- outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
- &attrs->daddr.a4, 4);
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
- outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
- outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
- } else {
- memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
- outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
- &attrs->saddr.a6, 16);
- memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
- outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
- &attrs->daddr.a6, 16);
- memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
- outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
- 0xff, 16);
- memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
- outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
- 0xff, 16);
- }
+ misc_parameters_2.metadata_reg_a, MLX5_ETH_WQE_FT_META_IPSEC);
+}
- flow_act->crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
- flow_act->crypto.obj_id = ipsec_obj_id;
- flow_act->flags |= FLOW_ACT_NO_APPEND;
+static void setup_fte_reg_c0(struct mlx5_flow_spec *spec, u32 reqid)
+{
+ /* Pass policy check before choosing this SA */
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
+
+ MLX5_SET(fte_match_param, spec->match_criteria,
+ misc_parameters_2.metadata_reg_c_0, reqid);
+ MLX5_SET(fte_match_param, spec->match_value,
+ misc_parameters_2.metadata_reg_c_0, reqid);
}
-static int rx_add_rule(struct mlx5e_priv *priv,
- struct mlx5e_ipsec_sa_entry *sa_entry)
+static int setup_modify_header(struct mlx5_core_dev *mdev, u32 val, u8 dir,
+ struct mlx5_flow_act *flow_act)
{
u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
- struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
+ enum mlx5_flow_namespace_type ns_type;
+ struct mlx5_modify_hdr *modify_hdr;
+
+ MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
+ switch (dir) {
+ case XFRM_DEV_OFFLOAD_IN:
+ MLX5_SET(set_action_in, action, field,
+ MLX5_ACTION_IN_FIELD_METADATA_REG_B);
+ ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
+ break;
+ case XFRM_DEV_OFFLOAD_OUT:
+ MLX5_SET(set_action_in, action, field,
+ MLX5_ACTION_IN_FIELD_METADATA_REG_C_0);
+ ns_type = MLX5_FLOW_NAMESPACE_EGRESS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ MLX5_SET(set_action_in, action, data, val);
+ MLX5_SET(set_action_in, action, offset, 0);
+ MLX5_SET(set_action_in, action, length, 32);
+
+ modify_hdr = mlx5_modify_header_alloc(mdev, ns_type, 1, action);
+ if (IS_ERR(modify_hdr)) {
+ mlx5_core_err(mdev, "Failed to allocate modify_header %ld\n",
+ PTR_ERR(modify_hdr));
+ return PTR_ERR(modify_hdr);
+ }
+
+ flow_act->modify_hdr = modify_hdr;
+ flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+ return 0;
+}
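
Together with setup_fte_reg_c0() above, this is what ties a TX policy to its SAs: the policy rule writes the template reqid into metadata register C0, and the SA rule matches only packets carrying that value. A minimal pairing sketch under those assumptions (example_link_pol_to_sa() is illustrative, not driver API):

static int example_link_pol_to_sa(struct mlx5_core_dev *mdev,
				  struct mlx5_flow_act *pol_act,
				  struct mlx5_flow_spec *sa_spec,
				  u32 reqid)
{
	int err;

	/* TX policy rule: write reqid to reg_c_0 */
	err = setup_modify_header(mdev, reqid, XFRM_DEV_OFFLOAD_OUT, pol_act);
	if (err)
		return err;

	/* TX SA rule: accept only packets stamped with this reqid */
	setup_fte_reg_c0(sa_spec, reqid);
	return 0;
}
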
+
+static int setup_pkt_reformat(struct mlx5_core_dev *mdev,
+ struct mlx5_accel_esp_xfrm_attrs *attrs,
+ struct mlx5_flow_act *flow_act)
+{
+ enum mlx5_flow_namespace_type ns_type = MLX5_FLOW_NAMESPACE_EGRESS;
+ struct mlx5_pkt_reformat_params reformat_params = {};
+ struct mlx5_pkt_reformat *pkt_reformat;
+ u8 reformatbf[16] = {};
+ __be32 spi;
+
+ if (attrs->dir == XFRM_DEV_OFFLOAD_IN) {
+ reformat_params.type = MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT;
+ ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
+ goto cmd;
+ }
+
+ if (attrs->family == AF_INET)
+ reformat_params.type =
+ MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV4;
+ else
+ reformat_params.type =
+ MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV6;
+
+ /* convert to network format */
+ spi = htonl(attrs->spi);
+ memcpy(reformatbf, &spi, 4);
+
+ reformat_params.param_0 = attrs->authsize;
+ reformat_params.size = sizeof(reformatbf);
+ reformat_params.data = &reformatbf;
+
+cmd:
+ pkt_reformat =
+ mlx5_packet_reformat_alloc(mdev, &reformat_params, ns_type);
+ if (IS_ERR(pkt_reformat))
+ return PTR_ERR(pkt_reformat);
+
+ flow_act->pkt_reformat = pkt_reformat;
+ flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ return 0;
+}
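
The TX reformat buffer built above has a simple layout: the SPI, converted to network byte order, occupies the first four bytes of a zero-padded 16-byte buffer, and param_0 carries the ICV size in dwords. A standalone user-space sketch of just the buffer construction:

#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>

/* Illustration of the layout used by setup_pkt_reformat() for TX:
 * SPI in network byte order in the first four bytes, rest zeroed.
 */
static void build_esp_reformat_buf(uint32_t spi_host_order, uint8_t buf[16])
{
	uint32_t spi = htonl(spi_host_order);

	memset(buf, 0, 16);
	memcpy(buf, &spi, sizeof(spi));
}
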
+
+static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
+{
struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
- u32 ipsec_obj_id = sa_entry->ipsec_obj_id;
- struct mlx5_modify_hdr *modify_hdr = NULL;
- struct mlx5e_accel_fs_esp_prot *fs_prot;
+ struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
+ struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
struct mlx5_flow_destination dest = {};
- struct mlx5e_accel_fs_esp *accel_esp;
struct mlx5_flow_act flow_act = {};
struct mlx5_flow_handle *rule;
- enum accel_fs_esp_type type;
struct mlx5_flow_spec *spec;
- int err = 0;
+ struct mlx5e_ipsec_rx *rx;
+ int err;
- accel_esp = priv->ipsec->rx_fs;
- type = attrs->is_ipv6 ? ACCEL_FS_ESP6 : ACCEL_FS_ESP4;
- fs_prot = &accel_esp->fs_prot[type];
-
- err = rx_ft_get(priv, type);
- if (err)
- return err;
+ rx = rx_ft_get(mdev, ipsec, attrs->family);
+ if (IS_ERR(rx))
+ return PTR_ERR(rx);
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec) {
err = -ENOMEM;
- goto out_err;
+ goto err_alloc;
}
- setup_fte_common(attrs, ipsec_obj_id, spec, &flow_act);
+ if (attrs->family == AF_INET)
+ setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
+ else
+ setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
- /* Set bit[31] ipsec marker */
- /* Set bit[23-0] ipsec_obj_id */
- MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
- MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
- MLX5_SET(set_action_in, action, data, (ipsec_obj_id | BIT(31)));
- MLX5_SET(set_action_in, action, offset, 0);
- MLX5_SET(set_action_in, action, length, 32);
+ setup_fte_spi(spec, attrs->spi);
+ setup_fte_esp(spec);
+ setup_fte_no_frags(spec);
- modify_hdr = mlx5_modify_header_alloc(priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL,
- 1, action);
- if (IS_ERR(modify_hdr)) {
- err = PTR_ERR(modify_hdr);
- netdev_err(priv->netdev,
- "fail to alloc ipsec set modify_header_id err=%d\n", err);
- modify_hdr = NULL;
- goto out_err;
+ err = setup_modify_header(mdev, sa_entry->ipsec_obj_id | BIT(31),
+ XFRM_DEV_OFFLOAD_IN, &flow_act);
+ if (err)
+ goto err_mod_header;
+
+ switch (attrs->type) {
+ case XFRM_DEV_OFFLOAD_PACKET:
+ err = setup_pkt_reformat(mdev, attrs, &flow_act);
+ if (err)
+ goto err_pkt_reformat;
+ break;
+ default:
+ break;
}
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT |
- MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+ flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
+ flow_act.crypto.obj_id = sa_entry->ipsec_obj_id;
+ flow_act.flags |= FLOW_ACT_NO_APPEND;
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- flow_act.modify_hdr = modify_hdr;
- dest.ft = fs_prot->rx_err.ft;
- rule = mlx5_add_flow_rules(fs_prot->ft, spec, &flow_act, &dest, 1);
+ dest.ft = rx->ft.status;
+ rule = mlx5_add_flow_rules(rx->ft.sa, spec, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
- netdev_err(priv->netdev, "fail to add ipsec rule attrs->action=0x%x, err=%d\n",
- attrs->action, err);
- goto out_err;
+ mlx5_core_err(mdev, "fail to add RX ipsec rule err=%d\n", err);
+ goto err_add_flow;
}
+ kvfree(spec);
- ipsec_rule->rule = rule;
- ipsec_rule->set_modify_hdr = modify_hdr;
- goto out;
-
-out_err:
- if (modify_hdr)
- mlx5_modify_header_dealloc(priv->mdev, modify_hdr);
- rx_ft_put(priv, type);
+ sa_entry->ipsec_rule.rule = rule;
+ sa_entry->ipsec_rule.modify_hdr = flow_act.modify_hdr;
+ sa_entry->ipsec_rule.pkt_reformat = flow_act.pkt_reformat;
+ return 0;
-out:
+err_add_flow:
+ if (flow_act.pkt_reformat)
+ mlx5_packet_reformat_dealloc(mdev, flow_act.pkt_reformat);
+err_pkt_reformat:
+ mlx5_modify_header_dealloc(mdev, flow_act.modify_hdr);
+err_mod_header:
kvfree(spec);
+err_alloc:
+ rx_ft_put(mdev, ipsec, attrs->family);
return err;
}
-static int tx_add_rule(struct mlx5e_priv *priv,
- struct mlx5e_ipsec_sa_entry *sa_entry)
+static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
{
+ struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
+ struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
+ struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+ struct mlx5_flow_destination dest = {};
struct mlx5_flow_act flow_act = {};
struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec;
+ struct mlx5e_ipsec_tx *tx;
int err = 0;
- err = tx_ft_get(priv);
- if (err)
- return err;
+ tx = tx_ft_get(mdev, ipsec);
+ if (IS_ERR(tx))
+ return PTR_ERR(tx);
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec) {
err = -ENOMEM;
- goto out;
+ goto err_alloc;
}
- setup_fte_common(&sa_entry->attrs, sa_entry->ipsec_obj_id, spec,
- &flow_act);
+ if (attrs->family == AF_INET)
+ setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
+ else
+ setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
+
+ setup_fte_no_frags(spec);
+
+ switch (attrs->type) {
+ case XFRM_DEV_OFFLOAD_CRYPTO:
+ setup_fte_spi(spec, attrs->spi);
+ setup_fte_esp(spec);
+ setup_fte_reg_a(spec);
+ break;
+ case XFRM_DEV_OFFLOAD_PACKET:
+ setup_fte_reg_c0(spec, attrs->reqid);
+ err = setup_pkt_reformat(mdev, attrs, &flow_act);
+ if (err)
+ goto err_pkt_reformat;
+ break;
+ default:
+ break;
+ }
- /* Add IPsec indicator in metadata_reg_a */
- spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
- MLX5_SET(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_a,
- MLX5_ETH_WQE_FT_META_IPSEC);
- MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_a,
- MLX5_ETH_WQE_FT_META_IPSEC);
-
- flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW |
- MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT;
- rule = mlx5_add_flow_rules(priv->ipsec->tx_fs->ft, spec, &flow_act, NULL, 0);
+ flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
+ flow_act.crypto.obj_id = sa_entry->ipsec_obj_id;
+ flow_act.flags |= FLOW_ACT_NO_APPEND;
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW |
+ MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest.counter_id = mlx5_fc_id(tx->fc->cnt);
+ rule = mlx5_add_flow_rules(tx->ft.sa, spec, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
- netdev_err(priv->netdev, "fail to add ipsec rule attrs->action=0x%x, err=%d\n",
- sa_entry->attrs.action, err);
- goto out;
+ mlx5_core_err(mdev, "fail to add TX ipsec rule err=%d\n", err);
+ goto err_add_flow;
}
+ kvfree(spec);
sa_entry->ipsec_rule.rule = rule;
+ sa_entry->ipsec_rule.pkt_reformat = flow_act.pkt_reformat;
+ return 0;
-out:
+err_add_flow:
+ if (flow_act.pkt_reformat)
+ mlx5_packet_reformat_dealloc(mdev, flow_act.pkt_reformat);
+err_pkt_reformat:
kvfree(spec);
+err_alloc:
+ tx_ft_put(ipsec);
+ return err;
+}
+
+static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
+{
+ struct mlx5_accel_pol_xfrm_attrs *attrs = &pol_entry->attrs;
+ struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);
+ struct mlx5_flow_destination dest[2] = {};
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ struct mlx5e_ipsec_tx *tx;
+ int err, dstn = 0;
+
+ tx = tx_ft_get(mdev, pol_entry->ipsec);
+ if (IS_ERR(tx))
+ return PTR_ERR(tx);
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec) {
+ err = -ENOMEM;
+ goto err_alloc;
+ }
+
+ if (attrs->family == AF_INET)
+ setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
+ else
+ setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
+
+ setup_fte_no_frags(spec);
+
+ err = setup_modify_header(mdev, attrs->reqid, XFRM_DEV_OFFLOAD_OUT,
+ &flow_act);
if (err)
- tx_ft_put(priv);
+ goto err_mod_header;
+
+ switch (attrs->action) {
+ case XFRM_POLICY_ALLOW:
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ break;
+ case XFRM_POLICY_BLOCK:
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest[dstn].counter_id = mlx5_fc_id(tx->fc->drop);
+ dstn++;
+ break;
+ default:
+ WARN_ON(true);
+ err = -EINVAL;
+ goto err_action;
+ }
+
+ flow_act.flags |= FLOW_ACT_NO_APPEND;
+ dest[dstn].ft = tx->ft.sa;
+ dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dstn++;
+ rule = mlx5_add_flow_rules(tx->ft.pol, spec, &flow_act, dest, dstn);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev, "fail to add TX ipsec rule err=%d\n", err);
+ goto err_action;
+ }
+
+ kvfree(spec);
+ pol_entry->ipsec_rule.rule = rule;
+ pol_entry->ipsec_rule.modify_hdr = flow_act.modify_hdr;
+ return 0;
+
+err_action:
+ mlx5_modify_header_dealloc(mdev, flow_act.modify_hdr);
+err_mod_header:
+ kvfree(spec);
+err_alloc:
+ tx_ft_put(pol_entry->ipsec);
return err;
}
-int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv,
- struct mlx5e_ipsec_sa_entry *sa_entry)
+static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
{
- if (sa_entry->attrs.action == MLX5_ACCEL_ESP_ACTION_ENCRYPT)
- return tx_add_rule(priv, sa_entry);
+ struct mlx5_accel_pol_xfrm_attrs *attrs = &pol_entry->attrs;
+ struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);
+ struct mlx5_flow_destination dest[2];
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ struct mlx5e_ipsec_rx *rx;
+ int err, dstn = 0;
+
+ rx = rx_ft_get(mdev, pol_entry->ipsec, attrs->family);
+ if (IS_ERR(rx))
+ return PTR_ERR(rx);
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec) {
+ err = -ENOMEM;
+ goto err_alloc;
+ }
- return rx_add_rule(priv, sa_entry);
+ if (attrs->family == AF_INET)
+ setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
+ else
+ setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
+
+ setup_fte_no_frags(spec);
+
+ switch (attrs->action) {
+ case XFRM_POLICY_ALLOW:
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ break;
+ case XFRM_POLICY_BLOCK:
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest[dstn].counter_id = mlx5_fc_id(rx->fc->drop);
+ dstn++;
+ break;
+ default:
+ WARN_ON(true);
+ err = -EINVAL;
+ goto err_action;
+ }
+
+ flow_act.flags |= FLOW_ACT_NO_APPEND;
+ dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[dstn].ft = rx->ft.sa;
+ dstn++;
+ rule = mlx5_add_flow_rules(rx->ft.pol, spec, &flow_act, dest, dstn);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev, "Fail to add RX IPsec policy rule err=%d\n", err);
+ goto err_action;
+ }
+
+ kvfree(spec);
+ pol_entry->ipsec_rule.rule = rule;
+ return 0;
+
+err_action:
+ kvfree(spec);
+err_alloc:
+ rx_ft_put(mdev, pol_entry->ipsec, attrs->family);
+ return err;
}
-void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_priv *priv,
- struct mlx5e_ipsec_sa_entry *sa_entry)
+static void ipsec_fs_destroy_counters(struct mlx5e_ipsec *ipsec)
+{
+ struct mlx5e_ipsec_rx *rx_ipv4 = ipsec->rx_ipv4;
+ struct mlx5_core_dev *mdev = ipsec->mdev;
+ struct mlx5e_ipsec_tx *tx = ipsec->tx;
+
+ mlx5_fc_destroy(mdev, tx->fc->drop);
+ mlx5_fc_destroy(mdev, tx->fc->cnt);
+ kfree(tx->fc);
+ mlx5_fc_destroy(mdev, rx_ipv4->fc->drop);
+ mlx5_fc_destroy(mdev, rx_ipv4->fc->cnt);
+ kfree(rx_ipv4->fc);
+}
+
+static int ipsec_fs_init_counters(struct mlx5e_ipsec *ipsec)
+{
+ struct mlx5e_ipsec_rx *rx_ipv4 = ipsec->rx_ipv4;
+ struct mlx5e_ipsec_rx *rx_ipv6 = ipsec->rx_ipv6;
+ struct mlx5_core_dev *mdev = ipsec->mdev;
+ struct mlx5e_ipsec_tx *tx = ipsec->tx;
+ struct mlx5e_ipsec_fc *fc;
+ struct mlx5_fc *counter;
+ int err;
+
+ fc = kzalloc(sizeof(*rx_ipv4->fc), GFP_KERNEL);
+ if (!fc)
+ return -ENOMEM;
+
+ /* Both IPv4 and IPv6 point to the same flow counters struct. */
+ rx_ipv4->fc = fc;
+ rx_ipv6->fc = fc;
+ counter = mlx5_fc_create(mdev, false);
+ if (IS_ERR(counter)) {
+ err = PTR_ERR(counter);
+ goto err_rx_cnt;
+ }
+
+ fc->cnt = counter;
+ counter = mlx5_fc_create(mdev, false);
+ if (IS_ERR(counter)) {
+ err = PTR_ERR(counter);
+ goto err_rx_drop;
+ }
+
+ fc->drop = counter;
+ fc = kzalloc(sizeof(*tx->fc), GFP_KERNEL);
+ if (!fc) {
+ err = -ENOMEM;
+ goto err_tx_fc;
+ }
+
+ tx->fc = fc;
+ counter = mlx5_fc_create(mdev, false);
+ if (IS_ERR(counter)) {
+ err = PTR_ERR(counter);
+ goto err_tx_cnt;
+ }
+
+ fc->cnt = counter;
+ counter = mlx5_fc_create(mdev, false);
+ if (IS_ERR(counter)) {
+ err = PTR_ERR(counter);
+ goto err_tx_drop;
+ }
+
+ fc->drop = counter;
+ return 0;
+
+err_tx_drop:
+ mlx5_fc_destroy(mdev, tx->fc->cnt);
+err_tx_cnt:
+ kfree(tx->fc);
+err_tx_fc:
+ mlx5_fc_destroy(mdev, rx_ipv4->fc->drop);
+err_rx_drop:
+ mlx5_fc_destroy(mdev, rx_ipv4->fc->cnt);
+err_rx_cnt:
+ kfree(rx_ipv4->fc);
+ return err;
+}
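
Note the design choice here: rx_ipv4 and rx_ipv6 alias a single mlx5e_ipsec_fc pair, so the RX packet/byte and drop counters aggregate both address families, and mlx5e_accel_ipsec_fs_read_stats() below only needs to query through rx_ipv4.
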
+
+void mlx5e_accel_ipsec_fs_read_stats(struct mlx5e_priv *priv, void *ipsec_stats)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_ipsec *ipsec = priv->ipsec;
+ struct mlx5e_ipsec_hw_stats *stats;
+ struct mlx5e_ipsec_fc *fc;
+
+ stats = (struct mlx5e_ipsec_hw_stats *)ipsec_stats;
+
+ stats->ipsec_rx_pkts = 0;
+ stats->ipsec_rx_bytes = 0;
+ stats->ipsec_rx_drop_pkts = 0;
+ stats->ipsec_rx_drop_bytes = 0;
+ stats->ipsec_tx_pkts = 0;
+ stats->ipsec_tx_bytes = 0;
+ stats->ipsec_tx_drop_pkts = 0;
+ stats->ipsec_tx_drop_bytes = 0;
+
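+ /* rx_ipv4 and rx_ipv6 share one counter pair (see
+ * ipsec_fs_init_counters()), so querying the IPv4 side covers both
+ * address families.
+ */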
+ fc = ipsec->rx_ipv4->fc;
+ mlx5_fc_query(mdev, fc->cnt, &stats->ipsec_rx_pkts, &stats->ipsec_rx_bytes);
+ mlx5_fc_query(mdev, fc->drop, &stats->ipsec_rx_drop_pkts,
+ &stats->ipsec_rx_drop_bytes);
+
+ fc = ipsec->tx->fc;
+ mlx5_fc_query(mdev, fc->cnt, &stats->ipsec_tx_pkts, &stats->ipsec_tx_bytes);
+ mlx5_fc_query(mdev, fc->drop, &stats->ipsec_tx_drop_pkts,
+ &stats->ipsec_tx_drop_bytes);
+}
+
+int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
+{
+ if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT)
+ return tx_add_rule(sa_entry);
+
+ return rx_add_rule(sa_entry);
+}
+
+void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
{
struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
mlx5_del_flow_rules(ipsec_rule->rule);
- if (sa_entry->attrs.action == MLX5_ACCEL_ESP_ACTION_ENCRYPT) {
- tx_ft_put(priv);
+ if (ipsec_rule->pkt_reformat)
+ mlx5_packet_reformat_dealloc(mdev, ipsec_rule->pkt_reformat);
+
+ if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT) {
+ tx_ft_put(sa_entry->ipsec);
return;
}
- mlx5_modify_header_dealloc(mdev, ipsec_rule->set_modify_hdr);
- rx_ft_put(priv,
- sa_entry->attrs.is_ipv6 ? ACCEL_FS_ESP6 : ACCEL_FS_ESP4);
+ mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);
+ rx_ft_put(mdev, sa_entry->ipsec, sa_entry->attrs.family);
}
-void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec)
+int mlx5e_accel_ipsec_fs_add_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
{
- struct mlx5e_accel_fs_esp_prot *fs_prot;
- struct mlx5e_accel_fs_esp *accel_esp;
- enum accel_fs_esp_type i;
+ if (pol_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT)
+ return tx_add_policy(pol_entry);
- if (!ipsec->rx_fs)
- return;
+ return rx_add_policy(pol_entry);
+}
+
+void mlx5e_accel_ipsec_fs_del_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
+{
+ struct mlx5e_ipsec_rule *ipsec_rule = &pol_entry->ipsec_rule;
+ struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);
- mutex_destroy(&ipsec->tx_fs->mutex);
- WARN_ON(ipsec->tx_fs->refcnt);
- kfree(ipsec->tx_fs);
+ mlx5_del_flow_rules(ipsec_rule->rule);
- accel_esp = ipsec->rx_fs;
- for (i = 0; i < ACCEL_FS_ESP_NUM_TYPES; i++) {
- fs_prot = &accel_esp->fs_prot[i];
- mutex_destroy(&fs_prot->prot_mutex);
- WARN_ON(fs_prot->refcnt);
+ if (pol_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
+ rx_ft_put(mdev, pol_entry->ipsec, pol_entry->attrs.family);
+ return;
}
- kfree(ipsec->rx_fs);
+
+ mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);
+ tx_ft_put(pol_entry->ipsec);
+}
+
+void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec)
+{
+ if (!ipsec->tx)
+ return;
+
+ ipsec_fs_destroy_counters(ipsec);
+ mutex_destroy(&ipsec->tx->ft.mutex);
+ WARN_ON(ipsec->tx->ft.refcnt);
+ kfree(ipsec->tx);
+
+ mutex_destroy(&ipsec->rx_ipv4->ft.mutex);
+ WARN_ON(ipsec->rx_ipv4->ft.refcnt);
+ kfree(ipsec->rx_ipv4);
+
+ mutex_destroy(&ipsec->rx_ipv6->ft.mutex);
+ WARN_ON(ipsec->rx_ipv6->ft.refcnt);
+ kfree(ipsec->rx_ipv6);
}
int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec)
{
- struct mlx5e_accel_fs_esp_prot *fs_prot;
- struct mlx5e_accel_fs_esp *accel_esp;
struct mlx5_flow_namespace *ns;
- enum accel_fs_esp_type i;
int err = -ENOMEM;
ns = mlx5_get_flow_namespace(ipsec->mdev,
@@ -581,26 +1032,34 @@ int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec)
if (!ns)
return -EOPNOTSUPP;
- ipsec->tx_fs = kzalloc(sizeof(*ipsec->tx_fs), GFP_KERNEL);
- if (!ipsec->tx_fs)
+ ipsec->tx = kzalloc(sizeof(*ipsec->tx), GFP_KERNEL);
+ if (!ipsec->tx)
return -ENOMEM;
- ipsec->rx_fs = kzalloc(sizeof(*ipsec->rx_fs), GFP_KERNEL);
- if (!ipsec->rx_fs)
- goto err_rx;
+ ipsec->rx_ipv4 = kzalloc(sizeof(*ipsec->rx_ipv4), GFP_KERNEL);
+ if (!ipsec->rx_ipv4)
+ goto err_rx_ipv4;
- mutex_init(&ipsec->tx_fs->mutex);
- ipsec->tx_fs->ns = ns;
+ ipsec->rx_ipv6 = kzalloc(sizeof(*ipsec->rx_ipv6), GFP_KERNEL);
+ if (!ipsec->rx_ipv6)
+ goto err_rx_ipv6;
- accel_esp = ipsec->rx_fs;
- for (i = 0; i < ACCEL_FS_ESP_NUM_TYPES; i++) {
- fs_prot = &accel_esp->fs_prot[i];
- mutex_init(&fs_prot->prot_mutex);
- }
+ err = ipsec_fs_init_counters(ipsec);
+ if (err)
+ goto err_counters;
+
+ mutex_init(&ipsec->tx->ft.mutex);
+ mutex_init(&ipsec->rx_ipv4->ft.mutex);
+ mutex_init(&ipsec->rx_ipv6->ft.mutex);
+ ipsec->tx->ns = ns;
return 0;
-err_rx:
- kfree(ipsec->tx_fs);
+err_counters:
+ kfree(ipsec->rx_ipv6);
+err_rx_ipv6:
+ kfree(ipsec->rx_ipv4);
+err_rx_ipv4:
+ kfree(ipsec->tx);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
index 792724ce7336..8e3614218fc4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
@@ -2,9 +2,14 @@
/* Copyright (c) 2017, Mellanox Technologies inc. All rights reserved. */
#include "mlx5_core.h"
+#include "en.h"
#include "ipsec.h"
#include "lib/mlx5.h"
+enum {
+ MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET,
+};
+
u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
{
u32 caps = 0;
@@ -31,6 +36,12 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
MLX5_CAP_ETH(mdev, insert_trailer) && MLX5_CAP_ETH(mdev, swp))
caps |= MLX5_IPSEC_CAP_CRYPTO;
+ if (MLX5_CAP_IPSEC(mdev, ipsec_full_offload) &&
+ MLX5_CAP_FLOWTABLE_NIC_TX(mdev, reformat_add_esp_trasport) &&
+ MLX5_CAP_FLOWTABLE_NIC_RX(mdev, reformat_del_esp_trasport) &&
+ MLX5_CAP_FLOWTABLE_NIC_RX(mdev, decap))
+ caps |= MLX5_IPSEC_CAP_PACKET_OFFLOAD;
+
if (!caps)
return 0;
@@ -46,6 +57,52 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
}
EXPORT_SYMBOL_GPL(mlx5_ipsec_device_caps);
+static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn,
+ struct mlx5_accel_esp_xfrm_attrs *attrs)
+{
+ void *aso_ctx;
+
+ aso_ctx = MLX5_ADDR_OF(ipsec_obj, obj, ipsec_aso);
+ if (attrs->esn_trigger) {
+ MLX5_SET(ipsec_aso, aso_ctx, esn_event_arm, 1);
+
+ if (attrs->dir == XFRM_DEV_OFFLOAD_IN) {
+ MLX5_SET(ipsec_aso, aso_ctx, window_sz,
+ attrs->replay_window / 64);
+ MLX5_SET(ipsec_aso, aso_ctx, mode,
+ MLX5_IPSEC_ASO_REPLAY_PROTECTION);
+ }
+ }
+
+ /* ASO context */
+ MLX5_SET(ipsec_obj, obj, ipsec_aso_access_pd, pdn);
+ MLX5_SET(ipsec_obj, obj, full_offload, 1);
+ MLX5_SET(ipsec_aso, aso_ctx, valid, 1);
+ /* MLX5_IPSEC_ASO_REG_C_4_5 is a type C register that is used
+ * in flow steering to perform matching against. Be aware that
+ * this register was chosen arbitrarily and can't be used in
+ * other places as long as IPsec packet offload is active.
+ */
+ MLX5_SET(ipsec_obj, obj, aso_return_reg, MLX5_IPSEC_ASO_REG_C_4_5);
+ if (attrs->dir == XFRM_DEV_OFFLOAD_OUT)
+ MLX5_SET(ipsec_aso, aso_ctx, mode, MLX5_IPSEC_ASO_INC_SN);
+
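+ /* remove_flow_pkt_cnt counts down in HW; when it reaches zero an
+ * event fires and, with remove_flow_enable set, the flow is removed.
+ */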
+ if (attrs->hard_packet_limit != XFRM_INF) {
+ MLX5_SET(ipsec_aso, aso_ctx, remove_flow_pkt_cnt,
+ lower_32_bits(attrs->hard_packet_limit));
+ MLX5_SET(ipsec_aso, aso_ctx, hard_lft_arm, 1);
+ MLX5_SET(ipsec_aso, aso_ctx, remove_flow_enable, 1);
+ }
+
+ if (attrs->soft_packet_limit != XFRM_INF) {
+ MLX5_SET(ipsec_aso, aso_ctx, remove_flow_soft_lft,
+ lower_32_bits(attrs->soft_packet_limit));
+
+ MLX5_SET(ipsec_aso, aso_ctx, soft_lft_arm, 1);
+ }
+}
+
static int mlx5_create_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
{
struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
@@ -54,6 +111,7 @@ static int mlx5_create_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
u32 in[MLX5_ST_SZ_DW(create_ipsec_obj_in)] = {};
void *obj, *salt_p, *salt_iv_p;
+ struct mlx5e_hw_objs *res;
int err;
obj = MLX5_ADDR_OF(create_ipsec_obj_in, in, ipsec_object);
@@ -66,11 +124,10 @@ static int mlx5_create_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
salt_iv_p = MLX5_ADDR_OF(ipsec_obj, obj, implicit_iv);
memcpy(salt_iv_p, &aes_gcm->seq_iv, sizeof(aes_gcm->seq_iv));
/* esn */
- if (attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) {
+ if (attrs->esn_trigger) {
MLX5_SET(ipsec_obj, obj, esn_en, 1);
MLX5_SET(ipsec_obj, obj, esn_msb, attrs->esn);
- if (attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP)
- MLX5_SET(ipsec_obj, obj, esn_overlap, 1);
+ MLX5_SET(ipsec_obj, obj, esn_overlap, attrs->esn_overlap);
}
MLX5_SET(ipsec_obj, obj, dekn, sa_entry->enc_key_id);
@@ -81,6 +138,10 @@ static int mlx5_create_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
MLX5_GENERAL_OBJECT_TYPES_IPSEC);
+ res = &mdev->mlx5e_res.hw_objs;
+ if (attrs->type == XFRM_DEV_OFFLOAD_PACKET)
+ mlx5e_ipsec_packet_setup(obj, res->pdn, attrs);
+
err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
if (!err)
sa_entry->ipsec_obj_id =
@@ -152,7 +213,7 @@ static int mlx5_modify_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
void *obj;
int err;
- if (!(attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED))
+ if (!attrs->esn_trigger)
return 0;
general_obj_types = MLX5_CAP_GEN_64(mdev, general_obj_types);
@@ -183,8 +244,7 @@ static int mlx5_modify_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
MLX5_MODIFY_IPSEC_BITMASK_ESN_OVERLAP |
MLX5_MODIFY_IPSEC_BITMASK_ESN_MSB);
MLX5_SET(ipsec_obj, obj, esn_msb, attrs->esn);
- if (attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP)
- MLX5_SET(ipsec_obj, obj, esn_overlap, 1);
+ MLX5_SET(ipsec_obj, obj, esn_overlap, attrs->esn_overlap);
/* general object fields set */
MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
@@ -203,3 +263,234 @@ void mlx5_accel_esp_modify_xfrm(struct mlx5e_ipsec_sa_entry *sa_entry,
memcpy(&sa_entry->attrs, attrs, sizeof(sa_entry->attrs));
}
+
+static void
+mlx5e_ipsec_aso_update_esn(struct mlx5e_ipsec_sa_entry *sa_entry,
+ const struct mlx5_accel_esp_xfrm_attrs *attrs)
+{
+ struct mlx5_wqe_aso_ctrl_seg data = {};
+
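+ /* Post a bitwise 64-bit ASO modify that sets a single bit to re-arm
+ * the ESN event after the SW ESN state has been advanced.
+ */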
+ data.data_mask_mode = MLX5_ASO_DATA_MASK_MODE_BITWISE_64BIT << 6;
+ data.condition_1_0_operand = MLX5_ASO_ALWAYS_TRUE | MLX5_ASO_ALWAYS_TRUE
+ << 4;
+ data.data_offset_condition_operand = MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET;
+ data.bitwise_data = cpu_to_be64(BIT_ULL(54));
+ data.data_mask = data.bitwise_data;
+
+ mlx5e_ipsec_aso_query(sa_entry, &data);
+}
+
+static void mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry,
+ u32 mode_param)
+{
+ struct mlx5_accel_esp_xfrm_attrs attrs = {};
+
+ if (mode_param < MLX5E_IPSEC_ESN_SCOPE_MID) {
+ sa_entry->esn_state.esn++;
+ sa_entry->esn_state.overlap = 0;
+ } else {
+ sa_entry->esn_state.overlap = 1;
+ }
+
+ mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &attrs);
+ mlx5_accel_esp_modify_xfrm(sa_entry, &attrs);
+ mlx5e_ipsec_aso_update_esn(sa_entry, &attrs);
+}
+
+static void mlx5e_ipsec_handle_event(struct work_struct *_work)
+{
+ struct mlx5e_ipsec_work *work =
+ container_of(_work, struct mlx5e_ipsec_work, work);
+ struct mlx5_accel_esp_xfrm_attrs *attrs;
+ struct mlx5e_ipsec_sa_entry *sa_entry;
+ struct mlx5e_ipsec_aso *aso;
+ struct mlx5e_ipsec *ipsec;
+ int ret;
+
+ sa_entry = xa_load(&work->ipsec->sadb, work->id);
+ if (!sa_entry)
+ goto out;
+
+ ipsec = sa_entry->ipsec;
+ aso = ipsec->aso;
+ attrs = &sa_entry->attrs;
+
+ spin_lock(&sa_entry->x->lock);
+ ret = mlx5e_ipsec_aso_query(sa_entry, NULL);
+ if (ret)
+ goto unlock;
+
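+ /* While use_cache is set, mlx5e_ipsec_aso_query() returns early, so
+ * callees under the state lock (e.g. xfrm_state_check_expire() ->
+ * mlx5e_ipsec_aso_update_curlft()) read the context fetched above
+ * instead of posting a new WQE.
+ */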
+ aso->use_cache = true;
+ if (attrs->esn_trigger &&
+ !MLX5_GET(ipsec_aso, aso->ctx, esn_event_arm)) {
+ u32 mode_param = MLX5_GET(ipsec_aso, aso->ctx, mode_parameter);
+
+ mlx5e_ipsec_update_esn_state(sa_entry, mode_param);
+ }
+
+ if (attrs->soft_packet_limit != XFRM_INF)
+ if (!MLX5_GET(ipsec_aso, aso->ctx, soft_lft_arm) ||
+ !MLX5_GET(ipsec_aso, aso->ctx, hard_lft_arm) ||
+ !MLX5_GET(ipsec_aso, aso->ctx, remove_flow_enable))
+ xfrm_state_check_expire(sa_entry->x);
+ aso->use_cache = false;
+
+unlock:
+ spin_unlock(&sa_entry->x->lock);
+out:
+ kfree(work);
+}
+
+static int mlx5e_ipsec_event(struct notifier_block *nb, unsigned long event,
+ void *data)
+{
+ struct mlx5e_ipsec *ipsec = container_of(nb, struct mlx5e_ipsec, nb);
+ struct mlx5_eqe_obj_change *object;
+ struct mlx5e_ipsec_work *work;
+ struct mlx5_eqe *eqe = data;
+ u16 type;
+
+ if (event != MLX5_EVENT_TYPE_OBJECT_CHANGE)
+ return NOTIFY_DONE;
+
+ object = &eqe->data.obj_change;
+ type = be16_to_cpu(object->obj_type);
+
+ if (type != MLX5_GENERAL_OBJECT_TYPES_IPSEC)
+ return NOTIFY_DONE;
+
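+ /* Notifier runs in atomic (EQ) context; allocate atomically and
+ * defer the FW query to the IPsec workqueue.
+ */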
+ work = kmalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work)
+ return NOTIFY_DONE;
+
+ INIT_WORK(&work->work, mlx5e_ipsec_handle_event);
+ work->ipsec = ipsec;
+ work->id = be32_to_cpu(object->obj_id);
+
+ queue_work(ipsec->wq, &work->work);
+ return NOTIFY_OK;
+}
+
+int mlx5e_ipsec_aso_init(struct mlx5e_ipsec *ipsec)
+{
+ struct mlx5_core_dev *mdev = ipsec->mdev;
+ struct mlx5e_ipsec_aso *aso;
+ struct mlx5e_hw_objs *res;
+ struct device *pdev;
+ int err;
+
+ aso = kzalloc(sizeof(*ipsec->aso), GFP_KERNEL);
+ if (!aso)
+ return -ENOMEM;
+
+ res = &mdev->mlx5e_res.hw_objs;
+
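+ /* Map the ASO context buffer so the device can DMA query results
+ * into it.
+ */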
+ pdev = mlx5_core_dma_dev(mdev);
+ aso->dma_addr = dma_map_single(pdev, aso->ctx, sizeof(aso->ctx),
+ DMA_BIDIRECTIONAL);
+ err = dma_mapping_error(pdev, aso->dma_addr);
+ if (err)
+ goto err_dma;
+
+ aso->aso = mlx5_aso_create(mdev, res->pdn);
+ if (IS_ERR(aso->aso)) {
+ err = PTR_ERR(aso->aso);
+ goto err_aso_create;
+ }
+
+ ipsec->nb.notifier_call = mlx5e_ipsec_event;
+ mlx5_notifier_register(mdev, &ipsec->nb);
+
+ ipsec->aso = aso;
+ return 0;
+
+err_aso_create:
+ dma_unmap_single(pdev, aso->dma_addr, sizeof(aso->ctx),
+ DMA_BIDIRECTIONAL);
+err_dma:
+ kfree(aso);
+ return err;
+}
+
+void mlx5e_ipsec_aso_cleanup(struct mlx5e_ipsec *ipsec)
+{
+ struct mlx5_core_dev *mdev = ipsec->mdev;
+ struct mlx5e_ipsec_aso *aso;
+ struct device *pdev;
+
+ aso = ipsec->aso;
+ pdev = mlx5_core_dma_dev(mdev);
+
+ mlx5_notifier_unregister(mdev, &ipsec->nb);
+ mlx5_aso_destroy(aso->aso);
+ dma_unmap_single(pdev, aso->dma_addr, sizeof(aso->ctx),
+ DMA_BIDIRECTIONAL);
+ kfree(aso);
+}
+
+static void mlx5e_ipsec_aso_copy(struct mlx5_wqe_aso_ctrl_seg *ctrl,
+ struct mlx5_wqe_aso_ctrl_seg *data)
+{
+ if (!data)
+ return;
+
+ ctrl->data_mask_mode = data->data_mask_mode;
+ ctrl->condition_1_0_operand = data->condition_1_0_operand;
+ ctrl->condition_1_0_offset = data->condition_1_0_offset;
+ ctrl->data_offset_condition_operand = data->data_offset_condition_operand;
+ ctrl->condition_0_data = data->condition_0_data;
+ ctrl->condition_0_mask = data->condition_0_mask;
+ ctrl->condition_1_data = data->condition_1_data;
+ ctrl->condition_1_mask = data->condition_1_mask;
+ ctrl->bitwise_data = data->bitwise_data;
+ ctrl->data_mask = data->data_mask;
+}
+
+int mlx5e_ipsec_aso_query(struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5_wqe_aso_ctrl_seg *data)
+{
+ struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+ struct mlx5e_ipsec_aso *aso = ipsec->aso;
+ struct mlx5_core_dev *mdev = ipsec->mdev;
+ struct mlx5_wqe_aso_ctrl_seg *ctrl;
+ struct mlx5e_hw_objs *res;
+ struct mlx5_aso_wqe *wqe;
+ u8 ds_cnt;
+
+ lockdep_assert_held(&sa_entry->x->lock);
+ if (aso->use_cache)
+ return 0;
+
+ res = &mdev->mlx5e_res.hw_objs;
+
+ memset(aso->ctx, 0, sizeof(aso->ctx));
+ wqe = mlx5_aso_get_wqe(aso->aso);
+ ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
+ mlx5_aso_build_wqe(aso->aso, ds_cnt, wqe, sa_entry->ipsec_obj_id,
+ MLX5_ACCESS_ASO_OPC_MOD_IPSEC);
+
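+ /* Point the WQE at the DMA-mapped context; READ_EN asks the device
+ * to return the current ASO state into that buffer.
+ */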
+ ctrl = &wqe->aso_ctrl;
+ ctrl->va_l =
+ cpu_to_be32(lower_32_bits(aso->dma_addr) | ASO_CTRL_READ_EN);
+ ctrl->va_h = cpu_to_be32(upper_32_bits(aso->dma_addr));
+ ctrl->l_key = cpu_to_be32(res->mkey);
+ mlx5e_ipsec_aso_copy(ctrl, data);
+
+ mlx5_aso_post_wqe(aso->aso, false, &wqe->ctrl);
+ return mlx5_aso_poll_cq(aso->aso, false);
+}
+
+void mlx5e_ipsec_aso_update_curlft(struct mlx5e_ipsec_sa_entry *sa_entry,
+ u64 *packets)
+{
+ struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+ struct mlx5e_ipsec_aso *aso = ipsec->aso;
+ u64 hard_cnt;
+
+ hard_cnt = MLX5_GET(ipsec_aso, aso->ctx, remove_flow_pkt_cnt);
+ /* HW decreases the limit till it reaches zero to fire an event.
+ * Fix up the calculation so the returned count is the total number
+ * of passed packets and not how many are left.
+ */
+ *packets = sa_entry->attrs.hard_packet_limit - hard_cnt;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
index 6859f1c1a831..eab5bc718771 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -312,27 +312,31 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
struct mlx5_cqe64 *cqe)
{
u32 ipsec_meta_data = be32_to_cpu(cqe->ft_metadata);
- struct mlx5e_priv *priv;
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_ipsec *ipsec = priv->ipsec;
+ struct mlx5e_ipsec_sa_entry *sa_entry;
struct xfrm_offload *xo;
- struct xfrm_state *xs;
struct sec_path *sp;
u32 sa_handle;
sa_handle = MLX5_IPSEC_METADATA_HANDLE(ipsec_meta_data);
- priv = netdev_priv(netdev);
sp = secpath_set(skb);
if (unlikely(!sp)) {
- atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sp_alloc);
+ atomic64_inc(&ipsec->sw_stats.ipsec_rx_drop_sp_alloc);
return;
}
- xs = mlx5e_ipsec_sadb_rx_lookup(priv->ipsec, sa_handle);
- if (unlikely(!xs)) {
- atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sadb_miss);
+ rcu_read_lock();
+ sa_entry = xa_load(&ipsec->sadb, sa_handle);
+ if (unlikely(!sa_entry)) {
+ rcu_read_unlock();
+ atomic64_inc(&ipsec->sw_stats.ipsec_rx_drop_sadb_miss);
return;
}
+ xfrm_state_hold(sa_entry->x);
+ rcu_read_unlock();
- sp->xvec[sp->len++] = xs;
+ sp->xvec[sp->len++] = sa_entry->x;
sp->olen++;
xo = xfrm_offload(skb);
@@ -349,6 +353,6 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
xo->status = CRYPTO_INVALID_PACKET_SYNTAX;
break;
default:
- atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_syndrome);
+ atomic64_inc(&ipsec->sw_stats.ipsec_rx_drop_syndrome);
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
index 9de84821dafb..e0e36a09721c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
@@ -37,6 +37,17 @@
#include "en.h"
#include "ipsec.h"
+static const struct counter_desc mlx5e_ipsec_hw_stats_desc[] = {
+ { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_rx_pkts) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_rx_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_rx_drop_pkts) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_rx_drop_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_tx_pkts) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_tx_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_tx_drop_pkts) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_tx_drop_bytes) },
+};
+
static const struct counter_desc mlx5e_ipsec_sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_rx_drop_sp_alloc) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_rx_drop_sadb_miss) },
@@ -50,8 +61,48 @@ static const struct counter_desc mlx5e_ipsec_sw_stats_desc[] = {
#define MLX5E_READ_CTR_ATOMIC64(ptr, dsc, i) \
atomic64_read((atomic64_t *)((char *)(ptr) + (dsc)[i].offset))
+#define NUM_IPSEC_HW_COUNTERS ARRAY_SIZE(mlx5e_ipsec_hw_stats_desc)
#define NUM_IPSEC_SW_COUNTERS ARRAY_SIZE(mlx5e_ipsec_sw_stats_desc)
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_hw)
+{
+ if (!priv->ipsec)
+ return 0;
+
+ return NUM_IPSEC_HW_COUNTERS;
+}
+
+static inline MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ipsec_hw) {}
+
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ipsec_hw)
+{
+ unsigned int i;
+
+ if (!priv->ipsec)
+ return idx;
+
+ for (i = 0; i < NUM_IPSEC_HW_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ mlx5e_ipsec_hw_stats_desc[i].format);
+
+ return idx;
+}
+
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_hw)
+{
+ int i;
+
+ if (!priv->ipsec)
+ return idx;
+
+ mlx5e_accel_ipsec_fs_read_stats(priv, &priv->ipsec->hw_stats);
+ for (i = 0; i < NUM_IPSEC_HW_COUNTERS; i++)
+ data[idx++] = MLX5E_READ_CTR_ATOMIC64(&priv->ipsec->hw_stats,
+ mlx5e_ipsec_hw_stats_desc, i);
+
+ return idx;
+}
+
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_sw)
{
return priv->ipsec ? NUM_IPSEC_SW_COUNTERS : 0;
@@ -81,4 +132,5 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_sw)
return idx;
}
+MLX5E_DEFINE_STATS_GRP(ipsec_hw, 0);
MLX5E_DEFINE_STATS_GRP(ipsec_sw, 0);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index 2e0335246967..78072bf93f3f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -125,10 +125,8 @@ mlx5e_get_ktls_tx_priv_ctx(struct tls_context *tls_ctx)
/* struct for callback API management */
struct mlx5e_async_ctx {
struct mlx5_async_work context;
- struct mlx5_async_ctx async_ctx;
- struct work_struct work;
+ struct mlx5_async_ctx *async_ctx;
struct mlx5e_ktls_offload_context_tx *priv_tx;
- struct completion complete;
int err;
union {
u32 out_create[MLX5_ST_SZ_DW(create_tis_out)];
@@ -136,34 +134,33 @@ struct mlx5e_async_ctx {
};
};
-static struct mlx5e_async_ctx *mlx5e_bulk_async_init(struct mlx5_core_dev *mdev, int n)
+struct mlx5e_bulk_async_ctx {
+ struct mlx5_async_ctx async_ctx;
+ DECLARE_FLEX_ARRAY(struct mlx5e_async_ctx, arr);
+};
+
+static struct mlx5e_bulk_async_ctx *mlx5e_bulk_async_init(struct mlx5_core_dev *mdev, int n)
{
- struct mlx5e_async_ctx *bulk_async;
+ struct mlx5e_bulk_async_ctx *bulk_async;
+ int sz;
int i;
- bulk_async = kvcalloc(n, sizeof(struct mlx5e_async_ctx), GFP_KERNEL);
+ sz = struct_size(bulk_async, arr, n);
+ bulk_async = kvzalloc(sz, GFP_KERNEL);
if (!bulk_async)
return NULL;
- for (i = 0; i < n; i++) {
- struct mlx5e_async_ctx *async = &bulk_async[i];
+ mlx5_cmd_init_async_ctx(mdev, &bulk_async->async_ctx);
- mlx5_cmd_init_async_ctx(mdev, &async->async_ctx);
- init_completion(&async->complete);
- }
+ for (i = 0; i < n; i++)
+ bulk_async->arr[i].async_ctx = &bulk_async->async_ctx;
return bulk_async;
}
-static void mlx5e_bulk_async_cleanup(struct mlx5e_async_ctx *bulk_async, int n)
+static void mlx5e_bulk_async_cleanup(struct mlx5e_bulk_async_ctx *bulk_async)
{
- int i;
-
- for (i = 0; i < n; i++) {
- struct mlx5e_async_ctx *async = &bulk_async[i];
-
- mlx5_cmd_cleanup_async_ctx(&async->async_ctx);
- }
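+ /* mlx5_cmd_cleanup_async_ctx() waits for all callbacks still in
+ * flight on the shared context, which is why the per-entry
+ * completions could be dropped.
+ */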
+ mlx5_cmd_cleanup_async_ctx(&bulk_async->async_ctx);
kvfree(bulk_async);
}
@@ -176,12 +173,10 @@ static void create_tis_callback(int status, struct mlx5_async_work *context)
if (status) {
async->err = status;
priv_tx->create_err = 1;
- goto out;
+ return;
}
priv_tx->tisn = MLX5_GET(create_tis_out, async->out_create, tisn);
-out:
- complete(&async->complete);
}
static void destroy_tis_callback(int status, struct mlx5_async_work *context)
@@ -190,7 +185,6 @@ static void destroy_tis_callback(int status, struct mlx5_async_work *context)
container_of(context, struct mlx5e_async_ctx, context);
struct mlx5e_ktls_offload_context_tx *priv_tx = async->priv_tx;
- complete(&async->complete);
kfree(priv_tx);
}
@@ -214,7 +208,7 @@ mlx5e_tls_priv_tx_init(struct mlx5_core_dev *mdev, struct mlx5e_tls_sw_stats *sw
goto err_out;
} else {
async->priv_tx = priv_tx;
- err = mlx5e_ktls_create_tis_cb(mdev, &async->async_ctx,
+ err = mlx5e_ktls_create_tis_cb(mdev, async->async_ctx,
async->out_create, sizeof(async->out_create),
create_tis_callback, &async->context);
if (err)
@@ -232,13 +226,12 @@ static void mlx5e_tls_priv_tx_cleanup(struct mlx5e_ktls_offload_context_tx *priv
struct mlx5e_async_ctx *async)
{
if (priv_tx->create_err) {
- complete(&async->complete);
kfree(priv_tx);
return;
}
async->priv_tx = priv_tx;
mlx5e_ktls_destroy_tis_cb(priv_tx->mdev, priv_tx->tisn,
- &async->async_ctx,
+ async->async_ctx,
async->out_destroy, sizeof(async->out_destroy),
destroy_tis_callback, &async->context);
}
@@ -247,7 +240,7 @@ static void mlx5e_tls_priv_tx_list_cleanup(struct mlx5_core_dev *mdev,
struct list_head *list, int size)
{
struct mlx5e_ktls_offload_context_tx *obj, *n;
- struct mlx5e_async_ctx *bulk_async;
+ struct mlx5e_bulk_async_ctx *bulk_async;
int i;
bulk_async = mlx5e_bulk_async_init(mdev, size);
@@ -256,16 +249,11 @@ static void mlx5e_tls_priv_tx_list_cleanup(struct mlx5_core_dev *mdev,
i = 0;
list_for_each_entry_safe(obj, n, list, list_node) {
- mlx5e_tls_priv_tx_cleanup(obj, &bulk_async[i]);
+ mlx5e_tls_priv_tx_cleanup(obj, &bulk_async->arr[i]);
i++;
}
- for (i = 0; i < size; i++) {
- struct mlx5e_async_ctx *async = &bulk_async[i];
-
- wait_for_completion(&async->complete);
- }
- mlx5e_bulk_async_cleanup(bulk_async, size);
+ mlx5e_bulk_async_cleanup(bulk_async);
}
/* Recycling pool API */
@@ -291,7 +279,7 @@ static void create_work(struct work_struct *work)
struct mlx5e_tls_tx_pool *pool =
container_of(work, struct mlx5e_tls_tx_pool, create_work);
struct mlx5e_ktls_offload_context_tx *obj;
- struct mlx5e_async_ctx *bulk_async;
+ struct mlx5e_bulk_async_ctx *bulk_async;
LIST_HEAD(local_list);
int i, j, err = 0;
@@ -300,7 +288,7 @@ static void create_work(struct work_struct *work)
return;
for (i = 0; i < MLX5E_TLS_TX_POOL_BULK; i++) {
- obj = mlx5e_tls_priv_tx_init(pool->mdev, pool->sw_stats, &bulk_async[i]);
+ obj = mlx5e_tls_priv_tx_init(pool->mdev, pool->sw_stats, &bulk_async->arr[i]);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
break;
@@ -309,14 +297,13 @@ static void create_work(struct work_struct *work)
}
for (j = 0; j < i; j++) {
- struct mlx5e_async_ctx *async = &bulk_async[j];
+ struct mlx5e_async_ctx *async = &bulk_async->arr[j];
- wait_for_completion(&async->complete);
if (!err && async->err)
err = async->err;
}
atomic64_add(i, &pool->sw_stats->tx_tls_pool_alloc);
- mlx5e_bulk_async_cleanup(bulk_async, MLX5E_TLS_TX_POOL_BULK);
+ mlx5e_bulk_async_cleanup(bulk_async);
if (err)
goto err_out;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
index f900709639f6..9369a580743e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
@@ -186,7 +186,7 @@ static int mlx5e_macsec_aso_reg_mr(struct mlx5_core_dev *mdev, struct mlx5e_macs
return err;
}
- dma_device = &mdev->pdev->dev;
+ dma_device = mlx5_core_dma_dev(mdev);
dma_addr = dma_map_single(dma_device, umr->ctx, sizeof(umr->ctx), DMA_BIDIRECTIONAL);
err = dma_mapping_error(dma_device, dma_addr);
if (err) {
@@ -1299,12 +1299,12 @@ static void macsec_aso_build_wqe_ctrl_seg(struct mlx5e_macsec_aso *macsec_aso,
struct mlx5_wqe_aso_ctrl_seg *aso_ctrl,
struct mlx5_aso_ctrl_param *param)
{
+ struct mlx5e_macsec_umr *umr = macsec_aso->umr;
+
memset(aso_ctrl, 0, sizeof(*aso_ctrl));
- if (macsec_aso->umr->dma_addr) {
- aso_ctrl->va_l = cpu_to_be32(macsec_aso->umr->dma_addr | ASO_CTRL_READ_EN);
- aso_ctrl->va_h = cpu_to_be32((u64)macsec_aso->umr->dma_addr >> 32);
- aso_ctrl->l_key = cpu_to_be32(macsec_aso->umr->mkey);
- }
+ aso_ctrl->va_l = cpu_to_be32(umr->dma_addr | ASO_CTRL_READ_EN);
+ aso_ctrl->va_h = cpu_to_be32((u64)umr->dma_addr >> 32);
+ aso_ctrl->l_key = cpu_to_be32(umr->mkey);
if (!param)
return;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index 0ae1865086ff..bed0c2d043e7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -57,7 +57,6 @@ struct mlx5e_arfs_tables {
struct arfs_table arfs_tables[ARFS_NUM_TYPES];
/* Protect aRFS rules list */
spinlock_t arfs_lock;
- struct list_head rules;
int last_filter_id;
struct workqueue_struct *wq;
};
@@ -376,7 +375,6 @@ int mlx5e_arfs_create_tables(struct mlx5e_flow_steering *fs,
return -ENOMEM;
spin_lock_init(&arfs->arfs_lock);
- INIT_LIST_HEAD(&arfs->rules);
arfs->wq = create_singlethread_workqueue("mlx5e_arfs");
if (!arfs->wq)
goto err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 1728e197558d..7708acc9b2ab 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -2445,4 +2445,5 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
.get_eth_mac_stats = mlx5e_get_eth_mac_stats,
.get_eth_ctrl_stats = mlx5e_get_eth_ctrl_stats,
.get_rmon_stats = mlx5e_get_rmon_stats,
+ .get_link_ext_stats = mlx5e_get_link_ext_stats
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 5e41dfdf79c8..8d36e2de53a9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -208,7 +208,7 @@ static u16 mlx5e_mpwrq_umr_octowords(u32 entries, enum mlx5e_mpwrq_umr_mode umr_
u8 umr_entry_size = mlx5e_mpwrq_umr_entry_size(umr_mode);
u32 sz;
- sz = ALIGN(entries * umr_entry_size, MLX5_UMR_MTT_ALIGNMENT);
+ sz = ALIGN(entries * umr_entry_size, MLX5_UMR_FLEX_ALIGNMENT);
return sz / MLX5_OCTWORD;
}
@@ -1206,6 +1206,13 @@ int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param,
MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index))
__set_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &rq->state);
+ /* For enhanced CQE compression packet processing, decompress the
+ * session according to the enhanced layout.
+ */
+ if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS) &&
+ MLX5_CAP_GEN(mdev, enhanced_cqe_compression))
+ __set_bit(MLX5E_RQ_STATE_MINI_CQE_ENHANCED, &rq->state);
+
return 0;
err_destroy_rq:
@@ -1896,6 +1903,7 @@ static int mlx5e_alloc_cq_common(struct mlx5e_priv *priv,
struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
cqe->op_own = 0xf1;
+ cqe->validity_iteration_count = 0xff;
}
cq->mdev = mdev;
@@ -3062,7 +3070,10 @@ int mlx5e_open_locked(struct net_device *netdev)
if (err)
goto err_clear_state_opened_flag;
- priv->profile->update_rx(priv);
+ err = priv->profile->update_rx(priv);
+ if (err)
+ goto err_close_channels;
+
mlx5e_selq_apply(&priv->selq);
mlx5e_activate_priv_channels(priv);
mlx5e_apply_traps(priv, true);
@@ -3072,6 +3083,8 @@ int mlx5e_open_locked(struct net_device *netdev)
mlx5e_queue_update_stats(priv);
return 0;
+err_close_channels:
+ mlx5e_close_channels(&priv->channels);
err_clear_state_opened_flag:
clear_bit(MLX5E_STATE_OPENED, &priv->state);
mlx5e_selq_cancel(&priv->selq);
@@ -4898,7 +4911,6 @@ const struct net_device_ops mlx5e_netdev_ops = {
.ndo_has_offload_stats = mlx5e_has_offload_stats,
.ndo_get_offload_stats = mlx5e_get_offload_stats,
#endif
- .ndo_get_devlink_port = mlx5e_get_devlink_port,
};
static u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
@@ -5226,10 +5238,6 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
}
priv->fs = fs;
- err = mlx5e_ipsec_init(priv);
- if (err)
- mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err);
-
err = mlx5e_ktls_init(priv);
if (err)
mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);
@@ -5242,7 +5250,6 @@ static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
{
mlx5e_health_destroy_reporters(priv);
mlx5e_ktls_cleanup(priv);
- mlx5e_ipsec_cleanup(priv);
mlx5e_fs_cleanup(priv->fs);
}
@@ -5371,6 +5378,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
int err;
mlx5e_fs_init_l2_addr(priv->fs, netdev);
+ mlx5e_ipsec_init(priv);
err = mlx5e_macsec_init(priv);
if (err)
@@ -5434,6 +5442,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
mlx5_lag_remove_netdev(mdev, priv->netdev);
mlx5_vxlan_reset_to_default(mdev->vxlan);
mlx5e_macsec_cleanup(priv);
+ mlx5e_ipsec_cleanup(priv);
}
int mlx5e_update_nic_rx(struct mlx5e_priv *priv)
@@ -5940,16 +5949,16 @@ static int mlx5e_probe(struct auxiliary_device *adev,
goto err_profile_cleanup;
}
+ SET_NETDEV_DEVLINK_PORT(netdev, mlx5e_devlink_get_dl_port(priv));
err = register_netdev(netdev);
if (err) {
mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
goto err_resume;
}
- mlx5e_devlink_port_type_eth_set(priv);
-
mlx5e_dcbnl_init_app(priv);
mlx5_uplink_netdev_set(mdev, netdev);
+ mlx5e_params_print_info(mdev, &priv->channels.params);
return 0;
err_resume:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 794cd8dfe9c9..75b9e1528fd2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -85,18 +85,25 @@ static const struct counter_desc sw_rep_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
};
-struct vport_stats {
- u64 vport_rx_packets;
- u64 vport_tx_packets;
- u64 vport_rx_bytes;
- u64 vport_tx_bytes;
-};
-
static const struct counter_desc vport_rep_stats_desc[] = {
- { MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_packets) },
- { MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_bytes) },
- { MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_packets) },
- { MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_rep_stats, vport_rx_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_rep_stats, vport_rx_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_rep_stats, vport_tx_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_rep_stats, vport_tx_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_rep_stats,
+ rx_vport_rdma_unicast_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_rep_stats, rx_vport_rdma_unicast_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_rep_stats,
+ tx_vport_rdma_unicast_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_rep_stats, tx_vport_rdma_unicast_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_rep_stats,
+ rx_vport_rdma_multicast_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_rep_stats,
+ rx_vport_rdma_multicast_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_rep_stats,
+ tx_vport_rdma_multicast_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_rep_stats,
+ tx_vport_rdma_multicast_bytes) },
};
#define NUM_VPORT_REP_SW_COUNTERS ARRAY_SIZE(sw_rep_stats_desc)
@@ -161,33 +168,80 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport_rep)
int i;
for (i = 0; i < NUM_VPORT_REP_HW_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.vf_vport,
+ data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.rep_stats,
vport_rep_stats_desc, i);
return idx;
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport_rep)
{
+ struct mlx5e_rep_stats *rep_stats = &priv->stats.rep_stats;
+ int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch_rep *rep = rpriv->rep;
- struct rtnl_link_stats64 *vport_stats;
- struct ifla_vf_stats vf_stats;
+ u32 *out;
int err;
- err = mlx5_eswitch_get_vport_stats(esw, rep->vport, &vf_stats);
+ out = kvzalloc(outlen, GFP_KERNEL);
+ if (!out)
+ return;
+
+ err = mlx5_core_query_vport_counter(esw->dev, 1, rep->vport - 1, 0, out);
if (err) {
netdev_warn(priv->netdev, "vport %d error %d reading stats\n",
rep->vport, err);
- return;
+ kvfree(out);
+ return;
}
- vport_stats = &priv->stats.vf_vport;
+ #define MLX5_GET_CTR(p, x) \
+ MLX5_GET64(query_vport_counter_out, p, x)
/* flip tx/rx as we are reporting the counters for the switch vport */
- vport_stats->rx_packets = vf_stats.tx_packets;
- vport_stats->rx_bytes = vf_stats.tx_bytes;
- vport_stats->tx_packets = vf_stats.rx_packets;
- vport_stats->tx_bytes = vf_stats.rx_bytes;
+ rep_stats->vport_rx_packets =
+ MLX5_GET_CTR(out, transmitted_ib_unicast.packets) +
+ MLX5_GET_CTR(out, transmitted_eth_unicast.packets) +
+ MLX5_GET_CTR(out, transmitted_ib_multicast.packets) +
+ MLX5_GET_CTR(out, transmitted_eth_multicast.packets) +
+ MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
+
+ rep_stats->vport_tx_packets =
+ MLX5_GET_CTR(out, received_ib_unicast.packets) +
+ MLX5_GET_CTR(out, received_eth_unicast.packets) +
+ MLX5_GET_CTR(out, received_ib_multicast.packets) +
+ MLX5_GET_CTR(out, received_eth_multicast.packets) +
+ MLX5_GET_CTR(out, received_eth_broadcast.packets);
+
+ rep_stats->vport_rx_bytes =
+ MLX5_GET_CTR(out, transmitted_ib_unicast.octets) +
+ MLX5_GET_CTR(out, transmitted_eth_unicast.octets) +
+ MLX5_GET_CTR(out, transmitted_ib_multicast.octets) +
+ MLX5_GET_CTR(out, transmitted_eth_multicast.octets) +
+ MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);
+
+ rep_stats->vport_tx_bytes =
+ MLX5_GET_CTR(out, received_ib_unicast.octets) +
+ MLX5_GET_CTR(out, received_eth_unicast.octets) +
+ MLX5_GET_CTR(out, received_ib_multicast.octets) +
+ MLX5_GET_CTR(out, received_eth_multicast.octets) +
+ MLX5_GET_CTR(out, received_eth_broadcast.octets);
+
+ rep_stats->rx_vport_rdma_unicast_packets =
+ MLX5_GET_CTR(out, transmitted_ib_unicast.packets);
+ rep_stats->tx_vport_rdma_unicast_packets =
+ MLX5_GET_CTR(out, received_ib_unicast.packets);
+ rep_stats->rx_vport_rdma_unicast_bytes =
+ MLX5_GET_CTR(out, transmitted_ib_unicast.octets);
+ rep_stats->tx_vport_rdma_unicast_bytes =
+ MLX5_GET_CTR(out, received_ib_unicast.octets);
+ rep_stats->rx_vport_rdma_multicast_packets =
+ MLX5_GET_CTR(out, transmitted_ib_multicast.packets);
+ rep_stats->tx_vport_rdma_multicast_packets =
+ MLX5_GET_CTR(out, received_ib_multicast.packets);
+ rep_stats->rx_vport_rdma_multicast_bytes =
+ MLX5_GET_CTR(out, transmitted_ib_multicast.octets);
+ rep_stats->tx_vport_rdma_multicast_bytes =
+ MLX5_GET_CTR(out, received_ib_multicast.octets);
+
+ kvfree(out);
}
static void mlx5e_rep_get_strings(struct net_device *dev,
@@ -607,15 +661,6 @@ static int mlx5e_rep_change_mtu(struct net_device *netdev, int new_mtu)
return mlx5e_change_mtu(netdev, new_mtu, NULL);
}
-static struct devlink_port *mlx5e_rep_get_devlink_port(struct net_device *netdev)
-{
- struct mlx5e_priv *priv = netdev_priv(netdev);
- struct mlx5e_rep_priv *rpriv = priv->ppriv;
- struct mlx5_core_dev *dev = priv->mdev;
-
- return mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
-}
-
static int mlx5e_rep_change_carrier(struct net_device *dev, bool new_carrier)
{
struct mlx5e_priv *priv = netdev_priv(dev);
@@ -644,7 +689,6 @@ static const struct net_device_ops mlx5e_netdev_ops_rep = {
.ndo_stop = mlx5e_rep_close,
.ndo_start_xmit = mlx5e_xmit,
.ndo_setup_tc = mlx5e_rep_setup_tc,
- .ndo_get_devlink_port = mlx5e_rep_get_devlink_port,
.ndo_get_stats64 = mlx5e_rep_get_stats,
.ndo_has_offload_stats = mlx5e_rep_has_offload_stats,
.ndo_get_offload_stats = mlx5e_rep_get_offload_stats,
@@ -761,7 +805,6 @@ static int mlx5e_init_ul_rep(struct mlx5_core_dev *mdev,
struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- int err;
priv->fs = mlx5e_fs_init(priv->profile, mdev,
!test_bit(MLX5E_STATE_DESTROYING, &priv->state));
@@ -770,10 +813,6 @@ static int mlx5e_init_ul_rep(struct mlx5_core_dev *mdev,
return -ENOMEM;
}
- err = mlx5e_ipsec_init(priv);
- if (err)
- mlx5_core_err(mdev, "Uplink rep IPsec initialization failed, %d\n", err);
-
mlx5e_vxlan_set_netdev_info(priv);
mlx5e_build_rep_params(netdev);
mlx5e_timestamp_init(priv);
@@ -783,7 +822,6 @@ static int mlx5e_init_ul_rep(struct mlx5_core_dev *mdev,
static void mlx5e_cleanup_rep(struct mlx5e_priv *priv)
{
mlx5e_fs_cleanup(priv->fs);
- mlx5e_ipsec_cleanup(priv);
}
static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
@@ -1122,6 +1160,8 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
struct mlx5_core_dev *mdev = priv->mdev;
u16 max_mtu;
+ mlx5e_ipsec_init(priv);
+
netdev->min_mtu = ETH_MIN_MTU;
mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);
@@ -1168,6 +1208,8 @@ static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
mlx5e_rep_tc_disable(priv);
mlx5_lag_remove_netdev(mdev, priv->netdev);
mlx5_vxlan_reset_to_default(mdev->vxlan);
+
+ mlx5e_ipsec_cleanup(priv);
}
static MLX5E_DEFINE_STATS_GRP(sw_rep, 0);
@@ -1253,37 +1295,20 @@ mlx5e_vport_uplink_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *
{
struct mlx5e_priv *priv = netdev_priv(mlx5_uplink_netdev_get(dev));
struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
- struct devlink_port *dl_port;
- int err;
rpriv->netdev = priv->netdev;
-
- err = mlx5e_netdev_change_profile(priv, &mlx5e_uplink_rep_profile,
- rpriv);
- if (err)
- return err;
-
- dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
- if (dl_port)
- devlink_port_type_eth_set(dl_port, rpriv->netdev);
-
- return 0;
+ return mlx5e_netdev_change_profile(priv, &mlx5e_uplink_rep_profile,
+ rpriv);
}
static void
mlx5e_vport_uplink_rep_unload(struct mlx5e_rep_priv *rpriv)
{
struct net_device *netdev = rpriv->netdev;
- struct devlink_port *dl_port;
- struct mlx5_core_dev *dev;
struct mlx5e_priv *priv;
priv = netdev_priv(netdev);
- dev = priv->mdev;
- dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
- if (dl_port)
- devlink_port_type_clear(dl_port);
mlx5e_netdev_attach_nic_profile(priv);
}
@@ -1326,6 +1351,11 @@ mlx5e_vport_vf_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
goto err_cleanup_profile;
}
+ dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch,
+ rpriv->rep->vport);
+ if (dl_port)
+ SET_NETDEV_DEVLINK_PORT(netdev, dl_port);
+
err = register_netdev(netdev);
if (err) {
netdev_warn(netdev,
@@ -1334,9 +1364,6 @@ mlx5e_vport_vf_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
goto err_detach_netdev;
}
- dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
- if (dl_port)
- devlink_port_type_eth_set(dl_port, netdev);
return 0;
err_detach_netdev:
@@ -1382,8 +1409,6 @@ mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
struct net_device *netdev = rpriv->netdev;
struct mlx5e_priv *priv = netdev_priv(netdev);
- struct mlx5_core_dev *dev = priv->mdev;
- struct devlink_port *dl_port;
void *ppriv = priv->ppriv;
if (rep->vport == MLX5_VPORT_UPLINK) {
@@ -1391,9 +1416,6 @@ mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
goto free_ppriv;
}
- dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
- if (dl_port)
- devlink_port_type_clear(dl_port);
unregister_netdev(netdev);
mlx5e_detach_netdev(priv);
priv->profile->cleanup(priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index a61a43fc8d5c..c8820ab22169 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -89,6 +89,25 @@ static inline void mlx5e_read_cqe_slot(struct mlx5_cqwq *wq,
memcpy(data, mlx5_cqwq_get_wqe(wq, ci), sizeof(struct mlx5_cqe64));
}
+static void mlx5e_read_enhanced_title_slot(struct mlx5e_rq *rq,
+ struct mlx5_cqe64 *cqe)
+{
+ struct mlx5e_cq_decomp *cqd = &rq->cqd;
+ struct mlx5_cqe64 *title = &cqd->title;
+
+ memcpy(title, cqe, sizeof(struct mlx5_cqe64));
+
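+ /* With HW stride index support the mini CQEs carry the stride index
+ * themselves; otherwise derive the next expected wqe_counter from
+ * the title CQE.
+ */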
+ if (likely(test_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &rq->state)))
+ return;
+
+ if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
+ cqd->wqe_counter = mpwrq_get_cqe_stride_index(title) +
+ mpwrq_get_cqe_consumed_strides(title);
+ else
+ cqd->wqe_counter =
+ mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, be16_to_cpu(title->wqe_counter) + 1);
+}
+
static inline void mlx5e_read_title_slot(struct mlx5e_rq *rq,
struct mlx5_cqwq *wq,
u32 cqcc)
@@ -175,6 +194,38 @@ static inline void mlx5e_decompress_cqe_no_hash(struct mlx5e_rq *rq,
cqd->title.rss_hash_result = 0;
}
+static u32 mlx5e_decompress_enhanced_cqe(struct mlx5e_rq *rq,
+ struct mlx5_cqwq *wq,
+ struct mlx5_cqe64 *cqe,
+ int budget_rem)
+{
+ struct mlx5e_cq_decomp *cqd = &rq->cqd;
+ u32 cqcc, left;
+ u32 i;
+
+ left = get_cqe_enhanced_num_mini_cqes(cqe);
+ /* Avoid breaking the CQE compression session in the middle when the
+ * budget is not sufficient to handle all of it; in that case return
+ * work_done == budget_rem to give the 'busy' NAPI indication.
+ */
+ if (unlikely(left > budget_rem))
+ return budget_rem;
+
+ cqcc = wq->cc;
+ cqd->mini_arr_idx = 0;
+ memcpy(cqd->mini_arr, cqe, sizeof(struct mlx5_cqe64));
+ for (i = 0; i < left; i++, cqd->mini_arr_idx++, cqcc++) {
+ mlx5e_decompress_cqe_no_hash(rq, wq, cqcc);
+ INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
+ mlx5e_handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq_shampo,
+ rq, &cqd->title);
+ }
+ wq->cc = cqcc;
+ rq->stats->cqe_compress_pkts += left;
+
+ return left;
+}
+
static inline u32 mlx5e_decompress_cqes_cont(struct mlx5e_rq *rq,
struct mlx5_cqwq *wq,
int update_owner_only,
@@ -220,7 +271,7 @@ static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
rq, &cqd->title);
cqd->mini_arr_idx++;
- return mlx5e_decompress_cqes_cont(rq, wq, 1, budget_rem) - 1;
+ return mlx5e_decompress_cqes_cont(rq, wq, 1, budget_rem);
}
static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq, struct page *page)
@@ -542,8 +593,8 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
int headroom, i;
headroom = rq->buff.headroom;
- new_entries = klm_entries - (shampo->pi & (MLX5_UMR_KLM_ALIGNMENT - 1));
- entries = ALIGN(klm_entries, MLX5_UMR_KLM_ALIGNMENT);
+ new_entries = klm_entries - (shampo->pi & (MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT - 1));
+ entries = ALIGN(klm_entries, MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT);
wqe_bbs = MLX5E_KLM_UMR_WQEBBS(entries);
pi = mlx5e_icosq_get_next_pi(sq, wqe_bbs);
umr_wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
@@ -552,7 +603,7 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
for (i = 0; i < entries; i++, index++) {
dma_info = &shampo->info[index];
if (i >= klm_entries || (index < shampo->pi && shampo->pi - index <
- MLX5_UMR_KLM_ALIGNMENT))
+ MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT))
goto update_klm;
header_offset = (index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) <<
MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE;
@@ -617,8 +668,8 @@ static int mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq *rq)
if (!klm_entries)
return 0;
- klm_entries += (shampo->pi & (MLX5_UMR_KLM_ALIGNMENT - 1));
- index = ALIGN_DOWN(shampo->pi, MLX5_UMR_KLM_ALIGNMENT);
+ klm_entries += (shampo->pi & (MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT - 1));
+ index = ALIGN_DOWN(shampo->pi, MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT);
entries_before = shampo->hd_per_wq - index;
if (unlikely(entries_before < klm_entries))
@@ -676,6 +727,17 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
};
}
+ /* Pad if needed, in case the value set in ucseg->xlt_octowords
+ * in mlx5e_build_umr_wqe() required alignment.
+ */
+ if (rq->mpwqe.pages_per_wqe & (MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT - 1)) {
+ int pad = ALIGN(rq->mpwqe.pages_per_wqe, MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT) -
+ rq->mpwqe.pages_per_wqe;
+
+ memset(&umr_wqe->inline_mtts[rq->mpwqe.pages_per_wqe], 0,
+ sizeof(*umr_wqe->inline_mtts) * pad);
+ }
+
bitmap_zero(wi->xdp_xmit_bitmap, rq->mpwqe.pages_per_wqe);
wi->consumed_strides = 0;
@@ -2211,45 +2273,102 @@ mpwrq_cqe_out:
mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
}
-int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
+static int mlx5e_rx_cq_process_enhanced_cqe_comp(struct mlx5e_rq *rq,
+ struct mlx5_cqwq *cqwq,
+ int budget_rem)
{
- struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
- struct mlx5_cqwq *cqwq = &cq->wq;
- struct mlx5_cqe64 *cqe;
+ struct mlx5_cqe64 *cqe, *title_cqe = NULL;
+ struct mlx5e_cq_decomp *cqd = &rq->cqd;
int work_done = 0;
- if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
- return 0;
+ cqe = mlx5_cqwq_get_cqe_enahnced_comp(cqwq);
+ if (!cqe)
+ return work_done;
- if (rq->cqd.left) {
- work_done += mlx5e_decompress_cqes_cont(rq, cqwq, 0, budget);
- if (work_done >= budget)
- goto out;
+ if (cqd->last_cqe_title &&
+ (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED)) {
+ rq->stats->cqe_compress_blks++;
+ cqd->last_cqe_title = false;
}
- cqe = mlx5_cqwq_get_cqe(cqwq);
- if (!cqe) {
- if (unlikely(work_done))
- goto out;
- return 0;
+ do {
+ if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {
+ if (title_cqe) {
+ mlx5e_read_enhanced_title_slot(rq, title_cqe);
+ title_cqe = NULL;
+ rq->stats->cqe_compress_blks++;
+ }
+ work_done +=
+ mlx5e_decompress_enhanced_cqe(rq, cqwq, cqe,
+ budget_rem - work_done);
+ continue;
+ }
+ title_cqe = cqe;
+ mlx5_cqwq_pop(cqwq);
+
+ INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
+ mlx5e_handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq_shampo,
+ rq, cqe);
+ work_done++;
+ } while (work_done < budget_rem &&
+ (cqe = mlx5_cqwq_get_cqe_enahnced_comp(cqwq)));
+
+ /* The last CQE might be the title of the next poll bulk */
+ if (title_cqe) {
+ mlx5e_read_enhanced_title_slot(rq, title_cqe);
+ cqd->last_cqe_title = true;
}
- do {
+ return work_done;
+}
+
+static int mlx5e_rx_cq_process_basic_cqe_comp(struct mlx5e_rq *rq,
+ struct mlx5_cqwq *cqwq,
+ int budget_rem)
+{
+ struct mlx5_cqe64 *cqe;
+ int work_done = 0;
+
+ if (rq->cqd.left)
+ work_done += mlx5e_decompress_cqes_cont(rq, cqwq, 0, budget_rem);
+
+ while (work_done < budget_rem && (cqe = mlx5_cqwq_get_cqe(cqwq))) {
if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {
work_done +=
mlx5e_decompress_cqes_start(rq, cqwq,
- budget - work_done);
+ budget_rem - work_done);
continue;
}
mlx5_cqwq_pop(cqwq);
-
INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
mlx5e_handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq_shampo,
rq, cqe);
- } while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(cqwq)));
+ work_done++;
+ }
+
+ return work_done;
+}
+
+int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
+{
+ struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
+ struct mlx5_cqwq *cqwq = &cq->wq;
+ int work_done;
+
+ if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
+ return 0;
+
+ if (test_bit(MLX5E_RQ_STATE_MINI_CQE_ENHANCED, &rq->state))
+ work_done = mlx5e_rx_cq_process_enhanced_cqe_comp(rq, cqwq,
+ budget);
+ else
+ work_done = mlx5e_rx_cq_process_basic_cqe_comp(rq, cqwq,
+ budget);
+
+ if (work_done == 0)
+ return 0;
-out:
if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state) && rq->hw_gro_data->skb)
mlx5e_shampo_flush_skb(rq, NULL, false);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 03c1841970f1..6687b8136e44 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -1241,6 +1241,23 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(phy)
mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}
+void mlx5e_get_link_ext_stats(struct net_device *dev,
+ struct ethtool_link_ext_stats *stats)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ u32 out[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
+ u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
+ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+
+ MLX5_SET(ppcnt_reg, in, local_port, 1);
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
+ mlx5_core_access_reg(priv->mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+
+ stats->link_down_events = MLX5_GET(ppcnt_reg, out,
+ counter_set.phys_layer_cntrs.link_down_events);
+}
+
static int fec_num_lanes(struct mlx5_core_dev *dev)
{
u32 out[MLX5_ST_SZ_DW(pmlp_reg)] = {};
@@ -2463,6 +2480,7 @@ mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = {
&MLX5E_STATS_GRP(per_prio),
&MLX5E_STATS_GRP(pme),
#ifdef CONFIG_MLX5_EN_IPSEC
+ &MLX5E_STATS_GRP(ipsec_hw),
&MLX5E_STATS_GRP(ipsec_sw),
#endif
&MLX5E_STATS_GRP(tls),
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 9f781085be47..375752d6546d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -126,6 +126,8 @@ void mlx5e_stats_eth_ctrl_get(struct mlx5e_priv *priv,
void mlx5e_stats_rmon_get(struct mlx5e_priv *priv,
struct ethtool_rmon_stats *rmon,
const struct ethtool_rmon_hist_range **ranges);
+void mlx5e_get_link_ext_stats(struct net_device *dev,
+ struct ethtool_link_ext_stats *stats);
/* Concrete NIC Stats */
@@ -461,6 +463,21 @@ struct mlx5e_ptp_cq_stats {
u64 resync_event;
};
+struct mlx5e_rep_stats {
+ u64 vport_rx_packets;
+ u64 vport_tx_packets;
+ u64 vport_rx_bytes;
+ u64 vport_tx_bytes;
+ u64 rx_vport_rdma_unicast_packets;
+ u64 tx_vport_rdma_unicast_packets;
+ u64 rx_vport_rdma_unicast_bytes;
+ u64 tx_vport_rdma_unicast_bytes;
+ u64 rx_vport_rdma_multicast_packets;
+ u64 tx_vport_rdma_multicast_packets;
+ u64 rx_vport_rdma_multicast_bytes;
+ u64 tx_vport_rdma_multicast_bytes;
+};
+
struct mlx5e_stats {
struct mlx5e_sw_stats sw;
struct mlx5e_qcounter_stats qcnt;
@@ -469,6 +486,7 @@ struct mlx5e_stats {
struct mlx5e_pport_stats pport;
struct rtnl_link_stats64 vf_vport;
struct mlx5e_pcie_stats pcie;
+ struct mlx5e_rep_stats rep_stats;
};
extern mlx5e_stats_grp_t mlx5e_nic_stats_grps[];
@@ -488,6 +506,7 @@ extern MLX5E_DECLARE_STATS_GRP(per_prio);
extern MLX5E_DECLARE_STATS_GRP(pme);
extern MLX5E_DECLARE_STATS_GRP(channels);
extern MLX5E_DECLARE_STATS_GRP(per_port_buff_congest);
+extern MLX5E_DECLARE_STATS_GRP(ipsec_hw);
extern MLX5E_DECLARE_STATS_GRP(ipsec_sw);
extern MLX5E_DECLARE_STATS_GRP(ptp);
extern MLX5E_DECLARE_STATS_GRP(macsec_hw);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index bd9936af4582..9af2aa2922f5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -132,6 +132,15 @@ struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
[PACKET_COLOR_TO_REG] = packet_color_to_reg,
};
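+/* Parsing state for branching actions (e.g. a police conditional):
+ * tracks how many actions remain inside the branch and which attr
+ * the jump originates from.
+ */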
+struct mlx5e_tc_jump_state {
+ u32 jump_count;
+ bool jump_target;
+ struct mlx5_flow_attr *jumping_attr;
+
+ enum flow_action_id last_id;
+ u32 last_index;
+};
+
struct mlx5e_tc_table *mlx5e_tc_table_alloc(void)
{
struct mlx5e_tc_table *tc;
@@ -160,6 +169,7 @@ static struct lock_class_key tc_ht_lock_key;
static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow);
static void free_flow_post_acts(struct mlx5e_tc_flow *flow);
+static void mlx5_free_flow_attr(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr);
void
mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
@@ -392,8 +402,9 @@ mlx5_tc_rule_delete(struct mlx5e_priv *priv,
static bool
is_flow_meter_action(struct mlx5_flow_attr *attr)
{
- return ((attr->action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) &&
- (attr->exe_aso_type == MLX5_EXE_ASO_FLOW_METER));
+ return (((attr->action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) &&
+ (attr->exe_aso_type == MLX5_EXE_ASO_FLOW_METER)) ||
+ attr->flags & MLX5_ATTR_FLAG_MTU);
}
static int
@@ -404,6 +415,7 @@ mlx5e_tc_add_flow_meter(struct mlx5e_priv *priv,
struct mlx5e_post_meter_priv *post_meter;
enum mlx5_flow_namespace_type ns_type;
struct mlx5e_flow_meter_handle *meter;
+ enum mlx5e_post_meter_type type;
meter = mlx5e_tc_meter_replace(priv->mdev, &attr->meter_attr.params);
if (IS_ERR(meter)) {
@@ -412,8 +424,11 @@ mlx5e_tc_add_flow_meter(struct mlx5e_priv *priv,
}
ns_type = mlx5e_tc_meter_get_namespace(meter->flow_meters);
- post_meter = mlx5e_post_meter_init(priv, ns_type, post_act, meter->green_counter,
- meter->red_counter);
+ type = meter->params.mtu ? MLX5E_POST_METER_MTU : MLX5E_POST_METER_RATE;
+ post_meter = mlx5e_post_meter_init(priv, ns_type, post_act,
+ type,
+ meter->act_counter, meter->drop_counter,
+ attr->branch_true, attr->branch_false);
if (IS_ERR(post_meter)) {
mlx5_core_err(priv->mdev, "Failed to init post meter\n");
goto err_meter_init;
@@ -432,9 +447,9 @@ err_meter_init:
}
static void
-mlx5e_tc_del_flow_meter(struct mlx5_flow_attr *attr)
+mlx5e_tc_del_flow_meter(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
{
- mlx5e_post_meter_cleanup(attr->meter_attr.post_meter);
+ mlx5e_post_meter_cleanup(esw, attr->meter_attr.post_meter);
mlx5e_tc_meter_put(attr->meter_attr.meter);
}
@@ -495,7 +510,7 @@ mlx5e_tc_rule_unoffload(struct mlx5e_priv *priv,
mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
if (attr->meter_attr.meter)
- mlx5e_tc_del_flow_meter(attr);
+ mlx5e_tc_del_flow_meter(esw, attr);
}
int
@@ -606,6 +621,12 @@ int mlx5e_get_flow_namespace(struct mlx5e_tc_flow *flow)
MLX5_FLOW_NAMESPACE_FDB : MLX5_FLOW_NAMESPACE_KERNEL;
}
+static struct mlx5_core_dev *
+get_flow_counter_dev(struct mlx5e_tc_flow *flow)
+{
+ return mlx5e_is_eswitch_flow(flow) ? flow->attr->esw_attr->counter_dev : flow->priv->mdev;
+}
+
static struct mod_hdr_tbl *
get_mod_hdr_table(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow)
{
@@ -1060,12 +1081,9 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
hash_hairpin_info(peer_id, match_prio));
mutex_unlock(&tc->hairpin_tbl_lock);
- params.log_data_size = 16;
- params.log_data_size = min_t(u8, params.log_data_size,
- MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
- params.log_data_size = max_t(u8, params.log_data_size,
- MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz));
-
+ params.log_data_size = clamp_t(u8, 16,
+ MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz),
+ MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
params.log_num_packets = params.log_data_size -
MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(priv->mdev);
params.log_num_packets = min_t(u8, params.log_num_packets,
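The min_t/max_t pair above collapses into one clamp_t(); the argument order is (type, value, low, high), so 16 is the value being pinned into the device's supported range, assuming log_min <= log_max as clamp_t() requires. A minimal userspace sketch of the semantics, with the macro body simplified from include/linux/minmax.h:
    #include <stdio.h>

    /* simplified clamp_t(): pin val into [lo, hi] */
    #define clamp_t(type, val, lo, hi) \
            ((type)(val) < (type)(lo) ? (type)(lo) : \
             (type)(val) > (type)(hi) ? (type)(hi) : (type)(val))

    int main(void)
    {
            unsigned char log_min = 6, log_max = 12;

            /* 16 is above log_max, so the result pins to 12 */
            printf("%d\n", clamp_t(unsigned char, 16, log_min, log_max));
            return 0;
    }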
@@ -1722,6 +1740,90 @@ clean_encap_dests(struct mlx5e_priv *priv,
}
static int
+verify_attr_actions(u32 actions, struct netlink_ext_ack *extack)
+{
+ if (!(actions &
+ (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
+ NL_SET_ERR_MSG_MOD(extack, "Rule must have at least one forward/drop action");
+ return -EOPNOTSUPP;
+ }
+
+ if (!(~actions &
+ (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
+ NL_SET_ERR_MSG_MOD(extack, "Rule cannot support forward+drop action");
+ return -EOPNOTSUPP;
+ }
+
+ if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
+ actions & MLX5_FLOW_CONTEXT_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack, "Drop with modify header action is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int
+post_process_attr(struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr,
+ bool is_post_act_attr,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch;
+ bool vf_tun;
+ int err = 0;
+
+ err = verify_attr_actions(attr->action, extack);
+ if (err)
+ goto err_out;
+
+ err = set_encap_dests(flow->priv, flow, attr, extack, &vf_tun);
+ if (err)
+ goto err_out;
+
+ if (mlx5e_is_eswitch_flow(flow)) {
+ err = mlx5_eswitch_add_vlan_action(esw, attr);
+ if (err)
+ goto err_out;
+ }
+
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
+ if (vf_tun || is_post_act_attr) {
+ err = mlx5e_tc_add_flow_mod_hdr(flow->priv, flow, attr);
+ if (err)
+ goto err_out;
+ } else {
+ err = mlx5e_attach_mod_hdr(flow->priv, flow, attr->parse_attr);
+ if (err)
+ goto err_out;
+ }
+ }
+
+ if (attr->branch_true &&
+ attr->branch_true->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
+ err = mlx5e_tc_add_flow_mod_hdr(flow->priv, flow, attr->branch_true);
+ if (err)
+ goto err_out;
+ }
+
+ if (attr->branch_false &&
+ attr->branch_false->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
+ err = mlx5e_tc_add_flow_mod_hdr(flow->priv, flow, attr->branch_false);
+ if (err)
+ goto err_out;
+ }
+
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+ err = alloc_flow_attr_counter(get_flow_counter_dev(flow), attr);
+ if (err)
+ goto err_out;
+ }
+
+err_out:
+ return err;
+}
+
+static int
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct netlink_ext_ack *extack)
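The two rejection tests in verify_attr_actions() above are complementary bit tricks: !(actions & (FWD | DROP)) fires when neither action bit is set, while !(~actions & (FWD | DROP)) fires when both are set, because ~actions is zero exactly where actions is one. A standalone sketch, with FWD/DROP as stand-ins for the MLX5_FLOW_CONTEXT_ACTION_* flags:
    #include <assert.h>

    #define FWD  (1u << 0)  /* stand-in for MLX5_FLOW_CONTEXT_ACTION_FWD_DEST */
    #define DROP (1u << 1)  /* stand-in for MLX5_FLOW_CONTEXT_ACTION_DROP */

    int main(void)
    {
            unsigned int both = FWD | DROP, fwd = FWD, none = 0;

            assert(!(none & (FWD | DROP)));  /* neither set: rejected */
            assert(!(~both & (FWD | DROP))); /* both set: rejected */
            assert(~fwd & (FWD | DROP));     /* exactly one set: allowed */
            return 0;
    }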
@@ -1731,7 +1833,6 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr = flow->attr;
struct mlx5_esw_flow_attr *esw_attr;
u32 max_prio, max_chain;
- bool vf_tun;
int err = 0;
parse_attr = attr->parse_attr;
@@ -1821,32 +1922,10 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
esw_attr->int_port = int_port;
}
- err = set_encap_dests(priv, flow, attr, extack, &vf_tun);
+ err = post_process_attr(flow, attr, false, extack);
if (err)
goto err_out;
- err = mlx5_eswitch_add_vlan_action(esw, attr);
- if (err)
- goto err_out;
-
- if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
- if (vf_tun) {
- err = mlx5e_tc_add_flow_mod_hdr(priv, flow, attr);
- if (err)
- goto err_out;
- } else {
- err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
- if (err)
- goto err_out;
- }
- }
-
- if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
- err = alloc_flow_attr_counter(esw_attr->counter_dev, attr);
- if (err)
- goto err_out;
- }
-
/* we get here if one of the following takes place:
* (1) there's no error
* (2) there's an encap action and we don't have valid neigh
@@ -1882,6 +1961,16 @@ static bool mlx5_flow_has_geneve_opt(struct mlx5e_tc_flow *flow)
return !!geneve_tlv_opt_0_data;
}
+static void free_branch_attr(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr)
+{
+ if (!attr)
+ return;
+
+ mlx5_free_flow_attr(flow, attr);
+ kvfree(attr->parse_attr);
+ kfree(attr);
+}
+
static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
@@ -1937,6 +2026,8 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
mlx5e_detach_decap(priv, flow);
free_flow_post_acts(flow);
+ free_branch_attr(flow, attr->branch_true);
+ free_branch_attr(flow, attr->branch_false);
if (flow->attr->lag.count)
mlx5_lag_del_mpesw_rule(esw->dev);
@@ -3510,36 +3601,6 @@ actions_match_supported(struct mlx5e_priv *priv,
ct_clear = flow->attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR;
ct_flow = flow_flag_test(flow, CT) && !ct_clear;
- if (!(actions &
- (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
- NL_SET_ERR_MSG_MOD(extack, "Rule must have at least one forward/drop action");
- return false;
- }
-
- if (!(~actions &
- (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
- NL_SET_ERR_MSG_MOD(extack, "Rule cannot support forward+drop action");
- return false;
- }
-
- if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
- actions & MLX5_FLOW_CONTEXT_ACTION_DROP) {
- NL_SET_ERR_MSG_MOD(extack, "Drop with modify header action is not supported");
- return false;
- }
-
- if (!(~actions &
- (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
- NL_SET_ERR_MSG_MOD(extack, "Rule cannot support forward+drop action");
- return false;
- }
-
- if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
- actions & MLX5_FLOW_CONTEXT_ACTION_DROP) {
- NL_SET_ERR_MSG_MOD(extack, "Drop with modify header action is not supported");
- return false;
- }
-
if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
!modify_header_match_supported(priv, &parse_attr->spec, flow_action,
actions, ct_flow, ct_clear, extack))
@@ -3639,15 +3700,12 @@ mlx5e_clone_flow_attr_for_post_act(struct mlx5_flow_attr *attr,
attr2->esw_attr->split_count = 0;
}
+ attr2->branch_true = NULL;
+ attr2->branch_false = NULL;
+ attr2->jumping_attr = NULL;
return attr2;
}
-static struct mlx5_core_dev *
-get_flow_counter_dev(struct mlx5e_tc_flow *flow)
-{
- return mlx5e_is_eswitch_flow(flow) ? flow->attr->esw_attr->counter_dev : flow->priv->mdev;
-}
-
struct mlx5_flow_attr *
mlx5e_tc_get_encap_attr(struct mlx5e_tc_flow *flow)
{
@@ -3683,28 +3741,15 @@ mlx5e_tc_unoffload_flow_post_acts(struct mlx5e_tc_flow *flow)
static void
free_flow_post_acts(struct mlx5e_tc_flow *flow)
{
- struct mlx5_core_dev *counter_dev = get_flow_counter_dev(flow);
- struct mlx5e_post_act *post_act = get_post_action(flow->priv);
struct mlx5_flow_attr *attr, *tmp;
- bool vf_tun;
list_for_each_entry_safe(attr, tmp, &flow->attrs, list) {
if (list_is_last(&attr->list, &flow->attrs))
break;
- if (attr->post_act_handle)
- mlx5e_tc_post_act_del(post_act, attr->post_act_handle);
-
- clean_encap_dests(flow->priv, flow, attr, &vf_tun);
-
- if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
- mlx5_fc_destroy(counter_dev, attr->counter);
-
- if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
- mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts);
- if (attr->modify_hdr)
- mlx5_modify_header_dealloc(flow->priv->mdev, attr->modify_hdr);
- }
+ mlx5_free_flow_attr(flow, attr);
+ free_branch_attr(flow, attr->branch_true);
+ free_branch_attr(flow, attr->branch_false);
list_del(&attr->list);
kvfree(attr->parse_attr);
@@ -3757,7 +3802,6 @@ alloc_flow_post_acts(struct mlx5e_tc_flow *flow, struct netlink_ext_ack *extack)
struct mlx5e_post_act *post_act = get_post_action(flow->priv);
struct mlx5_flow_attr *attr, *next_attr = NULL;
struct mlx5e_post_act_handle *handle;
- bool vf_tun;
int err;
/* This is going in reverse order as needed.
@@ -3767,7 +3811,9 @@ alloc_flow_post_acts(struct mlx5e_tc_flow *flow, struct netlink_ext_ack *extack)
if (!next_attr) {
/* Set counter action on last post act rule. */
attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
- } else {
+ }
+
+ if (next_attr && !(attr->flags & MLX5_ATTR_FLAG_TERMINATING)) {
err = mlx5e_tc_act_set_next_post_act(flow, attr, next_attr);
if (err)
goto out_free;
@@ -3779,26 +3825,14 @@ alloc_flow_post_acts(struct mlx5e_tc_flow *flow, struct netlink_ext_ack *extack)
if (list_is_last(&attr->list, &flow->attrs))
break;
- err = set_encap_dests(flow->priv, flow, attr, extack, &vf_tun);
+ err = actions_prepare_mod_hdr_actions(flow->priv, flow, attr, extack);
if (err)
goto out_free;
- err = actions_prepare_mod_hdr_actions(flow->priv, flow, attr, extack);
+ err = post_process_attr(flow, attr, true, extack);
if (err)
goto out_free;
- if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
- err = mlx5e_tc_add_flow_mod_hdr(flow->priv, flow, attr);
- if (err)
- goto out_free;
- }
-
- if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
- err = alloc_flow_attr_counter(get_flow_counter_dev(flow), attr);
- if (err)
- goto out_free;
- }
-
handle = mlx5e_tc_post_act_add(post_act, attr);
if (IS_ERR(handle)) {
err = PTR_ERR(handle);
@@ -3806,6 +3840,13 @@ alloc_flow_post_acts(struct mlx5e_tc_flow *flow, struct netlink_ext_ack *extack)
}
attr->post_act_handle = handle;
+
+ if (attr->jumping_attr) {
+ err = mlx5e_tc_act_set_next_post_act(flow, attr->jumping_attr, attr);
+ if (err)
+ goto out_free;
+ }
+
next_attr = attr;
}
@@ -3825,12 +3866,145 @@ out_free:
}
static int
+alloc_branch_attr(struct mlx5e_tc_flow *flow,
+ struct mlx5e_tc_act_branch_ctrl *cond,
+ struct mlx5_flow_attr **cond_attr,
+ u32 *jump_count,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_flow_attr *attr;
+ int err = 0;
+
+ *cond_attr = mlx5e_clone_flow_attr_for_post_act(flow->attr,
+ mlx5e_get_flow_namespace(flow));
+ if (!(*cond_attr))
+ return -ENOMEM;
+
+ attr = *cond_attr;
+
+ switch (cond->act_id) {
+ case FLOW_ACTION_DROP:
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
+ break;
+ case FLOW_ACTION_ACCEPT:
+ case FLOW_ACTION_PIPE:
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ attr->dest_ft = mlx5e_tc_post_act_get_ft(get_post_action(flow->priv));
+ break;
+ case FLOW_ACTION_JUMP:
+ if (*jump_count) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload flows with nested jumps");
+ err = -EOPNOTSUPP;
+ goto out_err;
+ }
+ *jump_count = cond->extval;
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ attr->dest_ft = mlx5e_tc_post_act_get_ft(get_post_action(flow->priv));
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ goto out_err;
+ }
+
+ return err;
+out_err:
+ kfree(*cond_attr);
+ *cond_attr = NULL;
+ return err;
+}
+
+static void
+dec_jump_count(struct flow_action_entry *act, struct mlx5e_tc_act *tc_act,
+ struct mlx5_flow_attr *attr, struct mlx5e_priv *priv,
+ struct mlx5e_tc_jump_state *jump_state)
+{
+ if (!jump_state->jump_count)
+ return;
+
+ /* A single tc action can instantiate multiple offload actions (e.g. pedit);
+ * count the jump once per tc action, not per offload action.
+ */
+ if (act->id == jump_state->last_id && act->hw_index == jump_state->last_index)
+ return;
+
+ jump_state->last_id = act->id;
+ jump_state->last_index = act->hw_index;
+
+ /* nothing to do for intermediate actions */
+ if (--jump_state->jump_count > 1)
+ return;
+
+ if (jump_state->jump_count == 1) { /* last action in the jump action list */
+
+ /* create a new attribute after this action */
+ jump_state->jump_target = true;
+
+ if (tc_act->is_terminating_action) { /* the branch ends here */
+ attr->flags |= MLX5_ATTR_FLAG_TERMINATING;
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ } else { /* the branch continues executing the rest of the actions */
+ struct mlx5e_post_act *post_act;
+
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ post_act = get_post_action(priv);
+ attr->dest_ft = mlx5e_tc_post_act_get_ft(post_act);
+ }
+ } else if (jump_state->jump_count == 0) { /* first attr after the jump action list */
+ /* This is the post action for the jumping attribute (either red or green)
+ * Use the stored jumping_attr to set the post act id on the jumping attribute
+ */
+ attr->jumping_attr = jump_state->jumping_attr;
+ }
+}
+
+static int
+parse_branch_ctrl(struct flow_action_entry *act, struct mlx5e_tc_act *tc_act,
+ struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr,
+ struct mlx5e_tc_jump_state *jump_state,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_tc_act_branch_ctrl cond_true, cond_false;
+ u32 jump_count = jump_state->jump_count;
+ int err;
+
+ if (!tc_act->get_branch_ctrl)
+ return 0;
+
+ tc_act->get_branch_ctrl(act, &cond_true, &cond_false);
+
+ err = alloc_branch_attr(flow, &cond_true,
+ &attr->branch_true, &jump_count, extack);
+ if (err)
+ goto out_err;
+
+ if (jump_count)
+ jump_state->jumping_attr = attr->branch_true;
+
+ err = alloc_branch_attr(flow, &cond_false,
+ &attr->branch_false, &jump_count, extack);
+ if (err)
+ goto err_branch_false;
+
+ if (jump_count && !jump_state->jumping_attr)
+ jump_state->jumping_attr = attr->branch_false;
+
+ jump_state->jump_count = jump_count;
+ return 0;
+
+err_branch_false:
+ free_branch_attr(flow, attr->branch_true);
+out_err:
+ return err;
+}
+
+static int
parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state,
struct flow_action *flow_action)
{
struct netlink_ext_ack *extack = parse_state->extack;
struct mlx5e_tc_flow_action flow_action_reorder;
struct mlx5e_tc_flow *flow = parse_state->flow;
+ struct mlx5e_tc_jump_state jump_state = {};
struct mlx5_flow_attr *attr = flow->attr;
enum mlx5_flow_namespace_type ns_type;
struct mlx5e_priv *priv = flow->priv;
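The jump_state machinery above is easiest to read as a trace. Suppose a police action's branch resolves to FLOW_ACTION_JUMP with cond->extval = 3, so parse_branch_ctrl() leaves jump_state->jump_count = 3; the next three distinct tc actions (a pedit that instantiates several offload actions still counts once, via the last_id/last_index check) then take the three dec_jump_count() branches in order. A sketch:
    action  --jump_count  branch taken
    #1      2             > 1: intermediate action, nothing to do
    #2      1             == 1: last action of the jump list; mark the
                          attr terminating, or forward it to post_act
    #3      0             == 0: first attr after the jump list; record
                          it as the stored jumping_attr's post-act target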
@@ -3850,6 +4024,7 @@ parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state,
list_add(&attr->list, &flow->attrs);
flow_action_for_each(i, _act, &flow_action_reorder) {
+ jump_state.jump_target = false;
act = *_act;
tc_act = mlx5e_tc_act_get(act->id, ns_type);
if (!tc_act) {
@@ -3867,12 +4042,19 @@ parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state,
if (err)
goto out_free;
+ dec_jump_count(act, tc_act, attr, priv, &jump_state);
+
+ err = parse_branch_ctrl(act, tc_act, flow, attr, &jump_state, extack);
+ if (err)
+ goto out_free;
+
parse_state->actions |= attr->action;
/* Split attr for multi table act if not the last act. */
- if (tc_act->is_multi_table_act &&
+ if (jump_state.jump_target ||
+ (tc_act->is_multi_table_act &&
tc_act->is_multi_table_act(priv, act, attr) &&
- i < flow_action_reorder.num_entries - 1) {
+ i < flow_action_reorder.num_entries - 1)) {
err = mlx5e_tc_act_post_parse(parse_state, flow_action, attr, ns_type);
if (err)
goto out_free;
@@ -3954,6 +4136,10 @@ parse_tc_nic_actions(struct mlx5e_priv *priv,
if (err)
return err;
+ err = verify_attr_actions(attr->action, extack);
+ if (err)
+ return err;
+
if (!actions_match_supported(priv, flow_action, parse_state->actions,
parse_attr, flow, extack))
return -EOPNOTSUPP;
@@ -4191,6 +4377,30 @@ mlx5_alloc_flow_attr(enum mlx5_flow_namespace_type type)
return attr;
}
+static void
+mlx5_free_flow_attr(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr)
+{
+ struct mlx5_core_dev *counter_dev = get_flow_counter_dev(flow);
+ bool vf_tun;
+
+ if (!attr)
+ return;
+
+ if (attr->post_act_handle)
+ mlx5e_tc_post_act_del(get_post_action(flow->priv), attr->post_act_handle);
+
+ clean_encap_dests(flow->priv, flow, attr, &vf_tun);
+
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
+ mlx5_fc_destroy(counter_dev, attr->counter);
+
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
+ mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts);
+ if (attr->modify_hdr)
+ mlx5_modify_header_dealloc(flow->priv->mdev, attr->modify_hdr);
+ }
+}
+
static int
mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
struct flow_cls_offload *f, unsigned long flow_flags,
@@ -4733,10 +4943,17 @@ static int apply_police_params(struct mlx5e_priv *priv, u64 rate,
return err;
}
-int mlx5e_policer_validate(const struct flow_action *action,
- const struct flow_action_entry *act,
- struct netlink_ext_ack *extack)
+static int
+tc_matchall_police_validate(const struct flow_action *action,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
{
+ if (act->police.notexceed.act_id != FLOW_ACTION_CONTINUE) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is not continue");
+ return -EOPNOTSUPP;
+ }
+
if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
NL_SET_ERR_MSG_MOD(extack,
"Offload not supported when exceed action is not drop");
@@ -4787,13 +5004,7 @@ static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
flow_action_for_each(i, act, flow_action) {
switch (act->id) {
case FLOW_ACTION_POLICE:
- if (act->police.notexceed.act_id != FLOW_ACTION_CONTINUE) {
- NL_SET_ERR_MSG_MOD(extack,
- "Offload not supported when conform action is not continue");
- return -EOPNOTSUPP;
- }
-
- err = mlx5e_policer_validate(flow_action, act, extack);
+ err = tc_matchall_police_validate(flow_action, act, extack);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index 48241317a535..50af70ef22f3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -95,10 +95,13 @@ struct mlx5_flow_attr {
*/
bool count;
} lag;
+ struct mlx5_flow_attr *branch_true;
+ struct mlx5_flow_attr *branch_false;
+ struct mlx5_flow_attr *jumping_attr;
/* keep this union last */
union {
- struct mlx5_esw_flow_attr esw_attr[0];
- struct mlx5_nic_flow_attr nic_attr[0];
+ DECLARE_FLEX_ARRAY(struct mlx5_esw_flow_attr, esw_attr);
+ DECLARE_FLEX_ARRAY(struct mlx5_nic_flow_attr, nic_attr);
};
};
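The union members switch to DECLARE_FLEX_ARRAY() because a bare flexible array member is not valid inside a union; the macro (simplified from include/uapi/linux/stddef.h) wraps it in an anonymous struct with an empty leading member so the union keeps a well-defined zero size:
    #define DECLARE_FLEX_ARRAY(TYPE, NAME) \
            struct { \
                    struct { } __empty_ ## NAME; \
                    TYPE NAME[]; \
            }
The old [0]-sized arrays behaved the same at runtime but are a GNU extension that defeats compiler and fortify array-bounds checking, hence the tree-wide conversion.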
@@ -110,6 +113,8 @@ enum {
MLX5_ATTR_FLAG_SAMPLE = BIT(4),
MLX5_ATTR_FLAG_ACCEPT = BIT(5),
MLX5_ATTR_FLAG_CT = BIT(6),
+ MLX5_ATTR_FLAG_TERMINATING = BIT(7),
+ MLX5_ATTR_FLAG_MTU = BIT(8),
};
/* Returns true if any of the flags that require skipping further TC/NF processing are set. */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index a0242dc15741..8f7580fec193 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -19,6 +19,7 @@
#include "diag/fw_tracer.h"
#include "mlx5_irq.h"
#include "devlink.h"
+#include "en_accel/ipsec.h"
enum {
MLX5_EQE_OWNER_INIT_VAL = 0x1,
@@ -578,6 +579,10 @@ static void gather_async_events_mask(struct mlx5_core_dev *dev, u64 mask[4])
if (MLX5_CAP_MACSEC(dev, log_max_macsec_offload))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_OBJECT_CHANGE);
+ if (mlx5_ipsec_device_caps(dev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
+ async_event_mask |=
+ (1ull << MLX5_EVENT_TYPE_OBJECT_CHANGE);
+
mask[0] = async_event_mask;
if (MLX5_CAP_GEN(dev, event_cap))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
index 4fbff7bcc155..b176648d1343 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
@@ -1722,7 +1722,7 @@ void mlx5_esw_bridge_fdb_update_used(struct net_device *dev, u16 vport_num, u16
entry = mlx5_esw_bridge_fdb_lookup(bridge, fdb_info->addr, fdb_info->vid);
if (!entry) {
esw_debug(br_offloads->esw->dev,
- "FDB entry with specified key not found (MAC=%pM,vid=%u,vport=%u)\n",
+ "FDB update entry with specified key not found (MAC=%pM,vid=%u,vport=%u)\n",
fdb_info->addr, fdb_info->vid, vport_num);
return;
}
@@ -1775,9 +1775,9 @@ void mlx5_esw_bridge_fdb_remove(struct net_device *dev, u16 vport_num, u16 esw_o
bridge = port->bridge;
entry = mlx5_esw_bridge_fdb_lookup(bridge, fdb_info->addr, fdb_info->vid);
if (!entry) {
- esw_warn(esw->dev,
- "FDB entry with specified key not found (MAC=%pM,vid=%u,vport=%u)\n",
- fdb_info->addr, fdb_info->vid, vport_num);
+ esw_debug(esw->dev,
+ "FDB remove entry with specified key not found (MAC=%pM,vid=%u,vport=%u)\n",
+ fdb_info->addr, fdb_info->vid, vport_num);
return;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/debugfs.c
index 2db13c71e88c..3d0bbcca1cb9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/debugfs.c
@@ -12,10 +12,11 @@ enum vnic_diag_counter {
MLX5_VNIC_DIAG_CQ_OVERRUN,
MLX5_VNIC_DIAG_INVALID_COMMAND,
MLX5_VNIC_DIAG_QOUTA_EXCEEDED_COMMAND,
+ MLX5_VNIC_DIAG_RX_STEERING_DISCARD,
};
static int mlx5_esw_query_vnic_diag(struct mlx5_vport *vport, enum vnic_diag_counter counter,
- u32 *val)
+ u64 *val)
{
u32 out[MLX5_ST_SZ_DW(query_vnic_env_out)] = {};
u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
@@ -57,6 +58,10 @@ static int mlx5_esw_query_vnic_diag(struct mlx5_vport *vport, enum vnic_diag_cou
case MLX5_VNIC_DIAG_QOUTA_EXCEEDED_COMMAND:
*val = MLX5_GET(vnic_diagnostic_statistics, vnic_diag_out, quota_exceeded_command);
break;
+ case MLX5_VNIC_DIAG_RX_STEERING_DISCARD:
+ *val = MLX5_GET64(vnic_diagnostic_statistics, vnic_diag_out,
+ nic_receive_steering_discard);
+ break;
}
return 0;
@@ -65,14 +70,14 @@ static int mlx5_esw_query_vnic_diag(struct mlx5_vport *vport, enum vnic_diag_cou
static int __show_vnic_diag(struct seq_file *file, struct mlx5_vport *vport,
enum vnic_diag_counter type)
{
- u32 val = 0;
+ u64 val = 0;
int ret;
ret = mlx5_esw_query_vnic_diag(vport, type, &val);
if (ret)
return ret;
- seq_printf(file, "%d\n", val);
+ seq_printf(file, "%llu\n", val);
return 0;
}
@@ -112,6 +117,11 @@ static int quota_exceeded_command_show(struct seq_file *file, void *priv)
return __show_vnic_diag(file, file->private, MLX5_VNIC_DIAG_QOUTA_EXCEEDED_COMMAND);
}
+static int rx_steering_discard_show(struct seq_file *file, void *priv)
+{
+ return __show_vnic_diag(file, file->private, MLX5_VNIC_DIAG_RX_STEERING_DISCARD);
+}
+
DEFINE_SHOW_ATTRIBUTE(total_q_under_processor_handle);
DEFINE_SHOW_ATTRIBUTE(send_queue_priority_update_flow);
DEFINE_SHOW_ATTRIBUTE(comp_eq_overrun);
@@ -119,6 +129,7 @@ DEFINE_SHOW_ATTRIBUTE(async_eq_overrun);
DEFINE_SHOW_ATTRIBUTE(cq_overrun);
DEFINE_SHOW_ATTRIBUTE(invalid_command);
DEFINE_SHOW_ATTRIBUTE(quota_exceeded_command);
+DEFINE_SHOW_ATTRIBUTE(rx_steering_discard);
void mlx5_esw_vport_debugfs_destroy(struct mlx5_eswitch *esw, u16 vport_num)
{
@@ -179,4 +190,9 @@ void mlx5_esw_vport_debugfs_create(struct mlx5_eswitch *esw, u16 vport_num, bool
if (MLX5_CAP_GEN(esw->dev, quota_exceeded_count))
debugfs_create_file("quota_exceeded_command", 0444, vnic_diag, vport,
&quota_exceeded_command_fops);
+
+ if (MLX5_CAP_GEN(esw->dev, nic_receive_steering_discard))
+ debugfs_create_file("rx_steering_discard", 0444, vnic_diag, vport,
+ &rx_steering_discard_fops);
+
}
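DEFINE_SHOW_ATTRIBUTE(rx_steering_discard) is what turns rx_steering_discard_show() into the rx_steering_discard_fops handed to debugfs_create_file(); the macro from include/linux/seq_file.h expands to roughly:
    static int rx_steering_discard_open(struct inode *inode, struct file *file)
    {
            return single_open(file, rx_steering_discard_show, inode->i_private);
    }

    static const struct file_operations rx_steering_discard_fops = {
            .owner   = THIS_MODULE,
            .open    = rx_steering_discard_open,
            .read    = seq_read,
            .llseek  = seq_lseek,
            .release = single_release,
    };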
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
index 9bc7be95db54..084a910bb4e7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
@@ -91,7 +91,7 @@ int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, u16 vport_
if (err)
goto reg_err;
- err = devl_rate_leaf_create(dl_port, vport);
+ err = devl_rate_leaf_create(dl_port, vport, NULL);
if (err)
goto rate_err;
@@ -160,7 +160,7 @@ int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_p
if (err)
return err;
- err = devl_rate_leaf_create(dl_port, vport);
+ err = devl_rate_leaf_create(dl_port, vport, NULL);
if (err)
goto rate_err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 374e3fbdc2cf..527e4bffda8d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -772,6 +772,41 @@ static void esw_vport_cleanup_acl(struct mlx5_eswitch *esw,
esw_vport_destroy_offloads_acl_tables(esw, vport);
}
+static int mlx5_esw_vport_caps_get(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
+{
+ int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ void *query_ctx;
+ void *hca_caps;
+ int err;
+
+ if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager))
+ return 0;
+
+ query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
+ if (!query_ctx)
+ return -ENOMEM;
+
+ err = mlx5_vport_get_other_func_cap(esw->dev, vport->vport, query_ctx,
+ MLX5_CAP_GENERAL);
+ if (err)
+ goto out_free;
+
+ hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
+ vport->info.roce_enabled = MLX5_GET(cmd_hca_cap, hca_caps, roce);
+
+ memset(query_ctx, 0, query_out_sz);
+ err = mlx5_vport_get_other_func_cap(esw->dev, vport->vport, query_ctx,
+ MLX5_CAP_GENERAL_2);
+ if (err)
+ goto out_free;
+
+ hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
+ vport->info.mig_enabled = MLX5_GET(cmd_hca_cap_2, hca_caps, migratable);
+out_free:
+ kfree(query_ctx);
+ return err;
+}
+
static int esw_vport_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
u16 vport_num = vport->vport;
@@ -785,6 +820,10 @@ static int esw_vport_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
if (mlx5_esw_is_manager_vport(esw, vport_num))
return 0;
+ err = mlx5_esw_vport_caps_get(esw, vport);
+ if (err)
+ goto err_caps;
+
mlx5_modify_vport_admin_state(esw->dev,
MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
vport_num, 1,
@@ -804,6 +843,10 @@ static int esw_vport_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
vport->info.qos, flags);
return 0;
+
+err_caps:
+ esw_vport_cleanup_acl(esw, vport);
+ return err;
}
/* Don't cleanup vport->info, it's needed to restore vport configuration */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 3029bc1c0dd0..5a85a5d32be7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -153,6 +153,8 @@ struct mlx5_vport_info {
u8 qos;
u8 spoofchk: 1;
u8 trusted: 1;
+ u8 roce_enabled: 1;
+ u8 mig_enabled: 1;
};
/* Vport context events */
@@ -508,7 +510,14 @@ int mlx5_devlink_port_function_hw_addr_get(struct devlink_port *port,
int mlx5_devlink_port_function_hw_addr_set(struct devlink_port *port,
const u8 *hw_addr, int hw_addr_len,
struct netlink_ext_ack *extack);
-
+int mlx5_devlink_port_fn_roce_get(struct devlink_port *port, bool *is_enabled,
+ struct netlink_ext_ack *extack);
+int mlx5_devlink_port_fn_roce_set(struct devlink_port *port, bool enable,
+ struct netlink_ext_ack *extack);
+int mlx5_devlink_port_fn_migratable_get(struct devlink_port *port, bool *is_enabled,
+ struct netlink_ext_ack *extack);
+int mlx5_devlink_port_fn_migratable_set(struct devlink_port *port, bool enable,
+ struct netlink_ext_ack *extack);
void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);
int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
@@ -744,6 +753,11 @@ static inline int mlx5_eswitch_num_vfs(struct mlx5_eswitch *esw)
return 0;
}
+static inline struct mlx5_flow_table *
+mlx5_eswitch_get_slow_fdb(struct mlx5_eswitch *esw)
+{
+ return esw->fdb_table.offloads.slow_fdb;
+}
#else /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 8c6c9bcb3dc3..e455b215c708 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -50,6 +50,7 @@
#include "en/mapping.h"
#include "devlink.h"
#include "lag/lag.h"
+#include "en/tc/post_meter.h"
#define mlx5_esw_for_each_rep(esw, i, rep) \
xa_for_each(&((esw)->offloads.vport_reps), i, rep)
@@ -202,6 +203,21 @@ esw_cleanup_decap_indir(struct mlx5_eswitch *esw,
}
static int
+esw_setup_mtu_dest(struct mlx5_flow_destination *dest,
+ struct mlx5e_meter_attr *meter,
+ int i)
+{
+ dest[i].type = MLX5_FLOW_DESTINATION_TYPE_RANGE;
+ dest[i].range.field = MLX5_FLOW_DEST_RANGE_FIELD_PKT_LEN;
+ dest[i].range.min = 0;
+ dest[i].range.max = meter->params.mtu;
+ dest[i].range.hit_ft = mlx5e_post_meter_get_mtu_true_ft(meter->post_meter);
+ dest[i].range.miss_ft = mlx5e_post_meter_get_mtu_false_ft(meter->post_meter);
+
+ return 0;
+}
+
+static int
esw_setup_sampler_dest(struct mlx5_flow_destination *dest,
struct mlx5_flow_act *flow_act,
u32 sampler_id,
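esw_setup_mtu_dest() above is the first user of the new MLX5_FLOW_DESTINATION_TYPE_RANGE: packets whose packet length falls inside [range.min, range.max] are steered to range.hit_ft, anything outside to range.miss_ft. A hand-built sketch of such a destination (green_ft/red_ft are hypothetical flow tables for the within-MTU and over-MTU branches):
    struct mlx5_flow_destination dest = {};

    dest.type = MLX5_FLOW_DESTINATION_TYPE_RANGE;
    dest.range.field = MLX5_FLOW_DEST_RANGE_FIELD_PKT_LEN;
    dest.range.min = 0;
    dest.range.max = 1500;         /* the meter's configured MTU */
    dest.range.hit_ft = green_ft;  /* len in [0, 1500]: within MTU */
    dest.range.miss_ft = red_ft;   /* larger frames: the "red" branch */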
@@ -248,7 +264,7 @@ esw_setup_slow_path_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_ac
if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level))
flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest[i].ft = esw->fdb_table.offloads.slow_fdb;
+ dest[i].ft = mlx5_eswitch_get_slow_fdb(esw);
}
static int
@@ -479,16 +495,21 @@ esw_setup_dests(struct mlx5_flow_destination *dest,
esw_src_port_rewrite_supported(esw))
attr->flags |= MLX5_ATTR_FLAG_SRC_REWRITE;
- if (attr->flags & MLX5_ATTR_FLAG_SAMPLE &&
- !(attr->flags & MLX5_ATTR_FLAG_SLOW_PATH)) {
- esw_setup_sampler_dest(dest, flow_act, attr->sample_attr.sampler_id, *i);
- (*i)++;
- } else if (attr->flags & MLX5_ATTR_FLAG_SLOW_PATH) {
+ if (attr->flags & MLX5_ATTR_FLAG_SLOW_PATH) {
esw_setup_slow_path_dest(dest, flow_act, esw, *i);
(*i)++;
+ goto out;
+ }
+
+ if (attr->flags & MLX5_ATTR_FLAG_SAMPLE) {
+ esw_setup_sampler_dest(dest, flow_act, attr->sample_attr.sampler_id, *i);
+ (*i)++;
} else if (attr->flags & MLX5_ATTR_FLAG_ACCEPT) {
esw_setup_accept_dest(dest, flow_act, chains, *i);
(*i)++;
+ } else if (attr->flags & MLX5_ATTR_FLAG_MTU) {
+ err = esw_setup_mtu_dest(dest, &attr->meter_attr, *i);
+ (*i)++;
} else if (esw_is_indir_table(esw, attr)) {
err = esw_setup_indir_table(dest, flow_act, esw, attr, spec, true, i);
} else if (esw_is_chain_src_port_rewrite(esw, esw_attr)) {
@@ -506,6 +527,7 @@ esw_setup_dests(struct mlx5_flow_destination *dest,
}
}
+out:
return err;
}
@@ -637,6 +659,11 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
goto err_esw_get;
}
+ if (!i) {
+ kfree(dest);
+ dest = NULL;
+ }
+
if (mlx5_eswitch_termtbl_required(esw, attr, &flow_act, spec))
rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, esw_attr,
&flow_act, dest, i);
@@ -1046,7 +1073,7 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
if (rep->vport == MLX5_VPORT_UPLINK)
spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
- flow_rule = mlx5_add_flow_rules(on_esw->fdb_table.offloads.slow_fdb,
+ flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(on_esw),
spec, &flow_act, &dest, 1);
if (IS_ERR(flow_rule))
esw_warn(on_esw->dev, "FDB: Failed to add send to vport rule err %ld\n",
@@ -1095,7 +1122,7 @@ mlx5_eswitch_add_send_to_vport_meta_rule(struct mlx5_eswitch *esw, u16 vport_num
mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num));
dest.vport.num = vport_num;
- flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
+ flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
spec, &flow_act, &dest, 1);
if (IS_ERR(flow_rule))
esw_warn(esw->dev, "FDB: Failed to add send to vport meta rule vport %d, err %ld\n",
@@ -1248,7 +1275,7 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
spec, MLX5_VPORT_PF);
- flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
+ flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
spec, &flow_act, &dest, 1);
if (IS_ERR(flow)) {
err = PTR_ERR(flow);
@@ -1260,7 +1287,7 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
if (mlx5_ecpf_vport_exists(esw->dev)) {
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF);
- flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
+ flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
spec, &flow_act, &dest, 1);
if (IS_ERR(flow)) {
err = PTR_ERR(flow);
@@ -1274,7 +1301,7 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
peer_dev->priv.eswitch,
spec, vport->vport);
- flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
+ flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
spec, &flow_act, &dest, 1);
if (IS_ERR(flow)) {
err = PTR_ERR(flow);
@@ -1363,7 +1390,7 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
dest.vport.num = esw->manager_vport;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
+ flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
spec, &flow_act, &dest, 1);
if (IS_ERR(flow_rule)) {
err = PTR_ERR(flow_rule);
@@ -1378,7 +1405,7 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
outer_headers.dmac_47_16);
dmac_v[0] = 0x01;
- flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
+ flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
spec, &flow_act, &dest, 1);
if (IS_ERR(flow_rule)) {
err = PTR_ERR(flow_rule);
@@ -1927,7 +1954,7 @@ send_vport_err:
fdb_chains_err:
mlx5_destroy_flow_table(esw->fdb_table.offloads.tc_miss_table);
tc_miss_table_err:
- mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
+ mlx5_destroy_flow_table(mlx5_eswitch_get_slow_fdb(esw));
slow_fdb_err:
/* Holds true only as long as DMFS is the default */
mlx5_flow_namespace_set_mode(root_ns, MLX5_FLOW_STEERING_MODE_DMFS);
@@ -1938,7 +1965,7 @@ ns_err:
static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
- if (!esw->fdb_table.offloads.slow_fdb)
+ if (!mlx5_eswitch_get_slow_fdb(esw))
return;
esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
@@ -1954,7 +1981,7 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
esw_chains_destroy(esw, esw_chains(esw));
mlx5_destroy_flow_table(esw->fdb_table.offloads.tc_miss_table);
- mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
+ mlx5_destroy_flow_table(mlx5_eswitch_get_slow_fdb(esw));
/* Holds true only as long as DMFS is the default */
mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,
MLX5_FLOW_STEERING_MODE_DMFS);
@@ -3886,7 +3913,7 @@ static int mlx5_esw_query_vport_vhca_id(struct mlx5_eswitch *esw, u16 vport_num,
if (!query_ctx)
return -ENOMEM;
- err = mlx5_vport_get_other_func_cap(esw->dev, vport_num, query_ctx);
+ err = mlx5_vport_get_other_func_general_cap(esw->dev, vport_num, query_ctx);
if (err)
goto out_free;
@@ -4019,3 +4046,212 @@ int mlx5_devlink_port_function_hw_addr_set(struct devlink_port *port,
return mlx5_eswitch_set_vport_mac(esw, vport_num, hw_addr);
}
+
+static struct mlx5_vport *
+mlx5_devlink_port_fn_get_vport(struct devlink_port *port, struct mlx5_eswitch *esw)
+{
+ u16 vport_num;
+
+ if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
+ if (!is_port_function_supported(esw, vport_num))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ return mlx5_eswitch_get_vport(esw, vport_num);
+}
+
+int mlx5_devlink_port_fn_migratable_get(struct devlink_port *port, bool *is_enabled,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_eswitch *esw;
+ struct mlx5_vport *vport;
+ int err = -EOPNOTSUPP;
+
+ esw = mlx5_devlink_eswitch_get(port->devlink);
+ if (IS_ERR(esw))
+ return PTR_ERR(esw);
+
+ if (!MLX5_CAP_GEN(esw->dev, migration)) {
+ NL_SET_ERR_MSG_MOD(extack, "Device doesn't support migration");
+ return err;
+ }
+
+ vport = mlx5_devlink_port_fn_get_vport(port, esw);
+ if (IS_ERR(vport)) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid port");
+ return PTR_ERR(vport);
+ }
+
+ mutex_lock(&esw->state_lock);
+ if (vport->enabled) {
+ *is_enabled = vport->info.mig_enabled;
+ err = 0;
+ }
+ mutex_unlock(&esw->state_lock);
+ return err;
+}
+
+int mlx5_devlink_port_fn_migratable_set(struct devlink_port *port, bool enable,
+ struct netlink_ext_ack *extack)
+{
+ int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ struct mlx5_eswitch *esw;
+ struct mlx5_vport *vport;
+ void *query_ctx;
+ void *hca_caps;
+ int err = -EOPNOTSUPP;
+
+ esw = mlx5_devlink_eswitch_get(port->devlink);
+ if (IS_ERR(esw))
+ return PTR_ERR(esw);
+
+ if (!MLX5_CAP_GEN(esw->dev, migration)) {
+ NL_SET_ERR_MSG_MOD(extack, "Device doesn't support migration");
+ return err;
+ }
+
+ vport = mlx5_devlink_port_fn_get_vport(port, esw);
+ if (IS_ERR(vport)) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid port");
+ return PTR_ERR(vport);
+ }
+
+ mutex_lock(&esw->state_lock);
+ if (!vport->enabled) {
+ NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled");
+ goto out;
+ }
+
+ if (vport->info.mig_enabled == enable) {
+ err = 0;
+ goto out;
+ }
+
+ query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
+ if (!query_ctx) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = mlx5_vport_get_other_func_cap(esw->dev, vport->vport, query_ctx,
+ MLX5_CAP_GENERAL_2);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed getting HCA caps");
+ goto out_free;
+ }
+
+ hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
+ memcpy(hca_caps, MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability),
+ MLX5_UN_SZ_BYTES(hca_cap_union));
+ MLX5_SET(cmd_hca_cap_2, hca_caps, migratable, 1);
+
+ err = mlx5_vport_set_other_func_cap(esw->dev, hca_caps, vport->vport,
+ MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE2);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed setting HCA migratable cap");
+ goto out_free;
+ }
+
+ vport->info.mig_enabled = enable;
+
+out_free:
+ kfree(query_ctx);
+out:
+ mutex_unlock(&esw->state_lock);
+ return err;
+}
+
+int mlx5_devlink_port_fn_roce_get(struct devlink_port *port, bool *is_enabled,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_eswitch *esw;
+ struct mlx5_vport *vport;
+ int err = -EOPNOTSUPP;
+
+ esw = mlx5_devlink_eswitch_get(port->devlink);
+ if (IS_ERR(esw))
+ return PTR_ERR(esw);
+
+ vport = mlx5_devlink_port_fn_get_vport(port, esw);
+ if (IS_ERR(vport)) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid port");
+ return PTR_ERR(vport);
+ }
+
+ mutex_lock(&esw->state_lock);
+ if (vport->enabled) {
+ *is_enabled = vport->info.roce_enabled;
+ err = 0;
+ }
+ mutex_unlock(&esw->state_lock);
+ return err;
+}
+
+int mlx5_devlink_port_fn_roce_set(struct devlink_port *port, bool enable,
+ struct netlink_ext_ack *extack)
+{
+ int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ struct mlx5_eswitch *esw;
+ struct mlx5_vport *vport;
+ int err = -EOPNOTSUPP;
+ void *query_ctx;
+ void *hca_caps;
+ u16 vport_num;
+
+ esw = mlx5_devlink_eswitch_get(port->devlink);
+ if (IS_ERR(esw))
+ return PTR_ERR(esw);
+
+ vport = mlx5_devlink_port_fn_get_vport(port, esw);
+ if (IS_ERR(vport)) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid port");
+ return PTR_ERR(vport);
+ }
+ vport_num = vport->vport;
+
+ mutex_lock(&esw->state_lock);
+ if (!vport->enabled) {
+ NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled");
+ goto out;
+ }
+
+ if (vport->info.roce_enabled == enable) {
+ err = 0;
+ goto out;
+ }
+
+ query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
+ if (!query_ctx) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = mlx5_vport_get_other_func_cap(esw->dev, vport_num, query_ctx,
+ MLX5_CAP_GENERAL);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed getting HCA caps");
+ goto out_free;
+ }
+
+ hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
+ memcpy(hca_caps, MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability),
+ MLX5_UN_SZ_BYTES(hca_cap_union));
+ MLX5_SET(cmd_hca_cap, hca_caps, roce, enable);
+
+ err = mlx5_vport_set_other_func_cap(esw->dev, hca_caps, vport_num,
+ MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed setting HCA roce cap");
+ goto out_free;
+ }
+
+ vport->info.roce_enabled = enable;
+
+out_free:
+ kfree(query_ctx);
+out:
+ mutex_unlock(&esw->state_lock);
+ return err;
+}
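Both setters above follow the same query/patch/write-back pattern: query the other function's HCA capability page into a scratch buffer, flip a single field with MLX5_SET(), and push the whole page back with the matching op_mod. Assuming an iproute2 recent enough to know the new attributes, this is driven from userspace with commands along the lines of `devlink port function set pci/0000:08:00.0/1 roce disable` and `devlink port function set pci/0000:08:00.0/1 migratable enable`, typically while the function is still undeployed, since only the cached capability is changed.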
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
index edd910258314..3a9a6bb9158d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
@@ -210,6 +210,18 @@ static bool mlx5_eswitch_offload_is_uplink_port(const struct mlx5_eswitch *esw,
return (port_mask & port_value) == MLX5_VPORT_UPLINK;
}
+static bool
+mlx5_eswitch_is_push_vlan_no_cap(struct mlx5_eswitch *esw,
+ struct mlx5_flow_act *flow_act)
+{
+ if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH &&
+ !(mlx5_fs_get_capabilities(esw->dev, MLX5_FLOW_NAMESPACE_FDB) &
+ MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX))
+ return true;
+
+ return false;
+}
+
bool
mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
struct mlx5_flow_attr *attr,
@@ -225,10 +237,7 @@ mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
(!mlx5_eswitch_offload_is_uplink_port(esw, spec) && !esw_attr->int_port))
return false;
- /* push vlan on RX */
- if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH &&
- !(mlx5_fs_get_capabilities(esw->dev, MLX5_FLOW_NAMESPACE_FDB) &
- MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX))
+ if (mlx5_eswitch_is_push_vlan_no_cap(esw, flow_act))
return true;
/* hairpin */
@@ -252,19 +261,31 @@ mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_act term_tbl_act = {};
struct mlx5_flow_handle *rule = NULL;
bool term_table_created = false;
+ bool is_push_vlan_on_rx;
int num_vport_dests = 0;
int i, curr_dest;
+ is_push_vlan_on_rx = mlx5_eswitch_is_push_vlan_no_cap(esw, flow_act);
mlx5_eswitch_termtbl_actions_move(flow_act, &term_tbl_act);
term_tbl_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
for (i = 0; i < num_dest; i++) {
struct mlx5_termtbl_handle *tt;
+ bool hairpin = false;
/* only vport destinations can be terminated */
if (dest[i].type != MLX5_FLOW_DESTINATION_TYPE_VPORT)
continue;
+ if (attr->dests[num_vport_dests].rep &&
+ attr->dests[num_vport_dests].rep->vport == MLX5_VPORT_UPLINK)
+ hairpin = true;
+
+ if (!is_push_vlan_on_rx && !hairpin) {
+ num_vport_dests++;
+ continue;
+ }
+
if (attr->dests[num_vport_dests].flags & MLX5_ESW_DEST_ENCAP) {
term_tbl_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
term_tbl_act.pkt_reformat = attr->dests[num_vport_dests].pkt_reformat;
@@ -312,6 +333,9 @@ revert_changes:
for (curr_dest = 0; curr_dest < num_vport_dests; curr_dest++) {
struct mlx5_termtbl_handle *tt = attr->dests[curr_dest].termtbl;
+ if (!tt)
+ continue;
+
attr->dests[curr_dest].termtbl = NULL;
/* search for the destination associated with the
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index d53749248fa0..5a85d8c1e797 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -111,8 +111,8 @@
#define ETHTOOL_PRIO_NUM_LEVELS 1
#define ETHTOOL_NUM_PRIOS 11
#define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
-/* Promiscuous, Vlan, mac, ttc, inner ttc, {UDP/ANY/aRFS/accel/{esp, esp_err}} */
-#define KERNEL_NIC_PRIO_NUM_LEVELS 7
+/* Promiscuous, Vlan, mac, ttc, inner ttc, {UDP/ANY/aRFS/accel/{esp, esp_err}}, IPsec policy */
+#define KERNEL_NIC_PRIO_NUM_LEVELS 8
#define KERNEL_NIC_NUM_PRIOS 1
/* One more level for tc */
#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)
@@ -133,7 +133,7 @@
#define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + KERNEL_RX_MACSEC_MIN_LEVEL + 1)
#define KERNEL_TX_IPSEC_NUM_PRIOS 1
-#define KERNEL_TX_IPSEC_NUM_LEVELS 1
+#define KERNEL_TX_IPSEC_NUM_LEVELS 2
#define KERNEL_TX_IPSEC_MIN_LEVEL (KERNEL_TX_IPSEC_NUM_LEVELS)
#define KERNEL_TX_MACSEC_NUM_PRIOS 1
@@ -448,7 +448,8 @@ static bool is_fwd_dest_type(enum mlx5_flow_destination_type type)
type == MLX5_FLOW_DESTINATION_TYPE_UPLINK ||
type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
type == MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER ||
- type == MLX5_FLOW_DESTINATION_TYPE_TIR;
+ type == MLX5_FLOW_DESTINATION_TYPE_TIR ||
+ type == MLX5_FLOW_DESTINATION_TYPE_RANGE;
}
static bool check_valid_spec(const struct mlx5_flow_spec *spec)
@@ -1578,7 +1579,13 @@ static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
(d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM &&
d1->ft_num == d2->ft_num) ||
(d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER &&
- d1->sampler_id == d2->sampler_id))
+ d1->sampler_id == d2->sampler_id) ||
+ (d1->type == MLX5_FLOW_DESTINATION_TYPE_RANGE &&
+ d1->range.field == d2->range.field &&
+ d1->range.hit_ft == d2->range.hit_ft &&
+ d1->range.miss_ft == d2->range.miss_ft &&
+ d1->range.min == d2->range.min &&
+ d1->range.max == d2->range.max))
return true;
}
@@ -1962,6 +1969,9 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
if (flow_act->fg && ft->autogroup.active)
return ERR_PTR(-EINVAL);
+ if (dest && dest_num <= 0)
+ return ERR_PTR(-EINVAL);
+
for (i = 0; i < dest_num; i++) {
if (!dest_is_valid(&dest[i], flow_act, ft))
return ERR_PTR(-EINVAL);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 3af50fd04d28..f137a0611b77 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -123,6 +123,7 @@ enum mlx5_flow_steering_mode {
enum mlx5_flow_steering_capabilty {
MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX = 1UL << 0,
MLX5_FLOW_STEERING_CAP_VLAN_POP_ON_TX = 1UL << 1,
+ MLX5_FLOW_STEERING_CAP_MATCH_RANGES = 1UL << 2,
};
struct mlx5_flow_steering {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 4e3a75496dd9..7c5c500fd215 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -561,12 +561,17 @@ static int mlx5i_open(struct net_device *netdev)
if (err)
goto err_remove_fs_underlay_qp;
- epriv->profile->update_rx(epriv);
+ err = epriv->profile->update_rx(epriv);
+ if (err)
+ goto err_close_channels;
+
mlx5e_activate_priv_channels(epriv);
mutex_unlock(&epriv->state_lock);
return 0;
+err_close_channels:
+ mlx5e_close_channels(&epriv->channels);
err_remove_fs_underlay_qp:
mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qpn);
err_reset_qp:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
index 0227a521d301..4d9c9e49645c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
@@ -221,12 +221,16 @@ static int mlx5i_pkey_open(struct net_device *netdev)
mlx5_core_warn(mdev, "opening child channels failed, %d\n", err);
goto err_clear_state_opened_flag;
}
- epriv->profile->update_rx(epriv);
+ err = epriv->profile->update_rx(epriv);
+ if (err)
+ goto err_close_channels;
mlx5e_activate_priv_channels(epriv);
mutex_unlock(&epriv->state_lock);
return 0;
+err_close_channels:
+ mlx5e_close_channels(&epriv->channels);
err_clear_state_opened_flag:
mlx5e_destroy_tis(mdev, epriv->tisn[0][0]);
err_remove_rx_uderlay_qp:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
index 0259a149a64c..d9fcb9ed726f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
@@ -118,13 +118,41 @@ struct mlx5_fib_event_work {
};
};
+static struct net_device*
+mlx5_lag_get_next_fib_dev(struct mlx5_lag *ldev,
+ struct fib_info *fi,
+ struct net_device *current_dev)
+{
+ struct net_device *fib_dev;
+ int i, ldev_idx, nhs;
+
+ nhs = fib_info_num_path(fi);
+ i = 0;
+ if (current_dev) {
+ for (; i < nhs; i++) {
+ fib_dev = fib_info_nh(fi, i)->fib_nh_dev;
+ if (fib_dev == current_dev) {
+ i++;
+ break;
+ }
+ }
+ }
+ for (; i < nhs; i++) {
+ fib_dev = fib_info_nh(fi, i)->fib_nh_dev;
+ ldev_idx = mlx5_lag_dev_get_netdev_idx(ldev, fib_dev);
+ if (ldev_idx >= 0)
+ return ldev->pf[ldev_idx].netdev;
+ }
+
+ return NULL;
+}
+
static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, unsigned long event,
struct fib_entry_notifier_info *fen_info)
{
+ struct net_device *nh_dev0, *nh_dev1;
struct fib_info *fi = fen_info->fi;
struct lag_mp *mp = &ldev->lag_mp;
- struct fib_nh *fib_nh0, *fib_nh1;
- unsigned int nhs;
/* Handle delete event */
if (event == FIB_EVENT_ENTRY_DEL) {
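mlx5_lag_get_next_fib_dev() replaces the old fixed two-nexthop check with an iterator that returns the next route nexthop owned by this LAG. Two calls are enough to classify the route, which is what the hunk below does; a sketch of the cases:
    nh_dev0 = mlx5_lag_get_next_fib_dev(ldev, fi, NULL);    /* first LAG-owned nexthop */
    nh_dev1 = mlx5_lag_get_next_fib_dev(ldev, fi, nh_dev0); /* next distinct one */

    /* nh_dev0 == NULL     route has no nexthop on this LAG: drop state */
    /* nh_dev0 == nh_dev1  two nexthops on one port: not offloadable    */
    /* nh_dev1 == NULL     single-port route: just set port affinity    */
    /* otherwise           genuine multipath route: offload it          */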
@@ -140,16 +168,25 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, unsigned long event,
fi->fib_priority >= mp->fib.priority)
return;
+ nh_dev0 = mlx5_lag_get_next_fib_dev(ldev, fi, NULL);
+ nh_dev1 = mlx5_lag_get_next_fib_dev(ldev, fi, nh_dev0);
+
/* Handle add/replace event */
- nhs = fib_info_num_path(fi);
- if (nhs == 1) {
- if (__mlx5_lag_is_active(ldev)) {
- struct fib_nh *nh = fib_info_nh(fi, 0);
- struct net_device *nh_dev = nh->fib_nh_dev;
- int i = mlx5_lag_dev_get_netdev_idx(ldev, nh_dev);
+ if (!nh_dev0) {
+ if (mp->fib.dst == fen_info->dst && mp->fib.dst_len == fen_info->dst_len)
+ mp->fib.mfi = NULL;
+ return;
+ }
- if (i < 0)
- return;
+ if (nh_dev0 == nh_dev1) {
+ mlx5_core_warn(ldev->pf[MLX5_LAG_P1].dev,
+ "Multipath offload doesn't support routes with multiple nexthops of the same device");
+ return;
+ }
+
+ if (!nh_dev1) {
+ if (__mlx5_lag_is_active(ldev)) {
+ int i = mlx5_lag_dev_get_netdev_idx(ldev, nh_dev0);
i++;
mlx5_lag_set_port_affinity(ldev, i);
@@ -159,21 +196,6 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, unsigned long event,
return;
}
- if (nhs != 2)
- return;
-
- /* Verify next hops are ports of the same hca */
- fib_nh0 = fib_info_nh(fi, 0);
- fib_nh1 = fib_info_nh(fi, 1);
- if (!(fib_nh0->fib_nh_dev == ldev->pf[MLX5_LAG_P1].netdev &&
- fib_nh1->fib_nh_dev == ldev->pf[MLX5_LAG_P2].netdev) &&
- !(fib_nh0->fib_nh_dev == ldev->pf[MLX5_LAG_P2].netdev &&
- fib_nh1->fib_nh_dev == ldev->pf[MLX5_LAG_P1].netdev)) {
- mlx5_core_warn(ldev->pf[MLX5_LAG_P1].dev,
- "Multipath offload require two ports of the same HCA\n");
- return;
- }
-
/* First time we see multipath route */
if (!mp->fib.mfi && !__mlx5_lag_is_active(ldev)) {
struct lag_tracker tracker;
@@ -268,7 +290,6 @@ static int mlx5_lag_fib_event(struct notifier_block *nb,
struct mlx5_fib_event_work *fib_work;
struct fib_entry_notifier_info *fen_info;
struct fib_nh_notifier_info *fnh_info;
- struct net_device *fib_dev;
struct fib_info *fi;
if (info->family != AF_INET)
@@ -285,11 +306,7 @@ static int mlx5_lag_fib_event(struct notifier_block *nb,
fi = fen_info->fi;
if (fi->nh)
return NOTIFY_DONE;
- fib_dev = fib_info_nh(fen_info->fi, 0)->fib_nh_dev;
- if (fib_dev != ldev->pf[MLX5_LAG_P1].netdev &&
- fib_dev != ldev->pf[MLX5_LAG_P2].netdev) {
- return NOTIFY_DONE;
- }
+
fib_work = mlx5_lag_init_fib_work(ldev, event);
if (!fib_work)
return NOTIFY_DONE;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
index c971ff04dd04..5a80fb7dbbca 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
@@ -334,9 +334,6 @@ err_cq:
void mlx5_aso_destroy(struct mlx5_aso *aso)
{
- if (IS_ERR_OR_NULL(aso))
- return;
-
mlx5_aso_destroy_sq(aso);
mlx5_aso_destroy_cq(&aso->cq);
kfree(aso);
@@ -356,12 +353,15 @@ void mlx5_aso_build_wqe(struct mlx5_aso *aso, u8 ds_cnt,
cseg->general_id = cpu_to_be32(obj_id);
}
-void *mlx5_aso_get_wqe(struct mlx5_aso *aso)
+struct mlx5_aso_wqe *mlx5_aso_get_wqe(struct mlx5_aso *aso)
{
+ struct mlx5_aso_wqe *wqe;
u16 pi;
pi = mlx5_wq_cyc_ctr2ix(&aso->wq, aso->pc);
- return mlx5_wq_cyc_get_wqe(&aso->wq, pi);
+ wqe = mlx5_wq_cyc_get_wqe(&aso->wq, pi);
+ memset(wqe, 0, sizeof(*wqe));
+ return wqe;
}
void mlx5_aso_post_wqe(struct mlx5_aso *aso, bool with_data,
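With mlx5_aso_get_wqe() now returning a typed, pre-zeroed WQE, callers can drop their private memset. A sketch of the resulting call sequence (ds_cnt and obj_id are placeholders, and the ctrl member name follows struct mlx5_aso_wqe):
    struct mlx5_aso_wqe *wqe;

    wqe = mlx5_aso_get_wqe(aso);            /* already zeroed */
    mlx5_aso_build_wqe(aso, ds_cnt, wqe, obj_id,
                       MLX5_ACCESS_ASO_OPC_MOD_FLOW_METER);
    /* ... fill the ASO ctrl/data segments for the object ... */
    mlx5_aso_post_wqe(aso, true, &wqe->ctrl);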
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h
index 2d40dcf9d42e..afb078bbb8ef 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h
@@ -15,6 +15,7 @@
#define MLX5_WQE_CTRL_WQE_OPC_MOD_SHIFT 24
#define MLX5_MACSEC_ASO_DS_CNT (DIV_ROUND_UP(sizeof(struct mlx5_aso_wqe), MLX5_SEND_WQE_DS))
+#define ASO_CTRL_READ_EN BIT(0)
struct mlx5_wqe_aso_ctrl_seg {
__be32 va_h;
__be32 va_l; /* include read_enable */
@@ -71,13 +72,14 @@ enum {
};
enum {
+ MLX5_ACCESS_ASO_OPC_MOD_IPSEC = 0x0,
MLX5_ACCESS_ASO_OPC_MOD_FLOW_METER = 0x2,
MLX5_ACCESS_ASO_OPC_MOD_MACSEC = 0x5,
};
struct mlx5_aso;
-void *mlx5_aso_get_wqe(struct mlx5_aso *aso);
+struct mlx5_aso_wqe *mlx5_aso_get_wqe(struct mlx5_aso *aso);
void mlx5_aso_build_wqe(struct mlx5_aso *aso, u8 ds_cnt,
struct mlx5_aso_wqe *aso_wqe,
u32 obj_id, u32 opc_mode);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index d3a9ae80fd30..69cfe60c558a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -339,35 +339,25 @@ static int mlx5_ptp_adjfreq_real_time(struct mlx5_core_dev *mdev, s32 freq)
return mlx5_set_mtutc(mdev, in, sizeof(in));
}
-static int mlx5_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
+static int mlx5_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
struct mlx5_timer *timer = &clock->timer;
struct mlx5_core_dev *mdev;
unsigned long flags;
- int neg_adj = 0;
- u32 diff;
- u64 adj;
+ u32 mult;
int err;
mdev = container_of(clock, struct mlx5_core_dev, clock);
- err = mlx5_ptp_adjfreq_real_time(mdev, delta);
+ err = mlx5_ptp_adjfreq_real_time(mdev, scaled_ppm_to_ppb(scaled_ppm));
if (err)
return err;
- if (delta < 0) {
- neg_adj = 1;
- delta = -delta;
- }
-
- adj = timer->nominal_c_mult;
- adj *= delta;
- diff = div_u64(adj, 1000000000ULL);
+ mult = (u32)adjust_by_scaled_ppm(timer->nominal_c_mult, scaled_ppm);
write_seqlock_irqsave(&clock->lock, flags);
timecounter_read(&timer->tc);
- timer->cycles.mult = neg_adj ? timer->nominal_c_mult - diff :
- timer->nominal_c_mult + diff;
+ timer->cycles.mult = mult;
mlx5_update_clock_info_page(mdev);
write_sequnlock_irqrestore(&clock->lock, flags);
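The .adjfreq to .adjfine conversion trades parts-per-billion for the finer scaled-ppm unit, where scaled_ppm = ppm * 2^16. Worked numbers for a +1 ppm correction (scaled_ppm = 65536):
    scaled_ppm_to_ppb(65536)                    = 1000 ppb  (fed to the MTUTC path)
    adjust_by_scaled_ppm(nominal_c_mult, 65536) = nominal_c_mult + nominal_c_mult / 1000000
i.e. the cyclecounter multiplier moves by one millionth, which is what the removed neg_adj/diff arithmetic computed by hand.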
@@ -697,7 +687,7 @@ static const struct ptp_clock_info mlx5_ptp_clock_info = {
.n_per_out = 0,
.n_pins = 0,
.pps = 0,
- .adjfreq = mlx5_ptp_adjfreq,
+ .adjfine = mlx5_ptp_adjfine,
.adjtime = mlx5_ptp_adjtime,
.gettimex64 = mlx5_ptp_gettimex,
.settime64 = mlx5_ptp_settime,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index e58775a7d955..7f5db13e3550 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -37,7 +37,6 @@
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
-#include <linux/io-mapping.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/mlx5/driver.h>
@@ -1306,8 +1305,15 @@ static int mlx5_load(struct mlx5_core_dev *dev)
mlx5_sf_dev_table_create(dev);
+ err = mlx5_devlink_traps_register(priv_to_devlink(dev));
+ if (err)
+ goto err_traps_reg;
+
return 0;
+err_traps_reg:
+ mlx5_sf_dev_table_destroy(dev);
+ mlx5_sriov_detach(dev);
err_sriov:
mlx5_lag_remove_mdev(dev);
mlx5_ec_cleanup(dev);
@@ -1336,6 +1342,7 @@ err_irq_table:
static void mlx5_unload(struct mlx5_core_dev *dev)
{
+ mlx5_devlink_traps_unregister(priv_to_devlink(dev));
mlx5_sf_dev_table_destroy(dev);
mlx5_sriov_detach(dev);
mlx5_eswitch_disable(dev->priv.eswitch);
@@ -1580,14 +1587,22 @@ err:
return -ENOMEM;
}
+static int vhca_id_show(struct seq_file *file, void *priv)
+{
+ struct mlx5_core_dev *dev = file->private;
+
+ seq_printf(file, "0x%x\n", MLX5_CAP_GEN(dev, vhca_id));
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(vhca_id);
+
int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
{
struct mlx5_priv *priv = &dev->priv;
int err;
memcpy(&dev->profile, &profile[profile_idx], sizeof(dev->profile));
- INIT_LIST_HEAD(&priv->ctx_list);
- spin_lock_init(&priv->ctx_lock);
lockdep_register_key(&dev->lock_key);
mutex_init(&dev->intf_state_mutex);
lockdep_set_class(&dev->intf_state_mutex, &dev->lock_key);
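The vhca_id file added above is read directly from debugfs; with the default mlx5 debugfs root the path is /sys/kernel/debug/mlx5/<pci-address>/vhca_id, and reading it returns a single hex value such as 0x0. Mode 0400 keeps it root-only.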
@@ -1604,6 +1619,7 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
priv->numa_node = dev_to_node(mlx5_core_dma_dev(dev));
priv->dbg.dbg_root = debugfs_create_dir(dev_name(dev->device),
mlx5_debugfs_root);
+ debugfs_create_file("vhca_id", 0400, priv->dbg.dbg_root, dev, &vhca_id_fops);
INIT_LIST_HEAD(&priv->traps);
err = mlx5_tout_init(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index a806e3de7b7c..029305a8b80a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -324,7 +324,12 @@ void mlx5_unload_one_devl_locked(struct mlx5_core_dev *dev);
int mlx5_load_one(struct mlx5_core_dev *dev, bool recovery);
int mlx5_load_one_devl_locked(struct mlx5_core_dev *dev, bool recovery);
-int mlx5_vport_get_other_func_cap(struct mlx5_core_dev *dev, u16 function_id, void *out);
+int mlx5_vport_get_other_func_cap(struct mlx5_core_dev *dev, u16 function_id, void *out,
+ u16 opmod);
+#define mlx5_vport_get_other_func_general_cap(dev, fid, out) \
+ mlx5_vport_get_other_func_cap(dev, fid, out, MLX5_CAP_GENERAL)
+int mlx5_vport_set_other_func_cap(struct mlx5_core_dev *dev, const void *hca_cap, u16 function_id,
+ u16 opmod);
void mlx5_events_work_enqueue(struct mlx5_core_dev *dev, struct work_struct *work);
static inline u32 mlx5_sriov_get_vf_total_msix(struct pci_dev *pdev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index 662f1d55e30e..6bde18bcd42f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -4,6 +4,7 @@
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/mlx5/driver.h>
+#include <linux/mlx5/vport.h>
#include "mlx5_core.h"
#include "mlx5_irq.h"
#include "pci_irq.h"
@@ -101,7 +102,7 @@ int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int function_id,
goto out;
}
- ret = mlx5_vport_get_other_func_cap(dev, function_id, query_cap);
+ ret = mlx5_vport_get_other_func_general_cap(dev, function_id, query_cap);
if (ret)
goto out;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
index b1dfad274a39..ee104cf04392 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
@@ -44,6 +44,7 @@ static const char * const action_type_to_str[] = {
[DR_ACTION_TYP_INSERT_HDR] = "DR_ACTION_TYP_INSERT_HDR",
[DR_ACTION_TYP_REMOVE_HDR] = "DR_ACTION_TYP_REMOVE_HDR",
[DR_ACTION_TYP_ASO_FLOW_METER] = "DR_ACTION_TYP_ASO_FLOW_METER",
+ [DR_ACTION_TYP_RANGE] = "DR_ACTION_TYP_RANGE",
[DR_ACTION_TYP_MAX] = "DR_ACTION_UNKNOWN",
};
@@ -61,6 +62,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_RANGE] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_TAG] = DR_ACTION_STATE_NON_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_NON_TERM,
@@ -79,6 +81,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_RANGE] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_TAG] = DR_ACTION_STATE_DECAP,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_DECAP,
@@ -94,6 +97,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_RANGE] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_TAG] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ENCAP,
@@ -103,6 +107,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_RANGE] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_TAG] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_HDR,
@@ -116,6 +121,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_RANGE] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_TAG] = DR_ACTION_STATE_POP_VLAN,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_POP_VLAN,
@@ -129,6 +135,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_STATE_PUSH_VLAN] = {
[DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_RANGE] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_TAG] = DR_ACTION_STATE_PUSH_VLAN,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_PUSH_VLAN,
@@ -141,6 +148,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_RANGE] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_TAG] = DR_ACTION_STATE_NON_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_NON_TERM,
@@ -159,6 +167,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_RANGE] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ASO,
},
[DR_ACTION_STATE_TERM] = {
@@ -169,6 +178,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_STATE_NO_ACTION] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_RANGE] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_NON_TERM,
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
@@ -183,6 +193,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_STATE_DECAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_RANGE] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_DECAP,
[DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
@@ -190,6 +201,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_STATE_ENCAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_RANGE] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO,
@@ -197,6 +209,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_STATE_MODIFY_HDR] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_RANGE] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
@@ -207,6 +220,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
},
[DR_ACTION_STATE_POP_VLAN] = {
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_RANGE] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_POP_VLAN,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
@@ -220,6 +234,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_STATE_PUSH_VLAN] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_RANGE] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_PUSH_VLAN,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
@@ -231,6 +246,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_STATE_NON_TERM] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_RANGE] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_NON_TERM,
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
@@ -250,6 +266,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ASO,
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_RANGE] = DR_ACTION_STATE_TERM,
},
[DR_ACTION_STATE_TERM] = {
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_TERM,
@@ -259,6 +276,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_STATE_NO_ACTION] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_RANGE] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_NON_TERM,
[DR_ACTION_TYP_TNL_L2_TO_L2] = DR_ACTION_STATE_DECAP,
@@ -276,6 +294,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_STATE_DECAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_RANGE] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_DECAP,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
@@ -291,6 +310,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_RANGE] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ENCAP,
@@ -299,6 +319,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_STATE_MODIFY_HDR] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_RANGE] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
@@ -311,6 +332,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_STATE_POP_VLAN] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_RANGE] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_POP_VLAN,
@@ -324,6 +346,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_STATE_PUSH_VLAN] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_RANGE] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_PUSH_VLAN,
@@ -337,6 +360,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_STATE_NON_TERM] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_RANGE] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_NON_TERM,
[DR_ACTION_TYP_TNL_L2_TO_L2] = DR_ACTION_STATE_DECAP,
@@ -354,6 +378,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_STATE_ASO] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_RANGE] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ASO,
},
@@ -365,6 +390,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_STATE_NO_ACTION] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_RANGE] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_NON_TERM,
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
@@ -380,6 +406,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_STATE_DECAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_RANGE] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_DECAP,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
@@ -388,6 +415,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_STATE_ENCAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_RANGE] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
@@ -396,6 +424,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_STATE_MODIFY_HDR] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_RANGE] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
@@ -407,6 +436,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
},
[DR_ACTION_STATE_POP_VLAN] = {
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_RANGE] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_POP_VLAN,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN,
@@ -421,6 +451,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_STATE_PUSH_VLAN] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_RANGE] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_PUSH_VLAN,
@@ -433,6 +464,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_STATE_NON_TERM] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_RANGE] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_NON_TERM,
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
@@ -452,6 +484,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN,
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_RANGE] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ASO,
},
@@ -634,6 +667,83 @@ static void dr_action_print_sequence(struct mlx5dr_domain *dmn,
actions[i]->action_type);
}
+static int dr_action_get_dest_fw_tbl_addr(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_action_dest_tbl *dest_tbl,
+ bool is_rx_rule,
+ u64 *final_icm_addr)
+{
+ struct mlx5dr_cmd_query_flow_table_details output;
+ struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+ int ret;
+
+ if (!dest_tbl->fw_tbl.rx_icm_addr) {
+ ret = mlx5dr_cmd_query_flow_table(dmn->mdev,
+ dest_tbl->fw_tbl.type,
+ dest_tbl->fw_tbl.id,
+ &output);
+ if (ret) {
+ mlx5dr_err(dmn,
+ "Failed mlx5_cmd_query_flow_table ret: %d\n",
+ ret);
+ return ret;
+ }
+
+ dest_tbl->fw_tbl.tx_icm_addr = output.sw_owner_icm_root_1;
+ dest_tbl->fw_tbl.rx_icm_addr = output.sw_owner_icm_root_0;
+ }
+
+ *final_icm_addr = is_rx_rule ? dest_tbl->fw_tbl.rx_icm_addr :
+ dest_tbl->fw_tbl.tx_icm_addr;
+ return 0;
+}
+
+static int dr_action_get_dest_sw_tbl_addr(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_action_dest_tbl *dest_tbl,
+ bool is_rx_rule,
+ u64 *final_icm_addr)
+{
+ struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+ struct mlx5dr_icm_chunk *chunk;
+
+ if (dest_tbl->tbl->dmn != dmn) {
+ mlx5dr_err(dmn,
+ "Destination table belongs to a different domain\n");
+ return -EINVAL;
+ }
+
+ if (dest_tbl->tbl->level <= matcher->tbl->level) {
+ mlx5_core_dbg_once(dmn->mdev,
+ "Connecting table to a lower/same level destination table\n");
+ mlx5dr_dbg(dmn,
+ "Connecting table at level %d to a destination table at level %d\n",
+ matcher->tbl->level,
+ dest_tbl->tbl->level);
+ }
+
+ chunk = is_rx_rule ? dest_tbl->tbl->rx.s_anchor->chunk :
+ dest_tbl->tbl->tx.s_anchor->chunk;
+
+ *final_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(chunk);
+ return 0;
+}
+
+static int dr_action_get_dest_tbl_addr(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_action_dest_tbl *dest_tbl,
+ bool is_rx_rule,
+ u64 *final_icm_addr)
+{
+ if (dest_tbl->is_fw_tbl)
+ return dr_action_get_dest_fw_tbl_addr(matcher,
+ dest_tbl,
+ is_rx_rule,
+ final_icm_addr);
+
+ return dr_action_get_dest_sw_tbl_addr(matcher,
+ dest_tbl,
+ is_rx_rule,
+ final_icm_addr);
+}
+
#define WITH_VLAN_NUM_HW_ACTIONS 6
int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
@@ -661,8 +771,6 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
action_domain = dr_action_get_action_domain(dmn->type, nic_dmn->type);
for (i = 0; i < num_actions; i++) {
- struct mlx5dr_action_dest_tbl *dest_tbl;
- struct mlx5dr_icm_chunk *chunk;
struct mlx5dr_action *action;
int max_actions_type = 1;
u32 action_type;
@@ -676,50 +784,27 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
break;
case DR_ACTION_TYP_FT:
dest_action = action;
- dest_tbl = action->dest_tbl;
- if (!dest_tbl->is_fw_tbl) {
- if (dest_tbl->tbl->dmn != dmn) {
- mlx5dr_err(dmn,
- "Destination table belongs to a different domain\n");
- return -EINVAL;
- }
- if (dest_tbl->tbl->level <= matcher->tbl->level) {
- mlx5_core_dbg_once(dmn->mdev,
- "Connecting table to a lower/same level destination table\n");
- mlx5dr_dbg(dmn,
- "Connecting table at level %d to a destination table at level %d\n",
- matcher->tbl->level,
- dest_tbl->tbl->level);
- }
- chunk = rx_rule ? dest_tbl->tbl->rx.s_anchor->chunk :
- dest_tbl->tbl->tx.s_anchor->chunk;
- attr.final_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(chunk);
- } else {
- struct mlx5dr_cmd_query_flow_table_details output;
- int ret;
-
- /* get the relevant addresses */
- if (!action->dest_tbl->fw_tbl.rx_icm_addr) {
- ret = mlx5dr_cmd_query_flow_table(dmn->mdev,
- dest_tbl->fw_tbl.type,
- dest_tbl->fw_tbl.id,
- &output);
- if (!ret) {
- dest_tbl->fw_tbl.tx_icm_addr =
- output.sw_owner_icm_root_1;
- dest_tbl->fw_tbl.rx_icm_addr =
- output.sw_owner_icm_root_0;
- } else {
- mlx5dr_err(dmn,
- "Failed mlx5_cmd_query_flow_table ret: %d\n",
- ret);
- return ret;
- }
- }
- attr.final_icm_addr = rx_rule ?
- dest_tbl->fw_tbl.rx_icm_addr :
- dest_tbl->fw_tbl.tx_icm_addr;
- }
+ ret = dr_action_get_dest_tbl_addr(matcher, action->dest_tbl,
+ rx_rule, &attr.final_icm_addr);
+ if (ret)
+ return ret;
+ break;
+ case DR_ACTION_TYP_RANGE:
+ ret = dr_action_get_dest_tbl_addr(matcher,
+ action->range->hit_tbl_action->dest_tbl,
+ rx_rule, &attr.final_icm_addr);
+ if (ret)
+ return ret;
+
+ ret = dr_action_get_dest_tbl_addr(matcher,
+ action->range->miss_tbl_action->dest_tbl,
+ rx_rule, &attr.range.miss_icm_addr);
+ if (ret)
+ return ret;
+
+ attr.range.definer_id = action->range->definer_id;
+ attr.range.min = action->range->min;
+ attr.range.max = action->range->max;
break;
case DR_ACTION_TYP_QP:
mlx5dr_info(dmn, "Domain doesn't support QP\n");
@@ -866,6 +951,7 @@ static unsigned int action_size[DR_ACTION_TYP_MAX] = {
[DR_ACTION_TYP_REMOVE_HDR] = sizeof(struct mlx5dr_action_reformat),
[DR_ACTION_TYP_SAMPLER] = sizeof(struct mlx5dr_action_sampler),
[DR_ACTION_TYP_ASO_FLOW_METER] = sizeof(struct mlx5dr_action_aso_flow_meter),
+ [DR_ACTION_TYP_RANGE] = sizeof(struct mlx5dr_action_range),
};
static struct mlx5dr_action *
@@ -933,6 +1019,123 @@ dec_ref:
return NULL;
}
+static void dr_action_range_definer_fill(u16 *format_id,
+ u8 *dw_selectors,
+ u8 *byte_selectors,
+ u8 *match_mask)
+{
+ int i;
+
+ *format_id = MLX5_IFC_DEFINER_FORMAT_ID_SELECT;
+
+ dw_selectors[0] = MLX5_IFC_DEFINER_FORMAT_OFFSET_OUTER_ETH_PKT_LEN / 4;
+
+ for (i = 1; i < MLX5_IFC_DEFINER_DW_SELECTORS_NUM; i++)
+ dw_selectors[i] = MLX5_IFC_DEFINER_FORMAT_OFFSET_UNUSED;
+
+ for (i = 0; i < MLX5_IFC_DEFINER_BYTE_SELECTORS_NUM; i++)
+ byte_selectors[i] = MLX5_IFC_DEFINER_FORMAT_OFFSET_UNUSED;
+
+ MLX5_SET(match_definer_match_mask, match_mask,
+ match_dw_0, 0xffffUL << 16);
+}
+
+static int dr_action_create_range_definer(struct mlx5dr_action *action)
+{
+ u8 match_mask[MLX5_FLD_SZ_BYTES(match_definer, match_mask)] = {};
+ u8 byte_selectors[MLX5_IFC_DEFINER_BYTE_SELECTORS_NUM] = {};
+ u8 dw_selectors[MLX5_IFC_DEFINER_DW_SELECTORS_NUM] = {};
+ struct mlx5dr_domain *dmn = action->range->dmn;
+ u32 definer_id;
+ u16 format_id;
+ int ret;
+
+ dr_action_range_definer_fill(&format_id,
+ dw_selectors,
+ byte_selectors,
+ match_mask);
+
+ ret = mlx5dr_definer_get(dmn, format_id,
+ dw_selectors, byte_selectors,
+ match_mask, &definer_id);
+ if (ret)
+ return ret;
+
+ action->range->definer_id = definer_id;
+ return 0;
+}
+
+static void dr_action_destroy_range_definer(struct mlx5dr_action *action)
+{
+ mlx5dr_definer_put(action->range->dmn, action->range->definer_id);
+}
+
+struct mlx5dr_action *
+mlx5dr_action_create_dest_match_range(struct mlx5dr_domain *dmn,
+ u32 field,
+ struct mlx5_flow_table *hit_ft,
+ struct mlx5_flow_table *miss_ft,
+ u32 min,
+ u32 max)
+{
+ struct mlx5dr_action *action;
+ int ret;
+
+ if (!mlx5dr_supp_match_ranges(dmn->mdev)) {
+ mlx5dr_dbg(dmn, "SELECT definer support is needed for match range\n");
+ return NULL;
+ }
+
+ if (field != MLX5_FLOW_DEST_RANGE_FIELD_PKT_LEN ||
+ min > 0xffff || max > 0xffff) {
+ mlx5dr_err(dmn, "Invalid match range parameters\n");
+ return NULL;
+ }
+
+ action = dr_action_create_generic(DR_ACTION_TYP_RANGE);
+ if (!action)
+ return NULL;
+
+ action->range->hit_tbl_action =
+ mlx5dr_is_fw_table(hit_ft) ?
+ mlx5dr_action_create_dest_flow_fw_table(dmn, hit_ft) :
+ mlx5dr_action_create_dest_table(hit_ft->fs_dr_table.dr_table);
+
+ if (!action->range->hit_tbl_action)
+ goto free_action;
+
+ action->range->miss_tbl_action =
+ mlx5dr_is_fw_table(miss_ft) ?
+ mlx5dr_action_create_dest_flow_fw_table(dmn, miss_ft) :
+ mlx5dr_action_create_dest_table(miss_ft->fs_dr_table.dr_table);
+
+ if (!action->range->miss_tbl_action)
+ goto free_hit_tbl_action;
+
+ action->range->min = min;
+ action->range->max = max;
+ action->range->dmn = dmn;
+
+ ret = dr_action_create_range_definer(action);
+ if (ret)
+ goto free_miss_tbl_action;
+
+ /* No need to increase the domain refcount for this action;
+ * the hit/miss table actions do it internally.
+ */
+
+ return action;
+
+free_miss_tbl_action:
+ mlx5dr_action_destroy(action->range->miss_tbl_action);
+free_hit_tbl_action:
+ mlx5dr_action_destroy(action->range->hit_tbl_action);
+free_action:
+ kfree(action);
+
+ return NULL;
+}
+
struct mlx5dr_action *
mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
struct mlx5dr_action_dest *dests,
@@ -1980,6 +2183,11 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action)
case DR_ACTION_TYP_ASO_FLOW_METER:
refcount_dec(&action->aso->dmn->refcount);
break;
+ case DR_ACTION_TYP_RANGE:
+ dr_action_destroy_range_definer(action);
+ mlx5dr_action_destroy(action->range->miss_tbl_action);
+ mlx5dr_action_destroy(action->range->hit_tbl_action);
+ break;
default:
break;
}
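
Taken together with the mlx5dr_action_destroy() case above, the new API gives SW steering a packet-length match-range action. A hypothetical caller (hit_ft and miss_ft stand for flow tables the caller already owns) might look like:

	/* Hypothetical usage: packets whose outer packet length is in
	 * [64, 1500] go to hit_ft, all others to miss_ft. Error handling
	 * is abbreviated; the constructor returns NULL on failure.
	 */
	struct mlx5dr_action *range;

	range = mlx5dr_action_create_dest_match_range(dmn,
						      MLX5_FLOW_DEST_RANGE_FIELD_PKT_LEN,
						      hit_ft, miss_ft, 64, 1500);
	if (!range)
		return -EINVAL;

	/* ... pass the action in the actions[] array of mlx5dr_rule_create() ... */

	mlx5dr_action_destroy(range);
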
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_buddy.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_buddy.c
index 7df11a019df9..fe228d948b47 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_buddy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_buddy.c
@@ -15,8 +15,6 @@ int mlx5dr_buddy_init(struct mlx5dr_icm_buddy_mem *buddy,
buddy->max_order = max_order;
INIT_LIST_HEAD(&buddy->list_node);
- INIT_LIST_HEAD(&buddy->used_list);
- INIT_LIST_HEAD(&buddy->hot_list);
buddy->bitmap = kcalloc(buddy->max_order + 1,
sizeof(*buddy->bitmap),
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
index 16d65fe4f654..07b6a6dcb92f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
@@ -271,6 +271,13 @@ int mlx5dr_cmd_sync_steering(struct mlx5_core_dev *mdev)
{
u32 in[MLX5_ST_SZ_DW(sync_steering_in)] = {};
+ /* Skip SYNC in case the device is in an internal error state.
+ * Besides a device error, this also happens when we're
+ * in fast teardown.
+ */
+ if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
+ return 0;
+
MLX5_SET(sync_steering_in, in, opcode, MLX5_CMD_OP_SYNC_STEERING);
return mlx5_cmd_exec_in(mdev, sync_steering, in);
@@ -557,6 +564,83 @@ void mlx5dr_cmd_destroy_reformat_ctx(struct mlx5_core_dev *mdev,
mlx5_cmd_exec_in(mdev, dealloc_packet_reformat_context, in);
}
+static void dr_cmd_set_definer_format(void *ptr, u16 format_id,
+ u8 *dw_selectors,
+ u8 *byte_selectors)
+{
+ if (format_id != MLX5_IFC_DEFINER_FORMAT_ID_SELECT)
+ return;
+
+ MLX5_SET(match_definer, ptr, format_select_dw0, dw_selectors[0]);
+ MLX5_SET(match_definer, ptr, format_select_dw1, dw_selectors[1]);
+ MLX5_SET(match_definer, ptr, format_select_dw2, dw_selectors[2]);
+ MLX5_SET(match_definer, ptr, format_select_dw3, dw_selectors[3]);
+ MLX5_SET(match_definer, ptr, format_select_dw4, dw_selectors[4]);
+ MLX5_SET(match_definer, ptr, format_select_dw5, dw_selectors[5]);
+ MLX5_SET(match_definer, ptr, format_select_dw6, dw_selectors[6]);
+ MLX5_SET(match_definer, ptr, format_select_dw7, dw_selectors[7]);
+ MLX5_SET(match_definer, ptr, format_select_dw8, dw_selectors[8]);
+
+ MLX5_SET(match_definer, ptr, format_select_byte0, byte_selectors[0]);
+ MLX5_SET(match_definer, ptr, format_select_byte1, byte_selectors[1]);
+ MLX5_SET(match_definer, ptr, format_select_byte2, byte_selectors[2]);
+ MLX5_SET(match_definer, ptr, format_select_byte3, byte_selectors[3]);
+ MLX5_SET(match_definer, ptr, format_select_byte4, byte_selectors[4]);
+ MLX5_SET(match_definer, ptr, format_select_byte5, byte_selectors[5]);
+ MLX5_SET(match_definer, ptr, format_select_byte6, byte_selectors[6]);
+ MLX5_SET(match_definer, ptr, format_select_byte7, byte_selectors[7]);
+}
+
+int mlx5dr_cmd_create_definer(struct mlx5_core_dev *mdev,
+ u16 format_id,
+ u8 *dw_selectors,
+ u8 *byte_selectors,
+ u8 *match_mask,
+ u32 *definer_id)
+{
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
+ u32 in[MLX5_ST_SZ_DW(create_match_definer_in)] = {};
+ void *ptr;
+ int err;
+
+ ptr = MLX5_ADDR_OF(create_match_definer_in, in,
+ general_obj_in_cmd_hdr);
+ MLX5_SET(general_obj_in_cmd_hdr, ptr, opcode,
+ MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, ptr, obj_type,
+ MLX5_OBJ_TYPE_MATCH_DEFINER);
+
+ ptr = MLX5_ADDR_OF(create_match_definer_in, in, obj_context);
+ MLX5_SET(match_definer, ptr, format_id, format_id);
+
+ dr_cmd_set_definer_format(ptr, format_id,
+ dw_selectors, byte_selectors);
+
+ ptr = MLX5_ADDR_OF(match_definer, ptr, match_mask);
+ memcpy(ptr, match_mask, MLX5_FLD_SZ_BYTES(match_definer, match_mask));
+
+ err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (err)
+ return err;
+
+ *definer_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+
+ return 0;
+}
+
+void
+mlx5dr_cmd_destroy_definer(struct mlx5_core_dev *mdev, u32 definer_id)
+{
+ u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_MATCH_DEFINER);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, definer_id);
+
+ mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
int mlx5dr_cmd_query_gid(struct mlx5_core_dev *mdev, u8 vhca_port_num,
u16 index, struct mlx5dr_cmd_gid_attr *attr)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
index 7adcf0eec13b..db81d881d38e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
@@ -49,7 +49,8 @@ enum dr_dump_rec_type {
DR_DUMP_REC_TYPE_ACTION_POP_VLAN = 3413,
DR_DUMP_REC_TYPE_ACTION_SAMPLER = 3415,
DR_DUMP_REC_TYPE_ACTION_INSERT_HDR = 3420,
- DR_DUMP_REC_TYPE_ACTION_REMOVE_HDR = 3421
+ DR_DUMP_REC_TYPE_ACTION_REMOVE_HDR = 3421,
+ DR_DUMP_REC_TYPE_ACTION_MATCH_RANGE = 3425,
};
void mlx5dr_dbg_tbl_add(struct mlx5dr_table *tbl)
@@ -107,6 +108,8 @@ dr_dump_rule_action_mem(struct seq_file *file, const u64 rule_id,
{
struct mlx5dr_action *action = action_mem->action;
const u64 action_id = DR_DBG_PTR_TO_ID(action);
+ u64 hit_tbl_ptr, miss_tbl_ptr;
+ u32 hit_tbl_id, miss_tbl_id;
switch (action->action_type) {
case DR_ACTION_TYP_DROP:
@@ -198,6 +201,30 @@ dr_dump_rule_action_mem(struct seq_file *file, const u64 rule_id,
action->sampler->rx_icm_addr,
action->sampler->tx_icm_addr);
break;
+ case DR_ACTION_TYP_RANGE:
+ if (action->range->hit_tbl_action->dest_tbl->is_fw_tbl) {
+ hit_tbl_id = action->range->hit_tbl_action->dest_tbl->fw_tbl.id;
+ hit_tbl_ptr = 0;
+ } else {
+ hit_tbl_id = action->range->hit_tbl_action->dest_tbl->tbl->table_id;
+ hit_tbl_ptr =
+ DR_DBG_PTR_TO_ID(action->range->hit_tbl_action->dest_tbl->tbl);
+ }
+
+ if (action->range->miss_tbl_action->dest_tbl->is_fw_tbl) {
+ miss_tbl_id = action->range->miss_tbl_action->dest_tbl->fw_tbl.id;
+ miss_tbl_ptr = 0;
+ } else {
+ miss_tbl_id = action->range->miss_tbl_action->dest_tbl->tbl->table_id;
+ miss_tbl_ptr =
+ DR_DBG_PTR_TO_ID(action->range->miss_tbl_action->dest_tbl->tbl);
+ }
+
+ seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%llx,0x%x,0x%llx,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_MATCH_RANGE, action_id, rule_id,
+ hit_tbl_id, hit_tbl_ptr, miss_tbl_id, miss_tbl_ptr,
+ action->range->definer_id);
+ break;
default:
return 0;
}
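
For reference, the new match-range dump record is a single comma-separated line: record type, action ID, rule ID, hit table ID, hit table pointer, miss table ID, miss table pointer, definer ID. With hypothetical IDs, and a FW miss table (whose pointer is dumped as 0), it would render as:

	3425,0xffff888123456780,0xffff888123456700,0x5,0xffff888123456600,0x6,0x0,0x2
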
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_definer.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_definer.c
new file mode 100644
index 000000000000..d5ea97751945
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_definer.c
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include "dr_types.h"
+#include "dr_ste.h"
+
+struct dr_definer_object {
+ u32 id;
+ u16 format_id;
+ u8 dw_selectors[MLX5_IFC_DEFINER_DW_SELECTORS_NUM];
+ u8 byte_selectors[MLX5_IFC_DEFINER_BYTE_SELECTORS_NUM];
+ u8 match_mask[DR_STE_SIZE_MATCH_TAG];
+ refcount_t refcount;
+};
+
+static bool dr_definer_compare(struct dr_definer_object *definer,
+ u16 format_id, u8 *dw_selectors,
+ u8 *byte_selectors, u8 *match_mask)
+{
+ int i;
+
+ if (definer->format_id != format_id)
+ return false;
+
+ for (i = 0; i < MLX5_IFC_DEFINER_DW_SELECTORS_NUM; i++)
+ if (definer->dw_selectors[i] != dw_selectors[i])
+ return false;
+
+ for (i = 0; i < MLX5_IFC_DEFINER_BYTE_SELECTORS_NUM; i++)
+ if (definer->byte_selectors[i] != byte_selectors[i])
+ return false;
+
+ if (memcmp(definer->match_mask, match_mask, DR_STE_SIZE_MATCH_TAG))
+ return false;
+
+ return true;
+}
+
+static struct dr_definer_object *
+dr_definer_find_obj(struct mlx5dr_domain *dmn, u16 format_id,
+ u8 *dw_selectors, u8 *byte_selectors, u8 *match_mask)
+{
+ struct dr_definer_object *definer_obj;
+ unsigned long id;
+
+ xa_for_each(&dmn->definers_xa, id, definer_obj) {
+ if (dr_definer_compare(definer_obj, format_id,
+ dw_selectors, byte_selectors,
+ match_mask))
+ return definer_obj;
+ }
+
+ return NULL;
+}
+
+static struct dr_definer_object *
+dr_definer_create_obj(struct mlx5dr_domain *dmn, u16 format_id,
+ u8 *dw_selectors, u8 *byte_selectors, u8 *match_mask)
+{
+ struct dr_definer_object *definer_obj;
+ int ret = 0;
+
+ definer_obj = kzalloc(sizeof(*definer_obj), GFP_KERNEL);
+ if (!definer_obj)
+ return NULL;
+
+ ret = mlx5dr_cmd_create_definer(dmn->mdev,
+ format_id,
+ dw_selectors,
+ byte_selectors,
+ match_mask,
+ &definer_obj->id);
+ if (ret)
+ goto err_free_definer_obj;
+
+ /* A definer ID can be up to 32 bits, but the STE format
+ * supports only definers with 8-bit IDs.
+ */
+ if (definer_obj->id > 0xff) {
+ mlx5dr_err(dmn, "Unsupported definer ID (%d)\n", definer_obj->id);
+ goto err_destroy_definer;
+ }
+
+ definer_obj->format_id = format_id;
+ memcpy(definer_obj->dw_selectors, dw_selectors, sizeof(definer_obj->dw_selectors));
+ memcpy(definer_obj->byte_selectors, byte_selectors, sizeof(definer_obj->byte_selectors));
+ memcpy(definer_obj->match_mask, match_mask, sizeof(definer_obj->match_mask));
+
+ refcount_set(&definer_obj->refcount, 1);
+
+ ret = xa_insert(&dmn->definers_xa, definer_obj->id, definer_obj, GFP_KERNEL);
+ if (ret) {
+ mlx5dr_dbg(dmn, "Couldn't insert new definer into xarray (%d)\n", ret);
+ goto err_destroy_definer;
+ }
+
+ return definer_obj;
+
+err_destroy_definer:
+ mlx5dr_cmd_destroy_definer(dmn->mdev, definer_obj->id);
+err_free_definer_obj:
+ kfree(definer_obj);
+
+ return NULL;
+}
+
+static void dr_definer_destroy_obj(struct mlx5dr_domain *dmn,
+ struct dr_definer_object *definer_obj)
+{
+ mlx5dr_cmd_destroy_definer(dmn->mdev, definer_obj->id);
+ xa_erase(&dmn->definers_xa, definer_obj->id);
+ kfree(definer_obj);
+}
+
+int mlx5dr_definer_get(struct mlx5dr_domain *dmn, u16 format_id,
+ u8 *dw_selectors, u8 *byte_selectors,
+ u8 *match_mask, u32 *definer_id)
+{
+ struct dr_definer_object *definer_obj;
+ int ret = 0;
+
+ definer_obj = dr_definer_find_obj(dmn, format_id, dw_selectors,
+ byte_selectors, match_mask);
+ if (!definer_obj) {
+ definer_obj = dr_definer_create_obj(dmn, format_id,
+ dw_selectors, byte_selectors,
+ match_mask);
+ if (!definer_obj)
+ return -ENOMEM;
+ } else {
+ refcount_inc(&definer_obj->refcount);
+ }
+
+ *definer_id = definer_obj->id;
+
+ return ret;
+}
+
+void mlx5dr_definer_put(struct mlx5dr_domain *dmn, u32 definer_id)
+{
+ struct dr_definer_object *definer_obj;
+
+ definer_obj = xa_load(&dmn->definers_xa, definer_id);
+ if (!definer_obj) {
+ mlx5dr_err(dmn, "Definer ID %d not found\n", definer_id);
+ return;
+ }
+
+ if (refcount_dec_and_test(&definer_obj->refcount))
+ dr_definer_destroy_obj(dmn, definer_obj);
+}
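
The xarray-backed cache above deduplicates definer objects: a get whose parameters match an existing object only bumps its refcount, and the firmware object is destroyed on the last put. A minimal sketch of the pairing (the selector arrays and mask values are illustrative only):

	u8 dw_selectors[MLX5_IFC_DEFINER_DW_SELECTORS_NUM] = {};
	u8 byte_selectors[MLX5_IFC_DEFINER_BYTE_SELECTORS_NUM] = {};
	u8 match_mask[DR_STE_SIZE_MATCH_TAG] = {};
	u32 definer_id;
	int ret;

	ret = mlx5dr_definer_get(dmn, MLX5_IFC_DEFINER_FORMAT_ID_SELECT,
				 dw_selectors, byte_selectors,
				 match_mask, &definer_id);
	if (ret)
		return ret;

	/* A second identical get returns the same definer_id and only
	 * increments the refcount; every get must be balanced by a put.
	 */
	mlx5dr_definer_put(dmn, definer_id);
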
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
index fc6ae49b5ecc..5b8bb2ca31e6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
@@ -56,6 +56,70 @@ int mlx5dr_domain_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn,
return 0;
}
+static int dr_domain_init_mem_resources(struct mlx5dr_domain *dmn)
+{
+ int ret;
+
+ dmn->chunks_kmem_cache = kmem_cache_create("mlx5_dr_chunks",
+ sizeof(struct mlx5dr_icm_chunk), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!dmn->chunks_kmem_cache) {
+ mlx5dr_err(dmn, "Couldn't create chunks kmem_cache\n");
+ return -ENOMEM;
+ }
+
+ dmn->htbls_kmem_cache = kmem_cache_create("mlx5_dr_htbls",
+ sizeof(struct mlx5dr_ste_htbl), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!dmn->htbls_kmem_cache) {
+ mlx5dr_err(dmn, "Couldn't create hash tables kmem_cache\n");
+ ret = -ENOMEM;
+ goto free_chunks_kmem_cache;
+ }
+
+ dmn->ste_icm_pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_STE);
+ if (!dmn->ste_icm_pool) {
+ mlx5dr_err(dmn, "Couldn't get icm memory\n");
+ ret = -ENOMEM;
+ goto free_htbls_kmem_cache;
+ }
+
+ dmn->action_icm_pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_MODIFY_ACTION);
+ if (!dmn->action_icm_pool) {
+ mlx5dr_err(dmn, "Couldn't get action icm memory\n");
+ ret = -ENOMEM;
+ goto free_ste_icm_pool;
+ }
+
+ ret = mlx5dr_send_info_pool_create(dmn);
+ if (ret) {
+ mlx5dr_err(dmn, "Couldn't create send info pool\n");
+ goto free_action_icm_pool;
+ }
+
+ return 0;
+
+free_action_icm_pool:
+ mlx5dr_icm_pool_destroy(dmn->action_icm_pool);
+free_ste_icm_pool:
+ mlx5dr_icm_pool_destroy(dmn->ste_icm_pool);
+free_htbls_kmem_cache:
+ kmem_cache_destroy(dmn->htbls_kmem_cache);
+free_chunks_kmem_cache:
+ kmem_cache_destroy(dmn->chunks_kmem_cache);
+
+ return ret;
+}
+
+static void dr_domain_uninit_mem_resources(struct mlx5dr_domain *dmn)
+{
+ mlx5dr_send_info_pool_destroy(dmn);
+ mlx5dr_icm_pool_destroy(dmn->action_icm_pool);
+ mlx5dr_icm_pool_destroy(dmn->ste_icm_pool);
+ kmem_cache_destroy(dmn->htbls_kmem_cache);
+ kmem_cache_destroy(dmn->chunks_kmem_cache);
+}
+
static int dr_domain_init_resources(struct mlx5dr_domain *dmn)
{
int ret;
@@ -79,32 +143,22 @@ static int dr_domain_init_resources(struct mlx5dr_domain *dmn)
goto clean_pd;
}
- dmn->ste_icm_pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_STE);
- if (!dmn->ste_icm_pool) {
- mlx5dr_err(dmn, "Couldn't get icm memory\n");
- ret = -ENOMEM;
+ ret = dr_domain_init_mem_resources(dmn);
+ if (ret) {
+ mlx5dr_err(dmn, "Couldn't create domain memory resources\n");
goto clean_uar;
}
- dmn->action_icm_pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_MODIFY_ACTION);
- if (!dmn->action_icm_pool) {
- mlx5dr_err(dmn, "Couldn't get action icm memory\n");
- ret = -ENOMEM;
- goto free_ste_icm_pool;
- }
-
ret = mlx5dr_send_ring_alloc(dmn);
if (ret) {
mlx5dr_err(dmn, "Couldn't create send-ring\n");
- goto free_action_icm_pool;
+ goto clean_mem_resources;
}
return 0;
-free_action_icm_pool:
- mlx5dr_icm_pool_destroy(dmn->action_icm_pool);
-free_ste_icm_pool:
- mlx5dr_icm_pool_destroy(dmn->ste_icm_pool);
+clean_mem_resources:
+ dr_domain_uninit_mem_resources(dmn);
clean_uar:
mlx5_put_uars_page(dmn->mdev, dmn->uar);
clean_pd:
@@ -116,8 +170,7 @@ clean_pd:
static void dr_domain_uninit_resources(struct mlx5dr_domain *dmn)
{
mlx5dr_send_ring_free(dmn, dmn->send_ring);
- mlx5dr_icm_pool_destroy(dmn->action_icm_pool);
- mlx5dr_icm_pool_destroy(dmn->ste_icm_pool);
+ dr_domain_uninit_mem_resources(dmn);
mlx5_put_uars_page(dmn->mdev, dmn->uar);
mlx5_core_dealloc_pd(dmn->mdev, dmn->pdn);
}
@@ -372,10 +425,11 @@ mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type)
refcount_set(&dmn->refcount, 1);
mutex_init(&dmn->info.rx.mutex);
mutex_init(&dmn->info.tx.mutex);
+ xa_init(&dmn->definers_xa);
if (dr_domain_caps_init(mdev, dmn)) {
mlx5dr_err(dmn, "Failed init domain, no caps\n");
- goto free_domain;
+ goto def_xa_destroy;
}
dmn->info.max_log_action_icm_sz = DR_CHUNK_SIZE_4K;
@@ -400,7 +454,8 @@ mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type)
uninit_caps:
dr_domain_caps_uninit(dmn);
-free_domain:
+def_xa_destroy:
+ xa_destroy(&dmn->definers_xa);
kfree(dmn);
return NULL;
}
@@ -440,6 +495,7 @@ int mlx5dr_domain_destroy(struct mlx5dr_domain *dmn)
dr_domain_uninit_csum_recalc_fts(dmn);
dr_domain_uninit_resources(dmn);
dr_domain_caps_uninit(dmn);
+ xa_destroy(&dmn->definers_xa);
mutex_destroy(&dmn->info.tx.mutex);
mutex_destroy(&dmn->info.rx.mutex);
kfree(dmn);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
index 4ca67fa24cc6..3eb6719bc8eb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
@@ -4,14 +4,30 @@
#include "dr_types.h"
#define DR_ICM_MODIFY_HDR_ALIGN_BASE 64
+#define DR_ICM_POOL_HOT_MEMORY_FRACTION 4
+
+struct mlx5dr_icm_hot_chunk {
+ struct mlx5dr_icm_buddy_mem *buddy_mem;
+ unsigned int seg;
+ enum mlx5dr_icm_chunk_size size;
+};
struct mlx5dr_icm_pool {
enum mlx5dr_icm_type icm_type;
enum mlx5dr_icm_chunk_size max_log_chunk_sz;
struct mlx5dr_domain *dmn;
+ struct kmem_cache *chunks_kmem_cache;
+
/* memory management */
struct mutex mutex; /* protect the ICM pool and ICM buddy */
struct list_head buddy_mem_list;
+
+ /* Hardware may still be accessing this memory, but at some
+ * future, undetermined time it will cease to do so.
+ * The sync_ste command sets these chunks free.
+ */
+ struct mlx5dr_icm_hot_chunk *hot_chunks_arr;
+ u32 hot_chunks_num;
u64 hot_memory_size;
};
@@ -177,46 +193,20 @@ static int dr_icm_buddy_get_ste_size(struct mlx5dr_icm_buddy_mem *buddy)
static void dr_icm_chunk_ste_init(struct mlx5dr_icm_chunk *chunk, int offset)
{
+ int num_of_entries = mlx5dr_icm_pool_get_chunk_num_of_entries(chunk);
struct mlx5dr_icm_buddy_mem *buddy = chunk->buddy_mem;
+ int ste_size = dr_icm_buddy_get_ste_size(buddy);
int index = offset / DR_STE_SIZE;
chunk->ste_arr = &buddy->ste_arr[index];
chunk->miss_list = &buddy->miss_list[index];
- chunk->hw_ste_arr = buddy->hw_ste_arr +
- index * dr_icm_buddy_get_ste_size(buddy);
-}
+ chunk->hw_ste_arr = buddy->hw_ste_arr + index * ste_size;
-static void dr_icm_chunk_ste_cleanup(struct mlx5dr_icm_chunk *chunk)
-{
- int num_of_entries = mlx5dr_icm_pool_get_chunk_num_of_entries(chunk);
- struct mlx5dr_icm_buddy_mem *buddy = chunk->buddy_mem;
-
- memset(chunk->hw_ste_arr, 0,
- num_of_entries * dr_icm_buddy_get_ste_size(buddy));
+ memset(chunk->hw_ste_arr, 0, num_of_entries * ste_size);
memset(chunk->ste_arr, 0,
num_of_entries * sizeof(chunk->ste_arr[0]));
}
-static enum mlx5dr_icm_type
-get_chunk_icm_type(struct mlx5dr_icm_chunk *chunk)
-{
- return chunk->buddy_mem->pool->icm_type;
-}
-
-static void dr_icm_chunk_destroy(struct mlx5dr_icm_chunk *chunk,
- struct mlx5dr_icm_buddy_mem *buddy)
-{
- enum mlx5dr_icm_type icm_type = get_chunk_icm_type(chunk);
-
- buddy->used_memory -= mlx5dr_icm_pool_get_chunk_byte_size(chunk);
- list_del(&chunk->chunk_list);
-
- if (icm_type == DR_ICM_TYPE_STE)
- dr_icm_chunk_ste_cleanup(chunk);
-
- kvfree(chunk);
-}
-
static int dr_icm_buddy_init_ste_cache(struct mlx5dr_icm_buddy_mem *buddy)
{
int num_of_entries =
@@ -296,14 +286,6 @@ free_mr:
static void dr_icm_buddy_destroy(struct mlx5dr_icm_buddy_mem *buddy)
{
- struct mlx5dr_icm_chunk *chunk, *next;
-
- list_for_each_entry_safe(chunk, next, &buddy->hot_list, chunk_list)
- dr_icm_chunk_destroy(chunk, buddy);
-
- list_for_each_entry_safe(chunk, next, &buddy->used_list, chunk_list)
- dr_icm_chunk_destroy(chunk, buddy);
-
dr_icm_pool_mr_destroy(buddy->icm_mr);
mlx5dr_buddy_cleanup(buddy);
@@ -314,53 +296,62 @@ static void dr_icm_buddy_destroy(struct mlx5dr_icm_buddy_mem *buddy)
kvfree(buddy);
}
-static struct mlx5dr_icm_chunk *
-dr_icm_chunk_create(struct mlx5dr_icm_pool *pool,
- enum mlx5dr_icm_chunk_size chunk_size,
- struct mlx5dr_icm_buddy_mem *buddy_mem_pool,
- unsigned int seg)
+static void
+dr_icm_chunk_init(struct mlx5dr_icm_chunk *chunk,
+ struct mlx5dr_icm_pool *pool,
+ enum mlx5dr_icm_chunk_size chunk_size,
+ struct mlx5dr_icm_buddy_mem *buddy_mem_pool,
+ unsigned int seg)
{
- struct mlx5dr_icm_chunk *chunk;
int offset;
- chunk = kvzalloc(sizeof(*chunk), GFP_KERNEL);
- if (!chunk)
- return NULL;
-
- offset = mlx5dr_icm_pool_dm_type_to_entry_size(pool->icm_type) * seg;
-
chunk->seg = seg;
chunk->size = chunk_size;
chunk->buddy_mem = buddy_mem_pool;
- if (pool->icm_type == DR_ICM_TYPE_STE)
+ if (pool->icm_type == DR_ICM_TYPE_STE) {
+ offset = mlx5dr_icm_pool_dm_type_to_entry_size(pool->icm_type) * seg;
dr_icm_chunk_ste_init(chunk, offset);
+ }
buddy_mem_pool->used_memory += mlx5dr_icm_pool_get_chunk_byte_size(chunk);
- INIT_LIST_HEAD(&chunk->chunk_list);
-
- /* chunk now is part of the used_list */
- list_add_tail(&chunk->chunk_list, &buddy_mem_pool->used_list);
-
- return chunk;
}
static bool dr_icm_pool_is_sync_required(struct mlx5dr_icm_pool *pool)
{
int allow_hot_size;
- /* sync when hot memory reaches half of the pool size */
+ /* sync when hot memory reaches a certain fraction of the pool size */
allow_hot_size =
mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
- pool->icm_type) / 2;
+ pool->icm_type) /
+ DR_ICM_POOL_HOT_MEMORY_FRACTION;
return pool->hot_memory_size > allow_hot_size;
}
+static void dr_icm_pool_clear_hot_chunks_arr(struct mlx5dr_icm_pool *pool)
+{
+ struct mlx5dr_icm_hot_chunk *hot_chunk;
+ u32 i, num_entries;
+
+ for (i = 0; i < pool->hot_chunks_num; i++) {
+ hot_chunk = &pool->hot_chunks_arr[i];
+ num_entries = mlx5dr_icm_pool_chunk_size_to_entries(hot_chunk->size);
+ mlx5dr_buddy_free_mem(hot_chunk->buddy_mem,
+ hot_chunk->seg, ilog2(num_entries));
+ hot_chunk->buddy_mem->used_memory -=
+ mlx5dr_icm_pool_chunk_size_to_byte(hot_chunk->size,
+ pool->icm_type);
+ }
+
+ pool->hot_chunks_num = 0;
+ pool->hot_memory_size = 0;
+}
+
static int dr_icm_pool_sync_all_buddy_pools(struct mlx5dr_icm_pool *pool)
{
struct mlx5dr_icm_buddy_mem *buddy, *tmp_buddy;
- u32 num_entries;
int err;
err = mlx5dr_cmd_sync_steering(pool->dmn->mdev);
@@ -369,16 +360,9 @@ static int dr_icm_pool_sync_all_buddy_pools(struct mlx5dr_icm_pool *pool)
return err;
}
- list_for_each_entry_safe(buddy, tmp_buddy, &pool->buddy_mem_list, list_node) {
- struct mlx5dr_icm_chunk *chunk, *tmp_chunk;
-
- list_for_each_entry_safe(chunk, tmp_chunk, &buddy->hot_list, chunk_list) {
- num_entries = mlx5dr_icm_pool_get_chunk_num_of_entries(chunk);
- mlx5dr_buddy_free_mem(buddy, chunk->seg, ilog2(num_entries));
- pool->hot_memory_size -= mlx5dr_icm_pool_get_chunk_byte_size(chunk);
- dr_icm_chunk_destroy(chunk, buddy);
- }
+ dr_icm_pool_clear_hot_chunks_arr(pool);
+ list_for_each_entry_safe(buddy, tmp_buddy, &pool->buddy_mem_list, list_node) {
if (!buddy->used_memory && pool->icm_type == DR_ICM_TYPE_STE)
dr_icm_buddy_destroy(buddy);
}
@@ -452,10 +436,12 @@ mlx5dr_icm_alloc_chunk(struct mlx5dr_icm_pool *pool,
if (ret)
goto out;
- chunk = dr_icm_chunk_create(pool, chunk_size, buddy, seg);
+ chunk = kmem_cache_alloc(pool->chunks_kmem_cache, GFP_KERNEL);
if (!chunk)
goto out_err;
+ dr_icm_chunk_init(chunk, pool, chunk_size, buddy, seg);
+
goto out;
out_err:
@@ -469,12 +455,23 @@ void mlx5dr_icm_free_chunk(struct mlx5dr_icm_chunk *chunk)
{
struct mlx5dr_icm_buddy_mem *buddy = chunk->buddy_mem;
struct mlx5dr_icm_pool *pool = buddy->pool;
+ struct mlx5dr_icm_hot_chunk *hot_chunk;
+ struct kmem_cache *chunks_cache;
+
+ chunks_cache = pool->chunks_kmem_cache;
- /* move the memory to the waiting list AKA "hot" */
+ /* move the chunk to the waiting chunks array, AKA "hot" memory */
mutex_lock(&pool->mutex);
- list_move_tail(&chunk->chunk_list, &buddy->hot_list);
+
pool->hot_memory_size += mlx5dr_icm_pool_get_chunk_byte_size(chunk);
+ hot_chunk = &pool->hot_chunks_arr[pool->hot_chunks_num++];
+ hot_chunk->buddy_mem = chunk->buddy_mem;
+ hot_chunk->seg = chunk->seg;
+ hot_chunk->size = chunk->size;
+
+ kmem_cache_free(chunks_cache, chunk);
+
/* Check if we have chunks that are waiting for sync-ste */
if (dr_icm_pool_is_sync_required(pool))
dr_icm_pool_sync_all_buddy_pools(pool);
@@ -482,9 +479,20 @@ void mlx5dr_icm_free_chunk(struct mlx5dr_icm_chunk *chunk)
mutex_unlock(&pool->mutex);
}
+struct mlx5dr_ste_htbl *mlx5dr_icm_pool_alloc_htbl(struct mlx5dr_icm_pool *pool)
+{
+ return kmem_cache_alloc(pool->dmn->htbls_kmem_cache, GFP_KERNEL);
+}
+
+void mlx5dr_icm_pool_free_htbl(struct mlx5dr_icm_pool *pool, struct mlx5dr_ste_htbl *htbl)
+{
+ kmem_cache_free(pool->dmn->htbls_kmem_cache, htbl);
+}
+
struct mlx5dr_icm_pool *mlx5dr_icm_pool_create(struct mlx5dr_domain *dmn,
enum mlx5dr_icm_type icm_type)
{
+ u32 num_of_chunks, entry_size, max_hot_size;
enum mlx5dr_icm_chunk_size max_log_chunk_sz;
struct mlx5dr_icm_pool *pool;
@@ -500,21 +508,43 @@ struct mlx5dr_icm_pool *mlx5dr_icm_pool_create(struct mlx5dr_domain *dmn,
pool->dmn = dmn;
pool->icm_type = icm_type;
pool->max_log_chunk_sz = max_log_chunk_sz;
+ pool->chunks_kmem_cache = dmn->chunks_kmem_cache;
INIT_LIST_HEAD(&pool->buddy_mem_list);
mutex_init(&pool->mutex);
+ entry_size = mlx5dr_icm_pool_dm_type_to_entry_size(pool->icm_type);
+
+ max_hot_size = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
+ pool->icm_type) /
+ DR_ICM_POOL_HOT_MEMORY_FRACTION;
+
+ num_of_chunks = DIV_ROUND_UP(max_hot_size, entry_size) + 1;
+
+ pool->hot_chunks_arr = kvcalloc(num_of_chunks,
+ sizeof(struct mlx5dr_icm_hot_chunk),
+ GFP_KERNEL);
+ if (!pool->hot_chunks_arr)
+ goto free_pool;
+
return pool;
+
+free_pool:
+ kvfree(pool);
+ return NULL;
}
void mlx5dr_icm_pool_destroy(struct mlx5dr_icm_pool *pool)
{
struct mlx5dr_icm_buddy_mem *buddy, *tmp_buddy;
+ dr_icm_pool_clear_hot_chunks_arr(pool);
+
list_for_each_entry_safe(buddy, tmp_buddy, &pool->buddy_mem_list, list_node)
dr_icm_buddy_destroy(buddy);
+ kvfree(pool->hot_chunks_arr);
mutex_destroy(&pool->mutex);
kvfree(pool);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
index 91ff19f67695..74cbe53ee9db 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
@@ -3,13 +3,16 @@
#include "dr_types.h"
-#define DR_RULE_MAX_STE_CHAIN (DR_RULE_MAX_STES + DR_ACTION_MAX_STES)
+#define DR_RULE_MAX_STES_OPTIMIZED 5
+#define DR_RULE_MAX_STE_CHAIN_OPTIMIZED (DR_RULE_MAX_STES_OPTIMIZED + DR_ACTION_MAX_STES)
-static int dr_rule_append_to_miss_list(struct mlx5dr_ste_ctx *ste_ctx,
+static int dr_rule_append_to_miss_list(struct mlx5dr_domain *dmn,
+ enum mlx5dr_domain_nic_type nic_type,
struct mlx5dr_ste *new_last_ste,
struct list_head *miss_list,
struct list_head *send_list)
{
+ struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
struct mlx5dr_ste_send_info *ste_info_last;
struct mlx5dr_ste *last_ste;
@@ -17,7 +20,7 @@ static int dr_rule_append_to_miss_list(struct mlx5dr_ste_ctx *ste_ctx,
last_ste = list_last_entry(miss_list, struct mlx5dr_ste, miss_list_node);
WARN_ON(!last_ste);
- ste_info_last = kzalloc(sizeof(*ste_info_last), GFP_KERNEL);
+ ste_info_last = mlx5dr_send_info_alloc(dmn, nic_type);
if (!ste_info_last)
return -ENOMEM;
@@ -32,16 +35,28 @@ static int dr_rule_append_to_miss_list(struct mlx5dr_ste_ctx *ste_ctx,
return 0;
}
+static void dr_rule_set_last_ste_miss_addr(struct mlx5dr_matcher *matcher,
+ struct mlx5dr_matcher_rx_tx *nic_matcher,
+ u8 *hw_ste)
+{
+ struct mlx5dr_ste_ctx *ste_ctx = matcher->tbl->dmn->ste_ctx;
+ u64 icm_addr;
+
+ if (mlx5dr_ste_is_miss_addr_set(ste_ctx, hw_ste))
+ return;
+
+ icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
+ mlx5dr_ste_set_miss_addr(ste_ctx, hw_ste, icm_addr);
+}
+
static struct mlx5dr_ste *
dr_rule_create_collision_htbl(struct mlx5dr_matcher *matcher,
struct mlx5dr_matcher_rx_tx *nic_matcher,
u8 *hw_ste)
{
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
- struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
struct mlx5dr_ste_htbl *new_htbl;
struct mlx5dr_ste *ste;
- u64 icm_addr;
/* Create new table for miss entry */
new_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
@@ -55,8 +70,7 @@ dr_rule_create_collision_htbl(struct mlx5dr_matcher *matcher,
/* One and only entry, never grows */
ste = new_htbl->chunk->ste_arr;
- icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
- mlx5dr_ste_set_miss_addr(ste_ctx, hw_ste, icm_addr);
+ dr_rule_set_last_ste_miss_addr(matcher, nic_matcher, hw_ste);
mlx5dr_htbl_get(new_htbl);
return ste;
@@ -120,7 +134,7 @@ dr_rule_handle_one_ste_in_update_list(struct mlx5dr_ste_send_info *ste_info,
goto out;
out:
- kfree(ste_info);
+ mlx5dr_send_info_free(ste_info);
return ret;
}
@@ -191,8 +205,8 @@ dr_rule_rehash_handle_collision(struct mlx5dr_matcher *matcher,
new_ste->htbl->chunk->miss_list = mlx5dr_ste_get_miss_list(col_ste);
/* Update the previous from the list */
- ret = dr_rule_append_to_miss_list(dmn->ste_ctx, new_ste,
- mlx5dr_ste_get_miss_list(col_ste),
+ ret = dr_rule_append_to_miss_list(dmn, nic_matcher->nic_tbl->nic_dmn->type,
+ new_ste, mlx5dr_ste_get_miss_list(col_ste),
update_list);
if (ret) {
mlx5dr_dbg(dmn, "Failed update dup entry\n");
@@ -238,7 +252,6 @@ dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
bool use_update_list = false;
u8 hw_ste[DR_STE_SIZE] = {};
struct mlx5dr_ste *new_ste;
- u64 icm_addr;
int new_idx;
u8 sb_idx;
@@ -247,9 +260,8 @@ dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
mlx5dr_ste_set_bit_mask(hw_ste, nic_matcher->ste_builder[sb_idx].bit_mask);
/* Copy STE control and tag */
- icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
memcpy(hw_ste, mlx5dr_ste_get_hw_ste(cur_ste), DR_STE_SIZE_REDUCED);
- mlx5dr_ste_set_miss_addr(dmn->ste_ctx, hw_ste, icm_addr);
+ dr_rule_set_last_ste_miss_addr(matcher, nic_matcher, hw_ste);
new_idx = mlx5dr_ste_calc_hash_index(hw_ste, new_htbl);
new_ste = &new_htbl->chunk->ste_arr[new_idx];
@@ -278,7 +290,8 @@ dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
new_htbl->ctrl.num_of_valid_entries++;
if (use_update_list) {
- ste_info = kzalloc(sizeof(*ste_info), GFP_KERNEL);
+ ste_info = mlx5dr_send_info_alloc(dmn,
+ nic_matcher->nic_tbl->nic_dmn->type);
if (!ste_info)
goto err_exit;
@@ -357,6 +370,15 @@ static int dr_rule_rehash_copy_htbl(struct mlx5dr_matcher *matcher,
update_list);
if (err)
goto clean_copy;
+
+ /* To decrease the number of allocated ste_send_info
+ * structs, send the current table row to HW now.
+ */
+ err = dr_rule_send_update_list(update_list, matcher->tbl->dmn, false);
+ if (err) {
+ mlx5dr_dbg(matcher->tbl->dmn, "Failed updating table to HW\n");
+ goto clean_copy;
+ }
}
clean_copy:
@@ -387,7 +409,8 @@ dr_rule_rehash_htbl(struct mlx5dr_rule *rule,
nic_matcher = nic_rule->nic_matcher;
nic_dmn = nic_matcher->nic_tbl->nic_dmn;
- ste_info = kzalloc(sizeof(*ste_info), GFP_KERNEL);
+ ste_info = mlx5dr_send_info_alloc(dmn,
+ nic_matcher->nic_tbl->nic_dmn->type);
if (!ste_info)
return NULL;
@@ -473,13 +496,13 @@ free_ste_list:
list_for_each_entry_safe(del_ste_info, tmp_ste_info,
&rehash_table_send_list, send_list) {
list_del(&del_ste_info->send_list);
- kfree(del_ste_info);
+ mlx5dr_send_info_free(del_ste_info);
}
free_new_htbl:
mlx5dr_ste_htbl_free(new_htbl);
free_ste_info:
- kfree(ste_info);
+ mlx5dr_send_info_free(ste_info);
mlx5dr_info(dmn, "Failed creating rehash table\n");
return NULL;
}
@@ -512,11 +535,11 @@ dr_rule_handle_collision(struct mlx5dr_matcher *matcher,
struct list_head *send_list)
{
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
- struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
struct mlx5dr_ste_send_info *ste_info;
struct mlx5dr_ste *new_ste;
- ste_info = kzalloc(sizeof(*ste_info), GFP_KERNEL);
+ ste_info = mlx5dr_send_info_alloc(dmn,
+ nic_matcher->nic_tbl->nic_dmn->type);
if (!ste_info)
return NULL;
@@ -524,8 +547,8 @@ dr_rule_handle_collision(struct mlx5dr_matcher *matcher,
if (!new_ste)
goto free_send_info;
- if (dr_rule_append_to_miss_list(ste_ctx, new_ste,
- miss_list, send_list)) {
+ if (dr_rule_append_to_miss_list(dmn, nic_matcher->nic_tbl->nic_dmn->type,
+ new_ste, miss_list, send_list)) {
mlx5dr_dbg(dmn, "Failed to update prev miss_list\n");
goto err_exit;
}
@@ -541,7 +564,7 @@ dr_rule_handle_collision(struct mlx5dr_matcher *matcher,
err_exit:
mlx5dr_ste_free(new_ste, matcher, nic_matcher);
free_send_info:
- kfree(ste_info);
+ mlx5dr_send_info_free(ste_info);
return NULL;
}
@@ -721,8 +744,8 @@ static int dr_rule_handle_action_stes(struct mlx5dr_rule *rule,
list_add_tail(&action_ste->miss_list_node,
mlx5dr_ste_get_miss_list(action_ste));
- ste_info_arr[k] = kzalloc(sizeof(*ste_info_arr[k]),
- GFP_KERNEL);
+ ste_info_arr[k] = mlx5dr_send_info_alloc(dmn,
+ nic_matcher->nic_tbl->nic_dmn->type);
if (!ste_info_arr[k])
goto err_exit;
@@ -759,7 +782,6 @@ static int dr_rule_handle_empty_entry(struct mlx5dr_matcher *matcher,
{
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
struct mlx5dr_ste_send_info *ste_info;
- u64 icm_addr;
/* Take ref on table, only on first time this ste is used */
mlx5dr_htbl_get(cur_htbl);
@@ -767,12 +789,12 @@ static int dr_rule_handle_empty_entry(struct mlx5dr_matcher *matcher,
/* new entry -> new branch */
list_add_tail(&ste->miss_list_node, miss_list);
- icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
- mlx5dr_ste_set_miss_addr(dmn->ste_ctx, hw_ste, icm_addr);
+ dr_rule_set_last_ste_miss_addr(matcher, nic_matcher, hw_ste);
ste->ste_chain_location = ste_location;
- ste_info = kzalloc(sizeof(*ste_info), GFP_KERNEL);
+ ste_info = mlx5dr_send_info_alloc(dmn,
+ nic_matcher->nic_tbl->nic_dmn->type);
if (!ste_info)
goto clean_ste_setting;
@@ -793,7 +815,7 @@ static int dr_rule_handle_empty_entry(struct mlx5dr_matcher *matcher,
return 0;
clean_ste_info:
- kfree(ste_info);
+ mlx5dr_send_info_free(ste_info);
clean_ste_setting:
list_del_init(&ste->miss_list_node);
mlx5dr_htbl_put(cur_htbl);
@@ -1089,6 +1111,7 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
size_t num_actions,
struct mlx5dr_action *actions[])
{
+ u8 hw_ste_arr_optimized[DR_RULE_MAX_STE_CHAIN_OPTIMIZED * DR_STE_SIZE] = {};
struct mlx5dr_ste_send_info *ste_info, *tmp_ste_info;
struct mlx5dr_matcher *matcher = rule->matcher;
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
@@ -1098,6 +1121,7 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
struct mlx5dr_ste_htbl *cur_htbl;
struct mlx5dr_ste *ste = NULL;
LIST_HEAD(send_ste_list);
+ bool hw_ste_arr_is_opt;
u8 *hw_ste_arr = NULL;
u32 new_hw_ste_arr_sz;
int ret, i;
@@ -1109,9 +1133,23 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
rule->flow_source))
return 0;
- hw_ste_arr = kzalloc(DR_RULE_MAX_STE_CHAIN * DR_STE_SIZE, GFP_KERNEL);
- if (!hw_ste_arr)
- return -ENOMEM;
+ ret = mlx5dr_matcher_select_builders(matcher,
+ nic_matcher,
+ dr_rule_get_ipv(&param->outer),
+ dr_rule_get_ipv(&param->inner));
+ if (ret)
+ return ret;
+
+ hw_ste_arr_is_opt = nic_matcher->num_of_builders <= DR_RULE_MAX_STES_OPTIMIZED;
+ if (likely(hw_ste_arr_is_opt)) {
+ hw_ste_arr = hw_ste_arr_optimized;
+ } else {
+ hw_ste_arr = kzalloc((nic_matcher->num_of_builders + DR_ACTION_MAX_STES) *
+ DR_STE_SIZE, GFP_KERNEL);
+
+ if (!hw_ste_arr)
+ return -ENOMEM;
+ }
mlx5dr_domain_nic_lock(nic_dmn);
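
This hunk drops the unconditional kzalloc() of the STE scratch array: when the matcher needs at most DR_RULE_MAX_STES_OPTIMIZED builders the rule is assembled in an on-stack buffer, and the heap path (sized with DR_ACTION_MAX_STES of headroom) remains only for long chains. A minimal sketch of the pattern, with invented sizes:

  #include <stdlib.h>
  #include <string.h>

  #define STE_SIZE           64
  #define MAX_STES_OPTIMIZED 5 /* illustrative, not the driver's constant */

  static int build_rule(size_t num_stes)
  {
  	unsigned char stack_buf[MAX_STES_OPTIMIZED * STE_SIZE] = {0};
  	unsigned char *buf;
  	int on_stack = num_stes <= MAX_STES_OPTIMIZED;

  	buf = on_stack ? stack_buf : calloc(num_stes, STE_SIZE);
  	if (!buf)
  		return -1;

  	memset(buf, 0xab, num_stes * STE_SIZE); /* ...build the STE chain... */

  	if (!on_stack)
  		free(buf); /* the stack buffer must never be freed */
  	return 0;
  }

Both exit paths in the driver gate kfree() on the same hw_ste_arr_is_opt flag for exactly this reason.
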
@@ -1119,13 +1157,6 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
if (ret)
goto free_hw_ste;
- ret = mlx5dr_matcher_select_builders(matcher,
- nic_matcher,
- dr_rule_get_ipv(&param->outer),
- dr_rule_get_ipv(&param->inner));
- if (ret)
- goto remove_from_nic_tbl;
-
/* Set the tag values inside the ste array */
ret = mlx5dr_ste_build_ste_arr(matcher, nic_matcher, param, hw_ste_arr);
if (ret)
@@ -1187,7 +1218,8 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
mlx5dr_domain_nic_unlock(nic_dmn);
- kfree(hw_ste_arr);
+ if (unlikely(!hw_ste_arr_is_opt))
+ kfree(hw_ste_arr);
return 0;
@@ -1196,7 +1228,7 @@ free_rule:
/* Clean all ste_info's */
list_for_each_entry_safe(ste_info, tmp_ste_info, &send_ste_list, send_list) {
list_del(&ste_info->send_list);
- kfree(ste_info);
+ mlx5dr_send_info_free(ste_info);
}
remove_from_nic_tbl:
@@ -1205,7 +1237,10 @@ remove_from_nic_tbl:
free_hw_ste:
mlx5dr_domain_nic_unlock(nic_dmn);
- kfree(hw_ste_arr);
+
+ if (unlikely(!hw_ste_arr_is_opt))
+ kfree(hw_ste_arr);
+
return ret;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
index ef19a66f5233..a4476cb4c3b3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
@@ -7,6 +7,7 @@
#define QUEUE_SIZE 128
#define SIGNAL_PER_DIV_QUEUE 16
#define TH_NUMS_TO_DRAIN 2
+#define DR_SEND_INFO_POOL_SIZE 1000
enum { CQ_OK = 0, CQ_EMPTY = -1, CQ_POLL_ERR = -2 };
@@ -49,6 +50,136 @@ struct dr_qp_init_attr {
u8 isolate_vl_tc:1;
};
+struct mlx5dr_send_info_pool_obj {
+ struct mlx5dr_ste_send_info ste_send_info;
+ struct mlx5dr_send_info_pool *pool;
+ struct list_head list_node;
+};
+
+struct mlx5dr_send_info_pool {
+ struct list_head free_list;
+};
+
+static int dr_send_info_pool_fill(struct mlx5dr_send_info_pool *pool)
+{
+ struct mlx5dr_send_info_pool_obj *pool_obj, *tmp_pool_obj;
+ int i;
+
+ for (i = 0; i < DR_SEND_INFO_POOL_SIZE; i++) {
+ pool_obj = kzalloc(sizeof(*pool_obj), GFP_KERNEL);
+ if (!pool_obj)
+ goto clean_pool;
+
+ pool_obj->pool = pool;
+ list_add_tail(&pool_obj->list_node, &pool->free_list);
+ }
+
+ return 0;
+
+clean_pool:
+ list_for_each_entry_safe(pool_obj, tmp_pool_obj, &pool->free_list, list_node) {
+ list_del(&pool_obj->list_node);
+ kfree(pool_obj);
+ }
+
+ return -ENOMEM;
+}
+
+static void dr_send_info_pool_destroy(struct mlx5dr_send_info_pool *pool)
+{
+ struct mlx5dr_send_info_pool_obj *pool_obj, *tmp_pool_obj;
+
+ list_for_each_entry_safe(pool_obj, tmp_pool_obj, &pool->free_list, list_node) {
+ list_del(&pool_obj->list_node);
+ kfree(pool_obj);
+ }
+
+ kfree(pool);
+}
+
+void mlx5dr_send_info_pool_destroy(struct mlx5dr_domain *dmn)
+{
+ dr_send_info_pool_destroy(dmn->send_info_pool_tx);
+ dr_send_info_pool_destroy(dmn->send_info_pool_rx);
+}
+
+static struct mlx5dr_send_info_pool *dr_send_info_pool_create(void)
+{
+ struct mlx5dr_send_info_pool *pool;
+ int ret;
+
+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return NULL;
+
+ INIT_LIST_HEAD(&pool->free_list);
+
+ ret = dr_send_info_pool_fill(pool);
+ if (ret) {
+ kfree(pool);
+ return NULL;
+ }
+
+ return pool;
+}
+
+int mlx5dr_send_info_pool_create(struct mlx5dr_domain *dmn)
+{
+ dmn->send_info_pool_rx = dr_send_info_pool_create();
+ if (!dmn->send_info_pool_rx)
+ return -ENOMEM;
+
+ dmn->send_info_pool_tx = dr_send_info_pool_create();
+ if (!dmn->send_info_pool_tx) {
+ dr_send_info_pool_destroy(dmn->send_info_pool_rx);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+struct mlx5dr_ste_send_info
+*mlx5dr_send_info_alloc(struct mlx5dr_domain *dmn,
+ enum mlx5dr_domain_nic_type nic_type)
+{
+ struct mlx5dr_send_info_pool_obj *pool_obj;
+ struct mlx5dr_send_info_pool *pool;
+ int ret;
+
+ pool = nic_type == DR_DOMAIN_NIC_TYPE_RX ? dmn->send_info_pool_rx :
+ dmn->send_info_pool_tx;
+
+ if (unlikely(list_empty(&pool->free_list))) {
+ ret = dr_send_info_pool_fill(pool);
+ if (ret)
+ return NULL;
+ }
+
+ pool_obj = list_first_entry_or_null(&pool->free_list,
+ struct mlx5dr_send_info_pool_obj,
+ list_node);
+
+ if (likely(pool_obj)) {
+ list_del_init(&pool_obj->list_node);
+ } else {
+ WARN_ONCE(!pool_obj, "Failed getting ste send info obj from pool");
+ return NULL;
+ }
+
+ return &pool_obj->ste_send_info;
+}
+
+void mlx5dr_send_info_free(struct mlx5dr_ste_send_info *ste_send_info)
+{
+ struct mlx5dr_send_info_pool_obj *pool_obj;
+
+ pool_obj = container_of(ste_send_info,
+ struct mlx5dr_send_info_pool_obj,
+ ste_send_info);
+
+ list_add(&pool_obj->list_node, &pool_obj->pool->free_list);
+}
+
static int dr_parse_cqe(struct mlx5dr_cq *dr_cq, struct mlx5_cqe64 *cqe64)
{
unsigned int idx;
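
The new send-info pool is a plain free list: each direction of a domain is pre-filled with DR_SEND_INFO_POOL_SIZE objects, the alloc path refills on exhaustion, and the free path pushes the object back onto its owning pool, found through the embedded back-pointer. A user-space model of the same structure (singly linked instead of list_head, and lock-free only because the sketch assumes a single concurrent user per pool, presumably what the domain NIC lock guarantees in the driver):

  #include <stdlib.h>

  struct pool;

  struct pool_obj {
  	struct pool *owner;     /* lets the free path find the pool */
  	struct pool_obj *next;
  	/* payload would live here */
  };

  struct pool {
  	struct pool_obj *free_list;
  };

  static int pool_fill(struct pool *p, int n)
  {
  	while (n--) {
  		struct pool_obj *o = calloc(1, sizeof(*o));

  		if (!o)
  			return -1; /* the driver also unwinds what it added */
  		o->owner = p;
  		o->next = p->free_list;
  		p->free_list = o;
  	}
  	return 0;
  }

  static struct pool_obj *pool_get(struct pool *p)
  {
  	struct pool_obj *o;

  	if (!p->free_list && pool_fill(p, 1000))
  		return NULL;
  	o = p->free_list;
  	p->free_list = o->next;
  	return o;
  }

  static void pool_put(struct pool_obj *o)
  {
  	o->next = o->owner->free_list;
  	o->owner->free_list = o;
  }
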
@@ -78,8 +209,15 @@ static int dr_cq_poll_one(struct mlx5dr_cq *dr_cq)
int err;
cqe64 = mlx5_cqwq_get_cqe(&dr_cq->wq);
- if (!cqe64)
+ if (!cqe64) {
+ if (unlikely(dr_cq->mdev->state ==
+ MLX5_DEVICE_STATE_INTERNAL_ERROR)) {
+ mlx5_core_dbg_once(dr_cq->mdev,
+ "Polling CQ while device is shutting down\n");
+ return CQ_POLL_ERR;
+ }
return CQ_EMPTY;
+ }
mlx5_cqwq_pop(&dr_cq->wq);
err = dr_parse_cqe(dr_cq, cqe64);
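
The polling change lets dr_cq_poll_one() distinguish "no completion yet" from "the device is in internal error, so no completion will ever arrive", and callers waiting on the send ring can abort instead of spinning forever. A sketched model of the two outcomes:

  enum { CQ_OK = 0, CQ_EMPTY = -1, CQ_POLL_ERR = -2 };

  static int device_in_error; /* stand-in for the mdev->state check */
  static int cq_has_entry;    /* stand-in for mlx5_cqwq_get_cqe() */

  static int cq_poll_one(void)
  {
  	if (!cq_has_entry)
  		return device_in_error ? CQ_POLL_ERR : CQ_EMPTY;
  	cq_has_entry = 0;
  	return CQ_OK;
  }

  static int wait_for_completion(void)
  {
  	for (;;) {
  		int ret = cq_poll_one();

  		if (ret == CQ_OK)
  			return 0;
  		if (ret == CQ_POLL_ERR)
  			return -1; /* device dead: bail out */
  		/* CQ_EMPTY: keep polling */
  	}
  }

The following hunk stores cq->mdev at CQ creation precisely so that this check can reach the device state from the polling context.
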
@@ -833,6 +971,7 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
cq->mcq.vector = 0;
cq->mcq.uar = uar;
+ cq->mdev = mdev;
return cq;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
index 09ebd3088857..1e15f605df6e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
@@ -90,6 +90,16 @@ static void dr_ste_set_always_miss(struct dr_hw_ste_format *hw_ste)
hw_ste->mask[0] = 0;
}
+bool mlx5dr_ste_is_miss_addr_set(struct mlx5dr_ste_ctx *ste_ctx,
+ u8 *hw_ste_p)
+{
+ if (!ste_ctx->is_miss_addr_set)
+ return false;
+
+ /* check if miss address is already set for this type of STE */
+ return ste_ctx->is_miss_addr_set(hw_ste_p);
+}
+
void mlx5dr_ste_set_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
u8 *hw_ste_p, u64 miss_addr)
{
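
is_miss_addr_set is an optional member of the per-format ste_ctx vtable: a format that never pre-sets the miss address simply leaves the pointer NULL (only the v1 and v2 contexts install it in this series), and the wrapper answers false. A small sketch of this optional-callback convention:

  #include <stdbool.h>
  #include <stddef.h>

  typedef unsigned char u8;

  struct ste_ctx {
  	/* optional: NULL means "miss address is never pre-set" */
  	bool (*is_miss_addr_set)(u8 *hw_ste_p);
  };

  static bool ste_is_miss_addr_set(struct ste_ctx *ctx, u8 *hw_ste_p)
  {
  	if (!ctx->is_miss_addr_set)
  		return false;
  	return ctx->is_miss_addr_set(hw_ste_p);
  }

  /* illustrative v1-style implementation: decide by the entry format */
  static bool v1_is_miss_addr_set(u8 *hw_ste_p)
  {
  	return (hw_ste_p[0] & 0xf) == 0x7;
  }

  static struct ste_ctx ste_ctx_v0 = { .is_miss_addr_set = NULL };
  static struct ste_ctx ste_ctx_v1 = { .is_miss_addr_set = v1_is_miss_addr_set };
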
@@ -491,7 +501,7 @@ struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
u32 num_entries;
int i;
- htbl = kzalloc(sizeof(*htbl), GFP_KERNEL);
+ htbl = mlx5dr_icm_pool_alloc_htbl(pool);
if (!htbl)
return NULL;
@@ -503,6 +513,9 @@ struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
htbl->lu_type = lu_type;
htbl->byte_mask = byte_mask;
htbl->refcount = 0;
+ htbl->pointing_ste = NULL;
+ htbl->ctrl.num_of_valid_entries = 0;
+ htbl->ctrl.num_of_collisions = 0;
num_entries = mlx5dr_icm_pool_get_chunk_num_of_entries(chunk);
for (i = 0; i < num_entries; i++) {
@@ -517,17 +530,20 @@ struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
return htbl;
out_free_htbl:
- kfree(htbl);
+ mlx5dr_icm_pool_free_htbl(pool, htbl);
return NULL;
}
int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl)
{
+ struct mlx5dr_icm_pool *pool = htbl->chunk->buddy_mem->pool;
+
if (htbl->refcount)
return -EBUSY;
mlx5dr_icm_free_chunk(htbl->chunk);
- kfree(htbl);
+ mlx5dr_icm_pool_free_htbl(pool, htbl);
+
return 0;
}
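
Hash tables now come from mlx5dr_icm_pool_alloc_htbl()/free_htbl() (backed by the htbls_kmem_cache added to the domain in dr_types.h below) instead of kzalloc()/kfree(). A recycled object keeps whatever its previous user left behind, which is why the alloc path above now clears pointing_ste and the ctrl counters explicitly. A user-space model of the hazard:

  #include <stdlib.h>

  struct htbl {
  	void *pointing_ste;
  	int num_of_valid_entries;
  	struct htbl *next_free;
  };

  static struct htbl *free_list;

  static struct htbl *htbl_alloc(void)
  {
  	struct htbl *h = free_list;

  	if (h)
  		free_list = h->next_free;
  	else
  		h = malloc(sizeof(*h));
  	if (!h)
  		return NULL;

  	/* Unlike kzalloc(), a recycled object is not zeroed: every field
  	 * that matters must be re-initialized by hand.
  	 */
  	h->pointing_ste = NULL;
  	h->num_of_valid_entries = 0;
  	return h;
  }

  static void htbl_free(struct htbl *h)
  {
  	h->next_free = free_list;
  	free_list = h;
  }
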
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h
index 17513baff9b0..7075142bcfb6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h
@@ -151,6 +151,7 @@ struct mlx5dr_ste_ctx {
bool is_rx, u16 gvmi);
void (*set_next_lu_type)(u8 *hw_ste_p, u16 lu_type);
u16 (*get_next_lu_type)(u8 *hw_ste_p);
+ bool (*is_miss_addr_set)(u8 *hw_ste_p);
void (*set_miss_addr)(u8 *hw_ste_p, u64 miss_addr);
u64 (*get_miss_addr)(u8 *hw_ste_p);
void (*set_hit_addr)(u8 *hw_ste_p, u64 icm_addr, u32 ht_size);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
index ee677a5c76be..084145f18084 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
@@ -13,6 +13,7 @@ enum dr_ste_v1_entry_format {
DR_STE_V1_TYPE_BWC_BYTE = 0x0,
DR_STE_V1_TYPE_BWC_DW = 0x1,
DR_STE_V1_TYPE_MATCH = 0x2,
+ DR_STE_V1_TYPE_MATCH_RANGES = 0x7,
};
/* Lookup type is built from 2B: [ Definer mode 1B ][ Definer index 1B ] */
@@ -267,6 +268,16 @@ static void dr_ste_v1_set_entry_type(u8 *hw_ste_p, u8 entry_type)
MLX5_SET(ste_match_bwc_v1, hw_ste_p, entry_format, entry_type);
}
+bool dr_ste_v1_is_miss_addr_set(u8 *hw_ste_p)
+{
+ u8 entry_type = MLX5_GET(ste_match_bwc_v1, hw_ste_p, entry_format);
+
+ /* Unlike the MATCH STE, for the MATCH_RANGES STE both hit and miss addresses
+ * are part of the action, so they are both set as part of STE init
+ */
+ return entry_type == DR_STE_V1_TYPE_MATCH_RANGES;
+}
+
void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr)
{
u64 index = miss_addr >> 6;
@@ -520,6 +531,27 @@ static void dr_ste_v1_set_aso_flow_meter(u8 *d_action,
init_color);
}
+static void dr_ste_v1_set_match_range_pkt_len(u8 *hw_ste_p, u32 definer_id,
+ u32 min, u32 max)
+{
+ MLX5_SET(ste_match_ranges_v1, hw_ste_p, match_definer_ctx_idx, definer_id);
+
+ /* When the STE will be sent, its mask and tags will be swapped in
+ * dr_ste_v1_prepare_for_postsend(). This, however, is a match range STE,
+ * which doesn't have a mask and shouldn't have its mask/tag swapped.
+ * We're using the common utility functions to send this STE, so we need
+ * to allow for this swapping - place the values in the corresponding
+ * locations to allow flipping them when writing to ICM.
+ *
+ * min/max_value_2 corresponds to match_dw_0 in its definer.
+ * To allow for the mask/tag swapping, we write the min/max_2 values
+ * into the min/max_0 fields.
+ *
+ * Pkt len is 2 bytes, stored in the upper half of the DW.
+ */
+ MLX5_SET(ste_match_ranges_v1, hw_ste_p, min_value_0, min << 16);
+ MLX5_SET(ste_match_ranges_v1, hw_ste_p, max_value_0, max << 16);
+}
+
static void dr_ste_v1_arr_init_next_match(u8 **last_ste,
u32 *added_stes,
u16 gvmi)
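
The value placement in dr_ste_v1_set_match_range_pkt_len() can be verified with a few lines of arithmetic: the common send path swaps the two 16-byte halves that follow the 32-byte control/action area, so a value written into the value_0/1 slots lands in the value_2/3 slots on the wire. A runnable model of that swap:

  #include <assert.h>
  #include <string.h>

  #define CTRL 32 /* control + action area, bytes 0..31 */
  #define HALF 16 /* tag and mask are 16 bytes each */

  static void prepare_for_postsend(unsigned char *ste)
  {
  	unsigned char tmp[HALF];

  	memcpy(tmp, ste + CTRL, HALF);
  	memcpy(ste + CTRL, ste + CTRL + HALF, HALF);
  	memcpy(ste + CTRL + HALF, tmp, HALF);
  }

  int main(void)
  {
  	unsigned char ste[64] = {0};

  	ste[CTRL] = 0x42;                 /* written into the value_0 slot */
  	prepare_for_postsend(ste);
  	assert(ste[CTRL + HALF] == 0x42); /* arrives in the value_2 slot */
  	return 0;
  }
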
@@ -535,6 +567,14 @@ static void dr_ste_v1_arr_init_next_match(u8 **last_ste,
memset(action, 0, MLX5_FLD_SZ_BYTES(ste_mask_and_match_v1, action));
}
+static void dr_ste_v1_arr_init_next_match_range(u8 **last_ste,
+ u32 *added_stes,
+ u16 gvmi)
+{
+ dr_ste_v1_arr_init_next_match(last_ste, added_stes, gvmi);
+ dr_ste_v1_set_entry_type(*last_ste, DR_STE_V1_TYPE_MATCH_RANGES);
+}
+
void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
u8 *action_type_set,
u32 actions_caps,
@@ -670,6 +710,20 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
action += DR_STE_ACTION_DOUBLE_SZ;
}
+ if (action_type_set[DR_ACTION_TYP_RANGE]) {
+ /* the match ranges action requires a new STE of its own type */
+ dr_ste_v1_arr_init_next_match_range(&last_ste, added_stes, attr->gvmi);
+ dr_ste_v1_set_miss_addr(last_ste, attr->range.miss_icm_addr);
+
+ /* we do not support setting any action on the match ranges STE */
+ action_sz = 0;
+
+ dr_ste_v1_set_match_range_pkt_len(last_ste,
+ attr->range.definer_id,
+ attr->range.min,
+ attr->range.max);
+ }
+
dr_ste_v1_set_hit_gvmi(last_ste, attr->hit_gvmi);
dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1);
}
@@ -858,6 +912,20 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
action += DR_STE_ACTION_DOUBLE_SZ;
}
+ if (action_type_set[DR_ACTION_TYP_RANGE]) {
+ /* the match ranges action requires a new STE of its own type */
+ dr_ste_v1_arr_init_next_match_range(&last_ste, added_stes, attr->gvmi);
+ dr_ste_v1_set_miss_addr(last_ste, attr->range.miss_icm_addr);
+
+ /* we do not support setting any action on the match ranges STE */
+ action_sz = 0;
+
+ dr_ste_v1_set_match_range_pkt_len(last_ste,
+ attr->range.definer_id,
+ attr->range.min,
+ attr->range.max);
+ }
+
dr_ste_v1_set_hit_gvmi(last_ste, attr->hit_gvmi);
dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1);
}
@@ -2144,6 +2212,7 @@ static struct mlx5dr_ste_ctx ste_ctx_v1 = {
.ste_init = &dr_ste_v1_init,
.set_next_lu_type = &dr_ste_v1_set_next_lu_type,
.get_next_lu_type = &dr_ste_v1_get_next_lu_type,
+ .is_miss_addr_set = &dr_ste_v1_is_miss_addr_set,
.set_miss_addr = &dr_ste_v1_set_miss_addr,
.get_miss_addr = &dr_ste_v1_get_miss_addr,
.set_hit_addr = &dr_ste_v1_set_hit_addr,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h
index 8a1d49790c6e..b5c0f0f8392f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h
@@ -7,6 +7,7 @@
#include "dr_types.h"
#include "dr_ste.h"
+bool dr_ste_v1_is_miss_addr_set(u8 *hw_ste_p);
void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr);
u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p);
void dr_ste_v1_set_byte_mask(u8 *hw_ste_p, u16 byte_mask);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c
index c60fddd125d2..cf1a3c9a1cf4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c
@@ -202,6 +202,7 @@ static struct mlx5dr_ste_ctx ste_ctx_v2 = {
.ste_init = &dr_ste_v1_init,
.set_next_lu_type = &dr_ste_v1_set_next_lu_type,
.get_next_lu_type = &dr_ste_v1_get_next_lu_type,
+ .is_miss_addr_set = &dr_ste_v1_is_miss_addr_set,
.set_miss_addr = &dr_ste_v1_set_miss_addr,
.get_miss_addr = &dr_ste_v1_get_miss_addr,
.set_hit_addr = &dr_ste_v1_set_hit_addr,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
index f68461b13391..69294a66fd7f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
@@ -295,7 +295,7 @@ int mlx5dr_table_destroy(struct mlx5dr_table *tbl)
mlx5dr_dbg_tbl_del(tbl);
ret = dr_table_destroy_sw_owned_tbl(tbl);
if (ret)
- return ret;
+ mlx5dr_err(tbl->dmn, "Failed to destroy sw owned table\n");
dr_table_uninit(tbl);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index 1777a1e508e7..2b769dcbd453 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -81,6 +81,7 @@ mlx5dr_icm_next_higher_chunk(enum mlx5dr_icm_chunk_size chunk)
enum {
DR_STE_SIZE = 64,
DR_STE_SIZE_CTRL = 32,
+ DR_STE_SIZE_MATCH_TAG = 32,
DR_STE_SIZE_TAG = 16,
DR_STE_SIZE_MASK = 16,
DR_STE_SIZE_REDUCED = DR_STE_SIZE - DR_STE_SIZE_MASK,
@@ -128,6 +129,7 @@ enum mlx5dr_action_type {
DR_ACTION_TYP_REMOVE_HDR,
DR_ACTION_TYP_SAMPLER,
DR_ACTION_TYP_ASO_FLOW_METER,
+ DR_ACTION_TYP_RANGE,
DR_ACTION_TYP_MAX,
};
@@ -146,6 +148,8 @@ struct mlx5dr_cmd_caps;
struct mlx5dr_rule_rx_tx;
struct mlx5dr_matcher_rx_tx;
struct mlx5dr_ste_ctx;
+struct mlx5dr_send_info_pool;
+struct mlx5dr_icm_hot_chunk;
struct mlx5dr_ste {
/* refcount: indicates the num of rules that are using this ste */
@@ -235,6 +239,7 @@ static inline void mlx5dr_htbl_get(struct mlx5dr_ste_htbl *htbl)
/* STE utils */
u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl);
+bool mlx5dr_ste_is_miss_addr_set(struct mlx5dr_ste_ctx *ste_ctx, u8 *hw_ste_p);
void mlx5dr_ste_set_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
u8 *hw_ste, u64 miss_addr);
void mlx5dr_ste_set_hit_addr(struct mlx5dr_ste_ctx *ste_ctx,
@@ -279,6 +284,13 @@ struct mlx5dr_ste_actions_attr {
u8 dest_reg_id;
u8 init_color;
} aso_flow_meter;
+
+ struct {
+ u64 miss_icm_addr;
+ u32 definer_id;
+ u32 min;
+ u32 max;
+ } range;
};
void mlx5dr_ste_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
@@ -912,12 +924,17 @@ struct mlx5dr_domain {
refcount_t refcount;
struct mlx5dr_icm_pool *ste_icm_pool;
struct mlx5dr_icm_pool *action_icm_pool;
+ struct mlx5dr_send_info_pool *send_info_pool_rx;
+ struct mlx5dr_send_info_pool *send_info_pool_tx;
+ struct kmem_cache *chunks_kmem_cache;
+ struct kmem_cache *htbls_kmem_cache;
struct mlx5dr_send_ring *send_ring;
struct mlx5dr_domain_info info;
struct xarray csum_fts_xa;
struct mlx5dr_ste_ctx *ste_ctx;
struct list_head dbg_tbl_list;
struct mlx5dr_dbg_dump_info dump_info;
+ struct xarray definers_xa;
};
struct mlx5dr_table_rx_tx {
@@ -1020,6 +1037,15 @@ struct mlx5dr_action_dest_tbl {
};
};
+struct mlx5dr_action_range {
+ struct mlx5dr_domain *dmn;
+ struct mlx5dr_action *hit_tbl_action;
+ struct mlx5dr_action *miss_tbl_action;
+ u32 definer_id;
+ u32 min;
+ u32 max;
+};
+
struct mlx5dr_action_ctr {
u32 ctr_id;
u32 offset;
@@ -1066,6 +1092,7 @@ struct mlx5dr_action {
struct mlx5dr_action_push_vlan *push_vlan;
struct mlx5dr_action_flow_tag *flow_tag;
struct mlx5dr_action_aso_flow_meter *aso;
+ struct mlx5dr_action_range *range;
};
};
@@ -1105,7 +1132,6 @@ int mlx5dr_rule_get_reverse_rule_members(struct mlx5dr_ste **ste_arr,
struct mlx5dr_icm_chunk {
struct mlx5dr_icm_buddy_mem *buddy_mem;
- struct list_head chunk_list;
/* indicates the index of this chunk in the whole memory,
* used for deleting the chunk from the buddy
@@ -1158,6 +1184,9 @@ u32 mlx5dr_icm_pool_get_chunk_num_of_entries(struct mlx5dr_icm_chunk *chunk);
u32 mlx5dr_icm_pool_get_chunk_byte_size(struct mlx5dr_icm_chunk *chunk);
u8 *mlx5dr_ste_get_hw_ste(struct mlx5dr_ste *ste);
+struct mlx5dr_ste_htbl *mlx5dr_icm_pool_alloc_htbl(struct mlx5dr_icm_pool *pool);
+void mlx5dr_icm_pool_free_htbl(struct mlx5dr_icm_pool *pool, struct mlx5dr_ste_htbl *htbl);
+
static inline int
mlx5dr_icm_pool_dm_type_to_entry_size(enum mlx5dr_icm_type icm_type)
{
@@ -1287,6 +1316,14 @@ int mlx5dr_cmd_create_reformat_ctx(struct mlx5_core_dev *mdev,
u32 *reformat_id);
void mlx5dr_cmd_destroy_reformat_ctx(struct mlx5_core_dev *mdev,
u32 reformat_id);
+int mlx5dr_cmd_create_definer(struct mlx5_core_dev *mdev,
+ u16 format_id,
+ u8 *dw_selectors,
+ u8 *byte_selectors,
+ u8 *match_mask,
+ u32 *definer_id);
+void mlx5dr_cmd_destroy_definer(struct mlx5_core_dev *mdev,
+ u32 definer_id);
struct mlx5dr_cmd_gid_attr {
u8 gid[16];
@@ -1404,6 +1441,12 @@ int mlx5dr_send_postsend_formatted_htbl(struct mlx5dr_domain *dmn,
int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn,
struct mlx5dr_action *action);
+int mlx5dr_send_info_pool_create(struct mlx5dr_domain *dmn);
+void mlx5dr_send_info_pool_destroy(struct mlx5dr_domain *dmn);
+struct mlx5dr_ste_send_info *mlx5dr_send_info_alloc(struct mlx5dr_domain *dmn,
+ enum mlx5dr_domain_nic_type nic_type);
+void mlx5dr_send_info_free(struct mlx5dr_ste_send_info *ste_send_info);
+
struct mlx5dr_cmd_ft_info {
u32 id;
u16 vport;
@@ -1469,4 +1512,18 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
u32 flow_source);
void mlx5dr_fw_destroy_md_tbl(struct mlx5dr_domain *dmn, u32 tbl_id,
u32 group_id);
+
+static inline bool mlx5dr_is_fw_table(struct mlx5_flow_table *ft)
+{
+ return !ft->fs_dr_table.dr_table;
+}
+
+static inline bool mlx5dr_supp_match_ranges(struct mlx5_core_dev *dev)
+{
+ return (MLX5_CAP_GEN(dev, steering_format_version) >=
+ MLX5_STEERING_FORMAT_CONNECTX_6DX) &&
+ (MLX5_CAP_GEN_64(dev, match_definer_format_supported) &
+ (1ULL << MLX5_IFC_DEFINER_FORMAT_ID_SELECT));
+}
+
#endif /* _DR_TYPES_H_ */
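
mlx5dr_supp_match_ranges() gates range matching on two independent capabilities: a steering format of ConnectX-6 Dx or newer, and the SELECT definer format bit in the 64-bit definer-support mask. A sketch of the same two-part test (the bit position below is illustrative, not the real MLX5_IFC_DEFINER_FORMAT_ID_SELECT value):

  #include <stdbool.h>
  #include <stdint.h>

  #define FMT_CONNECTX_5           0
  #define FMT_CONNECTX_6DX         1
  #define DEFINER_FORMAT_ID_SELECT 61 /* assumed value for illustration */

  struct dev_caps {
  	int steering_format_version;
  	uint64_t match_definer_format_supported; /* one bit per format */
  };

  static bool supp_match_ranges(const struct dev_caps *caps)
  {
  	return caps->steering_format_version >= FMT_CONNECTX_6DX &&
  	       (caps->match_definer_format_supported &
  		(UINT64_C(1) << DEFINER_FORMAT_ID_SELECT));
  }
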
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
index 13b6d4721e17..984653756779 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
@@ -7,10 +7,11 @@
#include "fs_cmd.h"
#include "mlx5dr.h"
#include "fs_dr.h"
+#include "dr_types.h"
-static bool mlx5_dr_is_fw_table(u32 flags)
+static bool dr_is_fw_term_table(struct mlx5_flow_table *ft)
{
- if (flags & MLX5_FLOW_TABLE_TERMINATION)
+ if (ft->flags & MLX5_FLOW_TABLE_TERMINATION)
return true;
return false;
@@ -69,7 +70,7 @@ static int mlx5_cmd_dr_create_flow_table(struct mlx5_flow_root_namespace *ns,
u32 flags;
int err;
- if (mlx5_dr_is_fw_table(ft->flags))
+ if (dr_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->create_flow_table(ns, ft,
ft_attr,
next_ft);
@@ -109,7 +110,7 @@ static int mlx5_cmd_dr_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5dr_action *action = ft->fs_dr_table.miss_action;
int err;
- if (mlx5_dr_is_fw_table(ft->flags))
+ if (dr_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_table(ns, ft);
err = mlx5dr_table_destroy(ft->fs_dr_table.dr_table);
@@ -134,7 +135,7 @@ static int mlx5_cmd_dr_modify_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_table *next_ft)
{
- if (mlx5_dr_is_fw_table(ft->flags))
+ if (dr_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->modify_flow_table(ns, ft, next_ft);
return set_miss_action(ns, ft, next_ft);
@@ -153,7 +154,7 @@ static int mlx5_cmd_dr_create_flow_group(struct mlx5_flow_root_namespace *ns,
match_criteria_enable);
struct mlx5dr_match_parameters mask;
- if (mlx5_dr_is_fw_table(ft->flags))
+ if (dr_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->create_flow_group(ns, ft, in,
fg);
@@ -178,7 +179,7 @@ static int mlx5_cmd_dr_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *fg)
{
- if (mlx5_dr_is_fw_table(ft->flags))
+ if (dr_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_group(ns, ft, fg);
return mlx5dr_matcher_destroy(fg->fs_dr_matcher.dr_matcher);
@@ -209,11 +210,22 @@ static struct mlx5dr_action *create_ft_action(struct mlx5dr_domain *domain,
{
struct mlx5_flow_table *dest_ft = dst->dest_attr.ft;
- if (mlx5_dr_is_fw_table(dest_ft->flags))
+ if (mlx5dr_is_fw_table(dest_ft))
return mlx5dr_action_create_dest_flow_fw_table(domain, dest_ft);
return mlx5dr_action_create_dest_table(dest_ft->fs_dr_table.dr_table);
}
+static struct mlx5dr_action *create_range_action(struct mlx5dr_domain *domain,
+ struct mlx5_flow_rule *dst)
+{
+ return mlx5dr_action_create_dest_match_range(domain,
+ dst->dest_attr.range.field,
+ dst->dest_attr.range.hit_ft,
+ dst->dest_attr.range.miss_ft,
+ dst->dest_attr.range.min,
+ dst->dest_attr.range.max);
+}
+
static struct mlx5dr_action *create_action_push_vlan(struct mlx5dr_domain *domain,
struct mlx5_fs_vlan *vlan)
{
@@ -260,7 +272,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
int err = 0;
int i;
- if (mlx5_dr_is_fw_table(ft->flags))
+ if (dr_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->create_fte(ns, ft, group, fte);
actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX, sizeof(*actions),
@@ -467,6 +479,15 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
fs_dr_actions[fs_dr_num_actions++] = tmp_action;
term_actions[num_term_actions++].dest = tmp_action;
break;
+ case MLX5_FLOW_DESTINATION_TYPE_RANGE:
+ tmp_action = create_range_action(domain, dst);
+ if (!tmp_action) {
+ err = -ENOMEM;
+ goto free_actions;
+ }
+ fs_dr_actions[fs_dr_num_actions++] = tmp_action;
+ term_actions[num_term_actions++].dest = tmp_action;
+ break;
default:
err = -EOPNOTSUPP;
goto free_actions;
@@ -702,7 +723,7 @@ static int mlx5_cmd_dr_delete_fte(struct mlx5_flow_root_namespace *ns,
int err;
int i;
- if (mlx5_dr_is_fw_table(ft->flags))
+ if (dr_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->delete_fte(ns, ft, fte);
err = mlx5dr_rule_destroy(rule->dr_rule);
@@ -727,7 +748,7 @@ static int mlx5_cmd_dr_update_fte(struct mlx5_flow_root_namespace *ns,
struct fs_fte fte_tmp = {};
int ret;
- if (mlx5_dr_is_fw_table(ft->flags))
+ if (dr_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->update_fte(ns, ft, group, modify_mask, fte);
/* Backup current dr rule details */
@@ -780,11 +801,19 @@ static int mlx5_cmd_dr_destroy_ns(struct mlx5_flow_root_namespace *ns)
static u32 mlx5_cmd_dr_get_capabilities(struct mlx5_flow_root_namespace *ns,
enum fs_flow_table_type ft_type)
{
+ u32 steering_caps = 0;
+
if (ft_type != FS_FT_FDB ||
MLX5_CAP_GEN(ns->dev, steering_format_version) == MLX5_STEERING_FORMAT_CONNECTX_5)
return 0;
- return MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX | MLX5_FLOW_STEERING_CAP_VLAN_POP_ON_TX;
+ steering_caps |= MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX;
+ steering_caps |= MLX5_FLOW_STEERING_CAP_VLAN_POP_ON_TX;
+
+ if (mlx5dr_supp_match_ranges(ns->dev))
+ steering_caps |= MLX5_FLOW_STEERING_CAP_MATCH_RANGES;
+
+ return steering_caps;
}
bool mlx5_fs_dr_is_supported(struct mlx5_core_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr_ste_v1.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr_ste_v1.h
index 34c2bd17a8b4..790a17d6207f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr_ste_v1.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr_ste_v1.h
@@ -165,6 +165,41 @@ struct mlx5_ifc_ste_mask_and_match_v1_bits {
u8 action[0x60];
};
+struct mlx5_ifc_ste_match_ranges_v1_bits {
+ u8 entry_format[0x8];
+ u8 counter_id[0x18];
+
+ u8 miss_address_63_48[0x10];
+ u8 match_definer_ctx_idx[0x8];
+ u8 miss_address_39_32[0x8];
+
+ u8 miss_address_31_6[0x1a];
+ u8 reserved_at_5a[0x1];
+ u8 match_polarity[0x1];
+ u8 reparse[0x1];
+ u8 reserved_at_5d[0x3];
+
+ u8 next_table_base_63_48[0x10];
+ u8 hash_definer_ctx_idx[0x8];
+ u8 next_table_base_39_32_size[0x8];
+
+ u8 next_table_base_31_5_size[0x1b];
+ u8 hash_type[0x2];
+ u8 hash_after_actions[0x1];
+ u8 reserved_at_9e[0x2];
+
+ u8 action[0x60];
+
+ u8 max_value_0[0x20];
+ u8 min_value_0[0x20];
+ u8 max_value_1[0x20];
+ u8 min_value_1[0x20];
+ u8 max_value_2[0x20];
+ u8 min_value_2[0x20];
+ u8 max_value_3[0x20];
+ u8 min_value_3[0x20];
+};
+
struct mlx5_ifc_ste_eth_l2_src_v1_bits {
u8 reserved_at_0[0x1];
u8 sx_sniffer[0x1];
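
mlx5_ifc structs such as ste_match_ranges_v1 above describe a hardware layout bit by bit: every field width is given in bits, consecutive fields pack into big-endian 32-bit DWs, and MLX5_SET()/MLX5_GET() derive the byte offset, shift, and mask from the struct definition. A minimal user-space analogue for whole-DW fields, with the offset assumed from the layout above rather than taken from the driver:

  #include <stdint.h>
  #include <arpa/inet.h> /* htonl()/ntohl() */

  /* Device structures are big-endian, so every DW access byte-swaps. */
  static void set_dw(void *base, unsigned int dw_index, uint32_t val)
  {
  	((uint32_t *)base)[dw_index] = htonl(val);
  }

  static uint32_t get_dw(const void *base, unsigned int dw_index)
  {
  	return ntohl(((const uint32_t *)base)[dw_index]);
  }

  /* e.g. max_value_0 starts right after the 32-byte control/action area,
   * i.e. DW index 8 of the 64-byte STE:
   *   set_dw(hw_ste, 8, max << 16);
   */
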
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
index 226a0d7bb06d..9afd268a2573 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
@@ -140,8 +140,21 @@ mlx5dr_action_create_aso(struct mlx5dr_domain *dmn,
u8 init_color,
u8 meter_id);
+struct mlx5dr_action *
+mlx5dr_action_create_dest_match_range(struct mlx5dr_domain *dmn,
+ u32 field,
+ struct mlx5_flow_table *hit_ft,
+ struct mlx5_flow_table *miss_ft,
+ u32 min,
+ u32 max);
+
int mlx5dr_action_destroy(struct mlx5dr_action *action);
+int mlx5dr_definer_get(struct mlx5dr_domain *dmn, u16 format_id,
+ u8 *dw_selectors, u8 *byte_selectors,
+ u8 *match_mask, u32 *definer_id);
+void mlx5dr_definer_put(struct mlx5dr_domain *dmn, u32 definer_id);
+
static inline bool
mlx5dr_is_supported(struct mlx5_core_dev *dev)
{
@@ -164,16 +177,9 @@ struct mlx5dr_icm_buddy_mem {
struct mlx5dr_icm_mr *icm_mr;
struct mlx5dr_icm_pool *pool;
- /* This is the list of used chunks. HW may be accessing this memory */
- struct list_head used_list;
+ /* Amount of memory in used chunks - HW may be accessing this memory */
u64 used_memory;
- /* Hardware may be accessing this memory but at some future,
- * undetermined time, it might cease to do so.
- * sync_ste command sets them free.
- */
- struct list_head hot_list;
-
/* Memory optimisation */
struct mlx5dr_ste *ste_arr;
struct list_head *miss_list;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
index 8455e79bc44a..1513112ecec8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
@@ -31,7 +31,6 @@
*/
#include <linux/kernel.h>
-#include <linux/io-mapping.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index d5c317325030..ba7e3df22413 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -1160,14 +1160,40 @@ u64 mlx5_query_nic_system_image_guid(struct mlx5_core_dev *mdev)
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_system_image_guid);
-int mlx5_vport_get_other_func_cap(struct mlx5_core_dev *dev, u16 function_id, void *out)
+int mlx5_vport_get_other_func_cap(struct mlx5_core_dev *dev, u16 function_id, void *out,
+ u16 opmod)
{
- u16 opmod = (MLX5_CAP_GENERAL << 1) | (HCA_CAP_OPMOD_GET_MAX & 0x01);
u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)] = {};
+ opmod = (opmod << 1) | (HCA_CAP_OPMOD_GET_MAX & 0x01);
MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
MLX5_SET(query_hca_cap_in, in, function_id, function_id);
MLX5_SET(query_hca_cap_in, in, other_function, true);
return mlx5_cmd_exec_inout(dev, query_hca_cap, in, out);
}
+EXPORT_SYMBOL_GPL(mlx5_vport_get_other_func_cap);
+
+int mlx5_vport_set_other_func_cap(struct mlx5_core_dev *dev, const void *hca_cap,
+ u16 function_id, u16 opmod)
+{
+ int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
+ void *set_hca_cap;
+ void *set_ctx;
+ int ret;
+
+ set_ctx = kzalloc(set_sz, GFP_KERNEL);
+ if (!set_ctx)
+ return -ENOMEM;
+
+ MLX5_SET(set_hca_cap_in, set_ctx, opcode, MLX5_CMD_OP_SET_HCA_CAP);
+ MLX5_SET(set_hca_cap_in, set_ctx, op_mod, opmod << 1);
+ set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);
+ memcpy(set_hca_cap, hca_cap, MLX5_ST_SZ_BYTES(cmd_hca_cap));
+ MLX5_SET(set_hca_cap_in, set_ctx, function_id, function_id);
+ MLX5_SET(set_hca_cap_in, set_ctx, other_function, true);
+ ret = mlx5_cmd_exec_in(dev, set_hca_cap, set_ctx);
+
+ kfree(set_ctx);
+ return ret;
+}
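
mlx5_vport_set_other_func_cap() follows the usual command-buffer pattern: allocate the inbox, fill the header fields (opcode, op_mod, function_id, other_function), copy in the capability page, execute, and free. A rough user-space sketch of that shape, with invented sizes and offsets in place of the mlx5_ifc-derived ones:

  #include <stdint.h>
  #include <stdlib.h>
  #include <string.h>

  #define IN_SZ  256 /* invented; the real size comes from mlx5_ifc */
  #define HDR_SZ 16

  static int cmd_exec(const void *in, size_t sz) { (void)in; (void)sz; return 0; }

  /* caller guarantees cap_sz <= IN_SZ - HDR_SZ */
  static int set_other_func_cap(const void *cap, size_t cap_sz, uint16_t fn_id)
  {
  	unsigned char *in = calloc(1, IN_SZ);
  	int ret;

  	if (!in)
  		return -1;

  	in[0] = 0x09;                          /* opcode placeholder */
  	memcpy(&in[2], &fn_id, sizeof(fn_id)); /* function_id placeholder */
  	memcpy(in + HDR_SZ, cap, cap_sz);      /* capability payload */

  	ret = cmd_exec(in, IN_SZ);
  	free(in);
  	return ret;
  }
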
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
index 4d629e5ddbc7..e4ef1d24a3ad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
@@ -243,6 +243,23 @@ static inline struct mlx5_cqe64 *mlx5_cqwq_get_cqe(struct mlx5_cqwq *wq)
return cqe;
}
+static inline
+struct mlx5_cqe64 *mlx5_cqwq_get_cqe_enahnced_comp(struct mlx5_cqwq *wq)
+{
+ u8 sw_validity_iteration_count = mlx5_cqwq_get_wrap_cnt(wq) & 0xff;
+ u32 ci = mlx5_cqwq_get_ci(wq);
+ struct mlx5_cqe64 *cqe;
+
+ cqe = mlx5_cqwq_get_wqe(wq, ci);
+ if (cqe->validity_iteration_count != sw_validity_iteration_count)
+ return NULL;
+
+ /* ensure cqe content is read after cqe ownership bit/validity byte */
+ dma_rmb();
+
+ return cqe;
+}
+
static inline u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq)
{
return (u32)wq->fbc.sz_m1 + 1;
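
mlx5_cqwq_get_cqe_enahnced_comp() replaces the classic ownership-bit test with an 8-bit validity iteration count: hardware stamps each CQE with the iteration in which it wrote it, and software compares that stamp against the low byte of its own wrap counter; equality means the CQE belongs to the current lap around the ring. A sketch of the idea, with stand-in field and counter names:

  #include <stdint.h>
  #include <stddef.h>

  struct cqe {
  	uint8_t validity_iteration_count;
  	/* ... completion payload ... */
  };

  struct cqwq {
  	struct cqe *ring;
  	uint32_t ci;       /* consumer index */
  	uint32_t sz;       /* ring size, power of two */
  	uint32_t wrap_cnt; /* bumped each time ci wraps */
  };

  static struct cqe *get_cqe_enhanced(struct cqwq *wq)
  {
  	uint8_t sw_count = wq->wrap_cnt & 0xff;
  	struct cqe *cqe = &wq->ring[wq->ci & (wq->sz - 1)];

  	/* A mismatched stamp means this slot still holds a CQE from a
  	 * previous lap (or none at all); the real code issues dma_rmb()
  	 * after this check, before reading the payload.
  	 */
  	if (cqe->validity_iteration_count != sw_count)
  		return NULL;

  	return cqe;
  }
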