From 3df5b3c67546fb05266766b6abaf71563f82efe4 Mon Sep 17 00:00:00 2001
From: Or Gerlitz
Date: Tue, 22 Nov 2016 23:09:54 +0200
Subject: net: Add net-device param to the get offloaded stats ndo

Some drivers need to check a few internal details of the device to answer
this. To be used in a downstream mlx5 commit.

Signed-off-by: Or Gerlitz
Signed-off-by: Saeed Mahameed
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/net')

diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 4a1f9d5f7c03..e0d7d5adbaee 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -857,7 +857,7 @@ mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
 	return 0;
 }

-static bool mlxsw_sp_port_has_offload_stats(int attr_id)
+static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
 {
 	switch (attr_id) {
 	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
-- cgit v1.2.3
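For orientation, here is a minimal sketch (not taken from the patches) of how a driver wires up the offload-stats ndo pair after this change. The foo_* names are hypothetical; the ndo field names, signatures and IFLA_OFFLOAD_XSTATS_CPU_HIT are the ones used in the diffs above.

#include <linux/netdevice.h>
#include <linux/if_link.h>
#include <linux/string.h>
#include <linux/errno.h>

static bool foo_has_offload_stats(const struct net_device *dev, int attr_id)
{
	/* the new dev argument lets the answer depend on the specific port,
	 * e.g. only representor netdevs expose CPU-hit stats
	 */
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return true;
	}
	return false;
}

static int foo_get_offload_stats(int attr_id, const struct net_device *dev,
				 void *attr_data)
{
	struct rtnl_link_stats64 *stats = attr_data;

	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		memset(stats, 0, sizeof(*stats));	/* fill from SW counters here */
		return 0;
	}
	return -EINVAL;
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_has_offload_stats	= foo_has_offload_stats,
	.ndo_get_offload_stats	= foo_get_offload_stats,
};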
From 370bad0f9a5261da0ef0bc76705f5b0b8af148ab Mon Sep 17 00:00:00 2001
From: Or Gerlitz
Date: Tue, 22 Nov 2016 23:09:55 +0200
Subject: net/mlx5e: Support HW (offloaded) and SW counters for SRIOV switchdev mode

Switchdev driver net-device port statistics should follow the model introduced
in commit a5ea31f57309 'Merge branch net-offloaded-stats'.

For VF reps we return the SRIOV eswitch vport stats as the usual ones and SW
stats if asked. For the PF, if we're in the switchdev mode, we return the
uplink stats and SW stats if asked, otherwise as before. The uplink stats are
implemented using the PPCNT 802_3 counters which are already being read/cached
by the driver.

Signed-off-by: Or Gerlitz
Signed-off-by: Saeed Mahameed
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/mellanox/mlx5/core/en.h       |   9 +-
 drivers/net/ethernet/mellanox/mlx5/core/en_main.c  |  31 +++---
 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c   | 111 +++++++++++++++++++--
 drivers/net/ethernet/mellanox/mlx5/core/en_stats.h |   1 +
 4 files changed, 128 insertions(+), 24 deletions(-)

(limited to 'drivers/net')

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index ac09767b6984..ebf5dbc85bff 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -874,6 +874,7 @@ int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv);
 void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv);
 int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr);
 void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
+void mlx5e_update_hw_rep_counters(struct mlx5e_priv *priv);

 int mlx5e_create_direct_rqts(struct mlx5e_priv *priv);
 void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);
@@ -890,12 +891,16 @@ struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
 void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv);
 int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev);
 void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev);
-struct rtnl_link_stats64 *
-mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
 u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout);
 void mlx5e_add_vxlan_port(struct net_device *netdev, struct udp_tunnel_info *ti);
 void mlx5e_del_vxlan_port(struct net_device *netdev, struct udp_tunnel_info *ti);
+int mlx5e_get_offload_stats(int attr_id, const struct net_device *dev,
+			    void *sp);
+bool mlx5e_has_offload_stats(const struct net_device *dev, int attr_id);
+
+bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv);
+bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv);

 #endif /* __MLX5_EN_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 695760878898..8e8d809bf3fd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -470,16 +470,6 @@ static void mlx5e_rq_free_mpwqe_info(struct mlx5e_rq *rq)
 	kfree(rq->mpwqe.info);
 }

-static bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv)
-{
-	struct mlx5_eswitch_rep *rep = (struct mlx5_eswitch_rep *)priv->ppriv;
-
-	if (rep && rep->vport != FDB_UPLINK_VPORT)
-		return true;
-
-	return false;
-}
-
 static int mlx5e_create_rq(struct mlx5e_channel *c,
 			   struct mlx5e_rq_param *param,
 			   struct mlx5e_rq *rq)
@@ -2664,7 +2654,7 @@ mqprio:
 	return mlx5e_setup_tc(dev, tc->tc);
 }

-struct rtnl_link_stats64 *
+static struct rtnl_link_stats64 *
 mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
 {
 	struct mlx5e_priv *priv = netdev_priv(dev);
@@ -2672,13 +2662,20 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
 	struct mlx5e_vport_stats *vstats = &priv->stats.vport;
 	struct mlx5e_pport_stats *pstats = &priv->stats.pport;

-	stats->rx_packets = sstats->rx_packets;
-	stats->rx_bytes = sstats->rx_bytes;
-	stats->tx_packets = sstats->tx_packets;
-	stats->tx_bytes = sstats->tx_bytes;
+	if (mlx5e_is_uplink_rep(priv)) {
+		stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
+		stats->rx_bytes = PPORT_802_3_GET(pstats, a_octets_received_ok);
+		stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok);
+		stats->tx_bytes = PPORT_802_3_GET(pstats, a_octets_transmitted_ok);
+	} else {
+		stats->rx_packets = sstats->rx_packets;
+		stats->rx_bytes = sstats->rx_bytes;
+		stats->tx_packets = sstats->tx_packets;
+		stats->tx_bytes = sstats->tx_bytes;
+		stats->tx_dropped = sstats->tx_queue_dropped;
+	}

 	stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer;
-	stats->tx_dropped = sstats->tx_queue_dropped;

 	stats->rx_length_errors =
 		PPORT_802_3_GET(pstats, a_in_range_length_errors) +
@@ -3290,6 +3287,8 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = {
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller     = mlx5e_netpoll,
 #endif
+	.ndo_has_offload_stats   = mlx5e_has_offload_stats,
+	.ndo_get_offload_stats   = mlx5e_get_offload_stats,
 };

 static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index a84825d59f33..e0d1a561308f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -72,7 +72,29 @@ static void mlx5e_rep_get_strings(struct net_device *dev,
 	}
 }

-static void mlx5e_update_sw_rep_counters(struct mlx5e_priv *priv)
+static void mlx5e_rep_update_hw_counters(struct mlx5e_priv *priv)
+{
+	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+	struct mlx5_eswitch_rep *rep = priv->ppriv;
+	struct rtnl_link_stats64 *vport_stats;
+	struct ifla_vf_stats vf_stats;
+	int err;
+
+	err = mlx5_eswitch_get_vport_stats(esw, rep->vport, &vf_stats);
+	if (err) {
+		pr_warn("vport %d error %d reading stats\n", rep->vport, err);
+		return;
+	}
+
+	vport_stats = &priv->stats.vf_vport;
+	/* flip tx/rx as we are reporting the counters for the switch vport */
+	vport_stats->rx_packets = vf_stats.tx_packets;
+	vport_stats->rx_bytes = vf_stats.tx_bytes;
+	vport_stats->tx_packets = vf_stats.rx_packets;
+	vport_stats->tx_bytes = vf_stats.rx_bytes;
+}
+
+static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv)
 {
 	struct mlx5e_sw_stats *s = &priv->stats.sw;
 	struct mlx5e_rq_stats *rq_stats;
@@ -95,6 +117,12 @@ static void mlx5e_update_sw_rep_counters(struct mlx5e_priv *priv)
 	}
 }

+static void mlx5e_rep_update_stats(struct mlx5e_priv *priv)
+{
+	mlx5e_rep_update_sw_counters(priv);
+	mlx5e_rep_update_hw_counters(priv);
+}
+
 static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
 					struct ethtool_stats *stats, u64 *data)
 {
@@ -106,7 +134,7 @@ static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
 	mutex_lock(&priv->state_lock);
 	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
-		mlx5e_update_sw_rep_counters(priv);
+		mlx5e_rep_update_sw_counters(priv);
 	mutex_unlock(&priv->state_lock);

 	for (i = 0; i < NUM_VPORT_REP_COUNTERS; i++)
@@ -245,6 +273,77 @@ static int mlx5e_rep_ndo_setup_tc(struct net_device *dev, u32 handle,
 	}
 }

+bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
+{
+	struct mlx5_eswitch_rep *rep = (struct mlx5_eswitch_rep *)priv->ppriv;
+	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+
+	if (rep && rep->vport == FDB_UPLINK_VPORT && esw->mode == SRIOV_OFFLOADS)
+		return true;
+
+	return false;
+}
+
+bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv)
+{
+	struct mlx5_eswitch_rep *rep = (struct mlx5_eswitch_rep *)priv->ppriv;
+
+	if (rep && rep->vport != FDB_UPLINK_VPORT)
+		return true;
+
+	return false;
+}
+
+bool mlx5e_has_offload_stats(const struct net_device *dev, int attr_id)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+
+	switch (attr_id) {
+	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
+		if (mlx5e_is_vf_vport_rep(priv) || mlx5e_is_uplink_rep(priv))
+			return true;
+	}
+
+	return false;
+}
+
+static int
+mlx5e_get_sw_stats64(const struct net_device *dev,
+		     struct rtnl_link_stats64 *stats)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	struct mlx5e_sw_stats *sstats = &priv->stats.sw;
+
+	stats->rx_packets = sstats->rx_packets;
+	stats->rx_bytes = sstats->rx_bytes;
+	stats->tx_packets = sstats->tx_packets;
+	stats->tx_bytes = sstats->tx_bytes;
+
+	stats->tx_dropped = sstats->tx_queue_dropped;
+
+	return 0;
+}
+
+int mlx5e_get_offload_stats(int attr_id, const struct net_device *dev,
+			    void *sp)
+{
+	switch (attr_id) {
+	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
+		return mlx5e_get_sw_stats64(dev, sp);
+	}
+
+	return -EINVAL;
+}
+
+static struct rtnl_link_stats64 *
+mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+
+	memcpy(stats, &priv->stats.vf_vport, sizeof(*stats));
+	return stats;
+}
+
 static const struct switchdev_ops mlx5e_rep_switchdev_ops = {
 	.switchdev_port_attr_get	= mlx5e_attr_get,
 };
@@ -255,9 +354,9 @@ static const struct net_device_ops mlx5e_netdev_ops_rep = {
 	.ndo_start_xmit          = mlx5e_xmit,
 	.ndo_get_phys_port_name  = mlx5e_rep_get_phys_port_name,
 	.ndo_setup_tc            = mlx5e_rep_ndo_setup_tc,
-	.ndo_get_stats64         = mlx5e_get_stats,
-	.ndo_udp_tunnel_add	 = mlx5e_add_vxlan_port,
-	.ndo_udp_tunnel_del	 = mlx5e_del_vxlan_port,
+	.ndo_get_stats64         = mlx5e_rep_get_stats,
+	.ndo_has_offload_stats   = mlx5e_has_offload_stats,
+	.ndo_get_offload_stats   = mlx5e_get_offload_stats,
 };

 static void mlx5e_build_rep_netdev_priv(struct mlx5_core_dev *mdev,
@@ -407,7 +506,7 @@ static struct mlx5e_profile mlx5e_rep_profile = {
 	.cleanup_rx		= mlx5e_cleanup_rep_rx,
 	.init_tx		= mlx5e_init_rep_tx,
 	.cleanup_tx		= mlx5e_cleanup_nic_tx,
-	.update_stats           = mlx5e_update_sw_rep_counters,
+	.update_stats           = mlx5e_rep_update_stats,
 	.max_nch		= mlx5e_get_rep_max_num_channels,
 	.max_tc			= 1,
 };
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 5da6a1c0de14..f202f872f57f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -407,6 +407,7 @@ struct mlx5e_stats {
 	struct mlx5e_vport_stats vport;
 	struct mlx5e_pport_stats pport;
 	struct mlx5e_pcie_stats pcie;
+	struct rtnl_link_stats64 vf_vport;
 };

 static const struct counter_desc mlx5e_pme_status_desc[] = {
-- cgit v1.2.3
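One detail in the patch above worth calling out: the representor reports the e-switch vport counters with tx and rx swapped, because those counters are taken on the switch side of the VF pipe, so traffic the VF transmits is what the representor receives. A small stand-alone C model of that conversion (illustrative types and names, not driver code):

#include <stdint.h>
#include <stdio.h>

struct vf_stats  { uint64_t rx_packets, rx_bytes, tx_packets, tx_bytes; };
struct rep_stats { uint64_t rx_packets, rx_bytes, tx_packets, tx_bytes; };

/* mirrors the "flip tx/rx" comment in mlx5e_rep_update_hw_counters() above */
static void vf_to_rep_stats(const struct vf_stats *vf, struct rep_stats *rep)
{
	rep->rx_packets = vf->tx_packets;	/* VF TX shows up as rep RX */
	rep->rx_bytes = vf->tx_bytes;
	rep->tx_packets = vf->rx_packets;	/* VF RX shows up as rep TX */
	rep->tx_bytes = vf->rx_bytes;
}

int main(void)
{
	struct vf_stats vf = { .rx_packets = 10, .rx_bytes = 1500,
			       .tx_packets = 20, .tx_bytes = 3000 };
	struct rep_stats rep;

	vf_to_rep_stats(&vf, &rep);
	printf("rep: rx %llu pkts, tx %llu pkts\n",
	       (unsigned long long)rep.rx_packets,
	       (unsigned long long)rep.tx_packets);
	return 0;
}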
From 20a1ea6747836e841feb1941892b9baa1dbcd0fb Mon Sep 17 00:00:00 2001
From: Or Gerlitz
Date: Tue, 22 Nov 2016 23:09:56 +0200
Subject: net/mlx5e: Support VF vport link state control for SRIOV switchdev mode

Reflect the administrative link changes done on the VF representor to the VF
e-switch vport. This means that doing ip link set down/up commands on the VF
rep will modify the e-switch vport state, which in turn will make proper VF
drivers set their carrier accordingly.

Signed-off-by: Or Gerlitz
Signed-off-by: Saeed Mahameed
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 33 ++++++++++++++++++++++--
 1 file changed, 31 insertions(+), 2 deletions(-)

(limited to 'drivers/net')

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index e0d1a561308f..5e33f6bb16e4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -236,6 +236,35 @@ void mlx5e_nic_rep_unload(struct mlx5_eswitch *esw,
 	mlx5e_tc_init(priv);
 }

+static int mlx5e_rep_open(struct net_device *dev)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	struct mlx5_eswitch_rep *rep = priv->ppriv;
+	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+	int err;
+
+	err = mlx5e_open(dev);
+	if (err)
+		return err;
+
+	err = mlx5_eswitch_set_vport_state(esw, rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_UP);
+	if (!err)
+		netif_carrier_on(dev);
+
+	return 0;
+}
+
+static int mlx5e_rep_close(struct net_device *dev)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	struct mlx5_eswitch_rep *rep = priv->ppriv;
+	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+
+	(void)mlx5_eswitch_set_vport_state(esw, rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
+
+	return mlx5e_close(dev);
+}
+
 static int mlx5e_rep_get_phys_port_name(struct net_device *dev,
 					 char *buf, size_t len)
 {
@@ -349,8 +378,8 @@ static const struct switchdev_ops mlx5e_rep_switchdev_ops = {
 };

 static const struct net_device_ops mlx5e_netdev_ops_rep = {
-	.ndo_open                = mlx5e_open,
-	.ndo_stop                = mlx5e_close,
+	.ndo_open                = mlx5e_rep_open,
+	.ndo_stop                = mlx5e_rep_close,
 	.ndo_start_xmit          = mlx5e_xmit,
 	.ndo_get_phys_port_name  = mlx5e_rep_get_phys_port_name,
 	.ndo_setup_tc            = mlx5e_rep_ndo_setup_tc,
-- cgit v1.2.3
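The behavioral contract in the patch above is small but easy to misread: bringing the representor up opens its own queues first and then raises the e-switch vport admin state, reporting carrier only if that second step succeeded, while bringing it down always lowers the vport state (the result is deliberately ignored) before closing. A stand-alone C model of that ordering (illustrative names and types, not driver code):

#include <stdbool.h>
#include <stdio.h>

enum vport_state { VPORT_ADMIN_DOWN, VPORT_ADMIN_UP };

struct rep {
	bool queues_open;		/* the mlx5e_open()/mlx5e_close() side */
	enum vport_state vport;		/* e-switch side, what the VF sees */
	bool carrier;
};

static int rep_open(struct rep *r, int vport_set_err)
{
	r->queues_open = true;
	if (!vport_set_err) {		/* carrier only if the vport went up */
		r->vport = VPORT_ADMIN_UP;
		r->carrier = true;
	}
	return 0;			/* open still succeeds either way */
}

static void rep_close(struct rep *r)
{
	r->vport = VPORT_ADMIN_DOWN;	/* error deliberately ignored */
	r->carrier = false;
	r->queues_open = false;
}

int main(void)
{
	struct rep r = { false, VPORT_ADMIN_DOWN, false };

	rep_open(&r, 0);
	printf("open: vport %s, carrier %d\n",
	       r.vport == VPORT_ADMIN_UP ? "up" : "down", r.carrier);
	rep_close(&r);
	return 0;
}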
From 34e4e99078667d30f71a50c1e5181e4270e9d8bb Mon Sep 17 00:00:00 2001
From: Roi Dayan
Date: Tue, 22 Nov 2016 23:09:58 +0200
Subject: net/mlx5: Enable to query min inline for a specific vport

Also move the inline capabilities enum to a shared header, vport.h.

Signed-off-by: Roi Dayan
Reviewed-by: Or Gerlitz
Signed-off-by: Saeed Mahameed
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/mellanox/mlx5/core/en.h      |  6 ------
 drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 11 +++++------
 drivers/net/ethernet/mellanox/mlx5/core/vport.c   | 14 ++++++++------
 include/linux/mlx5/vport.h                        | 10 ++++++++--
 4 files changed, 21 insertions(+), 20 deletions(-)

(limited to 'drivers/net')

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index ebf5dbc85bff..a2b32ed24315 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -150,12 +150,6 @@ static inline int mlx5_max_log_rq_size(int wq_type)
 	}
 }

-enum {
-	MLX5E_INLINE_MODE_L2,
-	MLX5E_INLINE_MODE_VPORT_CONTEXT,
-	MLX5_INLINE_MODE_NOT_REQUIRED,
-};
-
 struct mlx5e_tx_wqe {
 	struct mlx5_wqe_ctrl_seg ctrl;
 	struct mlx5_wqe_eth_seg  eth;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 8e8d809bf3fd..19403d6bf369 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -957,7 +957,7 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
 	sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
 	sq->max_inline  = param->max_inline;
 	sq->min_inline_mode =
-		MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5E_INLINE_MODE_VPORT_CONTEXT ?
+		MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT ?
 		param->min_inline_mode : 0;

 	err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu));
@@ -3417,14 +3417,13 @@ static void mlx5e_query_min_inline(struct mlx5_core_dev *mdev,
 				   u8 *min_inline_mode)
 {
 	switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) {
-	case MLX5E_INLINE_MODE_L2:
+	case MLX5_CAP_INLINE_MODE_L2:
 		*min_inline_mode = MLX5_INLINE_MODE_L2;
 		break;
-	case MLX5E_INLINE_MODE_VPORT_CONTEXT:
-		mlx5_query_nic_vport_min_inline(mdev,
-						min_inline_mode);
+	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
+		mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode);
 		break;
-	case MLX5_INLINE_MODE_NOT_REQUIRED:
+	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
 		*min_inline_mode = MLX5_INLINE_MODE_NONE;
 		break;
 	}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 525f17af108e..269e4401c342 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -113,15 +113,17 @@ static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
 	return mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
 }

-void mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
-				     u8 *min_inline_mode)
+int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
+				    u16 vport, u8 *min_inline)
 {
 	u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {0};
+	int err;

-	mlx5_query_nic_vport_context(mdev, 0, out, sizeof(out));
-
-	*min_inline_mode = MLX5_GET(query_nic_vport_context_out, out,
-				    nic_vport_context.min_wqe_inline_mode);
+	err = mlx5_query_nic_vport_context(mdev, vport, out, sizeof(out));
+	if (!err)
+		*min_inline = MLX5_GET(query_nic_vport_context_out, out,
+				       nic_vport_context.min_wqe_inline_mode);
+	return err;
 }
 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_min_inline);
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index 451b0bde9083..ec35157ea725 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -36,6 +36,12 @@
 #include
 #include

+enum {
+	MLX5_CAP_INLINE_MODE_L2,
+	MLX5_CAP_INLINE_MODE_VPORT_CONTEXT,
+	MLX5_CAP_INLINE_MODE_NOT_REQUIRED,
+};
+
 u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport);
 u8 mlx5_query_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
 				u16 vport);
@@ -43,8 +49,8 @@ int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
 				  u16 vport, u8 state);
 int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
 				     u16 vport, u8 *addr);
-void mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
-				     u8 *min_inline);
+int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
+				    u16 vport, u8 *min_inline);
 int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
 				     u16 vport, u8 min_inline);
 int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev,
-- cgit v1.2.3
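For readers unfamiliar with the knob the patch above generalizes: the min inline mode says how much of the packet headers the driver must copy ("inline") into the TX descriptor so the e-switch can parse and steer the packet. A stand-alone illustration of the ladder follows; the header sizes assume a plain untagged Ethernet/IPv4/TCP packet and are illustrative only, not values taken from the driver.

#include <stdio.h>

enum min_inline { INLINE_NONE, INLINE_L2, INLINE_IP, INLINE_TCP_UDP };

/* rough header-byte cost of each level for untagged Ethernet/IPv4/TCP */
static unsigned int min_inline_bytes(enum min_inline mode)
{
	switch (mode) {
	case INLINE_NONE:	return 0;
	case INLINE_L2:		return 14;		/* Ethernet */
	case INLINE_IP:		return 14 + 20;		/* + IPv4 */
	case INLINE_TCP_UDP:	return 14 + 20 + 20;	/* + TCP */
	}
	return 0;
}

int main(void)
{
	printf("'network' inline needs at least %u header bytes\n",
	       min_inline_bytes(INLINE_IP));
	return 0;
}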
From bffaa916588ebb065cfa0287195d9ad35612eecf Mon Sep 17 00:00:00 2001
From: Roi Dayan
Date: Tue, 22 Nov 2016 23:09:59 +0200
Subject: net/mlx5: E-Switch, Add control for inline mode

Implement devlink show and set of the HW inline-mode. The supported modes:
none, link, network, transport. We currently support a single mode for all
vports, so a set operation is applied to all vports. When the eswitch is
first initialized, the inline-mode is queried from the FW.

Signed-off-by: Roi Dayan
Reviewed-by: Or Gerlitz
Signed-off-by: Saeed Mahameed
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c  |   1 +
 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h  |   4 +
 .../ethernet/mellanox/mlx5/core/eswitch_offloads.c | 141 +++++++++++++++++++++
 drivers/net/ethernet/mellanox/mlx5/core/main.c     |   2 +
 4 files changed, 148 insertions(+)

(limited to 'drivers/net')

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 9734ac89826e..d6807c3cc461 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1798,6 +1798,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
 	esw->total_vports = total_vports;
 	esw->enabled_vports = 0;
 	esw->mode = SRIOV_NONE;
+	esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE;

 	dev->priv.eswitch = esw;
 	return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 40482e841413..cf1aa56424bd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -200,6 +200,7 @@ struct mlx5_esw_offload {
 	struct mlx5_flow_group *vport_rx_group;
 	struct mlx5_eswitch_rep *vport_reps;
 	DECLARE_HASHTABLE(encap_tbl, 8);
+	u8 inline_mode;
 };

 struct mlx5_eswitch {
@@ -309,6 +310,9 @@ void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,

 int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode);
 int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
+int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode);
+int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode);
+int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode);

 void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
 				     int vport_index,
 				     struct mlx5_eswitch_rep *rep);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 731f28625cc3..5c015501b164 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -657,6 +657,14 @@ static int esw_offloads_start(struct mlx5_eswitch *esw)
 		if (err1)
 			esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err);
 	}
+	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
+		if (mlx5_eswitch_inline_mode_get(esw,
+						 num_vfs,
+						 &esw->offloads.inline_mode)) {
+			esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
+			esw_warn(esw->dev, "Inline mode is different between vports\n");
+		}
+	}
 	return err;
 }

@@ -771,6 +779,50 @@ static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
 	return 0;
 }

+static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
+{
+	switch (mode) {
+	case DEVLINK_ESWITCH_INLINE_MODE_NONE:
+		*mlx5_mode = MLX5_INLINE_MODE_NONE;
+		break;
+	case DEVLINK_ESWITCH_INLINE_MODE_LINK:
+		*mlx5_mode = MLX5_INLINE_MODE_L2;
+		break;
+	case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
+		*mlx5_mode = MLX5_INLINE_MODE_IP;
+		break;
+	case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
+		*mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
+{
+	switch (mlx5_mode) {
+	case MLX5_INLINE_MODE_NONE:
+		*mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
+		break;
+	case MLX5_INLINE_MODE_L2:
+		*mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
+		break;
+	case MLX5_INLINE_MODE_IP:
+		*mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
+		break;
+	case MLX5_INLINE_MODE_TCP_UDP:
+		*mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
 {
 	struct mlx5_core_dev *dev;
@@ -815,6 +867,95 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
 	return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
 }

+int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
+{
+	struct mlx5_core_dev *dev = devlink_priv(devlink);
+	struct mlx5_eswitch *esw = dev->priv.eswitch;
+	int num_vports = esw->enabled_vports;
+	int err;
+	int vport;
+	u8 mlx5_mode;
+
+	if (!MLX5_CAP_GEN(dev, vport_group_manager))
+		return -EOPNOTSUPP;
+
+	if (esw->mode == SRIOV_NONE)
+		return -EOPNOTSUPP;
+
+	if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
+	    MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
+		return -EOPNOTSUPP;
+
+	err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
+	if (err)
+		goto out;
+
+	for (vport = 1; vport < num_vports; vport++) {
+		err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
+		if (err) {
+			esw_warn(dev, "Failed to set min inline on vport %d\n",
+				 vport);
+			goto revert_inline_mode;
+		}
+	}
+
+	esw->offloads.inline_mode = mlx5_mode;
+	return 0;
+
+revert_inline_mode:
+	while (--vport > 0)
+		mlx5_modify_nic_vport_min_inline(dev,
+						 vport,
+						 esw->offloads.inline_mode);
+out:
+	return err;
+}
+
+int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
+{
+	struct mlx5_core_dev *dev = devlink_priv(devlink);
+	struct mlx5_eswitch *esw = dev->priv.eswitch;
+
+	if (!MLX5_CAP_GEN(dev, vport_group_manager))
+		return -EOPNOTSUPP;
+
+	if (esw->mode == SRIOV_NONE)
+		return -EOPNOTSUPP;
+
+	if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
+	    MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
+		return -EOPNOTSUPP;
+
+	return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
+}
+
+int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
+{
+	struct mlx5_core_dev *dev = esw->dev;
+	int vport;
+	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
+
+	if (!MLX5_CAP_GEN(dev, vport_group_manager))
+		return -EOPNOTSUPP;
+
+	if (esw->mode == SRIOV_NONE)
+		return -EOPNOTSUPP;
+
+	if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
+	    MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
+		return -EOPNOTSUPP;
+
+	for (vport = 1; vport <= nvfs; vport++) {
+		mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
+		if (vport > 1 && prev_mlx5_mode != mlx5_mode)
+			return -EINVAL;
+		prev_mlx5_mode = mlx5_mode;
+	}
+
+	*mode = mlx5_mode;
+	return 0;
+}
+
 void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
 				     int vport_index,
 				     struct mlx5_eswitch_rep *__rep)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index f28df33e2ef7..b440a16101d2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1239,6 +1239,8 @@ static const struct devlink_ops mlx5_devlink_ops = {
 #ifdef CONFIG_MLX5_CORE_EN
 	.eswitch_mode_set = mlx5_devlink_eswitch_mode_set,
 	.eswitch_mode_get = mlx5_devlink_eswitch_mode_get,
+	.eswitch_inline_mode_set = mlx5_devlink_eswitch_inline_mode_set,
+	.eswitch_inline_mode_get = mlx5_devlink_eswitch_inline_mode_get,
 #endif
 };
-- cgit v1.2.3
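The devlink plumbing in the patch above is a straight mapping between the devlink inline-mode names (none/link/network/transport) and the mlx5 modes (NONE/L2/IP/TCP_UDP), plus an "all VF vports must agree" rule when the mode is first derived from FW. A stand-alone C model of those two pieces (enum values and names here are illustrative, not the kernel definitions):

#include <stdio.h>

enum devlink_inline { DL_NONE, DL_LINK, DL_NETWORK, DL_TRANSPORT };
enum mlx5_inline    { MODE_NONE, MODE_L2, MODE_IP, MODE_TCP_UDP };

static int devlink_to_mlx5(enum devlink_inline in, enum mlx5_inline *out)
{
	switch (in) {
	case DL_NONE:      *out = MODE_NONE;    return 0;
	case DL_LINK:      *out = MODE_L2;      return 0;
	case DL_NETWORK:   *out = MODE_IP;      return 0;
	case DL_TRANSPORT: *out = MODE_TCP_UDP; return 0;
	}
	return -1;
}

/* all VF vports must report the same min inline mode, otherwise fail
 * (mirrors the intent of mlx5_eswitch_inline_mode_get() above)
 */
static int common_inline_mode(const enum mlx5_inline *vports, int nvfs,
			      enum mlx5_inline *mode)
{
	int i;

	for (i = 1; i < nvfs; i++)
		if (vports[i] != vports[0])
			return -1;
	*mode = vports[0];
	return 0;
}

int main(void)
{
	enum mlx5_inline vports[] = { MODE_L2, MODE_L2, MODE_L2 };
	enum mlx5_inline common, set_mode;

	if (common_inline_mode(vports, 3, &common) == 0)
		printf("vports agree on mode %d\n", common);
	if (devlink_to_mlx5(DL_TRANSPORT, &set_mode) == 0)
		printf("devlink 'transport' maps to mlx5 mode %d\n", set_mode);
	return 0;
}

From user space this is driven through the devlink eswitch inline-mode get/set ops registered in main.c; the exact CLI spelling (e.g. an "inline-mode" keyword under "devlink dev eswitch set") depends on the iproute2 version and is mentioned here only as an assumption.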
From de0af0bf64fc11fab253f63424bd3aba12e5f614 Mon Sep 17 00:00:00 2001
From: Roi Dayan
Date: Tue, 22 Nov 2016 23:10:00 +0200
Subject: net/mlx5e: Enforce min inline mode when offloading flows

A flow should be offloaded only if the matches are allowed according to min
inline mode.

Signed-off-by: Roi Dayan
Reviewed-by: Or Gerlitz
Signed-off-by: Saeed Mahameed
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 46 +++++++++++++++++++++++--
 1 file changed, 44 insertions(+), 2 deletions(-)

(limited to 'drivers/net')
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 4b991124bc57..4d06fab842d6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -279,8 +279,10 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
 	return 0;
 }

-static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec,
-			    struct tc_cls_flower_offload *f)
+static int __parse_cls_flower(struct mlx5e_priv *priv,
+			      struct mlx5_flow_spec *spec,
+			      struct tc_cls_flower_offload *f,
+			      u8 *min_inline)
 {
 	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
 				       outer_headers);
@@ -289,6 +291,8 @@ static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec
 	u16 addr_type = 0;
 	u8 ip_proto = 0;

+	*min_inline = MLX5_INLINE_MODE_L2;
+
 	if (f->dissector->used_keys &
 	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
 	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
@@ -362,6 +366,9 @@ static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec
 			 mask->ip_proto);
 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
 			 key->ip_proto);
+
+		if (mask->ip_proto)
+			*min_inline = MLX5_INLINE_MODE_IP;
 	}

 	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
@@ -432,6 +439,9 @@ static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec
 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
 				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
 		       &key->dst, sizeof(key->dst));
+
+		if (mask->src || mask->dst)
+			*min_inline = MLX5_INLINE_MODE_IP;
 	}

 	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
@@ -457,6 +467,10 @@ static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec
 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
 				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
 		       &key->dst, sizeof(key->dst));
+
+		if (ipv6_addr_type(&mask->src) != IPV6_ADDR_ANY ||
+		    ipv6_addr_type(&mask->dst) != IPV6_ADDR_ANY)
+			*min_inline = MLX5_INLINE_MODE_IP;
 	}

 	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
@@ -497,11 +511,39 @@ static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec
 				    "Only UDP and TCP transport are supported\n");
 			return -EINVAL;
 		}
+
+		if (mask->src || mask->dst)
+			*min_inline = MLX5_INLINE_MODE_TCP_UDP;
 	}

 	return 0;
 }

+static int parse_cls_flower(struct mlx5e_priv *priv,
+			    struct mlx5_flow_spec *spec,
+			    struct tc_cls_flower_offload *f)
+{
+	struct mlx5_core_dev *dev = priv->mdev;
+	struct mlx5_eswitch *esw = dev->priv.eswitch;
+	struct mlx5_eswitch_rep *rep = priv->ppriv;
+	u8 min_inline;
+	int err;
+
+	err = __parse_cls_flower(priv, spec, f, &min_inline);
+
+	if (!err && esw->mode == SRIOV_OFFLOADS &&
+	    rep->vport != FDB_UPLINK_VPORT) {
+		if (min_inline > esw->offloads.inline_mode) {
+			netdev_warn(priv->netdev,
+				    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
+				    min_inline, esw->offloads.inline_mode);
+			return -EOPNOTSUPP;
+		}
+	}
+
+	return err;
+}
+
 static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 				u32 *action, u32 *flow_tag)
 {
-- cgit v1.2.3
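The enforcement itself is an ordered comparison: the parser records the deepest header layer a match touches, and the flow is refused if that is deeper than what the e-switch guarantees to inline. Note that the "min_inline > esw->offloads.inline_mode" test relies on the mlx5 mode values being ordered NONE < L2 < IP < TCP_UDP. A stand-alone C model of the check (names and match representation are illustrative, not driver code):

#include <stdbool.h>
#include <stdio.h>

/* ordered: each level implies the ones below it */
enum inline_mode { MODE_NONE, MODE_L2, MODE_IP, MODE_TCP_UDP };

struct match { bool l2; bool ip; bool l4_ports; };

static enum inline_mode required_inline(const struct match *m)
{
	if (m->l4_ports)
		return MODE_TCP_UDP;
	if (m->ip)
		return MODE_IP;
	return MODE_L2;		/* the parser starts from L2, as in the patch */
}

static bool can_offload(const struct match *m, enum inline_mode eswitch_mode)
{
	return required_inline(m) <= eswitch_mode;
}

int main(void)
{
	struct match m = { .l2 = true, .ip = true, .l4_ports = true };

	/* refused: matching L4 ports needs 'transport' but only 'network' is set */
	printf("offload with 'network' inline: %s\n",
	       can_offload(&m, MODE_IP) ? "yes" : "no");
	return 0;
}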