author		David S. Miller <davem@davemloft.net>	2017-11-01 11:37:17 +0900
committer	David S. Miller <davem@davemloft.net>	2017-11-01 11:37:17 +0900
commit		2278f51421f54432dc2e4e25c39d46925f59a359 (patch)
tree		00e0dcc03398a8361423210c658551d49c00001e
parent		e8a8d8672824474720f86b91e30eeec2a258bc42 (diff)
parent		1fe850062c9ee15a3bea1ae90aef386a492a1c5e (diff)
Merge tag 'mlx5-updates-2017-10-31' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux
Saeed Mahameed says:

====================
mlx5-updates-2017-10-31 (mlx5e stats groups)

This series from Kamal introduces an important refactoring of mlx5e stats handling: the stats are organized into a generic stats-group structure, which allows the behavior and reporting of each group to be controlled in a modular way.

In the first patch Kamal introduces a new data type, "mlx5e_stats_grp". It defines a new API for creating a group of stats and simplifies how they are handled. The struct defines the following behavior per group:
- get_num_stats() - return the number of counters in the group.
- fill_strings()  - fill the counters' strings for the group.
- fill_stats()    - fill the counters' values for the group.

The remaining patches are straightforward refactoring, one per stats group, moving each mlx5e stats group to the new API. The goal is better flexibility and modularity when adding new counters: all ethtool logic is now generic and simply loops over the stats groups, calling each group's callbacks to determine how and what to report back to user space.

A new file, en_stats.c, is introduced to hold the stats-group logic and implementation. The static structures (counter descriptors) move from en_stats.h to en_stats.c, which reduces the mlx5_core binary footprint; this was originally reported and addressed by Stephen Hemminger in ("mlx5: fix space waste from ethtool descriptions"), which was waived in favor of this re-design.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
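For context, the pattern the cover letter describes boils down to three callbacks chained through a running index. Below is a minimal, compilable userspace sketch of that pattern, not the driver code itself: struct demo_priv, the demo_* names and the single hard-coded counter value are illustrative stand-ins, while the callback signatures and the ethtool-style loops mirror the mlx5e_stats_grp API introduced by the series.

#include <stdio.h>
#include <string.h>

#define ETH_GSTRING_LEN 32

struct demo_priv { int q_counter; };          /* stand-in for struct mlx5e_priv */

struct demo_stats_grp {
	int (*get_num_stats)(struct demo_priv *priv);
	int (*fill_strings)(struct demo_priv *priv, char *data, int idx);
	int (*fill_stats)(struct demo_priv *priv, unsigned long long *data, int idx);
};

/* One trivial group exposing a single software counter. */
static int sw_num(struct demo_priv *priv) { (void)priv; return 1; }
static int sw_strings(struct demo_priv *priv, char *data, int idx)
{
	(void)priv;
	strcpy(data + (idx++) * ETH_GSTRING_LEN, "rx_packets");
	return idx;
}
static int sw_stats(struct demo_priv *priv, unsigned long long *data, int idx)
{
	(void)priv;
	data[idx++] = 42;	/* the driver would read priv->stats.sw here */
	return idx;
}

static const struct demo_stats_grp demo_stats_grps[] = {
	{ .get_num_stats = sw_num, .fill_strings = sw_strings, .fill_stats = sw_stats },
};
#define NUM_GRPS (int)(sizeof(demo_stats_grps) / sizeof(demo_stats_grps[0]))

int main(void)
{
	struct demo_priv priv = { .q_counter = 1 };
	char strings[16][ETH_GSTRING_LEN];
	unsigned long long vals[16];
	int i, idx, total = 0;

	/* get_sset_count(): the total is just the sum over all groups */
	for (i = 0; i < NUM_GRPS; i++)
		total += demo_stats_grps[i].get_num_stats(&priv);

	/* get_strings() / get_ethtool_stats(): chain the running idx through the groups */
	for (i = 0, idx = 0; i < NUM_GRPS; i++)
		idx = demo_stats_grps[i].fill_strings(&priv, &strings[0][0], idx);
	for (i = 0, idx = 0; i < NUM_GRPS; i++)
		idx = demo_stats_grps[i].fill_stats(&priv, vals, idx);

	for (i = 0; i < total; i++)
		printf("%-*s %llu\n", ETH_GSTRING_LEN, strings[i], vals[i]);
	return 0;
}

The payoff of the design is visible in the loops above: adding a new counter group means appending one entry to the array, with no changes to the generic ethtool paths.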
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/Makefile	2
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c	256
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/en_stats.c	895
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/en_stats.h	287
4 files changed, 914 insertions, 526 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 100fe4ecad9b..f391c7cb7656 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -13,7 +13,7 @@ mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o \
fpga/ipsec.o
mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
- en_tx.o en_rx.o en_rx_am.o en_txrx.o vxlan.o \
+ en_tx.o en_rx.o en_rx_am.o en_txrx.o en_stats.o vxlan.o \
en_arfs.o en_fs_ethtool.o en_selftest.o
mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 81a112e40fe3..b34aa8efb036 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -31,7 +31,6 @@
*/
#include "en.h"
-#include "en_accel/ipsec.h"
void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
struct ethtool_drvinfo *drvinfo)
@@ -136,59 +135,15 @@ void mlx5e_build_ptys2ethtool_map(void)
ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT);
}
-static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
-{
- struct mlx5_core_dev *mdev = priv->mdev;
- u8 pfc_en_tx;
- u8 pfc_en_rx;
- int err;
-
- if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
- return 0;
-
- err = mlx5_query_port_pfc(mdev, &pfc_en_tx, &pfc_en_rx);
-
- return err ? 0 : pfc_en_tx | pfc_en_rx;
-}
-
-static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv)
-{
- struct mlx5_core_dev *mdev = priv->mdev;
- u32 rx_pause;
- u32 tx_pause;
- int err;
-
- if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
- return false;
-
- err = mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);
-
- return err ? false : rx_pause | tx_pause;
-}
-
-#define MLX5E_NUM_Q_CNTRS(priv) (NUM_Q_COUNTERS * (!!priv->q_counter))
-#define MLX5E_NUM_RQ_STATS(priv) (NUM_RQ_STATS * (priv)->channels.num)
-#define MLX5E_NUM_SQ_STATS(priv) \
- (NUM_SQ_STATS * (priv)->channels.num * (priv)->channels.params.num_tc)
-#define MLX5E_NUM_PFC_COUNTERS(priv) \
- ((mlx5e_query_global_pause_combined(priv) + hweight8(mlx5e_query_pfc_combined(priv))) * \
- NUM_PPORT_PER_PRIO_PFC_COUNTERS)
-
int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset)
{
+ int i, num_stats = 0;
+
switch (sset) {
case ETH_SS_STATS:
- return NUM_SW_COUNTERS +
- MLX5E_NUM_Q_CNTRS(priv) +
- NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS(priv) +
- NUM_PCIE_COUNTERS(priv) +
- MLX5E_NUM_RQ_STATS(priv) +
- MLX5E_NUM_SQ_STATS(priv) +
- MLX5E_NUM_PFC_COUNTERS(priv) +
- ARRAY_SIZE(mlx5e_pme_status_desc) +
- ARRAY_SIZE(mlx5e_pme_error_desc) +
- mlx5e_ipsec_get_count(priv);
-
+ for (i = 0; i < mlx5e_num_stats_grps; i++)
+ num_stats += mlx5e_stats_grps[i].get_num_stats(priv);
+ return num_stats;
case ETH_SS_PRIV_FLAGS:
return ARRAY_SIZE(mlx5e_priv_flags);
case ETH_SS_TEST:
@@ -208,104 +163,10 @@ static int mlx5e_get_sset_count(struct net_device *dev, int sset)
static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, u8 *data)
{
- int i, j, tc, prio, idx = 0;
- unsigned long pfc_combined;
-
- /* SW counters */
- for (i = 0; i < NUM_SW_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].format);
-
- /* Q counters */
- for (i = 0; i < MLX5E_NUM_Q_CNTRS(priv); i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN, q_stats_desc[i].format);
-
- /* VPORT counters */
- for (i = 0; i < NUM_VPORT_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- vport_stats_desc[i].format);
-
- /* PPORT counters */
- for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pport_802_3_stats_desc[i].format);
-
- for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pport_2863_stats_desc[i].format);
-
- for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pport_2819_stats_desc[i].format);
-
- for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv); i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pport_phy_statistical_stats_desc[i].format);
-
- for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS(priv); i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pport_eth_ext_stats_desc[i].format);
-
- for (i = 0; i < NUM_PCIE_PERF_COUNTERS(priv); i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pcie_perf_stats_desc[i].format);
-
- for (i = 0; i < NUM_PCIE_PERF_COUNTERS64(priv); i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pcie_perf_stats_desc64[i].format);
-
- for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS(priv); i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pcie_perf_stall_stats_desc[i].format);
-
- for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
- for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- pport_per_prio_traffic_stats_desc[i].format, prio);
- }
-
- pfc_combined = mlx5e_query_pfc_combined(priv);
- for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
- for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
- char pfc_string[ETH_GSTRING_LEN];
-
- snprintf(pfc_string, sizeof(pfc_string), "prio%d", prio);
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- pport_per_prio_pfc_stats_desc[i].format, pfc_string);
- }
- }
-
- if (mlx5e_query_global_pause_combined(priv)) {
- for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- pport_per_prio_pfc_stats_desc[i].format, "global");
- }
- }
-
- /* port module event counters */
- for (i = 0; i < ARRAY_SIZE(mlx5e_pme_status_desc); i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_status_desc[i].format);
-
- for (i = 0; i < ARRAY_SIZE(mlx5e_pme_error_desc); i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_error_desc[i].format);
-
- /* IPSec counters */
- idx += mlx5e_ipsec_get_strings(priv, data + idx * ETH_GSTRING_LEN);
-
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
- return;
-
- /* per channel counters */
- for (i = 0; i < priv->channels.num; i++)
- for (j = 0; j < NUM_RQ_STATS; j++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- rq_stats_desc[j].format, i);
+ int i, idx = 0;
- for (tc = 0; tc < priv->channels.params.num_tc; tc++)
- for (i = 0; i < priv->channels.num; i++)
- for (j = 0; j < NUM_SQ_STATS; j++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- sq_stats_desc[j].format,
- priv->channel_tc2txq[i][tc]);
+ for (i = 0; i < mlx5e_num_stats_grps; i++)
+ idx = mlx5e_stats_grps[i].fill_strings(priv, data, idx);
}
void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv, u32 stringset, u8 *data)
@@ -340,10 +201,7 @@ static void mlx5e_get_strings(struct net_device *dev, u32 stringset, u8 *data)
void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
struct ethtool_stats *stats, u64 *data)
{
- struct mlx5e_channels *channels;
- struct mlx5_priv *mlx5_priv;
- int i, j, tc, prio, idx = 0;
- unsigned long pfc_combined;
+ int i, idx = 0;
if (!data)
return;
@@ -351,102 +209,10 @@ void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
mutex_lock(&priv->state_lock);
if (test_bit(MLX5E_STATE_OPENED, &priv->state))
mlx5e_update_stats(priv, true);
- channels = &priv->channels;
mutex_unlock(&priv->state_lock);
- for (i = 0; i < NUM_SW_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
- sw_stats_desc, i);
-
- for (i = 0; i < MLX5E_NUM_Q_CNTRS(priv); i++)
- data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
- q_stats_desc, i);
-
- for (i = 0; i < NUM_VPORT_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
- vport_stats_desc, i);
-
- for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.IEEE_802_3_counters,
- pport_802_3_stats_desc, i);
-
- for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2863_counters,
- pport_2863_stats_desc, i);
-
- for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters,
- pport_2819_stats_desc, i);
-
- for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv); i++)
- data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
- pport_phy_statistical_stats_desc, i);
-
- for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS(priv); i++)
- data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.eth_ext_counters,
- pport_eth_ext_stats_desc, i);
-
- for (i = 0; i < NUM_PCIE_PERF_COUNTERS(priv); i++)
- data[idx++] = MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
- pcie_perf_stats_desc, i);
-
- for (i = 0; i < NUM_PCIE_PERF_COUNTERS64(priv); i++)
- data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pcie.pcie_perf_counters,
- pcie_perf_stats_desc64, i);
-
- for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS(priv); i++)
- data[idx++] = MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
- pcie_perf_stall_stats_desc, i);
-
- for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
- for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
- pport_per_prio_traffic_stats_desc, i);
- }
-
- pfc_combined = mlx5e_query_pfc_combined(priv);
- for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
- for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
- data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
- pport_per_prio_pfc_stats_desc, i);
- }
- }
-
- if (mlx5e_query_global_pause_combined(priv)) {
- for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
- data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
- pport_per_prio_pfc_stats_desc, i);
- }
- }
-
- /* port module event counters */
- mlx5_priv = &priv->mdev->priv;
- for (i = 0; i < ARRAY_SIZE(mlx5e_pme_status_desc); i++)
- data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_priv->pme_stats.status_counters,
- mlx5e_pme_status_desc, i);
-
- for (i = 0; i < ARRAY_SIZE(mlx5e_pme_error_desc); i++)
- data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_priv->pme_stats.error_counters,
- mlx5e_pme_error_desc, i);
-
- /* IPSec counters */
- idx += mlx5e_ipsec_get_stats(priv, data + idx);
-
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
- return;
-
- /* per channel counters */
- for (i = 0; i < channels->num; i++)
- for (j = 0; j < NUM_RQ_STATS; j++)
- data[idx++] =
- MLX5E_READ_CTR64_CPU(&channels->c[i]->rq.stats,
- rq_stats_desc, j);
-
- for (tc = 0; tc < priv->channels.params.num_tc; tc++)
- for (i = 0; i < channels->num; i++)
- for (j = 0; j < NUM_SQ_STATS; j++)
- data[idx++] = MLX5E_READ_CTR64_CPU(&channels->c[i]->sq[tc].stats,
- sq_stats_desc, j);
+ for (i = 0; i < mlx5e_num_stats_grps; i++)
+ idx = mlx5e_stats_grps[i].fill_stats(priv, data, idx);
}
static void mlx5e_get_ethtool_stats(struct net_device *dev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
new file mode 100644
index 000000000000..8bc30484ecc1
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -0,0 +1,895 @@
+/*
+ * Copyright (c) 2017, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "en.h"
+#include "en_accel/ipsec.h"
+
+static const struct counter_desc sw_stats_desc[] = {
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_page_reuse) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_reuse) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_full) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, link_down_events_phy) },
+};
+
+#define NUM_SW_COUNTERS ARRAY_SIZE(sw_stats_desc)
+
+static int mlx5e_grp_sw_get_num_stats(struct mlx5e_priv *priv)
+{
+ return NUM_SW_COUNTERS;
+}
+
+static int mlx5e_grp_sw_fill_strings(struct mlx5e_priv *priv, u8 *data, int idx)
+{
+ int i;
+
+ for (i = 0; i < NUM_SW_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].format);
+ return idx;
+}
+
+static int mlx5e_grp_sw_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
+{
+ int i;
+
+ for (i = 0; i < NUM_SW_COUNTERS; i++)
+ data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw, sw_stats_desc, i);
+ return idx;
+}
+
+static const struct counter_desc q_stats_desc[] = {
+ { MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) },
+};
+
+#define NUM_Q_COUNTERS ARRAY_SIZE(q_stats_desc)
+
+static int mlx5e_grp_q_get_num_stats(struct mlx5e_priv *priv)
+{
+ return priv->q_counter ? NUM_Q_COUNTERS : 0;
+}
+
+static int mlx5e_grp_q_fill_strings(struct mlx5e_priv *priv, u8 *data, int idx)
+{
+ int i;
+
+ for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN, q_stats_desc[i].format);
+ return idx;
+}
+
+static int mlx5e_grp_q_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
+{
+ int i;
+
+ for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
+ data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt, q_stats_desc, i);
+ return idx;
+}
+
+#define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c)
+static const struct counter_desc vport_stats_desc[] = {
+ { "rx_vport_unicast_packets",
+ VPORT_COUNTER_OFF(received_eth_unicast.packets) },
+ { "rx_vport_unicast_bytes",
+ VPORT_COUNTER_OFF(received_eth_unicast.octets) },
+ { "tx_vport_unicast_packets",
+ VPORT_COUNTER_OFF(transmitted_eth_unicast.packets) },
+ { "tx_vport_unicast_bytes",
+ VPORT_COUNTER_OFF(transmitted_eth_unicast.octets) },
+ { "rx_vport_multicast_packets",
+ VPORT_COUNTER_OFF(received_eth_multicast.packets) },
+ { "rx_vport_multicast_bytes",
+ VPORT_COUNTER_OFF(received_eth_multicast.octets) },
+ { "tx_vport_multicast_packets",
+ VPORT_COUNTER_OFF(transmitted_eth_multicast.packets) },
+ { "tx_vport_multicast_bytes",
+ VPORT_COUNTER_OFF(transmitted_eth_multicast.octets) },
+ { "rx_vport_broadcast_packets",
+ VPORT_COUNTER_OFF(received_eth_broadcast.packets) },
+ { "rx_vport_broadcast_bytes",
+ VPORT_COUNTER_OFF(received_eth_broadcast.octets) },
+ { "tx_vport_broadcast_packets",
+ VPORT_COUNTER_OFF(transmitted_eth_broadcast.packets) },
+ { "tx_vport_broadcast_bytes",
+ VPORT_COUNTER_OFF(transmitted_eth_broadcast.octets) },
+ { "rx_vport_rdma_unicast_packets",
+ VPORT_COUNTER_OFF(received_ib_unicast.packets) },
+ { "rx_vport_rdma_unicast_bytes",
+ VPORT_COUNTER_OFF(received_ib_unicast.octets) },
+ { "tx_vport_rdma_unicast_packets",
+ VPORT_COUNTER_OFF(transmitted_ib_unicast.packets) },
+ { "tx_vport_rdma_unicast_bytes",
+ VPORT_COUNTER_OFF(transmitted_ib_unicast.octets) },
+ { "rx_vport_rdma_multicast_packets",
+ VPORT_COUNTER_OFF(received_ib_multicast.packets) },
+ { "rx_vport_rdma_multicast_bytes",
+ VPORT_COUNTER_OFF(received_ib_multicast.octets) },
+ { "tx_vport_rdma_multicast_packets",
+ VPORT_COUNTER_OFF(transmitted_ib_multicast.packets) },
+ { "tx_vport_rdma_multicast_bytes",
+ VPORT_COUNTER_OFF(transmitted_ib_multicast.octets) },
+};
+
+#define NUM_VPORT_COUNTERS ARRAY_SIZE(vport_stats_desc)
+
+static int mlx5e_grp_vport_get_num_stats(struct mlx5e_priv *priv)
+{
+ return NUM_VPORT_COUNTERS;
+}
+
+static int mlx5e_grp_vport_fill_strings(struct mlx5e_priv *priv, u8 *data,
+ int idx)
+{
+ int i;
+
+ for (i = 0; i < NUM_VPORT_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_stats_desc[i].format);
+ return idx;
+}
+
+static int mlx5e_grp_vport_fill_stats(struct mlx5e_priv *priv, u64 *data,
+ int idx)
+{
+ int i;
+
+ for (i = 0; i < NUM_VPORT_COUNTERS; i++)
+ data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
+ vport_stats_desc, i);
+ return idx;
+}
+
+#define PPORT_802_3_OFF(c) \
+ MLX5_BYTE_OFF(ppcnt_reg, \
+ counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
+static const struct counter_desc pport_802_3_stats_desc[] = {
+ { "tx_packets_phy", PPORT_802_3_OFF(a_frames_transmitted_ok) },
+ { "rx_packets_phy", PPORT_802_3_OFF(a_frames_received_ok) },
+ { "rx_crc_errors_phy", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
+ { "tx_bytes_phy", PPORT_802_3_OFF(a_octets_transmitted_ok) },
+ { "rx_bytes_phy", PPORT_802_3_OFF(a_octets_received_ok) },
+ { "tx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
+ { "tx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
+ { "rx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
+ { "rx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
+ { "rx_in_range_len_errors_phy", PPORT_802_3_OFF(a_in_range_length_errors) },
+ { "rx_out_of_range_len_phy", PPORT_802_3_OFF(a_out_of_range_length_field) },
+ { "rx_oversize_pkts_phy", PPORT_802_3_OFF(a_frame_too_long_errors) },
+ { "rx_symbol_err_phy", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
+ { "tx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
+ { "rx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_received) },
+ { "rx_unsupported_op_phy", PPORT_802_3_OFF(a_unsupported_opcodes_received) },
+ { "rx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
+ { "tx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
+};
+
+#define NUM_PPORT_802_3_COUNTERS ARRAY_SIZE(pport_802_3_stats_desc)
+
+static int mlx5e_grp_802_3_get_num_stats(struct mlx5e_priv *priv)
+{
+ return NUM_PPORT_802_3_COUNTERS;
+}
+
+static int mlx5e_grp_802_3_fill_strings(struct mlx5e_priv *priv, u8 *data,
+ int idx)
+{
+ int i;
+
+ for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_802_3_stats_desc[i].format);
+ return idx;
+}
+
+static int mlx5e_grp_802_3_fill_stats(struct mlx5e_priv *priv, u64 *data,
+ int idx)
+{
+ int i;
+
+ for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
+ data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.IEEE_802_3_counters,
+ pport_802_3_stats_desc, i);
+ return idx;
+}
+
+#define PPORT_2863_OFF(c) \
+ MLX5_BYTE_OFF(ppcnt_reg, \
+ counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
+static const struct counter_desc pport_2863_stats_desc[] = {
+ { "rx_discards_phy", PPORT_2863_OFF(if_in_discards) },
+ { "tx_discards_phy", PPORT_2863_OFF(if_out_discards) },
+ { "tx_errors_phy", PPORT_2863_OFF(if_out_errors) },
+};
+
+#define NUM_PPORT_2863_COUNTERS ARRAY_SIZE(pport_2863_stats_desc)
+
+static int mlx5e_grp_2863_get_num_stats(struct mlx5e_priv *priv)
+{
+ return NUM_PPORT_2863_COUNTERS;
+}
+
+static int mlx5e_grp_2863_fill_strings(struct mlx5e_priv *priv, u8 *data,
+ int idx)
+{
+ int i;
+
+ for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2863_stats_desc[i].format);
+ return idx;
+}
+
+static int mlx5e_grp_2863_fill_stats(struct mlx5e_priv *priv, u64 *data,
+ int idx)
+{
+ int i;
+
+ for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
+ data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2863_counters,
+ pport_2863_stats_desc, i);
+ return idx;
+}
+
+#define PPORT_2819_OFF(c) \
+ MLX5_BYTE_OFF(ppcnt_reg, \
+ counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
+static const struct counter_desc pport_2819_stats_desc[] = {
+ { "rx_undersize_pkts_phy", PPORT_2819_OFF(ether_stats_undersize_pkts) },
+ { "rx_fragments_phy", PPORT_2819_OFF(ether_stats_fragments) },
+ { "rx_jabbers_phy", PPORT_2819_OFF(ether_stats_jabbers) },
+ { "rx_64_bytes_phy", PPORT_2819_OFF(ether_stats_pkts64octets) },
+ { "rx_65_to_127_bytes_phy", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
+ { "rx_128_to_255_bytes_phy", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
+ { "rx_256_to_511_bytes_phy", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
+ { "rx_512_to_1023_bytes_phy", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
+ { "rx_1024_to_1518_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
+ { "rx_1519_to_2047_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
+ { "rx_2048_to_4095_bytes_phy", PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
+ { "rx_4096_to_8191_bytes_phy", PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
+ { "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
+};
+
+#define NUM_PPORT_2819_COUNTERS ARRAY_SIZE(pport_2819_stats_desc)
+
+static int mlx5e_grp_2819_get_num_stats(struct mlx5e_priv *priv)
+{
+ return NUM_PPORT_2819_COUNTERS;
+}
+
+static int mlx5e_grp_2819_fill_strings(struct mlx5e_priv *priv, u8 *data,
+ int idx)
+{
+ int i;
+
+ for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2819_stats_desc[i].format);
+ return idx;
+}
+
+static int mlx5e_grp_2819_fill_stats(struct mlx5e_priv *priv, u64 *data,
+ int idx)
+{
+ int i;
+
+ for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
+ data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters,
+ pport_2819_stats_desc, i);
+ return idx;
+}
+
+#define PPORT_PHY_STATISTICAL_OFF(c) \
+ MLX5_BYTE_OFF(ppcnt_reg, \
+ counter_set.phys_layer_statistical_cntrs.c##_high)
+static const struct counter_desc pport_phy_statistical_stats_desc[] = {
+ { "rx_pcs_symbol_err_phy", PPORT_PHY_STATISTICAL_OFF(phy_symbol_errors) },
+ { "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) },
+};
+
+#define NUM_PPORT_PHY_COUNTERS ARRAY_SIZE(pport_phy_statistical_stats_desc)
+
+static int mlx5e_grp_phy_get_num_stats(struct mlx5e_priv *priv)
+{
+ return MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group) ?
+ NUM_PPORT_PHY_COUNTERS : 0;
+}
+
+static int mlx5e_grp_phy_fill_strings(struct mlx5e_priv *priv, u8 *data,
+ int idx)
+{
+ int i;
+
+ if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group))
+ for (i = 0; i < NUM_PPORT_PHY_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ pport_phy_statistical_stats_desc[i].format);
+ return idx;
+}
+
+static int mlx5e_grp_phy_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
+{
+ int i;
+
+ if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group))
+ for (i = 0; i < NUM_PPORT_PHY_COUNTERS; i++)
+ data[idx++] =
+ MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
+ pport_phy_statistical_stats_desc, i);
+ return idx;
+}
+
+#define PPORT_ETH_EXT_OFF(c) \
+ MLX5_BYTE_OFF(ppcnt_reg, \
+ counter_set.eth_extended_cntrs_grp_data_layout.c##_high)
+static const struct counter_desc pport_eth_ext_stats_desc[] = {
+ { "rx_buffer_passed_thres_phy", PPORT_ETH_EXT_OFF(rx_buffer_almost_full) },
+};
+
+#define NUM_PPORT_ETH_EXT_COUNTERS ARRAY_SIZE(pport_eth_ext_stats_desc)
+
+static int mlx5e_grp_eth_ext_get_num_stats(struct mlx5e_priv *priv)
+{
+ if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
+ return NUM_PPORT_ETH_EXT_COUNTERS;
+
+ return 0;
+}
+
+static int mlx5e_grp_eth_ext_fill_strings(struct mlx5e_priv *priv, u8 *data,
+ int idx)
+{
+ int i;
+
+ if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
+ for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ pport_eth_ext_stats_desc[i].format);
+ return idx;
+}
+
+static int mlx5e_grp_eth_ext_fill_stats(struct mlx5e_priv *priv, u64 *data,
+ int idx)
+{
+ int i;
+
+ if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
+ for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
+ data[idx++] =
+ MLX5E_READ_CTR64_BE(&priv->stats.pport.eth_ext_counters,
+ pport_eth_ext_stats_desc, i);
+ return idx;
+}
+
+#define PCIE_PERF_OFF(c) \
+ MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
+static const struct counter_desc pcie_perf_stats_desc[] = {
+ { "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) },
+ { "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) },
+};
+
+#define PCIE_PERF_OFF64(c) \
+ MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c##_high)
+static const struct counter_desc pcie_perf_stats_desc64[] = {
+ { "outbound_pci_buffer_overflow", PCIE_PERF_OFF64(tx_overflow_buffer_pkt) },
+};
+
+static const struct counter_desc pcie_perf_stall_stats_desc[] = {
+ { "outbound_pci_stalled_rd", PCIE_PERF_OFF(outbound_stalled_reads) },
+ { "outbound_pci_stalled_wr", PCIE_PERF_OFF(outbound_stalled_writes) },
+ { "outbound_pci_stalled_rd_events", PCIE_PERF_OFF(outbound_stalled_reads_events) },
+ { "outbound_pci_stalled_wr_events", PCIE_PERF_OFF(outbound_stalled_writes_events) },
+};
+
+#define NUM_PCIE_PERF_COUNTERS ARRAY_SIZE(pcie_perf_stats_desc)
+#define NUM_PCIE_PERF_COUNTERS64 ARRAY_SIZE(pcie_perf_stats_desc64)
+#define NUM_PCIE_PERF_STALL_COUNTERS ARRAY_SIZE(pcie_perf_stall_stats_desc)
+
+static int mlx5e_grp_pcie_get_num_stats(struct mlx5e_priv *priv)
+{
+ int num_stats = 0;
+
+ if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
+ num_stats += NUM_PCIE_PERF_COUNTERS;
+
+ if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
+ num_stats += NUM_PCIE_PERF_COUNTERS64;
+
+ if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
+ num_stats += NUM_PCIE_PERF_STALL_COUNTERS;
+
+ return num_stats;
+}
+
+static int mlx5e_grp_pcie_fill_strings(struct mlx5e_priv *priv, u8 *data,
+ int idx)
+{
+ int i;
+
+ if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
+ for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ pcie_perf_stats_desc[i].format);
+
+ if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
+ for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ pcie_perf_stats_desc64[i].format);
+
+ if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
+ for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ pcie_perf_stall_stats_desc[i].format);
+ return idx;
+}
+
+static int mlx5e_grp_pcie_fill_stats(struct mlx5e_priv *priv, u64 *data,
+ int idx)
+{
+ int i;
+
+ if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
+ for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
+ data[idx++] =
+ MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
+ pcie_perf_stats_desc, i);
+
+ if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
+ for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
+ data[idx++] =
+ MLX5E_READ_CTR64_BE(&priv->stats.pcie.pcie_perf_counters,
+ pcie_perf_stats_desc64, i);
+
+ if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
+ for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
+ data[idx++] =
+ MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
+ pcie_perf_stall_stats_desc, i);
+ return idx;
+}
+
+#define PPORT_PER_PRIO_OFF(c) \
+ MLX5_BYTE_OFF(ppcnt_reg, \
+ counter_set.eth_per_prio_grp_data_layout.c##_high)
+static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
+ { "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
+ { "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
+ { "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) },
+ { "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) },
+};
+
+#define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS ARRAY_SIZE(pport_per_prio_traffic_stats_desc)
+
+static int mlx5e_grp_per_prio_traffic_get_num_stats(struct mlx5e_priv *priv)
+{
+ return NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * NUM_PPORT_PRIO;
+}
+
+static int mlx5e_grp_per_prio_traffic_fill_strings(struct mlx5e_priv *priv,
+ u8 *data,
+ int idx)
+{
+ int i, prio;
+
+ for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
+ for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ pport_per_prio_traffic_stats_desc[i].format, prio);
+ }
+
+ return idx;
+}
+
+static int mlx5e_grp_per_prio_traffic_fill_stats(struct mlx5e_priv *priv,
+ u64 *data,
+ int idx)
+{
+ int i, prio;
+
+ for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
+ for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
+ data[idx++] =
+ MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
+ pport_per_prio_traffic_stats_desc, i);
+ }
+
+ return idx;
+}
+
+static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
+ /* %s is "global" or "prio{i}" */
+ { "rx_%s_pause", PPORT_PER_PRIO_OFF(rx_pause) },
+ { "rx_%s_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
+ { "tx_%s_pause", PPORT_PER_PRIO_OFF(tx_pause) },
+ { "tx_%s_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
+ { "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
+};
+
+#define NUM_PPORT_PER_PRIO_PFC_COUNTERS ARRAY_SIZE(pport_per_prio_pfc_stats_desc)
+
+static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u8 pfc_en_tx;
+ u8 pfc_en_rx;
+ int err;
+
+ if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+ return 0;
+
+ err = mlx5_query_port_pfc(mdev, &pfc_en_tx, &pfc_en_rx);
+
+ return err ? 0 : pfc_en_tx | pfc_en_rx;
+}
+
+static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 rx_pause;
+ u32 tx_pause;
+ int err;
+
+ if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+ return false;
+
+ err = mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);
+
+ return err ? false : rx_pause | tx_pause;
+}
+
+static int mlx5e_grp_per_prio_pfc_get_num_stats(struct mlx5e_priv *priv)
+{
+ return (mlx5e_query_global_pause_combined(priv) +
+ hweight8(mlx5e_query_pfc_combined(priv))) *
+ NUM_PPORT_PER_PRIO_PFC_COUNTERS;
+}
+
+static int mlx5e_grp_per_prio_pfc_fill_strings(struct mlx5e_priv *priv,
+ u8 *data,
+ int idx)
+{
+ unsigned long pfc_combined;
+ int i, prio;
+
+ pfc_combined = mlx5e_query_pfc_combined(priv);
+ for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
+ for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
+ char pfc_string[ETH_GSTRING_LEN];
+
+ snprintf(pfc_string, sizeof(pfc_string), "prio%d", prio);
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ pport_per_prio_pfc_stats_desc[i].format, pfc_string);
+ }
+ }
+
+ if (mlx5e_query_global_pause_combined(priv)) {
+ for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ pport_per_prio_pfc_stats_desc[i].format, "global");
+ }
+ }
+
+ return idx;
+}
+
+static int mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv,
+ u64 *data,
+ int idx)
+{
+ unsigned long pfc_combined;
+ int i, prio;
+
+ pfc_combined = mlx5e_query_pfc_combined(priv);
+ for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
+ for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
+ data[idx++] =
+ MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
+ pport_per_prio_pfc_stats_desc, i);
+ }
+ }
+
+ if (mlx5e_query_global_pause_combined(priv)) {
+ for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
+ data[idx++] =
+ MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
+ pport_per_prio_pfc_stats_desc, i);
+ }
+ }
+
+ return idx;
+}
+
+static const struct counter_desc mlx5e_pme_status_desc[] = {
+ { "module_unplug", 8 },
+};
+
+static const struct counter_desc mlx5e_pme_error_desc[] = {
+ { "module_bus_stuck", 16 }, /* bus stuck (I2C or data shorted) */
+ { "module_high_temp", 48 }, /* high temperature */
+ { "module_bad_shorted", 56 }, /* bad or shorted cable/module */
+};
+
+#define NUM_PME_STATUS_STATS ARRAY_SIZE(mlx5e_pme_status_desc)
+#define NUM_PME_ERR_STATS ARRAY_SIZE(mlx5e_pme_error_desc)
+
+static int mlx5e_grp_pme_get_num_stats(struct mlx5e_priv *priv)
+{
+ return NUM_PME_STATUS_STATS + NUM_PME_ERR_STATS;
+}
+
+static int mlx5e_grp_pme_fill_strings(struct mlx5e_priv *priv, u8 *data,
+ int idx)
+{
+ int i;
+
+ for (i = 0; i < NUM_PME_STATUS_STATS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_status_desc[i].format);
+
+ for (i = 0; i < NUM_PME_ERR_STATS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_error_desc[i].format);
+
+ return idx;
+}
+
+static int mlx5e_grp_pme_fill_stats(struct mlx5e_priv *priv, u64 *data,
+ int idx)
+{
+ struct mlx5_priv *mlx5_priv = &priv->mdev->priv;
+ int i;
+
+ for (i = 0; i < NUM_PME_STATUS_STATS; i++)
+ data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_priv->pme_stats.status_counters,
+ mlx5e_pme_status_desc, i);
+
+ for (i = 0; i < NUM_PME_ERR_STATS; i++)
+ data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_priv->pme_stats.error_counters,
+ mlx5e_pme_error_desc, i);
+
+ return idx;
+}
+
+static int mlx5e_grp_ipsec_get_num_stats(struct mlx5e_priv *priv)
+{
+ return mlx5e_ipsec_get_count(priv);
+}
+
+static int mlx5e_grp_ipsec_fill_strings(struct mlx5e_priv *priv, u8 *data,
+ int idx)
+{
+ return idx + mlx5e_ipsec_get_strings(priv,
+ data + idx * ETH_GSTRING_LEN);
+}
+
+static int mlx5e_grp_ipsec_fill_stats(struct mlx5e_priv *priv, u64 *data,
+ int idx)
+{
+ return idx + mlx5e_ipsec_get_stats(priv, data + idx);
+}
+
+static const struct counter_desc rq_stats_desc[] = {
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx_full) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, page_reuse) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_reuse) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_full) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_empty) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) },
+};
+
+static const struct counter_desc sq_stats_desc[] = {
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, packets) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, bytes) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
+};
+
+#define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc)
+#define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc)
+
+static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv)
+{
+ return (NUM_RQ_STATS * priv->channels.num) +
+ (NUM_SQ_STATS * priv->channels.num * priv->channels.params.num_tc);
+}
+
+static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
+ int idx)
+{
+ int i, j, tc;
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ return idx;
+
+ for (i = 0; i < priv->channels.num; i++)
+ for (j = 0; j < NUM_RQ_STATS; j++)
+ sprintf(data + (idx++) * ETH_GSTRING_LEN, rq_stats_desc[j].format, i);
+
+ for (tc = 0; tc < priv->channels.params.num_tc; tc++)
+ for (i = 0; i < priv->channels.num; i++)
+ for (j = 0; j < NUM_SQ_STATS; j++)
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ sq_stats_desc[j].format,
+ priv->channel_tc2txq[i][tc]);
+
+ return idx;
+}
+
+static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data,
+ int idx)
+{
+ struct mlx5e_channels *channels = &priv->channels;
+ int i, j, tc;
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ return idx;
+
+ for (i = 0; i < channels->num; i++)
+ for (j = 0; j < NUM_RQ_STATS; j++)
+ data[idx++] =
+ MLX5E_READ_CTR64_CPU(&channels->c[i]->rq.stats,
+ rq_stats_desc, j);
+
+ for (tc = 0; tc < priv->channels.params.num_tc; tc++)
+ for (i = 0; i < channels->num; i++)
+ for (j = 0; j < NUM_SQ_STATS; j++)
+ data[idx++] =
+ MLX5E_READ_CTR64_CPU(&channels->c[i]->sq[tc].stats,
+ sq_stats_desc, j);
+
+ return idx;
+}
+
+const struct mlx5e_stats_grp mlx5e_stats_grps[] = {
+ {
+ .get_num_stats = mlx5e_grp_sw_get_num_stats,
+ .fill_strings = mlx5e_grp_sw_fill_strings,
+ .fill_stats = mlx5e_grp_sw_fill_stats,
+ },
+ {
+ .get_num_stats = mlx5e_grp_q_get_num_stats,
+ .fill_strings = mlx5e_grp_q_fill_strings,
+ .fill_stats = mlx5e_grp_q_fill_stats,
+ },
+ {
+ .get_num_stats = mlx5e_grp_vport_get_num_stats,
+ .fill_strings = mlx5e_grp_vport_fill_strings,
+ .fill_stats = mlx5e_grp_vport_fill_stats,
+ },
+ {
+ .get_num_stats = mlx5e_grp_802_3_get_num_stats,
+ .fill_strings = mlx5e_grp_802_3_fill_strings,
+ .fill_stats = mlx5e_grp_802_3_fill_stats,
+ },
+ {
+ .get_num_stats = mlx5e_grp_2863_get_num_stats,
+ .fill_strings = mlx5e_grp_2863_fill_strings,
+ .fill_stats = mlx5e_grp_2863_fill_stats,
+ },
+ {
+ .get_num_stats = mlx5e_grp_2819_get_num_stats,
+ .fill_strings = mlx5e_grp_2819_fill_strings,
+ .fill_stats = mlx5e_grp_2819_fill_stats,
+ },
+ {
+ .get_num_stats = mlx5e_grp_phy_get_num_stats,
+ .fill_strings = mlx5e_grp_phy_fill_strings,
+ .fill_stats = mlx5e_grp_phy_fill_stats,
+ },
+ {
+ .get_num_stats = mlx5e_grp_eth_ext_get_num_stats,
+ .fill_strings = mlx5e_grp_eth_ext_fill_strings,
+ .fill_stats = mlx5e_grp_eth_ext_fill_stats,
+ },
+ {
+ .get_num_stats = mlx5e_grp_pcie_get_num_stats,
+ .fill_strings = mlx5e_grp_pcie_fill_strings,
+ .fill_stats = mlx5e_grp_pcie_fill_stats,
+ },
+ {
+ .get_num_stats = mlx5e_grp_per_prio_traffic_get_num_stats,
+ .fill_strings = mlx5e_grp_per_prio_traffic_fill_strings,
+ .fill_stats = mlx5e_grp_per_prio_traffic_fill_stats,
+ },
+ {
+ .get_num_stats = mlx5e_grp_per_prio_pfc_get_num_stats,
+ .fill_strings = mlx5e_grp_per_prio_pfc_fill_strings,
+ .fill_stats = mlx5e_grp_per_prio_pfc_fill_stats,
+ },
+ {
+ .get_num_stats = mlx5e_grp_pme_get_num_stats,
+ .fill_strings = mlx5e_grp_pme_fill_strings,
+ .fill_stats = mlx5e_grp_pme_fill_stats,
+ },
+ {
+ .get_num_stats = mlx5e_grp_ipsec_get_num_stats,
+ .fill_strings = mlx5e_grp_ipsec_fill_strings,
+ .fill_stats = mlx5e_grp_ipsec_fill_stats,
+ },
+ {
+ .get_num_stats = mlx5e_grp_channels_get_num_stats,
+ .fill_strings = mlx5e_grp_channels_fill_strings,
+ .fill_stats = mlx5e_grp_channels_fill_stats,
+ }
+};
+
+const int mlx5e_num_stats_grps = ARRAY_SIZE(mlx5e_stats_grps);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index f8637213afc0..d094663edd9b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -91,54 +91,10 @@ struct mlx5e_sw_stats {
u64 link_down_events_phy;
};
-static const struct counter_desc sw_stats_desc[] = {
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_bytes) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_page_reuse) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_reuse) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_full) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, link_down_events_phy) },
-};
-
struct mlx5e_qcounter_stats {
u32 rx_out_of_buffer;
};
-static const struct counter_desc q_stats_desc[] = {
- { MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) },
-};
-
-#define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c)
#define VPORT_COUNTER_GET(vstats, c) MLX5_GET64(query_vport_counter_out, \
vstats->query_vport_out, c)
@@ -146,83 +102,22 @@ struct mlx5e_vport_stats {
__be64 query_vport_out[MLX5_ST_SZ_QW(query_vport_counter_out)];
};
-static const struct counter_desc vport_stats_desc[] = {
- { "rx_vport_unicast_packets",
- VPORT_COUNTER_OFF(received_eth_unicast.packets) },
- { "rx_vport_unicast_bytes",
- VPORT_COUNTER_OFF(received_eth_unicast.octets) },
- { "tx_vport_unicast_packets",
- VPORT_COUNTER_OFF(transmitted_eth_unicast.packets) },
- { "tx_vport_unicast_bytes",
- VPORT_COUNTER_OFF(transmitted_eth_unicast.octets) },
- { "rx_vport_multicast_packets",
- VPORT_COUNTER_OFF(received_eth_multicast.packets) },
- { "rx_vport_multicast_bytes",
- VPORT_COUNTER_OFF(received_eth_multicast.octets) },
- { "tx_vport_multicast_packets",
- VPORT_COUNTER_OFF(transmitted_eth_multicast.packets) },
- { "tx_vport_multicast_bytes",
- VPORT_COUNTER_OFF(transmitted_eth_multicast.octets) },
- { "rx_vport_broadcast_packets",
- VPORT_COUNTER_OFF(received_eth_broadcast.packets) },
- { "rx_vport_broadcast_bytes",
- VPORT_COUNTER_OFF(received_eth_broadcast.octets) },
- { "tx_vport_broadcast_packets",
- VPORT_COUNTER_OFF(transmitted_eth_broadcast.packets) },
- { "tx_vport_broadcast_bytes",
- VPORT_COUNTER_OFF(transmitted_eth_broadcast.octets) },
- { "rx_vport_rdma_unicast_packets",
- VPORT_COUNTER_OFF(received_ib_unicast.packets) },
- { "rx_vport_rdma_unicast_bytes",
- VPORT_COUNTER_OFF(received_ib_unicast.octets) },
- { "tx_vport_rdma_unicast_packets",
- VPORT_COUNTER_OFF(transmitted_ib_unicast.packets) },
- { "tx_vport_rdma_unicast_bytes",
- VPORT_COUNTER_OFF(transmitted_ib_unicast.octets) },
- { "rx_vport_rdma_multicast_packets",
- VPORT_COUNTER_OFF(received_ib_multicast.packets) },
- { "rx_vport_rdma_multicast_bytes",
- VPORT_COUNTER_OFF(received_ib_multicast.octets) },
- { "tx_vport_rdma_multicast_packets",
- VPORT_COUNTER_OFF(transmitted_ib_multicast.packets) },
- { "tx_vport_rdma_multicast_bytes",
- VPORT_COUNTER_OFF(transmitted_ib_multicast.octets) },
-};
-
-#define PPORT_802_3_OFF(c) \
- MLX5_BYTE_OFF(ppcnt_reg, \
- counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
#define PPORT_802_3_GET(pstats, c) \
MLX5_GET64(ppcnt_reg, pstats->IEEE_802_3_counters, \
counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
-#define PPORT_2863_OFF(c) \
- MLX5_BYTE_OFF(ppcnt_reg, \
- counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
#define PPORT_2863_GET(pstats, c) \
MLX5_GET64(ppcnt_reg, pstats->RFC_2863_counters, \
counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
-#define PPORT_2819_OFF(c) \
- MLX5_BYTE_OFF(ppcnt_reg, \
- counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
#define PPORT_2819_GET(pstats, c) \
MLX5_GET64(ppcnt_reg, pstats->RFC_2819_counters, \
counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
-#define PPORT_PHY_STATISTICAL_OFF(c) \
- MLX5_BYTE_OFF(ppcnt_reg, \
- counter_set.phys_layer_statistical_cntrs.c##_high)
#define PPORT_PHY_STATISTICAL_GET(pstats, c) \
MLX5_GET64(ppcnt_reg, (pstats)->phy_statistical_counters, \
counter_set.phys_layer_statistical_cntrs.c##_high)
-#define PPORT_PER_PRIO_OFF(c) \
- MLX5_BYTE_OFF(ppcnt_reg, \
- counter_set.eth_per_prio_grp_data_layout.c##_high)
#define PPORT_PER_PRIO_GET(pstats, prio, c) \
MLX5_GET64(ppcnt_reg, pstats->per_prio_counters[prio], \
counter_set.eth_per_prio_grp_data_layout.c##_high)
#define NUM_PPORT_PRIO 8
-#define PPORT_ETH_EXT_OFF(c) \
- MLX5_BYTE_OFF(ppcnt_reg, \
- counter_set.eth_extended_cntrs_grp_data_layout.c##_high)
#define PPORT_ETH_EXT_GET(pstats, c) \
MLX5_GET64(ppcnt_reg, (pstats)->eth_ext_counters, \
counter_set.eth_extended_cntrs_grp_data_layout.c##_high)
@@ -237,82 +132,10 @@ struct mlx5e_pport_stats {
__be64 eth_ext_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
};
-static const struct counter_desc pport_802_3_stats_desc[] = {
- { "tx_packets_phy", PPORT_802_3_OFF(a_frames_transmitted_ok) },
- { "rx_packets_phy", PPORT_802_3_OFF(a_frames_received_ok) },
- { "rx_crc_errors_phy", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
- { "tx_bytes_phy", PPORT_802_3_OFF(a_octets_transmitted_ok) },
- { "rx_bytes_phy", PPORT_802_3_OFF(a_octets_received_ok) },
- { "tx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
- { "tx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
- { "rx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
- { "rx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
- { "rx_in_range_len_errors_phy", PPORT_802_3_OFF(a_in_range_length_errors) },
- { "rx_out_of_range_len_phy", PPORT_802_3_OFF(a_out_of_range_length_field) },
- { "rx_oversize_pkts_phy", PPORT_802_3_OFF(a_frame_too_long_errors) },
- { "rx_symbol_err_phy", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
- { "tx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
- { "rx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_received) },
- { "rx_unsupported_op_phy", PPORT_802_3_OFF(a_unsupported_opcodes_received) },
- { "rx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
- { "tx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
-};
-
-static const struct counter_desc pport_2863_stats_desc[] = {
- { "rx_discards_phy", PPORT_2863_OFF(if_in_discards) },
- { "tx_discards_phy", PPORT_2863_OFF(if_out_discards) },
- { "tx_errors_phy", PPORT_2863_OFF(if_out_errors) },
-};
-
-static const struct counter_desc pport_2819_stats_desc[] = {
- { "rx_undersize_pkts_phy", PPORT_2819_OFF(ether_stats_undersize_pkts) },
- { "rx_fragments_phy", PPORT_2819_OFF(ether_stats_fragments) },
- { "rx_jabbers_phy", PPORT_2819_OFF(ether_stats_jabbers) },
- { "rx_64_bytes_phy", PPORT_2819_OFF(ether_stats_pkts64octets) },
- { "rx_65_to_127_bytes_phy", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
- { "rx_128_to_255_bytes_phy", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
- { "rx_256_to_511_bytes_phy", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
- { "rx_512_to_1023_bytes_phy", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
- { "rx_1024_to_1518_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
- { "rx_1519_to_2047_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
- { "rx_2048_to_4095_bytes_phy", PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
- { "rx_4096_to_8191_bytes_phy", PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
- { "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
-};
-
-static const struct counter_desc pport_phy_statistical_stats_desc[] = {
- { "rx_pcs_symbol_err_phy", PPORT_PHY_STATISTICAL_OFF(phy_symbol_errors) },
- { "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) },
-};
-
-static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
- { "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
- { "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
- { "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) },
- { "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) },
-};
-
-static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
- /* %s is "global" or "prio{i}" */
- { "rx_%s_pause", PPORT_PER_PRIO_OFF(rx_pause) },
- { "rx_%s_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
- { "tx_%s_pause", PPORT_PER_PRIO_OFF(tx_pause) },
- { "tx_%s_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
- { "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
-};
-
-static const struct counter_desc pport_eth_ext_stats_desc[] = {
- { "rx_buffer_passed_thres_phy", PPORT_ETH_EXT_OFF(rx_buffer_almost_full) },
-};
-
-#define PCIE_PERF_OFF(c) \
- MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
#define PCIE_PERF_GET(pcie_stats, c) \
MLX5_GET(mpcnt_reg, (pcie_stats)->pcie_perf_counters, \
counter_set.pcie_perf_cntrs_grp_data_layout.c)
-#define PCIE_PERF_OFF64(c) \
- MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c##_high)
#define PCIE_PERF_GET64(pcie_stats, c) \
MLX5_GET64(mpcnt_reg, (pcie_stats)->pcie_perf_counters, \
counter_set.pcie_perf_cntrs_grp_data_layout.c##_high)
@@ -321,22 +144,6 @@ struct mlx5e_pcie_stats {
__be64 pcie_perf_counters[MLX5_ST_SZ_QW(mpcnt_reg)];
};
-static const struct counter_desc pcie_perf_stats_desc[] = {
- { "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) },
- { "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) },
-};
-
-static const struct counter_desc pcie_perf_stats_desc64[] = {
- { "outbound_pci_buffer_overflow", PCIE_PERF_OFF64(tx_overflow_buffer_pkt) },
-};
-
-static const struct counter_desc pcie_perf_stall_stats_desc[] = {
- { "outbound_pci_stalled_rd", PCIE_PERF_OFF(outbound_stalled_reads) },
- { "outbound_pci_stalled_wr", PCIE_PERF_OFF(outbound_stalled_writes) },
- { "outbound_pci_stalled_rd_events", PCIE_PERF_OFF(outbound_stalled_reads_events) },
- { "outbound_pci_stalled_wr_events", PCIE_PERF_OFF(outbound_stalled_writes_events) },
-};
-
struct mlx5e_rq_stats {
u64 packets;
u64 bytes;
@@ -362,31 +169,6 @@ struct mlx5e_rq_stats {
u64 cache_waive;
};
-static const struct counter_desc rq_stats_desc[] = {
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx_full) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, page_reuse) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_reuse) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_full) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_empty) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) },
-};
-
struct mlx5e_sq_stats {
/* commonly accessed in data path */
u64 packets;
@@ -406,61 +188,6 @@ struct mlx5e_sq_stats {
u64 dropped;
};
-static const struct counter_desc sq_stats_desc[] = {
- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, packets) },
- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, bytes) },
- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
-};
-
-#define NUM_SW_COUNTERS ARRAY_SIZE(sw_stats_desc)
-#define NUM_Q_COUNTERS ARRAY_SIZE(q_stats_desc)
-#define NUM_VPORT_COUNTERS ARRAY_SIZE(vport_stats_desc)
-#define NUM_PPORT_802_3_COUNTERS ARRAY_SIZE(pport_802_3_stats_desc)
-#define NUM_PPORT_2863_COUNTERS ARRAY_SIZE(pport_2863_stats_desc)
-#define NUM_PPORT_2819_COUNTERS ARRAY_SIZE(pport_2819_stats_desc)
-#define NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv) \
- (ARRAY_SIZE(pport_phy_statistical_stats_desc) * \
- MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group))
-#define NUM_PCIE_PERF_COUNTERS(priv) \
- (ARRAY_SIZE(pcie_perf_stats_desc) * \
- MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
-#define NUM_PCIE_PERF_COUNTERS64(priv) \
- (ARRAY_SIZE(pcie_perf_stats_desc64) * \
- MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
-#define NUM_PCIE_PERF_STALL_COUNTERS(priv) \
- (ARRAY_SIZE(pcie_perf_stall_stats_desc) * \
- MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
-#define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS \
- ARRAY_SIZE(pport_per_prio_traffic_stats_desc)
-#define NUM_PPORT_PER_PRIO_PFC_COUNTERS \
- ARRAY_SIZE(pport_per_prio_pfc_stats_desc)
-#define NUM_PPORT_ETH_EXT_COUNTERS(priv) \
- (ARRAY_SIZE(pport_eth_ext_stats_desc) * \
- MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
-#define NUM_PPORT_COUNTERS(priv) (NUM_PPORT_802_3_COUNTERS + \
- NUM_PPORT_2863_COUNTERS + \
- NUM_PPORT_2819_COUNTERS + \
- NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv) + \
- NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * \
- NUM_PPORT_PRIO + \
- NUM_PPORT_ETH_EXT_COUNTERS(priv))
-#define NUM_PCIE_COUNTERS(priv) (NUM_PCIE_PERF_COUNTERS(priv) + \
- NUM_PCIE_PERF_COUNTERS64(priv) +\
- NUM_PCIE_PERF_STALL_COUNTERS(priv))
-#define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc)
-#define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc)
-
struct mlx5e_stats {
struct mlx5e_sw_stats sw;
struct mlx5e_qcounter_stats qcnt;
@@ -470,14 +197,14 @@ struct mlx5e_stats {
struct mlx5e_pcie_stats pcie;
};
-static const struct counter_desc mlx5e_pme_status_desc[] = {
- { "module_unplug", 8 },
+struct mlx5e_priv;
+struct mlx5e_stats_grp {
+ int (*get_num_stats)(struct mlx5e_priv *priv);
+ int (*fill_strings)(struct mlx5e_priv *priv, u8 *data, int idx);
+ int (*fill_stats)(struct mlx5e_priv *priv, u64 *data, int idx);
};
-static const struct counter_desc mlx5e_pme_error_desc[] = {
- { "module_bus_stuck", 16 }, /* bus stuck (I2C or data shorted) */
- { "module_high_temp", 48 }, /* high temperature */
- { "module_bad_shorted", 56 }, /* bad or shorted cable/module */
-};
+extern const struct mlx5e_stats_grp mlx5e_stats_grps[];
+extern const int mlx5e_num_stats_grps;
#endif /* __MLX5_EN_STATS_H__ */