Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c | 2
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 7
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c | 17
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c | 101
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.h | 5
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_qos.c | 8
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman.c | 3
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_dtsec.c | 4
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_mac.h | 2
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_memac.c | 3
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_port.c | 9
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_tgec.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c | 15
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_devlink.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flex_pipe.c | 100
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flow.c | 13
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_hw_autogen.h | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h | 314
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c | 7
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 11
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sched.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.c | 35
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx_lib.c | 7
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_type.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 6
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_xsk.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.h | 19
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/reg.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c | 75
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c | 255
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.h | 18
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_ethtool.c | 96
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_lif.c | 42
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_lif.h | 5
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_txrx.c | 188
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_txrx.h | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sriov.c | 7
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_main.c | 4
-rw-r--r--  drivers/net/ethernet/sfc/ef100.c | 2
-rw-r--r--  drivers/net/ethernet/sfc/ef100_netdev.c | 16
-rw-r--r--  drivers/net/ethernet/sfc/ef100_nic.c | 657
-rw-r--r--  drivers/net/ethernet/sfc/ef100_nic.h | 48
-rw-r--r--  drivers/net/ethernet/sfc/ef100_rx.c | 150
-rw-r--r--  drivers/net/ethernet/sfc/ef100_rx.h | 1
-rw-r--r--  drivers/net/ethernet/sfc/ef100_tx.c | 367
-rw-r--r--  drivers/net/ethernet/sfc/ef100_tx.h | 4
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h | 21
-rw-r--r--  drivers/net/ethernet/sfc/tx_common.c | 1
-rw-r--r--  drivers/net/ethernet/sgi/ioc3-eth.c | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/toshiba/spider_net.c | 6
65 files changed, 2419 insertions, 306 deletions
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index c38a4b8a14cb..611875ef2cd1 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -786,7 +786,7 @@ static int hw_atl_a0_hw_multicast_list_set(struct aq_hw_s *self,
int err = 0;
if (count > (HW_ATL_A0_MAC_MAX - HW_ATL_A0_MAC_MIN)) {
- err = EBADRQC;
+ err = -EBADRQC;
goto err_exit;
}
for (cfg->mc_list_count = 0U; cfg->mc_list_count < count; ++cfg->mc_list_count) {
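This fix, like the cn23xx and enetc EINVAL fixes further down, follows the kernel's negative-errno convention: error paths return -Exxx so that callers testing for negative values see the failure. A minimal sketch (hypothetical helpers, not from this patch) of how a positive errno constant slips past such a caller:

    #include <linux/errno.h>
    #include <linux/printk.h>

    /* Hypothetical example -- not part of the patch. */
    static int do_setup(void)
    {
            return EBADRQC;         /* bug: positive errno */
    }

    static void caller(void)
    {
            int err = do_setup();

            if (err < 0)            /* never true for a positive EBADRQC */
                    pr_err("setup failed: %d\n", err);
            /* the failure is silently treated as success here;
             * "return -EBADRQC;" makes the check fire as intended
             */
    }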
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
index 43d11c38b38a..4cddd628d41b 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
@@ -1167,7 +1167,7 @@ static int cn23xx_get_pf_num(struct octeon_device *oct)
oct->pf_num = ((fdl_bit >> CN23XX_PCIE_SRIOV_FDL_BIT_POS) &
CN23XX_PCIE_SRIOV_FDL_MASK);
} else {
- ret = EINVAL;
+ ret = -EINVAL;
/* Under some virtual environments, extended PCI regs are
* inaccessible, in which case the above read will have failed.
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index adbc0d088070..9cb8b229c1b3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -1438,6 +1438,8 @@ enum {
NAT_MODE_ALL /* NAT on entire 4-tuple */
};
+#define CXGB4_FILTER_TYPE_MAX 2
+
/* Host shadow copy of ingress filter entry. This is in host native format
* and doesn't match the ordering or bit order, etc. of the hardware of the
* firmware command. The use of bit-field structure elements is purely to
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 12ef9ddd1e54..9f3173f86eed 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -31,7 +31,7 @@ enum cxgb4_ethtool_tests {
};
static const char cxgb4_selftest_strings[CXGB4_ETHTOOL_MAX_TEST][ETH_GSTRING_LEN] = {
- "Loop back test",
+ "Loop back test (offline)",
};
static const char * const flash_region_strings[] = {
@@ -2095,12 +2095,13 @@ static void cxgb4_self_test(struct net_device *netdev,
memset(data, 0, sizeof(u64) * CXGB4_ETHTOOL_MAX_TEST);
- if (!(adap->flags & CXGB4_FW_OK)) {
+ if (!(adap->flags & CXGB4_FULL_INIT_DONE) ||
+ !(adap->flags & CXGB4_FW_OK)) {
eth_test->flags |= ETH_TEST_FL_FAILED;
return;
}
- if (eth_test->flags == ETH_TEST_FL_OFFLINE)
+ if (eth_test->flags & ETH_TEST_FL_OFFLINE)
cxgb4_lb_test(netdev, &data[CXGB4_ETHTOOL_LB_TEST]);
if (data[CXGB4_ETHTOOL_LB_TEST])
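Besides gating the self-test on CXGB4_FULL_INIT_DONE, the hunk above replaces an equality test on eth_test->flags with a bitwise AND: the field is a bitmask, so "flags == ETH_TEST_FL_OFFLINE" stops matching as soon as any other ETH_TEST_FL_* bit is set. A standalone sketch of the difference (flag values as in linux/ethtool.h):

    #include <stdio.h>

    #define ETH_TEST_FL_OFFLINE (1 << 0)
    #define ETH_TEST_FL_FAILED  (1 << 1)

    int main(void)
    {
            unsigned int flags = ETH_TEST_FL_OFFLINE | ETH_TEST_FL_FAILED;

            printf("==: %d\n", flags == ETH_TEST_FL_OFFLINE);    /* 0: misses */
            printf("& : %d\n", !!(flags & ETH_TEST_FL_OFFLINE)); /* 1: matches */
            return 0;
    }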
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index 6251444ffa58..f642c1b475c4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -80,6 +80,19 @@ static void cxgb4_process_flow_match(struct net_device *dev,
struct flow_rule *rule,
struct ch_filter_specification *fs)
{
+ u16 addr_type = 0;
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_match_control match;
+
+ flow_rule_match_control(rule, &match);
+ addr_type = match.key->addr_type;
+ } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
+ addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
+ } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
+ addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+ }
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_match_basic match;
u16 ethtype_key, ethtype_mask;
@@ -102,7 +115,7 @@ static void cxgb4_process_flow_match(struct net_device *dev,
fs->mask.proto = match.mask->ip_proto;
}
- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
struct flow_match_ipv4_addrs match;
flow_rule_match_ipv4_addrs(rule, &match);
@@ -117,7 +130,7 @@ static void cxgb4_process_flow_match(struct net_device *dev,
memcpy(&fs->nat_fip[0], &match.key->src, sizeof(match.key->src));
}
- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
struct flow_match_ipv6_addrs match;
flow_rule_match_ipv6_addrs(rule, &match);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
index e377e50c2492..2e309f6673f7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
@@ -231,8 +231,26 @@ static void cxgb4_matchall_mirror_free(struct net_device *dev)
tc_port_matchall->ingress.viid_mirror = 0;
}
-static int cxgb4_matchall_alloc_filter(struct net_device *dev,
- struct tc_cls_matchall_offload *cls)
+static int cxgb4_matchall_del_filter(struct net_device *dev, u8 filter_type)
+{
+ struct cxgb4_tc_port_matchall *tc_port_matchall;
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+ int ret;
+
+ tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
+ ret = cxgb4_del_filter(dev, tc_port_matchall->ingress.tid[filter_type],
+ &tc_port_matchall->ingress.fs[filter_type]);
+ if (ret)
+ return ret;
+
+ tc_port_matchall->ingress.tid[filter_type] = 0;
+ return 0;
+}
+
+static int cxgb4_matchall_add_filter(struct net_device *dev,
+ struct tc_cls_matchall_offload *cls,
+ u8 filter_type)
{
struct netlink_ext_ack *extack = cls->common.extack;
struct cxgb4_tc_port_matchall *tc_port_matchall;
@@ -244,28 +262,24 @@ static int cxgb4_matchall_alloc_filter(struct net_device *dev,
/* Get a free filter entry TID, where we can insert this new
* rule. Only insert rule if its prio doesn't conflict with
* existing rules.
- *
- * 1 slot is enough to create a wildcard matchall VIID rule.
*/
- fidx = cxgb4_get_free_ftid(dev, PF_INET, false, cls->common.prio);
+ fidx = cxgb4_get_free_ftid(dev, filter_type ? PF_INET6 : PF_INET,
+ false, cls->common.prio);
if (fidx < 0) {
NL_SET_ERR_MSG_MOD(extack,
"No free LETCAM index available");
return -ENOMEM;
}
- ret = cxgb4_matchall_mirror_alloc(dev, cls);
- if (ret)
- return ret;
-
tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
- fs = &tc_port_matchall->ingress.fs;
+ fs = &tc_port_matchall->ingress.fs[filter_type];
memset(fs, 0, sizeof(*fs));
if (fidx < adap->tids.nhpftids)
fs->prio = 1;
fs->tc_prio = cls->common.prio;
fs->tc_cookie = cls->cookie;
+ fs->type = filter_type;
fs->hitcnts = 1;
fs->val.pfvf_vld = 1;
@@ -276,13 +290,39 @@ static int cxgb4_matchall_alloc_filter(struct net_device *dev,
ret = cxgb4_set_filter(dev, fidx, fs);
if (ret)
- goto out_free;
+ return ret;
+
+ tc_port_matchall->ingress.tid[filter_type] = fidx;
+ return 0;
+}
+
+static int cxgb4_matchall_alloc_filter(struct net_device *dev,
+ struct tc_cls_matchall_offload *cls)
+{
+ struct cxgb4_tc_port_matchall *tc_port_matchall;
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+ int ret, i;
+
+ tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
+
+ ret = cxgb4_matchall_mirror_alloc(dev, cls);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < CXGB4_FILTER_TYPE_MAX; i++) {
+ ret = cxgb4_matchall_add_filter(dev, cls, i);
+ if (ret)
+ goto out_free;
+ }
- tc_port_matchall->ingress.tid = fidx;
tc_port_matchall->ingress.state = CXGB4_MATCHALL_STATE_ENABLED;
return 0;
out_free:
+ while (i-- > 0)
+ cxgb4_matchall_del_filter(dev, i);
+
cxgb4_matchall_mirror_free(dev);
return ret;
}
@@ -293,20 +333,21 @@ static int cxgb4_matchall_free_filter(struct net_device *dev)
struct port_info *pi = netdev2pinfo(dev);
struct adapter *adap = netdev2adap(dev);
int ret;
+ u8 i;
tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
- ret = cxgb4_del_filter(dev, tc_port_matchall->ingress.tid,
- &tc_port_matchall->ingress.fs);
- if (ret)
- return ret;
+ for (i = 0; i < CXGB4_FILTER_TYPE_MAX; i++) {
+ ret = cxgb4_matchall_del_filter(dev, i);
+ if (ret)
+ return ret;
+ }
cxgb4_matchall_mirror_free(dev);
tc_port_matchall->ingress.packets = 0;
tc_port_matchall->ingress.bytes = 0;
tc_port_matchall->ingress.last_used = 0;
- tc_port_matchall->ingress.tid = 0;
tc_port_matchall->ingress.state = CXGB4_MATCHALL_STATE_DISABLED;
return 0;
}
@@ -362,8 +403,12 @@ int cxgb4_tc_matchall_destroy(struct net_device *dev,
tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
if (ingress) {
+ /* All the filter types of this matchall rule save the
+ * same cookie. So, checking for the first one is
+ * enough.
+ */
if (cls_matchall->cookie !=
- tc_port_matchall->ingress.fs.tc_cookie)
+ tc_port_matchall->ingress.fs[0].tc_cookie)
return -ENOENT;
return cxgb4_matchall_free_filter(dev);
@@ -379,21 +424,29 @@ int cxgb4_tc_matchall_destroy(struct net_device *dev,
int cxgb4_tc_matchall_stats(struct net_device *dev,
struct tc_cls_matchall_offload *cls_matchall)
{
+ u64 tmp_packets, tmp_bytes, packets = 0, bytes = 0;
struct cxgb4_tc_port_matchall *tc_port_matchall;
+ struct cxgb4_matchall_ingress_entry *ingress;
struct port_info *pi = netdev2pinfo(dev);
struct adapter *adap = netdev2adap(dev);
- u64 packets, bytes;
int ret;
+ u8 i;
tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
if (tc_port_matchall->ingress.state == CXGB4_MATCHALL_STATE_DISABLED)
return -ENOENT;
- ret = cxgb4_get_filter_counters(dev, tc_port_matchall->ingress.tid,
- &packets, &bytes,
- tc_port_matchall->ingress.fs.hash);
- if (ret)
- return ret;
+ ingress = &tc_port_matchall->ingress;
+ for (i = 0; i < CXGB4_FILTER_TYPE_MAX; i++) {
+ ret = cxgb4_get_filter_counters(dev, ingress->tid[i],
+ &tmp_packets, &tmp_bytes,
+ ingress->fs[i].hash);
+ if (ret)
+ return ret;
+
+ packets += tmp_packets;
+ bytes += tmp_bytes;
+ }
if (tc_port_matchall->ingress.packets != packets) {
flow_stats_update(&cls_matchall->stats,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.h
index e264b6e606c4..fe7ec423a4c9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.h
@@ -19,8 +19,9 @@ struct cxgb4_matchall_egress_entry {
struct cxgb4_matchall_ingress_entry {
enum cxgb4_matchall_state state; /* Current MATCHALL offload state */
- u32 tid; /* Index to hardware filter entry */
- struct ch_filter_specification fs; /* Filter entry */
+ u32 tid[CXGB4_FILTER_TYPE_MAX]; /* Index to hardware filter entries */
+ /* Filter entries */
+ struct ch_filter_specification fs[CXGB4_FILTER_TYPE_MAX];
u16 viid_mirror; /* Identifier for allocated Mirror VI */
u64 bytes; /* # of bytes hitting the filter */
u64 packets; /* # of packets hitting the filter */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 9b4028c0e34c..50f52fe2012f 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -2330,7 +2330,7 @@ static void cdan_cb(struct dpaa2_io_notification_ctx *ctx)
/* Update NAPI statistics */
ch->stats.cdan++;
- napi_schedule_irqoff(&ch->napi);
+ napi_schedule(&ch->napi);
}
/* Allocate and configure a DPCON object */
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index f50353cbb4db..f78ca7b343d2 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -270,7 +270,7 @@ static irqreturn_t enetc_msix(int irq, void *data)
for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i), 0);
- napi_schedule_irqoff(&v->napi);
+ napi_schedule(&v->napi);
return IRQ_HANDLED;
}
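The dpaa2 and enetc hunks above make the same substitution: napi_schedule_irqoff() assumes it is called with hardirqs disabled, which is no longer guaranteed once the handler runs in a threaded-interrupt context (forced irq threading, PREEMPT_RT), while napi_schedule() does its own local_irq_save()/restore() and is safe either way. A hedged sketch of the pattern (hypothetical names):

    #include <linux/interrupt.h>
    #include <linux/netdevice.h>

    /* Hypothetical handler illustrating the context-safe variant. */
    static irqreturn_t my_msix_handler(int irq, void *data)
    {
            struct napi_struct *napi = data;

            /* With threadirqs/PREEMPT_RT this runs with hardirqs enabled,
             * so the _irqoff variant's precondition does not hold;
             * napi_schedule() is correct in both contexts.
             */
            napi_schedule(napi);
            return IRQ_HANDLED;
    }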
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
index a9939550fa8f..1c4a535890da 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
@@ -1130,7 +1130,7 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv,
!is_zero_ether_addr(match.mask->src)) {
NL_SET_ERR_MSG_MOD(extack,
"Cannot match on both source and destination MAC");
- err = EINVAL;
+ err = -EINVAL;
goto free_filter;
}
@@ -1138,7 +1138,7 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv,
if (!is_broadcast_ether_addr(match.mask->dst)) {
NL_SET_ERR_MSG_MOD(extack,
"Masked matching on destination MAC not supported");
- err = EINVAL;
+ err = -EINVAL;
goto free_filter;
}
ether_addr_copy(filter->sid.dst_mac, match.key->dst);
@@ -1149,7 +1149,7 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv,
if (!is_broadcast_ether_addr(match.mask->src)) {
NL_SET_ERR_MSG_MOD(extack,
"Masked matching on source MAC not supported");
- err = EINVAL;
+ err = -EINVAL;
goto free_filter;
}
ether_addr_copy(filter->sid.src_mac, match.key->src);
@@ -1157,7 +1157,7 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv,
}
} else {
NL_SET_ERR_MSG_MOD(extack, "Unsupported, must include ETH_ADDRS");
- err = EINVAL;
+ err = -EINVAL;
goto free_filter;
}
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index f151d6e111dd..ef67e8599b39 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -1398,8 +1398,7 @@ static void enable_time_stamp(struct fman *fman)
{
struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs;
u16 fm_clk_freq = fman->state->fm_clk_freq;
- u32 tmp, intgr, ts_freq;
- u64 frac;
+ u32 tmp, intgr, ts_freq, frac;
ts_freq = (u32)(1 << fman->state->count1_micro_bit);
/* configure timestamp so that bit 8 will count 1 microsecond
diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
index 004c266802a8..bce3c9398887 100644
--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
@@ -1200,7 +1200,7 @@ int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
list_for_each(pos,
&dtsec->multicast_addr_hash->lsts[bucket]) {
hash_entry = ETH_HASH_ENTRY_OBJ(pos);
- if (hash_entry->addr == addr) {
+ if (hash_entry && hash_entry->addr == addr) {
list_del_init(&hash_entry->node);
kfree(hash_entry);
break;
@@ -1213,7 +1213,7 @@ int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
list_for_each(pos,
&dtsec->unicast_addr_hash->lsts[bucket]) {
hash_entry = ETH_HASH_ENTRY_OBJ(pos);
- if (hash_entry->addr == addr) {
+ if (hash_entry && hash_entry->addr == addr) {
list_del_init(&hash_entry->node);
kfree(hash_entry);
break;
diff --git a/drivers/net/ethernet/freescale/fman/fman_mac.h b/drivers/net/ethernet/freescale/fman/fman_mac.h
index dd6d0526f6c1..19f327efdaff 100644
--- a/drivers/net/ethernet/freescale/fman/fman_mac.h
+++ b/drivers/net/ethernet/freescale/fman/fman_mac.h
@@ -252,7 +252,7 @@ static inline struct eth_hash_t *alloc_hash_table(u16 size)
struct eth_hash_t *hash;
/* Allocate address hash table */
- hash = kmalloc_array(size, sizeof(struct eth_hash_t *), GFP_KERNEL);
+ hash = kmalloc(sizeof(*hash), GFP_KERNEL);
if (!hash)
return NULL;
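The old call allocated room for `size` pointers when the target is a single struct eth_hash_t; the per-bucket list heads are a separate array allocation, which is where kmalloc_array() actually belongs. The sizeof(*hash) idiom ties the size to the destination type. A simplified sketch; the continuation after the NULL check is assumed from the surrounding driver code:

    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct eth_hash_t {
            u16 size;
            struct list_head *lsts;
    };

    static struct eth_hash_t *alloc_hash_table_sketch(u16 size)
    {
            struct eth_hash_t *hash;

            /* one struct, sized from the pointee type */
            hash = kmalloc(sizeof(*hash), GFP_KERNEL);
            if (!hash)
                    return NULL;

            hash->size = size;
            /* the bucket list heads are a separate 'size'-element array */
            hash->lsts = kmalloc_array(hash->size, sizeof(*hash->lsts),
                                       GFP_KERNEL);
            if (!hash->lsts) {
                    kfree(hash);
                    return NULL;
            }
            return hash;
    }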
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index a5500ede4070..645764abdaae 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -852,7 +852,6 @@ int memac_set_tx_pause_frames(struct fman_mac *memac, u8 priority,
tmp = ioread32be(&regs->command_config);
tmp &= ~CMD_CFG_PFC_MODE;
- priority = 0;
iowrite32be(tmp, &regs->command_config);
@@ -982,7 +981,7 @@ int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr)
list_for_each(pos, &memac->multicast_addr_hash->lsts[hash]) {
hash_entry = ETH_HASH_ENTRY_OBJ(pos);
- if (hash_entry->addr == addr) {
+ if (hash_entry && hash_entry->addr == addr) {
list_del_init(&hash_entry->node);
kfree(hash_entry);
break;
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c
index 87b26f063cc8..c27df153f895 100644
--- a/drivers/net/ethernet/freescale/fman/fman_port.c
+++ b/drivers/net/ethernet/freescale/fman/fman_port.c
@@ -1767,6 +1767,7 @@ static int fman_port_probe(struct platform_device *of_dev)
struct fman_port *port;
struct fman *fman;
struct device_node *fm_node, *port_node;
+ struct platform_device *fm_pdev;
struct resource res;
struct resource *dev_res;
u32 val;
@@ -1791,8 +1792,14 @@ static int fman_port_probe(struct platform_device *of_dev)
goto return_err;
}
- fman = dev_get_drvdata(&of_find_device_by_node(fm_node)->dev);
+ fm_pdev = of_find_device_by_node(fm_node);
of_node_put(fm_node);
+ if (!fm_pdev) {
+ err = -EINVAL;
+ goto return_err;
+ }
+
+ fman = dev_get_drvdata(&fm_pdev->dev);
if (!fman) {
err = -EINVAL;
goto return_err;
diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.c b/drivers/net/ethernet/freescale/fman/fman_tgec.c
index 8c7eb878d5b4..41946b16f6c7 100644
--- a/drivers/net/ethernet/freescale/fman/fman_tgec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_tgec.c
@@ -626,7 +626,7 @@ int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
list_for_each(pos, &tgec->multicast_addr_hash->lsts[hash]) {
hash_entry = ETH_HASH_ENTRY_OBJ(pos);
- if (hash_entry->addr == addr) {
+ if (hash_entry && hash_entry->addr == addr) {
list_del_init(&hash_entry->node);
kfree(hash_entry);
break;
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 7d194d2a031a..fe140ff38f74 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -256,6 +256,7 @@ struct ice_vsi {
u32 tx_busy;
u32 rx_buf_failed;
u32 rx_page_failed;
+ u32 rx_gro_dropped;
u16 num_q_vectors;
u16 base_vector; /* IRQ base for OS reserved vectors */
enum ice_vsi_type type;
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 7fec9309d379..ba9375218fef 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -1581,7 +1581,7 @@ struct ice_aqc_get_set_rss_keys {
struct ice_aqc_get_set_rss_lut {
#define ICE_AQC_GSET_RSS_LUT_VSI_VALID BIT(15)
#define ICE_AQC_GSET_RSS_LUT_VSI_ID_S 0
-#define ICE_AQC_GSET_RSS_LUT_VSI_ID_M (0x1FF << ICE_AQC_GSET_RSS_LUT_VSI_ID_S)
+#define ICE_AQC_GSET_RSS_LUT_VSI_ID_M (0x3FF << ICE_AQC_GSET_RSS_LUT_VSI_ID_S)
__le16 vsi_id;
#define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S 0
#define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M \
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index ad5941ec7b11..34abfcea9858 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -1027,23 +1027,23 @@ void ice_deinit_hw(struct ice_hw *hw)
*/
enum ice_status ice_check_reset(struct ice_hw *hw)
{
- u32 cnt, reg = 0, grst_delay, uld_mask;
+ u32 cnt, reg = 0, grst_timeout, uld_mask;
/* Poll for Device Active state in case a recent CORER, GLOBR,
* or EMPR has occurred. The grst delay value is in 100ms units.
* Add 1sec for outstanding AQ commands that can take a long time.
*/
- grst_delay = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
- GLGEN_RSTCTL_GRSTDEL_S) + 10;
+ grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
+ GLGEN_RSTCTL_GRSTDEL_S) + 10;
- for (cnt = 0; cnt < grst_delay; cnt++) {
+ for (cnt = 0; cnt < grst_timeout; cnt++) {
mdelay(100);
reg = rd32(hw, GLGEN_RSTAT);
if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
break;
}
- if (cnt == grst_delay) {
+ if (cnt == grst_timeout) {
ice_debug(hw, ICE_DBG_INIT,
"Global reset polling failed to complete.\n");
return ICE_ERR_RESET_FAILED;
@@ -1718,8 +1718,7 @@ ice_alloc_res_exit:
* @num: number of resources
* @res: pointer to array that contains the resources to free
*/
-enum ice_status
-ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
+enum ice_status ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
{
struct ice_aqc_alloc_free_res_elem *buf;
enum ice_status status;
@@ -2121,7 +2120,7 @@ ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
* @cap_count: the number of capabilities
*
* Helper device to parse device (0x000B) capabilities list. For
- * capabilities shared between device and device, this relies on
+ * capabilities shared between device and function, this relies on
* ice_parse_common_caps.
*
* Loop through the list of provided capabilities and extract the relevant
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c
index dbbd8b6f9d1a..111d6bfe4222 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.c
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.c
@@ -357,7 +357,7 @@ void ice_devlink_unregister(struct ice_pf *pf)
*
* Create and register a devlink_port for this PF. Note that although each
* physical function is connected to a separate devlink instance, the port
- * will still be numbered according to the physical function id.
+ * will still be numbered according to the physical function ID.
*
* Return: zero on success or an error code on failure.
*/
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 06b93e97892d..9e8e9531cd87 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -59,8 +59,11 @@ static const struct ice_stats ice_gstrings_vsi_stats[] = {
ICE_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol),
ICE_VSI_STAT("rx_alloc_fail", rx_buf_failed),
ICE_VSI_STAT("rx_pg_alloc_fail", rx_page_failed),
+ ICE_VSI_STAT("rx_gro_dropped", rx_gro_dropped),
ICE_VSI_STAT("tx_errors", eth_stats.tx_errors),
ICE_VSI_STAT("tx_linearize", tx_linearize),
+ ICE_VSI_STAT("tx_busy", tx_busy),
+ ICE_VSI_STAT("tx_restart", tx_restart),
};
enum ice_ethtool_test_id {
@@ -100,6 +103,7 @@ static const struct ice_stats ice_gstrings_pf_stats[] = {
ICE_PF_STAT("rx_broadcast.nic", stats.eth.rx_broadcast),
ICE_PF_STAT("tx_broadcast.nic", stats.eth.tx_broadcast),
ICE_PF_STAT("tx_errors.nic", stats.eth.tx_errors),
+ ICE_PF_STAT("tx_timeout.nic", tx_timeout_count),
ICE_PF_STAT("rx_size_64.nic", stats.rx_size_64),
ICE_PF_STAT("tx_size_64.nic", stats.tx_size_64),
ICE_PF_STAT("rx_size_127.nic", stats.rx_size_127),
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
index 3c217e51b27e..b17ae3e20157 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
@@ -644,7 +644,7 @@ static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max)
* This function generates a key from a value, a don't care mask and a never
* match mask.
* upd, dc, and nm are optional parameters, and can be NULL:
- * upd == NULL --> udp mask is all 1's (update all bits)
+ * upd == NULL --> upd mask is all 1's (update all bits)
* dc == NULL --> dc mask is all 0's (no don't care bits)
* nm == NULL --> nm mask is all 0's (no never match bits)
*/
@@ -2921,6 +2921,8 @@ static void ice_free_flow_profs(struct ice_hw *hw, u8 blk_idx)
ICE_FLOW_ENTRY_HNDL(e));
list_del(&p->l_entry);
+
+ mutex_destroy(&p->entries_lock);
devm_kfree(ice_hw_to_dev(hw), p);
}
mutex_unlock(&hw->fl_profs_locks[blk_idx]);
@@ -3038,7 +3040,7 @@ void ice_clear_hw_tbls(struct ice_hw *hw)
memset(prof_redir->t, 0,
prof_redir->count * sizeof(*prof_redir->t));
- memset(es->t, 0, es->count * sizeof(*es->t));
+ memset(es->t, 0, es->count * sizeof(*es->t) * es->fvw);
memset(es->ref_count, 0, es->count * sizeof(*es->ref_count));
memset(es->written, 0, es->count * sizeof(*es->written));
}
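The widened memset above reflects the extract-sequence table holding es->count entries of es->fvw field-vector words each, stored flat; clearing only es->count elements left most of the table stale. A sketch of the assumed layout (field names taken from the hunk, struct abridged, ice_fv_word is a placeholder):

    #include <linux/string.h>
    #include <linux/types.h>

    struct ice_fv_word { u8 prot_id; __le16 off; };  /* placeholder */

    struct ice_es_sketch {
            u16 count;              /* number of profiles */
            u16 fvw;                /* field-vector words per profile */
            struct ice_fv_word *t;  /* flat count-by-fvw table */
    };

    static void ice_es_clear(struct ice_es_sketch *es)
    {
            /* entry (prof, word) lives at t[prof * fvw + word], so the
             * clear must cover count * fvw elements, not just count
             */
            memset(es->t, 0, (size_t)es->count * es->fvw * sizeof(*es->t));
    }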
@@ -3149,10 +3151,12 @@ enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
es->ref_count = devm_kcalloc(ice_hw_to_dev(hw), es->count,
sizeof(*es->ref_count),
GFP_KERNEL);
+ if (!es->ref_count)
+ goto err;
es->written = devm_kcalloc(ice_hw_to_dev(hw), es->count,
sizeof(*es->written), GFP_KERNEL);
- if (!es->ref_count)
+ if (!es->written)
goto err;
}
return 0;
@@ -3874,16 +3878,16 @@ err_ice_add_prof:
}
/**
- * ice_search_prof_id_low - Search for a profile tracking ID low level
+ * ice_search_prof_id - Search for a profile tracking ID
* @hw: pointer to the HW struct
* @blk: hardware block
* @id: profile tracking ID
*
- * This will search for a profile tracking ID which was previously added. This
- * version assumes that the caller has already acquired the prof map lock.
+ * This will search for a profile tracking ID which was previously added.
+ * The profile map lock should be held before calling this function.
*/
static struct ice_prof_map *
-ice_search_prof_id_low(struct ice_hw *hw, enum ice_block blk, u64 id)
+ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id)
{
struct ice_prof_map *entry = NULL;
struct ice_prof_map *map;
@@ -3898,26 +3902,6 @@ ice_search_prof_id_low(struct ice_hw *hw, enum ice_block blk, u64 id)
}
/**
- * ice_search_prof_id - Search for a profile tracking ID
- * @hw: pointer to the HW struct
- * @blk: hardware block
- * @id: profile tracking ID
- *
- * This will search for a profile tracking ID which was previously added.
- */
-static struct ice_prof_map *
-ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id)
-{
- struct ice_prof_map *entry;
-
- mutex_lock(&hw->blk[blk].es.prof_map_lock);
- entry = ice_search_prof_id_low(hw, blk, id);
- mutex_unlock(&hw->blk[blk].es.prof_map_lock);
-
- return entry;
-}
-
-/**
* ice_vsig_prof_id_count - count profiles in a VSIG
* @hw: pointer to the HW struct
* @blk: hardware block
@@ -4133,7 +4117,7 @@ enum ice_status ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id)
mutex_lock(&hw->blk[blk].es.prof_map_lock);
- pmap = ice_search_prof_id_low(hw, blk, id);
+ pmap = ice_search_prof_id(hw, blk, id);
if (!pmap) {
status = ICE_ERR_DOES_NOT_EXIST;
goto err_ice_rem_prof;
@@ -4166,22 +4150,28 @@ static enum ice_status
ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl,
struct list_head *chg)
{
+ enum ice_status status = 0;
struct ice_prof_map *map;
struct ice_chs_chg *p;
u16 i;
+ mutex_lock(&hw->blk[blk].es.prof_map_lock);
/* Get the details on the profile specified by the handle ID */
map = ice_search_prof_id(hw, blk, hdl);
- if (!map)
- return ICE_ERR_DOES_NOT_EXIST;
+ if (!map) {
+ status = ICE_ERR_DOES_NOT_EXIST;
+ goto err_ice_get_prof;
+ }
for (i = 0; i < map->ptg_cnt; i++)
if (!hw->blk[blk].es.written[map->prof_id]) {
/* add ES to change list */
p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p),
GFP_KERNEL);
- if (!p)
+ if (!p) {
+ status = ICE_ERR_NO_MEMORY;
goto err_ice_get_prof;
+ }
p->type = ICE_PTG_ES_ADD;
p->ptype = 0;
@@ -4196,11 +4186,10 @@ ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl,
list_add(&p->list_entry, chg);
}
- return 0;
-
err_ice_get_prof:
+ mutex_unlock(&hw->blk[blk].es.prof_map_lock);
/* let caller clean up the change list */
- return ICE_ERR_NO_MEMORY;
+ return status;
}
/**
@@ -4254,17 +4243,23 @@ static enum ice_status
ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk,
struct list_head *lst, u64 hdl)
{
+ enum ice_status status = 0;
struct ice_prof_map *map;
struct ice_vsig_prof *p;
u16 i;
+ mutex_lock(&hw->blk[blk].es.prof_map_lock);
map = ice_search_prof_id(hw, blk, hdl);
- if (!map)
- return ICE_ERR_DOES_NOT_EXIST;
+ if (!map) {
+ status = ICE_ERR_DOES_NOT_EXIST;
+ goto err_ice_add_prof_to_lst;
+ }
p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
- if (!p)
- return ICE_ERR_NO_MEMORY;
+ if (!p) {
+ status = ICE_ERR_NO_MEMORY;
+ goto err_ice_add_prof_to_lst;
+ }
p->profile_cookie = map->profile_cookie;
p->prof_id = map->prof_id;
@@ -4278,7 +4273,9 @@ ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk,
list_add(&p->list, lst);
- return 0;
+err_ice_add_prof_to_lst:
+ mutex_unlock(&hw->blk[blk].es.prof_map_lock);
+ return status;
}
/**
@@ -4496,16 +4493,12 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
+ enum ice_status status = 0;
struct ice_prof_map *map;
struct ice_vsig_prof *t;
struct ice_chs_chg *p;
u16 vsig_idx, i;
- /* Get the details on the profile specified by the handle ID */
- map = ice_search_prof_id(hw, blk, hdl);
- if (!map)
- return ICE_ERR_DOES_NOT_EXIST;
-
/* Error, if this VSIG already has this profile */
if (ice_has_prof_vsig(hw, blk, vsig, hdl))
return ICE_ERR_ALREADY_EXISTS;
@@ -4515,19 +4508,28 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
if (!t)
return ICE_ERR_NO_MEMORY;
+ mutex_lock(&hw->blk[blk].es.prof_map_lock);
+ /* Get the details on the profile specified by the handle ID */
+ map = ice_search_prof_id(hw, blk, hdl);
+ if (!map) {
+ status = ICE_ERR_DOES_NOT_EXIST;
+ goto err_ice_add_prof_id_vsig;
+ }
+
t->profile_cookie = map->profile_cookie;
t->prof_id = map->prof_id;
t->tcam_count = map->ptg_cnt;
/* create TCAM entries */
for (i = 0; i < map->ptg_cnt; i++) {
- enum ice_status status;
u16 tcam_idx;
/* add TCAM to change list */
p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
- if (!p)
+ if (!p) {
+ status = ICE_ERR_NO_MEMORY;
goto err_ice_add_prof_id_vsig;
+ }
/* allocate the TCAM entry index */
status = ice_alloc_tcam_ent(hw, blk, &tcam_idx);
@@ -4571,12 +4573,14 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
list_add(&t->list,
&hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
- return 0;
+ mutex_unlock(&hw->blk[blk].es.prof_map_lock);
+ return status;
err_ice_add_prof_id_vsig:
+ mutex_unlock(&hw->blk[blk].es.prof_map_lock);
/* let caller clean up the change list */
devm_kfree(ice_hw_to_dev(hw), t);
- return ICE_ERR_NO_MEMORY;
+ return status;
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c
index d74e5290677f..fe677621dd51 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.c
+++ b/drivers/net/ethernet/intel/ice/ice_flow.c
@@ -1187,7 +1187,7 @@ enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
if (list_empty(&hw->fl_profs[blk]))
return 0;
- mutex_lock(&hw->fl_profs_locks[blk]);
+ mutex_lock(&hw->rss_locks);
list_for_each_entry_safe(p, t, &hw->fl_profs[blk], l_entry)
if (test_bit(vsi_handle, p->vsis)) {
status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
@@ -1195,12 +1195,12 @@ enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
break;
if (bitmap_empty(p->vsis, ICE_MAX_VSI)) {
- status = ice_flow_rem_prof_sync(hw, blk, p);
+ status = ice_flow_rem_prof(hw, blk, p->id);
if (status)
break;
}
}
- mutex_unlock(&hw->fl_profs_locks[blk]);
+ mutex_unlock(&hw->rss_locks);
return status;
}
@@ -1597,7 +1597,8 @@ enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
*/
u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
{
- struct ice_rss_cfg *r, *rss_cfg = NULL;
+ u64 rss_hash = ICE_HASH_INVALID;
+ struct ice_rss_cfg *r;
/* verify if the protocol header is non zero and VSI is valid */
if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
@@ -1607,10 +1608,10 @@ u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
list_for_each_entry(r, &hw->rss_list_head, l_entry)
if (test_bit(vsi_handle, r->vsis) &&
r->packet_hdr == hdrs) {
- rss_cfg = r;
+ rss_hash = r->hashed_flds;
break;
}
mutex_unlock(&hw->rss_locks);
- return rss_cfg ? rss_cfg->hashed_flds : ICE_HASH_INVALID;
+ return rss_hash;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 92e4abca62a4..90abc8612a6a 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -57,7 +57,7 @@
#define PRTDCB_GENS 0x00083020
#define PRTDCB_GENS_DCBX_STATUS_S 0
#define PRTDCB_GENS_DCBX_STATUS_M ICE_M(0x7, 0)
-#define PRTDCB_TUP2TC 0x001D26C0 /* Reset Source: CORER */
+#define PRTDCB_TUP2TC 0x001D26C0
#define GL_PREEXT_L2_PMASK0(_i) (0x0020F0FC + ((_i) * 4))
#define GL_PREEXT_L2_PMASK1(_i) (0x0020F108 + ((_i) * 4))
#define GLFLXP_RXDID_FLX_WRD_0(_i) (0x0045c800 + ((_i) * 4))
@@ -362,6 +362,7 @@
#define GLV_TEPC(_VSI) (0x00312000 + ((_VSI) * 4))
#define GLV_UPRCL(_i) (0x003B2000 + ((_i) * 8))
#define GLV_UPTCL(_i) (0x0030A000 + ((_i) * 8))
+#define PRTRPB_RDPC 0x000AC260
#define VSIQF_FD_CNT(_VSI) (0x00464000 + ((_VSI) * 4))
#define VSIQF_FD_CNT_FD_GCNT_S 0
#define VSIQF_FD_CNT_FD_GCNT_M ICE_M(0x3FFF, 0)
@@ -378,6 +379,5 @@
#define PFPM_WUS_FW_RST_WK_M BIT(31)
#define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4))
#define VFINT_DYN_CTLN_CLEARPBA_M BIT(1)
-#define PRTRPB_RDPC 0x000AC260
#endif /* _ICE_HW_AUTOGEN_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index 14dfbbc1b2cf..4ec24c3e813f 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -601,6 +601,7 @@ struct ice_tlan_ctx {
/* shorter macros makes the table fit but are terse */
#define ICE_RX_PTYPE_NOF ICE_RX_PTYPE_NOT_FRAG
+#define ICE_RX_PTYPE_FRG ICE_RX_PTYPE_FRAG
/* Lookup table mapping the HW PTYPE to the bit field for decoding */
static const struct ice_rx_ptype_decoded ice_ptype_lkup[] = {
@@ -608,6 +609,319 @@ static const struct ice_rx_ptype_decoded ice_ptype_lkup[] = {
ICE_PTT_UNUSED_ENTRY(0),
ICE_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
ICE_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
+ ICE_PTT_UNUSED_ENTRY(3),
+ ICE_PTT_UNUSED_ENTRY(4),
+ ICE_PTT_UNUSED_ENTRY(5),
+ ICE_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
+ ICE_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
+ ICE_PTT_UNUSED_ENTRY(8),
+ ICE_PTT_UNUSED_ENTRY(9),
+ ICE_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
+ ICE_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
+ ICE_PTT_UNUSED_ENTRY(12),
+ ICE_PTT_UNUSED_ENTRY(13),
+ ICE_PTT_UNUSED_ENTRY(14),
+ ICE_PTT_UNUSED_ENTRY(15),
+ ICE_PTT_UNUSED_ENTRY(16),
+ ICE_PTT_UNUSED_ENTRY(17),
+ ICE_PTT_UNUSED_ENTRY(18),
+ ICE_PTT_UNUSED_ENTRY(19),
+ ICE_PTT_UNUSED_ENTRY(20),
+ ICE_PTT_UNUSED_ENTRY(21),
+
+ /* Non Tunneled IPv4 */
+ ICE_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
+ ICE_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
+ ICE_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(25),
+ ICE_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
+ ICE_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
+ ICE_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
+
+ /* IPv4 --> IPv4 */
+ ICE_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
+ ICE_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
+ ICE_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(32),
+ ICE_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
+ ICE_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
+ ICE_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> IPv6 */
+ ICE_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
+ ICE_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
+ ICE_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(39),
+ ICE_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
+ ICE_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
+ ICE_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT */
+ ICE_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 --> GRE/NAT --> IPv4 */
+ ICE_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
+ ICE_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
+ ICE_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(47),
+ ICE_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
+ ICE_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
+ ICE_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> IPv6 */
+ ICE_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
+ ICE_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
+ ICE_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(54),
+ ICE_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
+ ICE_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
+ ICE_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> MAC */
+ ICE_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 --> GRE/NAT --> MAC --> IPv4 */
+ ICE_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
+ ICE_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
+ ICE_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(62),
+ ICE_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
+ ICE_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
+ ICE_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT -> MAC --> IPv6 */
+ ICE_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
+ ICE_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
+ ICE_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(69),
+ ICE_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
+ ICE_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
+ ICE_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> MAC/VLAN */
+ ICE_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
+ ICE_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
+ ICE_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
+ ICE_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(77),
+ ICE_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
+ ICE_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
+ ICE_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
+ ICE_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
+ ICE_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
+ ICE_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(84),
+ ICE_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
+ ICE_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
+ ICE_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
+
+ /* Non Tunneled IPv6 */
+ ICE_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
+ ICE_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
+ ICE_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3),
+ ICE_PTT_UNUSED_ENTRY(91),
+ ICE_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
+ ICE_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
+ ICE_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
+
+ /* IPv6 --> IPv4 */
+ ICE_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
+ ICE_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
+ ICE_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(98),
+ ICE_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
+ ICE_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
+ ICE_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> IPv6 */
+ ICE_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
+ ICE_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
+ ICE_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(105),
+ ICE_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
+ ICE_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
+ ICE_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT */
+ ICE_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> IPv4 */
+ ICE_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
+ ICE_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
+ ICE_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(113),
+ ICE_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
+ ICE_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
+ ICE_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> IPv6 */
+ ICE_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
+ ICE_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
+ ICE_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(120),
+ ICE_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
+ ICE_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
+ ICE_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC */
+ ICE_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> MAC -> IPv4 */
+ ICE_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
+ ICE_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
+ ICE_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(128),
+ ICE_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
+ ICE_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
+ ICE_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC -> IPv6 */
+ ICE_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
+ ICE_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
+ ICE_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(135),
+ ICE_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
+ ICE_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
+ ICE_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN */
+ ICE_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
+ ICE_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
+ ICE_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
+ ICE_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(143),
+ ICE_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
+ ICE_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
+ ICE_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
+ ICE_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
+ ICE_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
+ ICE_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(150),
+ ICE_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
+ ICE_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
+ ICE_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
+
+ /* unused entries */
+ ICE_PTT_UNUSED_ENTRY(154),
+ ICE_PTT_UNUSED_ENTRY(155),
+ ICE_PTT_UNUSED_ENTRY(156),
+ ICE_PTT_UNUSED_ENTRY(157),
+ ICE_PTT_UNUSED_ENTRY(158),
+ ICE_PTT_UNUSED_ENTRY(159),
+
+ ICE_PTT_UNUSED_ENTRY(160),
+ ICE_PTT_UNUSED_ENTRY(161),
+ ICE_PTT_UNUSED_ENTRY(162),
+ ICE_PTT_UNUSED_ENTRY(163),
+ ICE_PTT_UNUSED_ENTRY(164),
+ ICE_PTT_UNUSED_ENTRY(165),
+ ICE_PTT_UNUSED_ENTRY(166),
+ ICE_PTT_UNUSED_ENTRY(167),
+ ICE_PTT_UNUSED_ENTRY(168),
+ ICE_PTT_UNUSED_ENTRY(169),
+
+ ICE_PTT_UNUSED_ENTRY(170),
+ ICE_PTT_UNUSED_ENTRY(171),
+ ICE_PTT_UNUSED_ENTRY(172),
+ ICE_PTT_UNUSED_ENTRY(173),
+ ICE_PTT_UNUSED_ENTRY(174),
+ ICE_PTT_UNUSED_ENTRY(175),
+ ICE_PTT_UNUSED_ENTRY(176),
+ ICE_PTT_UNUSED_ENTRY(177),
+ ICE_PTT_UNUSED_ENTRY(178),
+ ICE_PTT_UNUSED_ENTRY(179),
+
+ ICE_PTT_UNUSED_ENTRY(180),
+ ICE_PTT_UNUSED_ENTRY(181),
+ ICE_PTT_UNUSED_ENTRY(182),
+ ICE_PTT_UNUSED_ENTRY(183),
+ ICE_PTT_UNUSED_ENTRY(184),
+ ICE_PTT_UNUSED_ENTRY(185),
+ ICE_PTT_UNUSED_ENTRY(186),
+ ICE_PTT_UNUSED_ENTRY(187),
+ ICE_PTT_UNUSED_ENTRY(188),
+ ICE_PTT_UNUSED_ENTRY(189),
+
+ ICE_PTT_UNUSED_ENTRY(190),
+ ICE_PTT_UNUSED_ENTRY(191),
+ ICE_PTT_UNUSED_ENTRY(192),
+ ICE_PTT_UNUSED_ENTRY(193),
+ ICE_PTT_UNUSED_ENTRY(194),
+ ICE_PTT_UNUSED_ENTRY(195),
+ ICE_PTT_UNUSED_ENTRY(196),
+ ICE_PTT_UNUSED_ENTRY(197),
+ ICE_PTT_UNUSED_ENTRY(198),
+ ICE_PTT_UNUSED_ENTRY(199),
+
+ ICE_PTT_UNUSED_ENTRY(200),
+ ICE_PTT_UNUSED_ENTRY(201),
+ ICE_PTT_UNUSED_ENTRY(202),
+ ICE_PTT_UNUSED_ENTRY(203),
+ ICE_PTT_UNUSED_ENTRY(204),
+ ICE_PTT_UNUSED_ENTRY(205),
+ ICE_PTT_UNUSED_ENTRY(206),
+ ICE_PTT_UNUSED_ENTRY(207),
+ ICE_PTT_UNUSED_ENTRY(208),
+ ICE_PTT_UNUSED_ENTRY(209),
+
+ ICE_PTT_UNUSED_ENTRY(210),
+ ICE_PTT_UNUSED_ENTRY(211),
+ ICE_PTT_UNUSED_ENTRY(212),
+ ICE_PTT_UNUSED_ENTRY(213),
+ ICE_PTT_UNUSED_ENTRY(214),
+ ICE_PTT_UNUSED_ENTRY(215),
+ ICE_PTT_UNUSED_ENTRY(216),
+ ICE_PTT_UNUSED_ENTRY(217),
+ ICE_PTT_UNUSED_ENTRY(218),
+ ICE_PTT_UNUSED_ENTRY(219),
+
+ ICE_PTT_UNUSED_ENTRY(220),
+ ICE_PTT_UNUSED_ENTRY(221),
+ ICE_PTT_UNUSED_ENTRY(222),
+ ICE_PTT_UNUSED_ENTRY(223),
+ ICE_PTT_UNUSED_ENTRY(224),
+ ICE_PTT_UNUSED_ENTRY(225),
+ ICE_PTT_UNUSED_ENTRY(226),
+ ICE_PTT_UNUSED_ENTRY(227),
+ ICE_PTT_UNUSED_ENTRY(228),
+ ICE_PTT_UNUSED_ENTRY(229),
+
+ ICE_PTT_UNUSED_ENTRY(230),
+ ICE_PTT_UNUSED_ENTRY(231),
+ ICE_PTT_UNUSED_ENTRY(232),
+ ICE_PTT_UNUSED_ENTRY(233),
+ ICE_PTT_UNUSED_ENTRY(234),
+ ICE_PTT_UNUSED_ENTRY(235),
+ ICE_PTT_UNUSED_ENTRY(236),
+ ICE_PTT_UNUSED_ENTRY(237),
+ ICE_PTT_UNUSED_ENTRY(238),
+ ICE_PTT_UNUSED_ENTRY(239),
+
+ ICE_PTT_UNUSED_ENTRY(240),
+ ICE_PTT_UNUSED_ENTRY(241),
+ ICE_PTT_UNUSED_ENTRY(242),
+ ICE_PTT_UNUSED_ENTRY(243),
+ ICE_PTT_UNUSED_ENTRY(244),
+ ICE_PTT_UNUSED_ENTRY(245),
+ ICE_PTT_UNUSED_ENTRY(246),
+ ICE_PTT_UNUSED_ENTRY(247),
+ ICE_PTT_UNUSED_ENTRY(248),
+ ICE_PTT_UNUSED_ENTRY(249),
+
+ ICE_PTT_UNUSED_ENTRY(250),
+ ICE_PTT_UNUSED_ENTRY(251),
+ ICE_PTT_UNUSED_ENTRY(252),
+ ICE_PTT_UNUSED_ENTRY(253),
+ ICE_PTT_UNUSED_ENTRY(254),
+ ICE_PTT_UNUSED_ENTRY(255),
};
static inline struct ice_rx_ptype_decoded ice_decode_rx_desc_ptype(u16 ptype)
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 84202c814c3b..f2682776f8c8 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -2017,6 +2017,13 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc)
if (!vsi)
return -EINVAL;
+ /* Don't enable VLAN pruning if the netdev is currently in promiscuous
+ * mode. VLAN pruning will be enabled when the interface exits
+ * promiscuous mode if any VLAN filters are active.
+ */
+ if (vsi->netdev && vsi->netdev->flags & IFF_PROMISC && ena)
+ return 0;
+
pf = vsi->back;
ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
if (!ctxt)
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index c0bde24ab344..22e3d32463f1 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -369,6 +369,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
~IFF_PROMISC;
goto out_promisc;
}
+ ice_cfg_vlan_pruning(vsi, false, false);
}
} else {
/* Clear Rx filter to remove traffic from wire */
@@ -381,6 +382,8 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
IFF_PROMISC;
goto out_promisc;
}
+ if (vsi->num_vlan > 1)
+ ice_cfg_vlan_pruning(vsi, true, false);
}
}
}
@@ -4467,7 +4470,7 @@ err_reinit:
* Power Management callback to quiesce the device and prepare
* for D3 transition.
*/
-static int ice_suspend(struct device *dev)
+static int __maybe_unused ice_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct ice_pf *pf;
@@ -4531,7 +4534,7 @@ static int ice_suspend(struct device *dev)
* ice_resume - PM callback for waking up from D3
* @dev: generic device information structure
*/
-static int ice_resume(struct device *dev)
+static int __maybe_unused ice_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
enum ice_reset_req reset_type;
@@ -5290,6 +5293,7 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
vsi->tx_linearize = 0;
vsi->rx_buf_failed = 0;
vsi->rx_page_failed = 0;
+ vsi->rx_gro_dropped = 0;
rcu_read_lock();
@@ -5304,6 +5308,7 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
vsi_stats->rx_bytes += bytes;
vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed;
vsi->rx_page_failed += ring->rx_stats.alloc_page_failed;
+ vsi->rx_gro_dropped += ring->rx_stats.gro_dropped;
}
/* update XDP Tx rings counters */
@@ -5335,7 +5340,7 @@ void ice_update_vsi_stats(struct ice_vsi *vsi)
ice_update_eth_stats(vsi);
cur_ns->tx_errors = cur_es->tx_errors;
- cur_ns->rx_dropped = cur_es->rx_discards;
+ cur_ns->rx_dropped = cur_es->rx_discards + vsi->rx_gro_dropped;
cur_ns->tx_dropped = cur_es->tx_discards;
cur_ns->multicast = cur_es->rx_multicast;
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index 355f727563e4..44a228530253 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -170,7 +170,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
return ICE_ERR_PARAM;
}
- /* query the current node information from FW before additing it
+ /* query the current node information from FW before adding it
* to the SW DB
*/
status = ice_sched_query_elem(hw, le32_to_cpu(info->node_teid), &elem);
@@ -578,7 +578,7 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
/**
* ice_aq_rl_profile - performs a rate limiting task
* @hw: pointer to the HW struct
- * @opcode:opcode for add, query, or remove profile(s)
+ * @opcode: opcode for add, query, or remove profile(s)
* @num_profiles: the number of profiles
* @buf: pointer to buffer
* @buf_size: buffer size in bytes
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index a508d4f463e9..9d0d6b0025cf 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -631,10 +631,8 @@ ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
dma_addr_t dma;
/* since we are recycling buffers we should seldom need to alloc */
- if (likely(page)) {
- rx_ring->rx_stats.page_reuse_count++;
+ if (likely(page))
return true;
- }
/* alloc new page for storage */
page = dev_alloc_pages(ice_rx_pg_order(rx_ring));
@@ -1033,7 +1031,6 @@ static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
if (ice_can_reuse_rx_page(rx_buf)) {
/* hand second half of page back to the ring */
ice_reuse_rx_page(rx_ring, rx_buf);
- rx_ring->rx_stats.page_reuse_count++;
} else {
/* we are not reusing the buffer so unmap it */
dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma,
@@ -1254,12 +1251,12 @@ construct_skb:
* @itr: ITR value to update
*
* Calculate how big of an increment should be applied to the ITR value passed
- * in based on wmem_default, SKB overhead, Ethernet overhead, and the current
+ * in based on wmem_default, SKB overhead, ethernet overhead, and the current
* link speed.
*
* The following is a calculation derived from:
* wmem_default / (size + overhead) = desired_pkts_per_int
- * rate / bits_per_byte / (size + Ethernet overhead) = pkt_rate
+ * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
* (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
*
* Assuming wmem_default is 212992 and overhead is 640 bytes per
@@ -2294,10 +2291,30 @@ static bool __ice_chk_linearize(struct sk_buff *skb)
/* Walk through fragments adding latest fragment, testing it, and
* then removing stale fragments from the sum.
*/
- stale = &skb_shinfo(skb)->frags[0];
- for (;;) {
+ for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
+ int stale_size = skb_frag_size(stale);
+
sum += skb_frag_size(frag++);
+ /* The stale fragment may present us with a smaller
+ * descriptor than the actual fragment size. To account
+ * for that we need to remove all the data on the front and
+ * figure out what the remainder would be in the last
+ * descriptor associated with the fragment.
+ */
+ if (stale_size > ICE_MAX_DATA_PER_TXD) {
+ int align_pad = -(skb_frag_off(stale)) &
+ (ICE_MAX_READ_REQ_SIZE - 1);
+
+ sum -= align_pad;
+ stale_size -= align_pad;
+
+ do {
+ sum -= ICE_MAX_DATA_PER_TXD_ALIGNED;
+ stale_size -= ICE_MAX_DATA_PER_TXD_ALIGNED;
+ } while (stale_size > ICE_MAX_DATA_PER_TXD);
+ }
+
/* if sum is negative we failed to make sufficient progress */
if (sum < 0)
return true;
@@ -2305,7 +2322,7 @@ static bool __ice_chk_linearize(struct sk_buff *skb)
if (!nr_frags--)
break;
- sum -= skb_frag_size(stale++);
+ sum -= stale_size;
}
return false;
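The reworked loop handles stale fragments larger than one descriptor: such a fragment was split across several aligned descriptors when it was added, so only the remainder in its last descriptor should remain for the trailing subtraction. Worked numbers for a hypothetical fragment, under constants as assumed from ice_txrx.h (ICE_MAX_DATA_PER_TXD = 16K - 1 = 16383, ICE_MAX_READ_REQ_SIZE = 4096, ICE_MAX_DATA_PER_TXD_ALIGNED = 12288):

    /* Worked example (hypothetical fragment, constants assumed as noted):
     *
     *   stale_size = 32768, skb_frag_off(stale) = 100
     *   align_pad  = -100 & (4096 - 1)  = 3996
     *   stale_size = 32768 - 3996      = 28772
     *   28772 > 16383 -> stale_size = 28772 - 12288 = 16484
     *   16484 > 16383 -> stale_size = 16484 - 12288 =  4196
     *   4196 <= 16383 -> stop
     *
     * By the time "sum -= stale_size" runs, the earlier descriptors'
     * worth (3996 + 2 * 12288) was already removed inside the if-block,
     * so only the 4196-byte remainder in the last descriptor is
     * subtracted -- instead of the full 32768 as before.
     */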
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index e70c4619edc3..51b4df7a59d2 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -193,7 +193,7 @@ struct ice_rxq_stats {
u64 non_eop_descs;
u64 alloc_page_failed;
u64 alloc_buf_failed;
- u64 page_reuse_count;
+ u64 gro_dropped; /* GRO returned dropped */
};
/* this enum matches hardware bits and is meant to be used by DYN_CTLN
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
index 02b12736ea80..bc2f4390b51d 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -191,7 +191,12 @@ ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
(vlan_tag & VLAN_VID_MASK))
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
- napi_gro_receive(&rx_ring->q_vector->napi, skb);
+ if (napi_gro_receive(&rx_ring->q_vector->napi, skb) == GRO_DROP) {
+ /* this is tracked separately to help us debug stack drops */
+ rx_ring->rx_stats.gro_dropped++;
+ netdev_dbg(rx_ring->netdev, "Receive Queue %d: Dropped packet from GRO\n",
+ rx_ring->q_index);
+ }
}
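Note: the hunk above replaces a fire-and-forget napi_gro_receive() with one that inspects the return value. A minimal sketch of the pattern, with hypothetical my_* names standing in for the driver's types:

/* Count stack-level GRO drops separately from driver drops, so the
 * two failure modes can be told apart when debugging.
 */
static void my_receive(struct my_ring *ring, struct sk_buff *skb)
{
        if (napi_gro_receive(&ring->napi, skb) == GRO_DROP)
                ring->stats.gro_dropped++;      /* stack refused the skb */
}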
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 1eb83d9b0546..4cdccfadf274 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -321,7 +321,7 @@ struct ice_nvm_info {
u32 flash_size; /* Size of available flash in bytes */
u8 major_ver; /* major version of NVM package */
u8 minor_ver; /* minor version of dev starter */
- u8 blank_nvm_mode; /* is NVM empty (no FW present) */
+ u8 blank_nvm_mode; /* is NVM empty (no FW present) */
};
struct ice_link_default_override_tlv {
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index 7061730f0f37..71497776ac62 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -932,6 +932,8 @@ static int ice_set_per_vf_res(struct ice_pf *pf)
num_msix_per_vf = ICE_NUM_VF_MSIX_MED;
} else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_SMALL) {
num_msix_per_vf = ICE_NUM_VF_MSIX_SMALL;
+ } else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MULTIQ_MIN) {
+ num_msix_per_vf = ICE_NUM_VF_MSIX_MULTIQ_MIN;
} else if (msix_avail_per_vf >= ICE_MIN_INTR_PER_VF) {
num_msix_per_vf = ICE_MIN_INTR_PER_VF;
} else {
@@ -2972,8 +2974,8 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
vsi->max_frame = qpi->rxq.max_pkt_size;
}
- /* VF can request to configure less than allocated queues
- * or default allocated queues. So update the VSI with new number
+ /* VF can request to configure less than allocated queues or default
+ * allocated queues. So update the VSI with new number
*/
vsi->num_txq = num_txq;
vsi->num_rxq = num_rxq;
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
index 049e0b583383..0f519fba3770 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
@@ -32,6 +32,7 @@
#define ICE_MAX_RSS_QS_PER_VF 16
#define ICE_NUM_VF_MSIX_MED 17
#define ICE_NUM_VF_MSIX_SMALL 5
+#define ICE_NUM_VF_MSIX_MULTIQ_MIN 3
#define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1)
#define ICE_MAX_VF_RESET_TRIES 40
#define ICE_MAX_VF_RESET_SLEEP_MS 20
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 87862918bc7a..20ac5fca68c6 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -298,7 +298,6 @@ static void ice_xsk_remove_umem(struct ice_vsi *vsi, u16 qid)
}
}
-
/**
* ice_xsk_umem_disable - disable a UMEM region
* @vsi: Current VSI
@@ -594,7 +593,6 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
if (!size)
break;
-
rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
rx_buf->xdp->data_end = rx_buf->xdp->data + size;
xsk_buff_dma_sync_for_cpu(rx_buf->xdp);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 6a3f356640a0..4298a029be55 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -3508,6 +3508,7 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
err = mvpp2_rx_refill(port, bm_pool, pp, pool);
if (err) {
netdev_err(port->dev, "failed to refill BM pools\n");
+ dev_kfree_skb_any(skb);
goto err_drop_frame;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 866381e72960..08d101138fbe 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -1177,14 +1177,15 @@ static void mlxsw_devlink_trap_fini(struct devlink *devlink,
static int mlxsw_devlink_trap_action_set(struct devlink *devlink,
const struct devlink_trap *trap,
- enum devlink_trap_action action)
+ enum devlink_trap_action action,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
if (!mlxsw_driver->trap_action_set)
return -EOPNOTSUPP;
- return mlxsw_driver->trap_action_set(mlxsw_core, trap, action);
+ return mlxsw_driver->trap_action_set(mlxsw_core, trap, action, extack);
}
static int
@@ -1202,14 +1203,15 @@ mlxsw_devlink_trap_group_init(struct devlink *devlink,
static int
mlxsw_devlink_trap_group_set(struct devlink *devlink,
const struct devlink_trap_group *group,
- const struct devlink_trap_policer *policer)
+ const struct devlink_trap_policer *policer,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
if (!mlxsw_driver->trap_group_set)
return -EOPNOTSUPP;
- return mlxsw_driver->trap_group_set(mlxsw_core, group, policer);
+ return mlxsw_driver->trap_group_set(mlxsw_core, group, policer, extack);
}
static int
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index c1c1e039323a..11af3308f8cc 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -89,13 +89,15 @@ struct mlxsw_listener {
};
#define __MLXSW_RXL(_func, _trap_id, _en_action, _is_ctrl, _en_trap_group, \
- _dis_action, _enabled_on_register, _dis_trap_group) \
+ _dis_action, _enabled_on_register, _dis_trap_group, \
+ _mirror_reason) \
{ \
.trap_id = MLXSW_TRAP_ID_##_trap_id, \
.rx_listener = \
{ \
.func = _func, \
.local_port = MLXSW_PORT_DONT_CARE, \
+ .mirror_reason = _mirror_reason, \
.trap_id = MLXSW_TRAP_ID_##_trap_id, \
}, \
.en_action = MLXSW_REG_HPKT_ACTION_##_en_action, \
@@ -109,12 +111,17 @@ struct mlxsw_listener {
#define MLXSW_RXL(_func, _trap_id, _en_action, _is_ctrl, _trap_group, \
_dis_action) \
__MLXSW_RXL(_func, _trap_id, _en_action, _is_ctrl, _trap_group, \
- _dis_action, true, _trap_group)
+ _dis_action, true, _trap_group, 0)
#define MLXSW_RXL_DIS(_func, _trap_id, _en_action, _is_ctrl, _en_trap_group, \
_dis_action, _dis_trap_group) \
__MLXSW_RXL(_func, _trap_id, _en_action, _is_ctrl, _en_trap_group, \
- _dis_action, false, _dis_trap_group)
+ _dis_action, false, _dis_trap_group, 0)
+
+#define MLXSW_RXL_MIRROR(_func, _session_id, _trap_group, _mirror_reason) \
+ __MLXSW_RXL(_func, MIRROR_SESSION##_session_id, TRAP_TO_CPU, false, \
+ _trap_group, TRAP_TO_CPU, true, _trap_group, \
+ _mirror_reason)
#define MLXSW_EVENTL(_func, _trap_id, _trap_group) \
{ \
@@ -326,12 +333,14 @@ struct mlxsw_driver {
const struct devlink_trap *trap, void *trap_ctx);
int (*trap_action_set)(struct mlxsw_core *mlxsw_core,
const struct devlink_trap *trap,
- enum devlink_trap_action action);
+ enum devlink_trap_action action,
+ struct netlink_ext_ack *extack);
int (*trap_group_init)(struct mlxsw_core *mlxsw_core,
const struct devlink_trap_group *group);
int (*trap_group_set)(struct mlxsw_core *mlxsw_core,
const struct devlink_trap_group *group,
- const struct devlink_trap_policer *policer);
+ const struct devlink_trap_policer *policer,
+ struct netlink_ext_ack *extack);
int (*trap_policer_init)(struct mlxsw_core *mlxsw_core,
const struct devlink_trap_policer *policer);
void (*trap_policer_fini)(struct mlxsw_core *mlxsw_core,
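Note: to see what the new MLXSW_RXL_MIRROR wrapper produces, here is a hand expansion of one invocation through __MLXSW_RXL, abbreviated to the fields visible in this hunk (session id 0 shown):

/* MLXSW_RXL_MIRROR(mlxsw_sp_rx_drop_listener, 0, SP_BUFFER_DISCARDS,
 *                  MLXSW_SP_MIRROR_REASON_INGRESS_WRED)
 * expands, in part, to:
 */
{
        .trap_id = MLXSW_TRAP_ID_MIRROR_SESSION0,
        .rx_listener = {
                .func = mlxsw_sp_rx_drop_listener,
                .local_port = MLXSW_PORT_DONT_CARE,
                .mirror_reason = MLXSW_SP_MIRROR_REASON_INGRESS_WRED,
                .trap_id = MLXSW_TRAP_ID_MIRROR_SESSION0,
        },
        .en_action = MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
        /* dis_action is likewise TRAP_TO_CPU, the listener is enabled
         * on register, and both trap groups resolve to the
         * SP_BUFFER_DISCARDS group.
         */
}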
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 28a2576eb783..079b080de7f7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -5614,6 +5614,7 @@ enum mlxsw_reg_htgt_trap_group {
MLXSW_REG_HTGT_TRAP_GROUP_SP_L3_EXCEPTIONS,
MLXSW_REG_HTGT_TRAP_GROUP_SP_TUNNEL_DISCARDS,
MLXSW_REG_HTGT_TRAP_GROUP_SP_ACL_DISCARDS,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_BUFFER_DISCARDS,
__MLXSW_REG_HTGT_TRAP_GROUP_MAX,
MLXSW_REG_HTGT_TRAP_GROUP_MAX = __MLXSW_REG_HTGT_TRAP_GROUP_MAX - 1
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 519eb44e4097..fdf9aa8314b2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -3055,6 +3055,7 @@ static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops;
mlxsw_sp->span_ops = &mlxsw_sp1_span_ops;
mlxsw_sp->policer_core_ops = &mlxsw_sp1_policer_core_ops;
+ mlxsw_sp->trap_ops = &mlxsw_sp1_trap_ops;
mlxsw_sp->listeners = mlxsw_sp1_listener;
mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);
mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1;
@@ -3084,6 +3085,7 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
mlxsw_sp->span_ops = &mlxsw_sp2_span_ops;
mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
+ mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2;
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
@@ -3111,6 +3113,7 @@ static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
+ mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3;
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 6ab1b6d725af..f9ba59641b4d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -177,6 +177,7 @@ struct mlxsw_sp {
const struct mlxsw_sp_ptp_ops *ptp_ops;
const struct mlxsw_sp_span_ops *span_ops;
const struct mlxsw_sp_policer_core_ops *policer_core_ops;
+ const struct mlxsw_sp_trap_ops *trap_ops;
const struct mlxsw_listener *listeners;
size_t listeners_count;
u32 lowest_shaper_bs;
@@ -983,6 +984,10 @@ struct mlxsw_sp_mall_mirror_entry {
int span_id;
};
+struct mlxsw_sp_mall_trap_entry {
+ int span_id;
+};
+
struct mlxsw_sp_mall_entry {
struct list_head list;
unsigned long cookie;
@@ -991,6 +996,7 @@ struct mlxsw_sp_mall_entry {
bool ingress;
union {
struct mlxsw_sp_mall_mirror_entry mirror;
+ struct mlxsw_sp_mall_trap_entry trap;
struct mlxsw_sp_port_sample sample;
};
struct rcu_head rcu;
@@ -1177,12 +1183,14 @@ void mlxsw_sp_trap_fini(struct mlxsw_core *mlxsw_core,
const struct devlink_trap *trap, void *trap_ctx);
int mlxsw_sp_trap_action_set(struct mlxsw_core *mlxsw_core,
const struct devlink_trap *trap,
- enum devlink_trap_action action);
+ enum devlink_trap_action action,
+ struct netlink_ext_ack *extack);
int mlxsw_sp_trap_group_init(struct mlxsw_core *mlxsw_core,
const struct devlink_trap_group *group);
int mlxsw_sp_trap_group_set(struct mlxsw_core *mlxsw_core,
const struct devlink_trap_group *group,
- const struct devlink_trap_policer *policer);
+ const struct devlink_trap_policer *policer,
+ struct netlink_ext_ack *extack);
int
mlxsw_sp_trap_policer_init(struct mlxsw_core *mlxsw_core,
const struct devlink_trap_policer *policer);
@@ -1196,6 +1204,8 @@ int
mlxsw_sp_trap_policer_counter_get(struct mlxsw_core *mlxsw_core,
const struct devlink_trap_policer *policer,
u64 *p_drops);
+int mlxsw_sp_trap_group_policer_hw_id_get(struct mlxsw_sp *mlxsw_sp, u16 id,
+ bool *p_enabled, u16 *p_hw_id);
static inline struct net *mlxsw_sp_net(struct mlxsw_sp *mlxsw_sp)
{
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
index a5ce1eec5418..964fd444bb10 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
@@ -1289,19 +1289,18 @@ struct mlxsw_sp_qevent_binding {
static LIST_HEAD(mlxsw_sp_qevent_block_cb_list);
-static int mlxsw_sp_qevent_mirror_configure(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_mall_entry *mall_entry,
- struct mlxsw_sp_qevent_binding *qevent_binding)
+static int mlxsw_sp_qevent_span_configure(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mall_entry *mall_entry,
+ struct mlxsw_sp_qevent_binding *qevent_binding,
+ const struct mlxsw_sp_span_agent_parms *agent_parms,
+ int *p_span_id)
{
struct mlxsw_sp_port *mlxsw_sp_port = qevent_binding->mlxsw_sp_port;
struct mlxsw_sp_span_trigger_parms trigger_parms = {};
- struct mlxsw_sp_span_agent_parms agent_parms = {
- .to_dev = mall_entry->mirror.to_dev,
- };
int span_id;
int err;
- err = mlxsw_sp_span_agent_get(mlxsw_sp, &span_id, &agent_parms);
+ err = mlxsw_sp_span_agent_get(mlxsw_sp, &span_id, agent_parms);
if (err)
return err;
@@ -1320,7 +1319,7 @@ static int mlxsw_sp_qevent_mirror_configure(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_trigger_enable;
- mall_entry->mirror.span_id = span_id;
+ *p_span_id = span_id;
return 0;
err_trigger_enable:
@@ -1333,13 +1332,13 @@ err_analyzed_port_get:
return err;
}
-static void mlxsw_sp_qevent_mirror_deconfigure(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_mall_entry *mall_entry,
- struct mlxsw_sp_qevent_binding *qevent_binding)
+static void mlxsw_sp_qevent_span_deconfigure(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_qevent_binding *qevent_binding,
+ int span_id)
{
struct mlxsw_sp_port *mlxsw_sp_port = qevent_binding->mlxsw_sp_port;
struct mlxsw_sp_span_trigger_parms trigger_parms = {
- .span_id = mall_entry->mirror.span_id,
+ .span_id = span_id,
};
mlxsw_sp_span_trigger_disable(mlxsw_sp_port, qevent_binding->span_trigger,
@@ -1347,7 +1346,51 @@ static void mlxsw_sp_qevent_mirror_deconfigure(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_span_agent_unbind(mlxsw_sp, qevent_binding->span_trigger, mlxsw_sp_port,
&trigger_parms);
mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, true);
- mlxsw_sp_span_agent_put(mlxsw_sp, mall_entry->mirror.span_id);
+ mlxsw_sp_span_agent_put(mlxsw_sp, span_id);
+}
+
+static int mlxsw_sp_qevent_mirror_configure(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mall_entry *mall_entry,
+ struct mlxsw_sp_qevent_binding *qevent_binding)
+{
+ struct mlxsw_sp_span_agent_parms agent_parms = {
+ .to_dev = mall_entry->mirror.to_dev,
+ };
+
+ return mlxsw_sp_qevent_span_configure(mlxsw_sp, mall_entry, qevent_binding,
+ &agent_parms, &mall_entry->mirror.span_id);
+}
+
+static void mlxsw_sp_qevent_mirror_deconfigure(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mall_entry *mall_entry,
+ struct mlxsw_sp_qevent_binding *qevent_binding)
+{
+ mlxsw_sp_qevent_span_deconfigure(mlxsw_sp, qevent_binding, mall_entry->mirror.span_id);
+}
+
+static int mlxsw_sp_qevent_trap_configure(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mall_entry *mall_entry,
+ struct mlxsw_sp_qevent_binding *qevent_binding)
+{
+ struct mlxsw_sp_span_agent_parms agent_parms = {};
+ int err;
+
+ err = mlxsw_sp_trap_group_policer_hw_id_get(mlxsw_sp,
+ DEVLINK_TRAP_GROUP_GENERIC_ID_BUFFER_DROPS,
+ &agent_parms.policer_enable,
+ &agent_parms.policer_id);
+ if (err)
+ return err;
+
+ return mlxsw_sp_qevent_span_configure(mlxsw_sp, mall_entry, qevent_binding,
+ &agent_parms, &mall_entry->trap.span_id);
+}
+
+static void mlxsw_sp_qevent_trap_deconfigure(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mall_entry *mall_entry,
+ struct mlxsw_sp_qevent_binding *qevent_binding)
+{
+ mlxsw_sp_qevent_span_deconfigure(mlxsw_sp, qevent_binding, mall_entry->trap.span_id);
}
static int mlxsw_sp_qevent_entry_configure(struct mlxsw_sp *mlxsw_sp,
@@ -1357,6 +1400,8 @@ static int mlxsw_sp_qevent_entry_configure(struct mlxsw_sp *mlxsw_sp,
switch (mall_entry->type) {
case MLXSW_SP_MALL_ACTION_TYPE_MIRROR:
return mlxsw_sp_qevent_mirror_configure(mlxsw_sp, mall_entry, qevent_binding);
+ case MLXSW_SP_MALL_ACTION_TYPE_TRAP:
+ return mlxsw_sp_qevent_trap_configure(mlxsw_sp, mall_entry, qevent_binding);
default:
/* This should have been validated away. */
WARN_ON(1);
@@ -1371,6 +1416,8 @@ static void mlxsw_sp_qevent_entry_deconfigure(struct mlxsw_sp *mlxsw_sp,
switch (mall_entry->type) {
case MLXSW_SP_MALL_ACTION_TYPE_MIRROR:
return mlxsw_sp_qevent_mirror_deconfigure(mlxsw_sp, mall_entry, qevent_binding);
+ case MLXSW_SP_MALL_ACTION_TYPE_TRAP:
+ return mlxsw_sp_qevent_trap_deconfigure(mlxsw_sp, mall_entry, qevent_binding);
default:
WARN_ON(1);
return;
@@ -1490,6 +1537,8 @@ static int mlxsw_sp_qevent_mall_replace(struct mlxsw_sp *mlxsw_sp,
if (act->id == FLOW_ACTION_MIRRED) {
mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_MIRROR;
mall_entry->mirror.to_dev = act->dev;
+ } else if (act->id == FLOW_ACTION_TRAP) {
+ mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_TRAP;
} else {
NL_SET_ERR_MSG(f->common.extack, "Unsupported action");
err = -EOPNOTSUPP;
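Note: with the FLOW_ACTION_TRAP branch above, a matchall rule carrying "action trap" can be bound to a port's buffer-drop qevent, and matching packets are steered to the CPU through a SPAN agent policed by the BUFFER_DROPS trap-group policer. In tc terms this corresponds to something like attaching a block via RED's "qevent early_drop block 10" and then "tc filter add block 10 matchall skip_sw action trap"; device, handle and block numbers here are illustrative.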
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index 323eaf979aea..5c959a995199 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -837,7 +837,8 @@ static int mlxsw_sp_span_policer_id_base_set(struct mlxsw_sp_span *span,
static void mlxsw_sp_span_policer_id_base_unset(struct mlxsw_sp_span *span)
{
- refcount_dec(&span->policer_id_base_ref_count);
+ if (refcount_dec_and_test(&span->policer_id_base_ref_count))
+ span->policer_id_base = 0;
}
static struct mlxsw_sp_span_entry *
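Note: the one-line fix above is an instance of a general rule: lazily-derived shared state must be reset when its last user goes away. A minimal sketch with hypothetical names:

static void my_base_unset(struct my_pool *pool)
{
        /* Only the final put may clear the base; otherwise the next
         * "first" user would inherit a stale value.
         */
        if (refcount_dec_and_test(&pool->base_ref_count))
                pool->base = 0;
}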
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
index 1e38dfe7cf64..2e41c5519c1b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
@@ -21,6 +21,7 @@ struct mlxsw_sp_trap_group_item {
struct devlink_trap_group group;
u16 hw_group_id;
u8 priority;
+ u8 fixed_policer:1; /* Whether policer binding can change */
};
#define MLXSW_SP_TRAP_LISTENERS_MAX 3
@@ -28,6 +29,7 @@ struct mlxsw_sp_trap_group_item {
struct mlxsw_sp_trap_item {
struct devlink_trap trap;
struct mlxsw_listener listeners_arr[MLXSW_SP_TRAP_LISTENERS_MAX];
+ u8 is_source:1;
};
/* All driver-specific traps must be documented in
@@ -46,6 +48,11 @@ enum {
#define MLXSW_SP_TRAP_METADATA DEVLINK_TRAP_METADATA_TYPE_F_IN_PORT
+enum {
+ /* Packet was early dropped. */
+ MLXSW_SP_MIRROR_REASON_INGRESS_WRED = 9,
+};
+
static int mlxsw_sp_rx_listener(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
u8 local_port,
struct mlxsw_sp_port *mlxsw_sp_port)
@@ -222,6 +229,11 @@ static void mlxsw_sp_rx_sample_listener(struct sk_buff *skb, u8 local_port,
DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
MLXSW_SP_TRAP_METADATA | (_metadata))
+#define MLXSW_SP_TRAP_BUFFER_DROP(_id) \
+ DEVLINK_TRAP_GENERIC(DROP, TRAP, _id, \
+ DEVLINK_TRAP_GROUP_GENERIC_ID_BUFFER_DROPS, \
+ MLXSW_SP_TRAP_METADATA)
+
#define MLXSW_SP_TRAP_DRIVER_DROP(_id, _group_id) \
DEVLINK_TRAP_DRIVER(DROP, DROP, DEVLINK_MLXSW_TRAP_ID_##_id, \
DEVLINK_MLXSW_TRAP_NAME_##_id, \
@@ -248,6 +260,10 @@ static void mlxsw_sp_rx_sample_listener(struct sk_buff *skb, u8 local_port,
TRAP_EXCEPTION_TO_CPU, false, SP_##_en_group_id, \
SET_FW_DEFAULT, SP_##_dis_group_id)
+#define MLXSW_SP_RXL_BUFFER_DISCARD(_mirror_reason) \
+ MLXSW_RXL_MIRROR(mlxsw_sp_rx_drop_listener, 0, SP_BUFFER_DISCARDS, \
+ MLXSW_SP_MIRROR_REASON_##_mirror_reason)
+
#define MLXSW_SP_RXL_EXCEPTION(_id, _group_id, _action) \
MLXSW_RXL(mlxsw_sp_rx_mark_listener, _id, \
_action, false, SP_##_group_id, SET_FW_DEFAULT)
@@ -331,6 +347,9 @@ mlxsw_sp_trap_policer_items_arr[] = {
{
.policer = MLXSW_SP_TRAP_POLICER(19, 1024, 512),
},
+ {
+ .policer = MLXSW_SP_TRAP_POLICER(20, 10240, 4096),
+ },
};
static const struct mlxsw_sp_trap_group_item mlxsw_sp_trap_group_items_arr[] = {
@@ -1063,10 +1082,10 @@ static int mlxsw_sp_trap_dummy_group_init(struct mlxsw_sp *mlxsw_sp)
static int mlxsw_sp_trap_policer_items_arr_init(struct mlxsw_sp *mlxsw_sp)
{
+ size_t arr_size = ARRAY_SIZE(mlxsw_sp_trap_policer_items_arr);
size_t elem_size = sizeof(struct mlxsw_sp_trap_policer_item);
- u64 arr_size = ARRAY_SIZE(mlxsw_sp_trap_policer_items_arr);
struct mlxsw_sp_trap *trap = mlxsw_sp->trap;
- u64 free_policers = 0;
+ size_t free_policers = 0;
u32 last_id;
int i;
@@ -1159,6 +1178,43 @@ static void mlxsw_sp_trap_policers_fini(struct mlxsw_sp *mlxsw_sp)
mlxsw_sp_trap_policer_items_arr_fini(mlxsw_sp);
}
+static int mlxsw_sp_trap_group_items_arr_init(struct mlxsw_sp *mlxsw_sp)
+{
+ size_t common_groups_count = ARRAY_SIZE(mlxsw_sp_trap_group_items_arr);
+ const struct mlxsw_sp_trap_group_item *spec_group_items_arr;
+ size_t elem_size = sizeof(struct mlxsw_sp_trap_group_item);
+ struct mlxsw_sp_trap *trap = mlxsw_sp->trap;
+ size_t groups_count, spec_groups_count;
+ int err;
+
+ err = mlxsw_sp->trap_ops->groups_init(mlxsw_sp, &spec_group_items_arr,
+ &spec_groups_count);
+ if (err)
+ return err;
+
+ /* The group items array is created by concatenating the common trap
+ * group items and the ASIC-specific trap group items.
+ */
+ groups_count = common_groups_count + spec_groups_count;
+ trap->group_items_arr = kcalloc(groups_count, elem_size, GFP_KERNEL);
+ if (!trap->group_items_arr)
+ return -ENOMEM;
+
+ memcpy(trap->group_items_arr, mlxsw_sp_trap_group_items_arr,
+ elem_size * common_groups_count);
+ memcpy(trap->group_items_arr + common_groups_count,
+ spec_group_items_arr, elem_size * spec_groups_count);
+
+ trap->groups_count = groups_count;
+
+ return 0;
+}
+
+static void mlxsw_sp_trap_group_items_arr_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ kfree(mlxsw_sp->trap->group_items_arr);
+}
+
static int mlxsw_sp_trap_groups_init(struct mlxsw_sp *mlxsw_sp)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
@@ -1166,13 +1222,9 @@ static int mlxsw_sp_trap_groups_init(struct mlxsw_sp *mlxsw_sp)
struct mlxsw_sp_trap *trap = mlxsw_sp->trap;
int err, i;
- trap->group_items_arr = kmemdup(mlxsw_sp_trap_group_items_arr,
- sizeof(mlxsw_sp_trap_group_items_arr),
- GFP_KERNEL);
- if (!trap->group_items_arr)
- return -ENOMEM;
-
- trap->groups_count = ARRAY_SIZE(mlxsw_sp_trap_group_items_arr);
+ err = mlxsw_sp_trap_group_items_arr_init(mlxsw_sp);
+ if (err)
+ return err;
for (i = 0; i < trap->groups_count; i++) {
group_item = &trap->group_items_arr[i];
@@ -1189,7 +1241,7 @@ err_trap_group_register:
group_item = &trap->group_items_arr[i];
devlink_trap_groups_unregister(devlink, &group_item->group, 1);
}
- kfree(trap->group_items_arr);
+ mlxsw_sp_trap_group_items_arr_fini(mlxsw_sp);
return err;
}
@@ -1205,7 +1257,7 @@ static void mlxsw_sp_trap_groups_fini(struct mlxsw_sp *mlxsw_sp)
group_item = &trap->group_items_arr[i];
devlink_trap_groups_unregister(devlink, &group_item->group, 1);
}
- kfree(trap->group_items_arr);
+ mlxsw_sp_trap_group_items_arr_fini(mlxsw_sp);
}
static bool
@@ -1214,6 +1266,43 @@ mlxsw_sp_trap_listener_is_valid(const struct mlxsw_listener *listener)
return listener->trap_id != 0;
}
+static int mlxsw_sp_trap_items_arr_init(struct mlxsw_sp *mlxsw_sp)
+{
+ size_t common_traps_count = ARRAY_SIZE(mlxsw_sp_trap_items_arr);
+ const struct mlxsw_sp_trap_item *spec_trap_items_arr;
+ size_t elem_size = sizeof(struct mlxsw_sp_trap_item);
+ struct mlxsw_sp_trap *trap = mlxsw_sp->trap;
+ size_t traps_count, spec_traps_count;
+ int err;
+
+ err = mlxsw_sp->trap_ops->traps_init(mlxsw_sp, &spec_trap_items_arr,
+ &spec_traps_count);
+ if (err)
+ return err;
+
+ /* The trap items array is created by concatenating the common trap
+ * items and the ASIC-specific trap items.
+ */
+ traps_count = common_traps_count + spec_traps_count;
+ trap->trap_items_arr = kcalloc(traps_count, elem_size, GFP_KERNEL);
+ if (!trap->trap_items_arr)
+ return -ENOMEM;
+
+ memcpy(trap->trap_items_arr, mlxsw_sp_trap_items_arr,
+ elem_size * common_traps_count);
+ memcpy(trap->trap_items_arr + common_traps_count,
+ spec_trap_items_arr, elem_size * spec_traps_count);
+
+ trap->traps_count = traps_count;
+
+ return 0;
+}
+
+static void mlxsw_sp_trap_items_arr_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ kfree(mlxsw_sp->trap->trap_items_arr);
+}
+
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
@@ -1221,13 +1310,9 @@ static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
const struct mlxsw_sp_trap_item *trap_item;
int err, i;
- trap->trap_items_arr = kmemdup(mlxsw_sp_trap_items_arr,
- sizeof(mlxsw_sp_trap_items_arr),
- GFP_KERNEL);
- if (!trap->trap_items_arr)
- return -ENOMEM;
-
- trap->traps_count = ARRAY_SIZE(mlxsw_sp_trap_items_arr);
+ err = mlxsw_sp_trap_items_arr_init(mlxsw_sp);
+ if (err)
+ return err;
for (i = 0; i < trap->traps_count; i++) {
trap_item = &trap->trap_items_arr[i];
@@ -1244,7 +1329,7 @@ err_trap_register:
trap_item = &trap->trap_items_arr[i];
devlink_traps_unregister(devlink, &trap_item->trap, 1);
}
- kfree(trap->trap_items_arr);
+ mlxsw_sp_trap_items_arr_fini(mlxsw_sp);
return err;
}
@@ -1260,7 +1345,7 @@ static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
trap_item = &trap->trap_items_arr[i];
devlink_traps_unregister(devlink, &trap_item->trap, 1);
}
- kfree(trap->trap_items_arr);
+ mlxsw_sp_trap_items_arr_fini(mlxsw_sp);
}
int mlxsw_sp_devlink_traps_init(struct mlxsw_sp *mlxsw_sp)
@@ -1352,7 +1437,8 @@ void mlxsw_sp_trap_fini(struct mlxsw_core *mlxsw_core,
int mlxsw_sp_trap_action_set(struct mlxsw_core *mlxsw_core,
const struct devlink_trap *trap,
- enum devlink_trap_action action)
+ enum devlink_trap_action action,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
const struct mlxsw_sp_trap_item *trap_item;
@@ -1362,6 +1448,11 @@ int mlxsw_sp_trap_action_set(struct mlxsw_core *mlxsw_core,
if (WARN_ON(!trap_item))
return -EINVAL;
+ if (trap_item->is_source) {
+ NL_SET_ERR_MSG_MOD(extack, "Changing the action of source traps is not supported");
+ return -EOPNOTSUPP;
+ }
+
for (i = 0; i < MLXSW_SP_TRAP_LISTENERS_MAX; i++) {
const struct mlxsw_listener *listener;
bool enabled;
@@ -1392,7 +1483,7 @@ int mlxsw_sp_trap_action_set(struct mlxsw_core *mlxsw_core,
static int
__mlxsw_sp_trap_group_init(struct mlxsw_core *mlxsw_core,
const struct devlink_trap_group *group,
- u32 policer_id)
+ u32 policer_id, struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
u16 hw_policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
@@ -1403,6 +1494,11 @@ __mlxsw_sp_trap_group_init(struct mlxsw_core *mlxsw_core,
if (WARN_ON(!group_item))
return -EINVAL;
+ if (group_item->fixed_policer && policer_id != group->init_policer_id) {
+ NL_SET_ERR_MSG_MOD(extack, "Changing the policer binding of this group is not supported");
+ return -EOPNOTSUPP;
+ }
+
if (policer_id) {
struct mlxsw_sp_trap_policer_item *policer_item;
@@ -1422,16 +1518,18 @@ int mlxsw_sp_trap_group_init(struct mlxsw_core *mlxsw_core,
const struct devlink_trap_group *group)
{
return __mlxsw_sp_trap_group_init(mlxsw_core, group,
- group->init_policer_id);
+ group->init_policer_id, NULL);
}
int mlxsw_sp_trap_group_set(struct mlxsw_core *mlxsw_core,
const struct devlink_trap_group *group,
- const struct devlink_trap_policer *policer)
+ const struct devlink_trap_policer *policer,
+ struct netlink_ext_ack *extack)
{
u32 policer_id = policer ? policer->id : 0;
- return __mlxsw_sp_trap_group_init(mlxsw_core, group, policer_id);
+ return __mlxsw_sp_trap_group_init(mlxsw_core, group, policer_id,
+ extack);
}
static int
@@ -1576,3 +1674,110 @@ mlxsw_sp_trap_policer_counter_get(struct mlxsw_core *mlxsw_core,
return 0;
}
+
+int mlxsw_sp_trap_group_policer_hw_id_get(struct mlxsw_sp *mlxsw_sp, u16 id,
+ bool *p_enabled, u16 *p_hw_id)
+{
+ struct mlxsw_sp_trap_policer_item *pol_item;
+ struct mlxsw_sp_trap_group_item *gr_item;
+ u32 pol_id;
+
+ gr_item = mlxsw_sp_trap_group_item_lookup(mlxsw_sp, id);
+ if (!gr_item)
+ return -ENOENT;
+
+ pol_id = gr_item->group.init_policer_id;
+ if (!pol_id) {
+ *p_enabled = false;
+ return 0;
+ }
+
+ pol_item = mlxsw_sp_trap_policer_item_lookup(mlxsw_sp, pol_id);
+ if (WARN_ON(!pol_item))
+ return -ENOENT;
+
+ *p_enabled = true;
+ *p_hw_id = pol_item->hw_id;
+ return 0;
+}
+
+static const struct mlxsw_sp_trap_group_item
+mlxsw_sp1_trap_group_items_arr[] = {
+};
+
+static const struct mlxsw_sp_trap_item
+mlxsw_sp1_trap_items_arr[] = {
+};
+
+static int
+mlxsw_sp1_trap_groups_init(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_trap_group_item **arr,
+ size_t *p_groups_count)
+{
+ *arr = mlxsw_sp1_trap_group_items_arr;
+ *p_groups_count = ARRAY_SIZE(mlxsw_sp1_trap_group_items_arr);
+
+ return 0;
+}
+
+static int mlxsw_sp1_traps_init(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_trap_item **arr,
+ size_t *p_traps_count)
+{
+ *arr = mlxsw_sp1_trap_items_arr;
+ *p_traps_count = ARRAY_SIZE(mlxsw_sp1_trap_items_arr);
+
+ return 0;
+}
+
+const struct mlxsw_sp_trap_ops mlxsw_sp1_trap_ops = {
+ .groups_init = mlxsw_sp1_trap_groups_init,
+ .traps_init = mlxsw_sp1_traps_init,
+};
+
+static const struct mlxsw_sp_trap_group_item
+mlxsw_sp2_trap_group_items_arr[] = {
+ {
+ .group = DEVLINK_TRAP_GROUP_GENERIC(BUFFER_DROPS, 20),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_BUFFER_DISCARDS,
+ .priority = 0,
+ .fixed_policer = true,
+ },
+};
+
+static const struct mlxsw_sp_trap_item
+mlxsw_sp2_trap_items_arr[] = {
+ {
+ .trap = MLXSW_SP_TRAP_BUFFER_DROP(EARLY_DROP),
+ .listeners_arr = {
+ MLXSW_SP_RXL_BUFFER_DISCARD(INGRESS_WRED),
+ },
+ .is_source = true,
+ },
+};
+
+static int
+mlxsw_sp2_trap_groups_init(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_trap_group_item **arr,
+ size_t *p_groups_count)
+{
+ *arr = mlxsw_sp2_trap_group_items_arr;
+ *p_groups_count = ARRAY_SIZE(mlxsw_sp2_trap_group_items_arr);
+
+ return 0;
+}
+
+static int mlxsw_sp2_traps_init(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_trap_item **arr,
+ size_t *p_traps_count)
+{
+ *arr = mlxsw_sp2_trap_items_arr;
+ *p_traps_count = ARRAY_SIZE(mlxsw_sp2_trap_items_arr);
+
+ return 0;
+}
+
+const struct mlxsw_sp_trap_ops mlxsw_sp2_trap_ops = {
+ .groups_init = mlxsw_sp2_trap_groups_init,
+ .traps_init = mlxsw_sp2_traps_init,
+};
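Note: both *_arr_init() helpers above use the same concatenation idiom. A standalone model of it, with plain ints standing in for the item structs:

#include <stdlib.h>
#include <string.h>

/* Build one array holding the common items followed by the
 * ASIC-specific ones, as mlxsw_sp_trap_group_items_arr_init() does.
 */
static int concat_items(const int *common, size_t n_common,
                        const int *spec, size_t n_spec,
                        int **out, size_t *out_count)
{
        int *arr = calloc(n_common + n_spec, sizeof(*arr));

        if (!arr)
                return -1;
        memcpy(arr, common, n_common * sizeof(*arr));
        memcpy(arr + n_common, spec, n_spec * sizeof(*arr));
        *out = arr;
        *out_count = n_common + n_spec;
        return 0;
}

This is also why the kmemdup() calls had to go: sizeof() on the common array no longer describes the full allocation once per-ASIC items are appended.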
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.h
index 13ac412f4d53..b8df684bedef 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.h
@@ -9,13 +9,13 @@
struct mlxsw_sp_trap {
struct mlxsw_sp_trap_policer_item *policer_items_arr;
- u64 policers_count; /* Number of registered policers */
+ size_t policers_count; /* Number of registered policers */
struct mlxsw_sp_trap_group_item *group_items_arr;
- u64 groups_count; /* Number of registered groups */
+ size_t groups_count; /* Number of registered groups */
struct mlxsw_sp_trap_item *trap_items_arr;
- u64 traps_count; /* Number of registered traps */
+ size_t traps_count; /* Number of registered traps */
u16 thin_policer_hw_id;
@@ -23,4 +23,16 @@ struct mlxsw_sp_trap {
unsigned long policers_usage[]; /* Usage bitmap */
};
+struct mlxsw_sp_trap_ops {
+ int (*groups_init)(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_trap_group_item **arr,
+ size_t *p_groups_count);
+ int (*traps_init)(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_trap_item **arr,
+ size_t *p_traps_count);
+};
+
+extern const struct mlxsw_sp_trap_ops mlxsw_sp1_trap_ops;
+extern const struct mlxsw_sp_trap_ops mlxsw_sp2_trap_ops;
+
#endif
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index 095561924bdc..3c57c331729f 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -403,8 +403,7 @@ static int ionic_get_coalesce(struct net_device *netdev,
{
struct ionic_lif *lif = netdev_priv(netdev);
- /* Tx uses Rx interrupt */
- coalesce->tx_coalesce_usecs = lif->rx_coalesce_usecs;
+ coalesce->tx_coalesce_usecs = lif->tx_coalesce_usecs;
coalesce->rx_coalesce_usecs = lif->rx_coalesce_usecs;
return 0;
@@ -417,7 +416,8 @@ static int ionic_set_coalesce(struct net_device *netdev,
struct ionic_identity *ident;
struct ionic_qcq *qcq;
unsigned int i;
- u32 coal;
+ u32 rx_coal;
+ u32 tx_coal;
ident = &lif->ionic->ident;
if (ident->dev.intr_coal_div == 0) {
@@ -426,26 +426,31 @@ static int ionic_set_coalesce(struct net_device *netdev,
return -EIO;
}
- /* Tx uses Rx interrupt, so only change Rx */
- if (coalesce->tx_coalesce_usecs != lif->rx_coalesce_usecs) {
+ /* Tx normally shares Rx interrupt, so only change Rx */
+ if (!test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state) &&
+ coalesce->tx_coalesce_usecs != lif->rx_coalesce_usecs) {
netdev_warn(netdev, "only the rx-usecs can be changed\n");
return -EINVAL;
}
- /* Convert the usec request to a HW useable value. If they asked
+ /* Convert the usec request to a HW usable value. If they asked
* for non-zero and it resolved to zero, bump it up
*/
- coal = ionic_coal_usec_to_hw(lif->ionic, coalesce->rx_coalesce_usecs);
- if (!coal && coalesce->rx_coalesce_usecs)
- coal = 1;
-
- if (coal > IONIC_INTR_CTRL_COAL_MAX)
+ rx_coal = ionic_coal_usec_to_hw(lif->ionic, coalesce->rx_coalesce_usecs);
+ if (!rx_coal && coalesce->rx_coalesce_usecs)
+ rx_coal = 1;
+ tx_coal = ionic_coal_usec_to_hw(lif->ionic, coalesce->tx_coalesce_usecs);
+ if (!tx_coal && coalesce->tx_coalesce_usecs)
+ tx_coal = 1;
+
+ if (rx_coal > IONIC_INTR_CTRL_COAL_MAX ||
+ tx_coal > IONIC_INTR_CTRL_COAL_MAX)
return -ERANGE;
- /* Save the new value */
+ /* Save the new values */
lif->rx_coalesce_usecs = coalesce->rx_coalesce_usecs;
- if (coal != lif->rx_coalesce_hw) {
- lif->rx_coalesce_hw = coal;
+ if (rx_coal != lif->rx_coalesce_hw) {
+ lif->rx_coalesce_hw = rx_coal;
if (test_bit(IONIC_LIF_F_UP, lif->state)) {
for (i = 0; i < lif->nxqs; i++) {
@@ -457,6 +462,23 @@ static int ionic_set_coalesce(struct net_device *netdev,
}
}
+ if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
+ lif->tx_coalesce_usecs = coalesce->tx_coalesce_usecs;
+ else
+ lif->tx_coalesce_usecs = coalesce->rx_coalesce_usecs;
+ if (tx_coal != lif->tx_coalesce_hw) {
+ lif->tx_coalesce_hw = tx_coal;
+
+ if (test_bit(IONIC_LIF_F_UP, lif->state)) {
+ for (i = 0; i < lif->nxqs; i++) {
+ qcq = lif->txqcqs[i].qcq;
+ ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
+ qcq->intr.index,
+ lif->tx_coalesce_hw);
+ }
+ }
+ }
+
return 0;
}
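Note: the conversion policy above is worth isolating: scale the user's usec request into device units, round a nonzero request up to at least 1, and reject values the register cannot hold. A sketch with an illustrative helper; in the driver the divider comes from the device identity:

static int my_usecs_to_hw(u32 usecs, u32 mult, u32 div, u32 max, u32 *hw)
{
        u32 coal = usecs * mult / div;

        if (!coal && usecs)
                coal = 1;       /* never silently disable coalescing */
        if (coal > max)
                return -ERANGE;
        *hw = coal;
        return 0;
}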
@@ -510,29 +532,63 @@ static void ionic_get_channels(struct net_device *netdev,
/* report maximum channels */
ch->max_combined = lif->ionic->ntxqs_per_lif;
+ ch->max_rx = lif->ionic->ntxqs_per_lif / 2;
+ ch->max_tx = lif->ionic->ntxqs_per_lif / 2;
/* report current channels */
- ch->combined_count = lif->nxqs;
+ if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) {
+ ch->rx_count = lif->nxqs;
+ ch->tx_count = lif->nxqs;
+ } else {
+ ch->combined_count = lif->nxqs;
+ }
}
static void ionic_set_queuecount(struct ionic_lif *lif, void *arg)
{
struct ethtool_channels *ch = arg;
- lif->nxqs = ch->combined_count;
+ if (ch->combined_count) {
+ lif->nxqs = ch->combined_count;
+ if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) {
+ clear_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
+ lif->tx_coalesce_usecs = lif->rx_coalesce_usecs;
+ lif->tx_coalesce_hw = lif->rx_coalesce_hw;
+ netdev_info(lif->netdev, "Sharing queue interrupts\n");
+ }
+ } else {
+ lif->nxqs = ch->rx_count;
+ if (!test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) {
+ set_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
+ netdev_info(lif->netdev, "Splitting queue interrupts\n");
+ }
+ }
}
static int ionic_set_channels(struct net_device *netdev,
struct ethtool_channels *ch)
{
struct ionic_lif *lif = netdev_priv(netdev);
+ int new_cnt;
- if (!ch->combined_count || ch->other_count ||
- ch->rx_count || ch->tx_count)
+ if (ch->rx_count != ch->tx_count) {
+ netdev_info(netdev, "The rx and tx count must be equal\n");
return -EINVAL;
+ }
- if (ch->combined_count == lif->nxqs)
- return 0;
+ if (ch->combined_count && ch->rx_count) {
+ netdev_info(netdev, "Use either combined_count or rx/tx_count, not both\n");
+ return -EINVAL;
+ }
+
+ if (ch->combined_count)
+ new_cnt = ch->combined_count;
+ else
+ new_cnt = ch->rx_count;
+
+ if (lif->nxqs != new_cnt)
+ netdev_info(netdev, "Changing queue count from %d to %d\n",
+ lif->nxqs, new_cnt);
return ionic_reset_queues(lif, ionic_set_queuecount, ch);
}
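Note: after this change the driver accepts either ethtool's combined mode ("ethtool -L <dev> combined 8") or the split per-direction mode ("ethtool -L <dev> rx 8 tx 8"). The constraint logic reduces to the following sketch, with hypothetical names:

static int my_pick_queue_count(u32 combined, u32 rx, u32 tx, u32 *nxqs)
{
        if (rx != tx)
                return -EINVAL; /* per-direction counts must match */
        if (combined && rx)
                return -EINVAL; /* the two modes are exclusive */
        *nxqs = combined ? combined : rx;
        return 0;
}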
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 85e4cf229a96..1944bf5264db 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -616,7 +616,6 @@ static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
.index = cpu_to_le32(q->index),
.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
IONIC_QINIT_F_SG),
- .intr_index = cpu_to_le16(lif->rxqcqs[q->index].qcq->intr.index),
.pid = cpu_to_le16(q->pid),
.ring_size = ilog2(q->num_descs),
.ring_base = cpu_to_le64(q->base_pa),
@@ -624,14 +623,22 @@ static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
.sg_ring_base = cpu_to_le64(q->sg_base_pa),
},
};
+ unsigned int intr_index;
int err;
+ if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
+ intr_index = qcq->intr.index;
+ else
+ intr_index = lif->rxqcqs[q->index].qcq->intr.index;
+ ctx.cmd.q_init.intr_index = cpu_to_le16(intr_index);
+
dev_dbg(dev, "txq_init.pid %d\n", ctx.cmd.q_init.pid);
dev_dbg(dev, "txq_init.index %d\n", ctx.cmd.q_init.index);
dev_dbg(dev, "txq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
dev_dbg(dev, "txq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
dev_dbg(dev, "txq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
dev_dbg(dev, "txq_init.ver %d\n", ctx.cmd.q_init.ver);
+ dev_dbg(dev, "txq_init.intr_index %d\n", ctx.cmd.q_init.intr_index);
q->tail = q->info;
q->head = q->tail;
@@ -648,6 +655,10 @@ static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
dev_dbg(dev, "txq->hw_type %d\n", q->hw_type);
dev_dbg(dev, "txq->hw_index %d\n", q->hw_index);
+ if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
+ netif_napi_add(lif->netdev, &qcq->napi, ionic_tx_napi,
+ NAPI_POLL_WEIGHT);
+
qcq->flags |= IONIC_QCQ_F_INITED;
return 0;
@@ -684,6 +695,7 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
dev_dbg(dev, "rxq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
dev_dbg(dev, "rxq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
dev_dbg(dev, "rxq_init.ver %d\n", ctx.cmd.q_init.ver);
+ dev_dbg(dev, "rxq_init.intr_index %d\n", ctx.cmd.q_init.intr_index);
q->tail = q->info;
q->head = q->tail;
@@ -700,8 +712,12 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
dev_dbg(dev, "rxq->hw_type %d\n", q->hw_type);
dev_dbg(dev, "rxq->hw_index %d\n", q->hw_index);
- netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi,
- NAPI_POLL_WEIGHT);
+ if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
+ netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi,
+ NAPI_POLL_WEIGHT);
+ else
+ netif_napi_add(lif->netdev, &qcq->napi, ionic_txrx_napi,
+ NAPI_POLL_WEIGHT);
qcq->flags |= IONIC_QCQ_F_INITED;
@@ -1537,6 +1553,8 @@ static int ionic_txrx_alloc(struct ionic_lif *lif)
sg_desc_sz = sizeof(struct ionic_txq_sg_desc);
flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;
+ if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
+ flags |= IONIC_QCQ_F_INTR;
for (i = 0; i < lif->nxqs; i++) {
err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
lif->ntxq_descs,
@@ -1547,6 +1565,11 @@ static int ionic_txrx_alloc(struct ionic_lif *lif)
if (err)
goto err_out;
+ if (flags & IONIC_QCQ_F_INTR)
+ ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
+ lif->txqcqs[i].qcq->intr.index,
+ lif->tx_coalesce_hw);
+
lif->txqcqs[i].qcq->stats = lif->txqcqs[i].stats;
ionic_debugfs_add_qcq(lif, lif->txqcqs[i].qcq);
}
@@ -1562,13 +1585,15 @@ static int ionic_txrx_alloc(struct ionic_lif *lif)
if (err)
goto err_out;
- lif->rxqcqs[i].qcq->stats = lif->rxqcqs[i].stats;
-
ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
lif->rxqcqs[i].qcq->intr.index,
lif->rx_coalesce_hw);
- ionic_link_qcq_interrupts(lif->rxqcqs[i].qcq,
- lif->txqcqs[i].qcq);
+
+ if (!test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
+ ionic_link_qcq_interrupts(lif->rxqcqs[i].qcq,
+ lif->txqcqs[i].qcq);
+
+ lif->rxqcqs[i].qcq->stats = lif->rxqcqs[i].stats;
ionic_debugfs_add_qcq(lif, lif->rxqcqs[i].qcq);
}
@@ -2065,11 +2090,14 @@ static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index
lif->index = index;
lif->ntxq_descs = IONIC_DEF_TXRX_DESC;
lif->nrxq_descs = IONIC_DEF_TXRX_DESC;
+ lif->tx_budget = IONIC_TX_BUDGET_DEFAULT;
/* Convert the default coalesce value to actual hw resolution */
lif->rx_coalesce_usecs = IONIC_ITR_COAL_USEC_DEFAULT;
lif->rx_coalesce_hw = ionic_coal_usec_to_hw(lif->ionic,
lif->rx_coalesce_usecs);
+ lif->tx_coalesce_usecs = lif->rx_coalesce_usecs;
+ lif->tx_coalesce_hw = lif->rx_coalesce_hw;
snprintf(lif->name, sizeof(lif->name), "lif%u", index);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
index edad17d7aeeb..1ee3b14c8d50 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
@@ -13,6 +13,7 @@
#define IONIC_MAX_NUM_NAPI_CNTR (NAPI_POLL_WEIGHT + 1)
#define IONIC_MAX_NUM_SG_CNTR (IONIC_TX_MAX_SG_ELEMS + 1)
#define IONIC_RX_COPYBREAK_DEFAULT 256
+#define IONIC_TX_BUDGET_DEFAULT 256
struct ionic_tx_stats {
u64 dma_map_err;
@@ -136,6 +137,7 @@ enum ionic_lif_state_flags {
IONIC_LIF_F_UP,
IONIC_LIF_F_LINK_CHECK_REQUESTED,
IONIC_LIF_F_FW_RESET,
+ IONIC_LIF_F_SPLIT_INTR,
/* leave this as last */
IONIC_LIF_F_STATE_SIZE
@@ -176,6 +178,7 @@ struct ionic_lif {
unsigned int ntxq_descs;
unsigned int nrxq_descs;
u32 rx_copybreak;
+ u32 tx_budget;
unsigned int rx_mode;
u64 hw_features;
bool mc_overflow;
@@ -203,6 +206,8 @@ struct ionic_lif {
struct dentry *dentry;
u32 rx_coalesce_usecs; /* what the user asked for */
u32 rx_coalesce_hw; /* what the hw is using */
+ u32 tx_coalesce_usecs; /* what the user asked for */
+ u32 tx_coalesce_hw; /* what the hw is using */
struct work_struct tx_timeout_work;
};
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index 85eb8f276a37..8107d32c2767 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -15,6 +15,10 @@ static void ionic_rx_clean(struct ionic_queue *q,
struct ionic_cq_info *cq_info,
void *cb_arg);
+static bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info);
+
+static bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info);
+
static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell,
ionic_desc_cb cb_func, void *cb_arg)
{
@@ -249,29 +253,13 @@ static bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
return true;
}
-static u32 ionic_rx_walk_cq(struct ionic_cq *rxcq, u32 limit)
-{
- u32 work_done = 0;
-
- while (ionic_rx_service(rxcq, rxcq->tail)) {
- if (rxcq->tail->last)
- rxcq->done_color = !rxcq->done_color;
- rxcq->tail = rxcq->tail->next;
- DEBUG_STATS_CQE_CNT(rxcq);
-
- if (++work_done >= limit)
- break;
- }
-
- return work_done;
-}
-
void ionic_rx_flush(struct ionic_cq *cq)
{
struct ionic_dev *idev = &cq->lif->ionic->idev;
u32 work_done;
- work_done = ionic_rx_walk_cq(cq, cq->num_descs);
+ work_done = ionic_cq_service(cq, cq->num_descs,
+ ionic_rx_service, NULL, NULL);
if (work_done)
ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
@@ -331,9 +319,6 @@ static void ionic_rx_page_free(struct ionic_queue *q, struct page *page,
__free_page(page);
}
-#define IONIC_RX_RING_DOORBELL_STRIDE ((1 << 5) - 1)
-#define IONIC_RX_RING_HEAD_BUF_SZ 2048
-
void ionic_rx_fill(struct ionic_queue *q)
{
struct net_device *netdev = q->lif->netdev;
@@ -345,7 +330,6 @@ void ionic_rx_fill(struct ionic_queue *q)
unsigned int remain_len;
unsigned int seg_len;
unsigned int nfrags;
- bool ring_doorbell;
unsigned int i, j;
unsigned int len;
@@ -360,9 +344,7 @@ void ionic_rx_fill(struct ionic_queue *q)
page_info = &desc_info->pages[0];
if (page_info->page) { /* recycle the buffer */
- ring_doorbell = ((q->head->index + 1) &
- IONIC_RX_RING_DOORBELL_STRIDE) == 0;
- ionic_rxq_post(q, ring_doorbell, ionic_rx_clean, NULL);
+ ionic_rxq_post(q, false, ionic_rx_clean, NULL);
continue;
}
@@ -401,10 +383,11 @@ void ionic_rx_fill(struct ionic_queue *q)
page_info++;
}
- ring_doorbell = ((q->head->index + 1) &
- IONIC_RX_RING_DOORBELL_STRIDE) == 0;
- ionic_rxq_post(q, ring_doorbell, ionic_rx_clean, NULL);
+ ionic_rxq_post(q, false, ionic_rx_clean, NULL);
}
+
+ ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
+ q->dbval | q->head->index);
}
static void ionic_rx_fill_cb(void *arg)
@@ -436,40 +419,117 @@ void ionic_rx_empty(struct ionic_queue *q)
}
}
+int ionic_tx_napi(struct napi_struct *napi, int budget)
+{
+ struct ionic_qcq *qcq = napi_to_qcq(napi);
+ struct ionic_cq *cq = napi_to_cq(napi);
+ struct ionic_dev *idev;
+ struct ionic_lif *lif;
+ u32 work_done = 0;
+ u32 flags = 0;
+
+ lif = cq->bound_q->lif;
+ idev = &lif->ionic->idev;
+
+ work_done = ionic_cq_service(cq, budget,
+ ionic_tx_service, NULL, NULL);
+
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
+ flags |= IONIC_INTR_CRED_UNMASK;
+ DEBUG_STATS_INTR_REARM(cq->bound_intr);
+ }
+
+ if (work_done || flags) {
+ flags |= IONIC_INTR_CRED_RESET_COALESCE;
+ ionic_intr_credits(idev->intr_ctrl,
+ cq->bound_intr->index,
+ work_done, flags);
+ }
+
+ DEBUG_STATS_NAPI_POLL(qcq, work_done);
+
+ return work_done;
+}
+
int ionic_rx_napi(struct napi_struct *napi, int budget)
{
struct ionic_qcq *qcq = napi_to_qcq(napi);
+ struct ionic_cq *cq = napi_to_cq(napi);
+ struct ionic_dev *idev;
+ struct ionic_lif *lif;
+ u32 work_done = 0;
+ u32 flags = 0;
+
+ lif = cq->bound_q->lif;
+ idev = &lif->ionic->idev;
+
+ work_done = ionic_cq_service(cq, budget,
+ ionic_rx_service, NULL, NULL);
+
+ if (work_done)
+ ionic_rx_fill(cq->bound_q);
+
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
+ flags |= IONIC_INTR_CRED_UNMASK;
+ DEBUG_STATS_INTR_REARM(cq->bound_intr);
+ }
+
+ if (work_done || flags) {
+ flags |= IONIC_INTR_CRED_RESET_COALESCE;
+ ionic_intr_credits(idev->intr_ctrl,
+ cq->bound_intr->index,
+ work_done, flags);
+ }
+
+ DEBUG_STATS_NAPI_POLL(qcq, work_done);
+
+ return work_done;
+}
+
+int ionic_txrx_napi(struct napi_struct *napi, int budget)
+{
+ struct ionic_qcq *qcq = napi_to_qcq(napi);
struct ionic_cq *rxcq = napi_to_cq(napi);
unsigned int qi = rxcq->bound_q->index;
struct ionic_dev *idev;
struct ionic_lif *lif;
struct ionic_cq *txcq;
+ u32 rx_work_done = 0;
+ u32 tx_work_done = 0;
u32 work_done = 0;
u32 flags = 0;
+ bool unmask;
lif = rxcq->bound_q->lif;
idev = &lif->ionic->idev;
txcq = &lif->txqcqs[qi].qcq->cq;
- ionic_tx_flush(txcq);
+ tx_work_done = ionic_cq_service(txcq, lif->tx_budget,
+ ionic_tx_service, NULL, NULL);
- work_done = ionic_rx_walk_cq(rxcq, budget);
-
- if (work_done)
+ rx_work_done = ionic_cq_service(rxcq, budget,
+ ionic_rx_service, NULL, NULL);
+ if (rx_work_done)
ionic_rx_fill_cb(rxcq->bound_q);
- if (work_done < budget && napi_complete_done(napi, work_done)) {
+ unmask = (rx_work_done < budget) && (tx_work_done < lif->tx_budget);
+
+ if (unmask && napi_complete_done(napi, rx_work_done)) {
flags |= IONIC_INTR_CRED_UNMASK;
DEBUG_STATS_INTR_REARM(rxcq->bound_intr);
+ work_done = rx_work_done;
+ } else {
+ work_done = budget;
}
if (work_done || flags) {
flags |= IONIC_INTR_CRED_RESET_COALESCE;
ionic_intr_credits(idev->intr_ctrl, rxcq->bound_intr->index,
- work_done, flags);
+ tx_work_done + rx_work_done, flags);
}
- DEBUG_STATS_NAPI_POLL(qcq, work_done);
+ DEBUG_STATS_NAPI_POLL(qcq, rx_work_done);
+ DEBUG_STATS_NAPI_POLL(qcq, tx_work_done);
return work_done;
}
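Note: all three polls above follow the canonical NAPI contract: service at most 'budget' completions and only rearm the interrupt once napi_complete_done() agrees the instance is idle. The skeleton, with hypothetical my_* types:

static int my_napi_poll(struct napi_struct *napi, int budget)
{
        struct my_cq *cq = container_of(napi, struct my_cq, napi);
        int done = my_cq_service(cq, budget);

        if (done < budget && napi_complete_done(napi, done))
                my_irq_unmask(cq);      /* rearm only when truly idle */
        return done;
}

ionic_txrx_napi adds one twist: it reports 'budget' back to the core whenever either direction was capped, so NAPI keeps polling instead of rearming prematurely.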
@@ -557,43 +617,39 @@ static void ionic_tx_clean(struct ionic_queue *q,
}
}
-void ionic_tx_flush(struct ionic_cq *cq)
+static bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
{
- struct ionic_txq_comp *comp = cq->tail->cq_desc;
- struct ionic_dev *idev = &cq->lif->ionic->idev;
+ struct ionic_txq_comp *comp = cq_info->cq_desc;
struct ionic_queue *q = cq->bound_q;
struct ionic_desc_info *desc_info;
- unsigned int work_done = 0;
-
- /* walk the completed cq entries */
- while (work_done < cq->num_descs &&
- color_match(comp->color, cq->done_color)) {
-
- /* clean the related q entries, there could be
- * several q entries completed for each cq completion
- */
- do {
- desc_info = q->tail;
- q->tail = desc_info->next;
- ionic_tx_clean(q, desc_info, cq->tail,
- desc_info->cb_arg);
- desc_info->cb = NULL;
- desc_info->cb_arg = NULL;
- } while (desc_info->index != le16_to_cpu(comp->comp_index));
-
- if (cq->tail->last)
- cq->done_color = !cq->done_color;
-
- cq->tail = cq->tail->next;
- comp = cq->tail->cq_desc;
- DEBUG_STATS_CQE_CNT(cq);
-
- work_done++;
- }
+ if (!color_match(comp->color, cq->done_color))
+ return false;
+
+ /* clean the related q entries, there could be
+ * several q entries completed for each cq completion
+ */
+ do {
+ desc_info = q->tail;
+ q->tail = desc_info->next;
+ ionic_tx_clean(q, desc_info, cq->tail, desc_info->cb_arg);
+ desc_info->cb = NULL;
+ desc_info->cb_arg = NULL;
+ } while (desc_info->index != le16_to_cpu(comp->comp_index));
+
+ return true;
+}
+
+void ionic_tx_flush(struct ionic_cq *cq)
+{
+ struct ionic_dev *idev = &cq->lif->ionic->idev;
+ u32 work_done;
+
+ work_done = ionic_cq_service(cq, cq->num_descs,
+ ionic_tx_service, NULL, NULL);
if (work_done)
ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
- work_done, 0);
+ work_done, IONIC_INTR_CRED_RESET_COALESCE);
}
void ionic_tx_empty(struct ionic_queue *q)
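Note: the refactor above replaces per-queue walk loops with one shared walker plus per-CQE service callbacks. The removed ionic_rx_walk_cq() shows the walker's shape; generically it looks like this (illustrative types):

typedef bool (*my_cq_cb_t)(struct my_cq *cq, struct my_cq_info *info);

static unsigned int my_cq_service(struct my_cq *cq, unsigned int budget,
                                  my_cq_cb_t cb)
{
        unsigned int done = 0;

        /* The callback consumes one completion and reports whether the
         * entry's color matched, i.e. whether it was really ours.
         */
        while (done < budget && cb(cq, cq->tail)) {
                if (cq->tail->last)
                        cq->done_color = !cq->done_color;
                cq->tail = cq->tail->next;
                done++;
        }
        return done;
}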
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.h b/drivers/net/ethernet/pensando/ionic/ionic_txrx.h
index 71973e3c35a6..a5883be0413f 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.h
@@ -11,6 +11,8 @@ void ionic_rx_fill(struct ionic_queue *q);
void ionic_rx_empty(struct ionic_queue *q);
void ionic_tx_empty(struct ionic_queue *q);
int ionic_rx_napi(struct napi_struct *napi, int budget);
+int ionic_tx_napi(struct napi_struct *napi, int budget);
+int ionic_txrx_napi(struct napi_struct *napi, int budget);
netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev);
#endif /* _IONIC_TXRX_H_ */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index 9489089706fe..f1f75b6d0421 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -5038,8 +5038,7 @@ static void qed_update_mac_for_vf_trust_change(struct qed_hwfn *hwfn, int vf_id)
for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
if (ether_addr_equal(vf->shadow_config.macs[i],
vf_info->mac)) {
- memset(vf->shadow_config.macs[i], 0,
- ETH_ALEN);
+ eth_zero_addr(vf->shadow_config.macs[i]);
DP_VERBOSE(hwfn, QED_MSG_IOV,
"Shadow MAC %pM removed for VF 0x%02x, VF trust mode is ON\n",
vf_info->mac, vf_id);
@@ -5048,7 +5047,7 @@ static void qed_update_mac_for_vf_trust_change(struct qed_hwfn *hwfn, int vf_id)
}
ether_addr_copy(vf_info->mac, force_mac);
- memset(vf_info->forced_mac, 0, ETH_ALEN);
+ eth_zero_addr(vf_info->forced_mac);
vf->bulletin.p_virt->valid_bitmap &=
~BIT(MAC_ADDR_FORCED);
qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
@@ -5059,7 +5058,7 @@ static void qed_update_mac_for_vf_trust_change(struct qed_hwfn *hwfn, int vf_id)
if (!vf_info->is_trusted_configured) {
u8 empty_mac[ETH_ALEN];
- memset(empty_mac, 0, ETH_ALEN);
+ eth_zero_addr(empty_mac);
for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
if (ether_addr_equal(vf->shadow_config.macs[i],
empty_mac)) {
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 4250c17940c0..140a392a81bb 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -2674,8 +2674,8 @@ static void qede_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data)
data->feat_flags |= QED_TLV_LSO;
ether_addr_copy(data->mac[0], edev->ndev->dev_addr);
- memset(data->mac[1], 0, ETH_ALEN);
- memset(data->mac[2], 0, ETH_ALEN);
+ eth_zero_addr(data->mac[1]);
+ eth_zero_addr(data->mac[2]);
/* Copy the first two UC macs */
netif_addr_lock_bh(edev->ndev);
i = 1;
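Note: the hunks in these two files are pure substitutions; eth_zero_addr() from <linux/etherdevice.h> is equivalent to the memset it replaces:

/* eth_zero_addr(addr) is simply: */
static inline void my_eth_zero_addr(u8 *addr)
{
        memset(addr, 0x00, ETH_ALEN);   /* ETH_ALEN == 6 */
}

The helper reads better and keeps the MAC length out of call sites.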
diff --git a/drivers/net/ethernet/sfc/ef100.c b/drivers/net/ethernet/sfc/ef100.c
index de611c0f94e7..9729983f4840 100644
--- a/drivers/net/ethernet/sfc/ef100.c
+++ b/drivers/net/ethernet/sfc/ef100.c
@@ -527,6 +527,8 @@ fail:
static const struct pci_device_id ef100_pci_table[] = {
{PCI_DEVICE(PCI_VENDOR_ID_XILINX, 0x0100), /* Riverhead PF */
.driver_data = (unsigned long) &ef100_pf_nic_type },
+ {PCI_DEVICE(PCI_VENDOR_ID_XILINX, 0x1100), /* Riverhead VF */
+ .driver_data = (unsigned long) &ef100_vf_nic_type },
{0} /* end of list */
};
diff --git a/drivers/net/ethernet/sfc/ef100_netdev.c b/drivers/net/ethernet/sfc/ef100_netdev.c
index ec9ca81fed85..63c311ba28b9 100644
--- a/drivers/net/ethernet/sfc/ef100_netdev.c
+++ b/drivers/net/ethernet/sfc/ef100_netdev.c
@@ -86,9 +86,11 @@ static int ef100_net_stop(struct net_device *net_dev)
netif_stop_queue(net_dev);
efx_stop_all(efx);
+ efx_mcdi_mac_fini_stats(efx);
efx_disable_interrupts(efx);
efx_clear_interrupt_affinity(efx);
efx_nic_fini_interrupt(efx);
+ efx_remove_filters(efx);
efx_fini_napi(efx);
efx_remove_channels(efx);
efx_mcdi_free_vis(efx);
@@ -138,6 +140,10 @@ static int ef100_net_open(struct net_device *net_dev)
efx_init_napi(efx);
+ rc = efx_probe_filters(efx);
+ if (rc)
+ goto fail;
+
rc = efx_nic_init_interrupt(efx);
if (rc)
goto fail;
@@ -152,6 +158,10 @@ static int ef100_net_open(struct net_device *net_dev)
*/
(void) efx_mcdi_poll_reboot(efx);
+ rc = efx_mcdi_mac_init_stats(efx);
+ if (rc)
+ goto fail;
+
efx_start_all(efx);
/* Link state detection is normally event-driven; we have
@@ -207,8 +217,14 @@ static const struct net_device_ops ef100_netdev_ops = {
.ndo_open = ef100_net_open,
.ndo_stop = ef100_net_stop,
.ndo_start_xmit = ef100_hard_start_xmit,
+ .ndo_get_stats64 = efx_net_stats,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_rx_mode = efx_set_rx_mode, /* Lookout */
.ndo_get_phys_port_id = efx_get_phys_port_id,
.ndo_get_phys_port_name = efx_get_phys_port_name,
+#ifdef CONFIG_RFS_ACCEL
+ .ndo_rx_flow_steer = efx_filter_rfs,
+#endif
};
/* Netdev registration
diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c
index 6a00f2a2dc2b..8a2126fec078 100644
--- a/drivers/net/ethernet/sfc/ef100_nic.c
+++ b/drivers/net/ethernet/sfc/ef100_nic.c
@@ -260,9 +260,16 @@ static int ef100_ev_process(struct efx_channel *channel, int quota)
ev_type = EFX_QWORD_FIELD(*p_event, ESF_GZ_E_TYPE);
switch (ev_type) {
+ case ESE_GZ_EF100_EV_RX_PKTS:
+ efx_ef100_ev_rx(channel, p_event);
+ ++spent;
+ break;
case ESE_GZ_EF100_EV_MCDI:
efx_mcdi_process_event(channel, p_event);
break;
+ case ESE_GZ_EF100_EV_TX_COMPLETION:
+ ef100_ev_tx(channel, p_event);
+ break;
case ESE_GZ_EF100_EV_DRIVER:
netif_info(efx, drv, efx->net_dev,
"Driver initiated event " EFX_QWORD_FMT "\n",
@@ -344,6 +351,37 @@ static int ef100_phy_probe(struct efx_nic *efx)
return 0;
}
+static int ef100_filter_table_probe(struct efx_nic *efx)
+{
+ return efx_mcdi_filter_table_probe(efx, true);
+}
+
+static int ef100_filter_table_up(struct efx_nic *efx)
+{
+ int rc;
+
+ rc = efx_mcdi_filter_add_vlan(efx, EFX_FILTER_VID_UNSPEC);
+ if (rc) {
+ efx_mcdi_filter_table_down(efx);
+ return rc;
+ }
+
+ rc = efx_mcdi_filter_add_vlan(efx, 0);
+ if (rc) {
+ efx_mcdi_filter_del_vlan(efx, EFX_FILTER_VID_UNSPEC);
+ efx_mcdi_filter_table_down(efx);
+ }
+
+ return rc;
+}
+
+static void ef100_filter_table_down(struct efx_nic *efx)
+{
+ efx_mcdi_filter_del_vlan(efx, 0);
+ efx_mcdi_filter_del_vlan(efx, EFX_FILTER_VID_UNSPEC);
+ efx_mcdi_filter_table_down(efx);
+}
+
/* Other
*/
static int ef100_reconfigure_mac(struct efx_nic *efx, bool mtu_only)
@@ -390,12 +428,24 @@ static int ef100_reset(struct efx_nic *efx, enum reset_type reset_type)
__clear_bit(reset_type, &efx->reset_pending);
rc = dev_open(efx->net_dev, NULL);
} else if (reset_type == RESET_TYPE_ALL) {
+ /* A RESET_TYPE_ALL will cause filters to be removed, so take the
+ * filter table down before the reset and bring it back up afterwards,
+ * to avoid removing the filters twice
+ */
+ down_read(&efx->filter_sem);
+ ef100_filter_table_down(efx);
+ up_read(&efx->filter_sem);
rc = efx_mcdi_reset(efx, reset_type);
if (rc)
return rc;
netif_device_attach(efx->net_dev);
+ down_read(&efx->filter_sem);
+ rc = ef100_filter_table_up(efx);
+ up_read(&efx->filter_sem);
+ if (rc)
+ return rc;
+
rc = dev_open(efx->net_dev, NULL);
} else {
rc = 1; /* Leave the device closed */
@@ -403,6 +453,171 @@ static int ef100_reset(struct efx_nic *efx, enum reset_type reset_type)
return rc;
}
+static void ef100_common_stat_mask(unsigned long *mask)
+{
+ __set_bit(EF100_STAT_port_rx_packets, mask);
+ __set_bit(EF100_STAT_port_tx_packets, mask);
+ __set_bit(EF100_STAT_port_rx_bytes, mask);
+ __set_bit(EF100_STAT_port_tx_bytes, mask);
+ __set_bit(EF100_STAT_port_rx_multicast, mask);
+ __set_bit(EF100_STAT_port_rx_bad, mask);
+ __set_bit(EF100_STAT_port_rx_align_error, mask);
+ __set_bit(EF100_STAT_port_rx_overflow, mask);
+}
+
+static void ef100_ethtool_stat_mask(unsigned long *mask)
+{
+ __set_bit(EF100_STAT_port_tx_pause, mask);
+ __set_bit(EF100_STAT_port_tx_unicast, mask);
+ __set_bit(EF100_STAT_port_tx_multicast, mask);
+ __set_bit(EF100_STAT_port_tx_broadcast, mask);
+ __set_bit(EF100_STAT_port_tx_lt64, mask);
+ __set_bit(EF100_STAT_port_tx_64, mask);
+ __set_bit(EF100_STAT_port_tx_65_to_127, mask);
+ __set_bit(EF100_STAT_port_tx_128_to_255, mask);
+ __set_bit(EF100_STAT_port_tx_256_to_511, mask);
+ __set_bit(EF100_STAT_port_tx_512_to_1023, mask);
+ __set_bit(EF100_STAT_port_tx_1024_to_15xx, mask);
+ __set_bit(EF100_STAT_port_tx_15xx_to_jumbo, mask);
+ __set_bit(EF100_STAT_port_rx_good, mask);
+ __set_bit(EF100_STAT_port_rx_pause, mask);
+ __set_bit(EF100_STAT_port_rx_unicast, mask);
+ __set_bit(EF100_STAT_port_rx_broadcast, mask);
+ __set_bit(EF100_STAT_port_rx_lt64, mask);
+ __set_bit(EF100_STAT_port_rx_64, mask);
+ __set_bit(EF100_STAT_port_rx_65_to_127, mask);
+ __set_bit(EF100_STAT_port_rx_128_to_255, mask);
+ __set_bit(EF100_STAT_port_rx_256_to_511, mask);
+ __set_bit(EF100_STAT_port_rx_512_to_1023, mask);
+ __set_bit(EF100_STAT_port_rx_1024_to_15xx, mask);
+ __set_bit(EF100_STAT_port_rx_15xx_to_jumbo, mask);
+ __set_bit(EF100_STAT_port_rx_gtjumbo, mask);
+ __set_bit(EF100_STAT_port_rx_bad_gtjumbo, mask);
+ __set_bit(EF100_STAT_port_rx_length_error, mask);
+ __set_bit(EF100_STAT_port_rx_nodesc_drops, mask);
+ __set_bit(GENERIC_STAT_rx_nodesc_trunc, mask);
+ __set_bit(GENERIC_STAT_rx_noskb_drops, mask);
+}
+
+#define EF100_DMA_STAT(ext_name, mcdi_name) \
+ [EF100_STAT_ ## ext_name] = \
+ { #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
+
+static const struct efx_hw_stat_desc ef100_stat_desc[EF100_STAT_COUNT] = {
+ EF100_DMA_STAT(port_tx_bytes, TX_BYTES),
+ EF100_DMA_STAT(port_tx_packets, TX_PKTS),
+ EF100_DMA_STAT(port_tx_pause, TX_PAUSE_PKTS),
+ EF100_DMA_STAT(port_tx_unicast, TX_UNICAST_PKTS),
+ EF100_DMA_STAT(port_tx_multicast, TX_MULTICAST_PKTS),
+ EF100_DMA_STAT(port_tx_broadcast, TX_BROADCAST_PKTS),
+ EF100_DMA_STAT(port_tx_lt64, TX_LT64_PKTS),
+ EF100_DMA_STAT(port_tx_64, TX_64_PKTS),
+ EF100_DMA_STAT(port_tx_65_to_127, TX_65_TO_127_PKTS),
+ EF100_DMA_STAT(port_tx_128_to_255, TX_128_TO_255_PKTS),
+ EF100_DMA_STAT(port_tx_256_to_511, TX_256_TO_511_PKTS),
+ EF100_DMA_STAT(port_tx_512_to_1023, TX_512_TO_1023_PKTS),
+ EF100_DMA_STAT(port_tx_1024_to_15xx, TX_1024_TO_15XX_PKTS),
+ EF100_DMA_STAT(port_tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS),
+ EF100_DMA_STAT(port_rx_bytes, RX_BYTES),
+ EF100_DMA_STAT(port_rx_packets, RX_PKTS),
+ EF100_DMA_STAT(port_rx_good, RX_GOOD_PKTS),
+ EF100_DMA_STAT(port_rx_bad, RX_BAD_FCS_PKTS),
+ EF100_DMA_STAT(port_rx_pause, RX_PAUSE_PKTS),
+ EF100_DMA_STAT(port_rx_unicast, RX_UNICAST_PKTS),
+ EF100_DMA_STAT(port_rx_multicast, RX_MULTICAST_PKTS),
+ EF100_DMA_STAT(port_rx_broadcast, RX_BROADCAST_PKTS),
+ EF100_DMA_STAT(port_rx_lt64, RX_UNDERSIZE_PKTS),
+ EF100_DMA_STAT(port_rx_64, RX_64_PKTS),
+ EF100_DMA_STAT(port_rx_65_to_127, RX_65_TO_127_PKTS),
+ EF100_DMA_STAT(port_rx_128_to_255, RX_128_TO_255_PKTS),
+ EF100_DMA_STAT(port_rx_256_to_511, RX_256_TO_511_PKTS),
+ EF100_DMA_STAT(port_rx_512_to_1023, RX_512_TO_1023_PKTS),
+ EF100_DMA_STAT(port_rx_1024_to_15xx, RX_1024_TO_15XX_PKTS),
+ EF100_DMA_STAT(port_rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS),
+ EF100_DMA_STAT(port_rx_gtjumbo, RX_GTJUMBO_PKTS),
+ EF100_DMA_STAT(port_rx_bad_gtjumbo, RX_JABBER_PKTS),
+ EF100_DMA_STAT(port_rx_align_error, RX_ALIGN_ERROR_PKTS),
+ EF100_DMA_STAT(port_rx_length_error, RX_LENGTH_ERROR_PKTS),
+ EF100_DMA_STAT(port_rx_overflow, RX_OVERFLOW_PKTS),
+ EF100_DMA_STAT(port_rx_nodesc_drops, RX_NODESC_DROPS),
+ EFX_GENERIC_SW_STAT(rx_nodesc_trunc),
+ EFX_GENERIC_SW_STAT(rx_noskb_drops),
+};
+
+static size_t ef100_describe_stats(struct efx_nic *efx, u8 *names)
+{
+ DECLARE_BITMAP(mask, EF100_STAT_COUNT) = {};
+
+ ef100_ethtool_stat_mask(mask);
+ return efx_nic_describe_stats(ef100_stat_desc, EF100_STAT_COUNT,
+ mask, names);
+}
+
+static size_t ef100_update_stats_common(struct efx_nic *efx, u64 *full_stats,
+ struct rtnl_link_stats64 *core_stats)
+{
+ struct ef100_nic_data *nic_data = efx->nic_data;
+ DECLARE_BITMAP(mask, EF100_STAT_COUNT) = {};
+ size_t stats_count = 0, index;
+ u64 *stats = nic_data->stats;
+
+ ef100_ethtool_stat_mask(mask);
+
+ if (full_stats) {
+ for_each_set_bit(index, mask, EF100_STAT_COUNT) {
+ if (ef100_stat_desc[index].name) {
+ *full_stats++ = stats[index];
+ ++stats_count;
+ }
+ }
+ }
+
+ if (!core_stats)
+ return stats_count;
+
+ core_stats->rx_packets = stats[EF100_STAT_port_rx_packets];
+ core_stats->tx_packets = stats[EF100_STAT_port_tx_packets];
+ core_stats->rx_bytes = stats[EF100_STAT_port_rx_bytes];
+ core_stats->tx_bytes = stats[EF100_STAT_port_tx_bytes];
+ core_stats->rx_dropped = stats[EF100_STAT_port_rx_nodesc_drops] +
+ stats[GENERIC_STAT_rx_nodesc_trunc] +
+ stats[GENERIC_STAT_rx_noskb_drops];
+ core_stats->multicast = stats[EF100_STAT_port_rx_multicast];
+ core_stats->rx_length_errors =
+ stats[EF100_STAT_port_rx_gtjumbo] +
+ stats[EF100_STAT_port_rx_length_error];
+ core_stats->rx_crc_errors = stats[EF100_STAT_port_rx_bad];
+ core_stats->rx_frame_errors =
+ stats[EF100_STAT_port_rx_align_error];
+ core_stats->rx_fifo_errors = stats[EF100_STAT_port_rx_overflow];
+ core_stats->rx_errors = (core_stats->rx_length_errors +
+ core_stats->rx_crc_errors +
+ core_stats->rx_frame_errors);
+
+ return stats_count;
+}
+
+static size_t ef100_update_stats(struct efx_nic *efx,
+ u64 *full_stats,
+ struct rtnl_link_stats64 *core_stats)
+{
+ __le64 *mc_stats = kmalloc(array_size(efx->num_mac_stats, sizeof(__le64)), GFP_ATOMIC);
+ struct ef100_nic_data *nic_data = efx->nic_data;
+ DECLARE_BITMAP(mask, EF100_STAT_COUNT) = {};
+ u64 *stats = nic_data->stats;
+
+ if (!mc_stats)
+ return 0;
+
+ ef100_common_stat_mask(mask);
+ ef100_ethtool_stat_mask(mask);
+
+ efx_nic_copy_stats(efx, mc_stats);
+ efx_nic_update_stats(ef100_stat_desc, EF100_STAT_COUNT, mask,
+ stats, mc_stats, false);
+
+ kfree(mc_stats);
+
+ return ef100_update_stats_common(efx, full_stats, core_stats);
+}
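The stats machinery above pairs one fixed descriptor table with per-caller
bitmaps: the common mask covers what the netdev core stats need, and the
ethtool mask selects the strings and values exposed to userspace. As a
standalone sketch of that mask-over-table pattern (hypothetical stat names,
plain loops standing in for the kernel's __set_bit()/for_each_set_bit()):

#include <stdio.h>
#include <stdint.h>

enum { STAT_RX_PKTS, STAT_TX_PKTS, STAT_RX_BAD, STAT_COUNT };

static const char *stat_names[STAT_COUNT] = {
	[STAT_RX_PKTS] = "rx_packets",
	[STAT_TX_PKTS] = "tx_packets",
	[STAT_RX_BAD]  = "rx_bad",
};

int main(void)
{
	uint64_t hw[STAT_COUNT] = { 100, 200, 3 };  /* stands in for the DMA'd MAC stats */
	uint64_t stats[STAT_COUNT] = { 0 };
	unsigned long mask = 0;
	int i;

	mask |= 1UL << STAT_RX_PKTS;                /* like __set_bit() */
	mask |= 1UL << STAT_RX_BAD;

	for (i = 0; i < STAT_COUNT; i++)            /* like for_each_set_bit() */
		if (mask & (1UL << i))
			stats[i] = hw[i];

	for (i = 0; i < STAT_COUNT; i++)
		if (mask & (1UL << i))
			printf("%s: %llu\n", stat_names[i],
			       (unsigned long long)stats[i]);
	return 0;
}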
+
static int efx_ef100_get_phys_port_id(struct efx_nic *efx,
struct netdev_phys_item_id *ppid)
{
@@ -417,6 +632,50 @@ static int efx_ef100_get_phys_port_id(struct efx_nic *efx,
return 0;
}
+static int efx_ef100_irq_test_generate(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_TRIGGER_INTERRUPT_IN_LEN);
+
+ BUILD_BUG_ON(MC_CMD_TRIGGER_INTERRUPT_OUT_LEN != 0);
+
+ MCDI_SET_DWORD(inbuf, TRIGGER_INTERRUPT_IN_INTR_LEVEL, efx->irq_level);
+ return efx_mcdi_rpc_quiet(efx, MC_CMD_TRIGGER_INTERRUPT,
+ inbuf, sizeof(inbuf), NULL, 0, NULL);
+}
+
+#define EFX_EF100_TEST 1
+
+static void efx_ef100_ev_test_generate(struct efx_channel *channel)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
+ struct efx_nic *efx = channel->efx;
+ efx_qword_t event;
+ int rc;
+
+ EFX_POPULATE_QWORD_2(event,
+ ESF_GZ_E_TYPE, ESE_GZ_EF100_EV_DRIVER,
+ ESF_GZ_DRIVER_DATA, EFX_EF100_TEST);
+
+ MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel);
+
+ /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
+ * already swapped the data to little-endian order.
+ */
+ memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0],
+ sizeof(efx_qword_t));
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_DRIVER_EVENT, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ if (rc && (rc != -ENETDOWN))
+ goto fail;
+
+ return;
+
+fail:
+ WARN_ON(true);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+}
+
static unsigned int ef100_check_caps(const struct efx_nic *efx,
u8 flag, u32 offset)
{
@@ -436,10 +695,16 @@ static unsigned int ef100_check_caps(const struct efx_nic *efx,
/* NIC level access functions
*/
+#define EF100_OFFLOAD_FEATURES (NETIF_F_HW_CSUM | NETIF_F_RXCSUM | \
+ NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_FRAGLIST | \
+ NETIF_F_RXHASH | NETIF_F_RXFCS | NETIF_F_TSO_ECN | NETIF_F_RXALL | \
+ NETIF_F_TSO_MANGLEID | NETIF_F_HW_VLAN_CTAG_TX)
+
const struct efx_nic_type ef100_pf_nic_type = {
.revision = EFX_REV_EF100,
.is_vf = false,
.probe = ef100_probe_pf,
+ .offload_features = EF100_OFFLOAD_FEATURES,
.mcdi_max_ver = 2,
.mcdi_request = ef100_mcdi_request,
.mcdi_poll_response = ef100_mcdi_poll_response,
@@ -447,6 +712,7 @@ const struct efx_nic_type ef100_pf_nic_type = {
.mcdi_poll_reboot = ef100_mcdi_poll_reboot,
.mcdi_reboot_detected = ef100_mcdi_reboot_detected,
.irq_enable_master = efx_port_dummy_op_void,
+ .irq_test_generate = efx_ef100_irq_test_generate,
.irq_disable_non_ev = efx_port_dummy_op_void,
.push_irq_moderation = efx_channel_dummy_op_void,
.min_interrupt_mode = EFX_INT_MODE_MSIX,
@@ -463,6 +729,7 @@ const struct efx_nic_type ef100_pf_nic_type = {
.irq_handle_msi = ef100_msi_interrupt,
.ev_process = ef100_ev_process,
.ev_read_ack = ef100_ev_read_ack,
+ .ev_test_generate = efx_ef100_ev_test_generate,
.tx_probe = ef100_tx_probe,
.tx_init = ef100_tx_init,
.tx_write = ef100_tx_write,
@@ -472,10 +739,41 @@ const struct efx_nic_type ef100_pf_nic_type = {
.rx_remove = efx_mcdi_rx_remove,
.rx_write = ef100_rx_write,
.rx_packet = __ef100_rx_packet,
+ .fini_dmaq = efx_fini_dmaq,
+ .max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS,
+ .filter_table_probe = ef100_filter_table_up,
+ .filter_table_restore = efx_mcdi_filter_table_restore,
+ .filter_table_remove = ef100_filter_table_down,
+ .filter_insert = efx_mcdi_filter_insert,
+ .filter_remove_safe = efx_mcdi_filter_remove_safe,
+ .filter_get_safe = efx_mcdi_filter_get_safe,
+ .filter_clear_rx = efx_mcdi_filter_clear_rx,
+ .filter_count_rx_used = efx_mcdi_filter_count_rx_used,
+ .filter_get_rx_id_limit = efx_mcdi_filter_get_rx_id_limit,
+ .filter_get_rx_ids = efx_mcdi_filter_get_rx_ids,
+#ifdef CONFIG_RFS_ACCEL
+ .filter_rfs_expire_one = efx_mcdi_filter_rfs_expire_one,
+#endif
.get_phys_port_id = efx_ef100_get_phys_port_id,
+ .rx_prefix_size = ESE_GZ_RX_PKT_PREFIX_LEN,
+ .rx_hash_offset = ESF_GZ_RX_PREFIX_RSS_HASH_LBN / 8,
+ .rx_ts_offset = ESF_GZ_RX_PREFIX_PARTIAL_TSTAMP_LBN / 8,
+ .rx_hash_key_size = 40,
+ .rx_pull_rss_config = efx_mcdi_rx_pull_rss_config,
+ .rx_push_rss_config = efx_mcdi_pf_rx_push_rss_config,
+ .rx_push_rss_context_config = efx_mcdi_rx_push_rss_context_config,
+ .rx_pull_rss_context_config = efx_mcdi_rx_pull_rss_context_config,
+ .rx_restore_rss_contexts = efx_mcdi_rx_restore_rss_contexts,
+
.reconfigure_mac = ef100_reconfigure_mac,
+ .test_nvram = efx_new_mcdi_nvram_test_all,
+ .describe_stats = ef100_describe_stats,
+ .start_stats = efx_mcdi_mac_start_stats,
+ .update_stats = ef100_update_stats,
+ .pull_stats = efx_mcdi_mac_pull_stats,
+ .stop_stats = efx_mcdi_mac_stop_stats,
/* Per-type bar/size configuration not used on ef100. Location of
* registers is defined by extended capabilities.
@@ -485,6 +783,310 @@ const struct efx_nic_type ef100_pf_nic_type = {
};
+const struct efx_nic_type ef100_vf_nic_type = {
+ .revision = EFX_REV_EF100,
+ .is_vf = true,
+ .probe = ef100_probe_vf,
+ .offload_features = EF100_OFFLOAD_FEATURES,
+ .mcdi_max_ver = 2,
+ .mcdi_request = ef100_mcdi_request,
+ .mcdi_poll_response = ef100_mcdi_poll_response,
+ .mcdi_read_response = ef100_mcdi_read_response,
+ .mcdi_poll_reboot = ef100_mcdi_poll_reboot,
+ .mcdi_reboot_detected = ef100_mcdi_reboot_detected,
+ .irq_enable_master = efx_port_dummy_op_void,
+ .irq_test_generate = efx_ef100_irq_test_generate,
+ .irq_disable_non_ev = efx_port_dummy_op_void,
+ .push_irq_moderation = efx_channel_dummy_op_void,
+ .min_interrupt_mode = EFX_INT_MODE_MSIX,
+ .map_reset_reason = ef100_map_reset_reason,
+ .map_reset_flags = ef100_map_reset_flags,
+ .reset = ef100_reset,
+ .check_caps = ef100_check_caps,
+ .ev_probe = ef100_ev_probe,
+ .ev_init = ef100_ev_init,
+ .ev_fini = efx_mcdi_ev_fini,
+ .ev_remove = efx_mcdi_ev_remove,
+ .irq_handle_msi = ef100_msi_interrupt,
+ .ev_process = ef100_ev_process,
+ .ev_read_ack = ef100_ev_read_ack,
+ .ev_test_generate = efx_ef100_ev_test_generate,
+ .tx_probe = ef100_tx_probe,
+ .tx_init = ef100_tx_init,
+ .tx_write = ef100_tx_write,
+ .tx_enqueue = ef100_enqueue_skb,
+ .rx_probe = efx_mcdi_rx_probe,
+ .rx_init = efx_mcdi_rx_init,
+ .rx_remove = efx_mcdi_rx_remove,
+ .rx_write = ef100_rx_write,
+ .rx_packet = __ef100_rx_packet,
+ .fini_dmaq = efx_fini_dmaq,
+ .max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS,
+ .filter_table_probe = ef100_filter_table_up,
+ .filter_table_restore = efx_mcdi_filter_table_restore,
+ .filter_table_remove = ef100_filter_table_down,
+ .filter_insert = efx_mcdi_filter_insert,
+ .filter_remove_safe = efx_mcdi_filter_remove_safe,
+ .filter_get_safe = efx_mcdi_filter_get_safe,
+ .filter_clear_rx = efx_mcdi_filter_clear_rx,
+ .filter_count_rx_used = efx_mcdi_filter_count_rx_used,
+ .filter_get_rx_id_limit = efx_mcdi_filter_get_rx_id_limit,
+ .filter_get_rx_ids = efx_mcdi_filter_get_rx_ids,
+ .filter_rfs_expire_one = efx_mcdi_filter_rfs_expire_one,
+
+ .rx_prefix_size = ESE_GZ_RX_PKT_PREFIX_LEN,
+ .rx_hash_offset = ESF_GZ_RX_PREFIX_RSS_HASH_LBN / 8,
+ .rx_ts_offset = ESF_GZ_RX_PREFIX_PARTIAL_TSTAMP_LBN / 8,
+ .rx_hash_key_size = 40,
+ .rx_pull_rss_config = efx_mcdi_rx_pull_rss_config,
+ .rx_push_rss_config = efx_mcdi_pf_rx_push_rss_config,
+ .rx_restore_rss_contexts = efx_mcdi_rx_restore_rss_contexts,
+
+ .reconfigure_mac = ef100_reconfigure_mac,
+ .test_nvram = efx_new_mcdi_nvram_test_all,
+ .describe_stats = ef100_describe_stats,
+ .start_stats = efx_mcdi_mac_start_stats,
+ .update_stats = ef100_update_stats,
+ .pull_stats = efx_mcdi_mac_pull_stats,
+ .stop_stats = efx_mcdi_mac_stop_stats,
+
+ .mem_bar = NULL,
+ .mem_map_size = NULL,
+
+};
+
+static int compare_versions(const char *a, const char *b)
+{
+ int a_major, a_minor, a_point, a_patch;
+ int b_major, b_minor, b_point, b_patch;
+ int a_matched, b_matched;
+
+ a_matched = sscanf(a, "%d.%d.%d.%d", &a_major, &a_minor, &a_point, &a_patch);
+ b_matched = sscanf(b, "%d.%d.%d.%d", &b_major, &b_minor, &b_point, &b_patch);
+
+ if (a_matched == 4 && b_matched != 4)
+ return +1;
+
+ if (a_matched != 4 && b_matched == 4)
+ return -1;
+
+ if (a_matched != 4 && b_matched != 4)
+ return 0;
+
+ if (a_major != b_major)
+ return a_major - b_major;
+
+ if (a_minor != b_minor)
+ return a_minor - b_minor;
+
+ if (a_point != b_point)
+ return a_point - b_point;
+
+ return a_patch - b_patch;
+}
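compare_versions() orders firmware versions numerically field by field
rather than lexicographically, and treats a string that does not parse as
four dotted integers as older than one that does. The function only needs
sscanf(), so copying it verbatim next to a hypothetical userspace harness
lets its ordering be spot-checked:

#include <assert.h>
#include <stdio.h>	/* for sscanf() in the copied function */

int main(void)
{
	assert(compare_versions("1.1.0.1000", "1.1.0.999") > 0);  /* numeric, not lexicographic */
	assert(compare_versions("1.10.0.0", "1.9.0.0") > 0);
	assert(compare_versions("2.0.0.0", "2.0.0.0") == 0);
	assert(compare_versions("garbage", "1.0.0.0") < 0);       /* unparseable sorts older */
	return 0;
}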
+
+enum ef100_tlv_state_machine {
+ EF100_TLV_TYPE,
+ EF100_TLV_TYPE_CONT,
+ EF100_TLV_LENGTH,
+ EF100_TLV_VALUE
+};
+
+struct ef100_tlv_state {
+ enum ef100_tlv_state_machine state;
+ u64 value;
+ u32 value_offset;
+ u16 type;
+ u8 len;
+};
+
+static int ef100_tlv_feed(struct ef100_tlv_state *state, u8 byte)
+{
+ switch (state->state) {
+ case EF100_TLV_TYPE:
+ state->type = byte & 0x7f;
+ state->state = (byte & 0x80) ? EF100_TLV_TYPE_CONT
+ : EF100_TLV_LENGTH;
+ /* Clear the old value, ready to read in a new entry */
+ state->value = 0;
+ state->value_offset = 0;
+ return 0;
+ case EF100_TLV_TYPE_CONT:
+ state->type |= byte << 7;
+ state->state = EF100_TLV_LENGTH;
+ return 0;
+ case EF100_TLV_LENGTH:
+ state->len = byte;
+ /* We only handle TLVs that fit in a u64 */
+ if (state->len > sizeof(state->value))
+ return -EOPNOTSUPP;
+ /* len may be zero, implying a value of zero */
+ state->state = state->len ? EF100_TLV_VALUE : EF100_TLV_TYPE;
+ return 0;
+ case EF100_TLV_VALUE:
+ state->value |= ((u64)byte) << (state->value_offset * 8);
+ state->value_offset++;
+ if (state->value_offset >= state->len)
+ state->state = EF100_TLV_TYPE;
+ return 0;
+ default: /* state machine error, can't happen */
+ WARN_ON_ONCE(1);
+ return -EIO;
+ }
+}
+
+static int ef100_process_design_param(struct efx_nic *efx,
+ const struct ef100_tlv_state *reader)
+{
+ struct ef100_nic_data *nic_data = efx->nic_data;
+
+ switch (reader->type) {
+ case ESE_EF100_DP_GZ_PAD: /* padding, skip it */
+ return 0;
+ case ESE_EF100_DP_GZ_PARTIAL_TSTAMP_SUB_NANO_BITS:
+ /* Driver doesn't support timestamping yet, so we don't care */
+ return 0;
+ case ESE_EF100_DP_GZ_EVQ_UNSOL_CREDIT_SEQ_BITS:
+ /* Driver doesn't support unsolicited-event credits yet, so
+ * we don't care
+ */
+ return 0;
+ case ESE_EF100_DP_GZ_NMMU_GROUP_SIZE:
+ /* Driver doesn't manage the NMMU (so we don't care) */
+ return 0;
+ case ESE_EF100_DP_GZ_RX_L4_CSUM_PROTOCOLS:
+ /* Driver uses CHECKSUM_COMPLETE, so we don't care about
+ * protocol checksum validation
+ */
+ return 0;
+ case ESE_EF100_DP_GZ_TSO_MAX_HDR_LEN:
+ nic_data->tso_max_hdr_len = min_t(u64, reader->value, 0xffff);
+ return 0;
+ case ESE_EF100_DP_GZ_TSO_MAX_HDR_NUM_SEGS:
+ /* We always put HDR_NUM_SEGS=1 in our TSO descriptors */
+ if (!reader->value) {
+ netif_err(efx, probe, efx->net_dev,
+ "TSO_MAX_HDR_NUM_SEGS < 1\n");
+ return -EOPNOTSUPP;
+ }
+ return 0;
+ case ESE_EF100_DP_GZ_RXQ_SIZE_GRANULARITY:
+ case ESE_EF100_DP_GZ_TXQ_SIZE_GRANULARITY:
+ /* Our TXQ and RXQ sizes are always power-of-two and at least
+ * EFX_MIN_DMAQ_SIZE, and thus divisible by it, so we just need to
+ * check that EFX_MIN_DMAQ_SIZE is divisible by GRANULARITY.
+ * This is very unlikely to fail.
+ */
+ if (EFX_MIN_DMAQ_SIZE % reader->value) {
+ netif_err(efx, probe, efx->net_dev,
+ "%s size granularity is %llu, can't guarantee safety\n",
+ reader->type == ESE_EF100_DP_GZ_RXQ_SIZE_GRANULARITY ? "RXQ" : "TXQ",
+ reader->value);
+ return -EOPNOTSUPP;
+ }
+ return 0;
+ case ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_LEN:
+ nic_data->tso_max_payload_len = min_t(u64, reader->value, GSO_MAX_SIZE);
+ efx->net_dev->gso_max_size = nic_data->tso_max_payload_len;
+ return 0;
+ case ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_NUM_SEGS:
+ nic_data->tso_max_payload_num_segs = min_t(u64, reader->value, 0xffff);
+ efx->net_dev->gso_max_segs = nic_data->tso_max_payload_num_segs;
+ return 0;
+ case ESE_EF100_DP_GZ_TSO_MAX_NUM_FRAMES:
+ nic_data->tso_max_frames = min_t(u64, reader->value, 0xffff);
+ return 0;
+ case ESE_EF100_DP_GZ_COMPAT:
+ if (reader->value) {
+ netif_err(efx, probe, efx->net_dev,
+ "DP_COMPAT has unknown bits %#llx, driver not compatible with this hw\n",
+ reader->value);
+ return -EOPNOTSUPP;
+ }
+ return 0;
+ case ESE_EF100_DP_GZ_MEM2MEM_MAX_LEN:
+ /* Driver doesn't use mem2mem transfers */
+ return 0;
+ case ESE_EF100_DP_GZ_EVQ_TIMER_TICK_NANOS:
+ /* Driver doesn't currently use EVQ_TIMER */
+ return 0;
+ case ESE_EF100_DP_GZ_NMMU_PAGE_SIZES:
+ /* Driver doesn't manage the NMMU (so we don't care) */
+ return 0;
+ case ESE_EF100_DP_GZ_VI_STRIDES:
+ /* We never try to set the VI stride, and we don't rely on
+ * being able to find VIs past VI 0 until after we've learned
+ * the current stride from MC_CMD_GET_CAPABILITIES.
+ * So the value of this shouldn't matter.
+ */
+ if (reader->value != ESE_EF100_DP_GZ_VI_STRIDES_DEFAULT)
+ netif_dbg(efx, probe, efx->net_dev,
+ "NIC has other than default VI_STRIDES (mask "
+ "%#llx), early probing might use wrong one\n",
+ reader->value);
+ return 0;
+ case ESE_EF100_DP_GZ_RX_MAX_RUNT:
+ /* Driver doesn't look at L2_STATUS:LEN_ERR bit, so we don't
+ * care whether it indicates runt or overlength for any given
+ * packet, so we don't care about this parameter.
+ */
+ return 0;
+ default:
+ /* Host interface says "Drivers should ignore design parameters
+ * that they do not recognise."
+ */
+ netif_dbg(efx, probe, efx->net_dev,
+ "Ignoring unrecognised design parameter %u\n",
+ reader->type);
+ return 0;
+ }
+}
+
+static int ef100_check_design_params(struct efx_nic *efx)
+{
+ struct ef100_tlv_state reader = {};
+ u32 total_len, offset = 0;
+ efx_dword_t reg;
+ int rc = 0, i;
+ u32 data;
+
+ efx_readd(efx, &reg, ER_GZ_PARAMS_TLV_LEN);
+ total_len = EFX_DWORD_FIELD(reg, EFX_DWORD_0);
+ netif_dbg(efx, probe, efx->net_dev, "%u bytes of design parameters\n",
+ total_len);
+ while (offset < total_len) {
+ efx_readd(efx, &reg, ER_GZ_PARAMS_TLV + offset);
+ data = EFX_DWORD_FIELD(reg, EFX_DWORD_0);
+ for (i = 0; i < sizeof(data); i++) {
+ rc = ef100_tlv_feed(&reader, data);
+ /* Got a complete value? */
+ if (!rc && reader.state == EF100_TLV_TYPE)
+ rc = ef100_process_design_param(efx, &reader);
+ if (rc)
+ goto out;
+ data >>= 8;
+ offset++;
+ }
+ }
+ /* Check we didn't end halfway through a TLV entry, which could either
+ * mean that the TLV stream is truncated or just that it's corrupted
+ * and our state machine is out of sync.
+ */
+ if (reader.state != EF100_TLV_TYPE) {
+ if (reader.state == EF100_TLV_TYPE_CONT)
+ netif_err(efx, probe, efx->net_dev,
+ "truncated design parameter (incomplete type %u)\n",
+ reader.type);
+ else
+ netif_err(efx, probe, efx->net_dev,
+ "truncated design parameter %u\n",
+ reader.type);
+ rc = -EIO;
+ }
+out:
+ return rc;
+}
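Each design parameter is thus a 7-bit type (the top bit of the first byte
flagging a second type byte), a one-byte length, and a little-endian value
of at most eight bytes, where a zero length encodes a value of zero. A
hypothetical harness, assuming it is compiled next to the parser above,
that decodes a canned stream and prints each completed entry:

/* Not part of the patch: exercise ef100_tlv_feed() on a canned stream */
static void ef100_tlv_demo(void)
{
	static const u8 stream[] = {
		0x05, 0x02, 0x34, 0x12,	/* type 5, len 2, value 0x1234 */
		0x81, 0x01, 0x00,	/* type 129 via continuation byte, len 0 */
	};
	struct ef100_tlv_state reader = {};
	size_t i;

	for (i = 0; i < sizeof(stream); i++) {
		if (ef100_tlv_feed(&reader, stream[i]))
			break;
		if (reader.state == EF100_TLV_TYPE)	/* entry complete */
			pr_info("type %u value %#llx\n",
				reader.type, reader.value);
	}
}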
+
/* NIC probe and remove
*/
static int ef100_probe_main(struct efx_nic *efx)
@@ -492,6 +1094,7 @@ static int ef100_probe_main(struct efx_nic *efx)
unsigned int bar_size = resource_size(&efx->pci_dev->resource[efx->mem_bar]);
struct net_device *net_dev = efx->net_dev;
struct ef100_nic_data *nic_data;
+ char fw_version[32];
int i, rc;
if (WARN_ON(bar_size == 0))
@@ -505,6 +1108,20 @@ static int ef100_probe_main(struct efx_nic *efx)
net_dev->features |= efx->type->offload_features;
net_dev->hw_features |= efx->type->offload_features;
+ /* Populate design-parameter defaults */
+ nic_data->tso_max_hdr_len = ESE_EF100_DP_GZ_TSO_MAX_HDR_LEN_DEFAULT;
+ nic_data->tso_max_frames = ESE_EF100_DP_GZ_TSO_MAX_NUM_FRAMES_DEFAULT;
+ nic_data->tso_max_payload_num_segs = ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_NUM_SEGS_DEFAULT;
+ nic_data->tso_max_payload_len = ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_LEN_DEFAULT;
+ net_dev->gso_max_segs = ESE_EF100_DP_GZ_TSO_MAX_HDR_NUM_SEGS_DEFAULT;
+ /* Read design parameters */
+ rc = ef100_check_design_params(efx);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "Unsupported design parameters\n");
+ goto fail;
+ }
+
/* we assume later that we can copy from this buffer in dwords */
BUILD_BUG_ON(MCDI_CTL_SDU_LEN_MAX_V2 % 4);
@@ -551,6 +1168,10 @@ static int ef100_probe_main(struct efx_nic *efx)
if (rc)
goto fail;
+ rc = efx_get_pf_index(efx, &nic_data->pf_index);
+ if (rc)
+ goto fail;
+
rc = efx_ef100_init_datapath_caps(efx);
if (rc < 0)
goto fail;
@@ -562,6 +1183,21 @@ static int ef100_probe_main(struct efx_nic *efx)
goto fail;
efx->port_num = rc;
+ efx_mcdi_print_fwver(efx, fw_version, sizeof(fw_version));
+ netif_dbg(efx, drv, efx->net_dev, "Firmware version %s\n", fw_version);
+
+ if (compare_versions(fw_version, "1.1.0.1000") < 0) {
+ netif_info(efx, drv, efx->net_dev, "Firmware uses old event descriptors\n");
+ rc = -EINVAL;
+ goto fail;
+ }
+
+ if (efx_has_cap(efx, UNSOL_EV_CREDIT_SUPPORTED)) {
+ netif_info(efx, drv, efx->net_dev, "Firmware uses unsolicited-event credits\n");
+ rc = -EINVAL;
+ goto fail;
+ }
+
rc = ef100_phy_probe(efx);
if (rc)
goto fail;
@@ -570,6 +1206,18 @@ static int ef100_probe_main(struct efx_nic *efx)
if (rc)
goto fail;
+ down_write(&efx->filter_sem);
+ rc = ef100_filter_table_probe(efx);
+ up_write(&efx->filter_sem);
+ if (rc)
+ goto fail;
+
+ netdev_rss_key_fill(efx->rss_context.rx_hash_key,
+ sizeof(efx->rss_context.rx_hash_key));
+
+ /* Don't fail init if RSS setup doesn't work. */
+ efx_mcdi_push_default_indir_table(efx, efx->n_rx_channels);
+
rc = ef100_register_netdev(efx);
if (rc)
goto fail;
@@ -602,11 +1250,20 @@ fail:
return rc;
}
+int ef100_probe_vf(struct efx_nic *efx)
+{
+ return ef100_probe_main(efx);
+}
+
void ef100_remove(struct efx_nic *efx)
{
struct ef100_nic_data *nic_data = efx->nic_data;
ef100_unregister_netdev(efx);
+
+ down_write(&efx->filter_sem);
+ efx_mcdi_filter_table_remove(efx);
+ up_write(&efx->filter_sem);
efx_fini_channels(efx);
kfree(efx->phy_data);
efx->phy_data = NULL;
diff --git a/drivers/net/ethernet/sfc/ef100_nic.h b/drivers/net/ethernet/sfc/ef100_nic.h
index 6367bbb2c9b3..e799688d5264 100644
--- a/drivers/net/ethernet/sfc/ef100_nic.h
+++ b/drivers/net/ethernet/sfc/ef100_nic.h
@@ -13,19 +13,67 @@
#include "nic_common.h"
extern const struct efx_nic_type ef100_pf_nic_type;
+extern const struct efx_nic_type ef100_vf_nic_type;
int ef100_probe_pf(struct efx_nic *efx);
+int ef100_probe_vf(struct efx_nic *efx);
void ef100_remove(struct efx_nic *efx);
+enum {
+ EF100_STAT_port_tx_bytes = GENERIC_STAT_COUNT,
+ EF100_STAT_port_tx_packets,
+ EF100_STAT_port_tx_pause,
+ EF100_STAT_port_tx_unicast,
+ EF100_STAT_port_tx_multicast,
+ EF100_STAT_port_tx_broadcast,
+ EF100_STAT_port_tx_lt64,
+ EF100_STAT_port_tx_64,
+ EF100_STAT_port_tx_65_to_127,
+ EF100_STAT_port_tx_128_to_255,
+ EF100_STAT_port_tx_256_to_511,
+ EF100_STAT_port_tx_512_to_1023,
+ EF100_STAT_port_tx_1024_to_15xx,
+ EF100_STAT_port_tx_15xx_to_jumbo,
+ EF100_STAT_port_rx_bytes,
+ EF100_STAT_port_rx_packets,
+ EF100_STAT_port_rx_good,
+ EF100_STAT_port_rx_bad,
+ EF100_STAT_port_rx_pause,
+ EF100_STAT_port_rx_unicast,
+ EF100_STAT_port_rx_multicast,
+ EF100_STAT_port_rx_broadcast,
+ EF100_STAT_port_rx_lt64,
+ EF100_STAT_port_rx_64,
+ EF100_STAT_port_rx_65_to_127,
+ EF100_STAT_port_rx_128_to_255,
+ EF100_STAT_port_rx_256_to_511,
+ EF100_STAT_port_rx_512_to_1023,
+ EF100_STAT_port_rx_1024_to_15xx,
+ EF100_STAT_port_rx_15xx_to_jumbo,
+ EF100_STAT_port_rx_gtjumbo,
+ EF100_STAT_port_rx_bad_gtjumbo,
+ EF100_STAT_port_rx_align_error,
+ EF100_STAT_port_rx_length_error,
+ EF100_STAT_port_rx_overflow,
+ EF100_STAT_port_rx_nodesc_drops,
+ EF100_STAT_COUNT
+};
+
struct ef100_nic_data {
struct efx_nic *efx;
struct efx_buffer mcdi_buf;
u32 datapath_caps;
u32 datapath_caps2;
u32 datapath_caps3;
+ unsigned int pf_index;
u16 warm_boot_count;
u8 port_id[ETH_ALEN];
DECLARE_BITMAP(evq_phases, EFX_MAX_CHANNELS);
+ u64 stats[EF100_STAT_COUNT];
+ u16 tso_max_hdr_len;
+ u16 tso_max_payload_num_segs;
+ u16 tso_max_frames;
+ unsigned int tso_max_payload_len;
};
#define efx_ef100_has_cap(caps, flag) \
diff --git a/drivers/net/ethernet/sfc/ef100_rx.c b/drivers/net/ethernet/sfc/ef100_rx.c
index 4223a38f46d3..13ba1a4f66fc 100644
--- a/drivers/net/ethernet/sfc/ef100_rx.c
+++ b/drivers/net/ethernet/sfc/ef100_rx.c
@@ -12,20 +12,156 @@
#include "ef100_rx.h"
#include "rx_common.h"
#include "efx.h"
+#include "nic_common.h"
+#include "mcdi_functions.h"
+#include "ef100_regs.h"
+#include "ef100_nic.h"
+#include "io.h"
-/* RX stubs */
+/* Get the value of a field in the RX prefix */
+#define PREFIX_OFFSET_W(_f) (ESF_GZ_RX_PREFIX_ ## _f ## _LBN / 32)
+#define PREFIX_OFFSET_B(_f) (ESF_GZ_RX_PREFIX_ ## _f ## _LBN % 32)
+#define PREFIX_WIDTH_MASK(_f) ((1UL << ESF_GZ_RX_PREFIX_ ## _f ## _WIDTH) - 1)
+#define PREFIX_WORD(_p, _f) le32_to_cpu((__force __le32)(_p)[PREFIX_OFFSET_W(_f)])
+#define PREFIX_FIELD(_p, _f) ((PREFIX_WORD(_p, _f) >> PREFIX_OFFSET_B(_f)) & \
+ PREFIX_WIDTH_MASK(_f))
-void ef100_rx_write(struct efx_rx_queue *rx_queue)
+#define ESF_GZ_RX_PREFIX_NT_OR_INNER_L3_CLASS_LBN \
+ (ESF_GZ_RX_PREFIX_CLASS_LBN + ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L3_CLASS_LBN)
+#define ESF_GZ_RX_PREFIX_NT_OR_INNER_L3_CLASS_WIDTH \
+ ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L3_CLASS_WIDTH
+
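The PREFIX_* macros above locate a field purely by its little-endian bit
number (LBN) and width: index the 32-bit word holding it, shift, and mask.
A standalone sketch with a hypothetical 16-bit field at bit 42 (the
le32_to_cpu() step from PREFIX_WORD() is dropped, so host-endian only):

#include <stdio.h>
#include <stdint.h>

#define DEMO_FIELD_LBN   42	/* hypothetical field position */
#define DEMO_FIELD_WIDTH 16

static uint32_t demo_prefix_field(const uint32_t *prefix)
{
	uint32_t word = prefix[DEMO_FIELD_LBN / 32];	/* PREFIX_OFFSET_W */
	unsigned int shift = DEMO_FIELD_LBN % 32;	/* PREFIX_OFFSET_B */
	uint32_t mask = (1UL << DEMO_FIELD_WIDTH) - 1;	/* PREFIX_WIDTH_MASK */

	return (word >> shift) & mask;
}

int main(void)
{
	uint32_t prefix[2] = { 0, 0xABCDu << 10 };	/* plant 0xabcd at bit 42 */

	printf("%#x\n", demo_prefix_field(prefix));	/* prints 0xabcd */
	return 0;
}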
+static bool check_fcs(struct efx_channel *channel, u32 *prefix)
{
+ u16 rxclass;
+ u8 l2status;
+
+ rxclass = le16_to_cpu((__force __le16)PREFIX_FIELD(prefix, CLASS));
+ l2status = PREFIX_FIELD(&rxclass, HCLASS_L2_STATUS);
+
+ if (likely(l2status == ESE_GZ_RH_HCLASS_L2_STATUS_OK))
+ /* Everything is ok */
+ return false;
+
+ if (l2status == ESE_GZ_RH_HCLASS_L2_STATUS_FCS_ERR)
+ channel->n_rx_eth_crc_err++;
+ return true;
}
void __ef100_rx_packet(struct efx_channel *channel)
{
- /* Stub. No RX path yet. Discard the buffer. */
- struct efx_rx_buffer *rx_buf = efx_rx_buffer(&channel->rx_queue,
- channel->rx_pkt_index);
- struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
-
- efx_free_rx_buffers(rx_queue, rx_buf, 1);
+ struct efx_rx_buffer *rx_buf = efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
+ struct efx_nic *efx = channel->efx;
+ u8 *eh = efx_rx_buf_va(rx_buf);
+ __wsum csum = 0;
+ u32 *prefix;
+
+ prefix = (u32 *)(eh - ESE_GZ_RX_PKT_PREFIX_LEN);
+
+ if (check_fcs(channel, prefix) &&
+ unlikely(!(efx->net_dev->features & NETIF_F_RXALL)))
+ goto out;
+
+ rx_buf->len = le16_to_cpu((__force __le16)PREFIX_FIELD(prefix, LENGTH));
+ if (rx_buf->len <= sizeof(struct ethhdr)) {
+ if (net_ratelimit())
+ netif_err(channel->efx, rx_err, channel->efx->net_dev,
+ "RX packet too small (%d)\n", rx_buf->len);
+ ++channel->n_rx_frm_trunc;
+ goto out;
+ }
+
+ if (likely(efx->net_dev->features & NETIF_F_RXCSUM)) {
+ if (PREFIX_FIELD(prefix, NT_OR_INNER_L3_CLASS) == 1) {
+ ++channel->n_rx_ip_hdr_chksum_err;
+ } else {
+ u16 sum = be16_to_cpu((__force __be16)PREFIX_FIELD(prefix, CSUM_FRAME));
+
+ csum = (__force __wsum) sum;
+ }
+ }
+
+ if (channel->type->receive_skb) {
+ struct efx_rx_queue *rx_queue =
+ efx_channel_get_rx_queue(channel);
+
+ /* no support for special channels yet, so just discard */
+ WARN_ON_ONCE(1);
+ efx_free_rx_buffers(rx_queue, rx_buf, 1);
+ goto out;
+ }
+
+ efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh, csum);
+
+out:
channel->rx_pkt_n_frags = 0;
}
+
+static void ef100_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index)
+{
+ struct efx_rx_buffer *rx_buf = efx_rx_buffer(rx_queue, index);
+ struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
+ struct efx_nic *efx = rx_queue->efx;
+
+ ++rx_queue->rx_packets;
+
+ netif_vdbg(efx, rx_status, efx->net_dev,
+ "RX queue %d received id %x\n",
+ efx_rx_queue_index(rx_queue), index);
+
+ efx_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len);
+
+ prefetch(efx_rx_buf_va(rx_buf));
+
+ rx_buf->page_offset += efx->rx_prefix_size;
+
+ efx_recycle_rx_pages(channel, rx_buf, 1);
+
+ efx_rx_flush_packet(channel);
+ channel->rx_pkt_n_frags = 1;
+ channel->rx_pkt_index = index;
+}
+
+void efx_ef100_ev_rx(struct efx_channel *channel, const efx_qword_t *p_event)
+{
+ struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
+ unsigned int n_packets =
+ EFX_QWORD_FIELD(*p_event, ESF_GZ_EV_RXPKTS_NUM_PKT);
+ int i;
+
+ WARN_ON_ONCE(!n_packets);
+ if (n_packets > 1)
+ ++channel->n_rx_merge_events;
+
+ channel->irq_mod_score += 2 * n_packets;
+
+ for (i = 0; i < n_packets; ++i) {
+ ef100_rx_packet(rx_queue,
+ rx_queue->removed_count & rx_queue->ptr_mask);
+ ++rx_queue->removed_count;
+ }
+}
+
+void ef100_rx_write(struct efx_rx_queue *rx_queue)
+{
+ struct efx_rx_buffer *rx_buf;
+ unsigned int idx;
+ efx_qword_t *rxd;
+ efx_dword_t rxdb;
+
+ while (rx_queue->notified_count != rx_queue->added_count) {
+ idx = rx_queue->notified_count & rx_queue->ptr_mask;
+ rx_buf = efx_rx_buffer(rx_queue, idx);
+ rxd = efx_rx_desc(rx_queue, idx);
+
+ EFX_POPULATE_QWORD_1(*rxd, ESF_GZ_RX_BUF_ADDR, rx_buf->dma_addr);
+
+ ++rx_queue->notified_count;
+ }
+
+ wmb(); /* Ensure descriptors are written before the doorbell below is rung */
+ EFX_POPULATE_DWORD_1(rxdb, ERF_GZ_RX_RING_PIDX,
+ rx_queue->added_count & rx_queue->ptr_mask);
+ efx_writed_page(rx_queue->efx, &rxdb,
+ ER_GZ_RX_RING_DOORBELL, efx_rx_queue_index(rx_queue));
+}
diff --git a/drivers/net/ethernet/sfc/ef100_rx.h b/drivers/net/ethernet/sfc/ef100_rx.h
index b5dadf741aa0..f2f266863966 100644
--- a/drivers/net/ethernet/sfc/ef100_rx.h
+++ b/drivers/net/ethernet/sfc/ef100_rx.h
@@ -14,6 +14,7 @@
#include "net_driver.h"
+void efx_ef100_ev_rx(struct efx_channel *channel, const efx_qword_t *p_event);
void ef100_rx_write(struct efx_rx_queue *rx_queue);
void __ef100_rx_packet(struct efx_channel *channel);
diff --git a/drivers/net/ethernet/sfc/ef100_tx.c b/drivers/net/ethernet/sfc/ef100_tx.c
index 15e646f8c3e0..a09546e43408 100644
--- a/drivers/net/ethernet/sfc/ef100_tx.c
+++ b/drivers/net/ethernet/sfc/ef100_tx.c
@@ -9,14 +9,24 @@
* by the Free Software Foundation, incorporated herein by reference.
*/
+#include <net/ip6_checksum.h>
+
#include "net_driver.h"
#include "tx_common.h"
#include "nic_common.h"
+#include "mcdi_functions.h"
+#include "ef100_regs.h"
+#include "io.h"
#include "ef100_tx.h"
+#include "ef100_nic.h"
-/* TX queue stubs */
int ef100_tx_probe(struct efx_tx_queue *tx_queue)
{
+ /* Allocate an extra descriptor for the QMDA status completion entry */
+ return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd.buf,
+ (tx_queue->ptr_mask + 2) *
+ sizeof(efx_oword_t),
+ GFP_KERNEL);
- return 0;
}
@@ -27,10 +37,286 @@ void ef100_tx_init(struct efx_tx_queue *tx_queue)
netdev_get_tx_queue(tx_queue->efx->net_dev,
tx_queue->channel->channel -
tx_queue->efx->tx_channel_offset);
+
+ if (efx_mcdi_tx_init(tx_queue, false))
+ netdev_WARN(tx_queue->efx->net_dev,
+ "failed to initialise TXQ %d\n", tx_queue->queue);
+}
+
+static bool ef100_tx_can_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ struct ef100_nic_data *nic_data;
+ struct efx_tx_buffer *buffer;
+ struct tcphdr *tcphdr;
+ struct iphdr *iphdr;
+ size_t header_len;
+ u32 mss;
+
+ nic_data = efx->nic_data;
+
+ if (!skb_is_gso_tcp(skb))
+ return false;
+ if (!(efx->net_dev->features & NETIF_F_TSO))
+ return false;
+
+ mss = skb_shinfo(skb)->gso_size;
+ if (unlikely(mss < 4)) {
+ WARN_ONCE(1, "MSS of %u is too small for TSO\n", mss);
+ return false;
+ }
+
+ header_len = efx_tx_tso_header_length(skb);
+ if (header_len > nic_data->tso_max_hdr_len)
+ return false;
+
+ if (skb_shinfo(skb)->gso_segs > nic_data->tso_max_payload_num_segs) {
+ /* net_dev->gso_max_segs should've caught this */
+ WARN_ON_ONCE(1);
+ return false;
+ }
+
+ if (skb->data_len / mss > nic_data->tso_max_frames)
+ return false;
+
+ /* net_dev->gso_max_size should've caught this */
+ if (WARN_ON_ONCE(skb->data_len > nic_data->tso_max_payload_len))
+ return false;
+
+ /* Reserve an empty buffer for the TSO V3 descriptor.
+ * Convey the length of the header since we already know it.
+ */
+ buffer = efx_tx_queue_get_insert_buffer(tx_queue);
+ buffer->flags = EFX_TX_BUF_TSO_V3 | EFX_TX_BUF_CONT;
+ buffer->len = header_len;
+ buffer->unmap_len = 0;
+ buffer->skb = skb;
+ ++tx_queue->insert_count;
+
+ /* Adjust the TCP checksum to exclude the total length, since we set
+ * ED_INNER_IP_LEN in the descriptor.
+ */
+ tcphdr = tcp_hdr(skb);
+ if (skb_is_gso_v6(skb)) {
+ tcphdr->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0);
+ } else {
+ iphdr = ip_hdr(skb);
+ tcphdr->check = ~csum_tcpudp_magic(iphdr->saddr, iphdr->daddr,
+ 0, IPPROTO_TCP, 0);
+ }
+ return true;
+}
+
+static efx_oword_t *ef100_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
+{
+ if (likely(tx_queue->txd.buf.addr))
+ return ((efx_oword_t *)tx_queue->txd.buf.addr) + index;
+ else
+ return NULL;
+}
+
+void ef100_notify_tx_desc(struct efx_tx_queue *tx_queue)
+{
+ unsigned int write_ptr;
+ efx_dword_t reg;
+
+ if (unlikely(tx_queue->notify_count == tx_queue->write_count))
+ return;
+
+ write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
+ /* The write pointer goes into the high word */
+ EFX_POPULATE_DWORD_1(reg, ERF_GZ_TX_RING_PIDX, write_ptr);
+ efx_writed_page(tx_queue->efx, &reg,
+ ER_GZ_TX_RING_DOORBELL, tx_queue->queue);
+ tx_queue->notify_count = tx_queue->write_count;
+ tx_queue->xmit_more_available = false;
+}
+
+static void ef100_tx_push_buffers(struct efx_tx_queue *tx_queue)
+{
+ ef100_notify_tx_desc(tx_queue);
+ ++tx_queue->pushes;
+}
+
+static void ef100_set_tx_csum_partial(const struct sk_buff *skb,
+ struct efx_tx_buffer *buffer, efx_oword_t *txd)
+{
+ efx_oword_t csum;
+ int csum_start;
+
+ if (!skb || skb->ip_summed != CHECKSUM_PARTIAL)
+ return;
+
+ /* skb->csum_start has the offset from head, but we need the offset
+ * from data.
+ */
+ csum_start = skb_checksum_start_offset(skb);
+ EFX_POPULATE_OWORD_3(csum,
+ ESF_GZ_TX_SEND_CSO_PARTIAL_EN, 1,
+ ESF_GZ_TX_SEND_CSO_PARTIAL_START_W,
+ csum_start >> 1,
+ ESF_GZ_TX_SEND_CSO_PARTIAL_CSUM_W,
+ skb->csum_offset >> 1);
+ EFX_OR_OWORD(*txd, *txd, csum);
+}
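The CSO_PARTIAL_START_W and CSO_PARTIAL_CSUM_W descriptor fields are in
units of 2-byte words, hence the >> 1; nothing is lost because the checksum
start and offset are always even for IP, TCP and UDP. Worked numbers for a
hypothetical plain IPv4/TCP frame:

#include <stdio.h>

int main(void)
{
	/* Ethernet (14) + IPv4 (20) bytes precede the TCP header, and the
	 * TCP checksum field sits 16 bytes into that header.
	 */
	unsigned int csum_start = 14 + 20;	/* skb_checksum_start_offset() */
	unsigned int csum_offset = 16;		/* skb->csum_offset */

	printf("CSO_PARTIAL_START_W=%u CSO_PARTIAL_CSUM_W=%u\n",
	       csum_start >> 1, csum_offset >> 1);	/* 17 and 8 */
	return 0;
}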
+
+static void ef100_set_tx_hw_vlan(const struct sk_buff *skb, efx_oword_t *txd)
+{
+ u16 vlan_tci = skb_vlan_tag_get(skb);
+ efx_oword_t vlan;
+
+ EFX_POPULATE_OWORD_2(vlan,
+ ESF_GZ_TX_SEND_VLAN_INSERT_EN, 1,
+ ESF_GZ_TX_SEND_VLAN_INSERT_TCI, vlan_tci);
+ EFX_OR_OWORD(*txd, *txd, vlan);
+}
+
+static void ef100_make_send_desc(struct efx_nic *efx,
+ const struct sk_buff *skb,
+ struct efx_tx_buffer *buffer, efx_oword_t *txd,
+ unsigned int segment_count)
+{
+ /* TX send descriptor */
+ EFX_POPULATE_OWORD_3(*txd,
+ ESF_GZ_TX_SEND_NUM_SEGS, segment_count,
+ ESF_GZ_TX_SEND_LEN, buffer->len,
+ ESF_GZ_TX_SEND_ADDR, buffer->dma_addr);
+
+ if (likely(efx->net_dev->features & NETIF_F_HW_CSUM))
+ ef100_set_tx_csum_partial(skb, buffer, txd);
+ if (efx->net_dev->features & NETIF_F_HW_VLAN_CTAG_TX &&
+ skb && skb_vlan_tag_present(skb))
+ ef100_set_tx_hw_vlan(skb, txd);
+}
+
+static void ef100_make_tso_desc(struct efx_nic *efx,
+ const struct sk_buff *skb,
+ struct efx_tx_buffer *buffer, efx_oword_t *txd,
+ unsigned int segment_count)
+{
+ u32 mangleid = (efx->net_dev->features & NETIF_F_TSO_MANGLEID) ||
+ skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID ?
+ ESE_GZ_TX_DESC_IP4_ID_NO_OP :
+ ESE_GZ_TX_DESC_IP4_ID_INC_MOD16;
+ u16 vlan_enable = efx->net_dev->features & NETIF_F_HW_VLAN_CTAG_TX ?
+ skb_vlan_tag_present(skb) : 0;
+ unsigned int len, ip_offset, tcp_offset, payload_segs;
+ u16 vlan_tci = skb_vlan_tag_get(skb);
+ u32 mss = skb_shinfo(skb)->gso_size;
+
+ len = skb->len - buffer->len;
+ /* We use 1 for the TSO descriptor and 1 for the header */
+ payload_segs = segment_count - 2;
+ ip_offset = skb_network_offset(skb);
+ tcp_offset = skb_transport_offset(skb);
+
+ EFX_POPULATE_OWORD_13(*txd,
+ ESF_GZ_TX_DESC_TYPE, ESE_GZ_TX_DESC_TYPE_TSO,
+ ESF_GZ_TX_TSO_MSS, mss,
+ ESF_GZ_TX_TSO_HDR_NUM_SEGS, 1,
+ ESF_GZ_TX_TSO_PAYLOAD_NUM_SEGS, payload_segs,
+ ESF_GZ_TX_TSO_HDR_LEN_W, buffer->len >> 1,
+ ESF_GZ_TX_TSO_PAYLOAD_LEN, len,
+ ESF_GZ_TX_TSO_CSO_INNER_L4, 1,
+ ESF_GZ_TX_TSO_INNER_L3_OFF_W, ip_offset >> 1,
+ ESF_GZ_TX_TSO_INNER_L4_OFF_W, tcp_offset >> 1,
+ ESF_GZ_TX_TSO_ED_INNER_IP4_ID, mangleid,
+ ESF_GZ_TX_TSO_ED_INNER_IP_LEN, 1,
+ ESF_GZ_TX_TSO_VLAN_INSERT_EN, vlan_enable,
+ ESF_GZ_TX_TSO_VLAN_INSERT_TCI, vlan_tci
+ );
+}
+
+static void ef100_tx_make_descriptors(struct efx_tx_queue *tx_queue,
+ const struct sk_buff *skb,
+ unsigned int segment_count)
+{
+ unsigned int old_write_count = tx_queue->write_count;
+ unsigned int new_write_count = old_write_count;
+ struct efx_tx_buffer *buffer;
+ unsigned int next_desc_type;
+ unsigned int write_ptr;
+ efx_oword_t *txd;
+ unsigned int nr_descs = tx_queue->insert_count - old_write_count;
+
+ if (unlikely(nr_descs == 0))
+ return;
+
+ if (segment_count)
+ next_desc_type = ESE_GZ_TX_DESC_TYPE_TSO;
+ else
+ next_desc_type = ESE_GZ_TX_DESC_TYPE_SEND;
+
+ /* if it's a raw write (such as XDP) then always SEND single frames */
+ if (!skb)
+ nr_descs = 1;
+
+ do {
+ write_ptr = new_write_count & tx_queue->ptr_mask;
+ buffer = &tx_queue->buffer[write_ptr];
+ txd = ef100_tx_desc(tx_queue, write_ptr);
+ ++new_write_count;
+
+ /* Create TX descriptor ring entry */
+ tx_queue->packet_write_count = new_write_count;
+
+ switch (next_desc_type) {
+ case ESE_GZ_TX_DESC_TYPE_SEND:
+ ef100_make_send_desc(tx_queue->efx, skb,
+ buffer, txd, nr_descs);
+ break;
+ case ESE_GZ_TX_DESC_TYPE_TSO:
+ /* TX TSO descriptor */
+ WARN_ON_ONCE(!(buffer->flags & EFX_TX_BUF_TSO_V3));
+ ef100_make_tso_desc(tx_queue->efx, skb,
+ buffer, txd, nr_descs);
+ break;
+ default:
+ /* TX segment descriptor */
+ EFX_POPULATE_OWORD_3(*txd,
+ ESF_GZ_TX_DESC_TYPE, ESE_GZ_TX_DESC_TYPE_SEG,
+ ESF_GZ_TX_SEG_LEN, buffer->len,
+ ESF_GZ_TX_SEG_ADDR, buffer->dma_addr);
+ }
+ /* if it's a raw write (such as XDP) then always SEND */
+ next_desc_type = skb ? ESE_GZ_TX_DESC_TYPE_SEG :
+ ESE_GZ_TX_DESC_TYPE_SEND;
+
+ } while (new_write_count != tx_queue->insert_count);
+
+ wmb(); /* Ensure descriptors are written before they are fetched */
+
+ tx_queue->write_count = new_write_count;
+
+ /* The write_count above must be updated before reading
+ * channel->holdoff_doorbell to avoid a race with the
+ * completion path, so ensure these operations are not
+ * re-ordered. This also flushes the update of write_count
+ * back into the cache.
+ */
+ smp_mb();
}
void ef100_tx_write(struct efx_tx_queue *tx_queue)
{
+ ef100_tx_make_descriptors(tx_queue, NULL, 0);
+ ef100_tx_push_buffers(tx_queue);
+}
+
+void ef100_ev_tx(struct efx_channel *channel, const efx_qword_t *p_event)
+{
+ unsigned int tx_done =
+ EFX_QWORD_FIELD(*p_event, ESF_GZ_EV_TXCMPL_NUM_DESC);
+ unsigned int qlabel =
+ EFX_QWORD_FIELD(*p_event, ESF_GZ_EV_TXCMPL_Q_LABEL);
+ struct efx_tx_queue *tx_queue =
+ efx_channel_get_tx_queue(channel, qlabel);
+ unsigned int tx_index = (tx_queue->read_count + tx_done - 1) &
+ tx_queue->ptr_mask;
+
+ efx_xmit_done(tx_queue, tx_index);
}
/* Add a socket buffer to a TX queue
@@ -42,10 +328,81 @@ void ef100_tx_write(struct efx_tx_queue *tx_queue)
*/
int ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
- /* Stub. No TX path yet. */
+ unsigned int old_insert_count = tx_queue->insert_count;
struct efx_nic *efx = tx_queue->efx;
+ bool xmit_more = netdev_xmit_more();
+ unsigned int fill_level;
+ unsigned int segments;
+ int rc;
+
+ if (!tx_queue->buffer || !tx_queue->ptr_mask) {
+ netif_stop_queue(efx->net_dev);
+ dev_kfree_skb_any(skb);
+ return -ENODEV;
+ }
+
+ segments = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 0;
+ if (segments == 1)
+ segments = 0; /* Don't use TSO/GSO for a single segment. */
+ if (segments && !ef100_tx_can_tso(tx_queue, skb)) {
+ rc = efx_tx_tso_fallback(tx_queue, skb);
+ tx_queue->tso_fallbacks++;
+ if (rc)
+ goto err;
+ else
+ return 0;
+ }
+
+ /* Map for DMA and create descriptors */
+ rc = efx_tx_map_data(tx_queue, skb, segments);
+ if (rc)
+ goto err;
+ ef100_tx_make_descriptors(tx_queue, skb, segments);
+
+ fill_level = efx_channel_tx_fill_level(tx_queue->channel);
+ if (fill_level > efx->txq_stop_thresh) {
+ netif_tx_stop_queue(tx_queue->core_txq);
+ /* Re-read after a memory barrier in case we've raced with
+ * the completion path. Otherwise there's a danger we'll never
+ * restart the queue if all completions have just happened.
+ */
+ smp_mb();
+ fill_level = efx_channel_tx_fill_level(tx_queue->channel);
+ if (fill_level < efx->txq_stop_thresh)
+ netif_tx_start_queue(tx_queue->core_txq);
+ }
+
+ if (__netdev_tx_sent_queue(tx_queue->core_txq, skb->len, xmit_more))
+ tx_queue->xmit_more_available = false; /* push doorbell */
+ else if (tx_queue->write_count - tx_queue->notify_count > 255)
+ /* Ensure we never push more than 256 packets at once */
+ tx_queue->xmit_more_available = false; /* push */
+ else
+ tx_queue->xmit_more_available = true; /* don't push yet */
+
+ if (!tx_queue->xmit_more_available)
+ ef100_tx_push_buffers(tx_queue);
+
+ if (segments) {
+ tx_queue->tso_bursts++;
+ tx_queue->tso_packets += segments;
+ tx_queue->tx_packets += segments;
+ } else {
+ tx_queue->tx_packets++;
+ }
+ return 0;
+
- netif_stop_queue(efx->net_dev);
- dev_kfree_skb_any(skb);
- return -ENODEV;
+err:
+ efx_enqueue_unwind(tx_queue, old_insert_count);
+ if (!IS_ERR_OR_NULL(skb))
+ dev_kfree_skb_any(skb);
+ /* If we're not expecting another transmit and we had something to push
+ * on this queue then we need to push here to get the previous packets
+ * out. We only enter this branch from before the BQL update
+ * (__netdev_tx_sent_queue()) above, so xmit_more_available still
+ * refers to the old state.
+ */
+ if (tx_queue->xmit_more_available && !xmit_more)
+ ef100_tx_push_buffers(tx_queue);
+ return rc;
}
diff --git a/drivers/net/ethernet/sfc/ef100_tx.h b/drivers/net/ethernet/sfc/ef100_tx.h
index 9a472f7aff43..fa23e430bdd7 100644
--- a/drivers/net/ethernet/sfc/ef100_tx.h
+++ b/drivers/net/ethernet/sfc/ef100_tx.h
@@ -17,6 +17,10 @@
int ef100_tx_probe(struct efx_tx_queue *tx_queue);
void ef100_tx_init(struct efx_tx_queue *tx_queue);
void ef100_tx_write(struct efx_tx_queue *tx_queue);
+void ef100_notify_tx_desc(struct efx_tx_queue *tx_queue);
+unsigned int ef100_tx_max_skb_descs(struct efx_nic *efx);
+
+void ef100_ev_tx(struct efx_channel *channel, const efx_qword_t *p_event);
int ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
#endif
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 9791fac0b649..7bb7ecb480ae 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -173,6 +173,7 @@ struct efx_tx_buffer {
#define EFX_TX_BUF_MAP_SINGLE 8 /* buffer was mapped with dma_map_single() */
#define EFX_TX_BUF_OPTION 0x10 /* empty buffer for option descriptor */
#define EFX_TX_BUF_XDP 0x20 /* buffer was sent with XDP */
+#define EFX_TX_BUF_TSO_V3 0x40 /* empty buffer for a TSO_V3 descriptor */
/**
* struct efx_tx_queue - An Efx TX queue
@@ -245,6 +246,7 @@ struct efx_tx_buffer {
* @pio_packets: Number of times the TX PIO feature has been used
* @xmit_more_available: Are any packets waiting to be pushed to the NIC
* @cb_packets: Number of times the TX copybreak feature has been used
+ * @notify_count: Count of notified descriptors to the NIC
* @empty_read_count: If the completion path has seen the queue as empty
* and the transmission path has not yet checked this, the value of
* @read_count bitwise-added to %EFX_EMPTY_COUNT_VALID; otherwise 0.
@@ -292,6 +294,7 @@ struct efx_tx_queue {
unsigned int pio_packets;
bool xmit_more_available;
unsigned int cb_packets;
+ unsigned int notify_count;
/* Statistics to supplement MAC stats */
unsigned long tx_packets;
@@ -1669,6 +1672,24 @@ static inline void efx_xmit_hwtstamp_pending(struct sk_buff *skb)
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
}
+/* Get the max fill level of the TX queues on this channel */
+static inline unsigned int
+efx_channel_tx_fill_level(struct efx_channel *channel)
+{
+ struct efx_tx_queue *tx_queue;
+ unsigned int fill_level = 0;
+
+ /* This function is currently only used by EF100, which maybe
+ * could do something simpler and just compute the fill level
+ * of the single TXQ that's really in use.
+ */
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ fill_level = max(fill_level,
+ tx_queue->insert_count - tx_queue->read_count);
+
+ return fill_level;
+}
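Because insert_count and read_count are free-running unsigned counters,
their difference stays correct across 32-bit wraparound and no masking is
needed. A standalone sketch of why the arithmetic holds:

#include <stdio.h>

int main(void)
{
	unsigned int insert_count = 7;		/* producer, wrapped past UINT_MAX */
	unsigned int read_count = 0xfffffff7u;	/* consumer, 16 behind */

	/* Modulo-2^32 subtraction recovers the true distance */
	printf("fill level = %u\n", insert_count - read_count);	/* 16 */
	return 0;
}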
+
/* Get all supported features.
* If a feature is not fixed, it is present in hw_features.
* If a feature is fixed, it does not present in hw_features, but
diff --git a/drivers/net/ethernet/sfc/tx_common.c b/drivers/net/ethernet/sfc/tx_common.c
index 11b64c609550..793e234819a8 100644
--- a/drivers/net/ethernet/sfc/tx_common.c
+++ b/drivers/net/ethernet/sfc/tx_common.c
@@ -71,6 +71,7 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
"initialising TX queue %d\n", tx_queue->queue);
tx_queue->insert_count = 0;
+ tx_queue->notify_count = 0;
tx_queue->write_count = 0;
tx_queue->packet_write_count = 0;
tx_queue->old_write_count = 0;
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index 6646eba9f57f..6eef0f45b133 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -951,7 +951,7 @@ out_stop:
dma_free_coherent(ip->dma_dev, RX_RING_SIZE, ip->rxr,
ip->rxr_dma);
if (ip->tx_ring)
- dma_free_coherent(ip->dma_dev, TX_RING_SIZE, ip->tx_ring,
+ dma_free_coherent(ip->dma_dev, TX_RING_SIZE + SZ_16K - 1, ip->tx_ring,
ip->txr_dma);
out_free:
free_netdev(dev);
@@ -964,7 +964,7 @@ static int ioc3eth_remove(struct platform_device *pdev)
struct ioc3_private *ip = netdev_priv(dev);
dma_free_coherent(ip->dma_dev, RX_RING_SIZE, ip->rxr, ip->rxr_dma);
- dma_free_coherent(ip->dma_dev, TX_RING_SIZE, ip->tx_ring, ip->txr_dma);
+ dma_free_coherent(ip->dma_dev, TX_RING_SIZE + SZ_16K - 1, ip->tx_ring, ip->txr_dma);
unregister_netdev(dev);
del_timer_sync(&ip->ioc3_timer);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 05d63963fdb7..ac5e8cc5fb9f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -625,7 +625,7 @@ static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
int ret = phylink_ethtool_set_wol(priv->phylink, wol);
if (!ret)
- device_set_wakeup_enable(&dev->dev, !!wol->wolopts);
+ device_set_wakeup_enable(priv->device, !!wol->wolopts);
return ret;
}
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index 3902b3aeb0c2..07389702a540 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -283,8 +283,8 @@ spider_net_free_chain(struct spider_net_card *card,
descr = descr->next;
} while (descr != chain->ring);
- dma_free_coherent(&card->pdev->dev, chain->num_desc,
- chain->hwring, chain->dma_addr);
+ dma_free_coherent(&card->pdev->dev, chain->num_desc * sizeof(struct spider_net_hw_descr),
+ chain->hwring, chain->dma_addr);
}
/**
@@ -314,8 +314,6 @@ spider_net_init_chain(struct spider_net_card *card,
if (!chain->hwring)
return -ENOMEM;
- memset(chain->ring, 0, chain->num_desc * sizeof(struct spider_net_descr));
-
/* Set up the hardware pointers in each descriptor */
descr = chain->ring;
hwdescr = chain->hwring;