Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/atheros/alx/main.c | 9
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 71
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h | 5
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | 21
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c | 88
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 4
-rw-r--r--  drivers/net/ethernet/cadence/macb_main.c | 137
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c | 6
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c | 25
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 11
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c | 30
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c | 18
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h | 122
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/l2t.c | 53
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sched.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c | 47
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/smt.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 36
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 7
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | 9
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc.c | 26
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_hw.h | 16
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_pf.c | 8
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_enet.c | 2
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.c | 2
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 30
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_main.c | 49
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 30
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 3
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 29
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c | 8
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 12
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 14
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 76
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 11
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_star_emac.c | 118
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c | 24
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c | 1
-rw-r--r--  drivers/net/ethernet/microchip/lan743x_main.c | 2
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-config.h | 2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/main.c | 2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/main.h | 7
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/offload.c | 24
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h | 2
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_lif.c | 19
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_cxt.c | 21
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_debug.c | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dev.c | 11
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_roce.c | 1
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_vf.c | 23
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_main.c | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_ptp.c | 31
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_ptp.h | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_rdma.c | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c | 34
-rw-r--r--  drivers/net/ethernet/realtek/r8169_main.c | 5
-rw-r--r--  drivers/net/ethernet/rocker/rocker_main.c | 4
-rw-r--r--  drivers/net/ethernet/socionext/netsec.c | 5
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet.h | 2
69 files changed, 752 insertions, 646 deletions
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index b9b4edb913c1..9b7f1af5f574 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1249,8 +1249,12 @@ out_disable_adv_intr:
static void __alx_stop(struct alx_priv *alx)
{
- alx_halt(alx);
alx_free_irq(alx);
+
+ cancel_work_sync(&alx->link_check_wk);
+ cancel_work_sync(&alx->reset_wk);
+
+ alx_halt(alx);
alx_free_rings(alx);
alx_free_napis(alx);
}
@@ -1855,9 +1859,6 @@ static void alx_remove(struct pci_dev *pdev)
struct alx_priv *alx = pci_get_drvdata(pdev);
struct alx_hw *hw = &alx->hw;
- cancel_work_sync(&alx->link_check_wk);
- cancel_work_sync(&alx->reset_wk);
-
/* restore permanent mac address */
alx_set_macaddr(hw, hw->perm_addr);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index c62589c266b2..6a884df44612 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -6292,6 +6292,7 @@ int bnxt_hwrm_set_coal(struct bnxt *bp)
static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
{
+ struct hwrm_stat_ctx_clr_stats_input req0 = {0};
struct hwrm_stat_ctx_free_input req = {0};
int i;
@@ -6301,6 +6302,7 @@ static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
if (BNXT_CHIP_TYPE_NITRO_A0(bp))
return;
+ bnxt_hwrm_cmd_hdr_init(bp, &req0, HWRM_STAT_CTX_CLR_STATS, -1, -1);
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
mutex_lock(&bp->hwrm_cmd_lock);
@@ -6310,7 +6312,11 @@ static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
-
+ if (BNXT_FW_MAJ(bp) <= 20) {
+ req0.stat_ctx_id = req.stat_ctx_id;
+ _hwrm_send_message(bp, &req0, sizeof(req0),
+ HWRM_CMD_TIMEOUT);
+ }
_hwrm_send_message(bp, &req, sizeof(req),
HWRM_CMD_TIMEOUT);
@@ -6976,7 +6982,8 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
bp->tx_push_thresh = 0;
- if (flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED)
+ if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
+ BNXT_FW_MAJ(bp) > 217)
bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
@@ -7240,8 +7247,9 @@ static int __bnxt_hwrm_ver_get(struct bnxt *bp, bool silent)
static int bnxt_hwrm_ver_get(struct bnxt *bp)
{
struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
+ u16 fw_maj, fw_min, fw_bld, fw_rsv;
u32 dev_caps_cfg, hwrm_ver;
- int rc;
+ int rc, len;
bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
mutex_lock(&bp->hwrm_cmd_lock);
@@ -7273,9 +7281,22 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
resp->hwrm_intf_upd_8b);
- snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d.%d",
- resp->hwrm_fw_maj_8b, resp->hwrm_fw_min_8b,
- resp->hwrm_fw_bld_8b, resp->hwrm_fw_rsvd_8b);
+ fw_maj = le16_to_cpu(resp->hwrm_fw_major);
+ if (bp->hwrm_spec_code > 0x10803 && fw_maj) {
+ fw_min = le16_to_cpu(resp->hwrm_fw_minor);
+ fw_bld = le16_to_cpu(resp->hwrm_fw_build);
+ fw_rsv = le16_to_cpu(resp->hwrm_fw_patch);
+ len = FW_VER_STR_LEN;
+ } else {
+ fw_maj = resp->hwrm_fw_maj_8b;
+ fw_min = resp->hwrm_fw_min_8b;
+ fw_bld = resp->hwrm_fw_bld_8b;
+ fw_rsv = resp->hwrm_fw_rsvd_8b;
+ len = BC_HWRM_STR_LEN;
+ }
+ bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
+ snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld,
+ fw_rsv);
if (strlen(resp->active_pkg_name)) {
int fw_ver_len = strlen(bp->fw_ver_str);
@@ -10037,7 +10058,7 @@ static void bnxt_timer(struct timer_list *t)
struct bnxt *bp = from_timer(bp, t, timer);
struct net_device *dev = bp->dev;
- if (!netif_running(dev))
+ if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state))
return;
if (atomic_read(&bp->intr_sem) != 0)
@@ -11892,7 +11913,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->ethtool_ops = &bnxt_ethtool_ops;
pci_set_drvdata(pdev, dev);
- bnxt_vpd_read_info(bp);
+ if (BNXT_PF(bp))
+ bnxt_vpd_read_info(bp);
rc = bnxt_alloc_hwrm_resources(bp);
if (rc)
@@ -12133,19 +12155,9 @@ static int bnxt_resume(struct device *device)
goto resume_exit;
}
- if (bnxt_hwrm_queue_qportcfg(bp)) {
- rc = -ENODEV;
+ rc = bnxt_hwrm_func_qcaps(bp);
+ if (rc)
goto resume_exit;
- }
-
- if (bp->hwrm_spec_code >= 0x10803) {
- if (bnxt_alloc_ctx_mem(bp)) {
- rc = -ENODEV;
- goto resume_exit;
- }
- }
- if (BNXT_NEW_RM(bp))
- bnxt_hwrm_func_resc_qcaps(bp, false);
if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
rc = -ENODEV;
@@ -12161,6 +12173,8 @@ static int bnxt_resume(struct device *device)
resume_exit:
bnxt_ulp_start(bp, rc);
+ if (!rc)
+ bnxt_reenable_sriov(bp);
rtnl_unlock();
return rc;
}
@@ -12204,6 +12218,9 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
bnxt_close(netdev);
pci_disable_device(pdev);
+ bnxt_free_ctx_mem(bp);
+ kfree(bp->ctx);
+ bp->ctx = NULL;
rtnl_unlock();
/* Request a slot reset. */
@@ -12237,12 +12254,16 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
pci_set_master(pdev);
err = bnxt_hwrm_func_reset(bp);
- if (!err && netif_running(netdev))
- err = bnxt_open(netdev);
-
- if (!err)
- result = PCI_ERS_RESULT_RECOVERED;
+ if (!err) {
+ err = bnxt_hwrm_func_qcaps(bp);
+ if (!err && netif_running(netdev))
+ err = bnxt_open(netdev);
+ }
bnxt_ulp_start(bp, err);
+ if (!err) {
+ bnxt_reenable_sriov(bp);
+ result = PCI_ERS_RESULT_RECOVERED;
+ }
}
if (result != PCI_ERS_RESULT_RECOVERED) {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 9e173d74b72a..78e2fd63ac3d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1746,6 +1746,11 @@ struct bnxt {
#define PHY_VER_STR_LEN (FW_VER_STR_LEN - BC_HWRM_STR_LEN)
char fw_ver_str[FW_VER_STR_LEN];
char hwrm_ver_supp[FW_VER_STR_LEN];
+ u64 fw_ver_code;
+#define BNXT_FW_VER_CODE(maj, min, bld, rsv) \
+ ((u64)(maj) << 48 | (u64)(min) << 32 | (u64)(bld) << 16 | (rsv))
+#define BNXT_FW_MAJ(bp) ((bp)->fw_ver_code >> 48)
+
__be16 vxlan_port;
u8 vxlan_port_cnt;
__le16 vxlan_fw_dst_port_id;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index 0eef4f5e4a46..4a11c1e7cc02 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -1889,7 +1889,8 @@ static void bnxt_tc_setup_indr_rel(void *cb_priv)
}
static int bnxt_tc_setup_indr_block(struct net_device *netdev, struct bnxt *bp,
- struct flow_block_offload *f)
+ struct flow_block_offload *f, void *data,
+ void (*cleanup)(struct flow_block_cb *block_cb))
{
struct bnxt_flower_indr_block_cb_priv *cb_priv;
struct flow_block_cb *block_cb;
@@ -1907,9 +1908,10 @@ static int bnxt_tc_setup_indr_block(struct net_device *netdev, struct bnxt *bp,
cb_priv->bp = bp;
list_add(&cb_priv->list, &bp->tc_indr_block_list);
- block_cb = flow_block_cb_alloc(bnxt_tc_setup_indr_block_cb,
- cb_priv, cb_priv,
- bnxt_tc_setup_indr_rel);
+ block_cb = flow_indr_block_cb_alloc(bnxt_tc_setup_indr_block_cb,
+ cb_priv, cb_priv,
+ bnxt_tc_setup_indr_rel, f,
+ netdev, data, bp, cleanup);
if (IS_ERR(block_cb)) {
list_del(&cb_priv->list);
kfree(cb_priv);
@@ -1930,7 +1932,7 @@ static int bnxt_tc_setup_indr_block(struct net_device *netdev, struct bnxt *bp,
if (!block_cb)
return -ENOENT;
- flow_block_cb_remove(block_cb, f);
+ flow_indr_block_cb_remove(block_cb, f);
list_del(&block_cb->driver_list);
break;
default:
@@ -1945,14 +1947,17 @@ static bool bnxt_is_netdev_indr_offload(struct net_device *netdev)
}
static int bnxt_tc_setup_indr_cb(struct net_device *netdev, void *cb_priv,
- enum tc_setup_type type, void *type_data)
+ enum tc_setup_type type, void *type_data,
+ void *data,
+ void (*cleanup)(struct flow_block_cb *block_cb))
{
if (!bnxt_is_netdev_indr_offload(netdev))
return -EOPNOTSUPP;
switch (type) {
case TC_SETUP_BLOCK:
- return bnxt_tc_setup_indr_block(netdev, cb_priv, type_data);
+ return bnxt_tc_setup_indr_block(netdev, cb_priv, type_data, data,
+ cleanup);
default:
break;
}
@@ -2074,7 +2079,7 @@ void bnxt_shutdown_tc(struct bnxt *bp)
return;
flow_indr_dev_unregister(bnxt_tc_setup_indr_cb, bp,
- bnxt_tc_setup_indr_block_cb);
+ bnxt_tc_setup_indr_rel);
rhashtable_destroy(&tc_info->flow_table);
rhashtable_destroy(&tc_info->l2_table);
rhashtable_destroy(&tc_info->decap_l2_table);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index ff31da0ed846..af924a8b885f 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -459,17 +459,6 @@ static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
genet_dma_ring_regs[r]);
}
-static bool bcmgenet_hfb_is_filter_enabled(struct bcmgenet_priv *priv,
- u32 f_index)
-{
- u32 offset;
- u32 reg;
-
- offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
- reg = bcmgenet_hfb_reg_readl(priv, offset);
- return !!(reg & (1 << (f_index % 32)));
-}
-
static void bcmgenet_hfb_enable_filter(struct bcmgenet_priv *priv, u32 f_index)
{
u32 offset;
@@ -533,19 +522,6 @@ static void bcmgenet_hfb_set_filter_length(struct bcmgenet_priv *priv,
bcmgenet_hfb_reg_writel(priv, reg, offset);
}
-static int bcmgenet_hfb_find_unused_filter(struct bcmgenet_priv *priv)
-{
- u32 f_index;
-
- /* First MAX_NUM_OF_FS_RULES are reserved for Rx NFC filters */
- for (f_index = MAX_NUM_OF_FS_RULES;
- f_index < priv->hw_params->hfb_filter_cnt; f_index++)
- if (!bcmgenet_hfb_is_filter_enabled(priv, f_index))
- return f_index;
-
- return -ENOMEM;
-}
-
static int bcmgenet_hfb_validate_mask(void *mask, size_t size)
{
while (size) {
@@ -634,8 +610,9 @@ static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
{
struct ethtool_rx_flow_spec *fs = &rule->fs;
int err = 0, offset = 0, f_length = 0;
- u16 val_16, mask_16;
u8 val_8, mask_8;
+ __be16 val_16;
+ u16 mask_16;
size_t size;
u32 *f_data;
@@ -744,59 +721,6 @@ static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
return err;
}
-/* bcmgenet_hfb_add_filter
- *
- * Add new filter to Hardware Filter Block to match and direct Rx traffic to
- * desired Rx queue.
- *
- * f_data is an array of unsigned 32-bit integers where each 32-bit integer
- * provides filter data for 2 bytes (4 nibbles) of Rx frame:
- *
- * bits 31:20 - unused
- * bit 19 - nibble 0 match enable
- * bit 18 - nibble 1 match enable
- * bit 17 - nibble 2 match enable
- * bit 16 - nibble 3 match enable
- * bits 15:12 - nibble 0 data
- * bits 11:8 - nibble 1 data
- * bits 7:4 - nibble 2 data
- * bits 3:0 - nibble 3 data
- *
- * Example:
- * In order to match:
- * - Ethernet frame type = 0x0800 (IP)
- * - IP version field = 4
- * - IP protocol field = 0x11 (UDP)
- *
- * The following filter is needed:
- * u32 hfb_filter_ipv4_udp[] = {
- * Rx frame offset 0x00: 0x00000000, 0x00000000, 0x00000000, 0x00000000,
- * Rx frame offset 0x08: 0x00000000, 0x00000000, 0x000F0800, 0x00084000,
- * Rx frame offset 0x10: 0x00000000, 0x00000000, 0x00000000, 0x00030011,
- * };
- *
- * To add the filter to HFB and direct the traffic to Rx queue 0, call:
- * bcmgenet_hfb_add_filter(priv, hfb_filter_ipv4_udp,
- * ARRAY_SIZE(hfb_filter_ipv4_udp), 0);
- */
-int bcmgenet_hfb_add_filter(struct bcmgenet_priv *priv, u32 *f_data,
- u32 f_length, u32 rx_queue)
-{
- int f_index;
-
- f_index = bcmgenet_hfb_find_unused_filter(priv);
- if (f_index < 0)
- return -ENOMEM;
-
- if (f_length > priv->hw_params->hfb_filter_size)
- return -EINVAL;
-
- bcmgenet_hfb_set_filter(priv, f_data, f_length, rx_queue, f_index);
- bcmgenet_hfb_enable_filter(priv, f_index);
-
- return 0;
-}
-
/* bcmgenet_hfb_clear
*
* Clear Hardware Filter Block and disable all filtering.
@@ -2118,11 +2042,6 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
goto out;
}
- if (skb_padto(skb, ETH_ZLEN)) {
- ret = NETDEV_TX_OK;
- goto out;
- }
-
/* Retain how many bytes will be sent on the wire, without TSB inserted
* by transmit checksum offload
*/
@@ -2169,6 +2088,9 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
len_stat = (size << DMA_BUFLENGTH_SHIFT) |
(priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT);
+ /* Note: if we ever change from DMA_TX_APPEND_CRC below we
+ * will need to restore software padding of "runt" packets
+ */
if (!i) {
len_stat |= DMA_TX_APPEND_CRC | DMA_SOP;
if (skb->ip_summed == CHECKSUM_PARTIAL)
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 7a3b22b35238..ebff1fc0d8ce 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -18168,8 +18168,8 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
rtnl_lock();
- /* We probably don't have netdev yet */
- if (!netdev || !netif_running(netdev))
+ /* Could be second call or maybe we don't have netdev yet */
+ if (!netdev || tp->pcierr_recovery || !netif_running(netdev))
goto done;
/* We needn't recover from permanent error */
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 5b9d7c60eebc..52582e8ed90e 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -2558,22 +2558,23 @@ static int macb_open(struct net_device *dev)
err = macb_phylink_connect(bp);
if (err)
- goto napi_exit;
+ goto reset_hw;
netif_tx_start_all_queues(dev);
if (bp->ptp_info)
bp->ptp_info->ptp_init(dev);
-napi_exit:
+ return 0;
+
+reset_hw:
+ macb_reset_hw(bp);
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
napi_disable(&queue->napi);
+ macb_free_consistent(bp);
pm_exit:
- if (err) {
- pm_runtime_put_sync(&bp->pdev->dev);
- return err;
- }
- return 0;
+ pm_runtime_put_sync(&bp->pdev->dev);
+ return err;
}
static int macb_close(struct net_device *dev)
@@ -3761,15 +3762,9 @@ static int macb_init(struct platform_device *pdev)
static struct sifive_fu540_macb_mgmt *mgmt;
-/* Initialize and start the Receiver and Transmit subsystems */
-static int at91ether_start(struct net_device *dev)
+static int at91ether_alloc_coherent(struct macb *lp)
{
- struct macb *lp = netdev_priv(dev);
struct macb_queue *q = &lp->queues[0];
- struct macb_dma_desc *desc;
- dma_addr_t addr;
- u32 ctl;
- int i;
q->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
(AT91ETHER_MAX_RX_DESCR *
@@ -3791,6 +3786,43 @@ static int at91ether_start(struct net_device *dev)
return -ENOMEM;
}
+ return 0;
+}
+
+static void at91ether_free_coherent(struct macb *lp)
+{
+ struct macb_queue *q = &lp->queues[0];
+
+ if (q->rx_ring) {
+ dma_free_coherent(&lp->pdev->dev,
+ AT91ETHER_MAX_RX_DESCR *
+ macb_dma_desc_get_size(lp),
+ q->rx_ring, q->rx_ring_dma);
+ q->rx_ring = NULL;
+ }
+
+ if (q->rx_buffers) {
+ dma_free_coherent(&lp->pdev->dev,
+ AT91ETHER_MAX_RX_DESCR *
+ AT91ETHER_MAX_RBUFF_SZ,
+ q->rx_buffers, q->rx_buffers_dma);
+ q->rx_buffers = NULL;
+ }
+}
+
+/* Initialize and start the Receiver and Transmit subsystems */
+static int at91ether_start(struct macb *lp)
+{
+ struct macb_queue *q = &lp->queues[0];
+ struct macb_dma_desc *desc;
+ dma_addr_t addr;
+ u32 ctl;
+ int i, ret;
+
+ ret = at91ether_alloc_coherent(lp);
+ if (ret)
+ return ret;
+
addr = q->rx_buffers_dma;
for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
desc = macb_rx_desc(q, i);
@@ -3812,9 +3844,39 @@ static int at91ether_start(struct net_device *dev)
ctl = macb_readl(lp, NCR);
macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE));
+ /* Enable MAC interrupts */
+ macb_writel(lp, IER, MACB_BIT(RCOMP) |
+ MACB_BIT(RXUBR) |
+ MACB_BIT(ISR_TUND) |
+ MACB_BIT(ISR_RLE) |
+ MACB_BIT(TCOMP) |
+ MACB_BIT(ISR_ROVR) |
+ MACB_BIT(HRESP));
+
return 0;
}
+static void at91ether_stop(struct macb *lp)
+{
+ u32 ctl;
+
+ /* Disable MAC interrupts */
+ macb_writel(lp, IDR, MACB_BIT(RCOMP) |
+ MACB_BIT(RXUBR) |
+ MACB_BIT(ISR_TUND) |
+ MACB_BIT(ISR_RLE) |
+ MACB_BIT(TCOMP) |
+ MACB_BIT(ISR_ROVR) |
+ MACB_BIT(HRESP));
+
+ /* Disable Receiver and Transmitter */
+ ctl = macb_readl(lp, NCR);
+ macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));
+
+ /* Free resources. */
+ at91ether_free_coherent(lp);
+}
+
/* Open the ethernet interface */
static int at91ether_open(struct net_device *dev)
{
@@ -3834,63 +3896,36 @@ static int at91ether_open(struct net_device *dev)
macb_set_hwaddr(lp);
- ret = at91ether_start(dev);
+ ret = at91ether_start(lp);
if (ret)
- return ret;
-
- /* Enable MAC interrupts */
- macb_writel(lp, IER, MACB_BIT(RCOMP) |
- MACB_BIT(RXUBR) |
- MACB_BIT(ISR_TUND) |
- MACB_BIT(ISR_RLE) |
- MACB_BIT(TCOMP) |
- MACB_BIT(ISR_ROVR) |
- MACB_BIT(HRESP));
+ goto pm_exit;
ret = macb_phylink_connect(lp);
if (ret)
- return ret;
+ goto stop;
netif_start_queue(dev);
return 0;
+
+stop:
+ at91ether_stop(lp);
+pm_exit:
+ pm_runtime_put_sync(&lp->pdev->dev);
+ return ret;
}
/* Close the interface */
static int at91ether_close(struct net_device *dev)
{
struct macb *lp = netdev_priv(dev);
- struct macb_queue *q = &lp->queues[0];
- u32 ctl;
-
- /* Disable Receiver and Transmitter */
- ctl = macb_readl(lp, NCR);
- macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));
-
- /* Disable MAC interrupts */
- macb_writel(lp, IDR, MACB_BIT(RCOMP) |
- MACB_BIT(RXUBR) |
- MACB_BIT(ISR_TUND) |
- MACB_BIT(ISR_RLE) |
- MACB_BIT(TCOMP) |
- MACB_BIT(ISR_ROVR) |
- MACB_BIT(HRESP));
netif_stop_queue(dev);
phylink_stop(lp->phylink);
phylink_disconnect_phy(lp->phylink);
- dma_free_coherent(&lp->pdev->dev,
- AT91ETHER_MAX_RX_DESCR *
- macb_dma_desc_get_size(lp),
- q->rx_ring, q->rx_ring_dma);
- q->rx_ring = NULL;
-
- dma_free_coherent(&lp->pdev->dev,
- AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ,
- q->rx_buffers, q->rx_buffers_dma);
- q->rx_buffers = NULL;
+ at91ether_stop(lp);
return pm_runtime_put(&lp->pdev->dev);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
index 7b9cd69f9844..d8ab8e366818 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
@@ -1975,7 +1975,6 @@ int cudbg_collect_dump_context(struct cudbg_init *pdbg_init,
u8 mem_type[CTXT_INGRESS + 1] = { 0 };
struct cudbg_buffer temp_buff = { 0 };
struct cudbg_ch_cntxt *buff;
- u64 *dst_off, *src_off;
u8 *ctx_buf;
u8 i, k;
int rc;
@@ -2044,8 +2043,11 @@ int cudbg_collect_dump_context(struct cudbg_init *pdbg_init,
}
for (j = 0; j < max_ctx_qid; j++) {
+ __be64 *dst_off;
+ u64 *src_off;
+
src_off = (u64 *)(ctx_buf + j * SGE_CTXT_SIZE);
- dst_off = (u64 *)buff->data;
+ dst_off = (__be64 *)buff->data;
/* The data is stored in 64-bit cpu order. Convert it
* to big endian before parsing.
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h
index d3c654b9989b..80c6627fe981 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h
@@ -136,6 +136,9 @@ static inline __u8 bitswap_1(unsigned char val)
((val & 0x02) << 5) |
((val & 0x01) << 7);
}
+
+extern const char * const dcb_ver_array[];
+
#define CXGB4_DCB_ENABLED true
#else /* !CONFIG_CHELSIO_T4_DCB */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 828499256004..b477b8842905 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -2379,7 +2379,6 @@ static const struct file_operations rss_vf_config_debugfs_fops = {
};
#ifdef CONFIG_CHELSIO_T4_DCB
-extern char *dcb_ver_array[];
/* Data Center Bridging information for each port.
*/
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 9fd496732b2c..f27be1132d37 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -588,7 +588,7 @@ static void fw_caps_to_lmm(enum fw_port_type port_type,
/**
* lmm_to_fw_caps - translate ethtool Link Mode Mask to Firmware
* capabilities
- * @et_lmm: ethtool Link Mode Mask
+ * @link_mode_mask: ethtool Link Mode Mask
*
* Translate ethtool Link Mode Mask into a Firmware Port capabilities
* value.
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
index 796555255207..7a7f61a8cdf4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -165,6 +165,9 @@ static void set_nat_params(struct adapter *adap, struct filter_entry *f,
unsigned int tid, bool dip, bool sip, bool dp,
bool sp)
{
+ u8 *nat_lp = (u8 *)&f->fs.nat_lport;
+ u8 *nat_fp = (u8 *)&f->fs.nat_fport;
+
if (dip) {
if (f->fs.type) {
set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W,
@@ -236,8 +239,9 @@ static void set_nat_params(struct adapter *adap, struct filter_entry *f,
}
set_tcb_field(adap, f, tid, TCB_PDU_HDR_LEN_W, WORD_MASK,
- (dp ? f->fs.nat_lport : 0) |
- (sp ? f->fs.nat_fport << 16 : 0), 1);
+ (dp ? (nat_lp[1] | nat_lp[0] << 8) : 0) |
+ (sp ? (nat_fp[1] << 16 | nat_fp[0] << 24) : 0),
+ 1);
}
/* Validate filter spec against configuration done on the card. */
@@ -909,6 +913,9 @@ int set_filter_wr(struct adapter *adapter, int fidx)
fwr->fpm = htons(f->fs.mask.fport);
if (adapter->params.filter2_wr_support) {
+ u8 *nat_lp = (u8 *)&f->fs.nat_lport;
+ u8 *nat_fp = (u8 *)&f->fs.nat_fport;
+
fwr->natmode_to_ulp_type =
FW_FILTER2_WR_ULP_TYPE_V(f->fs.nat_mode ?
ULP_MODE_TCPDDP :
@@ -916,8 +923,8 @@ int set_filter_wr(struct adapter *adapter, int fidx)
FW_FILTER2_WR_NATMODE_V(f->fs.nat_mode);
memcpy(fwr->newlip, f->fs.nat_lip, sizeof(fwr->newlip));
memcpy(fwr->newfip, f->fs.nat_fip, sizeof(fwr->newfip));
- fwr->newlport = htons(f->fs.nat_lport);
- fwr->newfport = htons(f->fs.nat_fport);
+ fwr->newlport = htons(nat_lp[1] | nat_lp[0] << 8);
+ fwr->newfport = htons(nat_fp[1] | nat_fp[0] << 8);
}
/* Mark the filter as "pending" and ship off the Filter Work Request.
@@ -1105,16 +1112,16 @@ static bool is_addr_all_mask(u8 *ipmask, int family)
struct in_addr *addr;
addr = (struct in_addr *)ipmask;
- if (addr->s_addr == 0xffffffff)
+ if (ntohl(addr->s_addr) == 0xffffffff)
return true;
} else if (family == AF_INET6) {
struct in6_addr *addr6;
addr6 = (struct in6_addr *)ipmask;
- if (addr6->s6_addr32[0] == 0xffffffff &&
- addr6->s6_addr32[1] == 0xffffffff &&
- addr6->s6_addr32[2] == 0xffffffff &&
- addr6->s6_addr32[3] == 0xffffffff)
+ if (ntohl(addr6->s6_addr32[0]) == 0xffffffff &&
+ ntohl(addr6->s6_addr32[1]) == 0xffffffff &&
+ ntohl(addr6->s6_addr32[2]) == 0xffffffff &&
+ ntohl(addr6->s6_addr32[3]) == 0xffffffff)
return true;
}
return false;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 854b1717a70d..0329a6b52087 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -449,7 +449,7 @@ static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
* or -1
* @addr: the new MAC address value
* @persist: whether a new MAC allocation should be persistent
- * @add_smt: if true also add the address to the HW SMT
+ * @smt_idx: the destination to store the new SMT index.
*
* Modifies an MPS filter and sets it to the new MAC address if
* @tcam_idx >= 0, or adds the MAC address to a new filter if
@@ -1615,6 +1615,7 @@ static int tid_init(struct tid_info *t)
* @stid: the server TID
* @sip: local IP address to bind server to
* @sport: the server's TCP port
+ * @vlan: the VLAN header information
* @queue: queue to direct messages from this server to
*
* Create an IP server for the given port and address.
@@ -2609,7 +2610,7 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
/* Clear out filter specifications */
memset(&f->fs, 0, sizeof(struct ch_filter_specification));
- f->fs.val.lport = cpu_to_be16(sport);
+ f->fs.val.lport = be16_to_cpu(sport);
f->fs.mask.lport = ~0;
val = (u8 *)&sip;
if ((val[0] | val[1] | val[2] | val[3]) != 0) {
@@ -5377,10 +5378,10 @@ static inline bool is_x_10g_port(const struct link_config *lc)
static int cfg_queues(struct adapter *adap)
{
u32 avail_qsets, avail_eth_qsets, avail_uld_qsets;
- u32 i, n10g = 0, qidx = 0, n1g = 0;
u32 ncpus = num_online_cpus();
u32 niqflint, neq, num_ulds;
struct sge *s = &adap->sge;
+ u32 i, n10g = 0, qidx = 0;
u32 q10g = 0, q1g;
/* Reduce memory usage in kdump environment, disable all offload. */
@@ -5426,7 +5427,6 @@ static int cfg_queues(struct adapter *adap)
if (n10g)
q10g = (avail_eth_qsets - (adap->params.nports - n10g)) / n10g;
- n1g = adap->params.nports - n10g;
#ifdef CONFIG_CHELSIO_T4_DCB
/* For Data Center Bridging support we need to be able to support up
* to 8 Traffic Priorities; each of which will be assigned to its
@@ -5444,7 +5444,8 @@ static int cfg_queues(struct adapter *adap)
else
q10g = max(8U, q10g);
- while ((q10g * n10g) > (avail_eth_qsets - n1g * q1g))
+ while ((q10g * n10g) >
+ (avail_eth_qsets - (adap->params.nports - n10g) * q1g))
q10g--;
#else /* !CONFIG_CHELSIO_T4_DCB */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
index f5bc996ac77d..70dbee89118e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
@@ -194,6 +194,7 @@ int cxgb4_ptp_redirect_rx_packet(struct adapter *adapter, struct port_info *pi)
}
/**
+ * cxgb4_ptp_adjfreq - Adjust frequency of PHC cycle counter
* @ptp: ptp clock structure
* @ppb: Desired frequency change in parts per billion
*
@@ -229,7 +230,7 @@ static int cxgb4_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
/**
* cxgb4_ptp_fineadjtime - Shift the time of the hardware clock
- * @ptp: ptp clock structure
+ * @adapter: board private structure
* @delta: Desired change in nanoseconds
*
* Adjust the timer by resetting the timecounter structure.
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index 4a5fa9eba0b6..59b65d4db086 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -58,10 +58,6 @@ static struct ch_tc_pedit_fields pedits[] = {
PEDIT_FIELDS(IP6_, DST_63_32, 4, nat_lip, 4),
PEDIT_FIELDS(IP6_, DST_95_64, 4, nat_lip, 8),
PEDIT_FIELDS(IP6_, DST_127_96, 4, nat_lip, 12),
- PEDIT_FIELDS(TCP_, SPORT, 2, nat_fport, 0),
- PEDIT_FIELDS(TCP_, DPORT, 2, nat_lport, 0),
- PEDIT_FIELDS(UDP_, SPORT, 2, nat_fport, 0),
- PEDIT_FIELDS(UDP_, DPORT, 2, nat_lport, 0),
};
static struct ch_tc_flower_entry *allocate_flower_entry(void)
@@ -156,14 +152,14 @@ static void cxgb4_process_flow_match(struct net_device *dev,
struct flow_match_ports match;
flow_rule_match_ports(rule, &match);
- fs->val.lport = cpu_to_be16(match.key->dst);
- fs->mask.lport = cpu_to_be16(match.mask->dst);
- fs->val.fport = cpu_to_be16(match.key->src);
- fs->mask.fport = cpu_to_be16(match.mask->src);
+ fs->val.lport = be16_to_cpu(match.key->dst);
+ fs->mask.lport = be16_to_cpu(match.mask->dst);
+ fs->val.fport = be16_to_cpu(match.key->src);
+ fs->mask.fport = be16_to_cpu(match.mask->src);
/* also initialize nat_lport/fport to same values */
- fs->nat_lport = cpu_to_be16(match.key->dst);
- fs->nat_fport = cpu_to_be16(match.key->src);
+ fs->nat_lport = fs->val.lport;
+ fs->nat_fport = fs->val.fport;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
@@ -354,12 +350,9 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
switch (offset) {
case PEDIT_TCP_SPORT_DPORT:
if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
- offload_pedit(fs, cpu_to_be32(val) >> 16,
- cpu_to_be32(mask) >> 16,
- TCP_SPORT);
+ fs->nat_fport = val;
else
- offload_pedit(fs, cpu_to_be32(val),
- cpu_to_be32(mask), TCP_DPORT);
+ fs->nat_lport = val >> 16;
}
fs->nat_mode = NAT_MODE_ALL;
break;
@@ -367,12 +360,9 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
switch (offset) {
case PEDIT_UDP_SPORT_DPORT:
if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
- offload_pedit(fs, cpu_to_be32(val) >> 16,
- cpu_to_be32(mask) >> 16,
- UDP_SPORT);
+ fs->nat_fport = val;
else
- offload_pedit(fs, cpu_to_be32(val),
- cpu_to_be32(mask), UDP_DPORT);
+ fs->nat_lport = val >> 16;
}
fs->nat_mode = NAT_MODE_ALL;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
index 3f3c11e54d97..dede02505ceb 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
@@ -48,7 +48,7 @@ static int fill_match_fields(struct adapter *adap,
bool next_header)
{
unsigned int i, j;
- u32 val, mask;
+ __be32 val, mask;
int off, err;
bool found;
@@ -228,7 +228,7 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
const struct cxgb4_next_header *next;
bool found = false;
unsigned int i, j;
- u32 val, mask;
+ __be32 val, mask;
int off;
if (t->table[link_uhtid - 1].link_handle) {
@@ -242,10 +242,10 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
/* Try to find matches that allow jumps to next header. */
for (i = 0; next[i].jump; i++) {
- if (next[i].offoff != cls->knode.sel->offoff ||
- next[i].shift != cls->knode.sel->offshift ||
- next[i].mask != cls->knode.sel->offmask ||
- next[i].offset != cls->knode.sel->off)
+ if (next[i].sel.offoff != cls->knode.sel->offoff ||
+ next[i].sel.offshift != cls->knode.sel->offshift ||
+ next[i].sel.offmask != cls->knode.sel->offmask ||
+ next[i].sel.off != cls->knode.sel->off)
continue;
/* Found a possible candidate. Find a key that
@@ -257,9 +257,9 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
val = cls->knode.sel->keys[j].val;
mask = cls->knode.sel->keys[j].mask;
- if (next[i].match_off == off &&
- next[i].match_val == val &&
- next[i].match_mask == mask) {
+ if (next[i].key.off == off &&
+ next[i].key.val == val &&
+ next[i].key.mask == mask) {
found = true;
break;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h
index 125868c6770a..f59dd4b2ae6f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h
@@ -38,12 +38,12 @@
struct cxgb4_match_field {
int off; /* Offset from the beginning of the header to match */
/* Fill the value/mask pair in the spec if matched */
- int (*val)(struct ch_filter_specification *f, u32 val, u32 mask);
+ int (*val)(struct ch_filter_specification *f, __be32 val, __be32 mask);
};
/* IPv4 match fields */
static inline int cxgb4_fill_ipv4_tos(struct ch_filter_specification *f,
- u32 val, u32 mask)
+ __be32 val, __be32 mask)
{
f->val.tos = (ntohl(val) >> 16) & 0x000000FF;
f->mask.tos = (ntohl(mask) >> 16) & 0x000000FF;
@@ -52,7 +52,7 @@ static inline int cxgb4_fill_ipv4_tos(struct ch_filter_specification *f,
}
static inline int cxgb4_fill_ipv4_frag(struct ch_filter_specification *f,
- u32 val, u32 mask)
+ __be32 val, __be32 mask)
{
u32 mask_val;
u8 frag_val;
@@ -74,7 +74,7 @@ static inline int cxgb4_fill_ipv4_frag(struct ch_filter_specification *f,
}
static inline int cxgb4_fill_ipv4_proto(struct ch_filter_specification *f,
- u32 val, u32 mask)
+ __be32 val, __be32 mask)
{
f->val.proto = (ntohl(val) >> 16) & 0x000000FF;
f->mask.proto = (ntohl(mask) >> 16) & 0x000000FF;
@@ -83,7 +83,7 @@ static inline int cxgb4_fill_ipv4_proto(struct ch_filter_specification *f,
}
static inline int cxgb4_fill_ipv4_src_ip(struct ch_filter_specification *f,
- u32 val, u32 mask)
+ __be32 val, __be32 mask)
{
memcpy(&f->val.fip[0], &val, sizeof(u32));
memcpy(&f->mask.fip[0], &mask, sizeof(u32));
@@ -92,7 +92,7 @@ static inline int cxgb4_fill_ipv4_src_ip(struct ch_filter_specification *f,
}
static inline int cxgb4_fill_ipv4_dst_ip(struct ch_filter_specification *f,
- u32 val, u32 mask)
+ __be32 val, __be32 mask)
{
memcpy(&f->val.lip[0], &val, sizeof(u32));
memcpy(&f->mask.lip[0], &mask, sizeof(u32));
@@ -111,7 +111,7 @@ static const struct cxgb4_match_field cxgb4_ipv4_fields[] = {
/* IPv6 match fields */
static inline int cxgb4_fill_ipv6_tos(struct ch_filter_specification *f,
- u32 val, u32 mask)
+ __be32 val, __be32 mask)
{
f->val.tos = (ntohl(val) >> 20) & 0x000000FF;
f->mask.tos = (ntohl(mask) >> 20) & 0x000000FF;
@@ -120,7 +120,7 @@ static inline int cxgb4_fill_ipv6_tos(struct ch_filter_specification *f,
}
static inline int cxgb4_fill_ipv6_proto(struct ch_filter_specification *f,
- u32 val, u32 mask)
+ __be32 val, __be32 mask)
{
f->val.proto = (ntohl(val) >> 8) & 0x000000FF;
f->mask.proto = (ntohl(mask) >> 8) & 0x000000FF;
@@ -129,7 +129,7 @@ static inline int cxgb4_fill_ipv6_proto(struct ch_filter_specification *f,
}
static inline int cxgb4_fill_ipv6_src_ip0(struct ch_filter_specification *f,
- u32 val, u32 mask)
+ __be32 val, __be32 mask)
{
memcpy(&f->val.fip[0], &val, sizeof(u32));
memcpy(&f->mask.fip[0], &mask, sizeof(u32));
@@ -138,7 +138,7 @@ static inline int cxgb4_fill_ipv6_src_ip0(struct ch_filter_specification *f,
}
static inline int cxgb4_fill_ipv6_src_ip1(struct ch_filter_specification *f,
- u32 val, u32 mask)
+ __be32 val, __be32 mask)
{
memcpy(&f->val.fip[4], &val, sizeof(u32));
memcpy(&f->mask.fip[4], &mask, sizeof(u32));
@@ -147,7 +147,7 @@ static inline int cxgb4_fill_ipv6_src_ip1(struct ch_filter_specification *f,
}
static inline int cxgb4_fill_ipv6_src_ip2(struct ch_filter_specification *f,
- u32 val, u32 mask)
+ __be32 val, __be32 mask)
{
memcpy(&f->val.fip[8], &val, sizeof(u32));
memcpy(&f->mask.fip[8], &mask, sizeof(u32));
@@ -156,7 +156,7 @@ static inline int cxgb4_fill_ipv6_src_ip2(struct ch_filter_specification *f,
}
static inline int cxgb4_fill_ipv6_src_ip3(struct ch_filter_specification *f,
- u32 val, u32 mask)
+ __be32 val, __be32 mask)
{
memcpy(&f->val.fip[12], &val, sizeof(u32));
memcpy(&f->mask.fip[12], &mask, sizeof(u32));
@@ -165,7 +165,7 @@ static inline int cxgb4_fill_ipv6_src_ip3(struct ch_filter_specification *f,
}
static inline int cxgb4_fill_ipv6_dst_ip0(struct ch_filter_specification *f,
- u32 val, u32 mask)
+ __be32 val, __be32 mask)
{
memcpy(&f->val.lip[0], &val, sizeof(u32));
memcpy(&f->mask.lip[0], &mask, sizeof(u32));
@@ -174,7 +174,7 @@ static inline int cxgb4_fill_ipv6_dst_ip0(struct ch_filter_specification *f,
}
static inline int cxgb4_fill_ipv6_dst_ip1(struct ch_filter_specification *f,
- u32 val, u32 mask)
+ __be32 val, __be32 mask)
{
memcpy(&f->val.lip[4], &val, sizeof(u32));
memcpy(&f->mask.lip[4], &mask, sizeof(u32));
@@ -183,7 +183,7 @@ static inline int cxgb4_fill_ipv6_dst_ip1(struct ch_filter_specification *f,
}
static inline int cxgb4_fill_ipv6_dst_ip2(struct ch_filter_specification *f,
- u32 val, u32 mask)
+ __be32 val, __be32 mask)
{
memcpy(&f->val.lip[8], &val, sizeof(u32));
memcpy(&f->mask.lip[8], &mask, sizeof(u32));
@@ -192,7 +192,7 @@ static inline int cxgb4_fill_ipv6_dst_ip2(struct ch_filter_specification *f,
}
static inline int cxgb4_fill_ipv6_dst_ip3(struct ch_filter_specification *f,
- u32 val, u32 mask)
+ __be32 val, __be32 mask)
{
memcpy(&f->val.lip[12], &val, sizeof(u32));
memcpy(&f->mask.lip[12], &mask, sizeof(u32));
@@ -216,7 +216,7 @@ static const struct cxgb4_match_field cxgb4_ipv6_fields[] = {
/* TCP/UDP match */
static inline int cxgb4_fill_l4_ports(struct ch_filter_specification *f,
- u32 val, u32 mask)
+ __be32 val, __be32 mask)
{
f->val.fport = ntohl(val) >> 16;
f->mask.fport = ntohl(mask) >> 16;
@@ -237,19 +237,13 @@ static const struct cxgb4_match_field cxgb4_udp_fields[] = {
};
struct cxgb4_next_header {
- unsigned int offset; /* Offset to next header */
- /* offset, shift, and mask added to offset above
+ /* Offset, shift, and mask added to beginning of the header
* to get to next header. Useful when using a header
* field's value to jump to next header such as IHL field
* in IPv4 header.
*/
- unsigned int offoff;
- u32 shift;
- u32 mask;
- /* match criteria to make this jump */
- unsigned int match_off;
- u32 match_val;
- u32 match_mask;
+ struct tc_u32_sel sel;
+ struct tc_u32_key key;
/* location of jump to make */
const struct cxgb4_match_field *jump;
};
@@ -258,26 +252,74 @@ struct cxgb4_next_header {
* IPv4 header.
*/
static const struct cxgb4_next_header cxgb4_ipv4_jumps[] = {
- { .offset = 0, .offoff = 0, .shift = 6, .mask = 0xF,
- .match_off = 8, .match_val = 0x600, .match_mask = 0xFF00,
- .jump = cxgb4_tcp_fields },
- { .offset = 0, .offoff = 0, .shift = 6, .mask = 0xF,
- .match_off = 8, .match_val = 0x1100, .match_mask = 0xFF00,
- .jump = cxgb4_udp_fields },
- { .jump = NULL }
+ {
+ /* TCP Jump */
+ .sel = {
+ .off = 0,
+ .offoff = 0,
+ .offshift = 6,
+ .offmask = cpu_to_be16(0x0f00),
+ },
+ .key = {
+ .off = 8,
+ .val = cpu_to_be32(0x00060000),
+ .mask = cpu_to_be32(0x00ff0000),
+ },
+ .jump = cxgb4_tcp_fields,
+ },
+ {
+ /* UDP Jump */
+ .sel = {
+ .off = 0,
+ .offoff = 0,
+ .offshift = 6,
+ .offmask = cpu_to_be16(0x0f00),
+ },
+ .key = {
+ .off = 8,
+ .val = cpu_to_be32(0x00110000),
+ .mask = cpu_to_be32(0x00ff0000),
+ },
+ .jump = cxgb4_udp_fields,
+ },
+ { .jump = NULL },
};
/* Accept a rule with a jump directly past the 40 Bytes of IPv6 fixed header
* to get to transport layer header.
*/
static const struct cxgb4_next_header cxgb4_ipv6_jumps[] = {
- { .offset = 0x28, .offoff = 0, .shift = 0, .mask = 0,
- .match_off = 4, .match_val = 0x60000, .match_mask = 0xFF0000,
- .jump = cxgb4_tcp_fields },
- { .offset = 0x28, .offoff = 0, .shift = 0, .mask = 0,
- .match_off = 4, .match_val = 0x110000, .match_mask = 0xFF0000,
- .jump = cxgb4_udp_fields },
- { .jump = NULL }
+ {
+ /* TCP Jump */
+ .sel = {
+ .off = 40,
+ .offoff = 0,
+ .offshift = 0,
+ .offmask = 0,
+ },
+ .key = {
+ .off = 4,
+ .val = cpu_to_be32(0x00000600),
+ .mask = cpu_to_be32(0x0000ff00),
+ },
+ .jump = cxgb4_tcp_fields,
+ },
+ {
+ /* UDP Jump */
+ .sel = {
+ .off = 40,
+ .offoff = 0,
+ .offshift = 0,
+ .offmask = 0,
+ },
+ .key = {
+ .off = 4,
+ .val = cpu_to_be32(0x00001100),
+ .mask = cpu_to_be32(0x0000ff00),
+ },
+ .jump = cxgb4_udp_fields,
+ },
+ { .jump = NULL },
};
struct cxgb4_link {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 72b37a66c7d8..c4864125fe02 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -503,40 +503,19 @@ u64 cxgb4_select_ntuple(struct net_device *dev,
EXPORT_SYMBOL(cxgb4_select_ntuple);
/*
- * Called when address resolution fails for an L2T entry to handle packets
- * on the arpq head. If a packet specifies a failure handler it is invoked,
- * otherwise the packet is sent to the device.
- */
-static void handle_failed_resolution(struct adapter *adap, struct l2t_entry *e)
-{
- struct sk_buff *skb;
-
- while ((skb = __skb_dequeue(&e->arpq)) != NULL) {
- const struct l2t_skb_cb *cb = L2T_SKB_CB(skb);
-
- spin_unlock(&e->lock);
- if (cb->arp_err_handler)
- cb->arp_err_handler(cb->handle, skb);
- else
- t4_ofld_send(adap, skb);
- spin_lock(&e->lock);
- }
-}
-
-/*
* Called when the host's neighbor layer makes a change to some entry that is
* loaded into the HW L2 table.
*/
void t4_l2t_update(struct adapter *adap, struct neighbour *neigh)
{
- struct l2t_entry *e;
- struct sk_buff_head *arpq = NULL;
- struct l2t_data *d = adap->l2t;
unsigned int addr_len = neigh->tbl->key_len;
u32 *addr = (u32 *) neigh->primary_key;
- int ifidx = neigh->dev->ifindex;
- int hash = addr_hash(d, addr, addr_len, ifidx);
+ int hash, ifidx = neigh->dev->ifindex;
+ struct sk_buff_head *arpq = NULL;
+ struct l2t_data *d = adap->l2t;
+ struct l2t_entry *e;
+ hash = addr_hash(d, addr, addr_len, ifidx);
read_lock_bh(&d->lock);
for (e = d->l2tab[hash].first; e; e = e->next)
if (!addreq(e, addr) && e->ifindex == ifidx) {
@@ -569,8 +548,25 @@ void t4_l2t_update(struct adapter *adap, struct neighbour *neigh)
write_l2e(adap, e, 0);
}
- if (arpq)
- handle_failed_resolution(adap, e);
+ if (arpq) {
+ struct sk_buff *skb;
+
+ /* Called when address resolution fails for an L2T
+ * entry to handle packets on the arpq head. If a
+ * packet specifies a failure handler it is invoked,
+ * otherwise the packet is sent to the device.
+ */
+ while ((skb = __skb_dequeue(&e->arpq)) != NULL) {
+ const struct l2t_skb_cb *cb = L2T_SKB_CB(skb);
+
+ spin_unlock(&e->lock);
+ if (cb->arp_err_handler)
+ cb->arp_err_handler(cb->handle, skb);
+ else
+ t4_ofld_send(adap, skb);
+ spin_lock(&e->lock);
+ }
+ }
spin_unlock_bh(&e->lock);
}
@@ -613,6 +609,7 @@ struct l2t_entry *t4_l2t_alloc_switching(struct adapter *adap, u16 vlan,
}
/**
+ * cxgb4_l2t_alloc_switching - Allocates an L2T entry for switch filters
* @dev: net_device pointer
* @vlan: VLAN Id
* @port: Associated port
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c
index fde93c50cfec..a1b14468d1ff 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sched.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c
@@ -598,7 +598,7 @@ struct sched_class *cxgb4_sched_class_alloc(struct net_device *dev,
/**
* cxgb4_sched_class_free - free a scheduling class
* @dev: net_device pointer
- * @e: scheduling class
+ * @classid: scheduling class id to free
*
* Frees a scheduling class if there are no users.
*/
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 1359158652b7..32a45dc51ed7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -302,7 +302,7 @@ static void deferred_unmap_destructor(struct sk_buff *skb)
/**
* free_tx_desc - reclaims Tx descriptors and their buffers
- * @adapter: the adapter
+ * @adap: the adapter
* @q: the Tx queue to reclaim descriptors from
* @n: the number of descriptors to reclaim
* @unmap: whether the buffers should be unmapped for DMA
@@ -722,6 +722,7 @@ static inline unsigned int flits_to_desc(unsigned int n)
/**
* is_eth_imm - can an Ethernet packet be sent as immediate data?
* @skb: the packet
+ * @chip_ver: chip version
*
* Returns whether an Ethernet packet is small enough to fit as
* immediate data. Return value corresponds to headroom required.
@@ -749,6 +750,7 @@ static inline int is_eth_imm(const struct sk_buff *skb, unsigned int chip_ver)
/**
* calc_tx_flits - calculate the number of flits for a packet Tx WR
* @skb: the packet
+ * @chip_ver: chip version
*
* Returns the number of flits needed for a Tx WR for the given Ethernet
* packet, including the needed WR and CPL headers.
@@ -804,6 +806,7 @@ static inline unsigned int calc_tx_flits(const struct sk_buff *skb,
/**
* calc_tx_descs - calculate the number of Tx descriptors for a packet
* @skb: the packet
+ * @chip_ver: chip version
*
* Returns the number of Tx descriptors needed for the given Ethernet
* packet, including the needed WR and CPL headers.
@@ -1425,12 +1428,10 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
qidx = skb_get_queue_mapping(skb);
if (ptp_enabled) {
- spin_lock(&adap->ptp_lock);
if (!(adap->ptp_tx_skb)) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
adap->ptp_tx_skb = skb_get(skb);
} else {
- spin_unlock(&adap->ptp_lock);
goto out_free;
}
q = &adap->sge.ptptxq;
@@ -1444,11 +1445,8 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
#ifdef CONFIG_CHELSIO_T4_FCOE
ret = cxgb_fcoe_offload(skb, adap, pi, &cntrl);
- if (unlikely(ret == -ENOTSUPP)) {
- if (ptp_enabled)
- spin_unlock(&adap->ptp_lock);
+ if (unlikely(ret == -EOPNOTSUPP))
goto out_free;
- }
#endif /* CONFIG_CHELSIO_T4_FCOE */
chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
@@ -1461,8 +1459,6 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
dev_err(adap->pdev_dev,
"%s: Tx ring %u full while queue awake!\n",
dev->name, qidx);
- if (ptp_enabled)
- spin_unlock(&adap->ptp_lock);
return NETDEV_TX_BUSY;
}
@@ -1481,8 +1477,6 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) {
memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
q->mapping_err++;
- if (ptp_enabled)
- spin_unlock(&adap->ptp_lock);
goto out_free;
}
@@ -1533,8 +1527,7 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
if (iph->version == 4) {
iph->check = 0;
iph->tot_len = 0;
- iph->check = (u16)(~ip_fast_csum((u8 *)iph,
- iph->ihl));
+ iph->check = ~ip_fast_csum((u8 *)iph, iph->ihl);
}
if (skb->ip_summed == CHECKSUM_PARTIAL)
cntrl = hwcsum(adap->params.chip, skb);
@@ -1630,8 +1623,6 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
txq_advance(&q->q, ndesc);
cxgb4_ring_tx_db(adap, &q->q, ndesc);
- if (ptp_enabled)
- spin_unlock(&adap->ptp_lock);
return NETDEV_TX_OK;
out_free:
@@ -2377,6 +2368,16 @@ netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(qid >= pi->nqsets))
return cxgb4_ethofld_xmit(skb, dev);
+ if (is_ptp_enabled(skb, dev)) {
+ struct adapter *adap = netdev2adap(dev);
+ netdev_tx_t ret;
+
+ spin_lock(&adap->ptp_lock);
+ ret = cxgb4_eth_xmit(skb, dev);
+ spin_unlock(&adap->ptp_lock);
+ return ret;
+ }
+
return cxgb4_eth_xmit(skb, dev);
}
@@ -2410,9 +2411,9 @@ static void eosw_txq_flush_pending_skbs(struct sge_eosw_txq *eosw_txq)
/**
* cxgb4_ethofld_send_flowc - Send ETHOFLD flowc request to bind eotid to tc.
- * @dev - netdevice
- * @eotid - ETHOFLD tid to bind/unbind
- * @tc - traffic class. If set to FW_SCHED_CLS_NONE, then unbinds the @eotid
+ * @dev: netdevice
+ * @eotid: ETHOFLD tid to bind/unbind
+ * @tc: traffic class. If set to FW_SCHED_CLS_NONE, then unbinds the @eotid
*
* Send a FLOWC work request to bind an ETHOFLD TID to a traffic class.
* If @tc is set to FW_SCHED_CLS_NONE, then the @eotid is unbound from
@@ -2691,7 +2692,6 @@ static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
/**
* txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion
- * @adap: the adapter
* @q: the queue to stop
*
* Mark a Tx queue stopped due to I/O MMU exhaustion and resulting
@@ -3286,7 +3286,7 @@ enum {
/**
* t4_systim_to_hwstamp - read hardware time stamp
- * @adap: the adapter
+ * @adapter: the adapter
* @skb: the packet
*
* Read Time Stamp from MPS packet and insert in skb which
@@ -3313,15 +3313,16 @@ static noinline int t4_systim_to_hwstamp(struct adapter *adapter,
hwtstamps = skb_hwtstamps(skb);
memset(hwtstamps, 0, sizeof(*hwtstamps));
- hwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*((u64 *)data)));
+ hwtstamps->hwtstamp = ns_to_ktime(get_unaligned_be64(data));
return RX_PTP_PKT_SUC;
}
/**
* t4_rx_hststamp - Recv PTP Event Message
- * @adap: the adapter
+ * @adapter: the adapter
* @rsp: the response queue descriptor holding the RX_PKT message
+ * @rxq: the response queue holding the RX_PKT message
* @skb: the packet
*
* PTP enabled and MPS packet, read HW timestamp
@@ -3345,7 +3346,7 @@ static int t4_rx_hststamp(struct adapter *adapter, const __be64 *rsp,
/**
* t4_tx_hststamp - Loopback PTP Transmit Event Message
- * @adap: the adapter
+ * @adapter: the adapter
* @skb: the packet
* @dev: the ingress net device
*
diff --git a/drivers/net/ethernet/chelsio/cxgb4/smt.c b/drivers/net/ethernet/chelsio/cxgb4/smt.c
index 01c65d13fc0e..cbe72ed27b1e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/smt.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/smt.c
@@ -103,6 +103,7 @@ static void t4_smte_free(struct smt_entry *e)
}
/**
+ * cxgb4_smt_release - Release SMT entry
* @e: smt entry to release
*
* Releases ref count and frees up an smt entry from SMT table
@@ -231,6 +232,7 @@ static struct smt_entry *t4_smt_alloc_switching(struct adapter *adap, u16 pfvf,
}
/**
+ * cxgb4_smt_alloc_switching - Allocates an SMT entry for switch filters.
* @dev: net_device pointer
* @smac: MAC address to add to SMT
* Returns pointer to the SMT entry created
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 1c8068c02728..1aa6dc10dc0b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -3163,7 +3163,7 @@ int t4_get_tp_version(struct adapter *adapter, u32 *vers)
/**
* t4_get_exprom_version - return the Expansion ROM version (if any)
- * @adapter: the adapter
+ * @adap: the adapter
* @vers: where to place the version
*
* Reads the Expansion ROM header from FLASH and returns the version
@@ -5310,7 +5310,7 @@ static unsigned int t4_use_ldst(struct adapter *adap)
* @cmd: TP fw ldst address space type
* @vals: where the indirect register values are stored/written
* @nregs: how many indirect registers to read/write
- * @start_idx: index of first indirect register to read/write
+ * @start_index: index of first indirect register to read/write
* @rw: Read (1) or Write (0)
* @sleep_ok: if true we may sleep while awaiting command completion
*
@@ -6115,7 +6115,7 @@ void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
/**
* compute_mps_bg_map - compute the MPS Buffer Group Map for a Port
- * @adap: the adapter
+ * @adapter: the adapter
* @pidx: the port index
*
* Computes and returns a bitmap indicating which MPS buffer groups are
@@ -6252,7 +6252,7 @@ static unsigned int t4_get_tp_e2c_map(struct adapter *adapter, int pidx)
/**
* t4_get_tp_ch_map - return TP ingress channels associated with a port
- * @adapter: the adapter
+ * @adap: the adapter
* @pidx: the port index
*
* Returns a bitmap indicating which TP Ingress Channels are associated
@@ -6589,7 +6589,7 @@ int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
* @phy_addr: the PHY address
* @mmd: the PHY MMD to access (0 for clause 22 PHYs)
* @reg: the register to write
- * @valp: value to write
+ * @val: value to write
*
* Issues a FW command through the given mailbox to write a PHY register.
*/
@@ -6615,7 +6615,7 @@ int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
/**
* t4_sge_decode_idma_state - decode the idma state
- * @adap: the adapter
+ * @adapter: the adapter
* @state: the state idma is stuck in
*/
void t4_sge_decode_idma_state(struct adapter *adapter, int state)
@@ -6782,7 +6782,7 @@ void t4_sge_decode_idma_state(struct adapter *adapter, int state)
* t4_sge_ctxt_flush - flush the SGE context cache
* @adap: the adapter
* @mbox: mailbox to use for the FW command
- * @ctx_type: Egress or Ingress
+ * @ctxt_type: Egress or Ingress
*
* Issues a FW command through the given mailbox to flush the
* SGE context cache.
@@ -6809,7 +6809,7 @@ int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type)
/**
* t4_read_sge_dbqtimers - read SGE Doorbell Queue Timer values
- * @adap - the adapter
+ * @adap: the adapter
* @ndbqtimers: size of the provided SGE Doorbell Queue Timer table
* @dbqtimers: SGE Doorbell Queue Timer table
*
@@ -7092,6 +7092,7 @@ static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
/**
* t4_fw_restart - restart the firmware by taking the uP out of RESET
* @adap: the adapter
+ * @mbox: mailbox to use for the FW command
* @reset: if we want to do a RESET to restart things
*
* Restart firmware previously halted by t4_fw_halt(). On successful
@@ -7630,6 +7631,8 @@ int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
* @nmac: number of MAC addresses needed (1 to 5)
* @mac: the MAC addresses of the VI
* @rss_size: size of RSS table slice associated with this VI
+ * @vivld: the destination to store the VI Valid value.
+ * @vin: the destination to store the VIN value.
*
* Allocates a virtual interface for the given physical port. If @mac is
* not %NULL it contains the MAC addresses of the VI as assigned by FW.
@@ -7848,7 +7851,7 @@ int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid,
* t4_alloc_encap_mac_filt - Adds a mac entry in mps tcam with VNI support
* @adap: the adapter
* @viid: the VI id
- * @mac: the MAC address
+ * @addr: the MAC address
* @mask: the mask
* @vni: the VNI id for the tunnel protocol
* @vni_mask: mask for the VNI id
@@ -7897,11 +7900,11 @@ int t4_alloc_encap_mac_filt(struct adapter *adap, unsigned int viid,
* t4_alloc_raw_mac_filt - Adds a mac entry in mps tcam
* @adap: the adapter
* @viid: the VI id
- * @mac: the MAC address
+ * @addr: the MAC address
* @mask: the mask
* @idx: index at which to add this entry
- * @port_id: the port index
* @lookup_type: MAC address for inner (1) or outer (0) header
+ * @port_id: the port index
* @sleep_ok: call is allowed to sleep
*
* Adds the mac entry at the specified index using raw mac interface.
@@ -8126,7 +8129,7 @@ int t4_free_mac_filt(struct adapter *adap, unsigned int mbox,
* @idx: index of existing filter for old value of MAC address, or -1
* @addr: the new MAC address value
* @persist: whether a new MAC allocation should be persistent
- * @add_smt: if true also add the address to the HW SMT
+ * @smt_idx: the destination to store the new SMT index.
*
* Modifies an exact-match filter and sets it to the new MAC address.
* Note that in general it is not possible to modify the value of a given
@@ -8448,7 +8451,6 @@ int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
/**
* t4_link_down_rc_str - return a string for a Link Down Reason Code
- * @adap: the adapter
* @link_down_rc: Link Down Reason Code
*
* Returns a string representation of the Link Down Reason Code.
@@ -8472,9 +8474,7 @@ static const char *t4_link_down_rc_str(unsigned char link_down_rc)
return reason[link_down_rc];
}
-/**
- * Return the highest speed set in the port capabilities, in Mb/s.
- */
+/* Return the highest speed set in the port capabilities, in Mb/s. */
static unsigned int fwcap_to_speed(fw_port_cap32_t caps)
{
#define TEST_SPEED_RETURN(__caps_speed, __speed) \
@@ -9110,7 +9110,6 @@ found:
/**
* t4_prep_adapter - prepare SW and HW for operation
* @adapter: the adapter
- * @reset: if true perform a HW reset
*
* Initialize adapter SW state for the various HW modules, set initial
* values for some adapter tunables, take PHYs out of reset, and
@@ -10395,6 +10394,7 @@ int t4_sched_params(struct adapter *adapter, u8 type, u8 level, u8 mode,
/**
* t4_i2c_rd - read I2C data from adapter
* @adap: the adapter
+ * @mbox: mailbox to use for the FW command
* @port: Port number if per-port device; <0 if not
* @devid: per-port device ID or absolute device ID
* @offset: byte offset into device I2C space
@@ -10450,7 +10450,7 @@ int t4_i2c_rd(struct adapter *adap, unsigned int mbox, int port,
/**
* t4_set_vlan_acl - Set a VLAN id for the specified VF
- * @adapter: the adapter
+ * @adap: the adapter
* @mbox: mailbox to use for the FW command
* @vf: one of the VFs instantiated by the specified PF
* @vlan: The vlanid to be set
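All of the cxgb4 t4_hw.c hunks above are kernel-doc repairs rather than functional changes: each parameter in the comment must be spelled exactly as in the prototype, introduced with "@name:" rather than "@name -", and plain comments must not open with the "/**" kernel-doc marker. A minimal sketch of the expected shape, using a made-up helper purely for illustration:

/**
 * t4_example_op - one-line summary of what the helper does
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @val: value to program
 *
 * Longer description.  Each parameter gets exactly one "@name:" line,
 * named identically to the prototype below, so scripts/kernel-doc does
 * not warn about missing, excess or misspelled parameters.
 */
static int t4_example_op(struct adapter *adap, unsigned int mbox, u32 val);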
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index cec865a97464..a7641be9094f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -260,8 +260,7 @@ static int cxgb4vf_set_addr_hash(struct port_info *pi)
* @tcam_idx: TCAM index of existing filter for old value of MAC address,
* or -1
* @addr: the new MAC address value
- * @persist: whether a new MAC allocation should be persistent
- * @add_smt: if true also add the address to the HW SMT
+ * @persistent: whether a new MAC allocation should be persistent
*
* Modifies an MPS filter and sets it to the new MAC address if
* @tcam_idx >= 0, or adds the MAC address to a new filter if
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index f71c973398ec..8c3d6e11a4bf 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -1692,7 +1692,7 @@ static inline bool is_new_response(const struct rsp_ctrl *rc,
* restore_rx_bufs - put back a packet's RX buffers
* @gl: the packet gather list
* @fl: the SGE Free List
- * @nfrags: how many fragments in @si
+ * @frags: how many fragments in @si
*
* Called when we find out that the current packet, @si, can't be
* processed right away for some reason. This is a very rare event and
@@ -2054,7 +2054,7 @@ irq_handler_t t4vf_intr_handler(struct adapter *adapter)
/**
* sge_rx_timer_cb - perform periodic maintenance of SGE RX queues
- * @data: the adapter
+ * @t: Rx timer
*
* Runs periodically from a timer to perform maintenance of SGE RX queues.
*
@@ -2113,7 +2113,7 @@ static void sge_rx_timer_cb(struct timer_list *t)
/**
* sge_tx_timer_cb - perform periodic maintenance of SGE Tx queues
- * @data: the adapter
+ * @t: Tx timer
*
* Runs periodically from a timer to perform maintenance of SGE TX queues.
*
@@ -2405,6 +2405,7 @@ err:
* t4vf_sge_alloc_eth_txq - allocate an SGE Ethernet TX Queue
* @adapter: the adapter
* @txq: pointer to the new txq to be filled in
+ * @dev: the network device
* @devq: the network TX queue associated with the new txq
* @iqid: the relative ingress queue ID to which events relating to
* the new txq should be directed
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 9d49ff211cc1..a31b87390b50 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -389,9 +389,7 @@ static inline enum cc_fec fwcap_to_cc_fec(fw_port_cap32_t fw_fec)
return cc_fec;
}
-/**
- * Return the highest speed set in the port capabilities, in Mb/s.
- */
+/* Return the highest speed set in the port capabilities, in Mb/s. */
static unsigned int fwcap_to_speed(fw_port_cap32_t caps)
{
#define TEST_SPEED_RETURN(__caps_speed, __speed) \
@@ -1467,6 +1465,7 @@ int t4vf_identify_port(struct adapter *adapter, unsigned int viid,
* @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
* @vlanex: 1 to enable hardware VLAN Tag extraction, 0 to disable it,
* -1 no change
+ * @sleep_ok: call is allowed to sleep
*
* Sets Rx properties of a virtual interface.
*/
@@ -1906,7 +1905,7 @@ static const char *t4vf_link_down_rc_str(unsigned char link_down_rc)
/**
* t4vf_handle_get_port_info - process a FW reply message
* @pi: the port info
- * @rpl: start of the FW message
+ * @cmd: start of the FW message
*
* Processes a GET_PORT_INFO FW reply message.
*/
@@ -2137,8 +2136,6 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
return 0;
}
-/**
- */
int t4vf_prep_adapter(struct adapter *adapter)
{
int err;
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 298c55786fd9..96831f49925c 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -1595,6 +1595,24 @@ static int enetc_set_psfp(struct net_device *ndev, int en)
return 0;
}
+static void enetc_enable_rxvlan(struct net_device *ndev, bool en)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ int i;
+
+ for (i = 0; i < priv->num_rx_rings; i++)
+ enetc_bdr_enable_rxvlan(&priv->si->hw, i, en);
+}
+
+static void enetc_enable_txvlan(struct net_device *ndev, bool en)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ int i;
+
+ for (i = 0; i < priv->num_tx_rings; i++)
+ enetc_bdr_enable_txvlan(&priv->si->hw, i, en);
+}
+
int enetc_set_features(struct net_device *ndev,
netdev_features_t features)
{
@@ -1604,6 +1622,14 @@ int enetc_set_features(struct net_device *ndev,
if (changed & NETIF_F_RXHASH)
enetc_set_rss(ndev, !!(features & NETIF_F_RXHASH));
+ if (changed & NETIF_F_HW_VLAN_CTAG_RX)
+ enetc_enable_rxvlan(ndev,
+ !!(features & NETIF_F_HW_VLAN_CTAG_RX));
+
+ if (changed & NETIF_F_HW_VLAN_CTAG_TX)
+ enetc_enable_txvlan(ndev,
+ !!(features & NETIF_F_HW_VLAN_CTAG_TX));
+
if (changed & NETIF_F_HW_TC)
err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC));
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
index 6314051bc6c1..ce0d321c0639 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -531,22 +531,22 @@ struct enetc_msg_cmd_header {
/* Common H/W utility functions */
-static inline void enetc_enable_rxvlan(struct enetc_hw *hw, int si_idx,
- bool en)
+static inline void enetc_bdr_enable_rxvlan(struct enetc_hw *hw, int idx,
+ bool en)
{
- u32 val = enetc_rxbdr_rd(hw, si_idx, ENETC_RBMR);
+ u32 val = enetc_rxbdr_rd(hw, idx, ENETC_RBMR);
val = (val & ~ENETC_RBMR_VTE) | (en ? ENETC_RBMR_VTE : 0);
- enetc_rxbdr_wr(hw, si_idx, ENETC_RBMR, val);
+ enetc_rxbdr_wr(hw, idx, ENETC_RBMR, val);
}
-static inline void enetc_enable_txvlan(struct enetc_hw *hw, int si_idx,
- bool en)
+static inline void enetc_bdr_enable_txvlan(struct enetc_hw *hw, int idx,
+ bool en)
{
- u32 val = enetc_txbdr_rd(hw, si_idx, ENETC_TBMR);
+ u32 val = enetc_txbdr_rd(hw, idx, ENETC_TBMR);
val = (val & ~ENETC_TBMR_VIH) | (en ? ENETC_TBMR_VIH : 0);
- enetc_txbdr_wr(hw, si_idx, ENETC_TBMR, val);
+ enetc_txbdr_wr(hw, idx, ENETC_TBMR, val);
}
static inline void enetc_set_bdr_prio(struct enetc_hw *hw, int bdr_idx,
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index 824d211ec00f..4fac57dbb3c8 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -649,14 +649,6 @@ static int enetc_pf_set_features(struct net_device *ndev,
netdev_features_t changed = ndev->features ^ features;
struct enetc_ndev_priv *priv = netdev_priv(ndev);
- if (changed & NETIF_F_HW_VLAN_CTAG_RX)
- enetc_enable_rxvlan(&priv->si->hw, 0,
- !!(features & NETIF_F_HW_VLAN_CTAG_RX));
-
- if (changed & NETIF_F_HW_VLAN_CTAG_TX)
- enetc_enable_txvlan(&priv->si->hw, 0,
- !!(features & NETIF_F_HW_VLAN_CTAG_TX));
-
if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
struct enetc_pf *pf = enetc_si_priv(priv->si);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index c117074c16e3..23f278e46975 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -699,7 +699,7 @@ static void hns_nic_rx_up_pro(struct hns_nic_ring_data *ring_data,
struct net_device *ndev = ring_data->napi.dev;
skb->protocol = eth_type_trans(skb, ndev);
- (void)napi_gro_receive(&ring_data->napi, skb);
+ napi_gro_receive(&ring_data->napi, skb);
}
static int hns_desc_unused(struct hnae_ring *ring)
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 96d36ae5049e..c5c732601e35 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1715,7 +1715,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
}
netdev->min_mtu = IBMVETH_MIN_MTU;
- netdev->max_mtu = ETH_MAX_MTU;
+ netdev->max_mtu = ETH_MAX_MTU - IBMVETH_BUFF_OH;
memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 1b4d04e4474b..0fd7eae25fe9 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -842,12 +842,13 @@ static int ibmvnic_login(struct net_device *netdev)
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
unsigned long timeout = msecs_to_jiffies(30000);
int retry_count = 0;
+ int retries = 10;
bool retry;
int rc;
do {
retry = false;
- if (retry_count > IBMVNIC_MAX_QUEUES) {
+ if (retry_count > retries) {
netdev_warn(netdev, "Login attempts exceeded\n");
return -1;
}
@@ -862,11 +863,23 @@ static int ibmvnic_login(struct net_device *netdev)
if (!wait_for_completion_timeout(&adapter->init_done,
timeout)) {
- netdev_warn(netdev, "Login timed out\n");
- return -1;
+ netdev_warn(netdev, "Login timed out, retrying...\n");
+ retry = true;
+ adapter->init_done_rc = 0;
+ retry_count++;
+ continue;
}
- if (adapter->init_done_rc == PARTIALSUCCESS) {
+ if (adapter->init_done_rc == ABORTED) {
+ netdev_warn(netdev, "Login aborted, retrying...\n");
+ retry = true;
+ adapter->init_done_rc = 0;
+ retry_count++;
+ /* FW or device may be busy, so
+ * wait a bit before retrying login
+ */
+ msleep(500);
+ } else if (adapter->init_done_rc == PARTIALSUCCESS) {
retry_count++;
release_sub_crqs(adapter, 1);
@@ -1958,13 +1971,18 @@ static int do_reset(struct ibmvnic_adapter *adapter,
release_sub_crqs(adapter, 1);
} else {
rc = ibmvnic_reset_crq(adapter);
- if (!rc)
+ if (rc == H_CLOSED || rc == H_SUCCESS) {
rc = vio_enable_interrupts(adapter->vdev);
+ if (rc)
+ netdev_err(adapter->netdev,
+ "Reset failed to enable interrupts. rc=%d\n",
+ rc);
+ }
}
if (rc) {
netdev_err(adapter->netdev,
- "Couldn't initialize crq. rc=%d\n", rc);
+ "Reset couldn't initialize crq. rc=%d\n", rc);
goto out;
}
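The ibmvnic login changes above boil down to one retry loop with three distinct outcomes of the wait: a timeout and an ABORTED completion both trigger another attempt (the latter after a short sleep because the firmware may simply be busy), while PARTIALSUCCESS keeps the existing sub-CRQ rework path. A condensed sketch of that control flow, with send_login_request() standing in for the driver's real send_login() and the retry budget passed in by the caller:

/* Condensed illustration only, not the driver's actual function. */
static int sketch_login(struct ibmvnic_adapter *adapter, int retries)
{
	unsigned long timeout = msecs_to_jiffies(30000);
	int attempt;

	for (attempt = 0; attempt <= retries; attempt++) {
		reinit_completion(&adapter->init_done);
		adapter->init_done_rc = 0;

		if (send_login_request(adapter))	/* stand-in for send_login() */
			return -EIO;

		/* Timed out waiting for the FW: simply try again. */
		if (!wait_for_completion_timeout(&adapter->init_done, timeout))
			continue;

		if (!adapter->init_done_rc)
			return 0;			/* logged in */

		/* FW or device busy (e.g. ABORTED): back off, then retry. */
		msleep(500);
	}

	return -1;
}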
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index d9fa4600f745..4b2de08137be 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -151,10 +151,8 @@ static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
__be16 proto, u16 vid);
static void e1000_restore_vlan(struct e1000_adapter *adapter);
-#ifdef CONFIG_PM
-static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
-static int e1000_resume(struct pci_dev *pdev);
-#endif
+static int __maybe_unused e1000_suspend(struct device *dev);
+static int __maybe_unused e1000_resume(struct device *dev);
static void e1000_shutdown(struct pci_dev *pdev);
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -179,16 +177,16 @@ static const struct pci_error_handlers e1000_err_handler = {
.resume = e1000_io_resume,
};
+static SIMPLE_DEV_PM_OPS(e1000_pm_ops, e1000_suspend, e1000_resume);
+
static struct pci_driver e1000_driver = {
.name = e1000_driver_name,
.id_table = e1000_pci_tbl,
.probe = e1000_probe,
.remove = e1000_remove,
-#ifdef CONFIG_PM
- /* Power Management Hooks */
- .suspend = e1000_suspend,
- .resume = e1000_resume,
-#endif
+ .driver = {
+ .pm = &e1000_pm_ops,
+ },
.shutdown = e1000_shutdown,
.err_handler = &e1000_err_handler
};
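The e1000 conversion above is the standard recipe for retiring the legacy PCI .suspend/.resume hooks: the callbacks take a struct device, are marked __maybe_unused instead of being fenced with #ifdef CONFIG_PM, and are wired up through .driver.pm, which lets the PCI core handle config-space saving and power-state selection. A minimal sketch of the pattern, with placeholder names for the driver-specific pieces:

#include <linux/pci.h>
#include <linux/pm.h>

static int __maybe_unused foo_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	/* Driver-specific quiesce only; the PCI core now saves config
	 * space and chooses the low-power state for us.
	 */
	return foo_quiesce(pdev);		/* placeholder */
}

static int __maybe_unused foo_resume(struct device *dev)
{
	return foo_wake_up(to_pci_dev(dev));	/* placeholder */
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct pci_driver foo_driver = {
	.name	= "foo",
	.driver	= {
		.pm = &foo_pm_ops,		/* replaces .suspend/.resume */
	},
};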
@@ -5060,9 +5058,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
struct e1000_hw *hw = &adapter->hw;
u32 ctrl, ctrl_ext, rctl, status;
u32 wufc = adapter->wol;
-#ifdef CONFIG_PM
- int retval = 0;
-#endif
netif_device_detach(netdev);
@@ -5076,12 +5071,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
e1000_down(adapter);
}
-#ifdef CONFIG_PM
- retval = pci_save_state(pdev);
- if (retval)
- return retval;
-#endif
-
status = er32(STATUS);
if (status & E1000_STATUS_LU)
wufc &= ~E1000_WUFC_LNKC;
@@ -5142,37 +5131,26 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
return 0;
}
-#ifdef CONFIG_PM
-static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused e1000_suspend(struct device *dev)
{
int retval;
+ struct pci_dev *pdev = to_pci_dev(dev);
bool wake;
retval = __e1000_shutdown(pdev, &wake);
- if (retval)
- return retval;
-
- if (wake) {
- pci_prepare_to_sleep(pdev);
- } else {
- pci_wake_from_d3(pdev, false);
- pci_set_power_state(pdev, PCI_D3hot);
- }
+ device_set_wakeup_enable(dev, wake);
- return 0;
+ return retval;
}
-static int e1000_resume(struct pci_dev *pdev)
+static int __maybe_unused e1000_resume(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
u32 err;
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
- pci_save_state(pdev);
-
if (adapter->need_ioport)
err = pci_enable_device(pdev);
else
@@ -5209,7 +5187,6 @@ static int e1000_resume(struct pci_dev *pdev)
return 0;
}
-#endif
static void e1000_shutdown(struct pci_dev *pdev)
{
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index a279f4fa9962..6f6479ca1267 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -6349,7 +6349,6 @@ fl_out:
pm_runtime_put_sync(netdev->dev.parent);
}
-#ifdef CONFIG_PM_SLEEP
/* S0ix implementation */
static void e1000e_s0ix_entry_flow(struct e1000_adapter *adapter)
{
@@ -6571,7 +6570,6 @@ static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter)
mac_data &= ~E1000_CTRL_EXT_FORCE_SMBUS;
ew32(CTRL_EXT, mac_data);
}
-#endif /* CONFIG_PM_SLEEP */
static int e1000e_pm_freeze(struct device *dev)
{
@@ -6611,11 +6609,17 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- u32 ctrl, ctrl_ext, rctl, status;
- /* Runtime suspend should only enable wakeup for link changes */
- u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
+ u32 ctrl, ctrl_ext, rctl, status, wufc;
int retval = 0;
+ /* Runtime suspend should only enable wakeup for link changes */
+ if (runtime)
+ wufc = E1000_WUFC_LNKC;
+ else if (device_may_wakeup(&pdev->dev))
+ wufc = adapter->wol;
+ else
+ wufc = 0;
+
status = er32(STATUS);
if (status & E1000_STATUS_LU)
wufc &= ~E1000_WUFC_LNKC;
@@ -6672,7 +6676,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
if (adapter->hw.phy.type == e1000_phy_igp_3) {
e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
} else if (hw->mac.type >= e1000_pch_lpt) {
- if (!(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC)))
+ if (wufc && !(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC)))
/* ULP does not support wake from unicast, multicast
* or broadcast.
*/
@@ -6869,7 +6873,6 @@ err_irq:
return rc;
}
-#ifdef CONFIG_PM
static int __e1000_resume(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -6935,8 +6938,7 @@ static int __e1000_resume(struct pci_dev *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int e1000e_pm_suspend(struct device *dev)
+static __maybe_unused int e1000e_pm_suspend(struct device *dev)
{
struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -6960,7 +6962,7 @@ static int e1000e_pm_suspend(struct device *dev)
return rc;
}
-static int e1000e_pm_resume(struct device *dev)
+static __maybe_unused int e1000e_pm_resume(struct device *dev)
{
struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -6979,9 +6981,8 @@ static int e1000e_pm_resume(struct device *dev)
return e1000e_pm_thaw(dev);
}
-#endif /* CONFIG_PM_SLEEP */
-static int e1000e_pm_runtime_idle(struct device *dev)
+static __maybe_unused int e1000e_pm_runtime_idle(struct device *dev)
{
struct net_device *netdev = dev_get_drvdata(dev);
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -6997,7 +6998,7 @@ static int e1000e_pm_runtime_idle(struct device *dev)
return -EBUSY;
}
-static int e1000e_pm_runtime_resume(struct device *dev)
+static __maybe_unused int e1000e_pm_runtime_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -7014,7 +7015,7 @@ static int e1000e_pm_runtime_resume(struct device *dev)
return rc;
}
-static int e1000e_pm_runtime_suspend(struct device *dev)
+static __maybe_unused int e1000e_pm_runtime_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -7039,7 +7040,6 @@ static int e1000e_pm_runtime_suspend(struct device *dev)
return 0;
}
-#endif /* CONFIG_PM */
static void e1000_shutdown(struct pci_dev *pdev)
{
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index aa8026b1eb81..67806b7b2f49 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -2072,6 +2072,9 @@ static int i40e_set_ringparam(struct net_device *netdev,
err = i40e_setup_rx_descriptors(&rx_rings[i]);
if (err)
goto rx_unwind;
+ err = i40e_alloc_rx_bi(&rx_rings[i]);
+ if (err)
+ goto rx_unwind;
/* now allocate the Rx buffers to make sure the OS
* has enough memory, any failure here means abort
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 5d807c8004f8..56ecd6c3f236 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -439,11 +439,15 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev,
i40e_get_netdev_stats_struct_tx(ring, stats);
if (i40e_enabled_xdp_vsi(vsi)) {
- ring++;
+ ring = READ_ONCE(vsi->xdp_rings[i]);
+ if (!ring)
+ continue;
i40e_get_netdev_stats_struct_tx(ring, stats);
}
- ring++;
+ ring = READ_ONCE(vsi->rx_rings[i]);
+ if (!ring)
+ continue;
do {
start = u64_stats_fetch_begin_irq(&ring->syncp);
packets = ring->stats.packets;
@@ -787,6 +791,8 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
for (q = 0; q < vsi->num_queue_pairs; q++) {
/* locate Tx ring */
p = READ_ONCE(vsi->tx_rings[q]);
+ if (!p)
+ continue;
do {
start = u64_stats_fetch_begin_irq(&p->syncp);
@@ -800,8 +806,11 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
tx_linearize += p->tx_stats.tx_linearize;
tx_force_wb += p->tx_stats.tx_force_wb;
- /* Rx queue is part of the same block as Tx queue */
- p = &p[1];
+ /* locate Rx ring */
+ p = READ_ONCE(vsi->rx_rings[q]);
+ if (!p)
+ continue;
+
do {
start = u64_stats_fetch_begin_irq(&p->syncp);
packets = p->stats.packets;
@@ -10824,10 +10833,10 @@ static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
if (vsi->tx_rings && vsi->tx_rings[0]) {
for (i = 0; i < vsi->alloc_queue_pairs; i++) {
kfree_rcu(vsi->tx_rings[i], rcu);
- vsi->tx_rings[i] = NULL;
- vsi->rx_rings[i] = NULL;
+ WRITE_ONCE(vsi->tx_rings[i], NULL);
+ WRITE_ONCE(vsi->rx_rings[i], NULL);
if (vsi->xdp_rings)
- vsi->xdp_rings[i] = NULL;
+ WRITE_ONCE(vsi->xdp_rings[i], NULL);
}
}
}
@@ -10861,7 +10870,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
ring->itr_setting = pf->tx_itr_default;
- vsi->tx_rings[i] = ring++;
+ WRITE_ONCE(vsi->tx_rings[i], ring++);
if (!i40e_enabled_xdp_vsi(vsi))
goto setup_rx;
@@ -10879,7 +10888,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
set_ring_xdp(ring);
ring->itr_setting = pf->tx_itr_default;
- vsi->xdp_rings[i] = ring++;
+ WRITE_ONCE(vsi->xdp_rings[i], ring++);
setup_rx:
ring->queue_index = i;
@@ -10892,7 +10901,7 @@ setup_rx:
ring->size = 0;
ring->dcb_tc = 0;
ring->itr_setting = pf->rx_itr_default;
- vsi->rx_rings[i] = ring;
+ WRITE_ONCE(vsi->rx_rings[i], ring);
}
return 0;
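The i40e hunks above (and the matching ice and ixgbe ones that follow) all enforce the same protocol: the ring pointer arrays can be freed while the stats path walks them, so the writer publishes and clears each slot with WRITE_ONCE() and every lockless reader pairs a READ_ONCE() with a NULL check instead of stepping from the Tx ring by pointer arithmetic. A compressed, self-contained sketch of that reader/writer pairing (types and sizes are illustrative only):

#include <linux/compiler.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/types.h>

struct ring {
	struct rcu_head rcu;
	u64 packets;
};

struct vsi {
	struct ring *tx_rings[64];		/* illustrative size */
};

/* Writer side: publish or retire a slot with a single annotated store. */
static void vsi_set_ring(struct vsi *vsi, int i, struct ring *ring)
{
	WRITE_ONCE(vsi->tx_rings[i], ring);
}

static void vsi_clear_ring(struct vsi *vsi, int i)
{
	struct ring *ring = vsi->tx_rings[i];

	if (ring) {
		kfree_rcu(ring, rcu);		/* readers may still hold it */
		WRITE_ONCE(vsi->tx_rings[i], NULL);
	}
}

/* Reader side: reload the slot every iteration and tolerate NULL. */
static u64 vsi_sum_packets(struct vsi *vsi, int n)
{
	u64 sum = 0;
	int i;

	for (i = 0; i < n; i++) {
		struct ring *ring = READ_ONCE(vsi->tx_rings[i]);

		if (!ring)
			continue;
		sum += ring->packets;
	}
	return sum;
}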
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 28b46cc9f5cb..2e3a39cea2c0 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -1194,7 +1194,7 @@ static void ice_vsi_clear_rings(struct ice_vsi *vsi)
for (i = 0; i < vsi->alloc_txq; i++) {
if (vsi->tx_rings[i]) {
kfree_rcu(vsi->tx_rings[i], rcu);
- vsi->tx_rings[i] = NULL;
+ WRITE_ONCE(vsi->tx_rings[i], NULL);
}
}
}
@@ -1202,7 +1202,7 @@ static void ice_vsi_clear_rings(struct ice_vsi *vsi)
for (i = 0; i < vsi->alloc_rxq; i++) {
if (vsi->rx_rings[i]) {
kfree_rcu(vsi->rx_rings[i], rcu);
- vsi->rx_rings[i] = NULL;
+ WRITE_ONCE(vsi->rx_rings[i], NULL);
}
}
}
@@ -1235,7 +1235,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
ring->vsi = vsi;
ring->dev = dev;
ring->count = vsi->num_tx_desc;
- vsi->tx_rings[i] = ring;
+ WRITE_ONCE(vsi->tx_rings[i], ring);
}
/* Allocate Rx rings */
@@ -1254,7 +1254,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
ring->netdev = vsi->netdev;
ring->dev = dev;
ring->count = vsi->num_rx_desc;
- vsi->rx_rings[i] = ring;
+ WRITE_ONCE(vsi->rx_rings[i], ring);
}
return 0;
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 082825e3cb39..4cbd49c87568 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -1702,7 +1702,7 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
xdp_ring->netdev = NULL;
xdp_ring->dev = dev;
xdp_ring->count = vsi->num_tx_desc;
- vsi->xdp_rings[i] = xdp_ring;
+ WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);
if (ice_setup_tx_ring(xdp_ring))
goto free_xdp_rings;
ice_set_ring_xdp(xdp_ring);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index fd9f5d41b594..2e35c5706cf1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -921,7 +921,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
ring->queue_index = txr_idx;
/* assign ring to adapter */
- adapter->tx_ring[txr_idx] = ring;
+ WRITE_ONCE(adapter->tx_ring[txr_idx], ring);
/* update count and index */
txr_count--;
@@ -948,7 +948,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
set_ring_xdp(ring);
/* assign ring to adapter */
- adapter->xdp_ring[xdp_idx] = ring;
+ WRITE_ONCE(adapter->xdp_ring[xdp_idx], ring);
/* update count and index */
xdp_count--;
@@ -991,7 +991,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
ring->queue_index = rxr_idx;
/* assign ring to adapter */
- adapter->rx_ring[rxr_idx] = ring;
+ WRITE_ONCE(adapter->rx_ring[rxr_idx], ring);
/* update count and index */
rxr_count--;
@@ -1020,13 +1020,13 @@ static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
ixgbe_for_each_ring(ring, q_vector->tx) {
if (ring_is_xdp(ring))
- adapter->xdp_ring[ring->queue_index] = NULL;
+ WRITE_ONCE(adapter->xdp_ring[ring->queue_index], NULL);
else
- adapter->tx_ring[ring->queue_index] = NULL;
+ WRITE_ONCE(adapter->tx_ring[ring->queue_index], NULL);
}
ixgbe_for_each_ring(ring, q_vector->rx)
- adapter->rx_ring[ring->queue_index] = NULL;
+ WRITE_ONCE(adapter->rx_ring[ring->queue_index], NULL);
adapter->q_vector[v_idx] = NULL;
napi_hash_del(&q_vector->napi);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index f162b8b8f345..97a423ecf808 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -7051,7 +7051,10 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
}
for (i = 0; i < adapter->num_rx_queues; i++) {
- struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
+ struct ixgbe_ring *rx_ring = READ_ONCE(adapter->rx_ring[i]);
+
+ if (!rx_ring)
+ continue;
non_eop_descs += rx_ring->rx_stats.non_eop_descs;
alloc_rx_page += rx_ring->rx_stats.alloc_rx_page;
alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
@@ -7072,15 +7075,20 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
packets = 0;
/* gather some stats to the adapter struct that are per queue */
for (i = 0; i < adapter->num_tx_queues; i++) {
- struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
+ struct ixgbe_ring *tx_ring = READ_ONCE(adapter->tx_ring[i]);
+
+ if (!tx_ring)
+ continue;
restart_queue += tx_ring->tx_stats.restart_queue;
tx_busy += tx_ring->tx_stats.tx_busy;
bytes += tx_ring->stats.bytes;
packets += tx_ring->stats.packets;
}
for (i = 0; i < adapter->num_xdp_queues; i++) {
- struct ixgbe_ring *xdp_ring = adapter->xdp_ring[i];
+ struct ixgbe_ring *xdp_ring = READ_ONCE(adapter->xdp_ring[i]);
+ if (!xdp_ring)
+ continue;
restart_queue += xdp_ring->tx_stats.restart_queue;
tx_busy += xdp_ring->tx_stats.tx_busy;
bytes += xdp_ring->stats.bytes;
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 946925bbcb2d..c639e3a29302 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -106,9 +106,11 @@
#define MVNETA_TX_IN_PRGRS BIT(1)
#define MVNETA_TX_FIFO_EMPTY BIT(8)
#define MVNETA_RX_MIN_FRAME_SIZE 0x247c
+/* Only exists on Armada XP and Armada 370 */
#define MVNETA_SERDES_CFG 0x24A0
#define MVNETA_SGMII_SERDES_PROTO 0x0cc7
#define MVNETA_QSGMII_SERDES_PROTO 0x0667
+#define MVNETA_HSGMII_SERDES_PROTO 0x1107
#define MVNETA_TYPE_PRIO 0x24bc
#define MVNETA_FORCE_UNI BIT(21)
#define MVNETA_TXQ_CMD_1 0x24e4
@@ -3529,26 +3531,60 @@ static int mvneta_setup_txqs(struct mvneta_port *pp)
return 0;
}
-static int mvneta_comphy_init(struct mvneta_port *pp)
+static int mvneta_comphy_init(struct mvneta_port *pp, phy_interface_t interface)
{
int ret;
- if (!pp->comphy)
- return 0;
-
- ret = phy_set_mode_ext(pp->comphy, PHY_MODE_ETHERNET,
- pp->phy_interface);
+ ret = phy_set_mode_ext(pp->comphy, PHY_MODE_ETHERNET, interface);
if (ret)
return ret;
return phy_power_on(pp->comphy);
}
+static int mvneta_config_interface(struct mvneta_port *pp,
+ phy_interface_t interface)
+{
+ int ret = 0;
+
+ if (pp->comphy) {
+ if (interface == PHY_INTERFACE_MODE_SGMII ||
+ interface == PHY_INTERFACE_MODE_1000BASEX ||
+ interface == PHY_INTERFACE_MODE_2500BASEX) {
+ ret = mvneta_comphy_init(pp, interface);
+ }
+ } else {
+ switch (interface) {
+ case PHY_INTERFACE_MODE_QSGMII:
+ mvreg_write(pp, MVNETA_SERDES_CFG,
+ MVNETA_QSGMII_SERDES_PROTO);
+ break;
+
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ mvreg_write(pp, MVNETA_SERDES_CFG,
+ MVNETA_SGMII_SERDES_PROTO);
+ break;
+
+ case PHY_INTERFACE_MODE_2500BASEX:
+ mvreg_write(pp, MVNETA_SERDES_CFG,
+ MVNETA_HSGMII_SERDES_PROTO);
+ break;
+ default:
+ break;
+ }
+ }
+
+ pp->phy_interface = interface;
+
+ return ret;
+}
+
static void mvneta_start_dev(struct mvneta_port *pp)
{
int cpu;
- WARN_ON(mvneta_comphy_init(pp));
+ WARN_ON(mvneta_config_interface(pp, pp->phy_interface));
mvneta_max_rx_size_set(pp, pp->pkt_size);
mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
@@ -3926,14 +3962,10 @@ static void mvneta_mac_config(struct phylink_config *config, unsigned int mode,
if (state->speed == SPEED_2500)
new_ctrl4 |= MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE;
- if (pp->comphy && pp->phy_interface != state->interface &&
- (state->interface == PHY_INTERFACE_MODE_SGMII ||
- state->interface == PHY_INTERFACE_MODE_1000BASEX ||
- state->interface == PHY_INTERFACE_MODE_2500BASEX)) {
- pp->phy_interface = state->interface;
-
- WARN_ON(phy_power_off(pp->comphy));
- WARN_ON(mvneta_comphy_init(pp));
+ if (pp->phy_interface != state->interface) {
+ if (pp->comphy)
+ WARN_ON(phy_power_off(pp->comphy));
+ WARN_ON(mvneta_config_interface(pp, state->interface));
}
if (new_ctrl0 != gmac_ctrl0)
@@ -4982,12 +5014,10 @@ static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
/* MAC Cause register should be cleared */
mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
- if (phy_mode == PHY_INTERFACE_MODE_QSGMII)
- mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO);
- else if (phy_mode == PHY_INTERFACE_MODE_SGMII ||
- phy_interface_mode_is_8023z(phy_mode))
- mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
- else if (!phy_interface_mode_is_rgmii(phy_mode))
+ if (phy_mode != PHY_INTERFACE_MODE_QSGMII &&
+ phy_mode != PHY_INTERFACE_MODE_SGMII &&
+ !phy_interface_mode_is_8023z(phy_mode) &&
+ !phy_interface_mode_is_rgmii(phy_mode))
return -EINVAL;
return 0;
@@ -5176,10 +5206,10 @@ static int mvneta_probe(struct platform_device *pdev)
if (err < 0)
goto err_netdev;
- err = mvneta_port_power_up(pp, phy_mode);
+ err = mvneta_port_power_up(pp, pp->phy_interface);
if (err < 0) {
dev_err(&pdev->dev, "can't power up port\n");
- goto err_netdev;
+ return err;
}
/* Armada3700 network controller does not support per-cpu
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 2b5dad2ec650..24f4d8e0da98 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -1544,7 +1544,7 @@ static void mvpp2_read_stats(struct mvpp2_port *port)
for (q = 0; q < port->ntxqs; q++)
for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++)
*pstats++ += mvpp2_read_index(port->priv,
- MVPP22_CTRS_TX_CTR(port->id, i),
+ MVPP22_CTRS_TX_CTR(port->id, q),
mvpp2_ethtool_txq_regs[i].offset);
/* Rxqs are numbered from 0 from the user standpoint, but not from the
@@ -1553,7 +1553,7 @@ static void mvpp2_read_stats(struct mvpp2_port *port)
for (q = 0; q < port->nrxqs; q++)
for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++)
*pstats++ += mvpp2_read_index(port->priv,
- port->first_rxq + i,
+ port->first_rxq + q,
mvpp2_ethtool_rxq_regs[i].offset);
}
@@ -5983,8 +5983,8 @@ static int mvpp2_remove(struct platform_device *pdev)
{
struct mvpp2 *priv = platform_get_drvdata(pdev);
struct fwnode_handle *fwnode = pdev->dev.fwnode;
+ int i = 0, poolnum = MVPP2_BM_POOLS_NUM;
struct fwnode_handle *port_fwnode;
- int i = 0;
mvpp2_dbgfs_cleanup(priv);
@@ -5998,7 +5998,10 @@ static int mvpp2_remove(struct platform_device *pdev)
destroy_workqueue(priv->stats_queue);
- for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
+ if (priv->percpu_pools)
+ poolnum = mvpp2_get_nrxqs(priv) * 2;
+
+ for (i = 0; i < poolnum; i++) {
struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];
mvpp2_bm_pool_destroy(&pdev->dev, priv, bm_pool);
diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c
index f1ace4fec19f..3e765bdcf9e1 100644
--- a/drivers/net/ethernet/mediatek/mtk_star_emac.c
+++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c
@@ -24,7 +24,6 @@
#include <linux/regmap.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
-#include <linux/workqueue.h>
#define MTK_STAR_DRVNAME "mtk_star_emac"
@@ -262,7 +261,6 @@ struct mtk_star_priv {
spinlock_t lock;
struct rtnl_link_stats64 stats;
- struct work_struct stats_work;
};
static struct device *mtk_star_get_dev(struct mtk_star_priv *priv)
@@ -432,42 +430,6 @@ static void mtk_star_intr_disable(struct mtk_star_priv *priv)
regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, ~0);
}
-static void mtk_star_intr_enable_tx(struct mtk_star_priv *priv)
-{
- regmap_clear_bits(priv->regs, MTK_STAR_REG_INT_MASK,
- MTK_STAR_BIT_INT_STS_TNTC);
-}
-
-static void mtk_star_intr_enable_rx(struct mtk_star_priv *priv)
-{
- regmap_clear_bits(priv->regs, MTK_STAR_REG_INT_MASK,
- MTK_STAR_BIT_INT_STS_FNRC);
-}
-
-static void mtk_star_intr_enable_stats(struct mtk_star_priv *priv)
-{
- regmap_clear_bits(priv->regs, MTK_STAR_REG_INT_MASK,
- MTK_STAR_REG_INT_STS_MIB_CNT_TH);
-}
-
-static void mtk_star_intr_disable_tx(struct mtk_star_priv *priv)
-{
- regmap_set_bits(priv->regs, MTK_STAR_REG_INT_MASK,
- MTK_STAR_BIT_INT_STS_TNTC);
-}
-
-static void mtk_star_intr_disable_rx(struct mtk_star_priv *priv)
-{
- regmap_set_bits(priv->regs, MTK_STAR_REG_INT_MASK,
- MTK_STAR_BIT_INT_STS_FNRC);
-}
-
-static void mtk_star_intr_disable_stats(struct mtk_star_priv *priv)
-{
- regmap_set_bits(priv->regs, MTK_STAR_REG_INT_MASK,
- MTK_STAR_REG_INT_STS_MIB_CNT_TH);
-}
-
static unsigned int mtk_star_intr_read(struct mtk_star_priv *priv)
{
unsigned int val;
@@ -663,20 +625,6 @@ static void mtk_star_update_stats(struct mtk_star_priv *priv)
stats->rx_errors += stats->rx_fifo_errors;
}
-/* This runs in process context and parallel TX and RX paths executing in
- * napi context may result in losing some stats data but this should happen
- * seldom enough to be acceptable.
- */
-static void mtk_star_update_stats_work(struct work_struct *work)
-{
- struct mtk_star_priv *priv = container_of(work, struct mtk_star_priv,
- stats_work);
-
- mtk_star_update_stats(priv);
- mtk_star_reset_counters(priv);
- mtk_star_intr_enable_stats(priv);
-}
-
static struct sk_buff *mtk_star_alloc_skb(struct net_device *ndev)
{
uintptr_t tail, offset;
@@ -767,42 +715,25 @@ static void mtk_star_free_tx_skbs(struct mtk_star_priv *priv)
mtk_star_ring_free_skbs(priv, ring, mtk_star_dma_unmap_tx);
}
-/* All processing for TX and RX happens in the napi poll callback. */
+/* All processing for TX and RX happens in the napi poll callback.
+ *
+ * FIXME: The interrupt handling should be more fine-grained with each
+ * interrupt enabled/disabled independently when needed. Unfortunatly this
+ * turned out to impact the driver's stability and until we have something
+ * working properly, we're disabling all interrupts during TX & RX processing
+ * or when resetting the counter registers.
+ */
static irqreturn_t mtk_star_handle_irq(int irq, void *data)
{
struct mtk_star_priv *priv;
struct net_device *ndev;
- bool need_napi = false;
- unsigned int status;
ndev = data;
priv = netdev_priv(ndev);
if (netif_running(ndev)) {
- status = mtk_star_intr_read(priv);
-
- if (status & MTK_STAR_BIT_INT_STS_TNTC) {
- mtk_star_intr_disable_tx(priv);
- need_napi = true;
- }
-
- if (status & MTK_STAR_BIT_INT_STS_FNRC) {
- mtk_star_intr_disable_rx(priv);
- need_napi = true;
- }
-
- if (need_napi)
- napi_schedule(&priv->napi);
-
- /* One of the counters reached 0x8000000 - update stats and
- * reset all counters.
- */
- if (unlikely(status & MTK_STAR_REG_INT_STS_MIB_CNT_TH)) {
- mtk_star_intr_disable_stats(priv);
- schedule_work(&priv->stats_work);
- }
-
- mtk_star_intr_ack_all(priv);
+ mtk_star_intr_disable(priv);
+ napi_schedule(&priv->napi);
}
return IRQ_HANDLED;
@@ -1169,8 +1100,6 @@ static void mtk_star_tx_complete_all(struct mtk_star_priv *priv)
if (wake && netif_queue_stopped(ndev))
netif_wake_queue(ndev);
- mtk_star_intr_enable_tx(priv);
-
spin_unlock(&priv->lock);
}
@@ -1332,20 +1261,32 @@ static int mtk_star_process_rx(struct mtk_star_priv *priv, int budget)
static int mtk_star_poll(struct napi_struct *napi, int budget)
{
struct mtk_star_priv *priv;
+ unsigned int status;
int received = 0;
priv = container_of(napi, struct mtk_star_priv, napi);
- /* Clean-up all TX descriptors. */
- mtk_star_tx_complete_all(priv);
- /* Receive up to $budget packets. */
- received = mtk_star_process_rx(priv, budget);
+ status = mtk_star_intr_read(priv);
+ mtk_star_intr_ack_all(priv);
- if (received < budget) {
- napi_complete_done(napi, received);
- mtk_star_intr_enable_rx(priv);
+ if (status & MTK_STAR_BIT_INT_STS_TNTC)
+ /* Clean-up all TX descriptors. */
+ mtk_star_tx_complete_all(priv);
+
+ if (status & MTK_STAR_BIT_INT_STS_FNRC)
+ /* Receive up to $budget packets. */
+ received = mtk_star_process_rx(priv, budget);
+
+ if (unlikely(status & MTK_STAR_REG_INT_STS_MIB_CNT_TH)) {
+ mtk_star_update_stats(priv);
+ mtk_star_reset_counters(priv);
}
+ if (received < budget)
+ napi_complete_done(napi, received);
+
+ mtk_star_intr_enable(priv);
+
return received;
}
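The reworked mtk_star flow above is the classic "mask in hard IRQ, do everything in poll" NAPI shape: the interrupt handler only masks the block and schedules NAPI, while the poll callback reads and acks the status, completes Tx, receives up to the budget, and re-enables interrupts once it is done. A bare-bones sketch of that shape; the hw_*() helpers and the status bits are placeholders for the driver's own register accessors:

#include <linux/bits.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>

#define IRQ_TX_DONE	BIT(0)			/* illustrative bits */
#define IRQ_RX_READY	BIT(1)

struct sketch_priv {
	struct napi_struct napi;
	/* device registers, rings, ... */
};

static irqreturn_t sketch_irq(int irq, void *data)
{
	struct net_device *ndev = data;
	struct sketch_priv *priv = netdev_priv(ndev);

	if (netif_running(ndev)) {
		hw_mask_all_irqs(priv);		/* placeholder register write */
		napi_schedule(&priv->napi);
	}
	return IRQ_HANDLED;
}

static int sketch_poll(struct napi_struct *napi, int budget)
{
	struct sketch_priv *priv = container_of(napi, struct sketch_priv, napi);
	unsigned int status = hw_read_and_ack_irqs(priv);	/* placeholder */
	int received = 0;

	if (status & IRQ_TX_DONE)
		sketch_tx_complete(priv);		/* placeholder */

	if (status & IRQ_RX_READY)
		received = sketch_rx(priv, budget);	/* placeholder */

	if (received < budget)
		napi_complete_done(napi, received);

	/* Mirror the patch: interrupts come back on unconditionally here. */
	hw_unmask_all_irqs(priv);			/* placeholder */
	return received;
}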
@@ -1532,7 +1473,6 @@ static int mtk_star_probe(struct platform_device *pdev)
ndev->max_mtu = MTK_STAR_MAX_FRAME_SIZE;
spin_lock_init(&priv->lock);
- INIT_WORK(&priv->stats_work, mtk_star_update_stats_work);
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
index 80713123de5c..eefeb1cdc2ee 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
@@ -407,7 +407,9 @@ static int
mlx5e_rep_indr_setup_block(struct net_device *netdev,
struct mlx5e_rep_priv *rpriv,
struct flow_block_offload *f,
- flow_setup_cb_t *setup_cb)
+ flow_setup_cb_t *setup_cb,
+ void *data,
+ void (*cleanup)(struct flow_block_cb *block_cb))
{
struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
struct mlx5e_rep_indr_block_priv *indr_priv;
@@ -438,8 +440,10 @@ mlx5e_rep_indr_setup_block(struct net_device *netdev,
list_add(&indr_priv->list,
&rpriv->uplink_priv.tc_indr_block_priv_list);
- block_cb = flow_block_cb_alloc(setup_cb, indr_priv, indr_priv,
- mlx5e_rep_indr_block_unbind);
+ block_cb = flow_indr_block_cb_alloc(setup_cb, indr_priv, indr_priv,
+ mlx5e_rep_indr_block_unbind,
+ f, netdev, data, rpriv,
+ cleanup);
if (IS_ERR(block_cb)) {
list_del(&indr_priv->list);
kfree(indr_priv);
@@ -458,7 +462,7 @@ mlx5e_rep_indr_setup_block(struct net_device *netdev,
if (!block_cb)
return -ENOENT;
- flow_block_cb_remove(block_cb, f);
+ flow_indr_block_cb_remove(block_cb, f);
list_del(&block_cb->driver_list);
return 0;
default:
@@ -469,15 +473,19 @@ mlx5e_rep_indr_setup_block(struct net_device *netdev,
static
int mlx5e_rep_indr_setup_cb(struct net_device *netdev, void *cb_priv,
- enum tc_setup_type type, void *type_data)
+ enum tc_setup_type type, void *type_data,
+ void *data,
+ void (*cleanup)(struct flow_block_cb *block_cb))
{
switch (type) {
case TC_SETUP_BLOCK:
return mlx5e_rep_indr_setup_block(netdev, cb_priv, type_data,
- mlx5e_rep_indr_setup_tc_cb);
+ mlx5e_rep_indr_setup_tc_cb,
+ data, cleanup);
case TC_SETUP_FT:
return mlx5e_rep_indr_setup_block(netdev, cb_priv, type_data,
- mlx5e_rep_indr_setup_ft_cb);
+ mlx5e_rep_indr_setup_ft_cb,
+ data, cleanup);
default:
return -EOPNOTSUPP;
}
@@ -496,7 +504,7 @@ int mlx5e_rep_tc_netdevice_event_register(struct mlx5e_rep_priv *rpriv)
void mlx5e_rep_tc_netdevice_event_unregister(struct mlx5e_rep_priv *rpriv)
{
flow_indr_dev_unregister(mlx5e_rep_indr_setup_cb, rpriv,
- mlx5e_rep_indr_setup_tc_cb);
+ mlx5e_rep_indr_block_unbind);
}
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 5ffa32b75e5f..029ea344ad65 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -978,8 +978,10 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
lossy = !(pfc || pause_en);
thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
+ thres_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, thres_cells);
delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay,
pfc, pause_en);
+ delay_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, delay_cells);
total_cells = thres_cells + delay_cells;
taken_headroom_cells += total_cells;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 6f96ca50c9ba..3abe3e7d89bc 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -374,6 +374,17 @@ mlxsw_sp_port_vlan_find_by_vid(const struct mlxsw_sp_port *mlxsw_sp_port,
return NULL;
}
+static inline u32
+mlxsw_sp_port_headroom_8x_adjust(const struct mlxsw_sp_port *mlxsw_sp_port,
+ u32 size_cells)
+{
+ /* Ports with eight lanes use two headroom buffers between which the
+ * configured headroom size is split. Therefore, multiply the calculated
+ * headroom size by two.
+ */
+ return mlxsw_sp_port->mapping.width == 8 ? 2 * size_cells : size_cells;
+}
+
enum mlxsw_sp_flood_type {
MLXSW_SP_FLOOD_TYPE_UC,
MLXSW_SP_FLOOD_TYPE_BC,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index 21bfb2f6a6f0..6f84557a5a6f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -312,6 +312,7 @@ static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port)
if (i == MLXSW_SP_PB_UNUSED)
continue;
+ size = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, size);
mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, i, size);
}
mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index 304eb8c3d8bd..92351a79addc 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -782,6 +782,7 @@ mlxsw_sp_span_port_buffer_update(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
speed = 0;
buffsize = mlxsw_sp_span_buffsize_get(mlxsw_sp, speed, mtu);
+ buffsize = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, buffsize);
mlxsw_reg_sbib_pack(sbib_pl, mlxsw_sp_port->local_port, buffsize);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
}
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index c5c5c688b7e2..f1711ac86d0c 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -3091,6 +3091,8 @@ static const struct pci_device_id lan743x_pcidev_tbl[] = {
{ 0, }
};
+MODULE_DEVICE_TABLE(pci, lan743x_pcidev_tbl);
+
static struct pci_driver lan743x_pcidev_driver = {
.name = DRIVER_NAME,
.id_table = lan743x_pcidev_tbl,
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h
index 628fa9b2f741..373165119850 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.h
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h
@@ -297,7 +297,7 @@ struct vxge_hw_fifo_config {
* @greedy_return: If Set it forces the device to return absolutely all RxD
* that are consumed and still on board when a timer interrupt
* triggers. If Clear, then if the device has already returned
- * RxD before current timer interrupt trigerred and after the
+ * RxD before current timer interrupt triggered and after the
* previous timer interrupt triggered, then the device is not
* forced to returned the rest of the consumed RxD that it has
* on board which account for a byte count less than the one
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
index c39327677a7d..bb448c82cdc2 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -861,7 +861,7 @@ static void nfp_flower_clean(struct nfp_app *app)
flush_work(&app_priv->cmsg_work);
flow_indr_dev_unregister(nfp_flower_indr_setup_tc_cb, app,
- nfp_flower_setup_indr_block_cb);
+ nfp_flower_setup_indr_tc_release);
if (app_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)
nfp_flower_qos_cleanup(app);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index 6c3dc3baf387..7f54a620acad 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -459,9 +459,10 @@ int nfp_flower_setup_qos_offload(struct nfp_app *app, struct net_device *netdev,
struct tc_cls_matchall_offload *flow);
void nfp_flower_stats_rlim_reply(struct nfp_app *app, struct sk_buff *skb);
int nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
- enum tc_setup_type type, void *type_data);
-int nfp_flower_setup_indr_block_cb(enum tc_setup_type type, void *type_data,
- void *cb_priv);
+ enum tc_setup_type type, void *type_data,
+ void *data,
+ void (*cleanup)(struct flow_block_cb *block_cb));
+void nfp_flower_setup_indr_tc_release(void *cb_priv);
void
__nfp_flower_non_repr_priv_get(struct nfp_flower_non_repr_priv *non_repr_priv);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 695d24b9dd92..d7340dc09b4c 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -1619,8 +1619,8 @@ nfp_flower_indr_block_cb_priv_lookup(struct nfp_app *app,
return NULL;
}
-int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
- void *type_data, void *cb_priv)
+static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
{
struct nfp_flower_indr_block_cb_priv *priv = cb_priv;
struct flow_cls_offload *flower = type_data;
@@ -1637,7 +1637,7 @@ int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
}
}
-static void nfp_flower_setup_indr_tc_release(void *cb_priv)
+void nfp_flower_setup_indr_tc_release(void *cb_priv)
{
struct nfp_flower_indr_block_cb_priv *priv = cb_priv;
@@ -1647,7 +1647,8 @@ static void nfp_flower_setup_indr_tc_release(void *cb_priv)
static int
nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
- struct flow_block_offload *f)
+ struct flow_block_offload *f, void *data,
+ void (*cleanup)(struct flow_block_cb *block_cb))
{
struct nfp_flower_indr_block_cb_priv *cb_priv;
struct nfp_flower_priv *priv = app->priv;
@@ -1676,9 +1677,10 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
cb_priv->app = app;
list_add(&cb_priv->list, &priv->indr_block_cb_priv);
- block_cb = flow_block_cb_alloc(nfp_flower_setup_indr_block_cb,
- cb_priv, cb_priv,
- nfp_flower_setup_indr_tc_release);
+ block_cb = flow_indr_block_cb_alloc(nfp_flower_setup_indr_block_cb,
+ cb_priv, cb_priv,
+ nfp_flower_setup_indr_tc_release,
+ f, netdev, data, app, cleanup);
if (IS_ERR(block_cb)) {
list_del(&cb_priv->list);
kfree(cb_priv);
@@ -1699,7 +1701,7 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
if (!block_cb)
return -ENOENT;
- flow_block_cb_remove(block_cb, f);
+ flow_indr_block_cb_remove(block_cb, f);
list_del(&block_cb->driver_list);
return 0;
default:
@@ -1710,7 +1712,9 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
int
nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
- enum tc_setup_type type, void *type_data)
+ enum tc_setup_type type, void *type_data,
+ void *data,
+ void (*cleanup)(struct flow_block_cb *block_cb))
{
if (!nfp_fl_is_netdev_to_offload(netdev))
return -EOPNOTSUPP;
@@ -1718,7 +1722,7 @@ nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
switch (type) {
case TC_SETUP_BLOCK:
return nfp_flower_setup_indr_tc_block(netdev, cb_priv,
- type_data);
+ type_data, data, cleanup);
default:
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
index 32b9d7705404..55cef5b16aa5 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
@@ -147,7 +147,7 @@ struct pch_gbe_regs {
#define PCH_GBE_RH_ALM_FULL_8 0x00001000 /* 8 words */
#define PCH_GBE_RH_ALM_FULL_16 0x00002000 /* 16 words */
#define PCH_GBE_RH_ALM_FULL_32 0x00003000 /* 32 words */
-/* RX FIFO Read Triger Threshold */
+/* RX FIFO Read Trigger Threshold */
#define PCH_GBE_RH_RD_TRG_4 0x00000000 /* 4 words */
#define PCH_GBE_RH_RD_TRG_8 0x00000200 /* 8 words */
#define PCH_GBE_RH_RD_TRG_16 0x00000400 /* 16 words */
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 9d8c969f21cb..aaa00edd9d5b 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -96,7 +96,8 @@ static void ionic_link_status_check(struct ionic_lif *lif)
u16 link_status;
bool link_up;
- if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
+ if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state) ||
+ test_bit(IONIC_LIF_F_QUEUE_RESET, lif->state))
return;
link_status = le16_to_cpu(lif->info->status.link_status);
@@ -1245,6 +1246,7 @@ static int ionic_init_nic_features(struct ionic_lif *lif)
netdev->hw_features |= netdev->hw_enc_features;
netdev->features |= netdev->hw_features;
+ netdev->vlan_features |= netdev->features & ~NETIF_F_VLAN_FEATURES;
netdev->priv_flags |= IFF_UNICAST_FLT |
IFF_LIVE_ADDR_CHANGE;
@@ -1692,15 +1694,15 @@ static void ionic_stop_queues(struct ionic_lif *lif)
if (!test_and_clear_bit(IONIC_LIF_F_UP, lif->state))
return;
- ionic_txrx_disable(lif);
netif_tx_disable(lif->netdev);
+ ionic_txrx_disable(lif);
}
int ionic_stop(struct net_device *netdev)
{
struct ionic_lif *lif = netdev_priv(netdev);
- if (!netif_device_present(netdev))
+ if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
return 0;
ionic_stop_queues(lif);
@@ -1983,18 +1985,19 @@ int ionic_reset_queues(struct ionic_lif *lif)
bool running;
int err = 0;
- /* Put off the next watchdog timeout */
- netif_trans_update(lif->netdev);
-
err = ionic_wait_for_bit(lif, IONIC_LIF_F_QUEUE_RESET);
if (err)
return err;
running = netif_running(lif->netdev);
- if (running)
+ if (running) {
+ netif_device_detach(lif->netdev);
err = ionic_stop(lif->netdev);
- if (!err && running)
+ }
+ if (!err && running) {
ionic_open(lif->netdev);
+ netif_device_attach(lif->netdev);
+ }
clear_bit(IONIC_LIF_F_QUEUE_RESET, lif->state);
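The ionic_reset_queues() rework above relies on netif_device_detach()/netif_device_attach() to make the stack treat the device as absent for the whole stop/open cycle, rather than just nudging the Tx watchdog with netif_trans_update(). A small sketch of that ordering, where the close/open stubs stand in for the driver's ndo_stop/ndo_open paths:

#include <linux/netdevice.h>

/* Illustrative ordering only: quiesce the stack around a queue rebuild. */
static int sketch_reset_queues(struct net_device *netdev)
{
	bool running = netif_running(netdev);
	int err = 0;

	if (running) {
		netif_device_detach(netdev);	/* silences watchdog and Tx */
		err = close_stub(netdev);	/* placeholder for ndo_stop */
	}

	/* ... reallocate or reprogram the queues here ... */

	if (!err && running) {
		err = open_stub(netdev);	/* placeholder for ndo_open */
		netif_device_attach(netdev);
	}
	return err;
}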
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 7b76667acaba..08ba9d54ab63 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -271,7 +271,7 @@ static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn,
vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
}
- iids->vf_cids += vf_cids * p_mngr->vf_count;
+ iids->vf_cids = vf_cids;
iids->tids += vf_tids * p_mngr->vf_count;
DP_VERBOSE(p_hwfn, QED_MSG_ILT,
@@ -465,6 +465,20 @@ static struct qed_ilt_cli_blk *qed_cxt_set_blk(struct qed_ilt_cli_blk *p_blk)
return p_blk;
}
+static void qed_cxt_ilt_blk_reset(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients;
+ u32 cli_idx, blk_idx;
+
+ for (cli_idx = 0; cli_idx < MAX_ILT_CLIENTS; cli_idx++) {
+ for (blk_idx = 0; blk_idx < ILT_CLI_PF_BLOCKS; blk_idx++)
+ clients[cli_idx].pf_blks[blk_idx].total_size = 0;
+
+ for (blk_idx = 0; blk_idx < ILT_CLI_VF_BLOCKS; blk_idx++)
+ clients[cli_idx].vf_blks[blk_idx].total_size = 0;
+ }
+}
+
int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *line_count)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
@@ -484,6 +498,11 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *line_count)
p_mngr->pf_start_line = RESC_START(p_hwfn, QED_ILT);
+ /* Reset all ILT blocks at the beginning of ILT computing in order
+ * to prevent memory allocation for irrelevant blocks afterwards.
+ */
+ qed_cxt_ilt_blk_reset(p_hwfn);
+
DP_VERBOSE(p_hwfn, QED_MSG_ILT,
"hwfn [%d] - Set context manager starting line to be 0x%08x\n",
p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index 57a0dab88431..81e8fbe4a05b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -5568,7 +5568,8 @@ static const char * const s_status_str[] = {
/* DBG_STATUS_INVALID_FILTER_TRIGGER_DWORDS */
"The filter/trigger constraint dword offsets are not enabled for recording",
-
+ /* DBG_STATUS_NO_MATCHING_FRAMING_MODE */
+ "No matching framing mode",
/* DBG_STATUS_VFC_READ_ERROR */
"Error reading from VFC",
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 1eebf30fa798..3aa51374e727 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -980,7 +980,7 @@ int qed_llh_add_mac_filter(struct qed_dev *cdev,
struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
union qed_llh_filter filter = {};
- u8 filter_idx, abs_ppfid;
+ u8 filter_idx, abs_ppfid = 0;
u32 high, low, ref_cnt;
int rc = 0;
@@ -1368,6 +1368,8 @@ static void qed_dbg_user_data_free(struct qed_hwfn *p_hwfn)
void qed_resc_free(struct qed_dev *cdev)
{
+ struct qed_rdma_info *rdma_info;
+ struct qed_hwfn *p_hwfn;
int i;
if (IS_VF(cdev)) {
@@ -1385,7 +1387,8 @@ void qed_resc_free(struct qed_dev *cdev)
qed_llh_free(cdev);
for_each_hwfn(cdev, i) {
- struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ p_hwfn = cdev->hwfns + i;
+ rdma_info = p_hwfn->p_rdma_info;
qed_cxt_mngr_free(p_hwfn);
qed_qm_info_free(p_hwfn);
@@ -1404,8 +1407,10 @@ void qed_resc_free(struct qed_dev *cdev)
qed_ooo_free(p_hwfn);
}
- if (QED_IS_RDMA_PERSONALITY(p_hwfn))
+ if (QED_IS_RDMA_PERSONALITY(p_hwfn) && rdma_info) {
+ qed_spq_unregister_async_cb(p_hwfn, rdma_info->proto);
qed_rdma_info_free(p_hwfn);
+ }
qed_iov_free(p_hwfn);
qed_l2_free(p_hwfn);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
index d2fe61a5cf56..5409a2da6106 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
@@ -2836,8 +2836,6 @@ int qed_iwarp_stop(struct qed_hwfn *p_hwfn)
if (rc)
return rc;
- qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_IWARP);
-
return qed_iwarp_ll2_stop(p_hwfn);
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index 4566815f7b87..7271dd7166e5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -113,7 +113,6 @@ void qed_roce_stop(struct qed_hwfn *p_hwfn)
break;
}
}
- qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_ROCE);
}
static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
index 856051f50eb7..adc2c8f3d48e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -81,12 +81,17 @@ static void qed_vf_pf_req_end(struct qed_hwfn *p_hwfn, int req_status)
mutex_unlock(&(p_hwfn->vf_iov_info->mutex));
}
+#define QED_VF_CHANNEL_USLEEP_ITERATIONS 90
+#define QED_VF_CHANNEL_USLEEP_DELAY 100
+#define QED_VF_CHANNEL_MSLEEP_ITERATIONS 10
+#define QED_VF_CHANNEL_MSLEEP_DELAY 25
+
static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
{
union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request;
struct ustorm_trigger_vf_zone trigger;
struct ustorm_vf_zone *zone_data;
- int rc = 0, time = 100;
+ int iter, rc = 0;
zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B;
@@ -126,11 +131,19 @@ static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
REG_WR(p_hwfn, (uintptr_t)&zone_data->trigger, *((u32 *)&trigger));
/* When PF would be done with the response, it would write back to the
- * `done' address. Poll until then.
+ * `done' address from a coherent DMA zone. Poll until then.
*/
- while ((!*done) && time) {
- msleep(25);
- time--;
+
+ iter = QED_VF_CHANNEL_USLEEP_ITERATIONS;
+ while (!*done && iter--) {
+ udelay(QED_VF_CHANNEL_USLEEP_DELAY);
+ dma_rmb();
+ }
+
+ iter = QED_VF_CHANNEL_MSLEEP_ITERATIONS;
+ while (!*done && iter--) {
+ msleep(QED_VF_CHANNEL_MSLEEP_DELAY);
+ dma_rmb();
}
if (!*done) {
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 756c05eb96f3..29e285430f99 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -1229,7 +1229,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
/* PTP not supported on VFs */
if (!is_vf)
- qede_ptp_enable(edev, (mode == QEDE_PROBE_NORMAL));
+ qede_ptp_enable(edev);
edev->ops->register_ops(cdev, &qede_ll_ops, edev);
@@ -1318,6 +1318,7 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
if (system_state == SYSTEM_POWER_OFF)
return;
qed_ops->common->remove(cdev);
+ edev->cdev = NULL;
/* Since this can happen out-of-sync with other flows,
* don't release the netdevice until after slowpath stop
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
index 4c7f7a7fc151..cd5841a9415e 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
@@ -412,6 +412,7 @@ void qede_ptp_disable(struct qede_dev *edev)
if (ptp->tx_skb) {
dev_kfree_skb_any(ptp->tx_skb);
ptp->tx_skb = NULL;
+ clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags);
}
/* Disable PTP in HW */
@@ -423,7 +424,7 @@ void qede_ptp_disable(struct qede_dev *edev)
edev->ptp = NULL;
}
-static int qede_ptp_init(struct qede_dev *edev, bool init_tc)
+static int qede_ptp_init(struct qede_dev *edev)
{
struct qede_ptp *ptp;
int rc;
@@ -444,25 +445,19 @@ static int qede_ptp_init(struct qede_dev *edev, bool init_tc)
/* Init work queue for Tx timestamping */
INIT_WORK(&ptp->work, qede_ptp_task);
- /* Init cyclecounter and timecounter. This is done only in the first
- * load. If done in every load, PTP application will fail when doing
- * unload / load (e.g. MTU change) while it is running.
- */
- if (init_tc) {
- memset(&ptp->cc, 0, sizeof(ptp->cc));
- ptp->cc.read = qede_ptp_read_cc;
- ptp->cc.mask = CYCLECOUNTER_MASK(64);
- ptp->cc.shift = 0;
- ptp->cc.mult = 1;
-
- timecounter_init(&ptp->tc, &ptp->cc,
- ktime_to_ns(ktime_get_real()));
- }
+ /* Init cyclecounter and timecounter */
+ memset(&ptp->cc, 0, sizeof(ptp->cc));
+ ptp->cc.read = qede_ptp_read_cc;
+ ptp->cc.mask = CYCLECOUNTER_MASK(64);
+ ptp->cc.shift = 0;
+ ptp->cc.mult = 1;
- return rc;
+ timecounter_init(&ptp->tc, &ptp->cc, ktime_to_ns(ktime_get_real()));
+
+ return 0;
}
-int qede_ptp_enable(struct qede_dev *edev, bool init_tc)
+int qede_ptp_enable(struct qede_dev *edev)
{
struct qede_ptp *ptp;
int rc;
@@ -483,7 +478,7 @@ int qede_ptp_enable(struct qede_dev *edev, bool init_tc)
edev->ptp = ptp;
- rc = qede_ptp_init(edev, init_tc);
+ rc = qede_ptp_init(edev);
if (rc)
goto err1;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.h b/drivers/net/ethernet/qlogic/qede/qede_ptp.h
index 691a14c4b2c5..89c7f3cf3ee2 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.h
+++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.h
@@ -41,7 +41,7 @@ void qede_ptp_rx_ts(struct qede_dev *edev, struct sk_buff *skb);
void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb);
int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *req);
void qede_ptp_disable(struct qede_dev *edev);
-int qede_ptp_enable(struct qede_dev *edev, bool init_tc);
+int qede_ptp_enable(struct qede_dev *edev);
int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *ts);
static inline void qede_ptp_record_rx_ts(struct qede_dev *edev,
diff --git a/drivers/net/ethernet/qlogic/qede/qede_rdma.c b/drivers/net/ethernet/qlogic/qede/qede_rdma.c
index 2d873ae8a234..668ccc9d49f8 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_rdma.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_rdma.c
@@ -105,6 +105,7 @@ static void qede_rdma_destroy_wq(struct qede_dev *edev)
qede_rdma_cleanup_event(edev);
destroy_workqueue(edev->rdma_info.rdma_wq);
+ edev->rdma_info.rdma_wq = NULL;
}
int qede_rdma_dev_add(struct qede_dev *edev, bool recovery)
@@ -325,7 +326,7 @@ static void qede_rdma_add_event(struct qede_dev *edev,
if (edev->rdma_info.exp_recovery)
return;
- if (!edev->rdma_info.qedr_dev)
+ if (!edev->rdma_info.qedr_dev || !edev->rdma_info.rdma_wq)
return;
/* We don't want the cleanup flow to start while we're allocating and
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 8d7b9bb910f2..10037639ac2c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -269,7 +269,7 @@ static ssize_t qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t offset, size_t size)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
int ret;
@@ -286,7 +286,7 @@ static ssize_t qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t offset, size_t size)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
int ret;
@@ -315,7 +315,7 @@ static ssize_t qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t offset, size_t size)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
u64 data;
int ret;
@@ -337,7 +337,7 @@ static ssize_t qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t offset, size_t size)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
u64 data;
int ret;
@@ -402,7 +402,7 @@ static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp,
char *buf, loff_t offset,
size_t size)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
struct qlcnic_pm_func_cfg *pm_cfg;
u32 id, action, pci_func;
@@ -452,7 +452,7 @@ static ssize_t qlcnic_sysfs_read_pm_config(struct file *filp,
char *buf, loff_t offset,
size_t size)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
struct qlcnic_pm_func_cfg *pm_cfg;
u8 pci_func;
@@ -545,7 +545,7 @@ static ssize_t qlcnic_sysfs_write_esw_config(struct file *file,
char *buf, loff_t offset,
size_t size)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
struct qlcnic_esw_func_cfg *esw_cfg;
struct qlcnic_npar_info *npar;
@@ -629,7 +629,7 @@ static ssize_t qlcnic_sysfs_read_esw_config(struct file *file,
char *buf, loff_t offset,
size_t size)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
struct qlcnic_esw_func_cfg *esw_cfg;
u8 pci_func;
@@ -681,7 +681,7 @@ static ssize_t qlcnic_sysfs_write_npar_config(struct file *file,
char *buf, loff_t offset,
size_t size)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
struct qlcnic_info nic_info;
struct qlcnic_npar_func_cfg *np_cfg;
@@ -728,7 +728,7 @@ static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
char *buf, loff_t offset,
size_t size)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
struct qlcnic_npar_func_cfg *np_cfg;
struct qlcnic_info nic_info;
@@ -775,7 +775,7 @@ static ssize_t qlcnic_sysfs_get_port_stats(struct file *file,
char *buf, loff_t offset,
size_t size)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
struct qlcnic_esw_statistics port_stats;
int ret;
@@ -810,7 +810,7 @@ static ssize_t qlcnic_sysfs_get_esw_stats(struct file *file,
char *buf, loff_t offset,
size_t size)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
struct qlcnic_esw_statistics esw_stats;
int ret;
@@ -845,7 +845,7 @@ static ssize_t qlcnic_sysfs_clear_esw_stats(struct file *file,
char *buf, loff_t offset,
size_t size)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
int ret;
@@ -875,7 +875,7 @@ static ssize_t qlcnic_sysfs_clear_port_stats(struct file *file,
char *buf, loff_t offset,
size_t size)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
int ret;
@@ -904,7 +904,7 @@ static ssize_t qlcnic_sysfs_read_pci_config(struct file *file,
char *buf, loff_t offset,
size_t size)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
struct qlcnic_pci_func_cfg *pci_cfg;
struct qlcnic_pci_info *pci_info;
@@ -946,7 +946,7 @@ static ssize_t qlcnic_83xx_sysfs_flash_read_handler(struct file *filp,
{
unsigned char *p_read_buf;
int ret, count;
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
if (!size)
@@ -1124,7 +1124,7 @@ static ssize_t qlcnic_83xx_sysfs_flash_write_handler(struct file *filp,
int ret;
static int flash_mode;
unsigned long data;
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
ret = kstrtoul(buf, 16, &data);
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index dad84ecf5a77..b660ddbe4025 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -2114,8 +2114,11 @@ static void rtl_release_firmware(struct rtl8169_private *tp)
void r8169_apply_firmware(struct rtl8169_private *tp)
{
/* TODO: release firmware if rtl_fw_write_firmware signals failure. */
- if (tp->rtl_fw)
+ if (tp->rtl_fw) {
rtl_fw_write_firmware(tp, tp->rtl_fw);
+ /* At least one firmware doesn't reset tp->ocp_base. */
+ tp->ocp_base = OCP_STD_PHY_BASE;
+ }
}
static void rtl8168_config_eee_mac(struct rtl8169_private *tp)
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index 7585cd2270ba..fc99e7118e49 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -647,10 +647,10 @@ static int rocker_dma_rings_init(struct rocker *rocker)
err_dma_event_ring_bufs_alloc:
rocker_dma_ring_destroy(rocker, &rocker->event_ring);
err_dma_event_ring_create:
+ rocker_dma_cmd_ring_waits_free(rocker);
+err_dma_cmd_ring_waits_alloc:
rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
PCI_DMA_BIDIRECTIONAL);
-err_dma_cmd_ring_waits_alloc:
- rocker_dma_cmd_ring_waits_free(rocker);
err_dma_cmd_ring_bufs_alloc:
rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
return err;
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index 328bc38848bb..0f366cc50b74 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -1044,8 +1044,9 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
skb->ip_summed = CHECKSUM_UNNECESSARY;
next:
- if ((skb && napi_gro_receive(&priv->napi, skb) != GRO_DROP) ||
- xdp_result) {
+ if (skb)
+ napi_gro_receive(&priv->napi, skb);
+ if (skb || xdp_result) {
ndev->stats.rx_packets++;
ndev->stats.rx_bytes += xdp.data_end - xdp.data;
}
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index fbaf3c987d9c..f34c7903ff52 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -186,7 +186,7 @@
#define XAE_RAF_TXVSTRPMODE_MASK 0x00000180 /* Tx VLAN STRIP mode */
#define XAE_RAF_RXVSTRPMODE_MASK 0x00000600 /* Rx VLAN STRIP mode */
#define XAE_RAF_NEWFNCENBL_MASK 0x00000800 /* New function mode */
-/* Exteneded Multicast Filtering mode */
+/* Extended Multicast Filtering mode */
#define XAE_RAF_EMULTIFLTRENBL_MASK 0x00001000
#define XAE_RAF_STATSRST_MASK 0x00002000 /* Stats. Counter Reset */
#define XAE_RAF_RXBADFRMEN_MASK 0x00004000 /* Recv Bad Frame Enable */