author     Jakub Kicinski <kuba@kernel.org>  2020-10-15 12:43:21 -0700
committer  Jakub Kicinski <kuba@kernel.org>  2020-10-15 12:43:21 -0700
commit     2295cddf99e3f7c2be2b1160e2f5e53cc35b09be (patch)
tree       2c5a619c8aee5ef68028a65a1c059736b005846b /drivers/net
parent     346e320cb2103edef709c4466a29140c4a8e527a (diff)
parent     2ecbc1f684482b4ed52447a39903bd9b0f222898 (diff)
download   linux-2295cddf99e3f7c2be2b1160e2f5e53cc35b09be.tar.bz2
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Minor conflicts in net/mptcp/protocol.h and tools/testing/selftests/net/Makefile. In both cases code was added on both sides in the same place, so just keep both.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/can/m_can/m_can_platform.c                |   2
-rw-r--r--  drivers/net/dsa/microchip/ksz_common.c                |  16
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c  | 175
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h  |  15
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c             |  35
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.c                    |  19
-rw-r--r--  drivers/net/ethernet/korina.c                         |   3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c            |   3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c            |   2
-rw-r--r--  drivers/net/ethernet/ti/tlan.c                        |   2
-rw-r--r--  drivers/net/ipa/ipa_endpoint.c                        |   6
11 files changed, 242 insertions, 36 deletions
diff --git a/drivers/net/can/m_can/m_can_platform.c b/drivers/net/can/m_can/m_can_platform.c
index 38ea5e600fb8..e6d0cb9ee02f 100644
--- a/drivers/net/can/m_can/m_can_platform.c
+++ b/drivers/net/can/m_can/m_can_platform.c
@@ -144,8 +144,6 @@ static int __maybe_unused m_can_runtime_suspend(struct device *dev)
struct net_device *ndev = dev_get_drvdata(dev);
struct m_can_classdev *mcan_class = netdev_priv(ndev);
- m_can_class_suspend(dev);
-
clk_disable_unprepare(mcan_class->cclk);
clk_disable_unprepare(mcan_class->hclk);
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 01f5784f69cb..0ef854911f21 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -103,14 +103,8 @@ void ksz_init_mib_timer(struct ksz_device *dev)
INIT_DELAYED_WORK(&dev->mib_read, ksz_mib_read_work);
- /* Read MIB counters every 30 seconds to avoid overflow. */
- dev->mib_read_interval = msecs_to_jiffies(30000);
-
for (i = 0; i < dev->mib_port_cnt; i++)
dev->dev_ops->port_init_cnt(dev, i);
-
- /* Start the timer 2 seconds later. */
- schedule_delayed_work(&dev->mib_read, msecs_to_jiffies(2000));
}
EXPORT_SYMBOL_GPL(ksz_init_mib_timer);
@@ -143,7 +137,9 @@ void ksz_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
/* Read all MIB counters when the link is going down. */
p->read = true;
- schedule_delayed_work(&dev->mib_read, 0);
+ /* timer started */
+ if (dev->mib_read_interval)
+ schedule_delayed_work(&dev->mib_read, 0);
}
EXPORT_SYMBOL_GPL(ksz_mac_link_down);
@@ -451,6 +447,12 @@ int ksz_switch_register(struct ksz_device *dev,
return ret;
}
+ /* Read MIB counters every 30 seconds to avoid overflow. */
+ dev->mib_read_interval = msecs_to_jiffies(30000);
+
+ /* Start the MIB timer. */
+ schedule_delayed_work(&dev->mib_read, 0);
+
return 0;
}
EXPORT_SYMBOL(ksz_switch_register);
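The two ksz hunks above work together: the 30-second MIB interval and the initial schedule_delayed_work() move out of ksz_init_mib_timer() and into ksz_switch_register(), and ksz_mac_link_down() now treats a non-zero mib_read_interval as a "timer started" flag. A minimal userspace sketch of that guard, with illustrative names (a model of the ordering, not the driver's code):

/* Toy model of the ordering fix above; names are illustrative. */
#include <stdio.h>

static unsigned long mib_read_interval;	/* 0 until the switch registers */

static void schedule_mib_read(void) { puts("MIB read scheduled"); }

static void mac_link_down(void)
{
	/* timer started? (mirrors the mib_read_interval check above) */
	if (mib_read_interval)
		schedule_mib_read();
	else
		puts("link down before registration: skipped");
}

static void switch_register(void)
{
	mib_read_interval = 30000;	/* 30 s period, also marks "started" */
	schedule_mib_read();
}

int main(void)
{
	mac_link_down();	/* too early: must not schedule */
	switch_register();
	mac_link_down();	/* now safe to schedule immediately */
	return 0;
}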
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index f642c1b475c4..1b88bd1c2dbe 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -60,6 +60,89 @@ static struct ch_tc_pedit_fields pedits[] = {
PEDIT_FIELDS(IP6_, DST_127_96, 4, nat_lip, 12),
};
+static const struct cxgb4_natmode_config cxgb4_natmode_config_array[] = {
+ /* Default supported NAT modes */
+ {
+ .chip = CHELSIO_T5,
+ .flags = CXGB4_ACTION_NATMODE_NONE,
+ .natmode = NAT_MODE_NONE,
+ },
+ {
+ .chip = CHELSIO_T5,
+ .flags = CXGB4_ACTION_NATMODE_DIP,
+ .natmode = NAT_MODE_DIP,
+ },
+ {
+ .chip = CHELSIO_T5,
+ .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_DPORT,
+ .natmode = NAT_MODE_DIP_DP,
+ },
+ {
+ .chip = CHELSIO_T5,
+ .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_DPORT |
+ CXGB4_ACTION_NATMODE_SIP,
+ .natmode = NAT_MODE_DIP_DP_SIP,
+ },
+ {
+ .chip = CHELSIO_T5,
+ .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_DPORT |
+ CXGB4_ACTION_NATMODE_SPORT,
+ .natmode = NAT_MODE_DIP_DP_SP,
+ },
+ {
+ .chip = CHELSIO_T5,
+ .flags = CXGB4_ACTION_NATMODE_SIP | CXGB4_ACTION_NATMODE_SPORT,
+ .natmode = NAT_MODE_SIP_SP,
+ },
+ {
+ .chip = CHELSIO_T5,
+ .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SIP |
+ CXGB4_ACTION_NATMODE_SPORT,
+ .natmode = NAT_MODE_DIP_SIP_SP,
+ },
+ {
+ .chip = CHELSIO_T5,
+ .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SIP |
+ CXGB4_ACTION_NATMODE_DPORT |
+ CXGB4_ACTION_NATMODE_SPORT,
+ .natmode = NAT_MODE_ALL,
+ },
+ /* T6+ can ignore L4 ports when they're disabled. */
+ {
+ .chip = CHELSIO_T6,
+ .flags = CXGB4_ACTION_NATMODE_SIP,
+ .natmode = NAT_MODE_SIP_SP,
+ },
+ {
+ .chip = CHELSIO_T6,
+ .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SPORT,
+ .natmode = NAT_MODE_DIP_DP_SP,
+ },
+ {
+ .chip = CHELSIO_T6,
+ .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SIP,
+ .natmode = NAT_MODE_ALL,
+ },
+};
+
+static void cxgb4_action_natmode_tweak(struct ch_filter_specification *fs,
+ u8 natmode_flags)
+{
+ u8 i = 0;
+
+ /* Translate the enabled NAT 4-tuple fields to one of the
+ * hardware supported NAT mode configurations. This ensures
+ * that we pick a valid combination, where the disabled fields
+ * do not get overwritten to 0.
+ */
+ for (i = 0; i < ARRAY_SIZE(cxgb4_natmode_config_array); i++) {
+ if (cxgb4_natmode_config_array[i].flags == natmode_flags) {
+ fs->nat_mode = cxgb4_natmode_config_array[i].natmode;
+ return;
+ }
+ }
+}
+
static struct ch_tc_flower_entry *allocate_flower_entry(void)
{
struct ch_tc_flower_entry *new = kzalloc(sizeof(*new), GFP_KERNEL);
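The array and helper added above translate the set of 4-tuple fields a flow wants rewritten into one of the NAT mode configurations the hardware supports, by exact flag match. A compact userspace model of that lookup; the flag and mode constants below are illustrative stand-ins for the driver's enums:

#include <stddef.h>
#include <stdio.h>

enum { NATMODE_DIP = 1 << 0, NATMODE_SIP = 1 << 1,
       NATMODE_DPORT = 1 << 2, NATMODE_SPORT = 1 << 3 };
enum { MODE_NONE, MODE_DIP, MODE_DIP_DP, MODE_ALL };

static const struct { unsigned char flags, natmode; } config[] = {
	{ 0,                              MODE_NONE   },
	{ NATMODE_DIP,                    MODE_DIP    },
	{ NATMODE_DIP | NATMODE_DPORT,    MODE_DIP_DP },
	{ NATMODE_DIP | NATMODE_SIP |
	  NATMODE_DPORT | NATMODE_SPORT,  MODE_ALL    },
};

/* Mirrors cxgb4_action_natmode_tweak(): exact-match lookup, so a
 * mode never enables rewriting fields the filter didn't ask for. */
static int natmode_for(unsigned char flags)
{
	for (size_t i = 0; i < sizeof(config) / sizeof(config[0]); i++)
		if (config[i].flags == flags)
			return config[i].natmode;
	return -1;	/* unsupported combination */
}

int main(void)
{
	printf("DIP+DPORT  -> mode %d\n", natmode_for(NATMODE_DIP | NATMODE_DPORT));
	printf("SPORT only -> mode %d (unsupported)\n", natmode_for(NATMODE_SPORT));
	return 0;
}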
@@ -289,7 +372,8 @@ static void offload_pedit(struct ch_filter_specification *fs, u32 val, u32 mask,
}
static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
- u32 mask, u32 offset, u8 htype)
+ u32 mask, u32 offset, u8 htype,
+ u8 *natmode_flags)
{
switch (htype) {
case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
@@ -314,60 +398,94 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
switch (offset) {
case PEDIT_IP4_SRC:
offload_pedit(fs, val, mask, IP4_SRC);
+ *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
break;
case PEDIT_IP4_DST:
offload_pedit(fs, val, mask, IP4_DST);
+ *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
}
- fs->nat_mode = NAT_MODE_ALL;
break;
case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
switch (offset) {
case PEDIT_IP6_SRC_31_0:
offload_pedit(fs, val, mask, IP6_SRC_31_0);
+ *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
break;
case PEDIT_IP6_SRC_63_32:
offload_pedit(fs, val, mask, IP6_SRC_63_32);
+ *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
break;
case PEDIT_IP6_SRC_95_64:
offload_pedit(fs, val, mask, IP6_SRC_95_64);
+ *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
break;
case PEDIT_IP6_SRC_127_96:
offload_pedit(fs, val, mask, IP6_SRC_127_96);
+ *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
break;
case PEDIT_IP6_DST_31_0:
offload_pedit(fs, val, mask, IP6_DST_31_0);
+ *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
break;
case PEDIT_IP6_DST_63_32:
offload_pedit(fs, val, mask, IP6_DST_63_32);
+ *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
break;
case PEDIT_IP6_DST_95_64:
offload_pedit(fs, val, mask, IP6_DST_95_64);
+ *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
break;
case PEDIT_IP6_DST_127_96:
offload_pedit(fs, val, mask, IP6_DST_127_96);
+ *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
}
- fs->nat_mode = NAT_MODE_ALL;
break;
case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
switch (offset) {
case PEDIT_TCP_SPORT_DPORT:
- if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
+ if (~mask & PEDIT_TCP_UDP_SPORT_MASK) {
fs->nat_fport = val;
- else
+ *natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
+ } else {
fs->nat_lport = val >> 16;
+ *natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
+ }
}
- fs->nat_mode = NAT_MODE_ALL;
break;
case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
switch (offset) {
case PEDIT_UDP_SPORT_DPORT:
- if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
+ if (~mask & PEDIT_TCP_UDP_SPORT_MASK) {
fs->nat_fport = val;
- else
+ *natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
+ } else {
fs->nat_lport = val >> 16;
+ *natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
+ }
}
- fs->nat_mode = NAT_MODE_ALL;
+ break;
+ }
+}
+
+static int cxgb4_action_natmode_validate(struct adapter *adap, u8 natmode_flags,
+ struct netlink_ext_ack *extack)
+{
+ u8 i = 0;
+
+ /* Extract the NAT mode to enable based on what 4-tuple fields
+ * are enabled to be overwritten. This ensures that the
+ * disabled fields don't get overwritten to 0.
+ */
+ for (i = 0; i < ARRAY_SIZE(cxgb4_natmode_config_array); i++) {
+ const struct cxgb4_natmode_config *c;
+
+ c = &cxgb4_natmode_config_array[i];
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) >= c->chip &&
+ natmode_flags == c->flags)
+ return 0;
}
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported NAT mode 4-tuple combination");
+ return -EOPNOTSUPP;
}
void cxgb4_process_flow_actions(struct net_device *in,
@@ -375,6 +493,7 @@ void cxgb4_process_flow_actions(struct net_device *in,
struct ch_filter_specification *fs)
{
struct flow_action_entry *act;
+ u8 natmode_flags = 0;
int i;
flow_action_for_each(i, act, actions) {
@@ -426,7 +545,8 @@ void cxgb4_process_flow_actions(struct net_device *in,
val = act->mangle.val;
offset = act->mangle.offset;
- process_pedit_field(fs, val, mask, offset, htype);
+ process_pedit_field(fs, val, mask, offset, htype,
+ &natmode_flags);
}
break;
case FLOW_ACTION_QUEUE:
@@ -438,6 +558,9 @@ void cxgb4_process_flow_actions(struct net_device *in,
break;
}
}
+ if (natmode_flags)
+ cxgb4_action_natmode_tweak(fs, natmode_flags);
+
}
static bool valid_l4_mask(u32 mask)
@@ -454,7 +577,8 @@ static bool valid_l4_mask(u32 mask)
}
static bool valid_pedit_action(struct net_device *dev,
- const struct flow_action_entry *act)
+ const struct flow_action_entry *act,
+ u8 *natmode_flags)
{
u32 mask, offset;
u8 htype;
@@ -479,7 +603,10 @@ static bool valid_pedit_action(struct net_device *dev,
case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
switch (offset) {
case PEDIT_IP4_SRC:
+ *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
+ break;
case PEDIT_IP4_DST:
+ *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
break;
default:
netdev_err(dev, "%s: Unsupported pedit field\n",
@@ -493,10 +620,13 @@ static bool valid_pedit_action(struct net_device *dev,
case PEDIT_IP6_SRC_63_32:
case PEDIT_IP6_SRC_95_64:
case PEDIT_IP6_SRC_127_96:
+ *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
+ break;
case PEDIT_IP6_DST_31_0:
case PEDIT_IP6_DST_63_32:
case PEDIT_IP6_DST_95_64:
case PEDIT_IP6_DST_127_96:
+ *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
break;
default:
netdev_err(dev, "%s: Unsupported pedit field\n",
@@ -512,6 +642,10 @@ static bool valid_pedit_action(struct net_device *dev,
__func__);
return false;
}
+ if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
+ *natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
+ else
+ *natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
break;
default:
netdev_err(dev, "%s: Unsupported pedit field\n",
@@ -527,6 +661,10 @@ static bool valid_pedit_action(struct net_device *dev,
__func__);
return false;
}
+ if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
+ *natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
+ else
+ *natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
break;
default:
netdev_err(dev, "%s: Unsupported pedit field\n",
@@ -546,10 +684,12 @@ int cxgb4_validate_flow_actions(struct net_device *dev,
struct netlink_ext_ack *extack,
u8 matchall_filter)
{
+ struct adapter *adap = netdev2adap(dev);
struct flow_action_entry *act;
bool act_redir = false;
bool act_pedit = false;
bool act_vlan = false;
+ u8 natmode_flags = 0;
int i;
if (!flow_action_basic_hw_stats_check(actions, extack))
@@ -563,7 +703,6 @@ int cxgb4_validate_flow_actions(struct net_device *dev,
break;
case FLOW_ACTION_MIRRED:
case FLOW_ACTION_REDIRECT: {
- struct adapter *adap = netdev2adap(dev);
struct net_device *n_dev, *target_dev;
bool found = false;
unsigned int i;
@@ -620,7 +759,8 @@ int cxgb4_validate_flow_actions(struct net_device *dev,
}
break;
case FLOW_ACTION_MANGLE: {
- bool pedit_valid = valid_pedit_action(dev, act);
+ bool pedit_valid = valid_pedit_action(dev, act,
+ &natmode_flags);
if (!pedit_valid)
return -EOPNOTSUPP;
@@ -642,6 +782,15 @@ int cxgb4_validate_flow_actions(struct net_device *dev,
return -EINVAL;
}
+ if (act_pedit) {
+ int ret;
+
+ ret = cxgb4_action_natmode_validate(adap, natmode_flags,
+ extack);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
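cxgb4_action_natmode_validate() is the companion check at validation time: a pedit flag combination is accepted only if some table entry matches it exactly on a chip at least as new as the entry requires, which is how the T6-only relaxations take effect. A sketch of that ordering check, with made-up version numbers standing in for CHELSIO_CHIP_VERSION():

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum { T5 = 5, T6 = 6 };                 /* stand-ins for chip versions */
enum { F_DIP = 1 << 0, F_SIP = 1 << 1 }; /* stand-ins for the flag bits */

static const struct { int chip; unsigned char flags; } valid[] = {
	{ T5, F_DIP },         /* supported from T5 onward */
	{ T6, F_DIP | F_SIP }, /* T6+ only: L4 ports may stay disabled */
};

/* A combination is legal if an entry matches exactly on a chip that
 * is new enough; otherwise the driver returns -EOPNOTSUPP. */
static bool natmode_valid(int chip, unsigned char flags)
{
	for (size_t i = 0; i < sizeof(valid) / sizeof(valid[0]); i++)
		if (chip >= valid[i].chip && flags == valid[i].flags)
			return true;
	return false;
}

int main(void)
{
	printf("T5 DIP+SIP: %d\n", natmode_valid(T5, F_DIP | F_SIP)); /* 0 */
	printf("T6 DIP+SIP: %d\n", natmode_valid(T6, F_DIP | F_SIP)); /* 1 */
	return 0;
}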
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h
index 6296e1d5a12b..3a2fa00c8cde 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h
@@ -108,6 +108,21 @@ struct ch_tc_pedit_fields {
#define PEDIT_TCP_SPORT_DPORT 0x0
#define PEDIT_UDP_SPORT_DPORT 0x0
+enum cxgb4_action_natmode_flags {
+ CXGB4_ACTION_NATMODE_NONE = 0,
+ CXGB4_ACTION_NATMODE_DIP = (1 << 0),
+ CXGB4_ACTION_NATMODE_SIP = (1 << 1),
+ CXGB4_ACTION_NATMODE_DPORT = (1 << 2),
+ CXGB4_ACTION_NATMODE_SPORT = (1 << 3),
+};
+
+/* TC PEDIT action to NATMODE translation entry */
+struct cxgb4_natmode_config {
+ enum chip_type chip;
+ u8 flags;
+ u8 natmode;
+};
+
void cxgb4_process_flow_actions(struct net_device *in,
struct flow_action *actions,
struct ch_filter_specification *fs);
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index c043afb38b6e..8f7eca1e7716 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1912,6 +1912,27 @@ out:
return ret;
}
+static void fec_enet_phy_reset_after_clk_enable(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct phy_device *phy_dev = ndev->phydev;
+
+ if (phy_dev) {
+ phy_reset_after_clk_enable(phy_dev);
+ } else if (fep->phy_node) {
+ /*
+ * If the PHY still is not bound to the MAC, but there is
+ * OF PHY node and a matching PHY device instance already,
+ * use the OF PHY node to obtain the PHY device instance,
+ * and then use that PHY device instance when triggering
+ * the PHY reset.
+ */
+ phy_dev = of_phy_find_device(fep->phy_node);
+ phy_reset_after_clk_enable(phy_dev);
+ put_device(&phy_dev->mdio.dev);
+ }
+}
+
static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
{
struct fec_enet_private *fep = netdev_priv(ndev);
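The fec helper above falls back to of_phy_find_device() when the PHY is not yet attached; that lookup returns a referenced device, which is why the put_device() on the mdio device is needed. A toy refcount model of the lookup-then-release pairing (stand-in names, not the kernel API):

#include <stdio.h>

struct device { int refcount; const char *name; };

/* hypothetical stand-ins for of_phy_find_device() / put_device() */
static struct device *find_device(struct device *node) { node->refcount++; return node; }
static void put_device(struct device *dev) { dev->refcount--; }
static void reset_phy(struct device *dev) { printf("resetting %s\n", dev->name); }

int main(void)
{
	struct device phy = { .refcount = 1, .name = "phy0" };
	struct device *dev = find_device(&phy);	/* lookup takes a reference */
	reset_phy(dev);
	put_device(dev);			/* release it once done */
	printf("refcount balanced at %d\n", phy.refcount);
	return 0;
}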
@@ -1938,7 +1959,7 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
if (ret)
goto failed_clk_ref;
- phy_reset_after_clk_enable(ndev->phydev);
+ fec_enet_phy_reset_after_clk_enable(ndev);
} else {
clk_disable_unprepare(fep->clk_enet_out);
if (fep->clk_ptp) {
@@ -2983,16 +3004,16 @@ fec_enet_open(struct net_device *ndev)
/* Init MAC prior to mii bus probe */
fec_restart(ndev);
- /* Probe and connect to PHY when open the interface */
- ret = fec_enet_mii_probe(ndev);
- if (ret)
- goto err_enet_mii_probe;
-
/* Call phy_reset_after_clk_enable() again if it failed during
* phy_reset_after_clk_enable() before because the PHY wasn't probed.
*/
if (reset_again)
- phy_reset_after_clk_enable(ndev->phydev);
+ fec_enet_phy_reset_after_clk_enable(ndev);
+
+ /* Probe and connect to PHY when open the interface */
+ ret = fec_enet_mii_probe(ndev);
+ if (ret)
+ goto err_enet_mii_probe;
if (fep->quirks & FEC_QUIRK_ERR006687)
imx6q_cpuidle_fec_irqs_used();
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index c5c732601e35..7ef3369953b6 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1349,6 +1349,7 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
int offset = ibmveth_rxq_frame_offset(adapter);
int csum_good = ibmveth_rxq_csum_good(adapter);
int lrg_pkt = ibmveth_rxq_large_packet(adapter);
+ __sum16 iph_check = 0;
skb = ibmveth_rxq_get_buffer(adapter);
@@ -1385,16 +1386,26 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
skb_put(skb, length);
skb->protocol = eth_type_trans(skb, netdev);
- if (csum_good) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- ibmveth_rx_csum_helper(skb, adapter);
+ /* PHYP without PLSO support places a -1 in the ip
+ * checksum for large send frames.
+ */
+ if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
+ struct iphdr *iph = (struct iphdr *)skb->data;
+
+ iph_check = iph->check;
}
- if (length > netdev->mtu + ETH_HLEN) {
+ if ((length > netdev->mtu + ETH_HLEN) ||
+ lrg_pkt || iph_check == 0xffff) {
ibmveth_rx_mss_helper(skb, mss, lrg_pkt);
adapter->rx_large_packets++;
}
+ if (csum_good) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ ibmveth_rx_csum_helper(skb, adapter);
+ }
+
napi_gro_receive(napi, skb); /* send it up */
netdev->stats.rx_packets++;
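The ibmveth change reorders the checks so large-send detection runs before the checksum fixup, and adds the 0xffff marker test described in the comment. A standalone sketch of the resulting predicate; the helper name is hypothetical, and iph_check is assumed to be read from the IPv4 header only for ETH_P_IP frames (staying 0 otherwise):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ETH_HLEN 14

static bool is_large_send(unsigned int length, unsigned int mtu,
                          bool lrg_pkt, uint16_t iph_check)
{
	/* oversized frame, explicit flag, or the 0xffff (-1) marker */
	return length > mtu + ETH_HLEN || lrg_pkt || iph_check == 0xffff;
}

int main(void)
{
	printf("%d\n", is_large_send(9000, 1500, false, 0x1234)); /* 1: oversized */
	printf("%d\n", is_large_send(1200, 1500, false, 0xffff)); /* 1: marker */
	printf("%d\n", is_large_send(1200, 1500, false, 0x1234)); /* 0: normal */
	return 0;
}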
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index 03e034918d14..af441d699a57 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -1113,7 +1113,7 @@ out:
return rc;
probe_err_register:
- kfree(lp->td_ring);
+ kfree(KSEG0ADDR(lp->td_ring));
probe_err_td_ring:
iounmap(lp->tx_dma_regs);
probe_err_dma_tx:
@@ -1133,6 +1133,7 @@ static int korina_remove(struct platform_device *pdev)
iounmap(lp->eth_regs);
iounmap(lp->rx_dma_regs);
iounmap(lp->tx_dma_regs);
+ kfree(KSEG0ADDR(lp->td_ring));
unregister_netdev(bif->dev);
free_netdev(bif->dev);
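The korina fix pairs kfree() with KSEG0ADDR() because the driver stores the uncached KSEG1 alias of the kmalloc()ed descriptor ring in lp->td_ring; converting back through KSEG0 recovers the pointer kmalloc() actually returned. An arithmetic model of the MIPS segment macros (mirroring arch/mips/include/asm/addrspace.h, assuming a 32-bit physical window):

#include <stdio.h>

#define CPHYSADDR(a)  ((unsigned long)(a) & 0x1fffffffUL)
#define KSEG0ADDR(a)  (CPHYSADDR(a) | 0x80000000UL)	/* cached segment */
#define KSEG1ADDR(a)  (CPHYSADDR(a) | 0xa0000000UL)	/* uncached segment */

int main(void)
{
	unsigned long cached = 0x80123450UL;		/* what kmalloc() returned */
	unsigned long uncached = KSEG1ADDR(cached);	/* what lp->td_ring holds */

	printf("uncached alias: 0x%lx\n", uncached);		/* 0xa0123450 */
	printf("recovered:      0x%lx\n", KSEG0ADDR(uncached));	/* 0x80123450 */
	return 0;
}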
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 99d7737e8ad6..502d1b97855c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -943,6 +943,9 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
bool clean_complete = true;
int done;
+ if (!budget)
+ return 0;
+
if (priv->tx_ring_num[TX_XDP]) {
xdp_tx_cq = priv->tx_cq[TX_XDP][cq->ring];
if (xdp_tx_cq->xdp_busy) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 45869b29368f..3ddb7268e415 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -350,7 +350,7 @@ u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv,
.dma = tx_info->map0_dma,
};
- if (!mlx4_en_rx_recycle(ring->recycle_ring, &frame)) {
+ if (!napi_mode || !mlx4_en_rx_recycle(ring->recycle_ring, &frame)) {
dma_unmap_page(priv->ddev, tx_info->map0_dma,
PAGE_SIZE, priv->dma_dir);
put_page(tx_info->page);
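Both mlx4 hunks above enforce the same netpoll contract: a NAPI handler invoked with a zero budget (or outside NAPI context) may clean TX completions but must not process RX work or recycle pages into the RX ring. A minimal model of that budget rule, with illustrative names:

#include <stdio.h>

static void clean_tx(void) { puts("TX completions reaped"); }

static int process_rx(int budget) { return budget; /* up to budget packets */ }

/* illustrative poll routine mirroring the budget checks, not the driver */
static int napi_poll(int budget)
{
	clean_tx();
	if (!budget)	/* netpoll: no RX work, no page recycling */
		return 0;
	return process_rx(budget);
}

int main(void)
{
	printf("netpoll path: %d RX packets\n", napi_poll(0));
	printf("napi path:    %d RX packets\n", napi_poll(64));
	return 0;
}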
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index 317fad787d78..267c080ee084 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -2504,7 +2504,7 @@ static void tlan_phy_power_down(struct net_device *dev)
}
/* Wait for 50 ms and powerup
- * This is abitrary. It is intended to make sure the
+ * This is arbitrary. It is intended to make sure the
* transceiver settles.
*/
tlan_set_timer(dev, msecs_to_jiffies(50), TLAN_TIMER_PHY_PUP);
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index 24d688e3cd93..b40b711cf4bd 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -1432,6 +1432,9 @@ void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
void ipa_endpoint_suspend(struct ipa *ipa)
{
+ if (!ipa->setup_complete)
+ return;
+
if (ipa->modem_netdev)
ipa_modem_suspend(ipa->modem_netdev);
@@ -1443,6 +1446,9 @@ void ipa_endpoint_suspend(struct ipa *ipa)
void ipa_endpoint_resume(struct ipa *ipa)
{
+ if (!ipa->setup_complete)
+ return;
+
ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
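The ipa hunks make both power-management paths no-ops until endpoint setup has completed, so a suspend or resume arriving early cannot touch endpoints that were never initialized. A small sketch of the guard, with illustrative names:

#include <stdbool.h>
#include <stdio.h>

struct ipa_model { bool setup_complete; };

static void endpoint_suspend(struct ipa_model *ipa)
{
	if (!ipa->setup_complete)	/* nothing was set up: no-op */
		return;
	puts("suspending endpoints");
}

int main(void)
{
	struct ipa_model ipa = { .setup_complete = false };
	endpoint_suspend(&ipa);		/* silently skipped */
	ipa.setup_complete = true;
	endpoint_suspend(&ipa);		/* actually runs */
	return 0;
}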