Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/3com/3c59x.c                      |  50
-rw-r--r--  drivers/net/ethernet/arc/emac_main.c                   |  53
-rw-r--r--  drivers/net/ethernet/broadcom/b44.c                    |   2
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c             |  31
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c         | 143
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c                    |  20
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c        |  49
-rw-r--r--  drivers/net/ethernet/davicom/dm9000.c                  |   2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cmd.c               |  21
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_ethtool.c        |   3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/qp.c                |  12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c  |  38
-rw-r--r--  drivers/net/ethernet/octeon/octeon_mgmt.c              |   4
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/Kconfig          |   1
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c                   |  67
-rw-r--r--  drivers/net/ethernet/sfc/farch.c                       |   3
-rw-r--r--  drivers/net/ethernet/sun/sunvnet.c                     |   7
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c                         |  52
18 files changed, 373 insertions(+), 185 deletions(-)
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 3fe45c705933..8ca49f04acec 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -2129,6 +2129,7 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
int entry = vp->cur_tx % TX_RING_SIZE;
struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE];
unsigned long flags;
+ dma_addr_t dma_addr;
if (vortex_debug > 6) {
pr_debug("boomerang_start_xmit()\n");
@@ -2163,24 +2164,48 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum);
if (!skb_shinfo(skb)->nr_frags) {
- vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data,
- skb->len, PCI_DMA_TODEVICE));
+ dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, skb->len,
+ PCI_DMA_TODEVICE);
+ if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
+ goto out_dma_err;
+
+ vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr);
vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len | LAST_FRAG);
} else {
int i;
- vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data,
- skb_headlen(skb), PCI_DMA_TODEVICE));
+ dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data,
+ skb_headlen(skb), PCI_DMA_TODEVICE);
+ if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
+ goto out_dma_err;
+
+ vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr);
vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb_headlen(skb));
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ dma_addr = skb_frag_dma_map(&VORTEX_PCI(vp)->dev, frag,
+ 0,
+ frag->size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) {
+ for (i = i - 1; i >= 0; i--)
+ dma_unmap_page(&VORTEX_PCI(vp)->dev,
+ le32_to_cpu(vp->tx_ring[entry].frag[i+1].addr),
+ le32_to_cpu(vp->tx_ring[entry].frag[i+1].length),
+ DMA_TO_DEVICE);
+
+ pci_unmap_single(VORTEX_PCI(vp),
+ le32_to_cpu(vp->tx_ring[entry].frag[0].addr),
+ le32_to_cpu(vp->tx_ring[entry].frag[0].length),
+ PCI_DMA_TODEVICE);
+
+ goto out_dma_err;
+ }
+
vp->tx_ring[entry].frag[i+1].addr =
- cpu_to_le32(skb_frag_dma_map(
- &VORTEX_PCI(vp)->dev,
- frag,
- frag->page_offset, frag->size, DMA_TO_DEVICE));
+ cpu_to_le32(dma_addr);
if (i == skb_shinfo(skb)->nr_frags-1)
vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(skb_frag_size(frag)|LAST_FRAG);
@@ -2189,7 +2214,10 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
#else
- vp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE));
+ dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE);
+ if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
+ goto out_dma_err;
+ vp->tx_ring[entry].addr = cpu_to_le32(dma_addr);
vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG);
vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
#endif
@@ -2217,7 +2245,11 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb_tx_timestamp(skb);
iowrite16(DownUnstall, ioaddr + EL3_CMD);
spin_unlock_irqrestore(&vp->lock, flags);
+out:
return NETDEV_TX_OK;
+out_dma_err:
+ dev_err(&VORTEX_PCI(vp)->dev, "Error mapping dma buffer\n");
+ goto out;
}
/* The interrupt handler does all of the Rx thread work and cleans up
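The 3c59x change above follows the standard checked-mapping pattern: test every pci_map_single()/skb_frag_dma_map() result with dma_mapping_error(), and on failure unwind whatever fragments were already mapped before dropping the packet. A minimal sketch of the same idea against a hypothetical descriptor ring (struct tx_desc and map_skb_for_tx are invented for illustration, not 3c59x code):

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>
#include <linux/errno.h>

/* Hypothetical descriptor layout, one entry per fragment. */
struct tx_desc {
        dma_addr_t addr;
        unsigned int len;
};

static int map_skb_for_tx(struct device *dev, struct sk_buff *skb,
                          struct tx_desc *desc)
{
        int i;

        desc[0].addr = dma_map_single(dev, skb->data, skb_headlen(skb),
                                      DMA_TO_DEVICE);
        if (dma_mapping_error(dev, desc[0].addr))
                return -ENOMEM;
        desc[0].len = skb_headlen(skb);

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                desc[i + 1].addr = skb_frag_dma_map(dev, frag, 0,
                                                    skb_frag_size(frag),
                                                    DMA_TO_DEVICE);
                if (dma_mapping_error(dev, desc[i + 1].addr))
                        goto unwind;
                desc[i + 1].len = skb_frag_size(frag);
        }
        return 0;

unwind:
        /* Unmap fragments 0..i-1 that were mapped before the failure. */
        while (--i >= 0)
                dma_unmap_page(dev, desc[i + 1].addr, desc[i + 1].len,
                               DMA_TO_DEVICE);
        dma_unmap_single(dev, desc[0].addr, desc[0].len, DMA_TO_DEVICE);
        return -ENOMEM;
}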
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index fe5cfeace6e3..5919394d9f58 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -30,6 +30,17 @@
#define DRV_VERSION "1.0"
/**
+ * arc_emac_tx_avail - Return the number of available slots in the tx ring.
+ * @priv: Pointer to ARC EMAC private data structure.
+ *
+ * returns: the number of slots available for transmission in the tx ring.
+ */
+static inline int arc_emac_tx_avail(struct arc_emac_priv *priv)
+{
+ return (priv->txbd_dirty + TX_BD_NUM - priv->txbd_curr - 1) % TX_BD_NUM;
+}
+
+/**
* arc_emac_adjust_link - Adjust the PHY link duplex.
* @ndev: Pointer to the net_device structure.
*
@@ -180,10 +191,15 @@ static void arc_emac_tx_clean(struct net_device *ndev)
txbd->info = 0;
*txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM;
-
- if (netif_queue_stopped(ndev))
- netif_wake_queue(ndev);
}
+
+ /* Ensure that txbd_dirty is visible to tx() before checking
+ * for queue stopped.
+ */
+ smp_mb();
+
+ if (netif_queue_stopped(ndev) && arc_emac_tx_avail(priv))
+ netif_wake_queue(ndev);
}
/**
@@ -298,7 +314,7 @@ static int arc_emac_poll(struct napi_struct *napi, int budget)
work_done = arc_emac_rx(ndev, budget);
if (work_done < budget) {
napi_complete(napi);
- arc_reg_or(priv, R_ENABLE, RXINT_MASK);
+ arc_reg_or(priv, R_ENABLE, RXINT_MASK | TXINT_MASK);
}
return work_done;
@@ -327,9 +343,9 @@ static irqreturn_t arc_emac_intr(int irq, void *dev_instance)
/* Reset all flags except "MDIO complete" */
arc_reg_set(priv, R_STATUS, status);
- if (status & RXINT_MASK) {
+ if (status & (RXINT_MASK | TXINT_MASK)) {
if (likely(napi_schedule_prep(&priv->napi))) {
- arc_reg_clr(priv, R_ENABLE, RXINT_MASK);
+ arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK);
__napi_schedule(&priv->napi);
}
}
@@ -440,7 +456,7 @@ static int arc_emac_open(struct net_device *ndev)
arc_reg_set(priv, R_TX_RING, (unsigned int)priv->txbd_dma);
/* Enable interrupts */
- arc_reg_set(priv, R_ENABLE, RXINT_MASK | ERR_MASK);
+ arc_reg_set(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);
/* Set CONTROL */
arc_reg_set(priv, R_CTRL,
@@ -511,7 +527,7 @@ static int arc_emac_stop(struct net_device *ndev)
netif_stop_queue(ndev);
/* Disable interrupts */
- arc_reg_clr(priv, R_ENABLE, RXINT_MASK | ERR_MASK);
+ arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);
/* Disable EMAC */
arc_reg_clr(priv, R_CTRL, EN_MASK);
@@ -574,11 +590,9 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
len = max_t(unsigned int, ETH_ZLEN, skb->len);
- /* EMAC still holds this buffer in its possession.
- * CPU must not modify this buffer descriptor
- */
- if (unlikely((le32_to_cpu(*info) & OWN_MASK) == FOR_EMAC)) {
+ if (unlikely(!arc_emac_tx_avail(priv))) {
netif_stop_queue(ndev);
+ netdev_err(ndev, "BUG! Tx Ring full when queue awake!\n");
return NETDEV_TX_BUSY;
}
@@ -607,12 +621,19 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
/* Increment index to point to the next BD */
*txbd_curr = (*txbd_curr + 1) % TX_BD_NUM;
- /* Get "info" of the next BD */
- info = &priv->txbd[*txbd_curr].info;
+ /* Ensure that tx_clean() sees the new txbd_curr before
+ * checking the queue status. This prevents an unneeded wake
+ * of the queue in tx_clean().
+ */
+ smp_mb();
- /* Check if if Tx BD ring is full - next BD is still owned by EMAC */
- if (unlikely((le32_to_cpu(*info) & OWN_MASK) == FOR_EMAC))
+ if (!arc_emac_tx_avail(priv)) {
netif_stop_queue(ndev);
+ /* Refresh tx_dirty */
+ smp_mb();
+ if (arc_emac_tx_avail(priv))
+ netif_start_queue(ndev);
+ }
arc_reg_set(priv, R_STATUS, TXPL_MASK);
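The arc_emac changes wire up the usual lockless stop/wake protocol between the xmit path and tx completion: the producer stops the queue when arc_emac_tx_avail() reaches zero and re-checks after an smp_mb(), while the cleaner advances txbd_dirty, issues its own smp_mb(), and wakes the queue only if it is stopped and slots are now free. The ring arithmetic itself is plain modular math; a standalone illustration (plain C, index values invented):

#include <stdio.h>

#define TX_BD_NUM 128   /* example ring size; the driver's TX_BD_NUM may differ */

/* Same formula as arc_emac_tx_avail(): slots the producer may still fill. */
static int tx_avail(int txbd_dirty, int txbd_curr)
{
        return (txbd_dirty + TX_BD_NUM - txbd_curr - 1) % TX_BD_NUM;
}

int main(void)
{
        printf("%d\n", tx_avail(0, 0));    /* empty ring: 127 (one slot kept unused) */
        printf("%d\n", tx_avail(5, 4));    /* producer caught up to cleaner: 0, queue stops */
        printf("%d\n", tx_avail(10, 100)); /* producer wrapped ahead: 37 slots left */
        return 0;
}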
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 4a7028d65912..d588136b23b9 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -1697,7 +1697,7 @@ static struct rtnl_link_stats64 *b44_get_stats64(struct net_device *dev,
hwstat->tx_underruns +
hwstat->tx_excessive_cols +
hwstat->tx_late_cols);
- nstat->multicast = hwstat->tx_multicast_pkts;
+ nstat->multicast = hwstat->rx_multicast_pkts;
nstat->collisions = hwstat->tx_total_cols;
nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 6f4e18644bd4..d9b9170ed2fc 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -534,6 +534,25 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
while ((processed < to_process) && (processed < budget)) {
cb = &priv->rx_cbs[priv->rx_read_ptr];
skb = cb->skb;
+
+ processed++;
+ priv->rx_read_ptr++;
+
+ if (priv->rx_read_ptr == priv->num_rx_bds)
+ priv->rx_read_ptr = 0;
+
+ /* We do not have a backing SKB, so we do not have a corresponding
+ * DMA mapping for this incoming packet since
+ * bcm_sysport_rx_refill always either has both skb and mapping
+ * or none.
+ */
+ if (unlikely(!skb)) {
+ netif_err(priv, rx_err, ndev, "out of memory!\n");
+ ndev->stats.rx_dropped++;
+ ndev->stats.rx_errors++;
+ goto refill;
+ }
+
dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
RX_BUF_LENGTH, DMA_FROM_DEVICE);
@@ -543,23 +562,11 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) &
DESC_STATUS_MASK;
- processed++;
- priv->rx_read_ptr++;
- if (priv->rx_read_ptr == priv->num_rx_bds)
- priv->rx_read_ptr = 0;
-
netif_dbg(priv, rx_status, ndev,
"p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n",
p_index, priv->rx_c_index, priv->rx_read_ptr,
len, status);
- if (unlikely(!skb)) {
- netif_err(priv, rx_err, ndev, "out of memory!\n");
- ndev->stats.rx_dropped++;
- ndev->stats.rx_errors++;
- goto refill;
- }
-
if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
netif_err(priv, rx_status, ndev, "fragmented packet!\n");
ndev->stats.rx_dropped++;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 3f9d4de8173c..5cc9cae21ed5 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -875,6 +875,7 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
int last_tx_cn, last_c_index, num_tx_bds;
struct enet_cb *tx_cb_ptr;
struct netdev_queue *txq;
+ unsigned int bds_compl;
unsigned int c_index;
/* Compute how many buffers are transmitted since last xmit call */
@@ -899,7 +900,9 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
/* Reclaim transmitted buffers */
while (last_tx_cn-- > 0) {
tx_cb_ptr = ring->cbs + last_c_index;
+ bds_compl = 0;
if (tx_cb_ptr->skb) {
+ bds_compl = skb_shinfo(tx_cb_ptr->skb)->nr_frags + 1;
dev->stats.tx_bytes += tx_cb_ptr->skb->len;
dma_unmap_single(&dev->dev,
dma_unmap_addr(tx_cb_ptr, dma_addr),
@@ -916,7 +919,7 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0);
}
dev->stats.tx_packets++;
- ring->free_bds += 1;
+ ring->free_bds += bds_compl;
last_c_index++;
last_c_index &= (num_tx_bds - 1);
@@ -1274,12 +1277,29 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
while ((rxpktprocessed < rxpkttoprocess) &&
(rxpktprocessed < budget)) {
+ cb = &priv->rx_cbs[priv->rx_read_ptr];
+ skb = cb->skb;
+
+ rxpktprocessed++;
+
+ priv->rx_read_ptr++;
+ priv->rx_read_ptr &= (priv->num_rx_bds - 1);
+
+ /* We do not have a backing SKB, so we do not have a
+ * corresponding DMA mapping for this incoming packet since
+ * bcmgenet_rx_refill always either has both skb and mapping or
+ * none.
+ */
+ if (unlikely(!skb)) {
+ dev->stats.rx_dropped++;
+ dev->stats.rx_errors++;
+ goto refill;
+ }
+
/* Unmap the packet contents such that we can use the
* RSV from the 64 bytes descriptor when enabled and save
* a 32-bits register read
*/
- cb = &priv->rx_cbs[priv->rx_read_ptr];
- skb = cb->skb;
dma_unmap_single(&dev->dev, dma_unmap_addr(cb, dma_addr),
priv->rx_buf_len, DMA_FROM_DEVICE);
@@ -1307,18 +1327,6 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
__func__, p_index, priv->rx_c_index,
priv->rx_read_ptr, dma_length_status);
- rxpktprocessed++;
-
- priv->rx_read_ptr++;
- priv->rx_read_ptr &= (priv->num_rx_bds - 1);
-
- /* out of memory, just drop packets at the hardware level */
- if (unlikely(!skb)) {
- dev->stats.rx_dropped++;
- dev->stats.rx_errors++;
- goto refill;
- }
-
if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
netif_err(priv, rx_status, dev,
"dropping fragmented packet!\n");
@@ -1736,13 +1744,63 @@ static void bcmgenet_init_multiq(struct net_device *dev)
bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
}
+static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
+{
+ int ret = 0;
+ int timeout = 0;
+ u32 reg;
+
+ /* Disable TDMA to stop adding more frames to TX DMA */
+ reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
+ reg &= ~DMA_EN;
+ bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+
+ /* Check TDMA status register to confirm TDMA is disabled */
+ while (timeout++ < DMA_TIMEOUT_VAL) {
+ reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
+ if (reg & DMA_DISABLED)
+ break;
+
+ udelay(1);
+ }
+
+ if (timeout == DMA_TIMEOUT_VAL) {
+ netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
+ ret = -ETIMEDOUT;
+ }
+
+ /* Wait 10ms for packet drain in both tx and rx dma */
+ usleep_range(10000, 20000);
+
+ /* Disable RDMA */
+ reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
+ reg &= ~DMA_EN;
+ bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
+
+ timeout = 0;
+ /* Check RDMA status register to confirm RDMA is disabled */
+ while (timeout++ < DMA_TIMEOUT_VAL) {
+ reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
+ if (reg & DMA_DISABLED)
+ break;
+
+ udelay(1);
+ }
+
+ if (timeout == DMA_TIMEOUT_VAL) {
+ netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
+ ret = -ETIMEDOUT;
+ }
+
+ return ret;
+}
+
static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
{
int i;
/* disable DMA */
- bcmgenet_rdma_writel(priv, 0, DMA_CTRL);
- bcmgenet_tdma_writel(priv, 0, DMA_CTRL);
+ bcmgenet_dma_teardown(priv);
for (i = 0; i < priv->num_tx_bds; i++) {
if (priv->tx_cbs[i].skb != NULL) {
@@ -2101,57 +2159,6 @@ err_clk_disable:
return ret;
}
-static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
-{
- int ret = 0;
- int timeout = 0;
- u32 reg;
-
- /* Disable TDMA to stop add more frames in TX DMA */
- reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
- reg &= ~DMA_EN;
- bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
-
- /* Check TDMA status register to confirm TDMA is disabled */
- while (timeout++ < DMA_TIMEOUT_VAL) {
- reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
- if (reg & DMA_DISABLED)
- break;
-
- udelay(1);
- }
-
- if (timeout == DMA_TIMEOUT_VAL) {
- netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
- ret = -ETIMEDOUT;
- }
-
- /* Wait 10ms for packet drain in both tx and rx dma */
- usleep_range(10000, 20000);
-
- /* Disable RDMA */
- reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
- reg &= ~DMA_EN;
- bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
-
- timeout = 0;
- /* Check RDMA status register to confirm RDMA is disabled */
- while (timeout++ < DMA_TIMEOUT_VAL) {
- reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
- if (reg & DMA_DISABLED)
- break;
-
- udelay(1);
- }
-
- if (timeout == DMA_TIMEOUT_VAL) {
- netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
- ret = -ETIMEDOUT;
- }
-
- return ret;
-}
-
static void bcmgenet_netif_stop(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
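bcmgenet_dma_teardown(), now also called from bcmgenet_fini_dma(), is a bounded-poll shutdown: clear the enable bit, then poll the status register until the hardware reports DISABLED or the timeout expires, for TDMA and then RDMA. The core loop reduced to a sketch over a hypothetical control/status register pair (the helper name and bit values are illustrative, not GENET's):

#include <linux/io.h>
#include <linux/bits.h>
#include <linux/delay.h>
#include <linux/errno.h>

#define SKETCH_DMA_EN        BIT(0)   /* illustrative bit positions */
#define SKETCH_DMA_DISABLED  BIT(0)
#define SKETCH_TIMEOUT_US    1000

/* Returns 0 once the engine reports disabled, -ETIMEDOUT otherwise. */
static int dma_engine_disable(void __iomem *ctrl, void __iomem *status)
{
        int timeout = 0;

        writel(readl(ctrl) & ~SKETCH_DMA_EN, ctrl);

        while (timeout++ < SKETCH_TIMEOUT_US) {
                if (readl(status) & SKETCH_DMA_DISABLED)
                        return 0;
                udelay(1);
        }
        return -ETIMEDOUT;
}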
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index cb77ae93d89a..e7d3a620d96a 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -7914,8 +7914,6 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
entry = tnapi->tx_prod;
base_flags = 0;
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- base_flags |= TXD_FLAG_TCPUDP_CSUM;
mss = skb_shinfo(skb)->gso_size;
if (mss) {
@@ -7929,6 +7927,13 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
+ /* HW/FW can not correctly segment packets that have been
+ * vlan encapsulated.
+ */
+ if (skb->protocol == htons(ETH_P_8021Q) ||
+ skb->protocol == htons(ETH_P_8021AD))
+ return tg3_tso_bug(tp, tnapi, txq, skb);
+
if (!skb_is_gso_v6(skb)) {
if (unlikely((ETH_HLEN + hdr_len) > 80) &&
tg3_flag(tp, TSO_BUG))
@@ -7979,6 +7984,17 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
base_flags |= tsflags << 12;
}
}
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ /* HW/FW can not correctly checksum packets that have been
+ * vlan encapsulated.
+ */
+ if (skb->protocol == htons(ETH_P_8021Q) ||
+ skb->protocol == htons(ETH_P_8021AD)) {
+ if (skb_checksum_help(skb))
+ goto drop;
+ } else {
+ base_flags |= TXD_FLAG_TCPUDP_CSUM;
+ }
}
if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
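The tg3 hunks route VLAN-encapsulated frames around offloads the hardware cannot apply once a VLAN header is in the way: GSO frames are handed to tg3_tso_bug() for software segmentation, and CHECKSUM_PARTIAL frames are finished with skb_checksum_help(). The checksum half of that decision in isolation, as a sketch (the helper name and flag parameter are invented, not tg3 symbols):

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>

/* Use the NIC checksum offload unless the frame carries a VLAN header in
 * skb->protocol, in which case fall back to a software checksum.
 */
static int tx_prepare_csum(struct sk_buff *skb, u32 *base_flags, u32 hw_csum_flag)
{
        if (skb->ip_summed != CHECKSUM_PARTIAL)
                return 0;

        if (skb->protocol == htons(ETH_P_8021Q) ||
            skb->protocol == htons(ETH_P_8021AD))
                return skb_checksum_help(skb);  /* 0 on success, caller drops on error */

        *base_flags |= hw_csum_flag;            /* let the hardware fill it in */
        return 0;
}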
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 8c34811a1128..e5be511a3c38 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -6478,6 +6478,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
struct port_info *pi;
bool highdma = false;
struct adapter *adapter = NULL;
+ void __iomem *regs;
printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
@@ -6494,19 +6495,35 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_release_regions;
}
+ regs = pci_ioremap_bar(pdev, 0);
+ if (!regs) {
+ dev_err(&pdev->dev, "cannot map device registers\n");
+ err = -ENOMEM;
+ goto out_disable_device;
+ }
+
+ /* We control everything through one PF */
+ func = SOURCEPF_GET(readl(regs + PL_WHOAMI));
+ if (func != ent->driver_data) {
+ iounmap(regs);
+ pci_disable_device(pdev);
+ pci_save_state(pdev); /* to restore SR-IOV later */
+ goto sriov;
+ }
+
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
highdma = true;
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
if (err) {
dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
"coherent allocations\n");
- goto out_disable_device;
+ goto out_unmap_bar0;
}
} else {
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "no usable DMA configuration\n");
- goto out_disable_device;
+ goto out_unmap_bar0;
}
}
@@ -6518,7 +6535,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
if (!adapter) {
err = -ENOMEM;
- goto out_disable_device;
+ goto out_unmap_bar0;
}
adapter->workq = create_singlethread_workqueue("cxgb4");
@@ -6530,20 +6547,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* PCI device has been enabled */
adapter->flags |= DEV_ENABLED;
- adapter->regs = pci_ioremap_bar(pdev, 0);
- if (!adapter->regs) {
- dev_err(&pdev->dev, "cannot map device registers\n");
- err = -ENOMEM;
- goto out_free_adapter;
- }
-
- /* We control everything through one PF */
- func = SOURCEPF_GET(readl(adapter->regs + PL_WHOAMI));
- if (func != ent->driver_data) {
- pci_save_state(pdev); /* to restore SR-IOV later */
- goto sriov;
- }
-
+ adapter->regs = regs;
adapter->pdev = pdev;
adapter->pdev_dev = &pdev->dev;
adapter->mbox = func;
@@ -6560,7 +6564,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
err = t4_prep_adapter(adapter);
if (err)
- goto out_unmap_bar0;
+ goto out_free_adapter;
+
if (!is_t4(adapter->params.chip)) {
s_qpp = QUEUESPERPAGEPF1 * adapter->fn;
@@ -6577,14 +6582,14 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_err(&pdev->dev,
"Incorrect number of egress queues per page\n");
err = -EINVAL;
- goto out_unmap_bar0;
+ goto out_free_adapter;
}
adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
pci_resource_len(pdev, 2));
if (!adapter->bar2) {
dev_err(&pdev->dev, "cannot map device bar2 region\n");
err = -ENOMEM;
- goto out_unmap_bar0;
+ goto out_free_adapter;
}
}
@@ -6722,13 +6727,13 @@ sriov:
out_unmap_bar:
if (!is_t4(adapter->params.chip))
iounmap(adapter->bar2);
- out_unmap_bar0:
- iounmap(adapter->regs);
out_free_adapter:
if (adapter->workq)
destroy_workqueue(adapter->workq);
kfree(adapter);
+ out_unmap_bar0:
+ iounmap(regs);
out_disable_device:
pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
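The cxgb4 rework maps BAR0 before the adapter is allocated so the PF identity check can run first, and reorders the error labels so teardown happens in reverse order of acquisition: free the adapter, then unmap BAR0, then disable the device. The unwind discipline in generic form (probe_sketch, struct adapter_sketch and setup_hw are placeholders, not cxgb4 symbols):

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/io.h>

struct adapter_sketch { void __iomem *regs; };               /* hypothetical driver state */

static int setup_hw(struct adapter_sketch *a) { return 0; }  /* stand-in init step */

static int probe_sketch(struct pci_dev *pdev)
{
        struct adapter_sketch *adap;
        void __iomem *regs;
        int err;

        err = pci_enable_device(pdev);
        if (err)
                return err;

        regs = pci_ioremap_bar(pdev, 0);
        if (!regs) {
                err = -ENOMEM;
                goto out_disable_device;
        }

        adap = kzalloc(sizeof(*adap), GFP_KERNEL);
        if (!adap) {
                err = -ENOMEM;
                goto out_unmap_bar0;
        }
        adap->regs = regs;

        err = setup_hw(adap);
        if (err)
                goto out_free_adapter;
        return 0;

out_free_adapter:       /* release in reverse order of acquisition */
        kfree(adap);
out_unmap_bar0:
        iounmap(regs);
out_disable_device:
        pci_disable_device(pdev);
        return err;
}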
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 9b33057a9477..70089c29d307 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1399,7 +1399,7 @@ static struct dm9000_plat_data *dm9000_parse_dt(struct device *dev)
const void *mac_addr;
if (!IS_ENABLED(CONFIG_OF) || !np)
- return NULL;
+ return ERR_PTR(-ENXIO);
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 65a4a0f88ea0..02a2e90d581a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -2389,6 +2389,22 @@ struct mlx4_slaves_pport mlx4_phys_to_slaves_pport_actv(
}
EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport_actv);
+static int mlx4_slaves_closest_port(struct mlx4_dev *dev, int slave, int port)
+{
+ struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
+ int min_port = find_first_bit(actv_ports.ports, dev->caps.num_ports)
+ + 1;
+ int max_port = min_port +
+ bitmap_weight(actv_ports.ports, dev->caps.num_ports);
+
+ if (port < min_port)
+ port = min_port;
+ else if (port >= max_port)
+ port = max_port - 1;
+
+ return port;
+}
+
int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -2402,6 +2418,7 @@ int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac)
if (slave < 0)
return -EINVAL;
+ port = mlx4_slaves_closest_port(dev, slave, port);
s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
s_info->mac = mac;
mlx4_info(dev, "default mac on vf %d port %d to %llX will take effect only after vf restart\n",
@@ -2428,6 +2445,7 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
if (slave < 0)
return -EINVAL;
+ port = mlx4_slaves_closest_port(dev, slave, port);
vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
if ((0 == vlan) && (0 == qos))
@@ -2455,6 +2473,7 @@ bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave,
struct mlx4_priv *priv;
priv = mlx4_priv(dev);
+ port = mlx4_slaves_closest_port(dev, slave, port);
vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
if (MLX4_VGT != vp_oper->state.default_vlan) {
@@ -2482,6 +2501,7 @@ int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting)
if (slave < 0)
return -EINVAL;
+ port = mlx4_slaves_closest_port(dev, slave, port);
s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
s_info->spoofchk = setting;
@@ -2535,6 +2555,7 @@ int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_stat
if (slave < 0)
return -EINVAL;
+ port = mlx4_slaves_closest_port(dev, slave, port);
switch (link_state) {
case IFLA_VF_LINK_STATE_AUTO:
/* get current link state */
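mlx4_slaves_closest_port() clamps the caller-supplied port into the contiguous range of ports the VF actually owns, so each ndo_set_vf_* handler only indexes vport state the slave can legitimately touch. The clamp is plain interval arithmetic; a worked example (standalone C, port numbers invented):

#include <stdio.h>

/* Same clamp as mlx4_slaves_closest_port(): keep port in [min_port, max_port). */
static int closest_port(int min_port, int num_active, int port)
{
        int max_port = min_port + num_active;

        if (port < min_port)
                port = min_port;
        else if (port >= max_port)
                port = max_port - 1;
        return port;
}

int main(void)
{
        /* VF owns only port 2 of a dual-port HCA. */
        printf("%d\n", closest_port(2, 1, 1));  /* request for port 1 -> clamped to 2 */
        printf("%d\n", closest_port(2, 1, 2));  /* port 2 is in range -> unchanged */
        /* VF owns ports 1 and 2; requests above the range clamp to the last port. */
        printf("%d\n", closest_port(1, 2, 3));  /* -> 2 */
        return 0;
}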
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index e22f24f784fc..35ff2925110a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -487,6 +487,9 @@ static int mlx4_en_set_pauseparam(struct net_device *dev,
struct mlx4_en_dev *mdev = priv->mdev;
int err;
+ if (pause->autoneg)
+ return -EINVAL;
+
priv->prof->tx_pause = pause->tx_pause != 0;
priv->prof->rx_pause = pause->rx_pause != 0;
err = mlx4_SET_PORT_general(mdev->dev, priv->port,
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 0dc31d85fc3b..2301365c79c7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -390,13 +390,14 @@ err_icm:
EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
#define MLX4_UPDATE_QP_SUPPORTED_ATTRS MLX4_UPDATE_QP_SMAC
-int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp,
+int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
enum mlx4_update_qp_attr attr,
struct mlx4_update_qp_params *params)
{
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_update_qp_context *cmd;
u64 pri_addr_path_mask = 0;
+ u64 qp_mask = 0;
int err = 0;
mailbox = mlx4_alloc_cmd_mailbox(dev);
@@ -413,9 +414,16 @@ int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp,
cmd->qp_context.pri_path.grh_mylmc = params->smac_index;
}
+ if (attr & MLX4_UPDATE_QP_VSD) {
+ qp_mask |= 1ULL << MLX4_UPD_QP_MASK_VSD;
+ if (params->flags & MLX4_UPDATE_QP_PARAMS_FLAGS_VSD_ENABLE)
+ cmd->qp_context.param3 |= cpu_to_be32(MLX4_STRIP_VLAN);
+ }
+
cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask);
+ cmd->qp_mask = cpu_to_be64(qp_mask);
- err = mlx4_cmd(dev, mailbox->dma, qp->qpn & 0xffffff, 0,
+ err = mlx4_cmd(dev, mailbox->dma, qpn & 0xffffff, 0,
MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 1089367fed22..5d2498dcf536 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -702,11 +702,13 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
struct mlx4_qp_context *qpc = inbox->buf + 8;
struct mlx4_vport_oper_state *vp_oper;
struct mlx4_priv *priv;
+ u32 qp_type;
int port;
port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
priv = mlx4_priv(dev);
vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
+ qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
if (MLX4_VGT != vp_oper->state.default_vlan) {
/* the reserved QPs (special, proxy, tunnel)
@@ -715,8 +717,20 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
if (mlx4_is_qp_reserved(dev, qpn))
return 0;
- /* force strip vlan by clear vsd */
- qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
+ /* force vlan stripping by clearing vsd; MLX QP here refers to Raw Ethernet */
+ if (qp_type == MLX4_QP_ST_UD ||
+ (qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) {
+ if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) {
+ *(__be32 *)inbox->buf =
+ cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) |
+ MLX4_QP_OPTPAR_VLAN_STRIPPING);
+ qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
+ } else {
+ struct mlx4_update_qp_params params = {.flags = 0};
+
+ mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
+ }
+ }
if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
@@ -3998,13 +4012,17 @@ int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
}
port = (rqp->sched_queue >> 6 & 1) + 1;
- smac_index = cmd->qp_context.pri_path.grh_mylmc;
- err = mac_find_smac_ix_in_slave(dev, slave, port,
- smac_index, &mac);
- if (err) {
- mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
- qpn, smac_index);
- goto err_mac;
+
+ if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) {
+ smac_index = cmd->qp_context.pri_path.grh_mylmc;
+ err = mac_find_smac_ix_in_slave(dev, slave, port,
+ smac_index, &mac);
+
+ if (err) {
+ mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
+ qpn, smac_index);
+ goto err_mac;
+ }
}
err = mlx4_cmd(dev, inbox->dma,
@@ -4818,7 +4836,7 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
upd_context = mailbox->buf;
- upd_context->qp_mask = cpu_to_be64(MLX4_UPD_QP_MASK_VSD);
+ upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
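The mlx4_vf_immed_vlan_work_handler() fix above corrects an enum-as-mask bug: MLX4_UPD_QP_MASK_VSD is a bit position, so it must be shifted into a mask (1ULL << MLX4_UPD_QP_MASK_VSD) before being stored in qp_mask, matching what mlx4_update_qp() now writes. A two-value illustration (plain C, the bit position is made up):

#include <stdio.h>

#define UPD_QP_MASK_VSD 2   /* a bit *position*, as in the mlx4 enum (value illustrative) */

int main(void)
{
        unsigned long long wrong = UPD_QP_MASK_VSD;          /* 0x2: sets the wrong bit */
        unsigned long long right = 1ULL << UPD_QP_MASK_VSD;  /* 0x4: sets bit 2 as intended */

        printf("wrong=%#llx right=%#llx\n", wrong, right);
        return 0;
}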
diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c
index 979c6980639f..a42293092ea4 100644
--- a/drivers/net/ethernet/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/octeon/octeon_mgmt.c
@@ -290,9 +290,11 @@ static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
/* Read the hardware TX timestamp if one was recorded */
if (unlikely(re.s.tstamp)) {
struct skb_shared_hwtstamps ts;
+ u64 ns;
+
memset(&ts, 0, sizeof(ts));
/* Read the timestamp */
- u64 ns = cvmx_read_csr(CVMX_MIXX_TSTAMP(p->port));
+ ns = cvmx_read_csr(CVMX_MIXX_TSTAMP(p->port));
/* Remove the timestamp from the FIFO */
cvmx_write_csr(CVMX_MIXX_TSCTL(p->port), 0);
/* Tell the kernel about the timestamp */
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
index 44c8be1c6805..5f7a35212796 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
@@ -7,6 +7,7 @@ config PCH_GBE
depends on PCI && (X86_32 || COMPILE_TEST)
select MII
select PTP_1588_CLOCK_PCH
+ select NET_PTP_CLASSIFY
---help---
This is a gigabit ethernet driver for EG20T PCH.
EG20T PCH is the platform controller hub that is used in Intel's
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 91652e7235e4..0921302553c6 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -1783,33 +1783,31 @@ static void __rtl8169_set_features(struct net_device *dev,
netdev_features_t features)
{
struct rtl8169_private *tp = netdev_priv(dev);
- netdev_features_t changed = features ^ dev->features;
void __iomem *ioaddr = tp->mmio_addr;
+ u32 rx_config;
- if (!(changed & (NETIF_F_RXALL | NETIF_F_RXCSUM |
- NETIF_F_HW_VLAN_CTAG_RX)))
- return;
+ rx_config = RTL_R32(RxConfig);
+ if (features & NETIF_F_RXALL)
+ rx_config |= (AcceptErr | AcceptRunt);
+ else
+ rx_config &= ~(AcceptErr | AcceptRunt);
- if (changed & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX)) {
- if (features & NETIF_F_RXCSUM)
- tp->cp_cmd |= RxChkSum;
- else
- tp->cp_cmd &= ~RxChkSum;
+ RTL_W32(RxConfig, rx_config);
- if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
- tp->cp_cmd |= RxVlan;
- else
- tp->cp_cmd &= ~RxVlan;
+ if (features & NETIF_F_RXCSUM)
+ tp->cp_cmd |= RxChkSum;
+ else
+ tp->cp_cmd &= ~RxChkSum;
- RTL_W16(CPlusCmd, tp->cp_cmd);
- RTL_R16(CPlusCmd);
- }
- if (changed & NETIF_F_RXALL) {
- int tmp = (RTL_R32(RxConfig) & ~(AcceptErr | AcceptRunt));
- if (features & NETIF_F_RXALL)
- tmp |= (AcceptErr | AcceptRunt);
- RTL_W32(RxConfig, tmp);
- }
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ tp->cp_cmd |= RxVlan;
+ else
+ tp->cp_cmd &= ~RxVlan;
+
+ tp->cp_cmd |= RTL_R16(CPlusCmd) & ~(RxVlan | RxChkSum);
+
+ RTL_W16(CPlusCmd, tp->cp_cmd);
+ RTL_R16(CPlusCmd);
}
static int rtl8169_set_features(struct net_device *dev,
@@ -1817,8 +1815,11 @@ static int rtl8169_set_features(struct net_device *dev,
{
struct rtl8169_private *tp = netdev_priv(dev);
+ features &= NETIF_F_RXALL | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX;
+
rtl_lock_work(tp);
- __rtl8169_set_features(dev, features);
+ if (features ^ dev->features)
+ __rtl8169_set_features(dev, features);
rtl_unlock_work(tp);
return 0;
@@ -7118,8 +7119,7 @@ static void rtl_hw_initialize(struct rtl8169_private *tp)
}
}
-static int
-rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
const unsigned int region = cfg->region;
@@ -7194,7 +7194,7 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_mwi_2;
}
- tp->cp_cmd = RxChkSum;
+ tp->cp_cmd = 0;
if ((sizeof(dma_addr_t) > 4) &&
!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
@@ -7235,13 +7235,6 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
- /*
- * Pretend we are using VLANs; This bypasses a nasty bug where
- * Interrupts stop flowing on high load on 8110SCd controllers.
- */
- if (tp->mac_version == RTL_GIGA_MAC_VER_05)
- tp->cp_cmd |= RxVlan;
-
rtl_init_mdio_ops(tp);
rtl_init_pll_power_ops(tp);
rtl_init_jumbo_ops(tp);
@@ -7302,8 +7295,14 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
NETIF_F_HIGHDMA;
+ tp->cp_cmd |= RxChkSum | RxVlan;
+
+ /*
+ * Pretend we are using VLANs; This bypasses a nasty bug where
+ * Interrupts stop flowing on high load on 8110SCd controllers.
+ */
if (tp->mac_version == RTL_GIGA_MAC_VER_05)
- /* 8110SCd requires hardware Rx VLAN - disallow toggling */
+ /* Disallow toggling */
dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
if (tp->txd_version == RTL_TD_0)
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index 0537381cd2f6..6859437b59fb 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -2933,6 +2933,9 @@ void efx_farch_filter_sync_rx_mode(struct efx_nic *efx)
u32 crc;
int bit;
+ if (!efx_dev_registered(efx))
+ return;
+
netif_addr_lock_bh(net_dev);
efx->unicast_filter = !(net_dev->flags & IFF_PROMISC);
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 23c89ab5a6ad..f67539650c38 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -350,14 +350,17 @@ static int vnet_walk_rx_one(struct vnet_port *port,
if (IS_ERR(desc))
return PTR_ERR(desc);
+ if (desc->hdr.state != VIO_DESC_READY)
+ return 1;
+
+ rmb();
+
viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%llx:%llx]\n",
desc->hdr.state, desc->hdr.ack,
desc->size, desc->ncookies,
desc->cookies[0].cookie_addr,
desc->cookies[0].cookie_size);
- if (desc->hdr.state != VIO_DESC_READY)
- return 1;
err = vnet_rx_one(port, desc->size, desc->cookies, desc->ncookies);
if (err == -ECONNRESET)
return err;
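The sunvnet hunk moves the VIO_DESC_READY test ahead of any use of the descriptor contents and inserts an rmb(), so the payload fields are only loaded after the state field has been observed as READY. A compact sketch of that consumer-side ordering (struct shared_desc and consume_one are invented for illustration):

#include <asm/barrier.h>

#define DESC_READY 1

/* Hypothetical descriptor shared with a producer that sets ->state last. */
struct shared_desc {
        int state;
        unsigned int size;
};

static int consume_one(struct shared_desc *d)
{
        if (d->state != DESC_READY)
                return -1;

        /* Keep the payload loads from being reordered before the state check. */
        rmb();

        return d->size;     /* now safe to read the rest of the descriptor */
}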
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 999fb72688d2..e2a00287f8eb 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -699,6 +699,28 @@ static void cpsw_rx_handler(void *token, int len, int status)
cpsw_dual_emac_src_port_detect(status, priv, ndev, skb);
if (unlikely(status < 0) || unlikely(!netif_running(ndev))) {
+ bool ndev_status = false;
+ struct cpsw_slave *slave = priv->slaves;
+ int n;
+
+ if (priv->data.dual_emac) {
+ /* In dual emac mode check for all interfaces */
+ for (n = priv->data.slaves; n; n--, slave++)
+ if (netif_running(slave->ndev))
+ ndev_status = true;
+ }
+
+ if (ndev_status && (status >= 0)) {
+ /* The received packet is for an interface which
+ * is already down while the other interface is up
+ * and running. Instead of freeing the skb, which
+ * would reduce the number of rx descriptors in the
+ * DMA engine, requeue it back to cpdma.
+ */
+ new_skb = skb;
+ goto requeue;
+ }
+
/* the interface is going down, skbs are purged */
dev_kfree_skb_any(skb);
return;
@@ -717,6 +739,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
new_skb = skb;
}
+requeue:
ret = cpdma_chan_submit(priv->rxch, new_skb, new_skb->data,
skb_tailroom(new_skb), 0);
if (WARN_ON(ret < 0))
@@ -2311,10 +2334,19 @@ static int cpsw_suspend(struct device *dev)
struct net_device *ndev = platform_get_drvdata(pdev);
struct cpsw_priv *priv = netdev_priv(ndev);
- if (netif_running(ndev))
- cpsw_ndo_stop(ndev);
+ if (priv->data.dual_emac) {
+ int i;
- for_each_slave(priv, soft_reset_slave);
+ for (i = 0; i < priv->data.slaves; i++) {
+ if (netif_running(priv->slaves[i].ndev))
+ cpsw_ndo_stop(priv->slaves[i].ndev);
+ soft_reset_slave(priv->slaves + i);
+ }
+ } else {
+ if (netif_running(ndev))
+ cpsw_ndo_stop(ndev);
+ for_each_slave(priv, soft_reset_slave);
+ }
pm_runtime_put_sync(&pdev->dev);
@@ -2328,14 +2360,24 @@ static int cpsw_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct net_device *ndev = platform_get_drvdata(pdev);
+ struct cpsw_priv *priv = netdev_priv(ndev);
pm_runtime_get_sync(&pdev->dev);
/* Select default pin state */
pinctrl_pm_select_default_state(&pdev->dev);
- if (netif_running(ndev))
- cpsw_ndo_open(ndev);
+ if (priv->data.dual_emac) {
+ int i;
+
+ for (i = 0; i < priv->data.slaves; i++) {
+ if (netif_running(priv->slaves[i].ndev))
+ cpsw_ndo_open(priv->slaves[i].ndev);
+ }
+ } else {
+ if (netif_running(ndev))
+ cpsw_ndo_open(ndev);
+ }
return 0;
}