Diffstat (limited to 'drivers/net')
 drivers/net/ethernet/altera/altera_tse_main.c  |  47
 drivers/net/ethernet/amd/xgbe/xgbe-drv.c       | 175
 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h     |   2
 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c     |  54
 drivers/net/ethernet/cisco/enic/enic_main.c    |   4
 drivers/net/ethernet/intel/i40e/i40e_common.c  |   7
 drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c  |   2
 drivers/net/ethernet/intel/i40e/i40e_debugfs.c |   4
 drivers/net/ethernet/intel/i40e/i40e_main.c    |  44
 drivers/net/ethernet/intel/i40e/i40e_nvm.c     |  35
 drivers/net/ethernet/intel/i40e/i40e_txrx.c    | 119
 drivers/net/ethernet/intel/i40e/i40e_txrx.h    |   1
 drivers/net/ethernet/intel/i40evf/i40e_txrx.c  | 143
 drivers/net/ethernet/intel/i40evf/i40e_txrx.h  |   1
 drivers/net/ethernet/realtek/r8169.c           |   2
 drivers/net/ethernet/rocker/rocker.c           |   6
 drivers/net/ethernet/smsc/smc91x.c             |   9
 drivers/net/ethernet/smsc/smc91x.h             | 114
 drivers/net/ethernet/sun/niu.c                 |   6
 drivers/net/ethernet/ti/cpsw.c                 |   2
 drivers/net/phy/amd-xgbe-phy.c                 |  82
 drivers/net/team/team.c                        |   4
 drivers/net/usb/Kconfig                        |   1
 drivers/net/usb/asix_devices.c                 |   4
 drivers/net/xen-netback/netback.c              |  29
 25 files changed, 568 insertions(+), 329 deletions(-)
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 760c72c6e2ac..6725dc00750b 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -376,7 +376,8 @@ static int tse_rx(struct altera_tse_private *priv, int limit)
u16 pktlength;
u16 pktstatus;
- while ((rxstatus = priv->dmaops->get_rx_status(priv)) != 0) {
+ while (((rxstatus = priv->dmaops->get_rx_status(priv)) != 0) &&
+ (count < limit)) {
pktstatus = rxstatus >> 16;
pktlength = rxstatus & 0xffff;
@@ -491,28 +492,27 @@ static int tse_poll(struct napi_struct *napi, int budget)
struct altera_tse_private *priv =
container_of(napi, struct altera_tse_private, napi);
int rxcomplete = 0;
- int txcomplete = 0;
unsigned long int flags;
- txcomplete = tse_tx_complete(priv);
+ tse_tx_complete(priv);
rxcomplete = tse_rx(priv, budget);
- if (rxcomplete >= budget || txcomplete > 0)
- return rxcomplete;
+ if (rxcomplete < budget) {
- napi_gro_flush(napi, false);
- __napi_complete(napi);
+ napi_gro_flush(napi, false);
+ __napi_complete(napi);
- netdev_dbg(priv->dev,
- "NAPI Complete, did %d packets with budget %d\n",
- txcomplete+rxcomplete, budget);
+ netdev_dbg(priv->dev,
+ "NAPI Complete, did %d packets with budget %d\n",
+ rxcomplete, budget);
- spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
- priv->dmaops->enable_rxirq(priv);
- priv->dmaops->enable_txirq(priv);
- spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
- return rxcomplete + txcomplete;
+ spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
+ priv->dmaops->enable_rxirq(priv);
+ priv->dmaops->enable_txirq(priv);
+ spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
+ }
+ return rxcomplete;
}
/* DMA TX & RX FIFO interrupt routing
@@ -521,7 +521,6 @@ static irqreturn_t altera_isr(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct altera_tse_private *priv;
- unsigned long int flags;
if (unlikely(!dev)) {
pr_err("%s: invalid dev pointer\n", __func__);
@@ -529,20 +528,20 @@ static irqreturn_t altera_isr(int irq, void *dev_id)
}
priv = netdev_priv(dev);
- /* turn off desc irqs and enable napi rx */
- spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
+ spin_lock(&priv->rxdma_irq_lock);
+ /* reset IRQs */
+ priv->dmaops->clear_rxirq(priv);
+ priv->dmaops->clear_txirq(priv);
+ spin_unlock(&priv->rxdma_irq_lock);
if (likely(napi_schedule_prep(&priv->napi))) {
+ spin_lock(&priv->rxdma_irq_lock);
priv->dmaops->disable_rxirq(priv);
priv->dmaops->disable_txirq(priv);
+ spin_unlock(&priv->rxdma_irq_lock);
__napi_schedule(&priv->napi);
}
- /* reset IRQs */
- priv->dmaops->clear_rxirq(priv);
- priv->dmaops->clear_txirq(priv);
-
- spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
return IRQ_HANDLED;
}
@@ -1399,7 +1398,7 @@ static int altera_tse_probe(struct platform_device *pdev)
}
if (of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth",
- &priv->rx_fifo_depth)) {
+ &priv->tx_fifo_depth)) {
dev_err(&pdev->dev, "cannot obtain tx-fifo-depth\n");
ret = -ENXIO;
goto err_free_netdev;
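The reworked tse_poll() above follows the standard NAPI contract: consume at most 'budget' packets, and complete NAPI and re-enable interrupts only when the ring was drained with budget to spare. A minimal sketch of that contract, with hypothetical my_*() helpers standing in for the altera_tse functions:

/* NAPI poll contract: return how many packets were processed;
 * re-arm interrupts only when fewer than 'budget' were consumed. */
static int my_poll(struct napi_struct *napi, int budget)
{
        struct my_priv *priv = container_of(napi, struct my_priv, napi);
        int done = my_clean_rx(priv, budget);   /* bounded by budget */

        if (done < budget) {
                napi_complete(napi);            /* stop polling this ring */
                my_enable_irqs(priv);           /* hand control back to IRQs */
        }
        return done;
}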
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index b93d4404d975..885b02b5be07 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -609,6 +609,68 @@ static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
}
}
+static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ struct net_device *netdev = pdata->netdev;
+ unsigned int i;
+ int ret;
+
+ ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
+ netdev->name, pdata);
+ if (ret) {
+ netdev_alert(netdev, "error requesting irq %d\n",
+ pdata->dev_irq);
+ return ret;
+ }
+
+ if (!pdata->per_channel_irq)
+ return 0;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ snprintf(channel->dma_irq_name,
+ sizeof(channel->dma_irq_name) - 1,
+ "%s-TxRx-%u", netdev_name(netdev),
+ channel->queue_index);
+
+ ret = devm_request_irq(pdata->dev, channel->dma_irq,
+ xgbe_dma_isr, 0,
+ channel->dma_irq_name, channel);
+ if (ret) {
+ netdev_alert(netdev, "error requesting irq %d\n",
+ channel->dma_irq);
+ goto err_irq;
+ }
+ }
+
+ return 0;
+
+err_irq:
+ /* Using an unsigned int, 'i' will go to UINT_MAX and exit */
+ for (i--, channel--; i < pdata->channel_count; i--, channel--)
+ devm_free_irq(pdata->dev, channel->dma_irq, channel);
+
+ devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
+
+ return ret;
+}
+
+static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
+
+ if (!pdata->per_channel_irq)
+ return;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++)
+ devm_free_irq(pdata->dev, channel->dma_irq, channel);
+}
+
void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
{
struct xgbe_hw_if *hw_if = &pdata->hw_if;
@@ -810,20 +872,20 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
return -EINVAL;
}
- phy_stop(pdata->phydev);
-
spin_lock_irqsave(&pdata->lock, flags);
if (caller == XGMAC_DRIVER_CONTEXT)
netif_device_detach(netdev);
netif_tx_stop_all_queues(netdev);
- xgbe_napi_disable(pdata, 0);
- /* Powerdown Tx/Rx */
hw_if->powerdown_tx(pdata);
hw_if->powerdown_rx(pdata);
+ xgbe_napi_disable(pdata, 0);
+
+ phy_stop(pdata->phydev);
+
pdata->power_down = 1;
spin_unlock_irqrestore(&pdata->lock, flags);
@@ -854,14 +916,14 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
phy_start(pdata->phydev);
- /* Enable Tx/Rx */
+ xgbe_napi_enable(pdata, 0);
+
hw_if->powerup_tx(pdata);
hw_if->powerup_rx(pdata);
if (caller == XGMAC_DRIVER_CONTEXT)
netif_device_attach(netdev);
- xgbe_napi_enable(pdata, 0);
netif_tx_start_all_queues(netdev);
spin_unlock_irqrestore(&pdata->lock, flags);
@@ -875,6 +937,7 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
{
struct xgbe_hw_if *hw_if = &pdata->hw_if;
struct net_device *netdev = pdata->netdev;
+ int ret;
DBGPR("-->xgbe_start\n");
@@ -884,17 +947,31 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
phy_start(pdata->phydev);
+ xgbe_napi_enable(pdata, 1);
+
+ ret = xgbe_request_irqs(pdata);
+ if (ret)
+ goto err_napi;
+
hw_if->enable_tx(pdata);
hw_if->enable_rx(pdata);
xgbe_init_tx_timers(pdata);
- xgbe_napi_enable(pdata, 1);
netif_tx_start_all_queues(netdev);
DBGPR("<--xgbe_start\n");
return 0;
+
+err_napi:
+ xgbe_napi_disable(pdata, 1);
+
+ phy_stop(pdata->phydev);
+
+ hw_if->exit(pdata);
+
+ return ret;
}
static void xgbe_stop(struct xgbe_prv_data *pdata)
@@ -907,16 +984,21 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
DBGPR("-->xgbe_stop\n");
- phy_stop(pdata->phydev);
-
netif_tx_stop_all_queues(netdev);
- xgbe_napi_disable(pdata, 1);
xgbe_stop_tx_timers(pdata);
hw_if->disable_tx(pdata);
hw_if->disable_rx(pdata);
+ xgbe_free_irqs(pdata);
+
+ xgbe_napi_disable(pdata, 1);
+
+ phy_stop(pdata->phydev);
+
+ hw_if->exit(pdata);
+
channel = pdata->channel;
for (i = 0; i < pdata->channel_count; i++, channel++) {
if (!channel->tx_ring)
@@ -931,10 +1013,6 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
static void xgbe_restart_dev(struct xgbe_prv_data *pdata)
{
- struct xgbe_channel *channel;
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
- unsigned int i;
-
DBGPR("-->xgbe_restart_dev\n");
/* If not running, "restart" will happen on open */
@@ -942,19 +1020,10 @@ static void xgbe_restart_dev(struct xgbe_prv_data *pdata)
return;
xgbe_stop(pdata);
- synchronize_irq(pdata->dev_irq);
- if (pdata->per_channel_irq) {
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++)
- synchronize_irq(channel->dma_irq);
- }
xgbe_free_tx_data(pdata);
xgbe_free_rx_data(pdata);
- /* Issue software reset to device */
- hw_if->exit(pdata);
-
xgbe_start(pdata);
DBGPR("<--xgbe_restart_dev\n");
@@ -1283,10 +1352,7 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
static int xgbe_open(struct net_device *netdev)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
struct xgbe_desc_if *desc_if = &pdata->desc_if;
- struct xgbe_channel *channel = NULL;
- unsigned int i = 0;
int ret;
DBGPR("-->xgbe_open\n");
@@ -1329,55 +1395,14 @@ static int xgbe_open(struct net_device *netdev)
INIT_WORK(&pdata->restart_work, xgbe_restart);
INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);
- /* Request interrupts */
- ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
- netdev->name, pdata);
- if (ret) {
- netdev_alert(netdev, "error requesting irq %d\n",
- pdata->dev_irq);
- goto err_rings;
- }
-
- if (pdata->per_channel_irq) {
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- snprintf(channel->dma_irq_name,
- sizeof(channel->dma_irq_name) - 1,
- "%s-TxRx-%u", netdev_name(netdev),
- channel->queue_index);
-
- ret = devm_request_irq(pdata->dev, channel->dma_irq,
- xgbe_dma_isr, 0,
- channel->dma_irq_name, channel);
- if (ret) {
- netdev_alert(netdev,
- "error requesting irq %d\n",
- channel->dma_irq);
- goto err_irq;
- }
- }
- }
-
ret = xgbe_start(pdata);
if (ret)
- goto err_start;
+ goto err_rings;
DBGPR("<--xgbe_open\n");
return 0;
-err_start:
- hw_if->exit(pdata);
-
-err_irq:
- if (pdata->per_channel_irq) {
- /* Using an unsigned int, 'i' will go to UINT_MAX and exit */
- for (i--, channel--; i < pdata->channel_count; i--, channel--)
- devm_free_irq(pdata->dev, channel->dma_irq, channel);
- }
-
- devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
-
err_rings:
desc_if->free_ring_resources(pdata);
@@ -1399,30 +1424,16 @@ err_phy_init:
static int xgbe_close(struct net_device *netdev)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
struct xgbe_desc_if *desc_if = &pdata->desc_if;
- struct xgbe_channel *channel;
- unsigned int i;
DBGPR("-->xgbe_close\n");
/* Stop the device */
xgbe_stop(pdata);
- /* Issue software reset to device */
- hw_if->exit(pdata);
-
/* Free the ring descriptors and buffers */
desc_if->free_ring_resources(pdata);
- /* Release the interrupts */
- devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
- if (pdata->per_channel_irq) {
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++)
- devm_free_irq(pdata->dev, channel->dma_irq, channel);
- }
-
/* Free the channel and ring structures */
xgbe_free_channels(pdata);
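The err_irq unwind in xgbe_request_irqs() above leans on unsigned wraparound: after the failing channel, 'i' is decremented past zero, wraps to UINT_MAX, and the "i < pdata->channel_count" test terminates the loop. A standalone illustration of the idiom, compilable as plain C with hypothetical claim()/release() helpers:

#include <stdio.h>

static int claim(unsigned int i)    { return i == 3; }  /* pretend slot 3 fails */
static void release(unsigned int i) { printf("release %u\n", i); }

static int claim_all(unsigned int count)
{
        unsigned int i;

        for (i = 0; i < count; i++)
                if (claim(i))
                        goto err;
        return 0;

err:
        /* 'i' is unsigned: decrementing past 0 wraps to UINT_MAX,
         * which fails "i < count" and ends the unwind loop */
        for (i--; i < count; i--)
                release(i);
        return -1;
}

int main(void) { return claim_all(8) ? 1 : 0; }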
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index d6cda17efe6e..97842d03675b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -1103,7 +1103,7 @@ int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);
#define T4_MEMORY_WRITE 0
#define T4_MEMORY_READ 1
int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, u32 len,
- __be32 *buf, int dir);
+ void *buf, int dir);
static inline int t4_memory_write(struct adapter *adap, int mtype, u32 addr,
u32 len, __be32 *buf)
{
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 4d643b65265e..853c38997c82 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -449,7 +449,7 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
* @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
* @addr: address within indicated memory type
* @len: amount of memory to transfer
- * @buf: host memory buffer
+ * @hbuf: host memory buffer
* @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
*
* Reads/writes an [almost] arbitrary memory region in the firmware: the
@@ -460,15 +460,17 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
* caller's responsibility to perform appropriate byte order conversions.
*/
int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
- u32 len, __be32 *buf, int dir)
+ u32 len, void *hbuf, int dir)
{
u32 pos, offset, resid, memoffset;
u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base;
+ u32 *buf;
/* Argument sanity checks ...
*/
- if (addr & 0x3)
+ if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
return -EINVAL;
+ buf = (u32 *)hbuf;
/* It's convenient to be able to handle lengths which aren't a
* multiple of 32-bits because we often end up transferring files to
@@ -532,14 +534,45 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
/* Transfer data to/from the adapter as long as there's an integral
* number of 32-bit transfers to complete.
+ *
+ * A note on Endianness issues:
+ *
+ * The "register" reads and writes below from/to the PCI-E Memory
+ * Window invoke the standard adapter Big-Endian to PCI-E Link
+ * Little-Endian "swizzle." As a result, if we have the following
+ * data in adapter memory:
+ *
+ * Memory: ... | b0 | b1 | b2 | b3 | ...
+ * Address: i+0 i+1 i+2 i+3
+ *
+ * Then a read of the adapter memory via the PCI-E Memory Window
+ * will yield:
+ *
+ * x = readl(i)
+ * 31 0
+ * [ b3 | b2 | b1 | b0 ]
+ *
+ * If this value is stored into local memory on a Little-Endian system
+ * it will show up correctly in local memory as:
+ *
+ * ( ..., b0, b1, b2, b3, ... )
+ *
+ * But on a Big-Endian system, the store will show up in memory
+ * incorrectly swizzled as:
+ *
+ * ( ..., b3, b2, b1, b0, ... )
+ *
+ * So we need to account for this in the reads and writes to the
+ * PCI-E Memory Window below by undoing the register read/write
+ * swizzles.
*/
while (len > 0) {
if (dir == T4_MEMORY_READ)
- *buf++ = (__force __be32) t4_read_reg(adap,
- mem_base + offset);
+ *buf++ = le32_to_cpu((__force __le32)t4_read_reg(adap,
+ mem_base + offset));
else
t4_write_reg(adap, mem_base + offset,
- (__force u32) *buf++);
+ (__force u32)cpu_to_le32(*buf++));
offset += sizeof(__be32);
len -= sizeof(__be32);
@@ -568,15 +601,16 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
*/
if (resid) {
union {
- __be32 word;
+ u32 word;
char byte[4];
} last;
unsigned char *bp;
int i;
if (dir == T4_MEMORY_READ) {
- last.word = (__force __be32) t4_read_reg(adap,
- mem_base + offset);
+ last.word = le32_to_cpu(
+ (__force __le32)t4_read_reg(adap,
+ mem_base + offset));
for (bp = (unsigned char *)buf, i = resid; i < 4; i++)
bp[i] = last.byte[i];
} else {
@@ -584,7 +618,7 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
for (i = resid; i < 4; i++)
last.byte[i] = 0;
t4_write_reg(adap, mem_base + offset,
- (__force u32) last.word);
+ (__force u32)cpu_to_le32(last.word));
}
}
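The endianness comment above reduces to one rule: the value readl() returns through the memory window holds adapter byte i+0 in its least significant byte, so applying le32_to_cpu() before a native store (and cpu_to_le32() before a write) keeps the byte order correct on both little- and big-endian hosts. A user-space sketch of the same invariant, assuming glibc's <endian.h> analogues:

#include <endian.h>     /* le32toh(): user-space analogue of le32_to_cpu() */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        /* Pretend readl() returned this: adapter bytes b0..b3 were
         * 0xaa 0xbb 0xcc 0xdd, so b0 sits in bits 7:0 of the value. */
        uint32_t reg = 0xddccbbaa;
        uint32_t host = le32toh(reg);   /* no-op on LE, byte swap on BE */
        uint8_t mem[4];

        memcpy(mem, &host, sizeof(host));       /* native store, like "*buf++ = ..." */
        /* Prints "aa bb cc dd" on both LE and BE hosts */
        printf("%02x %02x %02x %02x\n", mem[0], mem[1], mem[2], mem[3]);
        return 0;
}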
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 9cbe038a388e..a5179bfcdc2c 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -272,8 +272,8 @@ static irqreturn_t enic_isr_legacy(int irq, void *data)
}
if (ENIC_TEST_INTR(pba, notify_intr)) {
- vnic_intr_return_all_credits(&enic->intr[notify_intr]);
enic_notify_check(enic);
+ vnic_intr_return_all_credits(&enic->intr[notify_intr]);
}
if (ENIC_TEST_INTR(pba, err_intr)) {
@@ -346,8 +346,8 @@ static irqreturn_t enic_isr_msix_notify(int irq, void *data)
struct enic *enic = data;
unsigned int intr = enic_msix_notify_intr(enic);
- vnic_intr_return_all_credits(&enic->intr[intr]);
enic_notify_check(enic);
+ vnic_intr_return_all_credits(&enic->intr[intr]);
return IRQ_HANDLED;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 11a9ffebf8d8..6aea65dae5ed 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -868,8 +868,9 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)
* The grst delay value is in 100ms units, and we'll wait a
* couple counts longer to be sure we don't just miss the end.
*/
- grst_del = rd32(hw, I40E_GLGEN_RSTCTL) & I40E_GLGEN_RSTCTL_GRSTDEL_MASK
- >> I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
+ grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
+ I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
+ I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
for (cnt = 0; cnt < grst_del + 2; cnt++) {
reg = rd32(hw, I40E_GLGEN_RSTAT);
if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
@@ -2846,7 +2847,7 @@ i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
- if (!status)
+ if (!status && filter_index)
*filter_index = resp->index;
return status;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
index 183dcb63ce98..a11c70ca5a28 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
@@ -40,7 +40,7 @@ static void i40e_get_pfc_delay(struct i40e_hw *hw, u16 *delay)
u32 val;
val = rd32(hw, I40E_PRTDCB_GENC);
- *delay = (u16)(val & I40E_PRTDCB_GENC_PFCLDA_MASK >>
+ *delay = (u16)((val & I40E_PRTDCB_GENC_PFCLDA_MASK) >>
I40E_PRTDCB_GENC_PFCLDA_SHIFT);
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 61236f983971..c17ee77100d3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -989,8 +989,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
if (!cmd_buf)
return count;
bytes_not_copied = copy_from_user(cmd_buf, buffer, count);
- if (bytes_not_copied < 0)
+ if (bytes_not_copied < 0) {
+ kfree(cmd_buf);
return bytes_not_copied;
+ }
if (bytes_not_copied > 0)
count -= bytes_not_copied;
cmd_buf[count] = '\0';
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index cbe281be1c9f..dadda3c5d658 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1512,7 +1512,12 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
vsi->tc_config.numtc = numtc;
vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
/* Number of queues per enabled TC */
- num_tc_qps = vsi->alloc_queue_pairs/numtc;
+ /* In MFP case we can have a much lower count of MSIx
+ * vectors available and so we need to lower the used
+ * q count.
+ */
+ qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix);
+ num_tc_qps = qcount / numtc;
num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC);
/* Setup queue offset/count for all TCs for given VSI */
@@ -2684,8 +2689,15 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
u16 qoffset, qcount;
int i, n;
- if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED))
- return;
+ if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
+ /* Reset the TC information */
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ rx_ring = vsi->rx_rings[i];
+ tx_ring = vsi->tx_rings[i];
+ rx_ring->dcb_tc = 0;
+ tx_ring->dcb_tc = 0;
+ }
+ }
for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
if (!(vsi->tc_config.enabled_tc & (1 << n)))
@@ -3830,6 +3842,12 @@ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
{
int i;
+ i40e_stop_misc_vector(pf);
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ synchronize_irq(pf->msix_entries[0].vector);
+ free_irq(pf->msix_entries[0].vector, pf);
+ }
+
i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
for (i = 0; i < pf->num_alloc_vsi; i++)
if (pf->vsi[i])
@@ -5254,8 +5272,14 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
/* Wait for the PF's Tx queues to be disabled */
ret = i40e_pf_wait_txq_disabled(pf);
- if (!ret)
+ if (ret) {
+ /* Schedule PF reset to recover */
+ set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+ i40e_service_event_schedule(pf);
+ } else {
i40e_pf_unquiesce_all_vsi(pf);
+ }
+
exit:
return ret;
}
@@ -5587,7 +5611,8 @@ static void i40e_check_hang_subtask(struct i40e_pf *pf)
int i, v;
/* If we're down or resetting, just bail */
- if (test_bit(__I40E_CONFIG_BUSY, &pf->state))
+ if (test_bit(__I40E_DOWN, &pf->state) ||
+ test_bit(__I40E_CONFIG_BUSY, &pf->state))
return;
/* for each VSI/netdev
@@ -9533,6 +9558,7 @@ static void i40e_remove(struct pci_dev *pdev)
set_bit(__I40E_DOWN, &pf->state);
del_timer_sync(&pf->service_timer);
cancel_work_sync(&pf->service_task);
+ i40e_fdir_teardown(pf);
if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
i40e_free_vfs(pf);
@@ -9559,12 +9585,6 @@ static void i40e_remove(struct pci_dev *pdev)
if (pf->vsi[pf->lan_vsi])
i40e_vsi_release(pf->vsi[pf->lan_vsi]);
- i40e_stop_misc_vector(pf);
- if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
- synchronize_irq(pf->msix_entries[0].vector);
- free_irq(pf->msix_entries[0].vector, pf);
- }
-
/* shutdown and destroy the HMC */
if (pf->hw.hmc.hmc_obj) {
ret_code = i40e_shutdown_lan_hmc(&pf->hw);
@@ -9718,6 +9738,8 @@ static void i40e_shutdown(struct pci_dev *pdev)
wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
+ i40e_clear_interrupt_scheme(pf);
+
if (system_state == SYSTEM_POWER_OFF) {
pci_wake_from_d3(pdev, pf->wol_en);
pci_set_power_state(pdev, PCI_D3hot);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 3e70f2e45a47..5defe0d63514 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -679,9 +679,11 @@ static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
{
i40e_status status;
enum i40e_nvmupd_cmd upd_cmd;
+ bool retry_attempt = false;
upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno);
+retry:
switch (upd_cmd) {
case I40E_NVMUPD_WRITE_CON:
status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
@@ -725,6 +727,39 @@ static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
*errno = -ESRCH;
break;
}
+
+ /* In some circumstances, a multi-write transaction takes longer
+ * than the default 3 minute timeout on the write semaphore. If
+ * the write failed with an EBUSY status, this is likely the problem,
+ * so here we try to reacquire the semaphore then retry the write.
+ * We only do one retry, then give up.
+ */
+ if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
+ !retry_attempt) {
+ i40e_status old_status = status;
+ u32 old_asq_status = hw->aq.asq_last_status;
+ u32 gtime;
+
+ gtime = rd32(hw, I40E_GLVFGEN_TIMER);
+ if (gtime >= hw->nvm.hw_semaphore_timeout) {
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "NVMUPD: write semaphore expired (%d >= %lld), retrying\n",
+ gtime, hw->nvm.hw_semaphore_timeout);
+ i40e_release_nvm(hw);
+ status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "NVMUPD: write semaphore reacquire failed aq_err = %d\n",
+ hw->aq.asq_last_status);
+ status = old_status;
+ hw->aq.asq_last_status = old_asq_status;
+ } else {
+ retry_attempt = true;
+ goto retry;
+ }
+ }
+ }
+
return status;
}
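The retry added above is deliberately bounded: a retry_attempt flag plus a goto gives exactly one extra pass, and only when the failure was EBUSY and the write semaphore had genuinely timed out. The shape of that pattern in isolation, with hypothetical helpers standing in for the NVM calls:

#include <stdbool.h>

/* Stand-ins for illustration only. */
static int do_write(void)      { return -16; }  /* pretend EBUSY */
static bool lock_expired(void) { return true; }
static int reacquire(void)     { return 0; }

static int write_with_one_retry(void)
{
        bool retried = false;
        int ret;

retry:
        ret = do_write();
        if (ret && !retried && lock_expired() && reacquire() == 0) {
                retried = true;         /* bound: at most one retry */
                goto retry;
        }
        return ret;
}

int main(void) { return write_with_one_retry() ? 1 : 0; }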
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 2206d2d36f0f..bbf1b1247ac4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -586,6 +586,20 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring)
}
/**
+ * i40e_get_head - Retrieve head from head writeback
+ * @tx_ring: tx ring to fetch head of
+ *
+ * Returns value of Tx ring head based on value stored
+ * in head write-back location
+ **/
+static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
+{
+ void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
+
+ return le32_to_cpu(*(volatile __le32 *)head);
+}
+
+/**
* i40e_get_tx_pending - how many tx descriptors not processed
* @tx_ring: the ring of descriptors
*
@@ -594,10 +608,16 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring)
**/
static u32 i40e_get_tx_pending(struct i40e_ring *ring)
{
- u32 ntu = ((ring->next_to_clean <= ring->next_to_use)
- ? ring->next_to_use
- : ring->next_to_use + ring->count);
- return ntu - ring->next_to_clean;
+ u32 head, tail;
+
+ head = i40e_get_head(ring);
+ tail = readl(ring->tail);
+
+ if (head != tail)
+ return (head < tail) ?
+ tail - head : (tail + ring->count - head);
+
+ return 0;
}
/**
@@ -606,6 +626,8 @@ static u32 i40e_get_tx_pending(struct i40e_ring *ring)
**/
static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
{
+ u32 tx_done = tx_ring->stats.packets;
+ u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
u32 tx_pending = i40e_get_tx_pending(tx_ring);
struct i40e_pf *pf = tx_ring->vsi->back;
bool ret = false;
@@ -623,41 +645,25 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
* run the check_tx_hang logic with a transmit completion
* pending but without time to complete it yet.
*/
- if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
- (tx_pending >= I40E_MIN_DESC_PENDING)) {
+ if ((tx_done_old == tx_done) && tx_pending) {
/* make sure it is true for two checks in a row */
ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
&tx_ring->state);
- } else if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
- (tx_pending < I40E_MIN_DESC_PENDING) &&
- (tx_pending > 0)) {
+ } else if (tx_done_old == tx_done &&
+ (tx_pending < I40E_MIN_DESC_PENDING) && (tx_pending > 0)) {
if (I40E_DEBUG_FLOW & pf->hw.debug_mask)
dev_info(tx_ring->dev, "HW needs some more descs to do a cacheline flush. tx_pending %d, queue %d",
tx_pending, tx_ring->queue_index);
pf->tx_sluggish_count++;
} else {
/* update completed stats and disarm the hang check */
- tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets;
+ tx_ring->tx_stats.tx_done_old = tx_done;
clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
}
return ret;
}
-/**
- * i40e_get_head - Retrieve head from head writeback
- * @tx_ring: tx ring to fetch head of
- *
- * Returns value of Tx ring head based on value stored
- * in head write-back location
- **/
-static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
-{
- void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
-
- return le32_to_cpu(*(volatile __le32 *)head);
-}
-
#define WB_STRIDE 0x3
/**
@@ -2140,6 +2146,67 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
}
/**
+ * i40e_chk_linearize - Check if there are more than 8 fragments per packet
+ * @skb: send buffer
+ * @tx_flags: collected send information
+ * @hdr_len: size of the packet header
+ *
+ * Note: Our HW can't scatter-gather more than 8 fragments to build
+ * a packet on the wire and so we need to figure out the cases where we
+ * need to linearize the skb.
+ **/
+static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
+ const u8 hdr_len)
+{
+ struct skb_frag_struct *frag;
+ bool linearize = false;
+ unsigned int size = 0;
+ u16 num_frags;
+ u16 gso_segs;
+
+ num_frags = skb_shinfo(skb)->nr_frags;
+ gso_segs = skb_shinfo(skb)->gso_segs;
+
+ if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
+ u16 j = 1;
+
+ if (num_frags < (I40E_MAX_BUFFER_TXD))
+ goto linearize_chk_done;
+ /* try the simple math, if we have too many frags per segment */
+ if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) >
+ I40E_MAX_BUFFER_TXD) {
+ linearize = true;
+ goto linearize_chk_done;
+ }
+ frag = &skb_shinfo(skb)->frags[0];
+ size = hdr_len;
+ /* we might still have more fragments per segment */
+ do {
+ size += skb_frag_size(frag);
+ frag++; j++;
+ if (j == I40E_MAX_BUFFER_TXD) {
+ if (size < skb_shinfo(skb)->gso_size) {
+ linearize = true;
+ break;
+ }
+ j = 1;
+ size -= skb_shinfo(skb)->gso_size;
+ if (size)
+ j++;
+ size += hdr_len;
+ }
+ num_frags--;
+ } while (num_frags);
+ } else {
+ if (num_frags >= I40E_MAX_BUFFER_TXD)
+ linearize = true;
+ }
+
+linearize_chk_done:
+ return linearize;
+}
+
+/**
* i40e_tx_map - Build the Tx descriptor
* @tx_ring: ring to send buffer on
* @skb: send buffer
@@ -2396,6 +2463,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
if (tsyn)
tx_flags |= I40E_TX_FLAGS_TSYN;
+ if (i40e_chk_linearize(skb, tx_flags, hdr_len))
+ if (skb_linearize(skb))
+ goto out_drop;
+
skb_tx_timestamp(skb);
/* always enable CRC insertion offload */
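The new i40e_get_tx_pending() above counts outstanding descriptors from the hardware's head write-back rather than the driver's next_to_clean/next_to_use bookkeeping; the only subtlety is handling the tail wrapping past the end of the ring. That arithmetic in isolation:

#include <stdio.h>

/* Descriptors between the NIC's reported 'head' and software's 'tail'
 * on a ring of 'count' slots; mirrors the patched i40e_get_tx_pending(). */
static unsigned int ring_pending(unsigned int head, unsigned int tail,
                                 unsigned int count)
{
        if (head == tail)
                return 0;
        return head < tail ? tail - head : tail + count - head;
}

int main(void)
{
        printf("%u\n", ring_pending(2, 10, 512));       /* 8: no wrap */
        printf("%u\n", ring_pending(510, 4, 512));      /* 6: tail wrapped */
        return 0;
}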
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 18b00231d2f1..dff0baeb1ecc 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -112,6 +112,7 @@ enum i40e_dyn_idx_t {
#define i40e_rx_desc i40e_32byte_rx_desc
+#define I40E_MAX_BUFFER_TXD 8
#define I40E_MIN_TX_LEN 17
#define I40E_MAX_DATA_PER_TXD 8192
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 29004382f462..708891571dae 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -126,6 +126,20 @@ void i40evf_free_tx_resources(struct i40e_ring *tx_ring)
}
/**
+ * i40e_get_head - Retrieve head from head writeback
+ * @tx_ring: tx ring to fetch head of
+ *
+ * Returns value of Tx ring head based on value stored
+ * in head write-back location
+ **/
+static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
+{
+ void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
+
+ return le32_to_cpu(*(volatile __le32 *)head);
+}
+
+/**
* i40e_get_tx_pending - how many tx descriptors not processed
* @tx_ring: the ring of descriptors
*
@@ -134,10 +148,16 @@ void i40evf_free_tx_resources(struct i40e_ring *tx_ring)
**/
static u32 i40e_get_tx_pending(struct i40e_ring *ring)
{
- u32 ntu = ((ring->next_to_clean <= ring->next_to_use)
- ? ring->next_to_use
- : ring->next_to_use + ring->count);
- return ntu - ring->next_to_clean;
+ u32 head, tail;
+
+ head = i40e_get_head(ring);
+ tail = readl(ring->tail);
+
+ if (head != tail)
+ return (head < tail) ?
+ tail - head : (tail + ring->count - head);
+
+ return 0;
}
/**
@@ -146,6 +166,8 @@ static u32 i40e_get_tx_pending(struct i40e_ring *ring)
**/
static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
{
+ u32 tx_done = tx_ring->stats.packets;
+ u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
u32 tx_pending = i40e_get_tx_pending(tx_ring);
bool ret = false;
@@ -162,36 +184,20 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
* run the check_tx_hang logic with a transmit completion
* pending but without time to complete it yet.
*/
- if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
- (tx_pending >= I40E_MIN_DESC_PENDING)) {
+ if ((tx_done_old == tx_done) && tx_pending) {
/* make sure it is true for two checks in a row */
ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
&tx_ring->state);
- } else if (!(tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) ||
- !(tx_pending < I40E_MIN_DESC_PENDING) ||
- !(tx_pending > 0)) {
+ } else if (tx_done_old == tx_done &&
+ (tx_pending < I40E_MIN_DESC_PENDING) && (tx_pending > 0)) {
/* update completed stats and disarm the hang check */
- tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets;
+ tx_ring->tx_stats.tx_done_old = tx_done;
clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
}
return ret;
}
-/**
- * i40e_get_head - Retrieve head from head writeback
- * @tx_ring: tx ring to fetch head of
- *
- * Returns value of Tx ring head based on value stored
- * in head write-back location
- **/
-static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
-{
- void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
-
- return le32_to_cpu(*(volatile __le32 *)head);
-}
-
#define WB_STRIDE 0x3
/**
@@ -1206,17 +1212,16 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
if (err < 0)
return err;
- if (protocol == htons(ETH_P_IP)) {
- iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+ iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+ ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
+
+ if (iph->version == 4) {
tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
iph->tot_len = 0;
iph->check = 0;
tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
0, IPPROTO_TCP, 0);
- } else if (skb_is_gso_v6(skb)) {
-
- ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb)
- : ipv6_hdr(skb);
+ } else if (ipv6h->version == 6) {
tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
ipv6h->payload_len = 0;
tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
@@ -1274,13 +1279,9 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
}
} else if (tx_flags & I40E_TX_FLAGS_IPV6) {
- if (tx_flags & I40E_TX_FLAGS_TSO) {
- *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+ *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+ if (tx_flags & I40E_TX_FLAGS_TSO)
ip_hdr(skb)->check = 0;
- } else {
- *cd_tunneling |=
- I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
- }
}
/* Now set the ctx descriptor fields */
@@ -1290,6 +1291,11 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
((skb_inner_network_offset(skb) -
skb_transport_offset(skb)) >> 1) <<
I40E_TXD_CTX_QW0_NATLEN_SHIFT;
+ if (this_ip_hdr->version == 6) {
+ tx_flags &= ~I40E_TX_FLAGS_IPV4;
+ tx_flags |= I40E_TX_FLAGS_IPV6;
+ }
+
} else {
network_hdr_len = skb_network_header_len(skb);
@@ -1380,6 +1386,67 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
}
+ /**
+ * i40e_chk_linearize - Check if there are more than 8 fragments per packet
+ * @skb: send buffer
+ * @tx_flags: collected send information
+ * @hdr_len: size of the packet header
+ *
+ * Note: Our HW can't scatter-gather more than 8 fragments to build
+ * a packet on the wire and so we need to figure out the cases where we
+ * need to linearize the skb.
+ **/
+static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
+ const u8 hdr_len)
+{
+ struct skb_frag_struct *frag;
+ bool linearize = false;
+ unsigned int size = 0;
+ u16 num_frags;
+ u16 gso_segs;
+
+ num_frags = skb_shinfo(skb)->nr_frags;
+ gso_segs = skb_shinfo(skb)->gso_segs;
+
+ if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
+ u16 j = 1;
+
+ if (num_frags < (I40E_MAX_BUFFER_TXD))
+ goto linearize_chk_done;
+ /* try the simple math, if we have too many frags per segment */
+ if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) >
+ I40E_MAX_BUFFER_TXD) {
+ linearize = true;
+ goto linearize_chk_done;
+ }
+ frag = &skb_shinfo(skb)->frags[0];
+ size = hdr_len;
+ /* we might still have more fragments per segment */
+ do {
+ size += skb_frag_size(frag);
+ frag++; j++;
+ if (j == I40E_MAX_BUFFER_TXD) {
+ if (size < skb_shinfo(skb)->gso_size) {
+ linearize = true;
+ break;
+ }
+ j = 1;
+ size -= skb_shinfo(skb)->gso_size;
+ if (size)
+ j++;
+ size += hdr_len;
+ }
+ num_frags--;
+ } while (num_frags);
+ } else {
+ if (num_frags >= I40E_MAX_BUFFER_TXD)
+ linearize = true;
+ }
+
+linearize_chk_done:
+ return linearize;
+}
+
/**
* i40e_tx_map - Build the Tx descriptor
* @tx_ring: ring to send buffer on
@@ -1654,6 +1721,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
else if (tso)
tx_flags |= I40E_TX_FLAGS_TSO;
+ if (i40e_chk_linearize(skb, tx_flags, hdr_len))
+ if (skb_linearize(skb))
+ goto out_drop;
+
skb_tx_timestamp(skb);
/* always enable CRC insertion offload */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index 4e15903b2b6d..c950a038237c 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -112,6 +112,7 @@ enum i40e_dyn_idx_t {
#define i40e_rx_desc i40e_32byte_rx_desc
+#define I40E_MAX_BUFFER_TXD 8
#define I40E_MIN_TX_LEN 17
#define I40E_MAX_DATA_PER_TXD 8192
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index b1560927abd4..c70ab40d8698 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -2561,7 +2561,7 @@ static int rtl_check_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
int rc = -EINVAL;
if (!rtl_fw_format_ok(tp, rtl_fw)) {
- netif_err(tp, ifup, dev, "invalid firwmare\n");
+ netif_err(tp, ifup, dev, "invalid firmware\n");
goto out;
}
diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
index 34389b6aa67c..9fb6948e14c6 100644
--- a/drivers/net/ethernet/rocker/rocker.c
+++ b/drivers/net/ethernet/rocker/rocker.c
@@ -1257,9 +1257,9 @@ static void rocker_port_set_enable(struct rocker_port *rocker_port, bool enable)
u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);
if (enable)
- val |= 1 << rocker_port->lport;
+ val |= 1ULL << rocker_port->lport;
else
- val &= ~(1 << rocker_port->lport);
+ val &= ~(1ULL << rocker_port->lport);
rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val);
}
@@ -4201,6 +4201,8 @@ static int rocker_probe_ports(struct rocker *rocker)
alloc_size = sizeof(struct rocker_port *) * rocker->port_count;
rocker->ports = kmalloc(alloc_size, GFP_KERNEL);
+ if (!rocker->ports)
+ return -ENOMEM;
for (i = 0; i < rocker->port_count; i++) {
err = rocker_probe_port(rocker, i);
if (err)
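The 1ULL fix in rocker_port_set_enable() above matters because PORT_PHYS_ENABLE is a 64-bit mask: with an int constant, 1 << lport overflows once lport reaches 31 and can never set bits 32-63, and the int result is sign-extended when widened to u64. A quick demonstration with a hypothetical port number:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        unsigned int lport = 31;
        uint64_t bad  = 1 << lport;     /* int shift: overflows at bit 31, sign-extends */
        uint64_t good = 1ULL << lport;  /* 64-bit shift, as the patch uses */

        printf("bad  = %#llx\n", (unsigned long long)bad);      /* 0xffffffff80000000 (typical) */
        printf("good = %#llx\n", (unsigned long long)good);     /* 0x80000000 */
        return 0;
}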
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index fa3f193b5f4d..209ee1b27f8d 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -91,6 +91,10 @@ static const char version[] =
#include "smc91x.h"
+#if defined(CONFIG_ASSABET_NEPONSET)
+#include <mach/neponset.h>
+#endif
+
#ifndef SMC_NOWAIT
# define SMC_NOWAIT 0
#endif
@@ -2355,8 +2359,9 @@ static int smc_drv_probe(struct platform_device *pdev)
ret = smc_request_attrib(pdev, ndev);
if (ret)
goto out_release_io;
-#if defined(CONFIG_ASSABET_NEPONSET) && !defined(CONFIG_SA1100_PLEB)
- neponset_ncr_set(NCR_ENET_OSC_EN);
+#if defined(CONFIG_ASSABET_NEPONSET)
+ if (machine_is_assabet() && machine_has_neponset())
+ neponset_ncr_set(NCR_ENET_OSC_EN);
#endif
platform_set_drvdata(pdev, ndev);
ret = smc_enable_device(pdev);
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index be67baf5f677..3a18501d1068 100644
--- a/drivers/net/ethernet/smsc/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -39,14 +39,7 @@
* Define your architecture specific bus configuration parameters here.
*/
-#if defined(CONFIG_ARCH_LUBBOCK) ||\
- defined(CONFIG_MACH_MAINSTONE) ||\
- defined(CONFIG_MACH_ZYLONITE) ||\
- defined(CONFIG_MACH_LITTLETON) ||\
- defined(CONFIG_MACH_ZYLONITE2) ||\
- defined(CONFIG_ARCH_VIPER) ||\
- defined(CONFIG_MACH_STARGATE2) ||\
- defined(CONFIG_ARCH_VERSATILE)
+#if defined(CONFIG_ARM)
#include <asm/mach-types.h>
@@ -74,95 +67,8 @@
/* We actually can't write halfwords properly if not word aligned */
static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg)
{
- if ((machine_is_mainstone() || machine_is_stargate2()) && reg & 2) {
- unsigned int v = val << 16;
- v |= readl(ioaddr + (reg & ~2)) & 0xffff;
- writel(v, ioaddr + (reg & ~2));
- } else {
- writew(val, ioaddr + reg);
- }
-}
-
-#elif defined(CONFIG_SA1100_PLEB)
-/* We can only do 16-bit reads and writes in the static memory space. */
-#define SMC_CAN_USE_8BIT 1
-#define SMC_CAN_USE_16BIT 1
-#define SMC_CAN_USE_32BIT 0
-#define SMC_IO_SHIFT 0
-#define SMC_NOWAIT 1
-
-#define SMC_inb(a, r) readb((a) + (r))
-#define SMC_insb(a, r, p, l) readsb((a) + (r), p, (l))
-#define SMC_inw(a, r) readw((a) + (r))
-#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
-#define SMC_outb(v, a, r) writeb(v, (a) + (r))
-#define SMC_outsb(a, r, p, l) writesb((a) + (r), p, (l))
-#define SMC_outw(v, a, r) writew(v, (a) + (r))
-#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
-
-#define SMC_IRQ_FLAGS (-1)
-
-#elif defined(CONFIG_SA1100_ASSABET)
-
-#include <mach/neponset.h>
-
-/* We can only do 8-bit reads and writes in the static memory space. */
-#define SMC_CAN_USE_8BIT 1
-#define SMC_CAN_USE_16BIT 0
-#define SMC_CAN_USE_32BIT 0
-#define SMC_NOWAIT 1
-
-/* The first two address lines aren't connected... */
-#define SMC_IO_SHIFT 2
-
-#define SMC_inb(a, r) readb((a) + (r))
-#define SMC_outb(v, a, r) writeb(v, (a) + (r))
-#define SMC_insb(a, r, p, l) readsb((a) + (r), p, (l))
-#define SMC_outsb(a, r, p, l) writesb((a) + (r), p, (l))
-#define SMC_IRQ_FLAGS (-1) /* from resource */
-
-#elif defined(CONFIG_MACH_LOGICPD_PXA270) || \
- defined(CONFIG_MACH_NOMADIK_8815NHK)
-
-#define SMC_CAN_USE_8BIT 0
-#define SMC_CAN_USE_16BIT 1
-#define SMC_CAN_USE_32BIT 0
-#define SMC_IO_SHIFT 0
-#define SMC_NOWAIT 1
-
-#define SMC_inw(a, r) readw((a) + (r))
-#define SMC_outw(v, a, r) writew(v, (a) + (r))
-#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
-#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
-
-#elif defined(CONFIG_ARCH_INNOKOM) || \
- defined(CONFIG_ARCH_PXA_IDP) || \
- defined(CONFIG_ARCH_RAMSES) || \
- defined(CONFIG_ARCH_PCM027)
-
-#define SMC_CAN_USE_8BIT 1
-#define SMC_CAN_USE_16BIT 1
-#define SMC_CAN_USE_32BIT 1
-#define SMC_IO_SHIFT 0
-#define SMC_NOWAIT 1
-#define SMC_USE_PXA_DMA 1
-
-#define SMC_inb(a, r) readb((a) + (r))
-#define SMC_inw(a, r) readw((a) + (r))
-#define SMC_inl(a, r) readl((a) + (r))
-#define SMC_outb(v, a, r) writeb(v, (a) + (r))
-#define SMC_outl(v, a, r) writel(v, (a) + (r))
-#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
-#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
-#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
-#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
-#define SMC_IRQ_FLAGS (-1) /* from resource */
-
-/* We actually can't write halfwords properly if not word aligned */
-static inline void
-SMC_outw(u16 val, void __iomem *ioaddr, int reg)
-{
- if (reg & 2) {
+ if ((machine_is_mainstone() || machine_is_stargate2() ||
+ machine_is_pxa_idp()) && reg & 2) {
unsigned int v = val << 16;
v |= readl(ioaddr + (reg & ~2)) & 0xffff;
writel(v, ioaddr + (reg & ~2));
@@ -237,20 +143,6 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg)
#define RPC_LSA_DEFAULT RPC_LED_100_10
#define RPC_LSB_DEFAULT RPC_LED_TX_RX
-#elif defined(CONFIG_ARCH_MSM)
-
-#define SMC_CAN_USE_8BIT 0
-#define SMC_CAN_USE_16BIT 1
-#define SMC_CAN_USE_32BIT 0
-#define SMC_NOWAIT 1
-
-#define SMC_inw(a, r) readw((a) + (r))
-#define SMC_outw(v, a, r) writew(v, (a) + (r))
-#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
-#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
-
-#define SMC_IRQ_FLAGS IRQF_TRIGGER_HIGH
-
#elif defined(CONFIG_COLDFIRE)
#define SMC_CAN_USE_8BIT 0
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 4b51f903fb73..0c5842aeb807 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -6989,10 +6989,10 @@ static int niu_class_to_ethflow(u64 class, int *flow_type)
*flow_type = IP_USER_FLOW;
break;
default:
- return 0;
+ return -EINVAL;
}
- return 1;
+ return 0;
}
static int niu_ethflow_to_class(int flow_type, u64 *class)
@@ -7198,11 +7198,9 @@ static int niu_get_ethtool_tcam_entry(struct niu *np,
class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >>
TCAM_V4KEY0_CLASS_CODE_SHIFT;
ret = niu_class_to_ethflow(class, &fsp->flow_type);
-
if (ret < 0) {
netdev_info(np->dev, "niu%d: niu_class_to_ethflow failed\n",
parent->index);
- ret = -EINVAL;
goto out;
}
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 7d8dd0d2182e..49b03368f4b9 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1103,7 +1103,7 @@ static inline void cpsw_add_dual_emac_def_ale_entries(
cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
port_mask, ALE_VLAN, slave->port_vlan, 0);
cpsw_ale_add_ucast(priv->ale, priv->mac_addr,
- priv->host_port, ALE_VLAN, slave->port_vlan);
+ priv->host_port, ALE_VLAN | ALE_SECURE, slave->port_vlan);
}
static void soft_reset_slave(struct cpsw_slave *slave)
diff --git a/drivers/net/phy/amd-xgbe-phy.c b/drivers/net/phy/amd-xgbe-phy.c
index 9e3af54c9010..32efbd48f326 100644
--- a/drivers/net/phy/amd-xgbe-phy.c
+++ b/drivers/net/phy/amd-xgbe-phy.c
@@ -92,6 +92,8 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
#define XGBE_PHY_CDR_RATE_PROPERTY "amd,serdes-cdr-rate"
#define XGBE_PHY_PQ_SKEW_PROPERTY "amd,serdes-pq-skew"
#define XGBE_PHY_TX_AMP_PROPERTY "amd,serdes-tx-amp"
+#define XGBE_PHY_DFE_CFG_PROPERTY "amd,serdes-dfe-tap-config"
+#define XGBE_PHY_DFE_ENA_PROPERTY "amd,serdes-dfe-tap-enable"
#define XGBE_PHY_SPEEDS 3
#define XGBE_PHY_SPEED_1000 0
@@ -177,10 +179,12 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
#define SPEED_10000_BLWC 0
#define SPEED_10000_CDR 0x7
#define SPEED_10000_PLL 0x1
-#define SPEED_10000_PQ 0x1e
+#define SPEED_10000_PQ 0x12
#define SPEED_10000_RATE 0x0
#define SPEED_10000_TXAMP 0xa
#define SPEED_10000_WORD 0x7
+#define SPEED_10000_DFE_TAP_CONFIG 0x1
+#define SPEED_10000_DFE_TAP_ENABLE 0x7f
#define SPEED_2500_BLWC 1
#define SPEED_2500_CDR 0x2
@@ -189,6 +193,8 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
#define SPEED_2500_RATE 0x1
#define SPEED_2500_TXAMP 0xf
#define SPEED_2500_WORD 0x1
+#define SPEED_2500_DFE_TAP_CONFIG 0x3
+#define SPEED_2500_DFE_TAP_ENABLE 0x0
#define SPEED_1000_BLWC 1
#define SPEED_1000_CDR 0x2
@@ -197,16 +203,25 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
#define SPEED_1000_RATE 0x3
#define SPEED_1000_TXAMP 0xf
#define SPEED_1000_WORD 0x1
+#define SPEED_1000_DFE_TAP_CONFIG 0x3
+#define SPEED_1000_DFE_TAP_ENABLE 0x0
/* SerDes RxTx register offsets */
+#define RXTX_REG6 0x0018
#define RXTX_REG20 0x0050
+#define RXTX_REG22 0x0058
#define RXTX_REG114 0x01c8
+#define RXTX_REG129 0x0204
/* SerDes RxTx register entry bit positions and sizes */
+#define RXTX_REG6_RESETB_RXD_INDEX 8
+#define RXTX_REG6_RESETB_RXD_WIDTH 1
#define RXTX_REG20_BLWC_ENA_INDEX 2
#define RXTX_REG20_BLWC_ENA_WIDTH 1
#define RXTX_REG114_PQ_REG_INDEX 9
#define RXTX_REG114_PQ_REG_WIDTH 7
+#define RXTX_REG129_RXDFE_CONFIG_INDEX 14
+#define RXTX_REG129_RXDFE_CONFIG_WIDTH 2
/* Bit setting and getting macros
* The get macro will extract the current bit field value from within
@@ -333,6 +348,18 @@ static const u32 amd_xgbe_phy_serdes_tx_amp[] = {
SPEED_10000_TXAMP,
};
+static const u32 amd_xgbe_phy_serdes_dfe_tap_cfg[] = {
+ SPEED_1000_DFE_TAP_CONFIG,
+ SPEED_2500_DFE_TAP_CONFIG,
+ SPEED_10000_DFE_TAP_CONFIG,
+};
+
+static const u32 amd_xgbe_phy_serdes_dfe_tap_ena[] = {
+ SPEED_1000_DFE_TAP_ENABLE,
+ SPEED_2500_DFE_TAP_ENABLE,
+ SPEED_10000_DFE_TAP_ENABLE,
+};
+
enum amd_xgbe_phy_an {
AMD_XGBE_AN_READY = 0,
AMD_XGBE_AN_PAGE_RECEIVED,
@@ -393,6 +420,8 @@ struct amd_xgbe_phy_priv {
u32 serdes_cdr_rate[XGBE_PHY_SPEEDS];
u32 serdes_pq_skew[XGBE_PHY_SPEEDS];
u32 serdes_tx_amp[XGBE_PHY_SPEEDS];
+ u32 serdes_dfe_tap_cfg[XGBE_PHY_SPEEDS];
+ u32 serdes_dfe_tap_ena[XGBE_PHY_SPEEDS];
/* Auto-negotiation state machine support */
struct mutex an_mutex;
@@ -481,11 +510,16 @@ static void amd_xgbe_phy_serdes_complete_ratechange(struct phy_device *phydev)
status = XSIR0_IOREAD(priv, SIR0_STATUS);
if (XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) &&
XSIR_GET_BITS(status, SIR0_STATUS, TX_READY))
- return;
+ goto rx_reset;
}
netdev_dbg(phydev->attached_dev, "SerDes rx/tx not ready (%#hx)\n",
status);
+
+rx_reset:
+ /* Perform Rx reset for the DFE changes */
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RESETB_RXD, 0);
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RESETB_RXD, 1);
}
static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev)
@@ -534,6 +568,10 @@ static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev)
priv->serdes_blwc[XGBE_PHY_SPEED_10000]);
XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
priv->serdes_pq_skew[XGBE_PHY_SPEED_10000]);
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
+ priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_10000]);
+ XRXTX_IOWRITE(priv, RXTX_REG22,
+ priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_10000]);
amd_xgbe_phy_serdes_complete_ratechange(phydev);
@@ -586,6 +624,10 @@ static int amd_xgbe_phy_gmii_2500_mode(struct phy_device *phydev)
priv->serdes_blwc[XGBE_PHY_SPEED_2500]);
XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
priv->serdes_pq_skew[XGBE_PHY_SPEED_2500]);
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
+ priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_2500]);
+ XRXTX_IOWRITE(priv, RXTX_REG22,
+ priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_2500]);
amd_xgbe_phy_serdes_complete_ratechange(phydev);
@@ -638,6 +680,10 @@ static int amd_xgbe_phy_gmii_mode(struct phy_device *phydev)
priv->serdes_blwc[XGBE_PHY_SPEED_1000]);
XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
priv->serdes_pq_skew[XGBE_PHY_SPEED_1000]);
+ XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
+ priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_1000]);
+ XRXTX_IOWRITE(priv, RXTX_REG22,
+ priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_1000]);
amd_xgbe_phy_serdes_complete_ratechange(phydev);
@@ -1668,6 +1714,38 @@ static int amd_xgbe_phy_probe(struct phy_device *phydev)
sizeof(priv->serdes_tx_amp));
}
+ if (device_property_present(phy_dev, XGBE_PHY_DFE_CFG_PROPERTY)) {
+ ret = device_property_read_u32_array(phy_dev,
+ XGBE_PHY_DFE_CFG_PROPERTY,
+ priv->serdes_dfe_tap_cfg,
+ XGBE_PHY_SPEEDS);
+ if (ret) {
+ dev_err(dev, "invalid %s property\n",
+ XGBE_PHY_DFE_CFG_PROPERTY);
+ goto err_sir1;
+ }
+ } else {
+ memcpy(priv->serdes_dfe_tap_cfg,
+ amd_xgbe_phy_serdes_dfe_tap_cfg,
+ sizeof(priv->serdes_dfe_tap_cfg));
+ }
+
+ if (device_property_present(phy_dev, XGBE_PHY_DFE_ENA_PROPERTY)) {
+ ret = device_property_read_u32_array(phy_dev,
+ XGBE_PHY_DFE_ENA_PROPERTY,
+ priv->serdes_dfe_tap_ena,
+ XGBE_PHY_SPEEDS);
+ if (ret) {
+ dev_err(dev, "invalid %s property\n",
+ XGBE_PHY_DFE_ENA_PROPERTY);
+ goto err_sir1;
+ }
+ } else {
+ memcpy(priv->serdes_dfe_tap_ena,
+ amd_xgbe_phy_serdes_dfe_tap_ena,
+ sizeof(priv->serdes_dfe_tap_ena));
+ }
+
phydev->priv = priv;
if (!priv->adev || acpi_disabled)
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 0e62274e884a..f1ee71e22241 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -43,9 +43,7 @@
static struct team_port *team_port_get_rcu(const struct net_device *dev)
{
- struct team_port *port = rcu_dereference(dev->rx_handler_data);
-
- return team_port_exists(dev) ? port : NULL;
+ return rcu_dereference(dev->rx_handler_data);
}
static struct team_port *team_port_get_rtnl(const struct net_device *dev)
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 37eed4d84e9c..0732f9e16128 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -161,6 +161,7 @@ config USB_NET_AX8817X
* Linksys USB200M
* Netgear FA120
* Sitecom LN-029
+ * Sitecom LN-028
* Intellinet USB 2.0 Ethernet
* ST Lab USB 2.0 Ethernet
* TrendNet TU2-ET100
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index bf49792062a2..1173a24feda3 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -979,6 +979,10 @@ static const struct usb_device_id products [] = {
USB_DEVICE (0x0df6, 0x0056),
.driver_info = (unsigned long) &ax88178_info,
}, {
+ // Sitecom LN-028 "USB 2.0 10/100/1000 Ethernet adapter"
+ USB_DEVICE (0x0df6, 0x061c),
+ .driver_info = (unsigned long) &ax88178_info,
+}, {
// corega FEther USB2-TX
USB_DEVICE (0x07aa, 0x0017),
.driver_info = (unsigned long) &ax8817x_info,
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index f7a31d2cb3f1..c4d68d768408 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -655,9 +655,15 @@ static void xenvif_tx_err(struct xenvif_queue *queue,
unsigned long flags;
do {
+ int notify;
+
spin_lock_irqsave(&queue->response_lock, flags);
make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR);
+ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
spin_unlock_irqrestore(&queue->response_lock, flags);
+ if (notify)
+ notify_remote_via_irq(queue->tx_irq);
+
if (cons == end)
break;
txp = RING_GET_REQUEST(&queue->tx, cons++);
@@ -1649,17 +1655,28 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
{
struct pending_tx_info *pending_tx_info;
pending_ring_idx_t index;
+ int notify;
unsigned long flags;
pending_tx_info = &queue->pending_tx_info[pending_idx];
+
spin_lock_irqsave(&queue->response_lock, flags);
+
make_tx_response(queue, &pending_tx_info->req, status);
- index = pending_index(queue->pending_prod);
+
+ /* Release the pending index before pushing the Tx response so
+ * it's available before a new Tx request is pushed by the
+ * frontend.
+ */
+ index = pending_index(queue->pending_prod++);
queue->pending_ring[index] = pending_idx;
- /* TX shouldn't use the index before we give it back here */
- mb();
- queue->pending_prod++;
+
+ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
+
spin_unlock_irqrestore(&queue->response_lock, flags);
+
+ if (notify)
+ notify_remote_via_irq(queue->tx_irq);
}
@@ -1669,7 +1686,6 @@ static void make_tx_response(struct xenvif_queue *queue,
{
RING_IDX i = queue->tx.rsp_prod_pvt;
struct xen_netif_tx_response *resp;
- int notify;
resp = RING_GET_RESPONSE(&queue->tx, i);
resp->id = txp->id;
@@ -1679,9 +1695,6 @@ static void make_tx_response(struct xenvif_queue *queue,
RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
queue->tx.rsp_prod_pvt = ++i;
- RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
- if (notify)
- notify_remote_via_irq(queue->tx_irq);
}
static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,