Diffstat (limited to 'drivers/net')
49 files changed, 609 insertions, 280 deletions
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index f0f5eab0fab1..798ae69fb63c 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -175,7 +175,7 @@ MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to " "the same MAC; 0 for none (default), " "1 for active, 2 for follow"); module_param(all_slaves_active, int, 0); -MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface" +MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface " "by setting active flag for all slaves; " "0 for never (default), 1 for always."); module_param(resend_igmp, int, 0); @@ -3659,8 +3659,14 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev else bond_xmit_slave_id(bond, skb, 0); } else { - slave_id = bond_rr_gen_slave_id(bond); - bond_xmit_slave_id(bond, skb, slave_id % bond->slave_cnt); + int slave_cnt = ACCESS_ONCE(bond->slave_cnt); + + if (likely(slave_cnt)) { + slave_id = bond_rr_gen_slave_id(bond); + bond_xmit_slave_id(bond, skb, slave_id % slave_cnt); + } else { + dev_kfree_skb_any(skb); + } } return NETDEV_TX_OK; @@ -3691,8 +3697,13 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); + int slave_cnt = ACCESS_ONCE(bond->slave_cnt); - bond_xmit_slave_id(bond, skb, bond_xmit_hash(bond, skb) % bond->slave_cnt); + if (likely(slave_cnt)) + bond_xmit_slave_id(bond, skb, + bond_xmit_hash(bond, skb) % slave_cnt); + else + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c index f07fa89b5fd5..05e1aa090add 100644 --- a/drivers/net/can/at91_can.c +++ b/drivers/net/can/at91_can.c @@ -1123,7 +1123,9 @@ static int at91_open(struct net_device *dev) struct at91_priv *priv = netdev_priv(dev); int err; - clk_enable(priv->clk); + err = clk_prepare_enable(priv->clk); + if (err) + return err; /* check or determine and set bittime */ err = open_candev(dev); @@ -1149,7 +1151,7 @@ static int at91_open(struct net_device *dev) out_close: close_candev(dev); out: - clk_disable(priv->clk); + clk_disable_unprepare(priv->clk); return err; } @@ -1166,7 +1168,7 @@ static int at91_close(struct net_device *dev) at91_chip_stop(dev, CAN_STATE_STOPPED); free_irq(dev->irq, dev); - clk_disable(priv->clk); + clk_disable_unprepare(priv->clk); close_candev(dev); diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c index 109cb44291f5..fb279d6ae484 100644 --- a/drivers/net/can/c_can/c_can_platform.c +++ b/drivers/net/can/c_can/c_can_platform.c @@ -97,14 +97,14 @@ static void c_can_hw_raminit_ti(const struct c_can_priv *priv, bool enable) ctrl |= CAN_RAMINIT_DONE_MASK(priv->instance); writel(ctrl, priv->raminit_ctrlreg); ctrl &= ~CAN_RAMINIT_DONE_MASK(priv->instance); - c_can_hw_raminit_wait_ti(priv, ctrl, mask); + c_can_hw_raminit_wait_ti(priv, mask, ctrl); if (enable) { /* Set start bit and wait for the done bit. 
*/ ctrl |= CAN_RAMINIT_START_MASK(priv->instance); writel(ctrl, priv->raminit_ctrlreg); ctrl |= CAN_RAMINIT_DONE_MASK(priv->instance); - c_can_hw_raminit_wait_ti(priv, ctrl, mask); + c_can_hw_raminit_wait_ti(priv, mask, ctrl); } spin_unlock(&raminit_lock); } diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 944aa5d3af6e..6586309329e6 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -62,7 +62,7 @@ #define FLEXCAN_MCR_BCC BIT(16) #define FLEXCAN_MCR_LPRIO_EN BIT(13) #define FLEXCAN_MCR_AEN BIT(12) -#define FLEXCAN_MCR_MAXMB(x) ((x) & 0x1f) +#define FLEXCAN_MCR_MAXMB(x) ((x) & 0x7f) #define FLEXCAN_MCR_IDAM_A (0 << 8) #define FLEXCAN_MCR_IDAM_B (1 << 8) #define FLEXCAN_MCR_IDAM_C (2 << 8) @@ -125,7 +125,9 @@ FLEXCAN_ESR_BOFF_INT | FLEXCAN_ESR_ERR_INT) /* FLEXCAN interrupt flag register (IFLAG) bits */ -#define FLEXCAN_TX_BUF_ID 8 +/* Errata ERR005829 step7: Reserve first valid MB */ +#define FLEXCAN_TX_BUF_RESERVED 8 +#define FLEXCAN_TX_BUF_ID 9 #define FLEXCAN_IFLAG_BUF(x) BIT(x) #define FLEXCAN_IFLAG_RX_FIFO_OVERFLOW BIT(7) #define FLEXCAN_IFLAG_RX_FIFO_WARN BIT(6) @@ -136,6 +138,17 @@ /* FLEXCAN message buffers */ #define FLEXCAN_MB_CNT_CODE(x) (((x) & 0xf) << 24) +#define FLEXCAN_MB_CODE_RX_INACTIVE (0x0 << 24) +#define FLEXCAN_MB_CODE_RX_EMPTY (0x4 << 24) +#define FLEXCAN_MB_CODE_RX_FULL (0x2 << 24) +#define FLEXCAN_MB_CODE_RX_OVERRRUN (0x6 << 24) +#define FLEXCAN_MB_CODE_RX_RANSWER (0xa << 24) + +#define FLEXCAN_MB_CODE_TX_INACTIVE (0x8 << 24) +#define FLEXCAN_MB_CODE_TX_ABORT (0x9 << 24) +#define FLEXCAN_MB_CODE_TX_DATA (0xc << 24) +#define FLEXCAN_MB_CODE_TX_TANSWER (0xe << 24) + #define FLEXCAN_MB_CNT_SRR BIT(22) #define FLEXCAN_MB_CNT_IDE BIT(21) #define FLEXCAN_MB_CNT_RTR BIT(20) @@ -298,7 +311,7 @@ static int flexcan_chip_enable(struct flexcan_priv *priv) flexcan_write(reg, &regs->mcr); while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)) - usleep_range(10, 20); + udelay(10); if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK) return -ETIMEDOUT; @@ -317,7 +330,7 @@ static int flexcan_chip_disable(struct flexcan_priv *priv) flexcan_write(reg, &regs->mcr); while (timeout-- && !(flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)) - usleep_range(10, 20); + udelay(10); if (!(flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)) return -ETIMEDOUT; @@ -336,7 +349,7 @@ static int flexcan_chip_freeze(struct flexcan_priv *priv) flexcan_write(reg, &regs->mcr); while (timeout-- && !(flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK)) - usleep_range(100, 200); + udelay(100); if (!(flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK)) return -ETIMEDOUT; @@ -355,7 +368,7 @@ static int flexcan_chip_unfreeze(struct flexcan_priv *priv) flexcan_write(reg, &regs->mcr); while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK)) - usleep_range(10, 20); + udelay(10); if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK) return -ETIMEDOUT; @@ -370,7 +383,7 @@ static int flexcan_chip_softreset(struct flexcan_priv *priv) flexcan_write(FLEXCAN_MCR_SOFTRST, &regs->mcr); while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_SOFTRST)) - usleep_range(10, 20); + udelay(10); if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_SOFTRST) return -ETIMEDOUT; @@ -428,6 +441,14 @@ static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev) flexcan_write(can_id, &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_id); flexcan_write(ctrl, &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl); + /* Errata ERR005829 step8: + * Write twice INACTIVE(0x8) code to first MB.
+ */ + flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE, + &regs->cantxfg[FLEXCAN_TX_BUF_RESERVED].can_ctrl); + flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE, + &regs->cantxfg[FLEXCAN_TX_BUF_RESERVED].can_ctrl); + return NETDEV_TX_OK; } @@ -744,6 +765,9 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id) stats->tx_bytes += can_get_echo_skb(dev, 0); stats->tx_packets++; can_led_event(dev, CAN_LED_EVENT_TX); + /* after sending a RTR frame mailbox is in RX mode */ + flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE, + &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl); flexcan_write((1 << FLEXCAN_TX_BUF_ID), &regs->iflag1); netif_wake_queue(dev); } @@ -801,6 +825,7 @@ static int flexcan_chip_start(struct net_device *dev) struct flexcan_regs __iomem *regs = priv->base; int err; u32 reg_mcr, reg_ctrl; + int i; /* enable module */ err = flexcan_chip_enable(priv); @@ -867,8 +892,18 @@ static int flexcan_chip_start(struct net_device *dev) netdev_dbg(dev, "%s: writing ctrl=0x%08x", __func__, reg_ctrl); flexcan_write(reg_ctrl, &regs->ctrl); - /* Abort any pending TX, mark Mailbox as INACTIVE */ - flexcan_write(FLEXCAN_MB_CNT_CODE(0x4), + /* clear and invalidate all mailboxes first */ + for (i = FLEXCAN_TX_BUF_ID; i < ARRAY_SIZE(regs->cantxfg); i++) { + flexcan_write(FLEXCAN_MB_CODE_RX_INACTIVE, + &regs->cantxfg[i].can_ctrl); + } + + /* Errata ERR005829: mark first TX mailbox as INACTIVE */ + flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE, + &regs->cantxfg[FLEXCAN_TX_BUF_RESERVED].can_ctrl); + + /* mark TX mailbox as INACTIVE */ + flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE, &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl); /* acceptance mask/acceptance code (accept everything) */ diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c index 7a85590fefb9..e5fac368068a 100644 --- a/drivers/net/can/sja1000/peak_pci.c +++ b/drivers/net/can/sja1000/peak_pci.c @@ -70,6 +70,8 @@ struct peak_pci_chan { #define PEAK_PC_104P_DEVICE_ID 0x0006 /* PCAN-PC/104+ cards */ #define PEAK_PCI_104E_DEVICE_ID 0x0007 /* PCAN-PCI/104 Express cards */ #define PEAK_MPCIE_DEVICE_ID 0x0008 /* The miniPCIe slot cards */ +#define PEAK_PCIE_OEM_ID 0x0009 /* PCAN-PCI Express OEM */ +#define PEAK_PCIEC34_DEVICE_ID 0x000A /* PCAN-PCI Express 34 (one channel) */ #define PEAK_PCI_CHAN_MAX 4 @@ -87,6 +89,7 @@ static const struct pci_device_id peak_pci_tbl[] = { {PEAK_PCI_VENDOR_ID, PEAK_CPCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, #ifdef CONFIG_CAN_PEAK_PCIEC {PEAK_PCI_VENDOR_ID, PEAK_PCIEC_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, + {PEAK_PCI_VENDOR_ID, PEAK_PCIEC34_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, #endif {0,} }; @@ -653,7 +656,8 @@ static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) * This must be done *before* register_sja1000dev() but * *after* devices linkage */ - if (pdev->device == PEAK_PCIEC_DEVICE_ID) { + if (pdev->device == PEAK_PCIEC_DEVICE_ID || + pdev->device == PEAK_PCIEC34_DEVICE_ID) { err = peak_pciec_probe(pdev, dev); if (err) { dev_err(&pdev->dev, diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c index 3fe45c705933..8ca49f04acec 100644 --- a/drivers/net/ethernet/3com/3c59x.c +++ b/drivers/net/ethernet/3com/3c59x.c @@ -2129,6 +2129,7 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev) int entry = vp->cur_tx % TX_RING_SIZE; struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE]; unsigned long flags; + dma_addr_t dma_addr; if (vortex_debug > 6) { pr_debug("boomerang_start_xmit()\n"); @@ -2163,24 +2164,48 @@ boomerang_start_xmit(struct sk_buff *skb,
struct net_device *dev) vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum); if (!skb_shinfo(skb)->nr_frags) { - vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, - skb->len, PCI_DMA_TODEVICE)); + dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, + PCI_DMA_TODEVICE); + if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) + goto out_dma_err; + + vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr); vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len | LAST_FRAG); } else { int i; - vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, - skb_headlen(skb), PCI_DMA_TODEVICE)); + dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, + skb_headlen(skb), PCI_DMA_TODEVICE); + if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) + goto out_dma_err; + + vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr); vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb_headlen(skb)); for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + dma_addr = skb_frag_dma_map(&VORTEX_PCI(vp)->dev, frag, + 0, + frag->size, + DMA_TO_DEVICE); + if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) { + for(i = i-1; i >= 0; i--) + dma_unmap_page(&VORTEX_PCI(vp)->dev, + le32_to_cpu(vp->tx_ring[entry].frag[i+1].addr), + le32_to_cpu(vp->tx_ring[entry].frag[i+1].length), + DMA_TO_DEVICE); + + pci_unmap_single(VORTEX_PCI(vp), + le32_to_cpu(vp->tx_ring[entry].frag[0].addr), + le32_to_cpu(vp->tx_ring[entry].frag[0].length), + PCI_DMA_TODEVICE); + + goto out_dma_err; + } + vp->tx_ring[entry].frag[i+1].addr = - cpu_to_le32(skb_frag_dma_map( - &VORTEX_PCI(vp)->dev, - frag, - frag->page_offset, frag->size, DMA_TO_DEVICE)); + cpu_to_le32(dma_addr); if (i == skb_shinfo(skb)->nr_frags-1) vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(skb_frag_size(frag)|LAST_FRAG); @@ -2189,7 +2214,10 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev) } } #else - vp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE)); + dma_addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE)); + if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) + goto out_dma_err; + vp->tx_ring[entry].addr = cpu_to_le32(dma_addr); vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG); vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded); #endif @@ -2217,7 +2245,11 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev) skb_tx_timestamp(skb); iowrite16(DownUnstall, ioaddr + EL3_CMD); spin_unlock_irqrestore(&vp->lock, flags); +out: return NETDEV_TX_OK; +out_dma_err: + dev_err(&VORTEX_PCI(vp)->dev, "Error mapping dma buffer\n"); + goto out; } /* The interrupt handler does all of the Rx thread work and cleans up diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index fe5cfeace6e3..5919394d9f58 100644 --- a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c @@ -30,6 +30,17 @@ #define DRV_VERSION "1.0" /** + * arc_emac_tx_avail - Return the number of available slots in the tx ring. + * @priv: Pointer to ARC EMAC private data structure. + * + * returns: the number of slots available for transmission in tx the ring. 
+ */ +static inline int arc_emac_tx_avail(struct arc_emac_priv *priv) +{ + return (priv->txbd_dirty + TX_BD_NUM - priv->txbd_curr - 1) % TX_BD_NUM; +} + +/** * arc_emac_adjust_link - Adjust the PHY link duplex. * @ndev: Pointer to the net_device structure. * @@ -180,10 +191,15 @@ static void arc_emac_tx_clean(struct net_device *ndev) txbd->info = 0; *txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM; - - if (netif_queue_stopped(ndev)) - netif_wake_queue(ndev); } + + /* Ensure that txbd_dirty is visible to tx() before checking + * for queue stopped. + */ + smp_mb(); + + if (netif_queue_stopped(ndev) && arc_emac_tx_avail(priv)) + netif_wake_queue(ndev); } /** @@ -298,7 +314,7 @@ static int arc_emac_poll(struct napi_struct *napi, int budget) work_done = arc_emac_rx(ndev, budget); if (work_done < budget) { napi_complete(napi); - arc_reg_or(priv, R_ENABLE, RXINT_MASK); + arc_reg_or(priv, R_ENABLE, RXINT_MASK | TXINT_MASK); } return work_done; @@ -327,9 +343,9 @@ static irqreturn_t arc_emac_intr(int irq, void *dev_instance) /* Reset all flags except "MDIO complete" */ arc_reg_set(priv, R_STATUS, status); - if (status & RXINT_MASK) { + if (status & (RXINT_MASK | TXINT_MASK)) { if (likely(napi_schedule_prep(&priv->napi))) { - arc_reg_clr(priv, R_ENABLE, RXINT_MASK); + arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK); __napi_schedule(&priv->napi); } } @@ -440,7 +456,7 @@ static int arc_emac_open(struct net_device *ndev) arc_reg_set(priv, R_TX_RING, (unsigned int)priv->txbd_dma); /* Enable interrupts */ - arc_reg_set(priv, R_ENABLE, RXINT_MASK | ERR_MASK); + arc_reg_set(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK); /* Set CONTROL */ arc_reg_set(priv, R_CTRL, @@ -511,7 +527,7 @@ static int arc_emac_stop(struct net_device *ndev) netif_stop_queue(ndev); /* Disable interrupts */ - arc_reg_clr(priv, R_ENABLE, RXINT_MASK | ERR_MASK); + arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK); /* Disable EMAC */ arc_reg_clr(priv, R_CTRL, EN_MASK); @@ -574,11 +590,9 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev) len = max_t(unsigned int, ETH_ZLEN, skb->len); - /* EMAC still holds this buffer in its possession. - * CPU must not modify this buffer descriptor - */ - if (unlikely((le32_to_cpu(*info) & OWN_MASK) == FOR_EMAC)) { + if (unlikely(!arc_emac_tx_avail(priv))) { netif_stop_queue(ndev); + netdev_err(ndev, "BUG! Tx Ring full when queue awake!\n"); return NETDEV_TX_BUSY; } @@ -607,12 +621,19 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev) /* Increment index to point to the next BD */ *txbd_curr = (*txbd_curr + 1) % TX_BD_NUM; - /* Get "info" of the next BD */ - info = &priv->txbd[*txbd_curr].info; + /* Ensure that tx_clean() sees the new txbd_curr before + * checking the queue status. This prevents an unneeded wake + * of the queue in tx_clean(). 
+ */ + smp_mb(); - /* Check if if Tx BD ring is full - next BD is still owned by EMAC */ - if (unlikely((le32_to_cpu(*info) & OWN_MASK) == FOR_EMAC)) + if (!arc_emac_tx_avail(priv)) { netif_stop_queue(ndev); + /* Refresh tx_dirty */ + smp_mb(); + if (arc_emac_tx_avail(priv)) + netif_start_queue(ndev); + } arc_reg_set(priv, R_STATUS, TXPL_MASK); diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c index 4a7028d65912..d588136b23b9 100644 --- a/drivers/net/ethernet/broadcom/b44.c +++ b/drivers/net/ethernet/broadcom/b44.c @@ -1697,7 +1697,7 @@ static struct rtnl_link_stats64 *b44_get_stats64(struct net_device *dev, hwstat->tx_underruns + hwstat->tx_excessive_cols + hwstat->tx_late_cols); - nstat->multicast = hwstat->tx_multicast_pkts; + nstat->multicast = hwstat->rx_multicast_pkts; nstat->collisions = hwstat->tx_total_cols; nstat->rx_length_errors = (hwstat->rx_oversize_pkts + diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 6f4e18644bd4..d9b9170ed2fc 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -534,6 +534,25 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, while ((processed < to_process) && (processed < budget)) { cb = &priv->rx_cbs[priv->rx_read_ptr]; skb = cb->skb; + + processed++; + priv->rx_read_ptr++; + + if (priv->rx_read_ptr == priv->num_rx_bds) + priv->rx_read_ptr = 0; + + /* We do not have a backing SKB, so we do not a corresponding + * DMA mapping for this incoming packet since + * bcm_sysport_rx_refill always either has both skb and mapping + * or none. + */ + if (unlikely(!skb)) { + netif_err(priv, rx_err, ndev, "out of memory!\n"); + ndev->stats.rx_dropped++; + ndev->stats.rx_errors++; + goto refill; + } + dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr), RX_BUF_LENGTH, DMA_FROM_DEVICE); @@ -543,23 +562,11 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) & DESC_STATUS_MASK; - processed++; - priv->rx_read_ptr++; - if (priv->rx_read_ptr == priv->num_rx_bds) - priv->rx_read_ptr = 0; - netif_dbg(priv, rx_status, ndev, "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n", p_index, priv->rx_c_index, priv->rx_read_ptr, len, status); - if (unlikely(!skb)) { - netif_err(priv, rx_err, ndev, "out of memory!\n"); - ndev->stats.rx_dropped++; - ndev->stats.rx_errors++; - goto refill; - } - if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) { netif_err(priv, rx_status, ndev, "fragmented packet!\n"); ndev->stats.rx_dropped++; diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 3f9d4de8173c..5cc9cae21ed5 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -875,6 +875,7 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev, int last_tx_cn, last_c_index, num_tx_bds; struct enet_cb *tx_cb_ptr; struct netdev_queue *txq; + unsigned int bds_compl; unsigned int c_index; /* Compute how many buffers are transmitted since last xmit call */ @@ -899,7 +900,9 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev, /* Reclaim transmitted buffers */ while (last_tx_cn-- > 0) { tx_cb_ptr = ring->cbs + last_c_index; + bds_compl = 0; if (tx_cb_ptr->skb) { + bds_compl = skb_shinfo(tx_cb_ptr->skb)->nr_frags + 1; dev->stats.tx_bytes += tx_cb_ptr->skb->len; dma_unmap_single(&dev->dev, dma_unmap_addr(tx_cb_ptr, dma_addr), @@ 
-916,7 +919,7 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev, dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0); } dev->stats.tx_packets++; - ring->free_bds += 1; + ring->free_bds += bds_compl; last_c_index++; last_c_index &= (num_tx_bds - 1); @@ -1274,12 +1277,29 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv, while ((rxpktprocessed < rxpkttoprocess) && (rxpktprocessed < budget)) { + cb = &priv->rx_cbs[priv->rx_read_ptr]; + skb = cb->skb; + + rxpktprocessed++; + + priv->rx_read_ptr++; + priv->rx_read_ptr &= (priv->num_rx_bds - 1); + + /* We do not have a backing SKB, so we do not have a + * corresponding DMA mapping for this incoming packet since + * bcmgenet_rx_refill always either has both skb and mapping or + * none. + */ + if (unlikely(!skb)) { + dev->stats.rx_dropped++; + dev->stats.rx_errors++; + goto refill; + } + /* Unmap the packet contents such that we can use the * RSV from the 64 bytes descriptor when enabled and save * a 32-bits register read */ - cb = &priv->rx_cbs[priv->rx_read_ptr]; - skb = cb->skb; dma_unmap_single(&dev->dev, dma_unmap_addr(cb, dma_addr), priv->rx_buf_len, DMA_FROM_DEVICE); @@ -1307,18 +1327,6 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv, __func__, p_index, priv->rx_c_index, priv->rx_read_ptr, dma_length_status); - rxpktprocessed++; - - priv->rx_read_ptr++; - priv->rx_read_ptr &= (priv->num_rx_bds - 1); - - /* out of memory, just drop packets at the hardware level */ - if (unlikely(!skb)) { - dev->stats.rx_dropped++; - dev->stats.rx_errors++; - goto refill; - } - if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) { netif_err(priv, rx_status, dev, "dropping fragmented packet!\n"); @@ -1736,13 +1744,63 @@ static void bcmgenet_init_multiq(struct net_device *dev) bcmgenet_tdma_writel(priv, reg, DMA_CTRL); } +static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv) +{ + int ret = 0; + int timeout = 0; + u32 reg; + + /* Disable TDMA to stop add more frames in TX DMA */ + reg = bcmgenet_tdma_readl(priv, DMA_CTRL); + reg &= ~DMA_EN; + bcmgenet_tdma_writel(priv, reg, DMA_CTRL); + + /* Check TDMA status register to confirm TDMA is disabled */ + while (timeout++ < DMA_TIMEOUT_VAL) { + reg = bcmgenet_tdma_readl(priv, DMA_STATUS); + if (reg & DMA_DISABLED) + break; + + udelay(1); + } + + if (timeout == DMA_TIMEOUT_VAL) { + netdev_warn(priv->dev, "Timed out while disabling TX DMA\n"); + ret = -ETIMEDOUT; + } + + /* Wait 10ms for packet drain in both tx and rx dma */ + usleep_range(10000, 20000); + + /* Disable RDMA */ + reg = bcmgenet_rdma_readl(priv, DMA_CTRL); + reg &= ~DMA_EN; + bcmgenet_rdma_writel(priv, reg, DMA_CTRL); + + timeout = 0; + /* Check RDMA status register to confirm RDMA is disabled */ + while (timeout++ < DMA_TIMEOUT_VAL) { + reg = bcmgenet_rdma_readl(priv, DMA_STATUS); + if (reg & DMA_DISABLED) + break; + + udelay(1); + } + + if (timeout == DMA_TIMEOUT_VAL) { + netdev_warn(priv->dev, "Timed out while disabling RX DMA\n"); + ret = -ETIMEDOUT; + } + + return ret; +} + static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) { int i; /* disable DMA */ - bcmgenet_rdma_writel(priv, 0, DMA_CTRL); - bcmgenet_tdma_writel(priv, 0, DMA_CTRL); + bcmgenet_dma_teardown(priv); for (i = 0; i < priv->num_tx_bds; i++) { if (priv->tx_cbs[i].skb != NULL) { @@ -2101,57 +2159,6 @@ err_clk_disable: return ret; } -static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv) -{ - int ret = 0; - int timeout = 0; - u32 reg; - - /* Disable TDMA to stop add more frames in TX DMA */ - reg = bcmgenet_tdma_readl(priv, 
DMA_CTRL); - reg &= ~DMA_EN; - bcmgenet_tdma_writel(priv, reg, DMA_CTRL); - - /* Check TDMA status register to confirm TDMA is disabled */ - while (timeout++ < DMA_TIMEOUT_VAL) { - reg = bcmgenet_tdma_readl(priv, DMA_STATUS); - if (reg & DMA_DISABLED) - break; - - udelay(1); - } - - if (timeout == DMA_TIMEOUT_VAL) { - netdev_warn(priv->dev, "Timed out while disabling TX DMA\n"); - ret = -ETIMEDOUT; - } - - /* Wait 10ms for packet drain in both tx and rx dma */ - usleep_range(10000, 20000); - - /* Disable RDMA */ - reg = bcmgenet_rdma_readl(priv, DMA_CTRL); - reg &= ~DMA_EN; - bcmgenet_rdma_writel(priv, reg, DMA_CTRL); - - timeout = 0; - /* Check RDMA status register to confirm RDMA is disabled */ - while (timeout++ < DMA_TIMEOUT_VAL) { - reg = bcmgenet_rdma_readl(priv, DMA_STATUS); - if (reg & DMA_DISABLED) - break; - - udelay(1); - } - - if (timeout == DMA_TIMEOUT_VAL) { - netdev_warn(priv->dev, "Timed out while disabling RX DMA\n"); - ret = -ETIMEDOUT; - } - - return ret; -} - static void bcmgenet_netif_stop(struct net_device *dev) { struct bcmgenet_priv *priv = netdev_priv(dev); diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index cb77ae93d89a..e7d3a620d96a 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -7914,8 +7914,6 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) entry = tnapi->tx_prod; base_flags = 0; - if (skb->ip_summed == CHECKSUM_PARTIAL) - base_flags |= TXD_FLAG_TCPUDP_CSUM; mss = skb_shinfo(skb)->gso_size; if (mss) { @@ -7929,6 +7927,13 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN; + /* HW/FW can not correctly segment packets that have been + * vlan encapsulated. + */ + if (skb->protocol == htons(ETH_P_8021Q) || + skb->protocol == htons(ETH_P_8021AD)) + return tg3_tso_bug(tp, tnapi, txq, skb); + if (!skb_is_gso_v6(skb)) { if (unlikely((ETH_HLEN + hdr_len) > 80) && tg3_flag(tp, TSO_BUG)) @@ -7979,6 +7984,17 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) base_flags |= tsflags << 12; } } + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { + /* HW/FW can not correctly checksum packets that have been + * vlan encapsulated. 
+ */ + if (skb->protocol == htons(ETH_P_8021Q) || + skb->protocol == htons(ETH_P_8021AD)) { + if (skb_checksum_help(skb)) + goto drop; + } else { + base_flags |= TXD_FLAG_TCPUDP_CSUM; + } } if (tg3_flag(tp, USE_JUMBO_BDFLAG) && diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 8c34811a1128..e5be511a3c38 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -6478,6 +6478,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) struct port_info *pi; bool highdma = false; struct adapter *adapter = NULL; + void __iomem *regs; printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION); @@ -6494,19 +6495,35 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) goto out_release_regions; } + regs = pci_ioremap_bar(pdev, 0); + if (!regs) { + dev_err(&pdev->dev, "cannot map device registers\n"); + err = -ENOMEM; + goto out_disable_device; + } + + /* We control everything through one PF */ + func = SOURCEPF_GET(readl(regs + PL_WHOAMI)); + if (func != ent->driver_data) { + iounmap(regs); + pci_disable_device(pdev); + pci_save_state(pdev); /* to restore SR-IOV later */ + goto sriov; + } + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { highdma = true; err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); if (err) { dev_err(&pdev->dev, "unable to obtain 64-bit DMA for " "coherent allocations\n"); - goto out_disable_device; + goto out_unmap_bar0; } } else { err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) { dev_err(&pdev->dev, "no usable DMA configuration\n"); - goto out_disable_device; + goto out_unmap_bar0; } } @@ -6518,7 +6535,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); if (!adapter) { err = -ENOMEM; - goto out_disable_device; + goto out_unmap_bar0; } adapter->workq = create_singlethread_workqueue("cxgb4"); @@ -6530,20 +6547,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* PCI device has been enabled */ adapter->flags |= DEV_ENABLED; - adapter->regs = pci_ioremap_bar(pdev, 0); - if (!adapter->regs) { - dev_err(&pdev->dev, "cannot map device registers\n"); - err = -ENOMEM; - goto out_free_adapter; - } - - /* We control everything through one PF */ - func = SOURCEPF_GET(readl(adapter->regs + PL_WHOAMI)); - if (func != ent->driver_data) { - pci_save_state(pdev); /* to restore SR-IOV later */ - goto sriov; - } - + adapter->regs = regs; adapter->pdev = pdev; adapter->pdev_dev = &pdev->dev; adapter->mbox = func; @@ -6560,7 +6564,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) err = t4_prep_adapter(adapter); if (err) - goto out_unmap_bar0; + goto out_free_adapter; + if (!is_t4(adapter->params.chip)) { s_qpp = QUEUESPERPAGEPF1 * adapter->fn; @@ -6577,14 +6582,14 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev_err(&pdev->dev, "Incorrect number of egress queues per page\n"); err = -EINVAL; - goto out_unmap_bar0; + goto out_free_adapter; } adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2), pci_resource_len(pdev, 2)); if (!adapter->bar2) { dev_err(&pdev->dev, "cannot map device bar2 region\n"); err = -ENOMEM; - goto out_unmap_bar0; + goto out_free_adapter; } } @@ -6722,13 +6727,13 @@ sriov: out_unmap_bar: if (!is_t4(adapter->params.chip)) iounmap(adapter->bar2); - out_unmap_bar0: - iounmap(adapter->regs); out_free_adapter: if 
(adapter->workq) destroy_workqueue(adapter->workq); kfree(adapter); + out_unmap_bar0: + iounmap(regs); out_disable_device: pci_disable_pcie_error_reporting(pdev); pci_disable_device(pdev); diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index 9b33057a9477..70089c29d307 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c @@ -1399,7 +1399,7 @@ static struct dm9000_plat_data *dm9000_parse_dt(struct device *dev) const void *mac_addr; if (!IS_ENABLED(CONFIG_OF) || !np) - return NULL; + return ERR_PTR(-ENXIO); pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index 65a4a0f88ea0..02a2e90d581a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -2389,6 +2389,22 @@ struct mlx4_slaves_pport mlx4_phys_to_slaves_pport_actv( } EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport_actv); +static int mlx4_slaves_closest_port(struct mlx4_dev *dev, int slave, int port) +{ + struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave); + int min_port = find_first_bit(actv_ports.ports, dev->caps.num_ports) + + 1; + int max_port = min_port + + bitmap_weight(actv_ports.ports, dev->caps.num_ports); + + if (port < min_port) + port = min_port; + else if (port >= max_port) + port = max_port - 1; + + return port; +} + int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac) { struct mlx4_priv *priv = mlx4_priv(dev); @@ -2402,6 +2418,7 @@ int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac) if (slave < 0) return -EINVAL; + port = mlx4_slaves_closest_port(dev, slave, port); s_info = &priv->mfunc.master.vf_admin[slave].vport[port]; s_info->mac = mac; mlx4_info(dev, "default mac on vf %d port %d to %llX will take afect only after vf restart\n", @@ -2428,6 +2445,7 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos) if (slave < 0) return -EINVAL; + port = mlx4_slaves_closest_port(dev, slave, port); vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port]; if ((0 == vlan) && (0 == qos)) @@ -2455,6 +2473,7 @@ bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave, struct mlx4_priv *priv; priv = mlx4_priv(dev); + port = mlx4_slaves_closest_port(dev, slave, port); vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; if (MLX4_VGT != vp_oper->state.default_vlan) { @@ -2482,6 +2501,7 @@ int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting) if (slave < 0) return -EINVAL; + port = mlx4_slaves_closest_port(dev, slave, port); s_info = &priv->mfunc.master.vf_admin[slave].vport[port]; s_info->spoofchk = setting; @@ -2535,6 +2555,7 @@ int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_stat if (slave < 0) return -EINVAL; + port = mlx4_slaves_closest_port(dev, slave, port); switch (link_state) { case IFLA_VF_LINK_STATE_AUTO: /* get current link state */ diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index e22f24f784fc..35ff2925110a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -487,6 +487,9 @@ static int mlx4_en_set_pauseparam(struct net_device *dev, struct mlx4_en_dev *mdev = priv->mdev; int err; + if (pause->autoneg) + return -EINVAL; + priv->prof->tx_pause = pause->tx_pause != 0; priv->prof->rx_pause = pause->rx_pause 
!= 0; err = mlx4_SET_PORT_general(mdev->dev, priv->port, diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c index 0dc31d85fc3b..2301365c79c7 100644 --- a/drivers/net/ethernet/mellanox/mlx4/qp.c +++ b/drivers/net/ethernet/mellanox/mlx4/qp.c @@ -390,13 +390,14 @@ err_icm: EXPORT_SYMBOL_GPL(mlx4_qp_alloc); #define MLX4_UPDATE_QP_SUPPORTED_ATTRS MLX4_UPDATE_QP_SMAC -int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp, +int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn, enum mlx4_update_qp_attr attr, struct mlx4_update_qp_params *params) { struct mlx4_cmd_mailbox *mailbox; struct mlx4_update_qp_context *cmd; u64 pri_addr_path_mask = 0; + u64 qp_mask = 0; int err = 0; mailbox = mlx4_alloc_cmd_mailbox(dev); @@ -413,9 +414,16 @@ int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp, cmd->qp_context.pri_path.grh_mylmc = params->smac_index; } + if (attr & MLX4_UPDATE_QP_VSD) { + qp_mask |= 1ULL << MLX4_UPD_QP_MASK_VSD; + if (params->flags & MLX4_UPDATE_QP_PARAMS_FLAGS_VSD_ENABLE) + cmd->qp_context.param3 |= cpu_to_be32(MLX4_STRIP_VLAN); + } + cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask); + cmd->qp_mask = cpu_to_be64(qp_mask); - err = mlx4_cmd(dev, mailbox->dma, qp->qpn & 0xffffff, 0, + err = mlx4_cmd(dev, mailbox->dma, qpn & 0xffffff, 0, MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 1089367fed22..5d2498dcf536 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -702,11 +702,13 @@ static int update_vport_qp_param(struct mlx4_dev *dev, struct mlx4_qp_context *qpc = inbox->buf + 8; struct mlx4_vport_oper_state *vp_oper; struct mlx4_priv *priv; + u32 qp_type; int port; port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1; priv = mlx4_priv(dev); vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; + qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff; if (MLX4_VGT != vp_oper->state.default_vlan) { /* the reserved QPs (special, proxy, tunnel) @@ -715,8 +717,20 @@ static int update_vport_qp_param(struct mlx4_dev *dev, if (mlx4_is_qp_reserved(dev, qpn)) return 0; - /* force strip vlan by clear vsd */ - qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN); + /* force strip vlan by clear vsd, MLX QP refers to Raw Ethernet */ + if (qp_type == MLX4_QP_ST_UD || + (qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) { + if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) { + *(__be32 *)inbox->buf = + cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) | + MLX4_QP_OPTPAR_VLAN_STRIPPING); + qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN); + } else { + struct mlx4_update_qp_params params = {.flags = 0}; + + mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params); + } + } if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE && dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) { @@ -3998,13 +4012,17 @@ int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave, } port = (rqp->sched_queue >> 6 & 1) + 1; - smac_index = cmd->qp_context.pri_path.grh_mylmc; - err = mac_find_smac_ix_in_slave(dev, slave, port, - smac_index, &mac); - if (err) { - mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid.
smac_ix: %d\n", - qpn, smac_index); - goto err_mac; + + if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) { + smac_index = cmd->qp_context.pri_path.grh_mylmc; + err = mac_find_smac_ix_in_slave(dev, slave, port, + smac_index, &mac); + + if (err) { + mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n", + qpn, smac_index); + goto err_mac; + } } err = mlx4_cmd(dev, inbox->dma, @@ -4818,7 +4836,7 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work) MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED; upd_context = mailbox->buf; - upd_context->qp_mask = cpu_to_be64(MLX4_UPD_QP_MASK_VSD); + upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD); spin_lock_irq(mlx4_tlock(dev)); list_for_each_entry_safe(qp, tmp, qp_list, com.list) { diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c index 979c6980639f..a42293092ea4 100644 --- a/drivers/net/ethernet/octeon/octeon_mgmt.c +++ b/drivers/net/ethernet/octeon/octeon_mgmt.c @@ -290,9 +290,11 @@ static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p) /* Read the hardware TX timestamp if one was recorded */ if (unlikely(re.s.tstamp)) { struct skb_shared_hwtstamps ts; + u64 ns; + memset(&ts, 0, sizeof(ts)); /* Read the timestamp */ - u64 ns = cvmx_read_csr(CVMX_MIXX_TSTAMP(p->port)); + ns = cvmx_read_csr(CVMX_MIXX_TSTAMP(p->port)); /* Remove the timestamp from the FIFO */ cvmx_write_csr(CVMX_MIXX_TSCTL(p->port), 0); /* Tell the kernel about the timestamp */ diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig index 44c8be1c6805..5f7a35212796 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig +++ b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig @@ -7,6 +7,7 @@ config PCH_GBE depends on PCI && (X86_32 || COMPILE_TEST) select MII select PTP_1588_CLOCK_PCH + select NET_PTP_CLASSIFY ---help--- This is a gigabit ethernet driver for EG20T PCH. 
EG20T PCH is the platform controller hub that is used in Intel's diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 91652e7235e4..0921302553c6 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -1783,33 +1783,31 @@ static void __rtl8169_set_features(struct net_device *dev, netdev_features_t features) { struct rtl8169_private *tp = netdev_priv(dev); - netdev_features_t changed = features ^ dev->features; void __iomem *ioaddr = tp->mmio_addr; + u32 rx_config; - if (!(changed & (NETIF_F_RXALL | NETIF_F_RXCSUM | - NETIF_F_HW_VLAN_CTAG_RX))) - return; + rx_config = RTL_R32(RxConfig); + if (features & NETIF_F_RXALL) + rx_config |= (AcceptErr | AcceptRunt); + else + rx_config &= ~(AcceptErr | AcceptRunt); - if (changed & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX)) { - if (features & NETIF_F_RXCSUM) - tp->cp_cmd |= RxChkSum; - else - tp->cp_cmd &= ~RxChkSum; + RTL_W32(RxConfig, rx_config); - if (dev->features & NETIF_F_HW_VLAN_CTAG_RX) - tp->cp_cmd |= RxVlan; - else - tp->cp_cmd &= ~RxVlan; + if (features & NETIF_F_RXCSUM) + tp->cp_cmd |= RxChkSum; + else + tp->cp_cmd &= ~RxChkSum; - RTL_W16(CPlusCmd, tp->cp_cmd); - RTL_R16(CPlusCmd); - } - if (changed & NETIF_F_RXALL) { - int tmp = (RTL_R32(RxConfig) & ~(AcceptErr | AcceptRunt)); - if (features & NETIF_F_RXALL) - tmp |= (AcceptErr | AcceptRunt); - RTL_W32(RxConfig, tmp); - } + if (features & NETIF_F_HW_VLAN_CTAG_RX) + tp->cp_cmd |= RxVlan; + else + tp->cp_cmd &= ~RxVlan; + + tp->cp_cmd |= RTL_R16(CPlusCmd) & ~(RxVlan | RxChkSum); + + RTL_W16(CPlusCmd, tp->cp_cmd); + RTL_R16(CPlusCmd); } static int rtl8169_set_features(struct net_device *dev, @@ -1817,8 +1815,11 @@ static int rtl8169_set_features(struct net_device *dev, { struct rtl8169_private *tp = netdev_priv(dev); + features &= NETIF_F_RXALL | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX; + rtl_lock_work(tp); - __rtl8169_set_features(dev, features); + if (features ^ dev->features) + __rtl8169_set_features(dev, features); rtl_unlock_work(tp); return 0; @@ -7118,8 +7119,7 @@ static void rtl_hw_initialize(struct rtl8169_private *tp) } } -static int -rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) +static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data; const unsigned int region = cfg->region; @@ -7194,7 +7194,7 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out_mwi_2; } - tp->cp_cmd = RxChkSum; + tp->cp_cmd = 0; if ((sizeof(dma_addr_t) > 4) && !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) { @@ -7235,13 +7235,6 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_master(pdev); - /* - * Pretend we are using VLANs; This bypasses a nasty bug where - * Interrupts stop flowing on high load on 8110SCd controllers. - */ - if (tp->mac_version == RTL_GIGA_MAC_VER_05) - tp->cp_cmd |= RxVlan; - rtl_init_mdio_ops(tp); rtl_init_pll_power_ops(tp); rtl_init_jumbo_ops(tp); @@ -7302,8 +7295,14 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | NETIF_F_HIGHDMA; + tp->cp_cmd |= RxChkSum | RxVlan; + + /* + * Pretend we are using VLANs; This bypasses a nasty bug where + * Interrupts stop flowing on high load on 8110SCd controllers. 
+ */ if (tp->mac_version == RTL_GIGA_MAC_VER_05) - /* 8110SCd requires hardware Rx VLAN - disallow toggling */ + /* Disallow toggling */ dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX; if (tp->txd_version == RTL_TD_0) diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c index 0537381cd2f6..6859437b59fb 100644 --- a/drivers/net/ethernet/sfc/farch.c +++ b/drivers/net/ethernet/sfc/farch.c @@ -2933,6 +2933,9 @@ void efx_farch_filter_sync_rx_mode(struct efx_nic *efx) u32 crc; int bit; + if (!efx_dev_registered(efx)) + return; + netif_addr_lock_bh(net_dev); efx->unicast_filter = !(net_dev->flags & IFF_PROMISC); diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index 23c89ab5a6ad..f67539650c38 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c @@ -350,14 +350,17 @@ static int vnet_walk_rx_one(struct vnet_port *port, if (IS_ERR(desc)) return PTR_ERR(desc); + if (desc->hdr.state != VIO_DESC_READY) + return 1; + + rmb(); + viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%llx:%llx]\n", desc->hdr.state, desc->hdr.ack, desc->size, desc->ncookies, desc->cookies[0].cookie_addr, desc->cookies[0].cookie_size); - if (desc->hdr.state != VIO_DESC_READY) - return 1; err = vnet_rx_one(port, desc->size, desc->cookies, desc->ncookies); if (err == -ECONNRESET) return err; diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 999fb72688d2..e2a00287f8eb 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -699,6 +699,28 @@ static void cpsw_rx_handler(void *token, int len, int status) cpsw_dual_emac_src_port_detect(status, priv, ndev, skb); if (unlikely(status < 0) || unlikely(!netif_running(ndev))) { + bool ndev_status = false; + struct cpsw_slave *slave = priv->slaves; + int n; + + if (priv->data.dual_emac) { + /* In dual emac mode check for all interfaces */ + for (n = priv->data.slaves; n; n--, slave++) + if (netif_running(slave->ndev)) + ndev_status = true; + } + + if (ndev_status && (status >= 0)) { + /* The packet received is for the interface which + * is already down and the other interface is up + * and running, intead of freeing which results + * in reducing of the number of rx descriptor in + * DMA engine, requeue skb back to cpdma. 
+ */ + new_skb = skb; + goto requeue; + } + /* the interface is going down, skbs are purged */ dev_kfree_skb_any(skb); return; @@ -717,6 +739,7 @@ static void cpsw_rx_handler(void *token, int len, int status) new_skb = skb; } +requeue: ret = cpdma_chan_submit(priv->rxch, new_skb, new_skb->data, skb_tailroom(new_skb), 0); if (WARN_ON(ret < 0)) @@ -2311,10 +2334,19 @@ static int cpsw_suspend(struct device *dev) struct net_device *ndev = platform_get_drvdata(pdev); struct cpsw_priv *priv = netdev_priv(ndev); - if (netif_running(ndev)) - cpsw_ndo_stop(ndev); + if (priv->data.dual_emac) { + int i; - for_each_slave(priv, soft_reset_slave); + for (i = 0; i < priv->data.slaves; i++) { + if (netif_running(priv->slaves[i].ndev)) + cpsw_ndo_stop(priv->slaves[i].ndev); + soft_reset_slave(priv->slaves + i); + } + } else { + if (netif_running(ndev)) + cpsw_ndo_stop(ndev); + for_each_slave(priv, soft_reset_slave); + } pm_runtime_put_sync(&pdev->dev); @@ -2328,14 +2360,24 @@ static int cpsw_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct net_device *ndev = platform_get_drvdata(pdev); + struct cpsw_priv *priv = netdev_priv(ndev); pm_runtime_get_sync(&pdev->dev); /* Select default pin state */ pinctrl_pm_select_default_state(&pdev->dev); - if (netif_running(ndev)) - cpsw_ndo_open(ndev); + if (priv->data.dual_emac) { + int i; + + for (i = 0; i < priv->data.slaves; i++) { + if (netif_running(priv->slaves[i].ndev)) + cpsw_ndo_open(priv->slaves[i].ndev); + } + } else { + if (netif_running(ndev)) + cpsw_ndo_open(ndev); + } return 0; } diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index a96955597755..726edabff26b 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -36,6 +36,7 @@ #include <linux/netpoll.h> #define MACVLAN_HASH_SIZE (1 << BITS_PER_BYTE) +#define MACVLAN_BC_QUEUE_LEN 1000 struct macvlan_port { struct net_device *dev; @@ -248,7 +249,7 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port, goto err; spin_lock(&port->bc_queue.lock); - if (skb_queue_len(&port->bc_queue) < skb->dev->tx_queue_len) { + if (skb_queue_len(&port->bc_queue) < MACVLAN_BC_QUEUE_LEN) { __skb_queue_tail(&port->bc_queue, nskb); err = 0; } @@ -806,6 +807,7 @@ static netdev_features_t macvlan_fix_features(struct net_device *dev, features, mask); features |= ALWAYS_ON_FEATURES; + features &= ~NETIF_F_NETNS_LOCAL; return features; } diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index fd0ea7c50ee6..011dbda2b2f1 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -592,8 +592,7 @@ static struct phy_driver ksphy_driver[] = { .phy_id = PHY_ID_KSZ9031, .phy_id_mask = 0x00fffff0, .name = "Micrel KSZ9031 Gigabit PHY", - .features = (PHY_GBIT_FEATURES | SUPPORTED_Pause - | SUPPORTED_Asym_Pause), + .features = (PHY_GBIT_FEATURES | SUPPORTED_Pause), .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, .config_init = ksz9031_config_init, .config_aneg = genphy_config_aneg, diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 87f710476217..74760e8143e3 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -2019,7 +2019,7 @@ static int rtl8153_enable(struct r8152 *tp) return rtl_enable(tp); } -static void rtl8152_disable(struct r8152 *tp) +static void rtl_disable(struct r8152 *tp) { u32 ocp_data; int i; @@ -2232,6 +2232,13 @@ static inline void r8152b_enable_aldps(struct r8152 *tp) LINKENA | DIS_SDSAVE); } +static void rtl8152_disable(struct r8152 *tp) +{ + r8152b_disable_aldps(tp); + rtl_disable(tp); + 
r8152b_enable_aldps(tp); +} + static void r8152b_hw_phy_cfg(struct r8152 *tp) { u16 data; @@ -2242,11 +2249,8 @@ static void r8152b_hw_phy_cfg(struct r8152 *tp) r8152_mdio_write(tp, MII_BMCR, data); } - r8152b_disable_aldps(tp); - rtl_clear_bp(tp); - r8152b_enable_aldps(tp); set_bit(PHY_RESET, &tp->flags); } @@ -2255,9 +2259,6 @@ static void r8152b_exit_oob(struct r8152 *tp) u32 ocp_data; int i; - if (test_bit(RTL8152_UNPLUG, &tp->flags)) - return; - ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); ocp_data &= ~RCR_ACPT_ALL; ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); @@ -2347,7 +2348,7 @@ static void r8152b_enter_oob(struct r8152 *tp) ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1, RXFIFO_THR2_OOB); ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2, RXFIFO_THR3_OOB); - rtl8152_disable(tp); + rtl_disable(tp); for (i = 0; i < 1000; i++) { ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); @@ -2485,9 +2486,6 @@ static void r8153_first_init(struct r8152 *tp) u32 ocp_data; int i; - if (test_bit(RTL8152_UNPLUG, &tp->flags)) - return; - rxdy_gated_en(tp, true); r8153_teredo_off(tp); @@ -2560,7 +2558,7 @@ static void r8153_enter_oob(struct r8152 *tp) ocp_data &= ~NOW_IS_OOB; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); - rtl8152_disable(tp); + rtl_disable(tp); for (i = 0; i < 1000; i++) { ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); @@ -2624,6 +2622,13 @@ static void r8153_enable_aldps(struct r8152 *tp) ocp_reg_write(tp, OCP_POWER_CFG, data); } +static void rtl8153_disable(struct r8152 *tp) +{ + r8153_disable_aldps(tp); + rtl_disable(tp); + r8153_enable_aldps(tp); +} + static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex) { u16 bmcr, anar, gbcr; @@ -2714,6 +2719,16 @@ out: return ret; } +static void rtl8152_up(struct r8152 *tp) +{ + if (test_bit(RTL8152_UNPLUG, &tp->flags)) + return; + + r8152b_disable_aldps(tp); + r8152b_exit_oob(tp); + r8152b_enable_aldps(tp); +} + static void rtl8152_down(struct r8152 *tp) { if (test_bit(RTL8152_UNPLUG, &tp->flags)) { @@ -2727,6 +2742,16 @@ static void rtl8152_down(struct r8152 *tp) r8152b_enable_aldps(tp); } +static void rtl8153_up(struct r8152 *tp) +{ + if (test_bit(RTL8152_UNPLUG, &tp->flags)) + return; + + r8153_disable_aldps(tp); + r8153_first_init(tp); + r8153_enable_aldps(tp); +} + static void rtl8153_down(struct r8152 *tp) { if (test_bit(RTL8152_UNPLUG, &tp->flags)) { @@ -2946,6 +2971,8 @@ static void r8152b_init(struct r8152 *tp) if (test_bit(RTL8152_UNPLUG, &tp->flags)) return; + r8152b_disable_aldps(tp); + if (tp->version == RTL_VER_01) { ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE); ocp_data &= ~LED_MODE_MASK; @@ -2984,6 +3011,7 @@ static void r8153_init(struct r8152 *tp) if (test_bit(RTL8152_UNPLUG, &tp->flags)) return; + r8153_disable_aldps(tp); r8153_u1u2en(tp, false); for (i = 0; i < 500; i++) { @@ -3392,7 +3420,7 @@ static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id) ops->init = r8152b_init; ops->enable = rtl8152_enable; ops->disable = rtl8152_disable; - ops->up = r8152b_exit_oob; + ops->up = rtl8152_up; ops->down = rtl8152_down; ops->unload = rtl8152_unload; ret = 0; @@ -3400,8 +3428,8 @@ static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id) case PRODUCT_ID_RTL8153: ops->init = r8153_init; ops->enable = rtl8153_enable; - ops->disable = rtl8152_disable; - ops->up = r8153_first_init; + ops->disable = rtl8153_disable; + ops->up = rtl8153_up; ops->down = rtl8153_down; ops->unload = rtl8153_unload; ret = 0; @@ 
-3416,8 +3444,8 @@ static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id) case PRODUCT_ID_SAMSUNG: ops->init = r8153_init; ops->enable = rtl8153_enable; - ops->disable = rtl8152_disable; - ops->up = r8153_first_init; + ops->disable = rtl8153_disable; + ops->up = rtl8153_up; ops->down = rtl8153_down; ops->unload = rtl8153_unload; ret = 0; diff --git a/drivers/net/wireless/ath/ath9k/common-beacon.c b/drivers/net/wireless/ath/ath9k/common-beacon.c index 733be5178481..6ad44470d0f2 100644 --- a/drivers/net/wireless/ath/ath9k/common-beacon.c +++ b/drivers/net/wireless/ath/ath9k/common-beacon.c @@ -57,7 +57,7 @@ int ath9k_cmn_beacon_config_sta(struct ath_hw *ah, struct ath9k_beacon_state *bs) { struct ath_common *common = ath9k_hw_common(ah); - int dtim_intval, sleepduration; + int dtim_intval; u64 tsf; /* No need to configure beacon if we are not associated */ @@ -75,7 +75,6 @@ int ath9k_cmn_beacon_config_sta(struct ath_hw *ah, * last beacon we received (which may be none). */ dtim_intval = conf->intval * conf->dtim_period; - sleepduration = ah->hw->conf.listen_interval * conf->intval; /* * Pull nexttbtt forward to reflect the current @@ -113,7 +112,7 @@ int ath9k_cmn_beacon_config_sta(struct ath_hw *ah, */ bs->bs_sleepduration = TU_TO_USEC(roundup(IEEE80211_MS_TO_TU(100), - sleepduration)); + conf->intval)); if (bs->bs_sleepduration > bs->bs_dtimperiod) bs->bs_sleepduration = bs->bs_dtimperiod; diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c index bb86eb2ffc95..f0484b1b617e 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c @@ -978,7 +978,7 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv, struct ath_hw *ah = common->ah; struct ath_htc_rx_status *rxstatus; struct ath_rx_status rx_stats; - bool decrypt_error; + bool decrypt_error = false; if (skb->len < HTC_RX_FRAME_HEADER_SIZE) { ath_err(common, "Corrupted RX frame, dropping (len: %d)\n", diff --git a/drivers/net/wireless/brcm80211/Kconfig b/drivers/net/wireless/brcm80211/Kconfig index b8e2561ea645..fe3dc126b149 100644 --- a/drivers/net/wireless/brcm80211/Kconfig +++ b/drivers/net/wireless/brcm80211/Kconfig @@ -27,10 +27,17 @@ config BRCMFMAC one of the bus interface support. If you choose to build a module, it'll be called brcmfmac.ko. 
+config BRCMFMAC_PROTO_BCDC + bool + +config BRCMFMAC_PROTO_MSGBUF + bool + config BRCMFMAC_SDIO bool "SDIO bus interface support for FullMAC driver" depends on (MMC = y || MMC = BRCMFMAC) depends on BRCMFMAC + select BRCMFMAC_PROTO_BCDC select FW_LOADER default y ---help--- @@ -42,6 +49,7 @@ config BRCMFMAC_USB bool "USB bus interface support for FullMAC driver" depends on (USB = y || USB = BRCMFMAC) depends on BRCMFMAC + select BRCMFMAC_PROTO_BCDC select FW_LOADER ---help--- This option enables the USB bus interface support for Broadcom @@ -52,6 +60,8 @@ config BRCMFMAC_PCIE bool "PCIE bus interface support for FullMAC driver" depends on BRCMFMAC depends on PCI + depends on HAS_DMA + select BRCMFMAC_PROTO_MSGBUF select FW_LOADER ---help--- This option enables the PCIE bus interface support for Broadcom diff --git a/drivers/net/wireless/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/brcm80211/brcmfmac/Makefile index c35adf4bc70b..90a977fe9a64 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/Makefile +++ b/drivers/net/wireless/brcm80211/brcmfmac/Makefile @@ -30,16 +30,18 @@ brcmfmac-objs += \ fwsignal.o \ p2p.o \ proto.o \ - bcdc.o \ - commonring.o \ - flowring.o \ - msgbuf.o \ dhd_common.o \ dhd_linux.o \ firmware.o \ feature.o \ btcoex.o \ vendor.o +brcmfmac-$(CONFIG_BRCMFMAC_PROTO_BCDC) += \ + bcdc.o +brcmfmac-$(CONFIG_BRCMFMAC_PROTO_MSGBUF) += \ + commonring.o \ + flowring.o \ + msgbuf.o brcmfmac-$(CONFIG_BRCMFMAC_SDIO) += \ dhd_sdio.o \ bcmsdh.o diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcdc.h b/drivers/net/wireless/brcm80211/brcmfmac/bcdc.h index 17e8c039ff32..6003179c0ceb 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/bcdc.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/bcdc.h @@ -16,9 +16,12 @@ #ifndef BRCMFMAC_BCDC_H #define BRCMFMAC_BCDC_H - +#ifdef CONFIG_BRCMFMAC_PROTO_BCDC int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr); void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr); - +#else +static inline int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr) { return 0; } +static inline void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr) {} +#endif #endif /* BRCMFMAC_BCDC_H */ diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/brcm80211/brcmfmac/fweh.c index 4f1daabc551b..44fc85f68f7a 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/fweh.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.c @@ -185,7 +185,13 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr, ifevent->action, ifevent->ifidx, ifevent->bssidx, ifevent->flags, ifevent->role); - if (ifevent->flags & BRCMF_E_IF_FLAG_NOIF) { + /* The P2P Device interface event must not be ignored + * contrary to what firmware tells us. The only way to + * distinguish the P2P Device is by looking at the ifidx + * and bssidx received. 
+ */ + if (!(ifevent->ifidx == 0 && ifevent->bssidx == 1) && + (ifevent->flags & BRCMF_E_IF_FLAG_NOIF)) { brcmf_dbg(EVENT, "event can be ignored\n"); return; } @@ -210,12 +216,12 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr, return; } - if (ifevent->action == BRCMF_E_IF_CHANGE) + if (ifp && ifevent->action == BRCMF_E_IF_CHANGE) brcmf_fws_reset_interface(ifp); err = brcmf_fweh_call_event_handler(ifp, emsg->event_code, emsg, data); - if (ifevent->action == BRCMF_E_IF_DEL) { + if (ifp && ifevent->action == BRCMF_E_IF_DEL) { brcmf_fws_del_interface(ifp); brcmf_del_if(drvr, ifevent->bssidx); } diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h index dd20b1862d44..cbf033f59109 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h @@ -172,6 +172,8 @@ enum brcmf_fweh_event_code { #define BRCMF_E_IF_ROLE_STA 0 #define BRCMF_E_IF_ROLE_AP 1 #define BRCMF_E_IF_ROLE_WDS 2 +#define BRCMF_E_IF_ROLE_P2P_GO 3 +#define BRCMF_E_IF_ROLE_P2P_CLIENT 4 /** * definitions for event packet validation. diff --git a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.h b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.h index f901ae52bf2b..77a51b8c1e12 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.h +++ b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.h @@ -15,6 +15,7 @@ #ifndef BRCMFMAC_MSGBUF_H #define BRCMFMAC_MSGBUF_H +#ifdef CONFIG_BRCMFMAC_PROTO_MSGBUF #define BRCMF_H2D_MSGRING_CONTROL_SUBMIT_MAX_ITEM 20 #define BRCMF_H2D_MSGRING_RXPOST_SUBMIT_MAX_ITEM 256 @@ -32,9 +33,15 @@ int brcmf_proto_msgbuf_rx_trigger(struct device *dev); +void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u8 flowid); int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr); void brcmf_proto_msgbuf_detach(struct brcmf_pub *drvr); -void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u8 flowid); - +#else +static inline int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr) +{ + return 0; +} +static inline void brcmf_proto_msgbuf_detach(struct brcmf_pub *drvr) {} +#endif #endif /* BRCMFMAC_MSGBUF_H */ diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c index 02fe706fc9ec..f3a9804988a6 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c @@ -497,8 +497,11 @@ brcmf_configure_arp_offload(struct brcmf_if *ifp, bool enable) static void brcmf_cfg80211_update_proto_addr_mode(struct wireless_dev *wdev) { - struct net_device *ndev = wdev->netdev; - struct brcmf_if *ifp = netdev_priv(ndev); + struct brcmf_cfg80211_vif *vif; + struct brcmf_if *ifp; + + vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev); + ifp = vif->ifp; if ((wdev->iftype == NL80211_IFTYPE_ADHOC) || (wdev->iftype == NL80211_IFTYPE_AP) || @@ -5143,6 +5146,7 @@ static int brcmf_enable_bw40_2g(struct brcmf_cfg80211_info *cfg) ch.band = BRCMU_CHAN_BAND_2G; ch.bw = BRCMU_CHAN_BW_40; + ch.sb = BRCMU_CHAN_SB_NONE; ch.chnum = 0; cfg->d11inf.encchspec(&ch); @@ -5176,6 +5180,7 @@ static int brcmf_enable_bw40_2g(struct brcmf_cfg80211_info *cfg) brcmf_update_bw40_channel_flag(&band->channels[j], &ch); } + kfree(pbuf); } return err; } diff --git a/drivers/net/wireless/iwlwifi/dvm/power.c b/drivers/net/wireless/iwlwifi/dvm/power.c index 760c45c34ef3..1513dbc79c14 100644 --- a/drivers/net/wireless/iwlwifi/dvm/power.c +++ b/drivers/net/wireless/iwlwifi/dvm/power.c @@ -40,7 +40,7 @@ #include 
"commands.h" #include "power.h" -static bool force_cam; +static bool force_cam = true; module_param(force_cam, bool, 0644); MODULE_PARM_DESC(force_cam, "force continuously aware mode (no power saving at all)"); diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c index d67a37a786aa..d53adc245497 100644 --- a/drivers/net/wireless/iwlwifi/iwl-7000.c +++ b/drivers/net/wireless/iwlwifi/iwl-7000.c @@ -83,6 +83,8 @@ #define IWL7260_TX_POWER_VERSION 0xffff /* meaningless */ #define IWL3160_NVM_VERSION 0x709 #define IWL3160_TX_POWER_VERSION 0xffff /* meaningless */ +#define IWL3165_NVM_VERSION 0x709 +#define IWL3165_TX_POWER_VERSION 0xffff /* meaningless */ #define IWL7265_NVM_VERSION 0x0a1d #define IWL7265_TX_POWER_VERSION 0xffff /* meaningless */ @@ -92,6 +94,9 @@ #define IWL3160_FW_PRE "iwlwifi-3160-" #define IWL3160_MODULE_FIRMWARE(api) IWL3160_FW_PRE __stringify(api) ".ucode" +#define IWL3165_FW_PRE "iwlwifi-3165-" +#define IWL3165_MODULE_FIRMWARE(api) IWL3165_FW_PRE __stringify(api) ".ucode" + #define IWL7265_FW_PRE "iwlwifi-7265-" #define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode" @@ -213,6 +218,16 @@ static const struct iwl_pwr_tx_backoff iwl7265_pwr_tx_backoffs[] = { {0}, }; +const struct iwl_cfg iwl3165_2ac_cfg = { + .name = "Intel(R) Dual Band Wireless AC 3165", + .fw_name_pre = IWL3165_FW_PRE, + IWL_DEVICE_7000, + .ht_params = &iwl7000_ht_params, + .nvm_ver = IWL3165_NVM_VERSION, + .nvm_calib_ver = IWL3165_TX_POWER_VERSION, + .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, +}; + const struct iwl_cfg iwl7265_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 7265", .fw_name_pre = IWL7265_FW_PRE, @@ -245,4 +260,5 @@ const struct iwl_cfg iwl7265_n_cfg = { MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK)); +MODULE_FIRMWARE(IWL3165_MODULE_FIRMWARE(IWL3160_UCODE_API_OK)); MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h index 8da596db9abe..3d7cc37420ae 100644 --- a/drivers/net/wireless/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/iwlwifi/iwl-config.h @@ -120,6 +120,8 @@ enum iwl_led_mode { #define IWL_LONG_WD_TIMEOUT 10000 #define IWL_MAX_WD_TIMEOUT 120000 +#define IWL_DEFAULT_MAX_TX_POWER 22 + /* Antenna presence definitions */ #define ANT_NONE 0x0 #define ANT_A BIT(0) @@ -335,6 +337,7 @@ extern const struct iwl_cfg iwl7260_n_cfg; extern const struct iwl_cfg iwl3160_2ac_cfg; extern const struct iwl_cfg iwl3160_2n_cfg; extern const struct iwl_cfg iwl3160_n_cfg; +extern const struct iwl_cfg iwl3165_2ac_cfg; extern const struct iwl_cfg iwl7265_2ac_cfg; extern const struct iwl_cfg iwl7265_2n_cfg; extern const struct iwl_cfg iwl7265_n_cfg; diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c index 018af2957d3b..354255f08754 100644 --- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c @@ -146,8 +146,6 @@ static const u8 iwl_nvm_channels_family_8000[] = { #define LAST_2GHZ_HT_PLUS 9 #define LAST_5GHZ_HT 161 -#define DEFAULT_MAX_TX_POWER 16 - /* rate data (static) */ static struct ieee80211_rate iwl_cfg80211_rates[] = { { .bitrate = 1 * 10, .hw_value = 0, .hw_value_short = 0, }, @@ -295,7 +293,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, * Default value - highest tx power value. 
max_power * is not used in mvm, and is used for backwards compatibility */ - channel->max_power = DEFAULT_MAX_TX_POWER; + channel->max_power = IWL_DEFAULT_MAX_TX_POWER; is_5ghz = channel->band == IEEE80211_BAND_5GHZ; IWL_DEBUG_EEPROM(dev, "Ch. %d [%sGHz] %s%s%s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n", diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c index 2291bbcaaeab..ce71625f497f 100644 --- a/drivers/net/wireless/iwlwifi/mvm/coex.c +++ b/drivers/net/wireless/iwlwifi/mvm/coex.c @@ -585,8 +585,6 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm) lockdep_assert_held(&mvm->mutex); if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) { - u32 mode; - switch (mvm->bt_force_ant_mode) { case BT_FORCE_ANT_BT: mode = BT_COEX_BT; @@ -756,7 +754,8 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, struct iwl_bt_iterator_data *data = _data; struct iwl_mvm *mvm = data->mvm; struct ieee80211_chanctx_conf *chanctx_conf; - enum ieee80211_smps_mode smps_mode; + /* default smps_mode is AUTOMATIC - only used for client modes */ + enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_AUTOMATIC; u32 bt_activity_grading; int ave_rssi; @@ -764,8 +763,6 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, switch (vif->type) { case NL80211_IFTYPE_STATION: - /* default smps_mode for BSS / P2P client is AUTOMATIC */ - smps_mode = IEEE80211_SMPS_AUTOMATIC; break; case NL80211_IFTYPE_AP: if (!mvmvif->ap_ibss_active) @@ -797,7 +794,7 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, else if (bt_activity_grading >= BT_LOW_TRAFFIC) smps_mode = IEEE80211_SMPS_DYNAMIC; - /* relax SMPS contraints for next association */ + /* relax SMPS constraints for next association */ if (!vif->bss_conf.assoc) smps_mode = IEEE80211_SMPS_AUTOMATIC; diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c index 2e90ff795c13..87e517bffedc 100644 --- a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c +++ b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c @@ -74,8 +74,7 @@ static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm, switch (param) { case MVM_DEBUGFS_PM_KEEP_ALIVE: { - struct ieee80211_hw *hw = mvm->hw; - int dtimper = hw->conf.ps_dtim_period ?: 1; + int dtimper = vif->bss_conf.dtim_period ?: 1; int dtimper_msec = dtimper * vif->bss_conf.beacon_int; IWL_DEBUG_POWER(mvm, "debugfs: set keep_alive= %d sec\n", val); diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h index 95f5b3274efb..9a922f3bd16b 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h @@ -1563,14 +1563,14 @@ enum iwl_sf_scenario { /** * Smart Fifo configuration command. - * @state: smart fifo state, types listed in iwl_sf_sate. + * @state: smart fifo state, types listed in enum %iwl_sf_sate. * @watermark: Minimum allowed availabe free space in RXF for transient state. * @long_delay_timeouts: aging and idle timer values for each scenario * in long delay state. * @full_on_timeouts: timer values for each scenario in full on state. 
*/ struct iwl_sf_cfg_cmd { - enum iwl_sf_state state; + __le32 state; __le32 watermark[SF_TRANSIENT_STATES_NUMBER]; __le32 long_delay_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES]; __le32 full_on_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES]; diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c index 0e523e28cabf..8242e689ddb1 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c @@ -721,11 +721,6 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm, !force_assoc_off) { u32 dtim_offs; - /* Allow beacons to pass through as long as we are not - * associated, or we do not have dtim period information. - */ - cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON); - /* * The DTIM count counts down, so when it is N that means N * more beacon intervals happen until the DTIM TBTT. Therefore @@ -759,6 +754,11 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm, ctxt_sta->is_assoc = cpu_to_le32(1); } else { ctxt_sta->is_assoc = cpu_to_le32(0); + + /* Allow beacons to pass through as long as we are not + * associated, or we do not have dtim period information. + */ + cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON); } ctxt_sta->bi = cpu_to_le32(vif->bss_conf.beacon_int); diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index 7c8796584c25..cdc272d776e7 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@ -396,12 +396,14 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) else hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; - /* TODO: enable that only for firmwares that don't crash */ - /* hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN; */ - hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX; - hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES; - /* we create the 802.11 header and zero length SSID IE. */ - hw->wiphy->max_sched_scan_ie_len = SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2; + if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 10) { + hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN; + hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX; + hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES; + /* we create the 802.11 header and zero length SSID IE. 
*/ + hw->wiphy->max_sched_scan_ie_len = + SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2; + } hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN | NL80211_FEATURE_LOW_PRIORITY_SCAN | @@ -1524,11 +1526,6 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, */ iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data); - } else if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | - BSS_CHANGED_QOS)) { - ret = iwl_mvm_power_update_mac(mvm); - if (ret) - IWL_ERR(mvm, "failed to update power mode\n"); } if (changes & BSS_CHANGED_BEACON_INFO) { @@ -1536,6 +1533,12 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); } + if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS)) { + ret = iwl_mvm_power_update_mac(mvm); + if (ret) + IWL_ERR(mvm, "failed to update power mode\n"); + } + if (changes & BSS_CHANGED_TXPOWER) { IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n", bss_conf->txpower); diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c index 2b2d10800a55..d9769a23c68b 100644 --- a/drivers/net/wireless/iwlwifi/mvm/power.c +++ b/drivers/net/wireless/iwlwifi/mvm/power.c @@ -281,7 +281,6 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mac_power_cmd *cmd) { - struct ieee80211_hw *hw = mvm->hw; struct ieee80211_chanctx_conf *chanctx_conf; struct ieee80211_channel *chan; int dtimper, dtimper_msec; @@ -292,7 +291,7 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)); - dtimper = hw->conf.ps_dtim_period ?: 1; + dtimper = vif->bss_conf.dtim_period; /* * Regardless of power management state the driver must set @@ -885,7 +884,7 @@ int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm, iwl_mvm_power_build_cmd(mvm, vif, &cmd); if (enable) { /* configure skip over dtim up to 300 msec */ - int dtimper = mvm->hw->conf.ps_dtim_period ?: 1; + int dtimper = vif->bss_conf.dtim_period ?: 1; int dtimper_msec = dtimper * vif->bss_conf.beacon_int; if (WARN_ON(!dtimper_msec)) diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c index 4b98987fc413..bf5cd8c8b0f7 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/iwlwifi/mvm/rx.c @@ -149,13 +149,13 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm, le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_ENERGY_ANT_ABC_IDX]); energy_a = (val & IWL_RX_INFO_ENERGY_ANT_A_MSK) >> IWL_RX_INFO_ENERGY_ANT_A_POS; - energy_a = energy_a ? -energy_a : -256; + energy_a = energy_a ? -energy_a : S8_MIN; energy_b = (val & IWL_RX_INFO_ENERGY_ANT_B_MSK) >> IWL_RX_INFO_ENERGY_ANT_B_POS; - energy_b = energy_b ? -energy_b : -256; + energy_b = energy_b ? -energy_b : S8_MIN; energy_c = (val & IWL_RX_INFO_ENERGY_ANT_C_MSK) >> IWL_RX_INFO_ENERGY_ANT_C_POS; - energy_c = energy_c ? -energy_c : -256; + energy_c = energy_c ? 
-energy_c : S8_MIN; max_energy = max(energy_a, energy_b); max_energy = max(max_energy, energy_c); diff --git a/drivers/net/wireless/iwlwifi/mvm/sf.c b/drivers/net/wireless/iwlwifi/mvm/sf.c index 7edfd15efc9d..e843b67f2201 100644 --- a/drivers/net/wireless/iwlwifi/mvm/sf.c +++ b/drivers/net/wireless/iwlwifi/mvm/sf.c @@ -172,7 +172,7 @@ static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id, enum iwl_sf_state new_state) { struct iwl_sf_cfg_cmd sf_cmd = { - .state = new_state, + .state = cpu_to_le32(new_state), }; struct ieee80211_sta *sta; int ret = 0; diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c index dbc870713882..9ee410bf6da2 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/iwlwifi/mvm/tx.c @@ -168,10 +168,14 @@ static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, /* * for data packets, rate info comes from the table inside the fw. This - * table is controlled by LINK_QUALITY commands + * table is controlled by LINK_QUALITY commands. Exclude ctrl port + * frames like EAPOLs which should be treated as mgmt frames. This + * avoids them being sent initially in high rates which increases the + * chances for completion of the 4-Way handshake. */ - if (ieee80211_is_data(fc) && sta) { + if (ieee80211_is_data(fc) && sta && + !(info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO)) { tx_cmd->initial_rate_index = 0; tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE); return; diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c index f0e722ced080..073a68b97a72 100644 --- a/drivers/net/wireless/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/iwlwifi/pcie/drv.c @@ -352,11 +352,17 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x08B3, 0x8060, iwl3160_2n_cfg)}, {IWL_PCI_DEVICE(0x08B3, 0x8062, iwl3160_n_cfg)}, {IWL_PCI_DEVICE(0x08B4, 0x8270, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B4, 0x8370, iwl3160_2ac_cfg)}, + {IWL_PCI_DEVICE(0x08B4, 0x8272, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B3, 0x8470, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B3, 0x8570, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B3, 0x1070, iwl3160_2ac_cfg)}, {IWL_PCI_DEVICE(0x08B3, 0x1170, iwl3160_2ac_cfg)}, +/* 3165 Series */ + {IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)}, + {IWL_PCI_DEVICE(0x3165, 0x4210, iwl3165_2ac_cfg)}, + /* 7265 Series */ {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5110, iwl7265_2ac_cfg)}, @@ -378,6 +384,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9012, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x900A, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)}, |
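
A few of the recurring patterns in the hunks above are worth spelling out with small standalone sketches. None of the code below is the drivers' actual source; each block is an illustration under the assumptions stated in its lead-in.

The brcmfmac bcdc.h and msgbuf.h hunks show the usual way of making a Kconfig-selected protocol optional: the header exposes real prototypes when the option is set and static inline no-op stubs when it is not, so common code never needs its own #ifdef. A minimal userspace sketch of that pattern follows; CONFIG_FOO_PROTO, struct foo_pub and the foo_proto_*() names are invented for illustration.

#include <stdio.h>

struct foo_pub { int attached; };

#ifdef CONFIG_FOO_PROTO
/* Real implementation, built only when the Kconfig option is selected. */
static int foo_proto_attach(struct foo_pub *pub)
{
	pub->attached = 1;
	return 0;
}
static void foo_proto_detach(struct foo_pub *pub)
{
	pub->attached = 0;
}
#else
/* Compiled-out variant: zero-cost stubs keep callers free of #ifdefs. */
static inline int foo_proto_attach(struct foo_pub *pub) { (void)pub; return 0; }
static inline void foo_proto_detach(struct foo_pub *pub) { (void)pub; }
#endif

int main(void)
{
	struct foo_pub pub = { 0 };

	/* Succeeds either way; only the selected build does real work. */
	if (foo_proto_attach(&pub) == 0)
		printf("attached=%d\n", pub.attached);
	foo_proto_detach(&pub);
	return 0;
}

Building with -DCONFIG_FOO_PROTO selects the real functions; without it the stubs compile away, which mirrors how the stubbed brcmf_proto_bcdc_attach()/brcmf_proto_msgbuf_attach() variants behave when the corresponding bus support is not configured.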
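
The fweh.c change stops honouring BRCMF_E_IF_FLAG_NOIF for the one interface event that announces the P2P Device: firmware flags it as "no interface", yet the host must still handle it, and the only reliable marker is the (ifidx == 0, bssidx == 1) pair. A compact sketch of that predicate, using an illustrative flag value and a hypothetical helper name rather than the driver's own:

#include <stdbool.h>
#include <stdio.h>

#define E_IF_FLAG_NOIF	0x01	/* illustrative value, not the driver's */

struct if_event {
	unsigned char ifidx;
	unsigned char bssidx;
	unsigned char flags;
};

/* The P2P Device event arrives with ifidx 0 / bssidx 1; only for that
 * combination is the firmware's NOIF hint overridden. */
static bool ifevent_can_be_ignored(const struct if_event *ev)
{
	bool is_p2pdev = (ev->ifidx == 0 && ev->bssidx == 1);

	return !is_p2pdev && (ev->flags & E_IF_FLAG_NOIF);
}

int main(void)
{
	struct if_event p2pdev = { .ifidx = 0, .bssidx = 1, .flags = E_IF_FLAG_NOIF };
	struct if_event other  = { .ifidx = 1, .bssidx = 0, .flags = E_IF_FLAG_NOIF };

	printf("p2pdev ignored: %d\n", ifevent_can_be_ignored(&p2pdev));	/* 0 */
	printf("other ignored:  %d\n", ifevent_can_be_ignored(&other));		/* 1 */
	return 0;
}

The same (ifidx, bssidx) knowledge is what lets the later hunks tolerate a NULL ifp for events the driver deliberately does not back with a net_device.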
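
The iwl_sf_cfg_cmd hunks change the state field from a host-endian enum to __le32 and add the matching cpu_to_le32() at the fill site in iwl_mvm_sf_config(). Structures handed to firmware have a fixed byte order, so a raw enum assignment is only correct on little-endian hosts. A userspace sketch of the same idea using htole32() from <endian.h> (glibc/musl); the wire_cmd struct and the enum values are illustrative only:

#define _DEFAULT_SOURCE
#include <endian.h>	/* htole32() */
#include <stdint.h>
#include <stdio.h>

enum sf_state { SF_LONG_DELAY_ON = 0, SF_FULL_ON = 1, SF_UNINIT = 2 };

/* Firmware/wire structure: every multi-byte field is little-endian. */
struct wire_cmd {
	uint32_t state;		/* little-endian on the wire */
};

static void fill_cmd(struct wire_cmd *cmd, enum sf_state s)
{
	/* Convert explicitly; a plain "cmd->state = s" happens to work on
	 * little-endian hosts and silently breaks on big-endian ones. */
	cmd->state = htole32((uint32_t)s);
}

int main(void)
{
	struct wire_cmd cmd;

	fill_cmd(&cmd, SF_FULL_ON);
	/* The low byte comes first regardless of host endianness. */
	printf("first byte on the wire: 0x%02x\n",
	       ((unsigned char *)&cmd.state)[0]);	/* 0x01 */
	return 0;
}

Declaring the field as __le32 in the kernel additionally lets sparse flag any assignment that forgets the conversion.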
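
The rx.c hunk replaces the old -256 "no energy reading" sentinel with S8_MIN (-128). The chosen maximum eventually lands in a signed 8-bit signal field (mac80211's rx_status.signal is an s8), and -256 truncated to s8 wraps to 0 dBm, an absurdly strong reading, whereas -128 stays a sane floor. A quick demonstration of the truncation, with C99 fixed-width types standing in for the kernel's s8:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int no_reading_old = -256;	/* old sentinel */
	int no_reading_new = INT8_MIN;	/* S8_MIN equivalent, -128 */

	/* Out-of-range conversion is implementation-defined; on the usual
	 * two's-complement ABIs -256 wraps to 0 while -128 is preserved. */
	int8_t sig_old = (int8_t)no_reading_old;
	int8_t sig_new = (int8_t)no_reading_new;

	printf("old sentinel as s8: %d dBm\n", sig_old);	/* 0    */
	printf("new sentinel as s8: %d dBm\n", sig_new);	/* -128 */
	return 0;
}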