author | Ingo Molnar <mingo@kernel.org> | 2015-03-31 09:08:13 +0200
committer | Ingo Molnar <mingo@kernel.org> | 2015-03-31 09:08:13 +0200
commit | c5e77f5216abdd1d98e6d14d9a3eb4e88d80011a (patch)
tree | a542b5bb7d96a8f37c4d5e3319086064448ed67b /drivers/net
parent | de81e64b250d3865a75d221a80b4311e3273670a (diff)
parent | e42391cd048809d903291d07f86ed3934ce138e9 (diff)
download | linux-c5e77f5216abdd1d98e6d14d9a3eb4e88d80011a.tar.bz2
Merge tag 'v4.0-rc6' into timers/core, before applying new patches
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'drivers/net')
97 files changed, 1469 insertions(+), 849 deletions(-)
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 84673ebcf428..df51d6025a90 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -157,7 +157,7 @@ config IPVLAN making it transparent to the connected L2 switch. Ipvlan devices can be added using the "ip" command from the - iproute2 package starting with the iproute2-X.Y.ZZ release: + iproute2 package starting with the iproute2-3.19 release: "ip link add link <main-dev> [ NAME ] type ipvlan" diff --git a/drivers/net/appletalk/Kconfig b/drivers/net/appletalk/Kconfig index 4ce6ca5f3d36..dc6b78e5342f 100644 --- a/drivers/net/appletalk/Kconfig +++ b/drivers/net/appletalk/Kconfig @@ -40,7 +40,7 @@ config DEV_APPLETALK config LTPC tristate "Apple/Farallon LocalTalk PC support" - depends on DEV_APPLETALK && (ISA || EISA) && ISA_DMA_API + depends on DEV_APPLETALK && (ISA || EISA) && ISA_DMA_API && VIRT_TO_BUS help This allows you to use the AppleTalk PC card to connect to LocalTalk networks. The card is also known as the Farallon PhoneNet PC card. diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig index 98d73aab52fe..58808f651452 100644 --- a/drivers/net/can/Kconfig +++ b/drivers/net/can/Kconfig @@ -131,7 +131,7 @@ config CAN_RCAR config CAN_XILINXCAN tristate "Xilinx CAN" - depends on ARCH_ZYNQ || MICROBLAZE || COMPILE_TEST + depends on ARCH_ZYNQ || ARM64 || MICROBLAZE || COMPILE_TEST depends on COMMON_CLK && HAS_IOMEM ---help--- Xilinx CAN driver. This driver supports both soft AXI CAN IP and diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index 3c82e02e3dae..b0f69248cb71 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c @@ -579,6 +579,10 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf) skb->pkt_type = PACKET_BROADCAST; skb->ip_summed = CHECKSUM_UNNECESSARY; + skb_reset_mac_header(skb); + skb_reset_network_header(skb); + skb_reset_transport_header(skb); + can_skb_reserve(skb); can_skb_prv(skb)->ifindex = dev->ifindex; @@ -603,6 +607,10 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev, skb->pkt_type = PACKET_BROADCAST; skb->ip_summed = CHECKSUM_UNNECESSARY; + skb_reset_mac_header(skb); + skb_reset_network_header(skb); + skb_reset_transport_header(skb); + can_skb_reserve(skb); can_skb_prv(skb)->ifindex = dev->ifindex; diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c index 2928f7003041..e97a08ce0b90 100644 --- a/drivers/net/can/usb/kvaser_usb.c +++ b/drivers/net/can/usb/kvaser_usb.c @@ -14,6 +14,8 @@ * Copyright (C) 2015 Valeo S.A. */ +#include <linux/spinlock.h> +#include <linux/kernel.h> #include <linux/completion.h> #include <linux/module.h> #include <linux/netdevice.h> @@ -466,10 +468,11 @@ struct kvaser_usb { struct kvaser_usb_net_priv { struct can_priv can; - atomic_t active_tx_urbs; - struct usb_anchor tx_submitted; + spinlock_t tx_contexts_lock; + int active_tx_contexts; struct kvaser_usb_tx_urb_context tx_contexts[MAX_TX_URBS]; + struct usb_anchor tx_submitted; struct completion start_comp, stop_comp; struct kvaser_usb *dev; @@ -584,8 +587,15 @@ static int kvaser_usb_wait_msg(const struct kvaser_usb *dev, u8 id, while (pos <= actual_len - MSG_HEADER_LEN) { tmp = buf + pos; - if (!tmp->len) - break; + /* Handle messages crossing the USB endpoint max packet + * size boundary. Check kvaser_usb_read_bulk_callback() + * for further details. 
+ */ + if (tmp->len == 0) { + pos = round_up(pos, + dev->bulk_in->wMaxPacketSize); + continue; + } if (pos + tmp->len > actual_len) { dev_err(dev->udev->dev.parent, @@ -686,6 +696,7 @@ static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev, struct kvaser_usb_net_priv *priv; struct sk_buff *skb; struct can_frame *cf; + unsigned long flags; u8 channel, tid; channel = msg->u.tx_acknowledge_header.channel; @@ -729,12 +740,15 @@ static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev, stats->tx_packets++; stats->tx_bytes += context->dlc; - can_get_echo_skb(priv->netdev, context->echo_index); - context->echo_index = MAX_TX_URBS; - atomic_dec(&priv->active_tx_urbs); + spin_lock_irqsave(&priv->tx_contexts_lock, flags); + can_get_echo_skb(priv->netdev, context->echo_index); + context->echo_index = MAX_TX_URBS; + --priv->active_tx_contexts; netif_wake_queue(priv->netdev); + + spin_unlock_irqrestore(&priv->tx_contexts_lock, flags); } static void kvaser_usb_simple_msg_callback(struct urb *urb) @@ -787,7 +801,6 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv, netdev_err(netdev, "Error transmitting URB\n"); usb_unanchor_urb(urb); usb_free_urb(urb); - kfree(buf); return err; } @@ -796,17 +809,6 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv, return 0; } -static void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv) -{ - int i; - - usb_kill_anchored_urbs(&priv->tx_submitted); - atomic_set(&priv->active_tx_urbs, 0); - - for (i = 0; i < MAX_TX_URBS; i++) - priv->tx_contexts[i].echo_index = MAX_TX_URBS; -} - static void kvaser_usb_rx_error_update_can_state(struct kvaser_usb_net_priv *priv, const struct kvaser_usb_error_summary *es, struct can_frame *cf) @@ -1317,8 +1319,19 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb) while (pos <= urb->actual_length - MSG_HEADER_LEN) { msg = urb->transfer_buffer + pos; - if (!msg->len) - break; + /* The Kvaser firmware can only read and write messages that + * does not cross the USB's endpoint wMaxPacketSize boundary. + * If a follow-up command crosses such boundary, firmware puts + * a placeholder zero-length command in its place then aligns + * the real command to the next max packet size. + * + * Handle such cases or we're going to miss a significant + * number of events in case of a heavy rx load on the bus. + */ + if (msg->len == 0) { + pos = round_up(pos, dev->bulk_in->wMaxPacketSize); + continue; + } if (pos + msg->len > urb->actual_length) { dev_err(dev->udev->dev.parent, "Format error\n"); @@ -1326,7 +1339,6 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb) } kvaser_usb_handle_message(dev, msg); - pos += msg->len; } @@ -1498,6 +1510,24 @@ error: return err; } +static void kvaser_usb_reset_tx_urb_contexts(struct kvaser_usb_net_priv *priv) +{ + int i; + + priv->active_tx_contexts = 0; + for (i = 0; i < MAX_TX_URBS; i++) + priv->tx_contexts[i].echo_index = MAX_TX_URBS; +} + +/* This method might sleep. Do not call it in the atomic context + * of URB completions. 
+ */ +static void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv) +{ + usb_kill_anchored_urbs(&priv->tx_submitted); + kvaser_usb_reset_tx_urb_contexts(priv); +} + static void kvaser_usb_unlink_all_urbs(struct kvaser_usb *dev) { int i; @@ -1615,9 +1645,9 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, struct urb *urb; void *buf; struct kvaser_msg *msg; - int i, err; - int ret = NETDEV_TX_OK; + int i, err, ret = NETDEV_TX_OK; u8 *msg_tx_can_flags = NULL; /* GCC */ + unsigned long flags; if (can_dropped_invalid_skb(netdev, skb)) return NETDEV_TX_OK; @@ -1634,7 +1664,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, if (!buf) { stats->tx_dropped++; dev_kfree_skb(skb); - goto nobufmem; + goto freeurb; } msg = buf; @@ -1671,22 +1701,32 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, if (cf->can_id & CAN_RTR_FLAG) *msg_tx_can_flags |= MSG_FLAG_REMOTE_FRAME; + spin_lock_irqsave(&priv->tx_contexts_lock, flags); for (i = 0; i < ARRAY_SIZE(priv->tx_contexts); i++) { if (priv->tx_contexts[i].echo_index == MAX_TX_URBS) { context = &priv->tx_contexts[i]; + + context->echo_index = i; + can_put_echo_skb(skb, netdev, context->echo_index); + ++priv->active_tx_contexts; + if (priv->active_tx_contexts >= MAX_TX_URBS) + netif_stop_queue(netdev); + break; } } + spin_unlock_irqrestore(&priv->tx_contexts_lock, flags); /* This should never happen; it implies a flow control bug */ if (!context) { netdev_warn(netdev, "cannot find free context\n"); + + kfree(buf); ret = NETDEV_TX_BUSY; - goto releasebuf; + goto freeurb; } context->priv = priv; - context->echo_index = i; context->dlc = cf->can_dlc; msg->u.tx_can.tid = context->echo_index; @@ -1698,18 +1738,17 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, kvaser_usb_write_bulk_callback, context); usb_anchor_urb(urb, &priv->tx_submitted); - can_put_echo_skb(skb, netdev, context->echo_index); - - atomic_inc(&priv->active_tx_urbs); - - if (atomic_read(&priv->active_tx_urbs) >= MAX_TX_URBS) - netif_stop_queue(netdev); - err = usb_submit_urb(urb, GFP_ATOMIC); if (unlikely(err)) { + spin_lock_irqsave(&priv->tx_contexts_lock, flags); + can_free_echo_skb(netdev, context->echo_index); + context->echo_index = MAX_TX_URBS; + --priv->active_tx_contexts; + netif_wake_queue(netdev); + + spin_unlock_irqrestore(&priv->tx_contexts_lock, flags); - atomic_dec(&priv->active_tx_urbs); usb_unanchor_urb(urb); stats->tx_dropped++; @@ -1719,16 +1758,12 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, else netdev_warn(netdev, "Failed tx_urb %d\n", err); - goto releasebuf; + goto freeurb; } - usb_free_urb(urb); - - return NETDEV_TX_OK; + ret = NETDEV_TX_OK; -releasebuf: - kfree(buf); -nobufmem: +freeurb: usb_free_urb(urb); return ret; } @@ -1840,7 +1875,7 @@ static int kvaser_usb_init_one(struct usb_interface *intf, struct kvaser_usb *dev = usb_get_intfdata(intf); struct net_device *netdev; struct kvaser_usb_net_priv *priv; - int i, err; + int err; err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, channel); if (err) @@ -1854,19 +1889,17 @@ static int kvaser_usb_init_one(struct usb_interface *intf, priv = netdev_priv(netdev); + init_usb_anchor(&priv->tx_submitted); init_completion(&priv->start_comp); init_completion(&priv->stop_comp); - init_usb_anchor(&priv->tx_submitted); - atomic_set(&priv->active_tx_urbs, 0); - - for (i = 0; i < ARRAY_SIZE(priv->tx_contexts); i++) - priv->tx_contexts[i].echo_index = MAX_TX_URBS; - priv->dev = dev; priv->netdev = netdev; priv->channel = channel; + 
spin_lock_init(&priv->tx_contexts_lock); + kvaser_usb_reset_tx_urb_contexts(priv); + priv->can.state = CAN_STATE_STOPPED; priv->can.clock.freq = CAN_USB_CLOCK; priv->can.bittiming_const = &kvaser_usb_bittiming_const; diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c index 962c3f027383..0bac0f14edc3 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c @@ -879,6 +879,10 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev) pdev->usb_if = ppdev->usb_if; pdev->cmd_buffer_addr = ppdev->cmd_buffer_addr; + + /* do a copy of the ctrlmode[_supported] too */ + dev->can.ctrlmode = ppdev->dev.can.ctrlmode; + dev->can.ctrlmode_supported = ppdev->dev.can.ctrlmode_supported; } pdev->usb_if->dev[dev->ctrl_idx] = dev; diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h index ee9f650d5026..7b7053d3c5fa 100644 --- a/drivers/net/dsa/bcm_sf2.h +++ b/drivers/net/dsa/bcm_sf2.h @@ -105,8 +105,8 @@ static inline u64 name##_readq(struct bcm_sf2_priv *priv, u32 off) \ { \ u32 indir, dir; \ spin_lock(&priv->indir_lock); \ - indir = reg_readl(priv, REG_DIR_DATA_READ); \ dir = __raw_readl(priv->name + off); \ + indir = reg_readl(priv, REG_DIR_DATA_READ); \ spin_unlock(&priv->indir_lock); \ return (u64)indir << 32 | dir; \ } \ diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c index 7769c05543f1..ec6eac1f8c95 100644 --- a/drivers/net/ethernet/8390/axnet_cs.c +++ b/drivers/net/ethernet/8390/axnet_cs.c @@ -484,11 +484,8 @@ static int axnet_open(struct net_device *dev) link->open++; info->link_status = 0x00; - init_timer(&info->watchdog); - info->watchdog.function = ei_watchdog; - info->watchdog.data = (u_long)dev; - info->watchdog.expires = jiffies + HZ; - add_timer(&info->watchdog); + setup_timer(&info->watchdog, ei_watchdog, (u_long)dev); + mod_timer(&info->watchdog, jiffies + HZ); return ax_open(dev); } /* axnet_open */ diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c index 9fb7b9d4fd6c..2777289a26c0 100644 --- a/drivers/net/ethernet/8390/pcnet_cs.c +++ b/drivers/net/ethernet/8390/pcnet_cs.c @@ -918,11 +918,8 @@ static int pcnet_open(struct net_device *dev) info->phy_id = info->eth_phy; info->link_status = 0x00; - init_timer(&info->watchdog); - info->watchdog.function = ei_watchdog; - info->watchdog.data = (u_long)dev; - info->watchdog.expires = jiffies + HZ; - add_timer(&info->watchdog); + setup_timer(&info->watchdog, ei_watchdog, (u_long)dev); + mod_timer(&info->watchdog, jiffies + HZ); return ei_open(dev); } /* pcnet_open */ diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index 760c72c6e2ac..6725dc00750b 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c @@ -376,7 +376,8 @@ static int tse_rx(struct altera_tse_private *priv, int limit) u16 pktlength; u16 pktstatus; - while ((rxstatus = priv->dmaops->get_rx_status(priv)) != 0) { + while (((rxstatus = priv->dmaops->get_rx_status(priv)) != 0) && + (count < limit)) { pktstatus = rxstatus >> 16; pktlength = rxstatus & 0xffff; @@ -491,28 +492,27 @@ static int tse_poll(struct napi_struct *napi, int budget) struct altera_tse_private *priv = container_of(napi, struct altera_tse_private, napi); int rxcomplete = 0; - int txcomplete = 0; unsigned long int flags; - txcomplete = tse_tx_complete(priv); + tse_tx_complete(priv); rxcomplete = tse_rx(priv, budget); - if 
(rxcomplete >= budget || txcomplete > 0) - return rxcomplete; + if (rxcomplete < budget) { - napi_gro_flush(napi, false); - __napi_complete(napi); + napi_gro_flush(napi, false); + __napi_complete(napi); - netdev_dbg(priv->dev, - "NAPI Complete, did %d packets with budget %d\n", - txcomplete+rxcomplete, budget); + netdev_dbg(priv->dev, + "NAPI Complete, did %d packets with budget %d\n", + rxcomplete, budget); - spin_lock_irqsave(&priv->rxdma_irq_lock, flags); - priv->dmaops->enable_rxirq(priv); - priv->dmaops->enable_txirq(priv); - spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags); - return rxcomplete + txcomplete; + spin_lock_irqsave(&priv->rxdma_irq_lock, flags); + priv->dmaops->enable_rxirq(priv); + priv->dmaops->enable_txirq(priv); + spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags); + } + return rxcomplete; } /* DMA TX & RX FIFO interrupt routing @@ -521,7 +521,6 @@ static irqreturn_t altera_isr(int irq, void *dev_id) { struct net_device *dev = dev_id; struct altera_tse_private *priv; - unsigned long int flags; if (unlikely(!dev)) { pr_err("%s: invalid dev pointer\n", __func__); @@ -529,20 +528,20 @@ static irqreturn_t altera_isr(int irq, void *dev_id) } priv = netdev_priv(dev); - /* turn off desc irqs and enable napi rx */ - spin_lock_irqsave(&priv->rxdma_irq_lock, flags); + spin_lock(&priv->rxdma_irq_lock); + /* reset IRQs */ + priv->dmaops->clear_rxirq(priv); + priv->dmaops->clear_txirq(priv); + spin_unlock(&priv->rxdma_irq_lock); if (likely(napi_schedule_prep(&priv->napi))) { + spin_lock(&priv->rxdma_irq_lock); priv->dmaops->disable_rxirq(priv); priv->dmaops->disable_txirq(priv); + spin_unlock(&priv->rxdma_irq_lock); __napi_schedule(&priv->napi); } - /* reset IRQs */ - priv->dmaops->clear_rxirq(priv); - priv->dmaops->clear_txirq(priv); - - spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags); return IRQ_HANDLED; } @@ -1399,7 +1398,7 @@ static int altera_tse_probe(struct platform_device *pdev) } if (of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth", - &priv->rx_fifo_depth)) { + &priv->tx_fifo_depth)) { dev_err(&pdev->dev, "cannot obtain tx-fifo-depth\n"); ret = -ENXIO; goto err_free_netdev; diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c index 11d6e6561df1..15a8190a6f75 100644 --- a/drivers/net/ethernet/amd/pcnet32.c +++ b/drivers/net/ethernet/amd/pcnet32.c @@ -1543,7 +1543,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) { struct pcnet32_private *lp; int i, media; - int fdx, mii, fset, dxsuflo; + int fdx, mii, fset, dxsuflo, sram; int chip_version; char *chipname; struct net_device *dev; @@ -1580,7 +1580,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) } /* initialize variables */ - fdx = mii = fset = dxsuflo = 0; + fdx = mii = fset = dxsuflo = sram = 0; chip_version = (chip_version >> 12) & 0xffff; switch (chip_version) { @@ -1613,6 +1613,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) chipname = "PCnet/FAST III 79C973"; /* PCI */ fdx = 1; mii = 1; + sram = 1; break; case 0x2626: chipname = "PCnet/Home 79C978"; /* PCI */ @@ -1636,6 +1637,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) chipname = "PCnet/FAST III 79C975"; /* PCI */ fdx = 1; mii = 1; + sram = 1; break; case 0x2628: chipname = "PCnet/PRO 79C976"; @@ -1664,6 +1666,31 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) dxsuflo = 1; } + /* + * The Am79C973/Am79C975 controllers come with 12K of SRAM + * which we can use for the Tx/Rx buffers 
but most importantly, + * the use of SRAM allow us to use the BCR18:NOUFLO bit to avoid + * Tx fifo underflows. + */ + if (sram) { + /* + * The SRAM is being configured in two steps. First we + * set the SRAM size in the BCR25:SRAM_SIZE bits. According + * to the datasheet, each bit corresponds to a 512-byte + * page so we can have at most 24 pages. The SRAM_SIZE + * holds the value of the upper 8 bits of the 16-bit SRAM size. + * The low 8-bits start at 0x00 and end at 0xff. So the + * address range is from 0x0000 up to 0x17ff. Therefore, + * the SRAM_SIZE is set to 0x17. The next step is to set + * the BCR26:SRAM_BND midway through so the Tx and Rx + * buffers can share the SRAM equally. + */ + a->write_bcr(ioaddr, 25, 0x17); + a->write_bcr(ioaddr, 26, 0xc); + /* And finally enable the NOUFLO bit */ + a->write_bcr(ioaddr, 18, a->read_bcr(ioaddr, 18) | (1 << 11)); + } + dev = alloc_etherdev(sizeof(*lp)); if (!dev) { ret = -ENOMEM; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index b93d4404d975..885b02b5be07 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -609,6 +609,68 @@ static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del) } } +static int xgbe_request_irqs(struct xgbe_prv_data *pdata) +{ + struct xgbe_channel *channel; + struct net_device *netdev = pdata->netdev; + unsigned int i; + int ret; + + ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0, + netdev->name, pdata); + if (ret) { + netdev_alert(netdev, "error requesting irq %d\n", + pdata->dev_irq); + return ret; + } + + if (!pdata->per_channel_irq) + return 0; + + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) { + snprintf(channel->dma_irq_name, + sizeof(channel->dma_irq_name) - 1, + "%s-TxRx-%u", netdev_name(netdev), + channel->queue_index); + + ret = devm_request_irq(pdata->dev, channel->dma_irq, + xgbe_dma_isr, 0, + channel->dma_irq_name, channel); + if (ret) { + netdev_alert(netdev, "error requesting irq %d\n", + channel->dma_irq); + goto err_irq; + } + } + + return 0; + +err_irq: + /* Using an unsigned int, 'i' will go to UINT_MAX and exit */ + for (i--, channel--; i < pdata->channel_count; i--, channel--) + devm_free_irq(pdata->dev, channel->dma_irq, channel); + + devm_free_irq(pdata->dev, pdata->dev_irq, pdata); + + return ret; +} + +static void xgbe_free_irqs(struct xgbe_prv_data *pdata) +{ + struct xgbe_channel *channel; + unsigned int i; + + devm_free_irq(pdata->dev, pdata->dev_irq, pdata); + + if (!pdata->per_channel_irq) + return; + + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) + devm_free_irq(pdata->dev, channel->dma_irq, channel); +} + void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata) { struct xgbe_hw_if *hw_if = &pdata->hw_if; @@ -810,20 +872,20 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller) return -EINVAL; } - phy_stop(pdata->phydev); - spin_lock_irqsave(&pdata->lock, flags); if (caller == XGMAC_DRIVER_CONTEXT) netif_device_detach(netdev); netif_tx_stop_all_queues(netdev); - xgbe_napi_disable(pdata, 0); - /* Powerdown Tx/Rx */ hw_if->powerdown_tx(pdata); hw_if->powerdown_rx(pdata); + xgbe_napi_disable(pdata, 0); + + phy_stop(pdata->phydev); + pdata->power_down = 1; spin_unlock_irqrestore(&pdata->lock, flags); @@ -854,14 +916,14 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller) phy_start(pdata->phydev); - /* Enable Tx/Rx */ + xgbe_napi_enable(pdata, 0); + 
hw_if->powerup_tx(pdata); hw_if->powerup_rx(pdata); if (caller == XGMAC_DRIVER_CONTEXT) netif_device_attach(netdev); - xgbe_napi_enable(pdata, 0); netif_tx_start_all_queues(netdev); spin_unlock_irqrestore(&pdata->lock, flags); @@ -875,6 +937,7 @@ static int xgbe_start(struct xgbe_prv_data *pdata) { struct xgbe_hw_if *hw_if = &pdata->hw_if; struct net_device *netdev = pdata->netdev; + int ret; DBGPR("-->xgbe_start\n"); @@ -884,17 +947,31 @@ static int xgbe_start(struct xgbe_prv_data *pdata) phy_start(pdata->phydev); + xgbe_napi_enable(pdata, 1); + + ret = xgbe_request_irqs(pdata); + if (ret) + goto err_napi; + hw_if->enable_tx(pdata); hw_if->enable_rx(pdata); xgbe_init_tx_timers(pdata); - xgbe_napi_enable(pdata, 1); netif_tx_start_all_queues(netdev); DBGPR("<--xgbe_start\n"); return 0; + +err_napi: + xgbe_napi_disable(pdata, 1); + + phy_stop(pdata->phydev); + + hw_if->exit(pdata); + + return ret; } static void xgbe_stop(struct xgbe_prv_data *pdata) @@ -907,16 +984,21 @@ static void xgbe_stop(struct xgbe_prv_data *pdata) DBGPR("-->xgbe_stop\n"); - phy_stop(pdata->phydev); - netif_tx_stop_all_queues(netdev); - xgbe_napi_disable(pdata, 1); xgbe_stop_tx_timers(pdata); hw_if->disable_tx(pdata); hw_if->disable_rx(pdata); + xgbe_free_irqs(pdata); + + xgbe_napi_disable(pdata, 1); + + phy_stop(pdata->phydev); + + hw_if->exit(pdata); + channel = pdata->channel; for (i = 0; i < pdata->channel_count; i++, channel++) { if (!channel->tx_ring) @@ -931,10 +1013,6 @@ static void xgbe_stop(struct xgbe_prv_data *pdata) static void xgbe_restart_dev(struct xgbe_prv_data *pdata) { - struct xgbe_channel *channel; - struct xgbe_hw_if *hw_if = &pdata->hw_if; - unsigned int i; - DBGPR("-->xgbe_restart_dev\n"); /* If not running, "restart" will happen on open */ @@ -942,19 +1020,10 @@ static void xgbe_restart_dev(struct xgbe_prv_data *pdata) return; xgbe_stop(pdata); - synchronize_irq(pdata->dev_irq); - if (pdata->per_channel_irq) { - channel = pdata->channel; - for (i = 0; i < pdata->channel_count; i++, channel++) - synchronize_irq(channel->dma_irq); - } xgbe_free_tx_data(pdata); xgbe_free_rx_data(pdata); - /* Issue software reset to device */ - hw_if->exit(pdata); - xgbe_start(pdata); DBGPR("<--xgbe_restart_dev\n"); @@ -1283,10 +1352,7 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata, static int xgbe_open(struct net_device *netdev) { struct xgbe_prv_data *pdata = netdev_priv(netdev); - struct xgbe_hw_if *hw_if = &pdata->hw_if; struct xgbe_desc_if *desc_if = &pdata->desc_if; - struct xgbe_channel *channel = NULL; - unsigned int i = 0; int ret; DBGPR("-->xgbe_open\n"); @@ -1329,55 +1395,14 @@ static int xgbe_open(struct net_device *netdev) INIT_WORK(&pdata->restart_work, xgbe_restart); INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp); - /* Request interrupts */ - ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0, - netdev->name, pdata); - if (ret) { - netdev_alert(netdev, "error requesting irq %d\n", - pdata->dev_irq); - goto err_rings; - } - - if (pdata->per_channel_irq) { - channel = pdata->channel; - for (i = 0; i < pdata->channel_count; i++, channel++) { - snprintf(channel->dma_irq_name, - sizeof(channel->dma_irq_name) - 1, - "%s-TxRx-%u", netdev_name(netdev), - channel->queue_index); - - ret = devm_request_irq(pdata->dev, channel->dma_irq, - xgbe_dma_isr, 0, - channel->dma_irq_name, channel); - if (ret) { - netdev_alert(netdev, - "error requesting irq %d\n", - channel->dma_irq); - goto err_irq; - } - } - } - ret = xgbe_start(pdata); if (ret) - goto err_start; + goto err_rings; 
DBGPR("<--xgbe_open\n"); return 0; -err_start: - hw_if->exit(pdata); - -err_irq: - if (pdata->per_channel_irq) { - /* Using an unsigned int, 'i' will go to UINT_MAX and exit */ - for (i--, channel--; i < pdata->channel_count; i--, channel--) - devm_free_irq(pdata->dev, channel->dma_irq, channel); - } - - devm_free_irq(pdata->dev, pdata->dev_irq, pdata); - err_rings: desc_if->free_ring_resources(pdata); @@ -1399,30 +1424,16 @@ err_phy_init: static int xgbe_close(struct net_device *netdev) { struct xgbe_prv_data *pdata = netdev_priv(netdev); - struct xgbe_hw_if *hw_if = &pdata->hw_if; struct xgbe_desc_if *desc_if = &pdata->desc_if; - struct xgbe_channel *channel; - unsigned int i; DBGPR("-->xgbe_close\n"); /* Stop the device */ xgbe_stop(pdata); - /* Issue software reset to device */ - hw_if->exit(pdata); - /* Free the ring descriptors and buffers */ desc_if->free_ring_resources(pdata); - /* Release the interrupts */ - devm_free_irq(pdata->dev, pdata->dev_irq, pdata); - if (pdata->per_channel_irq) { - channel = pdata->channel; - for (i = 0; i < pdata->channel_count; i++, channel++) - devm_free_irq(pdata->dev, channel->dma_irq, channel); - } - /* Free the channel and ring structures */ xgbe_free_channels(pdata); diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c index 869d97fcf781..b927021c6c40 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c @@ -593,7 +593,7 @@ static int xgene_enet_reset(struct xgene_enet_pdata *pdata) if (!xgene_ring_mgr_init(pdata)) return -ENODEV; - if (!efi_enabled(EFI_BOOT)) { + if (pdata->clk) { clk_prepare_enable(pdata->clk); clk_disable_unprepare(pdata->clk); clk_prepare_enable(pdata->clk); diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index 4de62b210c85..635a83be7e5e 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -1025,6 +1025,8 @@ static int xgene_enet_remove(struct platform_device *pdev) #ifdef CONFIG_ACPI static const struct acpi_device_id xgene_enet_acpi_match[] = { { "APMC0D05", }, + { "APMC0D30", }, + { "APMC0D31", }, { } }; MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match); @@ -1033,6 +1035,8 @@ MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match); #ifdef CONFIG_OF static struct of_device_id xgene_enet_of_match[] = { {.compatible = "apm,xgene-enet",}, + {.compatible = "apm,xgene1-sgenet",}, + {.compatible = "apm,xgene1-xgenet",}, {}, }; diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index 21206d33b638..a7f2cc3e485e 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -486,7 +486,7 @@ static int bcm_enet_poll(struct napi_struct *napi, int budget) { struct bcm_enet_priv *priv; struct net_device *dev; - int tx_work_done, rx_work_done; + int rx_work_done; priv = container_of(napi, struct bcm_enet_priv, napi); dev = priv->net_dev; @@ -498,14 +498,14 @@ static int bcm_enet_poll(struct napi_struct *napi, int budget) ENETDMAC_IR, priv->tx_chan); /* reclaim sent skb */ - tx_work_done = bcm_enet_tx_reclaim(dev, 0); + bcm_enet_tx_reclaim(dev, 0); spin_lock(&priv->rx_lock); rx_work_done = bcm_enet_receive_queue(dev, budget); spin_unlock(&priv->rx_lock); - if (rx_work_done >= budget || tx_work_done > 0) { - /* rx/tx queue is not yet empty/clean */ + if (rx_work_done >= budget) { + /* rx queue is not yet 
empty/clean */ return rx_work_done; } diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 5b308a4a4d0e..783543ad1fcf 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -274,9 +274,9 @@ static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = { /* RBUF misc statistics */ STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR), STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR), - STAT_MIB_RX("alloc_rx_buff_failed", mib.alloc_rx_buff_failed), - STAT_MIB_RX("rx_dma_failed", mib.rx_dma_failed), - STAT_MIB_TX("tx_dma_failed", mib.tx_dma_failed), + STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed), + STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed), + STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed), }; #define BCM_SYSPORT_STATS_LEN ARRAY_SIZE(bcm_sysport_gstrings_stats) @@ -345,6 +345,7 @@ static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv) s = &bcm_sysport_gstrings_stats[i]; switch (s->type) { case BCM_SYSPORT_STAT_NETDEV: + case BCM_SYSPORT_STAT_SOFT: continue; case BCM_SYSPORT_STAT_MIB_RX: case BCM_SYSPORT_STAT_MIB_TX: diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h index fc19417d82a5..7e3d87a88c76 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.h +++ b/drivers/net/ethernet/broadcom/bcmsysport.h @@ -570,6 +570,7 @@ enum bcm_sysport_stat_type { BCM_SYSPORT_STAT_RUNT, BCM_SYSPORT_STAT_RXCHK, BCM_SYSPORT_STAT_RBUF, + BCM_SYSPORT_STAT_SOFT, }; /* Macros to help define ethtool statistics */ @@ -590,6 +591,7 @@ enum bcm_sysport_stat_type { #define STAT_MIB_RX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_RX) #define STAT_MIB_TX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_TX) #define STAT_RUNT(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_RUNT) +#define STAT_MIB_SOFT(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_SOFT) #define STAT_RXCHK(str, m, ofs) { \ .stat_string = str, \ diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 676ffe093180..0469f72c6e7e 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -302,9 +302,6 @@ static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac, slot->skb = skb; slot->dma_addr = dma_addr; - if (slot->dma_addr & 0xC0000000) - bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n"); - return 0; } @@ -505,8 +502,6 @@ static int bgmac_dma_alloc(struct bgmac *bgmac) ring->mmio_base); goto err_dma_free; } - if (ring->dma_base & 0xC0000000) - bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n"); ring->unaligned = bgmac_dma_unaligned(bgmac, ring, BGMAC_DMA_RING_TX); @@ -536,8 +531,6 @@ static int bgmac_dma_alloc(struct bgmac *bgmac) err = -ENOMEM; goto err_dma_free; } - if (ring->dma_base & 0xC0000000) - bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n"); ring->unaligned = bgmac_dma_unaligned(bgmac, ring, BGMAC_DMA_RING_RX); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 7155e1d2c208..996e215fc324 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -12722,6 +12722,9 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev, pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, 
PCICFG_VENDOR_ID_OFFSET); + /* Set PCIe reset type to fundamental for EEH recovery */ + pdev->needs_freset = 1; + /* AER (Advanced Error reporting) configuration */ rc = pci_enable_pcie_error_reporting(pdev); if (!rc) @@ -12766,7 +12769,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev, NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO | NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX; - if (!CHIP_IS_E1x(bp)) { + if (!chip_is_e1x) { dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT; dev->hw_enc_features = diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index ff83c46bc389..6befde61c203 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -487,6 +487,7 @@ enum bcmgenet_stat_type { BCMGENET_STAT_MIB_TX, BCMGENET_STAT_RUNT, BCMGENET_STAT_MISC, + BCMGENET_STAT_SOFT, }; struct bcmgenet_stats { @@ -515,6 +516,7 @@ struct bcmgenet_stats { #define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX) #define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX) #define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT) +#define STAT_GENET_SOFT_MIB(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_SOFT) #define STAT_GENET_MISC(str, m, offset) { \ .stat_string = str, \ @@ -614,9 +616,9 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = { UMAC_RBUF_OVFL_CNT), STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT), STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT), - STAT_GENET_MIB_RX("alloc_rx_buff_failed", mib.alloc_rx_buff_failed), - STAT_GENET_MIB_RX("rx_dma_failed", mib.rx_dma_failed), - STAT_GENET_MIB_TX("tx_dma_failed", mib.tx_dma_failed), + STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed), + STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed), + STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed), }; #define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats) @@ -668,6 +670,7 @@ static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv) s = &bcmgenet_gstrings_stats[i]; switch (s->type) { case BCMGENET_STAT_NETDEV: + case BCMGENET_STAT_SOFT: continue; case BCMGENET_STAT_MIB_RX: case BCMGENET_STAT_MIB_TX: @@ -971,13 +974,14 @@ static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_priv *priv, } /* Unlocked version of the reclaim routine */ -static void __bcmgenet_tx_reclaim(struct net_device *dev, - struct bcmgenet_tx_ring *ring) +static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev, + struct bcmgenet_tx_ring *ring) { struct bcmgenet_priv *priv = netdev_priv(dev); int last_tx_cn, last_c_index, num_tx_bds; struct enet_cb *tx_cb_ptr; struct netdev_queue *txq; + unsigned int pkts_compl = 0; unsigned int bds_compl; unsigned int c_index; @@ -1005,6 +1009,7 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev, tx_cb_ptr = ring->cbs + last_c_index; bds_compl = 0; if (tx_cb_ptr->skb) { + pkts_compl++; bds_compl = skb_shinfo(tx_cb_ptr->skb)->nr_frags + 1; dev->stats.tx_bytes += tx_cb_ptr->skb->len; dma_unmap_single(&dev->dev, @@ -1028,23 +1033,45 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev, last_c_index &= (num_tx_bds - 1); } - if (ring->free_bds > (MAX_SKB_FRAGS + 1)) - ring->int_disable(priv, ring); - - if (netif_tx_queue_stopped(txq)) - netif_tx_wake_queue(txq); + if (ring->free_bds > (MAX_SKB_FRAGS + 1)) { + 
if (netif_tx_queue_stopped(txq)) + netif_tx_wake_queue(txq); + } ring->c_index = c_index; + + return pkts_compl; } -static void bcmgenet_tx_reclaim(struct net_device *dev, +static unsigned int bcmgenet_tx_reclaim(struct net_device *dev, struct bcmgenet_tx_ring *ring) { + unsigned int released; unsigned long flags; spin_lock_irqsave(&ring->lock, flags); - __bcmgenet_tx_reclaim(dev, ring); + released = __bcmgenet_tx_reclaim(dev, ring); spin_unlock_irqrestore(&ring->lock, flags); + + return released; +} + +static int bcmgenet_tx_poll(struct napi_struct *napi, int budget) +{ + struct bcmgenet_tx_ring *ring = + container_of(napi, struct bcmgenet_tx_ring, napi); + unsigned int work_done = 0; + + work_done = bcmgenet_tx_reclaim(ring->priv->dev, ring); + + if (work_done == 0) { + napi_complete(napi); + ring->int_enable(ring->priv, ring); + + return 0; + } + + return budget; } static void bcmgenet_tx_reclaim_all(struct net_device *dev) @@ -1302,10 +1329,8 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) bcmgenet_tdma_ring_writel(priv, ring->index, ring->prod_index, TDMA_PROD_INDEX); - if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) { + if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) netif_tx_stop_queue(txq); - ring->int_enable(priv, ring); - } out: spin_unlock_irqrestore(&ring->lock, flags); @@ -1621,6 +1646,7 @@ static int init_umac(struct bcmgenet_priv *priv) struct device *kdev = &priv->pdev->dev; int ret; u32 reg, cpu_mask_clear; + int index; dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n"); @@ -1647,7 +1673,7 @@ static int init_umac(struct bcmgenet_priv *priv) bcmgenet_intr_disable(priv); - cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE; + cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_TXDMA_BDONE; dev_dbg(kdev, "%s:Enabling RXDMA_BDONE interrupt\n", __func__); @@ -1674,6 +1700,10 @@ static int init_umac(struct bcmgenet_priv *priv) bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, INTRL2_CPU_MASK_CLEAR); + for (index = 0; index < priv->hw_params->tx_queues; index++) + bcmgenet_intrl2_1_writel(priv, (1 << index), + INTRL2_CPU_MASK_CLEAR); + /* Enable rx/tx engine.*/ dev_dbg(kdev, "done init umac\n"); @@ -1693,6 +1723,8 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv, unsigned int first_bd; spin_lock_init(&ring->lock); + ring->priv = priv; + netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64); ring->index = index; if (index == DESC_INDEX) { ring->queue = 0; @@ -1738,6 +1770,17 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv, TDMA_WRITE_PTR); bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1, DMA_END_ADDR); + + napi_enable(&ring->napi); +} + +static void bcmgenet_fini_tx_ring(struct bcmgenet_priv *priv, + unsigned int index) +{ + struct bcmgenet_tx_ring *ring = &priv->tx_rings[index]; + + napi_disable(&ring->napi); + netif_napi_del(&ring->napi); } /* Initialize a RDMA ring */ @@ -1907,7 +1950,7 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv) return ret; } -static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) +static void __bcmgenet_fini_dma(struct bcmgenet_priv *priv) { int i; @@ -1926,6 +1969,18 @@ static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) kfree(priv->tx_cbs); } +static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) +{ + int i; + + bcmgenet_fini_tx_ring(priv, DESC_INDEX); + + for (i = 0; i < priv->hw_params->tx_queues; i++) + bcmgenet_fini_tx_ring(priv, i); + + __bcmgenet_fini_dma(priv); +} + /* init_edma: Initialize DMA control register */ static int bcmgenet_init_dma(struct 
bcmgenet_priv *priv) { @@ -1952,7 +2007,7 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv) priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb), GFP_KERNEL); if (!priv->tx_cbs) { - bcmgenet_fini_dma(priv); + __bcmgenet_fini_dma(priv); return -ENOMEM; } @@ -1975,9 +2030,6 @@ static int bcmgenet_poll(struct napi_struct *napi, int budget) struct bcmgenet_priv, napi); unsigned int work_done; - /* tx reclaim */ - bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]); - work_done = bcmgenet_desc_rx(priv, budget); /* Advancing our consumer index*/ @@ -2022,28 +2074,34 @@ static void bcmgenet_irq_task(struct work_struct *work) static irqreturn_t bcmgenet_isr1(int irq, void *dev_id) { struct bcmgenet_priv *priv = dev_id; + struct bcmgenet_tx_ring *ring; unsigned int index; /* Save irq status for bottom-half processing. */ priv->irq1_stat = bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) & - ~priv->int1_mask; + ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS); /* clear interrupts */ bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR); netif_dbg(priv, intr, priv->dev, "%s: IRQ=0x%x\n", __func__, priv->irq1_stat); + /* Check the MBDONE interrupts. * packet is done, reclaim descriptors */ - if (priv->irq1_stat & 0x0000ffff) { - index = 0; - for (index = 0; index < 16; index++) { - if (priv->irq1_stat & (1 << index)) - bcmgenet_tx_reclaim(priv->dev, - &priv->tx_rings[index]); + for (index = 0; index < priv->hw_params->tx_queues; index++) { + if (!(priv->irq1_stat & BIT(index))) + continue; + + ring = &priv->tx_rings[index]; + + if (likely(napi_schedule_prep(&ring->napi))) { + ring->int_disable(priv, ring); + __napi_schedule(&ring->napi); } } + return IRQ_HANDLED; } @@ -2075,8 +2133,12 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id) } if (priv->irq0_stat & (UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE)) { - /* Tx reclaim */ - bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]); + struct bcmgenet_tx_ring *ring = &priv->tx_rings[DESC_INDEX]; + + if (likely(napi_schedule_prep(&ring->napi))) { + ring->int_disable(priv, ring); + __napi_schedule(&ring->napi); + } } if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R | UMAC_IRQ_PHY_DET_F | diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h index b36ddec0cc0a..0d370d168aee 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h @@ -520,6 +520,7 @@ struct bcmgenet_hw_params { struct bcmgenet_tx_ring { spinlock_t lock; /* ring lock */ + struct napi_struct napi; /* NAPI per tx queue */ unsigned int index; /* ring index */ unsigned int queue; /* queue index */ struct enet_cb *cbs; /* tx ring buffer control block*/ @@ -534,6 +535,7 @@ struct bcmgenet_tx_ring { struct bcmgenet_tx_ring *); void (*int_disable)(struct bcmgenet_priv *priv, struct bcmgenet_tx_ring *); + struct bcmgenet_priv *priv; }; /* device context */ diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c index 149a0d70c108..b97122926d3a 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c @@ -73,15 +73,17 @@ int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) if (wol->wolopts & ~(WAKE_MAGIC | WAKE_MAGICSECURE)) return -EINVAL; + reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); if (wol->wolopts & WAKE_MAGICSECURE) { bcmgenet_umac_writel(priv, get_unaligned_be16(&wol->sopass[0]), 
UMAC_MPD_PW_MS); bcmgenet_umac_writel(priv, get_unaligned_be32(&wol->sopass[2]), UMAC_MPD_PW_LS); - reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); reg |= MPD_PW_EN; - bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); + } else { + reg &= ~MPD_PW_EN; } + bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); /* Flag the device and relevant IRQ as wakeup capable */ if (wol->wolopts) { diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index ad76b8e35a00..81d41539fcba 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -2113,17 +2113,17 @@ static const struct net_device_ops macb_netdev_ops = { }; #if defined(CONFIG_OF) -static struct macb_config pc302gem_config = { +static const struct macb_config pc302gem_config = { .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE, .dma_burst_length = 16, }; -static struct macb_config sama5d3_config = { +static const struct macb_config sama5d3_config = { .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE, .dma_burst_length = 16, }; -static struct macb_config sama5d4_config = { +static const struct macb_config sama5d4_config = { .caps = 0, .dma_burst_length = 4, }; @@ -2154,7 +2154,7 @@ static void macb_configure_caps(struct macb *bp) if (bp->pdev->dev.of_node) { match = of_match_node(macb_dt_ids, bp->pdev->dev.of_node); if (match && match->data) { - config = (const struct macb_config *)match->data; + config = match->data; bp->caps = config->caps; /* diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index 31dc080f2437..ff85619a9732 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -351,7 +351,7 @@ /* Bitfields in MID */ #define MACB_IDNUM_OFFSET 16 -#define MACB_IDNUM_SIZE 16 +#define MACB_IDNUM_SIZE 12 #define MACB_REV_OFFSET 0 #define MACB_REV_SIZE 16 diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c index 9062a8434246..c308429dd9c7 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c +++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c @@ -35,10 +35,10 @@ static inline unsigned int ipv6_clip_hash(struct clip_tbl *d, const u32 *key) } static unsigned int clip_addr_hash(struct clip_tbl *ctbl, const u32 *addr, - int addr_len) + u8 v6) { - return addr_len == 4 ? ipv4_clip_hash(ctbl, addr) : - ipv6_clip_hash(ctbl, addr); + return v6 ? 
ipv6_clip_hash(ctbl, addr) : + ipv4_clip_hash(ctbl, addr); } static int clip6_get_mbox(const struct net_device *dev, @@ -78,23 +78,22 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6) struct clip_entry *ce, *cte; u32 *addr = (u32 *)lip; int hash; - int addr_len; - int ret = 0; + int ret = -1; if (!ctbl) return 0; - if (v6) - addr_len = 16; - else - addr_len = 4; - - hash = clip_addr_hash(ctbl, addr, addr_len); + hash = clip_addr_hash(ctbl, addr, v6); read_lock_bh(&ctbl->lock); list_for_each_entry(cte, &ctbl->hash_list[hash], list) { - if (addr_len == cte->addr_len && - memcmp(lip, cte->addr, cte->addr_len) == 0) { + if (cte->addr6.sin6_family == AF_INET6 && v6) + ret = memcmp(lip, cte->addr6.sin6_addr.s6_addr, + sizeof(struct in6_addr)); + else if (cte->addr.sin_family == AF_INET && !v6) + ret = memcmp(lip, (char *)(&cte->addr.sin_addr), + sizeof(struct in_addr)); + if (!ret) { ce = cte; read_unlock_bh(&ctbl->lock); goto found; @@ -111,15 +110,20 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6) spin_lock_init(&ce->lock); atomic_set(&ce->refcnt, 0); atomic_dec(&ctbl->nfree); - ce->addr_len = addr_len; - memcpy(ce->addr, lip, addr_len); list_add_tail(&ce->list, &ctbl->hash_list[hash]); if (v6) { + ce->addr6.sin6_family = AF_INET6; + memcpy(ce->addr6.sin6_addr.s6_addr, + lip, sizeof(struct in6_addr)); ret = clip6_get_mbox(dev, (const struct in6_addr *)lip); if (ret) { write_unlock_bh(&ctbl->lock); return ret; } + } else { + ce->addr.sin_family = AF_INET; + memcpy((char *)(&ce->addr.sin_addr), lip, + sizeof(struct in_addr)); } } else { write_unlock_bh(&ctbl->lock); @@ -140,19 +144,19 @@ void cxgb4_clip_release(const struct net_device *dev, const u32 *lip, u8 v6) struct clip_entry *ce, *cte; u32 *addr = (u32 *)lip; int hash; - int addr_len; - - if (v6) - addr_len = 16; - else - addr_len = 4; + int ret = -1; - hash = clip_addr_hash(ctbl, addr, addr_len); + hash = clip_addr_hash(ctbl, addr, v6); read_lock_bh(&ctbl->lock); list_for_each_entry(cte, &ctbl->hash_list[hash], list) { - if (addr_len == cte->addr_len && - memcmp(lip, cte->addr, cte->addr_len) == 0) { + if (cte->addr6.sin6_family == AF_INET6 && v6) + ret = memcmp(lip, cte->addr6.sin6_addr.s6_addr, + sizeof(struct in6_addr)); + else if (cte->addr.sin_family == AF_INET && !v6) + ret = memcmp(lip, (char *)(&cte->addr.sin_addr), + sizeof(struct in_addr)); + if (!ret) { ce = cte; read_unlock_bh(&ctbl->lock); goto found; @@ -249,10 +253,7 @@ int clip_tbl_show(struct seq_file *seq, void *v) for (i = 0 ; i < ctbl->clipt_size; ++i) { list_for_each_entry(ce, &ctbl->hash_list[i], list) { ip[0] = '\0'; - if (ce->addr_len == 16) - sprintf(ip, "%pI6c", ce->addr); - else - sprintf(ip, "%pI4c", ce->addr); + sprintf(ip, "%pISc", &ce->addr); seq_printf(seq, "%-25s %u\n", ip, atomic_read(&ce->refcnt)); } diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h index 2eaba0161cf8..35eb43c6bcbb 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h +++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h @@ -14,8 +14,10 @@ struct clip_entry { spinlock_t lock; /* Hold while modifying clip reference */ atomic_t refcnt; struct list_head list; - u32 addr[4]; - int addr_len; + union { + struct sockaddr_in addr; + struct sockaddr_in6 addr6; + }; }; struct clip_tbl { diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index d6cda17efe6e..97842d03675b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ 
b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -1103,7 +1103,7 @@ int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port); #define T4_MEMORY_WRITE 0 #define T4_MEMORY_READ 1 int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, u32 len, - __be32 *buf, int dir); + void *buf, int dir); static inline int t4_memory_write(struct adapter *adap, int mtype, u32 addr, u32 len, __be32 *buf) { diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 4d643b65265e..1abdfa123c6c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -449,7 +449,7 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc) * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC * @addr: address within indicated memory type * @len: amount of memory to transfer - * @buf: host memory buffer + * @hbuf: host memory buffer * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0) * * Reads/writes an [almost] arbitrary memory region in the firmware: the @@ -460,15 +460,17 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc) * caller's responsibility to perform appropriate byte order conversions. */ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, - u32 len, __be32 *buf, int dir) + u32 len, void *hbuf, int dir) { u32 pos, offset, resid, memoffset; u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base; + u32 *buf; /* Argument sanity checks ... */ - if (addr & 0x3) + if (addr & 0x3 || (uintptr_t)hbuf & 0x3) return -EINVAL; + buf = (u32 *)hbuf; /* It's convenient to be able to handle lengths which aren't a * multiple of 32-bits because we often end up transferring files to @@ -532,14 +534,45 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, /* Transfer data to/from the adapter as long as there's an integral * number of 32-bit transfers to complete. + * + * A note on Endianness issues: + * + * The "register" reads and writes below from/to the PCI-E Memory + * Window invoke the standard adapter Big-Endian to PCI-E Link + * Little-Endian "swizzel." As a result, if we have the following + * data in adapter memory: + * + * Memory: ... | b0 | b1 | b2 | b3 | ... + * Address: i+0 i+1 i+2 i+3 + * + * Then a read of the adapter memory via the PCI-E Memory Window + * will yield: + * + * x = readl(i) + * 31 0 + * [ b3 | b2 | b1 | b0 ] + * + * If this value is stored into local memory on a Little-Endian system + * it will show up correctly in local memory as: + * + * ( ..., b0, b1, b2, b3, ... ) + * + * But on a Big-Endian system, the store will show up in memory + * incorrectly swizzled as: + * + * ( ..., b3, b2, b1, b0, ... ) + * + * So we need to account for this in the reads and writes to the + * PCI-E Memory Window below by undoing the register read/write + * swizzels. 
*/ while (len > 0) { if (dir == T4_MEMORY_READ) - *buf++ = (__force __be32) t4_read_reg(adap, - mem_base + offset); + *buf++ = le32_to_cpu((__force __le32)t4_read_reg(adap, + mem_base + offset)); else t4_write_reg(adap, mem_base + offset, - (__force u32) *buf++); + (__force u32)cpu_to_le32(*buf++)); offset += sizeof(__be32); len -= sizeof(__be32); @@ -568,15 +601,16 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, */ if (resid) { union { - __be32 word; + u32 word; char byte[4]; } last; unsigned char *bp; int i; if (dir == T4_MEMORY_READ) { - last.word = (__force __be32) t4_read_reg(adap, - mem_base + offset); + last.word = le32_to_cpu( + (__force __le32)t4_read_reg(adap, + mem_base + offset)); for (bp = (unsigned char *)buf, i = resid; i < 4; i++) bp[i] = last.byte[i]; } else { @@ -584,7 +618,7 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, for (i = resid; i < 4; i++) last.byte[i] = 0; t4_write_reg(adap, mem_base + offset, - (__force u32) last.word); + (__force u32)cpu_to_le32(last.word)); } } @@ -1086,7 +1120,7 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info, } /* Installed successfully, update the cached header too. */ - memcpy(card_fw, fs_fw, sizeof(*card_fw)); + *card_fw = *fs_fw; card_fw_usable = 1; *reset = 0; /* already reset as part of load_fw */ } diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 9cbe038a388e..a5179bfcdc2c 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -272,8 +272,8 @@ static irqreturn_t enic_isr_legacy(int irq, void *data) } if (ENIC_TEST_INTR(pba, notify_intr)) { - vnic_intr_return_all_credits(&enic->intr[notify_intr]); enic_notify_check(enic); + vnic_intr_return_all_credits(&enic->intr[notify_intr]); } if (ENIC_TEST_INTR(pba, err_intr)) { @@ -346,8 +346,8 @@ static irqreturn_t enic_isr_msix_notify(int irq, void *data) struct enic *enic = data; unsigned int intr = enic_msix_notify_intr(enic); - vnic_intr_return_all_credits(&enic->intr[intr]); enic_notify_check(enic); + vnic_intr_return_all_credits(&enic->intr[intr]); return IRQ_HANDLED; } diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c index 3b42556f7f8d..ed41559bae77 100644 --- a/drivers/net/ethernet/dec/tulip/tulip_core.c +++ b/drivers/net/ethernet/dec/tulip/tulip_core.c @@ -589,7 +589,7 @@ static void tulip_tx_timeout(struct net_device *dev) (unsigned int)tp->rx_ring[i].buffer1, (unsigned int)tp->rx_ring[i].buffer2, buf[0], buf[1], buf[2]); - for (j = 0; buf[j] != 0xee && j < 1600; j++) + for (j = 0; ((j < 1600) && buf[j] != 0xee); j++) if (j < 100) pr_cont(" %02x", buf[j]); pr_cont(" j=%d\n", j); diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 27de37aa90af..27b9fe99a9bd 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -354,6 +354,7 @@ struct be_vf_cfg { u16 vlan_tag; u32 tx_rate; u32 plink_tracking; + u32 privileges; }; enum vf_state { @@ -423,6 +424,7 @@ struct be_adapter { u8 __iomem *csr; /* CSR BAR used only for BE2/3 */ u8 __iomem *db; /* Door Bell */ + u8 __iomem *pcicfg; /* On SH,BEx only. 
Shadow of PCI config space */ struct mutex mbox_lock; /* For serializing mbox cmds to BE card */ struct be_dma_mem mbox_mem; diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 36916cfa70f9..7f05f309e935 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -1902,15 +1902,11 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd, { int num_eqs, i = 0; - if (lancer_chip(adapter) && num > 8) { - while (num) { - num_eqs = min(num, 8); - __be_cmd_modify_eqd(adapter, &set_eqd[i], num_eqs); - i += num_eqs; - num -= num_eqs; - } - } else { - __be_cmd_modify_eqd(adapter, set_eqd, num); + while (num) { + num_eqs = min(num, 8); + __be_cmd_modify_eqd(adapter, &set_eqd[i], num_eqs); + i += num_eqs; + num -= num_eqs; } return 0; @@ -1918,7 +1914,7 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd, /* Uses sycnhronous mcc */ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, - u32 num) + u32 num, u32 domain) { struct be_mcc_wrb *wrb; struct be_cmd_req_vlan_config *req; @@ -1936,6 +1932,7 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req), wrb, NULL); + req->hdr.domain = domain; req->interface_id = if_id; req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0; diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index db761e8e42a3..a7634a3f052a 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -2256,7 +2256,7 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter, int be_cmd_get_fw_ver(struct be_adapter *adapter); int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *, int num); int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, - u32 num); + u32 num, u32 domain); int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status); int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc); int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc); diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 0a816859aca5..e6b790f0d9dc 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -1171,7 +1171,7 @@ static int be_vid_config(struct be_adapter *adapter) for_each_set_bit(i, adapter->vids, VLAN_N_VID) vids[num++] = cpu_to_le16(i); - status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num); + status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0); if (status) { dev_err(dev, "Setting HW VLAN filtering failed\n"); /* Set to VLAN promisc mode as setting VLAN filter failed */ @@ -1380,11 +1380,67 @@ static int be_get_vf_config(struct net_device *netdev, int vf, return 0; } +static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan) +{ + struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf]; + u16 vids[BE_NUM_VLANS_SUPPORTED]; + int vf_if_id = vf_cfg->if_handle; + int status; + + /* Enable Transparent VLAN Tagging */ + status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0); + if (status) + return status; + + /* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */ + vids[0] = 0; + status = 
be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1); + if (!status) + dev_info(&adapter->pdev->dev, + "Cleared guest VLANs on VF%d", vf); + + /* After TVT is enabled, disallow VFs to program VLAN filters */ + if (vf_cfg->privileges & BE_PRIV_FILTMGMT) { + status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges & + ~BE_PRIV_FILTMGMT, vf + 1); + if (!status) + vf_cfg->privileges &= ~BE_PRIV_FILTMGMT; + } + return 0; +} + +static int be_clear_vf_tvt(struct be_adapter *adapter, int vf) +{ + struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf]; + struct device *dev = &adapter->pdev->dev; + int status; + + /* Reset Transparent VLAN Tagging. */ + status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1, + vf_cfg->if_handle, 0); + if (status) + return status; + + /* Allow VFs to program VLAN filtering */ + if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) { + status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges | + BE_PRIV_FILTMGMT, vf + 1); + if (!status) { + vf_cfg->privileges |= BE_PRIV_FILTMGMT; + dev_info(dev, "VF%d: FILTMGMT priv enabled", vf); + } + } + + dev_info(dev, + "Disable/re-enable i/f in VM to clear Transparent VLAN tag"); + return 0; +} + static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos) { struct be_adapter *adapter = netdev_priv(netdev); struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf]; - int status = 0; + int status; if (!sriov_enabled(adapter)) return -EPERM; @@ -1394,24 +1450,19 @@ static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos) if (vlan || qos) { vlan |= qos << VLAN_PRIO_SHIFT; - if (vf_cfg->vlan_tag != vlan) - status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, - vf_cfg->if_handle, 0); + status = be_set_vf_tvt(adapter, vf, vlan); } else { - /* Reset Transparent Vlan Tagging. 
*/ - status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, - vf + 1, vf_cfg->if_handle, 0); + status = be_clear_vf_tvt(adapter, vf); } if (status) { dev_err(&adapter->pdev->dev, - "VLAN %d config on VF %d failed : %#x\n", vlan, - vf, status); + "VLAN %d config on VF %d failed : %#x\n", vlan, vf, + status); return be_cmd_status(status); } vf_cfg->vlan_tag = vlan; - return 0; } @@ -2772,14 +2823,12 @@ void be_detect_error(struct be_adapter *adapter) } } } else { - pci_read_config_dword(adapter->pdev, - PCICFG_UE_STATUS_LOW, &ue_lo); - pci_read_config_dword(adapter->pdev, - PCICFG_UE_STATUS_HIGH, &ue_hi); - pci_read_config_dword(adapter->pdev, - PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask); - pci_read_config_dword(adapter->pdev, - PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask); + ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW); + ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH); + ue_lo_mask = ioread32(adapter->pcicfg + + PCICFG_UE_STATUS_LOW_MASK); + ue_hi_mask = ioread32(adapter->pcicfg + + PCICFG_UE_STATUS_HI_MASK); ue_lo = (ue_lo & ~ue_lo_mask); ue_hi = (ue_hi & ~ue_hi_mask); @@ -3339,7 +3388,6 @@ static int be_if_create(struct be_adapter *adapter, u32 *if_handle, u32 cap_flags, u32 vf) { u32 en_flags; - int status; en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS | @@ -3347,10 +3395,7 @@ static int be_if_create(struct be_adapter *adapter, u32 *if_handle, en_flags &= cap_flags; - status = be_cmd_if_create(adapter, cap_flags, en_flags, - if_handle, vf); - - return status; + return be_cmd_if_create(adapter, cap_flags, en_flags, if_handle, vf); } static int be_vfs_if_create(struct be_adapter *adapter) @@ -3368,8 +3413,13 @@ static int be_vfs_if_create(struct be_adapter *adapter) if (!BE3_chip(adapter)) { status = be_cmd_get_profile_config(adapter, &res, vf + 1); - if (!status) + if (!status) { cap_flags = res.if_cap_flags; + /* Prevent VFs from enabling VLAN promiscuous + * mode + */ + cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS; + } } status = be_if_create(adapter, &vf_cfg->if_handle, @@ -3403,7 +3453,6 @@ static int be_vf_setup(struct be_adapter *adapter) struct device *dev = &adapter->pdev->dev; struct be_vf_cfg *vf_cfg; int status, old_vfs, vf; - u32 privileges; old_vfs = pci_num_vf(adapter->pdev); @@ -3433,15 +3482,18 @@ static int be_vf_setup(struct be_adapter *adapter) for_all_vfs(adapter, vf_cfg, vf) { /* Allow VFs to programs MAC/VLAN filters */ - status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1); - if (!status && !(privileges & BE_PRIV_FILTMGMT)) { + status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges, + vf + 1); + if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) { status = be_cmd_set_fn_privileges(adapter, - privileges | + vf_cfg->privileges | BE_PRIV_FILTMGMT, vf + 1); - if (!status) + if (!status) { + vf_cfg->privileges |= BE_PRIV_FILTMGMT; dev_info(dev, "VF%d has FILTMGMT privilege\n", vf); + } } /* Allow full available bandwidth */ @@ -4820,24 +4872,37 @@ static int be_roce_map_pci_bars(struct be_adapter *adapter) static int be_map_pci_bars(struct be_adapter *adapter) { + struct pci_dev *pdev = adapter->pdev; u8 __iomem *addr; if (BEx_chip(adapter) && be_physfn(adapter)) { - adapter->csr = pci_iomap(adapter->pdev, 2, 0); + adapter->csr = pci_iomap(pdev, 2, 0); if (!adapter->csr) return -ENOMEM; } - addr = pci_iomap(adapter->pdev, db_bar(adapter), 0); + addr = pci_iomap(pdev, db_bar(adapter), 0); if (!addr) goto pci_map_err; adapter->db = addr; + if (skyhawk_chip(adapter) 
|| BEx_chip(adapter)) { + if (be_physfn(adapter)) { + /* PCICFG is the 2nd BAR in BE2 */ + addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0); + if (!addr) + goto pci_map_err; + adapter->pcicfg = addr; + } else { + adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET; + } + } + be_roce_map_pci_bars(adapter); return 0; pci_map_err: - dev_err(&adapter->pdev->dev, "Error in mapping PCI BARs\n"); + dev_err(&pdev->dev, "Error in mapping PCI BARs\n"); be_unmap_pci_bars(adapter); return -ENOMEM; } diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 9bb6220663b2..78e1ce09b1ab 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -1189,13 +1189,12 @@ static void fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) { struct fec_enet_private *fep; - struct bufdesc *bdp, *bdp_t; + struct bufdesc *bdp; unsigned short status; struct sk_buff *skb; struct fec_enet_priv_tx_q *txq; struct netdev_queue *nq; int index = 0; - int i, bdnum; int entries_free; fep = netdev_priv(ndev); @@ -1216,29 +1215,18 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) if (bdp == txq->cur_tx) break; - bdp_t = bdp; - bdnum = 1; - index = fec_enet_get_bd_index(txq->tx_bd_base, bdp_t, fep); - skb = txq->tx_skbuff[index]; - while (!skb) { - bdp_t = fec_enet_get_nextdesc(bdp_t, fep, queue_id); - index = fec_enet_get_bd_index(txq->tx_bd_base, bdp_t, fep); - skb = txq->tx_skbuff[index]; - bdnum++; - } - if (skb_shinfo(skb)->nr_frags && - (status = bdp_t->cbd_sc) & BD_ENET_TX_READY) - break; + index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); - for (i = 0; i < bdnum; i++) { - if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr)) - dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, - bdp->cbd_datlen, DMA_TO_DEVICE); - bdp->cbd_bufaddr = 0; - if (i < bdnum - 1) - bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); - } + skb = txq->tx_skbuff[index]; txq->tx_skbuff[index] = NULL; + if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr)) + dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, + bdp->cbd_datlen, DMA_TO_DEVICE); + bdp->cbd_bufaddr = 0; + if (!skb) { + bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); + continue; + } /* Check for errors. 
*/ if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | @@ -1479,8 +1467,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) vlan_packet_rcvd = true; - skb_copy_to_linear_data_offset(skb, VLAN_HLEN, - data, (2 * ETH_ALEN)); + memmove(skb->data + VLAN_HLEN, data, ETH_ALEN * 2); skb_pull(skb, VLAN_HLEN); } @@ -1597,7 +1584,7 @@ fec_enet_interrupt(int irq, void *dev_id) writel(int_events, fep->hwp + FEC_IEVENT); fec_enet_collect_events(fep, int_events); - if (fep->work_tx || fep->work_rx) { + if ((fep->work_tx || fep->work_rx) && fep->link) { ret = IRQ_HANDLED; if (napi_schedule_prep(&fep->napi)) { @@ -3383,7 +3370,6 @@ fec_drv_remove(struct platform_device *pdev) regulator_disable(fep->reg_phy); if (fep->ptp_clock) ptp_clock_unregister(fep->ptp_clock); - fec_enet_clk_enable(ndev, false); of_node_put(fep->phy_node); free_netdev(ndev); diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 43df78882e48..7bf3682cdf47 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -747,6 +747,18 @@ static int gfar_parse_group(struct device_node *np, return 0; } +static int gfar_of_group_count(struct device_node *np) +{ + struct device_node *child; + int num = 0; + + for_each_available_child_of_node(np, child) + if (!of_node_cmp(child->name, "queue-group")) + num++; + + return num; +} + static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) { const char *model; @@ -784,7 +796,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) num_rx_qs = 1; } else { /* MQ_MG_MODE */ /* get the actual number of supported groups */ - unsigned int num_grps = of_get_available_child_count(np); + unsigned int num_grps = gfar_of_group_count(np); if (num_grps == 0 || num_grps > MAXGROUPS) { dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n", @@ -851,7 +863,10 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) /* Parse and initialize group specific information */ if (priv->mode == MQ_MG_MODE) { - for_each_child_of_node(np, child) { + for_each_available_child_of_node(np, child) { + if (of_node_cmp(child->name, "queue-group")) + continue; + err = gfar_parse_group(child, priv, model); if (err) goto err_grp_init; @@ -3162,8 +3177,8 @@ static void adjust_link(struct net_device *dev) struct phy_device *phydev = priv->phydev; if (unlikely(phydev->link != priv->oldlink || - phydev->duplex != priv->oldduplex || - phydev->speed != priv->oldspeed)) + (phydev->link && (phydev->duplex != priv->oldduplex || + phydev->speed != priv->oldspeed)))) gfar_update_link_state(priv); } diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index e8a1adb7a962..c05e50759621 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c @@ -3262,6 +3262,139 @@ static void ehea_remove_device_sysfs(struct platform_device *dev) device_remove_file(&dev->dev, &dev_attr_remove_port); } +static int ehea_reboot_notifier(struct notifier_block *nb, + unsigned long action, void *unused) +{ + if (action == SYS_RESTART) { + pr_info("Reboot: freeing all eHEA resources\n"); + ibmebus_unregister_driver(&ehea_driver); + } + return NOTIFY_DONE; +} + +static struct notifier_block ehea_reboot_nb = { + .notifier_call = ehea_reboot_notifier, +}; + +static int ehea_mem_notifier(struct notifier_block *nb, + unsigned long action, void *data) +{ + int ret = NOTIFY_BAD; + struct memory_notify *arg 
= data; + + mutex_lock(&dlpar_mem_lock); + + switch (action) { + case MEM_CANCEL_OFFLINE: + pr_info("memory offlining canceled"); + /* Fall through: re-add canceled memory block */ + + case MEM_ONLINE: + pr_info("memory is going online"); + set_bit(__EHEA_STOP_XFER, &ehea_driver_flags); + if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages)) + goto out_unlock; + ehea_rereg_mrs(); + break; + + case MEM_GOING_OFFLINE: + pr_info("memory is going offline"); + set_bit(__EHEA_STOP_XFER, &ehea_driver_flags); + if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages)) + goto out_unlock; + ehea_rereg_mrs(); + break; + + default: + break; + } + + ehea_update_firmware_handles(); + ret = NOTIFY_OK; + +out_unlock: + mutex_unlock(&dlpar_mem_lock); + return ret; +} + +static struct notifier_block ehea_mem_nb = { + .notifier_call = ehea_mem_notifier, +}; + +static void ehea_crash_handler(void) +{ + int i; + + if (ehea_fw_handles.arr) + for (i = 0; i < ehea_fw_handles.num_entries; i++) + ehea_h_free_resource(ehea_fw_handles.arr[i].adh, + ehea_fw_handles.arr[i].fwh, + FORCE_FREE); + + if (ehea_bcmc_regs.arr) + for (i = 0; i < ehea_bcmc_regs.num_entries; i++) + ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh, + ehea_bcmc_regs.arr[i].port_id, + ehea_bcmc_regs.arr[i].reg_type, + ehea_bcmc_regs.arr[i].macaddr, + 0, H_DEREG_BCMC); +} + +static atomic_t ehea_memory_hooks_registered; + +/* Register memory hooks on probe of first adapter */ +static int ehea_register_memory_hooks(void) +{ + int ret = 0; + + if (atomic_inc_and_test(&ehea_memory_hooks_registered)) + return 0; + + ret = ehea_create_busmap(); + if (ret) { + pr_info("ehea_create_busmap failed\n"); + goto out; + } + + ret = register_reboot_notifier(&ehea_reboot_nb); + if (ret) { + pr_info("register_reboot_notifier failed\n"); + goto out; + } + + ret = register_memory_notifier(&ehea_mem_nb); + if (ret) { + pr_info("register_memory_notifier failed\n"); + goto out2; + } + + ret = crash_shutdown_register(ehea_crash_handler); + if (ret) { + pr_info("crash_shutdown_register failed\n"); + goto out3; + } + + return 0; + +out3: + unregister_memory_notifier(&ehea_mem_nb); +out2: + unregister_reboot_notifier(&ehea_reboot_nb); +out: + return ret; +} + +static void ehea_unregister_memory_hooks(void) +{ + if (atomic_read(&ehea_memory_hooks_registered)) + return; + + unregister_reboot_notifier(&ehea_reboot_nb); + if (crash_shutdown_unregister(ehea_crash_handler)) + pr_info("failed unregistering crash handler\n"); + unregister_memory_notifier(&ehea_mem_nb); +} + static int ehea_probe_adapter(struct platform_device *dev) { struct ehea_adapter *adapter; @@ -3269,6 +3402,10 @@ static int ehea_probe_adapter(struct platform_device *dev) int ret; int i; + ret = ehea_register_memory_hooks(); + if (ret) + return ret; + if (!dev || !dev->dev.of_node) { pr_err("Invalid ibmebus device probed\n"); return -EINVAL; @@ -3392,81 +3529,6 @@ static int ehea_remove(struct platform_device *dev) return 0; } -static void ehea_crash_handler(void) -{ - int i; - - if (ehea_fw_handles.arr) - for (i = 0; i < ehea_fw_handles.num_entries; i++) - ehea_h_free_resource(ehea_fw_handles.arr[i].adh, - ehea_fw_handles.arr[i].fwh, - FORCE_FREE); - - if (ehea_bcmc_regs.arr) - for (i = 0; i < ehea_bcmc_regs.num_entries; i++) - ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh, - ehea_bcmc_regs.arr[i].port_id, - ehea_bcmc_regs.arr[i].reg_type, - ehea_bcmc_regs.arr[i].macaddr, - 0, H_DEREG_BCMC); -} - -static int ehea_mem_notifier(struct notifier_block *nb, - unsigned long action, void *data) -{ - int ret = 
NOTIFY_BAD; - struct memory_notify *arg = data; - - mutex_lock(&dlpar_mem_lock); - - switch (action) { - case MEM_CANCEL_OFFLINE: - pr_info("memory offlining canceled"); - /* Readd canceled memory block */ - case MEM_ONLINE: - pr_info("memory is going online"); - set_bit(__EHEA_STOP_XFER, &ehea_driver_flags); - if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages)) - goto out_unlock; - ehea_rereg_mrs(); - break; - case MEM_GOING_OFFLINE: - pr_info("memory is going offline"); - set_bit(__EHEA_STOP_XFER, &ehea_driver_flags); - if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages)) - goto out_unlock; - ehea_rereg_mrs(); - break; - default: - break; - } - - ehea_update_firmware_handles(); - ret = NOTIFY_OK; - -out_unlock: - mutex_unlock(&dlpar_mem_lock); - return ret; -} - -static struct notifier_block ehea_mem_nb = { - .notifier_call = ehea_mem_notifier, -}; - -static int ehea_reboot_notifier(struct notifier_block *nb, - unsigned long action, void *unused) -{ - if (action == SYS_RESTART) { - pr_info("Reboot: freeing all eHEA resources\n"); - ibmebus_unregister_driver(&ehea_driver); - } - return NOTIFY_DONE; -} - -static struct notifier_block ehea_reboot_nb = { - .notifier_call = ehea_reboot_notifier, -}; - static int check_module_parm(void) { int ret = 0; @@ -3520,26 +3582,10 @@ static int __init ehea_module_init(void) if (ret) goto out; - ret = ehea_create_busmap(); - if (ret) - goto out; - - ret = register_reboot_notifier(&ehea_reboot_nb); - if (ret) - pr_info("failed registering reboot notifier\n"); - - ret = register_memory_notifier(&ehea_mem_nb); - if (ret) - pr_info("failed registering memory remove notifier\n"); - - ret = crash_shutdown_register(ehea_crash_handler); - if (ret) - pr_info("failed registering crash handler\n"); - ret = ibmebus_register_driver(&ehea_driver); if (ret) { pr_err("failed registering eHEA device driver on ebus\n"); - goto out2; + goto out; } ret = driver_create_file(&ehea_driver.driver, @@ -3547,32 +3593,22 @@ static int __init ehea_module_init(void) if (ret) { pr_err("failed to register capabilities attribute, ret=%d\n", ret); - goto out3; + goto out2; } return ret; -out3: - ibmebus_unregister_driver(&ehea_driver); out2: - unregister_memory_notifier(&ehea_mem_nb); - unregister_reboot_notifier(&ehea_reboot_nb); - crash_shutdown_unregister(ehea_crash_handler); + ibmebus_unregister_driver(&ehea_driver); out: return ret; } static void __exit ehea_module_exit(void) { - int ret; - driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities); ibmebus_unregister_driver(&ehea_driver); - unregister_reboot_notifier(&ehea_reboot_nb); - ret = crash_shutdown_unregister(ehea_crash_handler); - if (ret) - pr_info("failed unregistering crash handler\n"); - unregister_memory_notifier(&ehea_mem_nb); + ehea_unregister_memory_hooks(); kfree(ehea_fw_handles.arr); kfree(ehea_bcmc_regs.arr); ehea_destroy_busmap(); diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 21978cc019e7..cd7675ac5bf9 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -1136,6 +1136,8 @@ restart_poll: ibmveth_replenish_task(adapter); if (frames_processed < budget) { + napi_complete(napi); + /* We think we are done - reenable interrupts, * then check once more to make sure we are done. 
*/ @@ -1144,8 +1146,6 @@ restart_poll: BUG_ON(lpar_rc != H_SUCCESS); - napi_complete(napi); - if (ibmveth_rxq_pending_buffer(adapter) && napi_reschedule(napi)) { lpar_rc = h_vio_signal(adapter->vdev->unit_address, @@ -1327,6 +1327,28 @@ static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev) return ret; } +static int ibmveth_set_mac_addr(struct net_device *dev, void *p) +{ + struct ibmveth_adapter *adapter = netdev_priv(dev); + struct sockaddr *addr = p; + u64 mac_address; + int rc; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + mac_address = ibmveth_encode_mac_addr(addr->sa_data); + rc = h_change_logical_lan_mac(adapter->vdev->unit_address, mac_address); + if (rc) { + netdev_err(adapter->netdev, "h_change_logical_lan_mac failed with rc=%d\n", rc); + return rc; + } + + ether_addr_copy(dev->dev_addr, addr->sa_data); + + return 0; +} + static const struct net_device_ops ibmveth_netdev_ops = { .ndo_open = ibmveth_open, .ndo_stop = ibmveth_close, @@ -1337,7 +1359,7 @@ static const struct net_device_ops ibmveth_netdev_ops = { .ndo_fix_features = ibmveth_fix_features, .ndo_set_features = ibmveth_set_features, .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = eth_mac_addr, + .ndo_set_mac_address = ibmveth_set_mac_addr, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = ibmveth_poll_controller, #endif diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 11a9ffebf8d8..6aea65dae5ed 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -868,8 +868,9 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw) * The grst delay value is in 100ms units, and we'll wait a * couple counts longer to be sure we don't just miss the end. 
*/ - grst_del = rd32(hw, I40E_GLGEN_RSTCTL) & I40E_GLGEN_RSTCTL_GRSTDEL_MASK - >> I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT; + grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) & + I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >> + I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT; for (cnt = 0; cnt < grst_del + 2; cnt++) { reg = rd32(hw, I40E_GLGEN_RSTAT); if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK)) @@ -2846,7 +2847,7 @@ i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw, status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); - if (!status) + if (!status && filter_index) *filter_index = resp->index; return status; diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c index 183dcb63ce98..a11c70ca5a28 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c @@ -40,7 +40,7 @@ static void i40e_get_pfc_delay(struct i40e_hw *hw, u16 *delay) u32 val; val = rd32(hw, I40E_PRTDCB_GENC); - *delay = (u16)(val & I40E_PRTDCB_GENC_PFCLDA_MASK >> + *delay = (u16)((val & I40E_PRTDCB_GENC_PFCLDA_MASK) >> I40E_PRTDCB_GENC_PFCLDA_SHIFT); } diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 61236f983971..c17ee77100d3 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -989,8 +989,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp, if (!cmd_buf) return count; bytes_not_copied = copy_from_user(cmd_buf, buffer, count); - if (bytes_not_copied < 0) + if (bytes_not_copied < 0) { + kfree(cmd_buf); return bytes_not_copied; + } if (bytes_not_copied > 0) count -= bytes_not_copied; cmd_buf[count] = '\0'; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index cbe281be1c9f..dadda3c5d658 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -1512,7 +1512,12 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, vsi->tc_config.numtc = numtc; vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1; /* Number of queues per enabled TC */ - num_tc_qps = vsi->alloc_queue_pairs/numtc; + /* In MFP case we can have a much lower count of MSIx + * vectors available and so we need to lower the used + * q count. 
+ */ + qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix); + num_tc_qps = qcount / numtc; num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC); /* Setup queue offset/count for all TCs for given VSI */ @@ -2684,8 +2689,15 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi) u16 qoffset, qcount; int i, n; - if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) - return; + if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) { + /* Reset the TC information */ + for (i = 0; i < vsi->num_queue_pairs; i++) { + rx_ring = vsi->rx_rings[i]; + tx_ring = vsi->tx_rings[i]; + rx_ring->dcb_tc = 0; + tx_ring->dcb_tc = 0; + } + } for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) { if (!(vsi->tc_config.enabled_tc & (1 << n))) @@ -3830,6 +3842,12 @@ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf) { int i; + i40e_stop_misc_vector(pf); + if (pf->flags & I40E_FLAG_MSIX_ENABLED) { + synchronize_irq(pf->msix_entries[0].vector); + free_irq(pf->msix_entries[0].vector, pf); + } + i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1); for (i = 0; i < pf->num_alloc_vsi; i++) if (pf->vsi[i]) @@ -5254,8 +5272,14 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, /* Wait for the PF's Tx queues to be disabled */ ret = i40e_pf_wait_txq_disabled(pf); - if (!ret) + if (ret) { + /* Schedule PF reset to recover */ + set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); + i40e_service_event_schedule(pf); + } else { i40e_pf_unquiesce_all_vsi(pf); + } + exit: return ret; } @@ -5587,7 +5611,8 @@ static void i40e_check_hang_subtask(struct i40e_pf *pf) int i, v; /* If we're down or resetting, just bail */ - if (test_bit(__I40E_CONFIG_BUSY, &pf->state)) + if (test_bit(__I40E_DOWN, &pf->state) || + test_bit(__I40E_CONFIG_BUSY, &pf->state)) return; /* for each VSI/netdev @@ -9533,6 +9558,7 @@ static void i40e_remove(struct pci_dev *pdev) set_bit(__I40E_DOWN, &pf->state); del_timer_sync(&pf->service_timer); cancel_work_sync(&pf->service_task); + i40e_fdir_teardown(pf); if (pf->flags & I40E_FLAG_SRIOV_ENABLED) { i40e_free_vfs(pf); @@ -9559,12 +9585,6 @@ static void i40e_remove(struct pci_dev *pdev) if (pf->vsi[pf->lan_vsi]) i40e_vsi_release(pf->vsi[pf->lan_vsi]); - i40e_stop_misc_vector(pf); - if (pf->flags & I40E_FLAG_MSIX_ENABLED) { - synchronize_irq(pf->msix_entries[0].vector); - free_irq(pf->msix_entries[0].vector, pf); - } - /* shutdown and destroy the HMC */ if (pf->hw.hmc.hmc_obj) { ret_code = i40e_shutdown_lan_hmc(&pf->hw); @@ -9718,6 +9738,8 @@ static void i40e_shutdown(struct pci_dev *pdev) wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? 
I40E_PFPM_WUFC_MAG_MASK : 0)); + i40e_clear_interrupt_scheme(pf); + if (system_state == SYSTEM_POWER_OFF) { pci_wake_from_d3(pdev, pf->wol_en); pci_set_power_state(pdev, PCI_D3hot); diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index 3e70f2e45a47..5defe0d63514 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -679,9 +679,11 @@ static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw, { i40e_status status; enum i40e_nvmupd_cmd upd_cmd; + bool retry_attempt = false; upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno); +retry: switch (upd_cmd) { case I40E_NVMUPD_WRITE_CON: status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno); @@ -725,6 +727,39 @@ static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw, *errno = -ESRCH; break; } + + /* In some circumstances, a multi-write transaction takes longer + * than the default 3 minute timeout on the write semaphore. If + * the write failed with an EBUSY status, this is likely the problem, + * so here we try to reacquire the semaphore then retry the write. + * We only do one retry, then give up. + */ + if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) && + !retry_attempt) { + i40e_status old_status = status; + u32 old_asq_status = hw->aq.asq_last_status; + u32 gtime; + + gtime = rd32(hw, I40E_GLVFGEN_TIMER); + if (gtime >= hw->nvm.hw_semaphore_timeout) { + i40e_debug(hw, I40E_DEBUG_ALL, + "NVMUPD: write semaphore expired (%d >= %lld), retrying\n", + gtime, hw->nvm.hw_semaphore_timeout); + i40e_release_nvm(hw); + status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); + if (status) { + i40e_debug(hw, I40E_DEBUG_ALL, + "NVMUPD: write semaphore reacquire failed aq_err = %d\n", + hw->aq.asq_last_status); + status = old_status; + hw->aq.asq_last_status = old_asq_status; + } else { + retry_attempt = true; + goto retry; + } + } + } + return status; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 2206d2d36f0f..bbf1b1247ac4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -586,6 +586,20 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring) } /** + * i40e_get_head - Retrieve head from head writeback + * @tx_ring: tx ring to fetch head of + * + * Returns value of Tx ring head based on value stored + * in head write-back location + **/ +static inline u32 i40e_get_head(struct i40e_ring *tx_ring) +{ + void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count; + + return le32_to_cpu(*(volatile __le32 *)head); +} + +/** * i40e_get_tx_pending - how many tx descriptors not processed * @tx_ring: the ring of descriptors * @@ -594,10 +608,16 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring) **/ static u32 i40e_get_tx_pending(struct i40e_ring *ring) { - u32 ntu = ((ring->next_to_clean <= ring->next_to_use) - ? ring->next_to_use - : ring->next_to_use + ring->count); - return ntu - ring->next_to_clean; + u32 head, tail; + + head = i40e_get_head(ring); + tail = readl(ring->tail); + + if (head != tail) + return (head < tail) ? 
+ tail - head : (tail + ring->count - head); + + return 0; } /** @@ -606,6 +626,8 @@ static u32 i40e_get_tx_pending(struct i40e_ring *ring) **/ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring) { + u32 tx_done = tx_ring->stats.packets; + u32 tx_done_old = tx_ring->tx_stats.tx_done_old; u32 tx_pending = i40e_get_tx_pending(tx_ring); struct i40e_pf *pf = tx_ring->vsi->back; bool ret = false; @@ -623,41 +645,25 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring) * run the check_tx_hang logic with a transmit completion * pending but without time to complete it yet. */ - if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) && - (tx_pending >= I40E_MIN_DESC_PENDING)) { + if ((tx_done_old == tx_done) && tx_pending) { /* make sure it is true for two checks in a row */ ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state); - } else if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) && - (tx_pending < I40E_MIN_DESC_PENDING) && - (tx_pending > 0)) { + } else if (tx_done_old == tx_done && + (tx_pending < I40E_MIN_DESC_PENDING) && (tx_pending > 0)) { if (I40E_DEBUG_FLOW & pf->hw.debug_mask) dev_info(tx_ring->dev, "HW needs some more descs to do a cacheline flush. tx_pending %d, queue %d", tx_pending, tx_ring->queue_index); pf->tx_sluggish_count++; } else { /* update completed stats and disarm the hang check */ - tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets; + tx_ring->tx_stats.tx_done_old = tx_done; clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state); } return ret; } -/** - * i40e_get_head - Retrieve head from head writeback - * @tx_ring: tx ring to fetch head of - * - * Returns value of Tx ring head based on value stored - * in head write-back location - **/ -static inline u32 i40e_get_head(struct i40e_ring *tx_ring) -{ - void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count; - - return le32_to_cpu(*(volatile __le32 *)head); -} - #define WB_STRIDE 0x3 /** @@ -2140,6 +2146,67 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) } /** + * i40e_chk_linearize - Check if there are more than 8 fragments per packet + * @skb: send buffer + * @tx_flags: collected send information + * @hdr_len: size of the packet header + * + * Note: Our HW can't scatter-gather more than 8 fragments to build + * a packet on the wire and so we need to figure out the cases where we + * need to linearize the skb. 
+ **/ +static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags, + const u8 hdr_len) +{ + struct skb_frag_struct *frag; + bool linearize = false; + unsigned int size = 0; + u16 num_frags; + u16 gso_segs; + + num_frags = skb_shinfo(skb)->nr_frags; + gso_segs = skb_shinfo(skb)->gso_segs; + + if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) { + u16 j = 1; + + if (num_frags < (I40E_MAX_BUFFER_TXD)) + goto linearize_chk_done; + /* try the simple math, if we have too many frags per segment */ + if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) > + I40E_MAX_BUFFER_TXD) { + linearize = true; + goto linearize_chk_done; + } + frag = &skb_shinfo(skb)->frags[0]; + size = hdr_len; + /* we might still have more fragments per segment */ + do { + size += skb_frag_size(frag); + frag++; j++; + if (j == I40E_MAX_BUFFER_TXD) { + if (size < skb_shinfo(skb)->gso_size) { + linearize = true; + break; + } + j = 1; + size -= skb_shinfo(skb)->gso_size; + if (size) + j++; + size += hdr_len; + } + num_frags--; + } while (num_frags); + } else { + if (num_frags >= I40E_MAX_BUFFER_TXD) + linearize = true; + } + +linearize_chk_done: + return linearize; +} + +/** * i40e_tx_map - Build the Tx descriptor * @tx_ring: ring to send buffer on * @skb: send buffer @@ -2396,6 +2463,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, if (tsyn) tx_flags |= I40E_TX_FLAGS_TSYN; + if (i40e_chk_linearize(skb, tx_flags, hdr_len)) + if (skb_linearize(skb)) + goto out_drop; + skb_tx_timestamp(skb); /* always enable CRC insertion offload */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 18b00231d2f1..dff0baeb1ecc 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -112,6 +112,7 @@ enum i40e_dyn_idx_t { #define i40e_rx_desc i40e_32byte_rx_desc +#define I40E_MAX_BUFFER_TXD 8 #define I40E_MIN_TX_LEN 17 #define I40E_MAX_DATA_PER_TXD 8192 diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 29004382f462..708891571dae 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -126,6 +126,20 @@ void i40evf_free_tx_resources(struct i40e_ring *tx_ring) } /** + * i40e_get_head - Retrieve head from head writeback + * @tx_ring: tx ring to fetch head of + * + * Returns value of Tx ring head based on value stored + * in head write-back location + **/ +static inline u32 i40e_get_head(struct i40e_ring *tx_ring) +{ + void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count; + + return le32_to_cpu(*(volatile __le32 *)head); +} + +/** * i40e_get_tx_pending - how many tx descriptors not processed * @tx_ring: the ring of descriptors * @@ -134,10 +148,16 @@ void i40evf_free_tx_resources(struct i40e_ring *tx_ring) **/ static u32 i40e_get_tx_pending(struct i40e_ring *ring) { - u32 ntu = ((ring->next_to_clean <= ring->next_to_use) - ? ring->next_to_use - : ring->next_to_use + ring->count); - return ntu - ring->next_to_clean; + u32 head, tail; + + head = i40e_get_head(ring); + tail = readl(ring->tail); + + if (head != tail) + return (head < tail) ? 
+ tail - head : (tail + ring->count - head); + + return 0; } /** @@ -146,6 +166,8 @@ static u32 i40e_get_tx_pending(struct i40e_ring *ring) **/ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring) { + u32 tx_done = tx_ring->stats.packets; + u32 tx_done_old = tx_ring->tx_stats.tx_done_old; u32 tx_pending = i40e_get_tx_pending(tx_ring); bool ret = false; @@ -162,36 +184,20 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring) * run the check_tx_hang logic with a transmit completion * pending but without time to complete it yet. */ - if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) && - (tx_pending >= I40E_MIN_DESC_PENDING)) { + if ((tx_done_old == tx_done) && tx_pending) { /* make sure it is true for two checks in a row */ ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state); - } else if (!(tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) || - !(tx_pending < I40E_MIN_DESC_PENDING) || - !(tx_pending > 0)) { + } else if (tx_done_old == tx_done && + (tx_pending < I40E_MIN_DESC_PENDING) && (tx_pending > 0)) { /* update completed stats and disarm the hang check */ - tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets; + tx_ring->tx_stats.tx_done_old = tx_done; clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state); } return ret; } -/** - * i40e_get_head - Retrieve head from head writeback - * @tx_ring: tx ring to fetch head of - * - * Returns value of Tx ring head based on value stored - * in head write-back location - **/ -static inline u32 i40e_get_head(struct i40e_ring *tx_ring) -{ - void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count; - - return le32_to_cpu(*(volatile __le32 *)head); -} - #define WB_STRIDE 0x3 /** @@ -1206,17 +1212,16 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, if (err < 0) return err; - if (protocol == htons(ETH_P_IP)) { - iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb); + iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb); + ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb); + + if (iph->version == 4) { tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb); iph->tot_len = 0; iph->check = 0; tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0, IPPROTO_TCP, 0); - } else if (skb_is_gso_v6(skb)) { - - ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) - : ipv6_hdr(skb); + } else if (ipv6h->version == 6) { tcph = skb->encapsulation ? 
inner_tcp_hdr(skb) : tcp_hdr(skb); ipv6h->payload_len = 0; tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, @@ -1274,13 +1279,9 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; } } else if (tx_flags & I40E_TX_FLAGS_IPV6) { - if (tx_flags & I40E_TX_FLAGS_TSO) { - *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6; + *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6; + if (tx_flags & I40E_TX_FLAGS_TSO) ip_hdr(skb)->check = 0; - } else { - *cd_tunneling |= - I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; - } } /* Now set the ctx descriptor fields */ @@ -1290,6 +1291,11 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, ((skb_inner_network_offset(skb) - skb_transport_offset(skb)) >> 1) << I40E_TXD_CTX_QW0_NATLEN_SHIFT; + if (this_ip_hdr->version == 6) { + tx_flags &= ~I40E_TX_FLAGS_IPV4; + tx_flags |= I40E_TX_FLAGS_IPV6; + } + } else { network_hdr_len = skb_network_header_len(skb); @@ -1380,6 +1386,67 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring, context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss); } + /** + * i40e_chk_linearize - Check if there are more than 8 fragments per packet + * @skb: send buffer + * @tx_flags: collected send information + * @hdr_len: size of the packet header + * + * Note: Our HW can't scatter-gather more than 8 fragments to build + * a packet on the wire and so we need to figure out the cases where we + * need to linearize the skb. + **/ +static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags, + const u8 hdr_len) +{ + struct skb_frag_struct *frag; + bool linearize = false; + unsigned int size = 0; + u16 num_frags; + u16 gso_segs; + + num_frags = skb_shinfo(skb)->nr_frags; + gso_segs = skb_shinfo(skb)->gso_segs; + + if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) { + u16 j = 1; + + if (num_frags < (I40E_MAX_BUFFER_TXD)) + goto linearize_chk_done; + /* try the simple math, if we have too many frags per segment */ + if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) > + I40E_MAX_BUFFER_TXD) { + linearize = true; + goto linearize_chk_done; + } + frag = &skb_shinfo(skb)->frags[0]; + size = hdr_len; + /* we might still have more fragments per segment */ + do { + size += skb_frag_size(frag); + frag++; j++; + if (j == I40E_MAX_BUFFER_TXD) { + if (size < skb_shinfo(skb)->gso_size) { + linearize = true; + break; + } + j = 1; + size -= skb_shinfo(skb)->gso_size; + if (size) + j++; + size += hdr_len; + } + num_frags--; + } while (num_frags); + } else { + if (num_frags >= I40E_MAX_BUFFER_TXD) + linearize = true; + } + +linearize_chk_done: + return linearize; +} + /** * i40e_tx_map - Build the Tx descriptor * @tx_ring: ring to send buffer on @@ -1654,6 +1721,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, else if (tso) tx_flags |= I40E_TX_FLAGS_TSO; + if (i40e_chk_linearize(skb, tx_flags, hdr_len)) + if (skb_linearize(skb)) + goto out_drop; + skb_tx_timestamp(skb); /* always enable CRC insertion offload */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h index 4e15903b2b6d..c950a038237c 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h @@ -112,6 +112,7 @@ enum i40e_dyn_idx_t { #define i40e_rx_desc i40e_32byte_rx_desc +#define I40E_MAX_BUFFER_TXD 8 #define I40E_MIN_TX_LEN 17 #define I40E_MAX_DATA_PER_TXD 8192 diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 2a210c4efb89..ebce5bb24df9 
100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -1698,8 +1698,6 @@ int mlx4_en_start_port(struct net_device *dev) /* Schedule multicast task to populate multicast list */ queue_work(mdev->workqueue, &priv->rx_mode_task); - mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap); - #ifdef CONFIG_MLX4_EN_VXLAN if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) vxlan_get_rx_port(dev); @@ -2853,6 +2851,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, queue_delayed_work(mdev->workqueue, &priv->service_task, SERVICE_TASK_DELAY); + mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap); + return 0; out: diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c index 2d8ee66138e8..a61009f4b2df 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c @@ -81,12 +81,14 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv) { u32 loopback_ok = 0; int i; - + bool gro_enabled; priv->loopback_ok = 0; priv->validate_loopback = 1; + gro_enabled = priv->dev->features & NETIF_F_GRO; mlx4_en_update_loopback_state(priv->dev, priv->dev->features); + priv->dev->features &= ~NETIF_F_GRO; /* xmit */ if (mlx4_en_test_loopback_xmit(priv)) { @@ -108,6 +110,10 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv) mlx4_en_test_loopback_exit: priv->validate_loopback = 0; + + if (gro_enabled) + priv->dev->features |= NETIF_F_GRO; + mlx4_en_update_loopback_state(priv->dev, priv->dev->features); return !loopback_ok; } diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 2a8268e6be15..ebbe244e80dd 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -453,7 +453,7 @@ struct mlx4_en_port_stats { unsigned long rx_chksum_none; unsigned long rx_chksum_complete; unsigned long tx_chksum_offload; -#define NUM_PORT_STATS 9 +#define NUM_PORT_STATS 10 }; struct mlx4_en_perf_stats { diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c index 2bb8553bd905..eda29dbbfcd2 100644 --- a/drivers/net/ethernet/mellanox/mlx4/qp.c +++ b/drivers/net/ethernet/mellanox/mlx4/qp.c @@ -412,7 +412,6 @@ err_icm: EXPORT_SYMBOL_GPL(mlx4_qp_alloc); -#define MLX4_UPDATE_QP_SUPPORTED_ATTRS MLX4_UPDATE_QP_SMAC int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn, enum mlx4_update_qp_attr attr, struct mlx4_update_qp_params *params) diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 486e3d26cd4a..d97ca88c55b5 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -713,7 +713,7 @@ static int update_vport_qp_param(struct mlx4_dev *dev, struct mlx4_vport_oper_state *vp_oper; struct mlx4_priv *priv; u32 qp_type; - int port; + int port, err = 0; port = (qpc->pri_path.sched_queue & 0x40) ? 
2 : 1; priv = mlx4_priv(dev); @@ -738,7 +738,9 @@ static int update_vport_qp_param(struct mlx4_dev *dev, } else { struct mlx4_update_qp_params params = {.flags = 0}; - mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, ¶ms); + err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, ¶ms); + if (err) + goto out; } } @@ -773,7 +775,8 @@ static int update_vport_qp_param(struct mlx4_dev *dev, qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC; qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx; } - return 0; +out: + return err; } static int mpt_mask(struct mlx4_dev *dev) diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c index 44e8d7d25547..57a6e6cd74fc 100644 --- a/drivers/net/ethernet/pasemi/pasemi_mac.c +++ b/drivers/net/ethernet/pasemi/pasemi_mac.c @@ -1239,11 +1239,9 @@ static int pasemi_mac_open(struct net_device *dev) if (mac->phydev) phy_start(mac->phydev); - init_timer(&mac->tx->clean_timer); - mac->tx->clean_timer.function = pasemi_mac_tx_timer; - mac->tx->clean_timer.data = (unsigned long)mac->tx; - mac->tx->clean_timer.expires = jiffies+HZ; - add_timer(&mac->tx->clean_timer); + setup_timer(&mac->tx->clean_timer, pasemi_mac_tx_timer, + (unsigned long)mac->tx); + mod_timer(&mac->tx->clean_timer, jiffies + HZ); return 0; diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h index 6e426ae94692..0a5e204a0179 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h @@ -354,7 +354,7 @@ struct cmd_desc_type0 { } __attribute__ ((aligned(64))); -/* Note: sizeof(rcv_desc) should always be a mutliple of 2 */ +/* Note: sizeof(rcv_desc) should always be a multiple of 2 */ struct rcv_desc { __le16 reference_handle; __le16 reserved; @@ -499,7 +499,7 @@ struct uni_data_desc{ #define NETXEN_IMAGE_START 0x43000 /* compressed image */ #define NETXEN_SECONDARY_START 0x200000 /* backup images */ #define NETXEN_PXE_START 0x3E0000 /* PXE boot rom */ -#define NETXEN_USER_START 0x3E8000 /* Firmare info */ +#define NETXEN_USER_START 0x3E8000 /* Firmware info */ #define NETXEN_FIXED_START 0x3F0000 /* backup of crbinit */ #define NETXEN_USER_START_OLD NETXEN_PXE_START /* very old flash */ diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index fa4317611fd6..f221126a5c4e 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -314,7 +314,7 @@ struct qlcnic_fdt { #define QLCNIC_BRDCFG_START 0x4000 /* board config */ #define QLCNIC_BOOTLD_START 0x10000 /* bootld */ #define QLCNIC_IMAGE_START 0x43000 /* compressed image */ -#define QLCNIC_USER_START 0x3E8000 /* Firmare info */ +#define QLCNIC_USER_START 0x3E8000 /* Firmware info */ #define QLCNIC_FW_VERSION_OFFSET (QLCNIC_USER_START+0x408) #define QLCNIC_FW_SIZE_OFFSET (QLCNIC_USER_START+0x40c) diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index ad0020af2193..c70ab40d8698 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -2561,7 +2561,7 @@ static int rtl_check_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw) int rc = -EINVAL; if (!rtl_fw_format_ok(tp, rtl_fw)) { - netif_err(tp, ifup, dev, "invalid firwmare\n"); + netif_err(tp, ifup, dev, "invalid firmware\n"); goto out; } @@ -5067,8 +5067,6 @@ static void rtl_hw_reset(struct rtl8169_private *tp) RTL_W8(ChipCmd, CmdReset); 
rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100); - - netdev_reset_queue(tp->dev); } static void rtl_request_uncached_firmware(struct rtl8169_private *tp) @@ -7049,7 +7047,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, u32 status, len; u32 opts[2]; int frags; - bool stop_queue; if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) { netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n"); @@ -7090,8 +7087,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, txd->opts2 = cpu_to_le32(opts[1]); - netdev_sent_queue(dev, skb->len); - skb_tx_timestamp(skb); /* Force memory writes to complete before releasing descriptor */ @@ -7106,16 +7101,11 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, tp->cur_tx += frags + 1; - stop_queue = !TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS); + RTL_W8(TxPoll, NPQ); - if (!skb->xmit_more || stop_queue || - netif_xmit_stopped(netdev_get_tx_queue(dev, 0))) { - RTL_W8(TxPoll, NPQ); - - mmiowb(); - } + mmiowb(); - if (stop_queue) { + if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) { /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must * not miss a ring update when it notices a stopped queue. */ @@ -7198,7 +7188,6 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev) static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp) { unsigned int dirty_tx, tx_left; - unsigned int bytes_compl = 0, pkts_compl = 0; dirty_tx = tp->dirty_tx; smp_rmb(); @@ -7222,8 +7211,10 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp) rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb, tp->TxDescArray + entry); if (status & LastFrag) { - pkts_compl++; - bytes_compl += tx_skb->skb->len; + u64_stats_update_begin(&tp->tx_stats.syncp); + tp->tx_stats.packets++; + tp->tx_stats.bytes += tx_skb->skb->len; + u64_stats_update_end(&tp->tx_stats.syncp); dev_kfree_skb_any(tx_skb->skb); tx_skb->skb = NULL; } @@ -7232,13 +7223,6 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp) } if (tp->dirty_tx != dirty_tx) { - netdev_completed_queue(tp->dev, pkts_compl, bytes_compl); - - u64_stats_update_begin(&tp->tx_stats.syncp); - tp->tx_stats.packets += pkts_compl; - tp->tx_stats.bytes += bytes_compl; - u64_stats_update_end(&tp->tx_stats.syncp); - tp->dirty_tx = dirty_tx; /* Sync with rtl8169_start_xmit: * - publish dirty_tx ring index (write barrier) diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 4da8bd263997..736d5d1624a1 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -508,7 +508,6 @@ static struct sh_eth_cpu_data r8a779x_data = { .tpauser = 1, .hw_swap = 1, .rmiimode = 1, - .shift_rd0 = 1, }; static void sh_eth_set_rate_sh7724(struct net_device *ndev) @@ -1392,6 +1391,9 @@ static void sh_eth_dev_exit(struct net_device *ndev) msleep(2); /* max frame time at 10 Mbps < 1250 us */ sh_eth_get_stats(ndev); sh_eth_reset(ndev); + + /* Set MAC address again */ + update_mac_address(ndev); } /* free Tx skb function */ @@ -1407,6 +1409,8 @@ static int sh_eth_txfree(struct net_device *ndev) txdesc = &mdp->tx_ring[entry]; if (txdesc->status & cpu_to_edmac(mdp, TD_TACT)) break; + /* TACT bit must be checked before all the following reads */ + rmb(); /* Free the original skb. 
*/ if (mdp->tx_skbuff[entry]) { dma_unmap_single(&ndev->dev, txdesc->addr, @@ -1444,6 +1448,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) limit = boguscnt; rxdesc = &mdp->rx_ring[entry]; while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) { + /* RACT bit must be checked before all the following reads */ + rmb(); desc_status = edmac_to_cpu(mdp, rxdesc->status); pkt_len = rxdesc->frame_length; @@ -1455,8 +1461,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) /* In case of almost all GETHER/ETHERs, the Receive Frame State * (RFS) bits in the Receive Descriptor 0 are from bit 9 to - * bit 0. However, in case of the R8A7740, R8A779x, and - * R7S72100 the RFS bits are from bit 25 to bit 16. So, the + * bit 0. However, in case of the R8A7740 and R7S72100 + * the RFS bits are from bit 25 to bit 16. So, the * driver needs right shifting by 16. */ if (mdp->cd->shift_rd0) @@ -1523,6 +1529,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) skb_checksum_none_assert(skb); rxdesc->addr = dma_addr; } + wmb(); /* RACT bit must be set after all the above writes */ if (entry >= mdp->num_rx_ring - 1) rxdesc->status |= cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL); @@ -1535,7 +1542,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) /* If we don't need to check status, don't. -KDU */ if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) { /* fix the values for the next receiving if RDE is set */ - if (intr_status & EESR_RDE) { + if (intr_status & EESR_RDE && mdp->reg_offset[RDFAR] != 0) { u32 count = (sh_eth_read(ndev, RDFAR) - sh_eth_read(ndev, RDLAR)) >> 4; @@ -2174,7 +2181,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev) } spin_unlock_irqrestore(&mdp->lock, flags); - if (skb_padto(skb, ETH_ZLEN)) + if (skb_put_padto(skb, ETH_ZLEN)) return NETDEV_TX_OK; entry = mdp->cur_tx % mdp->num_tx_ring; @@ -2192,6 +2199,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev) } txdesc->buffer_length = skb->len; + wmb(); /* TACT bit must be set after all the above writes */ if (entry >= mdp->num_tx_ring - 1) txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE); else diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c index 34389b6aa67c..9fb6948e14c6 100644 --- a/drivers/net/ethernet/rocker/rocker.c +++ b/drivers/net/ethernet/rocker/rocker.c @@ -1257,9 +1257,9 @@ static void rocker_port_set_enable(struct rocker_port *rocker_port, bool enable) u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE); if (enable) - val |= 1 << rocker_port->lport; + val |= 1ULL << rocker_port->lport; else - val &= ~(1 << rocker_port->lport); + val &= ~(1ULL << rocker_port->lport); rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val); } @@ -4201,6 +4201,8 @@ static int rocker_probe_ports(struct rocker *rocker) alloc_size = sizeof(struct rocker_port *) * rocker->port_count; rocker->ports = kmalloc(alloc_size, GFP_KERNEL); + if (!rocker->ports) + return -ENOMEM; for (i = 0; i < rocker->port_count; i++) { err = rocker_probe_port(rocker, i); if (err) diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c index 6b33127ab352..3449893aea8d 100644 --- a/drivers/net/ethernet/smsc/smc91c92_cs.c +++ b/drivers/net/ethernet/smsc/smc91c92_cs.c @@ -1070,11 +1070,8 @@ static int smc_open(struct net_device *dev) smc->packets_waiting = 0; smc_reset(dev); - init_timer(&smc->media); - smc->media.function = 
media_check; - smc->media.data = (u_long) dev; - smc->media.expires = jiffies + HZ; - add_timer(&smc->media); + setup_timer(&smc->media, media_check, (u_long)dev); + mod_timer(&smc->media, jiffies + HZ); return 0; } /* smc_open */ diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index 88a55f95fe09..8678e39aba08 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c @@ -91,6 +91,11 @@ static const char version[] = #include "smc91x.h" +#if defined(CONFIG_ASSABET_NEPONSET) +#include <mach/assabet.h> +#include <mach/neponset.h> +#endif + #ifndef SMC_NOWAIT # define SMC_NOWAIT 0 #endif @@ -2243,10 +2248,9 @@ static int smc_drv_probe(struct platform_device *pdev) const struct of_device_id *match = NULL; struct smc_local *lp; struct net_device *ndev; - struct resource *res; + struct resource *res, *ires; unsigned int __iomem *addr; unsigned long irq_flags = SMC_IRQ_FLAGS; - unsigned long irq_resflags; int ret; ndev = alloc_etherdev(sizeof(struct smc_local)); @@ -2338,25 +2342,23 @@ static int smc_drv_probe(struct platform_device *pdev) goto out_free_netdev; } - ndev->irq = platform_get_irq(pdev, 0); - if (ndev->irq <= 0) { + ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!ires) { ret = -ENODEV; goto out_release_io; } - /* - * If this platform does not specify any special irqflags, or if - * the resource supplies a trigger, override the irqflags with - * the trigger flags from the resource. - */ - irq_resflags = irqd_get_trigger_type(irq_get_irq_data(ndev->irq)); - if (irq_flags == -1 || irq_resflags & IRQF_TRIGGER_MASK) - irq_flags = irq_resflags & IRQF_TRIGGER_MASK; + + ndev->irq = ires->start; + + if (irq_flags == -1 || ires->flags & IRQF_TRIGGER_MASK) + irq_flags = ires->flags & IRQF_TRIGGER_MASK; ret = smc_request_attrib(pdev, ndev); if (ret) goto out_release_io; -#if defined(CONFIG_SA1100_ASSABET) - neponset_ncr_set(NCR_ENET_OSC_EN); +#if defined(CONFIG_ASSABET_NEPONSET) + if (machine_is_assabet() && machine_has_neponset()) + neponset_ncr_set(NCR_ENET_OSC_EN); #endif platform_set_drvdata(pdev, ndev); ret = smc_enable_device(pdev); diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h index be67baf5f677..3a18501d1068 100644 --- a/drivers/net/ethernet/smsc/smc91x.h +++ b/drivers/net/ethernet/smsc/smc91x.h @@ -39,14 +39,7 @@ * Define your architecture specific bus configuration parameters here. */ -#if defined(CONFIG_ARCH_LUBBOCK) ||\ - defined(CONFIG_MACH_MAINSTONE) ||\ - defined(CONFIG_MACH_ZYLONITE) ||\ - defined(CONFIG_MACH_LITTLETON) ||\ - defined(CONFIG_MACH_ZYLONITE2) ||\ - defined(CONFIG_ARCH_VIPER) ||\ - defined(CONFIG_MACH_STARGATE2) ||\ - defined(CONFIG_ARCH_VERSATILE) +#if defined(CONFIG_ARM) #include <asm/mach-types.h> @@ -74,95 +67,8 @@ /* We actually can't write halfwords properly if not word aligned */ static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg) { - if ((machine_is_mainstone() || machine_is_stargate2()) && reg & 2) { - unsigned int v = val << 16; - v |= readl(ioaddr + (reg & ~2)) & 0xffff; - writel(v, ioaddr + (reg & ~2)); - } else { - writew(val, ioaddr + reg); - } -} - -#elif defined(CONFIG_SA1100_PLEB) -/* We can only do 16-bit reads and writes in the static memory space. 
*/ -#define SMC_CAN_USE_8BIT 1 -#define SMC_CAN_USE_16BIT 1 -#define SMC_CAN_USE_32BIT 0 -#define SMC_IO_SHIFT 0 -#define SMC_NOWAIT 1 - -#define SMC_inb(a, r) readb((a) + (r)) -#define SMC_insb(a, r, p, l) readsb((a) + (r), p, (l)) -#define SMC_inw(a, r) readw((a) + (r)) -#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) -#define SMC_outb(v, a, r) writeb(v, (a) + (r)) -#define SMC_outsb(a, r, p, l) writesb((a) + (r), p, (l)) -#define SMC_outw(v, a, r) writew(v, (a) + (r)) -#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) - -#define SMC_IRQ_FLAGS (-1) - -#elif defined(CONFIG_SA1100_ASSABET) - -#include <mach/neponset.h> - -/* We can only do 8-bit reads and writes in the static memory space. */ -#define SMC_CAN_USE_8BIT 1 -#define SMC_CAN_USE_16BIT 0 -#define SMC_CAN_USE_32BIT 0 -#define SMC_NOWAIT 1 - -/* The first two address lines aren't connected... */ -#define SMC_IO_SHIFT 2 - -#define SMC_inb(a, r) readb((a) + (r)) -#define SMC_outb(v, a, r) writeb(v, (a) + (r)) -#define SMC_insb(a, r, p, l) readsb((a) + (r), p, (l)) -#define SMC_outsb(a, r, p, l) writesb((a) + (r), p, (l)) -#define SMC_IRQ_FLAGS (-1) /* from resource */ - -#elif defined(CONFIG_MACH_LOGICPD_PXA270) || \ - defined(CONFIG_MACH_NOMADIK_8815NHK) - -#define SMC_CAN_USE_8BIT 0 -#define SMC_CAN_USE_16BIT 1 -#define SMC_CAN_USE_32BIT 0 -#define SMC_IO_SHIFT 0 -#define SMC_NOWAIT 1 - -#define SMC_inw(a, r) readw((a) + (r)) -#define SMC_outw(v, a, r) writew(v, (a) + (r)) -#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) -#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) - -#elif defined(CONFIG_ARCH_INNOKOM) || \ - defined(CONFIG_ARCH_PXA_IDP) || \ - defined(CONFIG_ARCH_RAMSES) || \ - defined(CONFIG_ARCH_PCM027) - -#define SMC_CAN_USE_8BIT 1 -#define SMC_CAN_USE_16BIT 1 -#define SMC_CAN_USE_32BIT 1 -#define SMC_IO_SHIFT 0 -#define SMC_NOWAIT 1 -#define SMC_USE_PXA_DMA 1 - -#define SMC_inb(a, r) readb((a) + (r)) -#define SMC_inw(a, r) readw((a) + (r)) -#define SMC_inl(a, r) readl((a) + (r)) -#define SMC_outb(v, a, r) writeb(v, (a) + (r)) -#define SMC_outl(v, a, r) writel(v, (a) + (r)) -#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l) -#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l) -#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) -#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) -#define SMC_IRQ_FLAGS (-1) /* from resource */ - -/* We actually can't write halfwords properly if not word aligned */ -static inline void -SMC_outw(u16 val, void __iomem *ioaddr, int reg) -{ - if (reg & 2) { + if ((machine_is_mainstone() || machine_is_stargate2() || + machine_is_pxa_idp()) && reg & 2) { unsigned int v = val << 16; v |= readl(ioaddr + (reg & ~2)) & 0xffff; writel(v, ioaddr + (reg & ~2)); @@ -237,20 +143,6 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg) #define RPC_LSA_DEFAULT RPC_LED_100_10 #define RPC_LSB_DEFAULT RPC_LED_TX_RX -#elif defined(CONFIG_ARCH_MSM) - -#define SMC_CAN_USE_8BIT 0 -#define SMC_CAN_USE_16BIT 1 -#define SMC_CAN_USE_32BIT 0 -#define SMC_NOWAIT 1 - -#define SMC_inw(a, r) readw((a) + (r)) -#define SMC_outw(v, a, r) writew(v, (a) + (r)) -#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) -#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) - -#define SMC_IRQ_FLAGS IRQF_TRIGGER_HIGH - #elif defined(CONFIG_COLDFIRE) #define SMC_CAN_USE_8BIT 0 diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 55e89b3838f1..a0ea84fe6519 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ 
b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -310,11 +310,11 @@ bool stmmac_eee_init(struct stmmac_priv *priv) spin_lock_irqsave(&priv->lock, flags); if (!priv->eee_active) { priv->eee_active = 1; - init_timer(&priv->eee_ctrl_timer); - priv->eee_ctrl_timer.function = stmmac_eee_ctrl_timer; - priv->eee_ctrl_timer.data = (unsigned long)priv; - priv->eee_ctrl_timer.expires = STMMAC_LPI_T(eee_timer); - add_timer(&priv->eee_ctrl_timer); + setup_timer(&priv->eee_ctrl_timer, + stmmac_eee_ctrl_timer, + (unsigned long)priv); + mod_timer(&priv->eee_ctrl_timer, + STMMAC_LPI_T(eee_timer)); priv->hw->mac->set_eee_timer(priv->hw, STMMAC_DEFAULT_LIT_LS, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index fb846ebba1d9..f9b42f11950f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -272,6 +272,37 @@ static int stmmac_pltfr_probe(struct platform_device *pdev) struct stmmac_priv *priv = NULL; struct plat_stmmacenet_data *plat_dat = NULL; const char *mac = NULL; + int irq, wol_irq, lpi_irq; + + /* Get IRQ information early to have an ability to ask for deferred + * probe if needed before we went too far with resource allocation. + */ + irq = platform_get_irq_byname(pdev, "macirq"); + if (irq < 0) { + if (irq != -EPROBE_DEFER) { + dev_err(dev, + "MAC IRQ configuration information not found\n"); + } + return irq; + } + + /* On some platforms e.g. SPEAr the wake up irq differs from the mac irq + * The external wake up irq can be passed through the platform code + * named as "eth_wake_irq" + * + * In case the wake up interrupt is not passed from the platform + * so the driver will continue to use the mac irq (ndev->irq) + */ + wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq"); + if (wol_irq < 0) { + if (wol_irq == -EPROBE_DEFER) + return -EPROBE_DEFER; + wol_irq = irq; + } + + lpi_irq = platform_get_irq_byname(pdev, "eth_lpi"); + if (lpi_irq == -EPROBE_DEFER) + return -EPROBE_DEFER; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); addr = devm_ioremap_resource(dev, res); @@ -323,39 +354,15 @@ static int stmmac_pltfr_probe(struct platform_device *pdev) return PTR_ERR(priv); } + /* Copy IRQ values to priv structure which is now avaialble */ + priv->dev->irq = irq; + priv->wol_irq = wol_irq; + priv->lpi_irq = lpi_irq; + /* Get MAC address if available (DT) */ if (mac) memcpy(priv->dev->dev_addr, mac, ETH_ALEN); - /* Get the MAC information */ - priv->dev->irq = platform_get_irq_byname(pdev, "macirq"); - if (priv->dev->irq < 0) { - if (priv->dev->irq != -EPROBE_DEFER) { - netdev_err(priv->dev, - "MAC IRQ configuration information not found\n"); - } - return priv->dev->irq; - } - - /* - * On some platforms e.g. 
SPEAr the wake up irq differs from the mac irq - * The external wake up irq can be passed through the platform code - * named as "eth_wake_irq" - * - * In case the wake up interrupt is not passed from the platform - * so the driver will continue to use the mac irq (ndev->irq) - */ - priv->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq"); - if (priv->wol_irq < 0) { - if (priv->wol_irq == -EPROBE_DEFER) - return -EPROBE_DEFER; - priv->wol_irq = priv->dev->irq; - } - - priv->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi"); - if (priv->lpi_irq == -EPROBE_DEFER) - return -EPROBE_DEFER; - platform_set_drvdata(pdev, priv->dev); pr_debug("STMMAC platform driver registration completed"); diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index 4b51f903fb73..0c5842aeb807 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -6989,10 +6989,10 @@ static int niu_class_to_ethflow(u64 class, int *flow_type) *flow_type = IP_USER_FLOW; break; default: - return 0; + return -EINVAL; } - return 1; + return 0; } static int niu_ethflow_to_class(int flow_type, u64 *class) @@ -7198,11 +7198,9 @@ static int niu_get_ethtool_tcam_entry(struct niu *np, class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >> TCAM_V4KEY0_CLASS_CODE_SHIFT; ret = niu_class_to_ethflow(class, &fsp->flow_type); - if (ret < 0) { netdev_info(np->dev, "niu%d: niu_class_to_ethflow failed\n", parent->index); - ret = -EINVAL; goto out; } diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 7d8dd0d2182e..a1bbaf6352ba 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -1103,7 +1103,7 @@ static inline void cpsw_add_dual_emac_def_ale_entries( cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast, port_mask, ALE_VLAN, slave->port_vlan, 0); cpsw_ale_add_ucast(priv->ale, priv->mac_addr, - priv->host_port, ALE_VLAN, slave->port_vlan); + priv->host_port, ALE_VLAN | ALE_SECURE, slave->port_vlan); } static void soft_reset_slave(struct cpsw_slave *slave) @@ -2466,6 +2466,7 @@ static int cpsw_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_PM_SLEEP static int cpsw_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); @@ -2518,11 +2519,9 @@ static int cpsw_resume(struct device *dev) } return 0; } +#endif -static const struct dev_pm_ops cpsw_pm_ops = { - .suspend = cpsw_suspend, - .resume = cpsw_resume, -}; +static SIMPLE_DEV_PM_OPS(cpsw_pm_ops, cpsw_suspend, cpsw_resume); static const struct of_device_id cpsw_of_mtable[] = { { .compatible = "ti,cpsw", }, diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c index 98655b44b97e..c00084d689f3 100644 --- a/drivers/net/ethernet/ti/davinci_mdio.c +++ b/drivers/net/ethernet/ti/davinci_mdio.c @@ -423,6 +423,7 @@ static int davinci_mdio_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_PM_SLEEP static int davinci_mdio_suspend(struct device *dev) { struct davinci_mdio_data *data = dev_get_drvdata(dev); @@ -464,10 +465,10 @@ static int davinci_mdio_resume(struct device *dev) return 0; } +#endif static const struct dev_pm_ops davinci_mdio_pm_ops = { - .suspend_late = davinci_mdio_suspend, - .resume_early = davinci_mdio_resume, + SET_LATE_SYSTEM_SLEEP_PM_OPS(davinci_mdio_suspend, davinci_mdio_resume) }; #if IS_ENABLED(CONFIG_OF) diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c index a495931a66a1..0e0fbb5842b3 100644 --- a/drivers/net/ethernet/wiznet/w5100.c +++ 
b/drivers/net/ethernet/wiznet/w5100.c @@ -498,9 +498,9 @@ static int w5100_napi_poll(struct napi_struct *napi, int budget) } if (rx_count < budget) { + napi_complete(napi); w5100_write(priv, W5100_IMR, IR_S0); mmiowb(); - napi_complete(napi); } return rx_count; diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c index 09322d9db578..4b310002258d 100644 --- a/drivers/net/ethernet/wiznet/w5300.c +++ b/drivers/net/ethernet/wiznet/w5300.c @@ -418,9 +418,9 @@ static int w5300_napi_poll(struct napi_struct *napi, int budget) } if (rx_count < budget) { + napi_complete(napi); w5300_write(priv, W5300_IMR, IR_S0); mmiowb(); - napi_complete(napi); } return rx_count; diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c index f7e0f0f7c2e2..9e16a2819d48 100644 --- a/drivers/net/ethernet/xscale/ixp4xx_eth.c +++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c @@ -938,7 +938,7 @@ static void eth_set_mcast_list(struct net_device *dev) int i; static const u8 allmulti[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 }; - if (dev->flags & IFF_ALLMULTI) { + if ((dev->flags & IFF_ALLMULTI) && !(dev->flags & IFF_PROMISC)) { for (i = 0; i < ETH_ALEN; i++) { __raw_writel(allmulti[i], &port->regs->mcast_addr[i]); __raw_writel(allmulti[i], &port->regs->mcast_mask[i]); diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index e40fdfccc9c1..27ecc5c4fa26 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -654,11 +654,14 @@ static void macvtap_skb_to_vnet_hdr(struct macvtap_queue *q, } /* else everything is zero */ } +/* Neighbour code has some assumptions on HH_DATA_MOD alignment */ +#define MACVTAP_RESERVE HH_DATA_OFF(ETH_HLEN) + /* Get packet from user space buffer */ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, struct iov_iter *from, int noblock) { - int good_linear = SKB_MAX_HEAD(NET_IP_ALIGN); + int good_linear = SKB_MAX_HEAD(MACVTAP_RESERVE); struct sk_buff *skb; struct macvlan_dev *vlan; unsigned long total_len = iov_iter_count(from); @@ -722,7 +725,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len); } - skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen, + skb = macvtap_alloc_skb(&q->sk, MACVTAP_RESERVE, copylen, linear, noblock, &err); if (!skb) goto err; diff --git a/drivers/net/phy/amd-xgbe-phy.c b/drivers/net/phy/amd-xgbe-phy.c index 9e3af54c9010..32efbd48f326 100644 --- a/drivers/net/phy/amd-xgbe-phy.c +++ b/drivers/net/phy/amd-xgbe-phy.c @@ -92,6 +92,8 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver"); #define XGBE_PHY_CDR_RATE_PROPERTY "amd,serdes-cdr-rate" #define XGBE_PHY_PQ_SKEW_PROPERTY "amd,serdes-pq-skew" #define XGBE_PHY_TX_AMP_PROPERTY "amd,serdes-tx-amp" +#define XGBE_PHY_DFE_CFG_PROPERTY "amd,serdes-dfe-tap-config" +#define XGBE_PHY_DFE_ENA_PROPERTY "amd,serdes-dfe-tap-enable" #define XGBE_PHY_SPEEDS 3 #define XGBE_PHY_SPEED_1000 0 @@ -177,10 +179,12 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver"); #define SPEED_10000_BLWC 0 #define SPEED_10000_CDR 0x7 #define SPEED_10000_PLL 0x1 -#define SPEED_10000_PQ 0x1e +#define SPEED_10000_PQ 0x12 #define SPEED_10000_RATE 0x0 #define SPEED_10000_TXAMP 0xa #define SPEED_10000_WORD 0x7 +#define SPEED_10000_DFE_TAP_CONFIG 0x1 +#define SPEED_10000_DFE_TAP_ENABLE 0x7f #define SPEED_2500_BLWC 1 #define SPEED_2500_CDR 0x2 @@ -189,6 +193,8 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver"); #define SPEED_2500_RATE 0x1 #define SPEED_2500_TXAMP 0xf 
#define SPEED_2500_WORD 0x1 +#define SPEED_2500_DFE_TAP_CONFIG 0x3 +#define SPEED_2500_DFE_TAP_ENABLE 0x0 #define SPEED_1000_BLWC 1 #define SPEED_1000_CDR 0x2 @@ -197,16 +203,25 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver"); #define SPEED_1000_RATE 0x3 #define SPEED_1000_TXAMP 0xf #define SPEED_1000_WORD 0x1 +#define SPEED_1000_DFE_TAP_CONFIG 0x3 +#define SPEED_1000_DFE_TAP_ENABLE 0x0 /* SerDes RxTx register offsets */ +#define RXTX_REG6 0x0018 #define RXTX_REG20 0x0050 +#define RXTX_REG22 0x0058 #define RXTX_REG114 0x01c8 +#define RXTX_REG129 0x0204 /* SerDes RxTx register entry bit positions and sizes */ +#define RXTX_REG6_RESETB_RXD_INDEX 8 +#define RXTX_REG6_RESETB_RXD_WIDTH 1 #define RXTX_REG20_BLWC_ENA_INDEX 2 #define RXTX_REG20_BLWC_ENA_WIDTH 1 #define RXTX_REG114_PQ_REG_INDEX 9 #define RXTX_REG114_PQ_REG_WIDTH 7 +#define RXTX_REG129_RXDFE_CONFIG_INDEX 14 +#define RXTX_REG129_RXDFE_CONFIG_WIDTH 2 /* Bit setting and getting macros * The get macro will extract the current bit field value from within @@ -333,6 +348,18 @@ static const u32 amd_xgbe_phy_serdes_tx_amp[] = { SPEED_10000_TXAMP, }; +static const u32 amd_xgbe_phy_serdes_dfe_tap_cfg[] = { + SPEED_1000_DFE_TAP_CONFIG, + SPEED_2500_DFE_TAP_CONFIG, + SPEED_10000_DFE_TAP_CONFIG, +}; + +static const u32 amd_xgbe_phy_serdes_dfe_tap_ena[] = { + SPEED_1000_DFE_TAP_ENABLE, + SPEED_2500_DFE_TAP_ENABLE, + SPEED_10000_DFE_TAP_ENABLE, +}; + enum amd_xgbe_phy_an { AMD_XGBE_AN_READY = 0, AMD_XGBE_AN_PAGE_RECEIVED, @@ -393,6 +420,8 @@ struct amd_xgbe_phy_priv { u32 serdes_cdr_rate[XGBE_PHY_SPEEDS]; u32 serdes_pq_skew[XGBE_PHY_SPEEDS]; u32 serdes_tx_amp[XGBE_PHY_SPEEDS]; + u32 serdes_dfe_tap_cfg[XGBE_PHY_SPEEDS]; + u32 serdes_dfe_tap_ena[XGBE_PHY_SPEEDS]; /* Auto-negotiation state machine support */ struct mutex an_mutex; @@ -481,11 +510,16 @@ static void amd_xgbe_phy_serdes_complete_ratechange(struct phy_device *phydev) status = XSIR0_IOREAD(priv, SIR0_STATUS); if (XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) && XSIR_GET_BITS(status, SIR0_STATUS, TX_READY)) - return; + goto rx_reset; } netdev_dbg(phydev->attached_dev, "SerDes rx/tx not ready (%#hx)\n", status); + +rx_reset: + /* Perform Rx reset for the DFE changes */ + XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RESETB_RXD, 0); + XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RESETB_RXD, 1); } static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev) @@ -534,6 +568,10 @@ static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev) priv->serdes_blwc[XGBE_PHY_SPEED_10000]); XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, priv->serdes_pq_skew[XGBE_PHY_SPEED_10000]); + XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG, + priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_10000]); + XRXTX_IOWRITE(priv, RXTX_REG22, + priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_10000]); amd_xgbe_phy_serdes_complete_ratechange(phydev); @@ -586,6 +624,10 @@ static int amd_xgbe_phy_gmii_2500_mode(struct phy_device *phydev) priv->serdes_blwc[XGBE_PHY_SPEED_2500]); XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, priv->serdes_pq_skew[XGBE_PHY_SPEED_2500]); + XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG, + priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_2500]); + XRXTX_IOWRITE(priv, RXTX_REG22, + priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_2500]); amd_xgbe_phy_serdes_complete_ratechange(phydev); @@ -638,6 +680,10 @@ static int amd_xgbe_phy_gmii_mode(struct phy_device *phydev) priv->serdes_blwc[XGBE_PHY_SPEED_1000]); XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, priv->serdes_pq_skew[XGBE_PHY_SPEED_1000]); + XRXTX_IOWRITE_BITS(priv, RXTX_REG129, 
RXDFE_CONFIG, + priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_1000]); + XRXTX_IOWRITE(priv, RXTX_REG22, + priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_1000]); amd_xgbe_phy_serdes_complete_ratechange(phydev); @@ -1668,6 +1714,38 @@ static int amd_xgbe_phy_probe(struct phy_device *phydev) sizeof(priv->serdes_tx_amp)); } + if (device_property_present(phy_dev, XGBE_PHY_DFE_CFG_PROPERTY)) { + ret = device_property_read_u32_array(phy_dev, + XGBE_PHY_DFE_CFG_PROPERTY, + priv->serdes_dfe_tap_cfg, + XGBE_PHY_SPEEDS); + if (ret) { + dev_err(dev, "invalid %s property\n", + XGBE_PHY_DFE_CFG_PROPERTY); + goto err_sir1; + } + } else { + memcpy(priv->serdes_dfe_tap_cfg, + amd_xgbe_phy_serdes_dfe_tap_cfg, + sizeof(priv->serdes_dfe_tap_cfg)); + } + + if (device_property_present(phy_dev, XGBE_PHY_DFE_ENA_PROPERTY)) { + ret = device_property_read_u32_array(phy_dev, + XGBE_PHY_DFE_ENA_PROPERTY, + priv->serdes_dfe_tap_ena, + XGBE_PHY_SPEEDS); + if (ret) { + dev_err(dev, "invalid %s property\n", + XGBE_PHY_DFE_ENA_PROPERTY); + goto err_sir1; + } + } else { + memcpy(priv->serdes_dfe_tap_ena, + amd_xgbe_phy_serdes_dfe_tap_ena, + sizeof(priv->serdes_dfe_tap_ena)); + } + phydev->priv = priv; if (!priv->adev || acpi_disabled) diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index cdcac6aa4260..52cd8db2c57d 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -236,6 +236,25 @@ static inline unsigned int phy_find_valid(unsigned int idx, u32 features) } /** + * phy_check_valid - check if there is a valid PHY setting which matches + * speed, duplex, and feature mask + * @speed: speed to match + * @duplex: duplex to match + * @features: A mask of the valid settings + * + * Description: Returns true if there is a valid setting, false otherwise. + */ +static inline bool phy_check_valid(int speed, int duplex, u32 features) +{ + unsigned int idx; + + idx = phy_find_valid(phy_find_setting(speed, duplex), features); + + return settings[idx].speed == speed && settings[idx].duplex == duplex && + (settings[idx].setting & features); +} + +/** * phy_sanitize_settings - make sure the PHY is set to supported speed and duplex * @phydev: the target phy_device struct * @@ -1045,7 +1064,6 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable) int eee_lp, eee_cap, eee_adv; u32 lp, cap, adv; int status; - unsigned int idx; /* Read phy status to properly get the right settings */ status = phy_read_status(phydev); @@ -1077,8 +1095,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable) adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv); lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp); - idx = phy_find_setting(phydev->speed, phydev->duplex); - if (!(lp & adv & settings[idx].setting)) + if (!phy_check_valid(phydev->speed, phydev->duplex, lp & adv)) goto eee_exit_err; if (clk_stop_enable) { diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 0e62274e884a..7d394846afc2 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -43,9 +43,7 @@ static struct team_port *team_port_get_rcu(const struct net_device *dev) { - struct team_port *port = rcu_dereference(dev->rx_handler_data); - - return team_port_exists(dev) ? 
port : NULL; + return rcu_dereference(dev->rx_handler_data); } static struct team_port *team_port_get_rtnl(const struct net_device *dev) @@ -1732,11 +1730,11 @@ static int team_set_mac_address(struct net_device *dev, void *p) if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); - rcu_read_lock(); - list_for_each_entry_rcu(port, &team->port_list, list) + mutex_lock(&team->lock); + list_for_each_entry(port, &team->port_list, list) if (team->ops.port_change_dev_addr) team->ops.port_change_dev_addr(team, port); - rcu_read_unlock(); + mutex_unlock(&team->lock); return 0; } diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig index 3bd9678315ad..7ba8d0885f12 100644 --- a/drivers/net/usb/Kconfig +++ b/drivers/net/usb/Kconfig @@ -161,6 +161,7 @@ config USB_NET_AX8817X * Linksys USB200M * Netgear FA120 * Sitecom LN-029 + * Sitecom LN-028 * Intellinet USB 2.0 Ethernet * ST Lab USB 2.0 Ethernet * TrendNet TU2-ET100 diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c index bf49792062a2..1173a24feda3 100644 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c @@ -979,6 +979,10 @@ static const struct usb_device_id products [] = { USB_DEVICE (0x0df6, 0x0056), .driver_info = (unsigned long) &ax88178_info, }, { + // Sitecom LN-028 "USB 2.0 10/100/1000 Ethernet adapter" + USB_DEVICE (0x0df6, 0x061c), + .driver_info = (unsigned long) &ax88178_info, +}, { // corega FEther USB2-TX USB_DEVICE (0x07aa, 0x0017), .driver_info = (unsigned long) &ax8817x_info, diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c index 3eed708a6182..1762ad3910b2 100644 --- a/drivers/net/usb/cx82310_eth.c +++ b/drivers/net/usb/cx82310_eth.c @@ -46,8 +46,7 @@ enum cx82310_status { }; #define CMD_PACKET_SIZE 64 -/* first command after power on can take around 8 seconds */ -#define CMD_TIMEOUT 15000 +#define CMD_TIMEOUT 100 #define CMD_REPLY_RETRY 5 #define CX82310_MTU 1514 @@ -78,8 +77,9 @@ static int cx82310_cmd(struct usbnet *dev, enum cx82310_cmd cmd, bool reply, ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, CMD_EP), buf, CMD_PACKET_SIZE, &actual_len, CMD_TIMEOUT); if (ret < 0) { - dev_err(&dev->udev->dev, "send command %#x: error %d\n", - cmd, ret); + if (cmd != CMD_GET_LINK_STATUS) + dev_err(&dev->udev->dev, "send command %#x: error %d\n", + cmd, ret); goto end; } @@ -90,8 +90,10 @@ static int cx82310_cmd(struct usbnet *dev, enum cx82310_cmd cmd, bool reply, buf, CMD_PACKET_SIZE, &actual_len, CMD_TIMEOUT); if (ret < 0) { - dev_err(&dev->udev->dev, - "reply receive error %d\n", ret); + if (cmd != CMD_GET_LINK_STATUS) + dev_err(&dev->udev->dev, + "reply receive error %d\n", + ret); goto end; } if (actual_len > 0) @@ -134,6 +136,8 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf) int ret; char buf[15]; struct usb_device *udev = dev->udev; + u8 link[3]; + int timeout = 50; /* avoid ADSL modems - continue only if iProduct is "USB NET CARD" */ if (usb_string(udev, udev->descriptor.iProduct, buf, sizeof(buf)) > 0 @@ -160,6 +164,20 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf) if (!dev->partial_data) return -ENOMEM; + /* wait for firmware to become ready (indicated by the link being up) */ + while (--timeout) { + ret = cx82310_cmd(dev, CMD_GET_LINK_STATUS, true, NULL, 0, + link, sizeof(link)); + /* the command can time out during boot - it's not an error */ + if (!ret && link[0] == 1 && link[2] == 1) + break; + 
msleep(500); + }; + if (!timeout) { + dev_err(&udev->dev, "firmware not ready in time\n"); + return -ETIMEDOUT; + } + /* enable ethernet mode (?) */ ret = cx82310_cmd(dev, CMD_ETHERNET_MODE, true, "\x01", 1, NULL, 0); if (ret) { @@ -300,9 +318,18 @@ static const struct driver_info cx82310_info = { .tx_fixup = cx82310_tx_fixup, }; +#define USB_DEVICE_CLASS(vend, prod, cl, sc, pr) \ + .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \ + USB_DEVICE_ID_MATCH_DEV_INFO, \ + .idVendor = (vend), \ + .idProduct = (prod), \ + .bDeviceClass = (cl), \ + .bDeviceSubClass = (sc), \ + .bDeviceProtocol = (pr) + static const struct usb_device_id products[] = { { - USB_DEVICE_AND_INTERFACE_INFO(0x0572, 0xcb01, 0xff, 0, 0), + USB_DEVICE_CLASS(0x0572, 0xcb01, 0xff, 0, 0), .driver_info = (unsigned long) &cx82310_info }, { }, diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index 9cdfb3fe9c15..778e91531fac 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c @@ -1594,7 +1594,7 @@ hso_wait_modem_status(struct hso_serial *serial, unsigned long arg) } cprev = cnow; } - current->state = TASK_RUNNING; + __set_current_state(TASK_RUNNING); remove_wait_queue(&tiocmget->waitq, &wait); return ret; diff --git a/drivers/net/usb/plusb.c b/drivers/net/usb/plusb.c index 3d18bb0eee85..1bfe0fcaccf5 100644 --- a/drivers/net/usb/plusb.c +++ b/drivers/net/usb/plusb.c @@ -134,6 +134,11 @@ static const struct usb_device_id products [] = { }, { USB_DEVICE(0x050d, 0x258a), /* Belkin F5U258/F5U279 (PL-25A1) */ .driver_info = (unsigned long) &prolific_info, +}, { + USB_DEVICE(0x3923, 0x7825), /* National Instruments USB + * Host-to-Host Cable + */ + .driver_info = (unsigned long) &prolific_info, }, { }, // END diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index f1ff3666f090..59b0e9754ae3 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -1448,8 +1448,10 @@ static void virtnet_free_queues(struct virtnet_info *vi) { int i; - for (i = 0; i < vi->max_queue_pairs; i++) + for (i = 0; i < vi->max_queue_pairs; i++) { + napi_hash_del(&vi->rq[i].napi); netif_napi_del(&vi->rq[i].napi); + } kfree(vi->rq); kfree(vi->sq); @@ -1948,11 +1950,8 @@ static int virtnet_freeze(struct virtio_device *vdev) cancel_delayed_work_sync(&vi->refill); if (netif_running(vi->dev)) { - for (i = 0; i < vi->max_queue_pairs; i++) { + for (i = 0; i < vi->max_queue_pairs; i++) napi_disable(&vi->rq[i].napi); - napi_hash_del(&vi->rq[i].napi); - netif_napi_del(&vi->rq[i].napi); - } } remove_vq_common(vi); diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 1e0a775ea882..f8528a4cf54f 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1218,7 +1218,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) goto drop; flags &= ~VXLAN_HF_RCO; - vni &= VXLAN_VID_MASK; + vni &= VXLAN_VNI_MASK; } /* For backwards compatibility, only allow reserved fields to be @@ -1239,7 +1239,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) flags &= ~VXLAN_GBP_USED_BITS; } - if (flags || (vni & ~VXLAN_VID_MASK)) { + if (flags || vni & ~VXLAN_VNI_MASK) { /* If there are any unprocessed flags remaining treat * this as a malformed packet. 
This behavior diverges from * VXLAN RFC (RFC7348) which stipulates that bits in reserved diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c index 83c39e2858bf..88d121d43c08 100644 --- a/drivers/net/wan/cosa.c +++ b/drivers/net/wan/cosa.c @@ -806,21 +806,21 @@ static ssize_t cosa_read(struct file *file, spin_lock_irqsave(&cosa->lock, flags); add_wait_queue(&chan->rxwaitq, &wait); while (!chan->rx_status) { - current->state = TASK_INTERRUPTIBLE; + set_current_state(TASK_INTERRUPTIBLE); spin_unlock_irqrestore(&cosa->lock, flags); schedule(); spin_lock_irqsave(&cosa->lock, flags); if (signal_pending(current) && chan->rx_status == 0) { chan->rx_status = 1; remove_wait_queue(&chan->rxwaitq, &wait); - current->state = TASK_RUNNING; + __set_current_state(TASK_RUNNING); spin_unlock_irqrestore(&cosa->lock, flags); mutex_unlock(&chan->rlock); return -ERESTARTSYS; } } remove_wait_queue(&chan->rxwaitq, &wait); - current->state = TASK_RUNNING; + __set_current_state(TASK_RUNNING); kbuf = chan->rxdata; count = chan->rxsize; spin_unlock_irqrestore(&cosa->lock, flags); @@ -890,14 +890,14 @@ static ssize_t cosa_write(struct file *file, spin_lock_irqsave(&cosa->lock, flags); add_wait_queue(&chan->txwaitq, &wait); while (!chan->tx_status) { - current->state = TASK_INTERRUPTIBLE; + set_current_state(TASK_INTERRUPTIBLE); spin_unlock_irqrestore(&cosa->lock, flags); schedule(); spin_lock_irqsave(&cosa->lock, flags); if (signal_pending(current) && chan->tx_status == 0) { chan->tx_status = 1; remove_wait_queue(&chan->txwaitq, &wait); - current->state = TASK_RUNNING; + __set_current_state(TASK_RUNNING); chan->tx_status = 1; spin_unlock_irqrestore(&cosa->lock, flags); up(&chan->wsem); @@ -905,7 +905,7 @@ static ssize_t cosa_write(struct file *file, } } remove_wait_queue(&chan->txwaitq, &wait); - current->state = TASK_RUNNING; + __set_current_state(TASK_RUNNING); up(&chan->wsem); spin_unlock_irqrestore(&cosa->lock, flags); kfree(kbuf); diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c index ccbdb05b28cd..75345c1e8c34 100644 --- a/drivers/net/wireless/b43/main.c +++ b/drivers/net/wireless/b43/main.c @@ -5370,6 +5370,7 @@ static void b43_supported_bands(struct b43_wldev *dev, bool *have_2ghz_phy, case 0x432a: /* BCM4321 */ case 0x432d: /* BCM4322 */ case 0x4352: /* BCM43222 */ + case 0x435a: /* BCM43228 */ case 0x4333: /* BCM4331 */ case 0x43a2: /* BCM4360 */ case 0x43b3: /* BCM4352 */ diff --git a/drivers/net/wireless/brcm80211/brcmfmac/vendor.c b/drivers/net/wireless/brcm80211/brcmfmac/vendor.c index 50cdf7090198..8eff2753abad 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/vendor.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/vendor.c @@ -39,13 +39,22 @@ static int brcmf_cfg80211_vndr_cmds_dcmd_handler(struct wiphy *wiphy, void *dcmd_buf = NULL, *wr_pointer; u16 msglen, maxmsglen = PAGE_SIZE - 0x100; - brcmf_dbg(TRACE, "cmd %x set %d len %d\n", cmdhdr->cmd, cmdhdr->set, - cmdhdr->len); + if (len < sizeof(*cmdhdr)) { + brcmf_err("vendor command too short: %d\n", len); + return -EINVAL; + } vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev); ifp = vif->ifp; - len -= sizeof(struct brcmf_vndr_dcmd_hdr); + brcmf_dbg(TRACE, "ifidx=%d, cmd=%d\n", ifp->ifidx, cmdhdr->cmd); + + if (cmdhdr->offset > len) { + brcmf_err("bad buffer offset %d > %d\n", cmdhdr->offset, len); + return -EINVAL; + } + + len -= cmdhdr->offset; ret_len = cmdhdr->len; if (ret_len > 0 || len > 0) { if (len > BRCMF_DCMD_MAXLEN) { diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c 
b/drivers/net/wireless/iwlwifi/iwl-1000.c index c3817fae16c0..06f6cc08f451 100644 --- a/drivers/net/wireless/iwlwifi/iwl-1000.c +++ b/drivers/net/wireless/iwlwifi/iwl-1000.c @@ -95,7 +95,8 @@ static const struct iwl_eeprom_params iwl1000_eeprom_params = { .nvm_calib_ver = EEPROM_1000_TX_POWER_VERSION, \ .base_params = &iwl1000_base_params, \ .eeprom_params = &iwl1000_eeprom_params, \ - .led_mode = IWL_LED_BLINK + .led_mode = IWL_LED_BLINK, \ + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K const struct iwl_cfg iwl1000_bgn_cfg = { .name = "Intel(R) Centrino(R) Wireless-N 1000 BGN", @@ -121,7 +122,8 @@ const struct iwl_cfg iwl1000_bg_cfg = { .base_params = &iwl1000_base_params, \ .eeprom_params = &iwl1000_eeprom_params, \ .led_mode = IWL_LED_RF_STATE, \ - .rx_with_siso_diversity = true + .rx_with_siso_diversity = true, \ + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K const struct iwl_cfg iwl100_bgn_cfg = { .name = "Intel(R) Centrino(R) Wireless-N 100 BGN", diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c index 21e5d0843a62..890b95f497d6 100644 --- a/drivers/net/wireless/iwlwifi/iwl-2000.c +++ b/drivers/net/wireless/iwlwifi/iwl-2000.c @@ -123,7 +123,9 @@ static const struct iwl_eeprom_params iwl20x0_eeprom_params = { .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ .base_params = &iwl2000_base_params, \ .eeprom_params = &iwl20x0_eeprom_params, \ - .led_mode = IWL_LED_RF_STATE + .led_mode = IWL_LED_RF_STATE, \ + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K + const struct iwl_cfg iwl2000_2bgn_cfg = { .name = "Intel(R) Centrino(R) Wireless-N 2200 BGN", @@ -149,7 +151,8 @@ const struct iwl_cfg iwl2000_2bgn_d_cfg = { .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ .base_params = &iwl2030_base_params, \ .eeprom_params = &iwl20x0_eeprom_params, \ - .led_mode = IWL_LED_RF_STATE + .led_mode = IWL_LED_RF_STATE, \ + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K const struct iwl_cfg iwl2030_2bgn_cfg = { .name = "Intel(R) Centrino(R) Wireless-N 2230 BGN", @@ -170,7 +173,8 @@ const struct iwl_cfg iwl2030_2bgn_cfg = { .base_params = &iwl2000_base_params, \ .eeprom_params = &iwl20x0_eeprom_params, \ .led_mode = IWL_LED_RF_STATE, \ - .rx_with_siso_diversity = true + .rx_with_siso_diversity = true, \ + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K const struct iwl_cfg iwl105_bgn_cfg = { .name = "Intel(R) Centrino(R) Wireless-N 105 BGN", @@ -197,7 +201,8 @@ const struct iwl_cfg iwl105_bgn_d_cfg = { .base_params = &iwl2030_base_params, \ .eeprom_params = &iwl20x0_eeprom_params, \ .led_mode = IWL_LED_RF_STATE, \ - .rx_with_siso_diversity = true + .rx_with_siso_diversity = true, \ + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K const struct iwl_cfg iwl135_bgn_cfg = { .name = "Intel(R) Centrino(R) Wireless-N 135 BGN", diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c index 332bbede39e5..724194e23414 100644 --- a/drivers/net/wireless/iwlwifi/iwl-5000.c +++ b/drivers/net/wireless/iwlwifi/iwl-5000.c @@ -93,7 +93,8 @@ static const struct iwl_eeprom_params iwl5000_eeprom_params = { .nvm_calib_ver = EEPROM_5000_TX_POWER_VERSION, \ .base_params = &iwl5000_base_params, \ .eeprom_params = &iwl5000_eeprom_params, \ - .led_mode = IWL_LED_BLINK + .led_mode = IWL_LED_BLINK, \ + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K const struct iwl_cfg iwl5300_agn_cfg = { .name = "Intel(R) Ultimate N WiFi Link 5300 AGN", @@ -158,7 +159,8 @@ const struct iwl_cfg iwl5350_agn_cfg = { .base_params = 
&iwl5000_base_params, \ .eeprom_params = &iwl5000_eeprom_params, \ .led_mode = IWL_LED_BLINK, \ - .internal_wimax_coex = true + .internal_wimax_coex = true, \ + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K const struct iwl_cfg iwl5150_agn_cfg = { .name = "Intel(R) WiMAX/WiFi Link 5150 AGN", diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c index 8f2c3c8c6b84..21b2630763dc 100644 --- a/drivers/net/wireless/iwlwifi/iwl-6000.c +++ b/drivers/net/wireless/iwlwifi/iwl-6000.c @@ -145,7 +145,8 @@ static const struct iwl_eeprom_params iwl6000_eeprom_params = { .nvm_calib_ver = EEPROM_6005_TX_POWER_VERSION, \ .base_params = &iwl6000_g2_base_params, \ .eeprom_params = &iwl6000_eeprom_params, \ - .led_mode = IWL_LED_RF_STATE + .led_mode = IWL_LED_RF_STATE, \ + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K const struct iwl_cfg iwl6005_2agn_cfg = { .name = "Intel(R) Centrino(R) Advanced-N 6205 AGN", @@ -199,7 +200,8 @@ const struct iwl_cfg iwl6005_2agn_mow2_cfg = { .nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \ .base_params = &iwl6000_g2_base_params, \ .eeprom_params = &iwl6000_eeprom_params, \ - .led_mode = IWL_LED_RF_STATE + .led_mode = IWL_LED_RF_STATE, \ + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K const struct iwl_cfg iwl6030_2agn_cfg = { .name = "Intel(R) Centrino(R) Advanced-N 6230 AGN", @@ -235,7 +237,8 @@ const struct iwl_cfg iwl6030_2bg_cfg = { .nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \ .base_params = &iwl6000_g2_base_params, \ .eeprom_params = &iwl6000_eeprom_params, \ - .led_mode = IWL_LED_RF_STATE + .led_mode = IWL_LED_RF_STATE, \ + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K const struct iwl_cfg iwl6035_2agn_cfg = { .name = "Intel(R) Centrino(R) Advanced-N 6235 AGN", @@ -290,7 +293,8 @@ const struct iwl_cfg iwl130_bg_cfg = { .nvm_calib_ver = EEPROM_6000_TX_POWER_VERSION, \ .base_params = &iwl6000_base_params, \ .eeprom_params = &iwl6000_eeprom_params, \ - .led_mode = IWL_LED_BLINK + .led_mode = IWL_LED_BLINK, \ + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K const struct iwl_cfg iwl6000i_2agn_cfg = { .name = "Intel(R) Centrino(R) Advanced-N 6200 AGN", @@ -322,7 +326,8 @@ const struct iwl_cfg iwl6000i_2bg_cfg = { .base_params = &iwl6050_base_params, \ .eeprom_params = &iwl6000_eeprom_params, \ .led_mode = IWL_LED_BLINK, \ - .internal_wimax_coex = true + .internal_wimax_coex = true, \ + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K const struct iwl_cfg iwl6050_2agn_cfg = { .name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 AGN", @@ -347,7 +352,8 @@ const struct iwl_cfg iwl6050_2abg_cfg = { .base_params = &iwl6050_base_params, \ .eeprom_params = &iwl6000_eeprom_params, \ .led_mode = IWL_LED_BLINK, \ - .internal_wimax_coex = true + .internal_wimax_coex = true, \ + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K const struct iwl_cfg iwl6150_bgn_cfg = { .name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BGN", diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c index 1ec4d55155f7..7810c41cf9a7 100644 --- a/drivers/net/wireless/iwlwifi/mvm/coex.c +++ b/drivers/net/wireless/iwlwifi/mvm/coex.c @@ -793,7 +793,8 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, if (!vif->bss_conf.assoc) smps_mode = IEEE80211_SMPS_AUTOMATIC; - if (IWL_COEX_IS_RRC_ON(mvm->last_bt_notif.ttc_rrc_status, + if (mvmvif->phy_ctxt && + IWL_COEX_IS_RRC_ON(mvm->last_bt_notif.ttc_rrc_status, mvmvif->phy_ctxt->id)) smps_mode = IEEE80211_SMPS_AUTOMATIC; diff --git 
a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c index d530ef3da107..542ee74f290a 100644 --- a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c +++ b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c @@ -832,7 +832,8 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, if (!vif->bss_conf.assoc) smps_mode = IEEE80211_SMPS_AUTOMATIC; - if (data->notif->rrc_enabled & BIT(mvmvif->phy_ctxt->id)) + if (mvmvif->phy_ctxt && + data->notif->rrc_enabled & BIT(mvmvif->phy_ctxt->id)) smps_mode = IEEE80211_SMPS_AUTOMATIC; IWL_DEBUG_COEX(data->mvm, diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index 1ff7ec08532d..09654e73a533 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@ -405,7 +405,10 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ]; - if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_BEAMFORMER) + if ((mvm->fw->ucode_capa.capa[0] & + IWL_UCODE_TLV_CAPA_BEAMFORMER) && + (mvm->fw->ucode_capa.api[0] & + IWL_UCODE_TLV_API_LQ_SS_PARAMS)) hw->wiphy->bands[IEEE80211_BAND_5GHZ]->vht_cap.cap |= IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE; } @@ -2215,7 +2218,19 @@ static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw, mutex_lock(&mvm->mutex); - iwl_mvm_cancel_scan(mvm); + /* Due to a race condition, it's possible that mac80211 asks + * us to stop a hw_scan when it's already stopped. This can + * happen, for instance, if we stopped the scan ourselves, + * called ieee80211_scan_completed() and the userspace called + * cancel scan scan before ieee80211_scan_work() could run. + * To handle that, simply return if the scan is not running. + */ + /* FIXME: for now, we ignore this race for UMAC scans, since + * they don't set the scan_status. + */ + if ((mvm->scan_status == IWL_MVM_SCAN_OS) || + (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)) + iwl_mvm_cancel_scan(mvm); mutex_unlock(&mvm->mutex); } @@ -2559,12 +2574,29 @@ static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw, int ret; mutex_lock(&mvm->mutex); + + /* Due to a race condition, it's possible that mac80211 asks + * us to stop a sched_scan when it's already stopped. This + * can happen, for instance, if we stopped the scan ourselves, + * called ieee80211_sched_scan_stopped() and the userspace called + * stop sched scan scan before ieee80211_sched_scan_stopped_work() + * could run. To handle this, simply return if the scan is + * not running. + */ + /* FIXME: for now, we ignore this race for UMAC scans, since + * they don't set the scan_status. 
+ */ + if (mvm->scan_status != IWL_MVM_SCAN_SCHED && + !(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { + mutex_unlock(&mvm->mutex); + return 0; + } + ret = iwl_mvm_scan_offload_stop(mvm, false); mutex_unlock(&mvm->mutex); iwl_mvm_wait_for_async_handlers(mvm); return ret; - } static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c index 194bd1f939ca..efa9688a4cf1 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/iwlwifi/mvm/rs.c @@ -134,9 +134,12 @@ enum rs_column_mode { #define MAX_NEXT_COLUMNS 7 #define MAX_COLUMN_CHECKS 3 +struct rs_tx_column; + typedef bool (*allow_column_func_t) (struct iwl_mvm *mvm, struct ieee80211_sta *sta, - struct iwl_scale_tbl_info *tbl); + struct iwl_scale_tbl_info *tbl, + const struct rs_tx_column *next_col); struct rs_tx_column { enum rs_column_mode mode; @@ -147,13 +150,15 @@ struct rs_tx_column { }; static bool rs_ant_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - struct iwl_scale_tbl_info *tbl) + struct iwl_scale_tbl_info *tbl, + const struct rs_tx_column *next_col) { - return iwl_mvm_bt_coex_is_ant_avail(mvm, tbl->rate.ant); + return iwl_mvm_bt_coex_is_ant_avail(mvm, next_col->ant); } static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - struct iwl_scale_tbl_info *tbl) + struct iwl_scale_tbl_info *tbl, + const struct rs_tx_column *next_col) { if (!sta->ht_cap.ht_supported) return false; @@ -171,7 +176,8 @@ static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, } static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - struct iwl_scale_tbl_info *tbl) + struct iwl_scale_tbl_info *tbl, + const struct rs_tx_column *next_col) { if (!sta->ht_cap.ht_supported) return false; @@ -180,7 +186,8 @@ static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, } static bool rs_sgi_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - struct iwl_scale_tbl_info *tbl) + struct iwl_scale_tbl_info *tbl, + const struct rs_tx_column *next_col) { struct rs_rate *rate = &tbl->rate; struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; @@ -1590,7 +1597,7 @@ static enum rs_column rs_get_next_column(struct iwl_mvm *mvm, for (j = 0; j < MAX_COLUMN_CHECKS; j++) { allow_func = next_col->checks[j]; - if (allow_func && !allow_func(mvm, sta, tbl)) + if (allow_func && !allow_func(mvm, sta, tbl, next_col)) break; } diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c index 7e9aa3cb3254..c47c8051da77 100644 --- a/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c @@ -1128,8 +1128,10 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify) if (mvm->scan_status == IWL_MVM_SCAN_NONE) return 0; - if (iwl_mvm_is_radio_killed(mvm)) + if (iwl_mvm_is_radio_killed(mvm)) { + ret = 0; goto out; + } if (mvm->scan_status != IWL_MVM_SCAN_SCHED && (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) || @@ -1148,16 +1150,14 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify) IWL_DEBUG_SCAN(mvm, "Send stop %sscan failed %d\n", sched ? "offloaded " : "", ret); iwl_remove_notification(&mvm->notif_wait, &wait_scan_done); - return ret; + goto out; } IWL_DEBUG_SCAN(mvm, "Successfully sent stop %sscan\n", sched ? 
"offloaded " : ""); ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ); - if (ret) - return ret; - +out: /* * Clear the scan status so the next scan requests will succeed. This * also ensures the Rx handler doesn't do anything, as the scan was @@ -1167,7 +1167,6 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify) if (mvm->scan_status == IWL_MVM_SCAN_OS) iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); -out: mvm->scan_status = IWL_MVM_SCAN_NONE; if (notify) { @@ -1177,7 +1176,7 @@ out: ieee80211_scan_completed(mvm->hw, true); } - return 0; + return ret; } static void iwl_mvm_unified_scan_fill_tx_cmd(struct iwl_mvm *mvm, diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c index 54fafbf9a711..f8d6f306dd76 100644 --- a/drivers/net/wireless/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c @@ -750,8 +750,7 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm) * request */ list_for_each_entry(te_data, &mvm->time_event_list, list) { - if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE && - te_data->running) { + if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) { mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif); is_p2p = true; goto remove_te; @@ -766,10 +765,8 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm) * request */ list_for_each_entry(te_data, &mvm->aux_roc_te_list, list) { - if (te_data->running) { - mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif); - goto remove_te; - } + mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif); + goto remove_te; } remove_te: diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 4a4c6586a8d2..8908be6dbc48 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -946,7 +946,8 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw, goto nla_put_failure; genlmsg_end(skb, msg_head); - genlmsg_unicast(&init_net, skb, dst_portid); + if (genlmsg_unicast(&init_net, skb, dst_portid)) + goto err_free_txskb; /* Enqueue the packet */ skb_queue_tail(&data->pending, my_skb); @@ -955,6 +956,8 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw, return; nla_put_failure: + nlmsg_free(skb); +err_free_txskb: printk(KERN_DEBUG "mac80211_hwsim: error occurred in %s\n", __func__); ieee80211_free_txskb(hw, my_skb); data->tx_failed++; diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c index 1d4677460711..074f716020aa 100644 --- a/drivers/net/wireless/rtlwifi/base.c +++ b/drivers/net/wireless/rtlwifi/base.c @@ -1386,8 +1386,11 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx) } return true; - } else if (0x86DD == ether_type) { - return true; + } else if (ETH_P_IPV6 == ether_type) { + /* TODO: Handle any IPv6 cases that need special handling. 
+ * For now, always return false + */ + goto end; } end: diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index f38227afe099..3aa8648080c8 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -340,12 +340,11 @@ static void xenvif_get_ethtool_stats(struct net_device *dev, unsigned int num_queues = vif->num_queues; int i; unsigned int queue_index; - struct xenvif_stats *vif_stats; for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) { unsigned long accum = 0; for (queue_index = 0; queue_index < num_queues; ++queue_index) { - vif_stats = &vif->queues[queue_index].stats; + void *vif_stats = &vif->queues[queue_index].stats; accum += *(unsigned long *)(vif_stats + xenvif_stats[i].offset); } data[i] = accum; diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index f7a31d2cb3f1..997cf0901ac2 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -96,6 +96,7 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx, static void make_tx_response(struct xenvif_queue *queue, struct xen_netif_tx_request *txp, s8 st); +static void push_tx_responses(struct xenvif_queue *queue); static inline int tx_work_todo(struct xenvif_queue *queue); @@ -657,6 +658,7 @@ static void xenvif_tx_err(struct xenvif_queue *queue, do { spin_lock_irqsave(&queue->response_lock, flags); make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR); + push_tx_responses(queue); spin_unlock_irqrestore(&queue->response_lock, flags); if (cons == end) break; @@ -1343,7 +1345,7 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s { unsigned int offset = skb_headlen(skb); skb_frag_t frags[MAX_SKB_FRAGS]; - int i; + int i, f; struct ubuf_info *uarg; struct sk_buff *nskb = skb_shinfo(skb)->frag_list; @@ -1383,23 +1385,25 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s frags[i].page_offset = 0; skb_frag_size_set(&frags[i], len); } - /* swap out with old one */ - memcpy(skb_shinfo(skb)->frags, - frags, - i * sizeof(skb_frag_t)); - skb_shinfo(skb)->nr_frags = i; - skb->truesize += i * PAGE_SIZE; - /* remove traces of mapped pages and frag_list */ + /* Copied all the bits from the frag list -- free it. */ skb_frag_list_init(skb); + xenvif_skb_zerocopy_prepare(queue, nskb); + kfree_skb(nskb); + + /* Release all the original (foreign) frags. */ + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) + skb_frag_unref(skb, f); uarg = skb_shinfo(skb)->destructor_arg; /* increase inflight counter to offset decrement in callback */ atomic_inc(&queue->inflight_packets); uarg->callback(uarg, true); skb_shinfo(skb)->destructor_arg = NULL; - xenvif_skb_zerocopy_prepare(queue, nskb); - kfree_skb(nskb); + /* Fill the skb with the new (local) frags. */ + memcpy(skb_shinfo(skb)->frags, frags, i * sizeof(skb_frag_t)); + skb_shinfo(skb)->nr_frags = i; + skb->truesize += i * PAGE_SIZE; return 0; } @@ -1652,13 +1656,20 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx, unsigned long flags; pending_tx_info = &queue->pending_tx_info[pending_idx]; + spin_lock_irqsave(&queue->response_lock, flags); + make_tx_response(queue, &pending_tx_info->req, status); - index = pending_index(queue->pending_prod); + + /* Release the pending index before pusing the Tx response so + * its available before a new Tx request is pushed by the + * frontend. 
+ */ + index = pending_index(queue->pending_prod++); queue->pending_ring[index] = pending_idx; - /* TX shouldn't use the index before we give it back here */ - mb(); - queue->pending_prod++; + + push_tx_responses(queue); + spin_unlock_irqrestore(&queue->response_lock, flags); } @@ -1669,7 +1680,6 @@ static void make_tx_response(struct xenvif_queue *queue, { RING_IDX i = queue->tx.rsp_prod_pvt; struct xen_netif_tx_response *resp; - int notify; resp = RING_GET_RESPONSE(&queue->tx, i); resp->id = txp->id; @@ -1679,6 +1689,12 @@ static void make_tx_response(struct xenvif_queue *queue, RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL; queue->tx.rsp_prod_pvt = ++i; +} + +static void push_tx_responses(struct xenvif_queue *queue) +{ + int notify; + RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify); if (notify) notify_remote_via_irq(queue->tx_irq); |
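
Note on the xen-netback hunk above: the patch separates writing Tx responses into the ring (make_tx_response) from publishing them and kicking the frontend (the new push_tx_responses), so several responses queued under one response_lock acquisition result in a single producer-index update and at most one interrupt. The toy user-space C sketch below models only that split; every name in it (toy_ring, toy_make_response, toy_push_responses) is hypothetical and it is not the Xen ring API.

/*
 * Toy model of the make_tx_response()/push_tx_responses() split:
 * responses are written against a private producer index, and the
 * index visible to the "frontend" is published (with one notify)
 * in a single place.  Compile with: cc -pthread toy.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 8

struct toy_ring {
	int rsp[RING_SIZE];
	unsigned int rsp_prod;      /* index the peer is allowed to read up to */
	unsigned int rsp_prod_pvt;  /* private index, not yet published */
	pthread_mutex_t lock;
};

/* analogue of make_tx_response(): write one response, bump private index */
static void toy_make_response(struct toy_ring *r, int status)
{
	r->rsp[r->rsp_prod_pvt % RING_SIZE] = status;
	r->rsp_prod_pvt++;
}

/* analogue of push_tx_responses(): publish everything queued so far */
static void toy_push_responses(struct toy_ring *r)
{
	bool notify = r->rsp_prod_pvt != r->rsp_prod;

	r->rsp_prod = r->rsp_prod_pvt;
	if (notify)
		printf("notify peer: rsp_prod=%u\n", r->rsp_prod);
}

int main(void)
{
	struct toy_ring ring = { .lock = PTHREAD_MUTEX_INITIALIZER };

	pthread_mutex_lock(&ring.lock);
	toy_make_response(&ring, 0);   /* OKAY-style status */
	toy_make_response(&ring, -1);  /* ERROR-style status */
	toy_push_responses(&ring);     /* one publish, at most one notification */
	pthread_mutex_unlock(&ring.lock);

	return 0;
}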
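
Note on the timer hunks earlier in this diff (smc91c92_cs smc_open and stmmac_eee_init): both replace the open-coded init_timer()/field-assignment/add_timer() sequence with setup_timer() followed by mod_timer(). A minimal module-style sketch of that pattern is below, written against the pre-4.15 timer API (callback takes an unsigned long); the "demo" names are made up for illustration and do not come from either driver.

/*
 * Sketch of the setup_timer()/mod_timer() pattern, assuming a
 * pre-4.15 kernel where the timer callback takes unsigned long.
 */
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list demo_timer;

static void demo_timer_fn(unsigned long data)
{
	pr_info("demo timer fired, data=%lu\n", data);
	/* re-arm one second out, as the media_check timer does */
	mod_timer(&demo_timer, jiffies + HZ);
}

static int __init demo_init(void)
{
	/* one call instead of assigning .function/.data/.expires by hand */
	setup_timer(&demo_timer, demo_timer_fn, 0UL);
	mod_timer(&demo_timer, jiffies + HZ);
	return 0;
}

static void __exit demo_exit(void)
{
	del_timer_sync(&demo_timer);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");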